;
;  (c) Copyright 1986 HEWLETT-PACKARD COMPANY
;
;  To anyone who acknowledges that this file is provided "AS IS"
;  without any express or implied warranty:
;      permission to use, copy, modify, and distribute this file
;  for any purpose is hereby granted without fee, provided that
;  the above copyright notice and this notice appears in all
;  copies, and that the name of Hewlett-Packard Company not be
;  used in advertising or publicity pertaining to distribution
;  of the software without specific, written prior permission.
;  Hewlett-Packard Company makes no representations about the
;  suitability of this software for any purpose.
;

; Standard Hardware Register Definitions for Use with Assembler
; version A.08.06
;	- fr16-31 added at Utah
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Hardware General Registers
r0: .equ	0
r1: .equ	1
r2: .equ	2
r3: .equ	3
r4: .equ	4
r5: .equ	5
r6: .equ	6
r7: .equ	7
r8: .equ	8
r9: .equ	9
r10: .equ	10
r11: .equ	11
r12: .equ	12
r13: .equ	13
r14: .equ	14
r15: .equ	15
r16: .equ	16
r17: .equ	17
r18: .equ	18
r19: .equ	19
r20: .equ	20
r21: .equ	21
r22: .equ	22
r23: .equ	23
r24: .equ	24
r25: .equ	25
r26: .equ	26
r27: .equ	27
r28: .equ	28
r29: .equ	29
r30: .equ	30
r31: .equ	31

; Hardware Space Registers
sr0: .equ	0
sr1: .equ	1
sr2: .equ	2
sr3: .equ	3
sr4: .equ	4
sr5: .equ	5
sr6: .equ	6
sr7: .equ	7

; Hardware Floating Point Registers
fr0: .equ	0
fr1: .equ	1
fr2: .equ	2
fr3: .equ	3
fr4: .equ	4
fr5: .equ	5
fr6: .equ	6
fr7: .equ	7
fr8: .equ	8
fr9: .equ	9
fr10: .equ	10
fr11: .equ	11
fr12: .equ	12
fr13: .equ	13
fr14: .equ	14
fr15: .equ	15
fr16: .equ	16
fr17: .equ	17
fr18: .equ	18
fr19: .equ	19
fr20: .equ	20
fr21: .equ	21
fr22: .equ	22
fr23: .equ	23
fr24: .equ	24
fr25: .equ	25
fr26: .equ	26
fr27: .equ	27
fr28: .equ	28
fr29: .equ	29
fr30: .equ	30
fr31: .equ	31

; Hardware Control Registers
cr0: .equ	0
rctr: .equ	0			; Recovery Counter Register

cr8: .equ	8			; Protection ID 1
pidr1: .equ	8

cr9: .equ	9			; Protection ID 2
pidr2: .equ	9

cr10: .equ	10
ccr: .equ	10			; Coprocessor Configuration Register

cr11: .equ	11
sar: .equ	11			; Shift Amount Register

cr12: .equ	12
pidr3: .equ	12			; Protection ID 3

cr13: .equ	13
pidr4: .equ	13			; Protection ID 4

cr14: .equ	14
iva: .equ	14			; Interrupt Vector Address

cr15: .equ	15
eiem: .equ	15			; External Interrupt Enable Mask

cr16: .equ	16
itmr: .equ	16			; Interval Timer

cr17: .equ	17
pcsq: .equ	17			; Program Counter Space queue

cr18: .equ	18
pcoq: .equ	18			; Program Counter Offset queue

cr19: .equ	19
iir: .equ	19			; Interruption Instruction Register

cr20: .equ	20
isr: .equ	20			; Interruption Space Register

cr21: .equ	21
ior: .equ	21			; Interruption Offset Register

cr22: .equ	22
ipsw: .equ	22			; Interruption Processor Status Word

cr23: .equ	23
eirr: .equ	23			; External Interrupt Request

cr24: .equ	24
ppda: .equ	24			; Physical Page Directory Address
tr0: .equ	24			; Temporary register 0

cr25: .equ	25
hta: .equ	25			; Hash Table Address
tr1: .equ	25			; Temporary register 1

cr26: .equ	26
tr2: .equ	26			; Temporary register 2

cr27: .equ	27
tr3: .equ	27			; Temporary register 3

cr28: .equ	28
tr4: .equ	28			; Temporary register 4

cr29: .equ	29
tr5: .equ	29			; Temporary register 5

cr30: .equ	30
tr6: .equ	30			; Temporary register 6

cr31: .equ	31
tr7: .equ	31			; Temporary register 7
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Procedure Call Convention                                                  ~
; Register Definitions for Use with Assembler                                ~
; version A.08.06
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Software Architecture General Registers
rp: .equ	r2	; return pointer
mrp: .equ	r31	; millicode return pointer
ret0: .equ	r28	; return value
ret1: .equ	r29	; return value (high part of double)
sl: .equ	r29	; static link
sp: .equ	r30	; stack pointer
dp: .equ	r27	; data pointer
arg0: .equ	r26	; argument
arg1: .equ	r25	; argument or high part of double argument
arg2: .equ	r24	; argument
arg3: .equ	r23	; argument or high part of double argument
;_____________________________________________________________________________
; Software Architecture Space Registers
;		sr0	; return link from BLE
sret: .equ	sr1	; return value
sarg: .equ	sr1	; argument
;		sr4	; PC SPACE tracker
;		sr5	; process private data
;_____________________________________________________________________________
; Software Architecture Pseudo Registers
previous_sp: .equ	64	; old stack pointer (locates previous frame)

;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
;~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
; Standard space and subspace definitions.  version A.08.06
; These are generally suitable for programs on HP-UX and HPE.
; Statements commented out are used when building such things as operating
; system kernels.
;;;;;;;;;;;;;;;;
	.SPACE	$TEXT$,		SPNUM=0,SORT=8
	.subspa $MILLICODE$,	QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=8
	.subspa $LIT$,		QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=16
	.subspa $CODE$,		QUAD=0,ALIGN=8,ACCESS=0x2c,SORT=24
; Additional code subspaces should have ALIGN=8 for an interspace BV
; and should have SORT=24.
;
; For an incomplete executable (program bound to shared libraries),
; sort keys $GLOBAL$ -1 and $GLOBAL$ -2 are reserved for the $DLT$
; and $PLT$ subspaces respectively.
;;;;;;;;;;;;;;;
	.SPACE $PRIVATE$,	SPNUM=1,PRIVATE,SORT=16
	.subspa $GLOBAL$,	QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=40
	.import $global$
	.subspa $DATA$,		QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=16
	.subspa $BSS$,		QUAD=1,ALIGN=8,ACCESS=0x1f,SORT=82,ZERO

	.SPACE $TEXT$
	.SUBSPA $MILLICODE$

	.align 8
	.EXPORT $$remI,millicode
;	.IMPORT	cerror
$$remI:
	.PROC
	.CALLINFO millicode
	.ENTRY
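; Roughly: trap if the divisor is zero, take |dividend| into ret1, negate
; the divisor into r1 (clearing carry), prime the V-bit with ds, and zero
; r1 before the 32 ds/addc divide steps below.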
	addit,= 0,arg1,r0
	add,>= r0,arg0,ret1
	sub r0,ret1,ret1
	sub r0,arg1,r1
	ds r0,r1,r0
	or r0,r0,r1
	add ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	ds r1,arg1,r1
	addc ret1,ret1,ret1
	movb,>=,n r1,ret1,remI300
	add,< arg1,r0,r0
	add,tr r1,arg1,ret1
	sub r1,arg1,ret1
remI300: add,>= arg0,r0,r0
	sub r0,ret1,ret1
	bv r0(r31)
	nop
	.EXIT
	.PROCEND
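; A rough C model of $$remI (illustrative sketch, not part of the original
; source): the remainder of truncating division, taking the sign of the
; dividend, with a trap on a zero divisor.
;
;   int remI(int a, int b) { return a % b; }   /* C99 % also truncates */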

bit1:  .equ 1
bit30: .equ 30
bit31: .equ 31

len2:  .equ 2
len4:  .equ 4

$$dyncall:
	.proc
	.callinfo NO_CALLS
	.entry
	.export $$dyncall,MILLICODE

	bb,>=,n	22,bit30,noshlibs

	depi	0,bit31,len2,22
	ldw	4(22),19
	ldw	0(22),22
noshlibs:
	ldsid	(22),r1
	mtsp	r1,sr0
	be	0(sr0,r22)
	stw	rp,-24(sp)
	.exit
	.procend
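; Roughly: if bit 30 of the function pointer in r22 is set, it designates
; a shared-library descriptor rather than a plain code address, so the
; real target and the linkage pointer (r19) are loaded from it first;
; either way, control transfers via an external branch into the space
; named by the target's space ID, with rp saved in the delay slot.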

temp: .EQU	r1
retreg: .EQU	ret1	; r29

	.export $$divU,millicode
	.import $$divU_3,millicode
	.import $$divU_5,millicode
	.import $$divU_6,millicode
	.import $$divU_7,millicode
	.import $$divU_9,millicode
	.import $$divU_10,millicode
	.import $$divU_12,millicode
	.import $$divU_14,millicode
	.import $$divU_15,millicode
$$divU:
	.proc
	.callinfo millicode
	.entry
; The subtract is not nullified since it does no harm and can be used
; by the two cases that branch back to "normal".
	comib,>=  15,arg1,special_divisor
	sub	r0,arg1,temp		; clear carry, negate the divisor
	ds	r0,temp,r0		; set V-bit to 1
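; (Roughly: ds is the PA-RISC divide-step primitive; steered by the PSW
; V-bit and carry, each step conditionally adds or subtracts the divisor,
; so the 32 ds/addc pairs below develop one quotient bit per step.)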
normal:
	add	arg0,arg0,retreg	; shift msb bit into carry
	ds	r0,arg1,temp		; 1st divide step, if no carry
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 2nd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 3rd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 4th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 5th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 6th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 7th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 8th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 9th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 10th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 11th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 12th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 13th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 14th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 15th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 16th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 17th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 18th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 19th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 20th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 21st divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 22nd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 23rd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 24th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 25th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 26th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 27th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 28th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 29th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 30th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 31st divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 32nd divide step,
	bv    0(r31)
	addc	retreg,retreg,retreg	; shift last retreg bit into retreg
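; A loose C model of the loop above (an illustrative assumption; the
; hardware uses ds divide steps, but the quotient produced matches this
; plain restoring version):
;
;   unsigned divU(unsigned a, unsigned b) {
;       unsigned q = 0, r = 0;
;       for (int i = 31; i >= 0; i--) {
;           r = (r << 1) | ((a >> i) & 1);  /* bring in next dividend bit */
;           q <<= 1;
;           if (r >= b) { r -= b; q |= 1; } /* one divide step */
;       }
;       return q;
;   }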
;_____________________________________________________________________________
; handle the cases where divisor is a small constant or has high bit on
special_divisor:
	comib,>  0,arg1,big_divisor
	nop
	blr	arg1,r0
	nop
zero_divisor: ; this label is here to provide external visibility
	addit,=	0,arg1,0		; trap for zero dvr
	nop
	bv    0(r31)			; divisor == 1
	copy	arg0,retreg
	bv    0(r31)			; divisor == 2
	extru	arg0,30,31,retreg
	 b,n   $$divU_3			; divisor == 3
	nop
	bv    0(r31)			; divisor == 4
	extru	arg0,29,30,retreg
	 b,n   $$divU_5			; divisor == 5
	nop
	 b,n   $$divU_6			; divisor == 6
	nop
	 b,n   $$divU_7			; divisor == 7
	nop
	bv    0(r31)			; divisor == 8
	extru	arg0,28,29,retreg
	 b,n   $$divU_9			; divisor == 9
	nop
	 b,n   $$divU_10		; divisor == 10
	nop
	b	normal			; divisor == 11
	ds	r0,temp,r0		; set V-bit to 1
	 b,n   $$divU_12		; divisor == 12
	nop
	b	normal			; divisor == 13
	ds	r0,temp,r0		; set V-bit to 1
	 b,n   $$divU_14		; divisor == 14
	nop
	 b,n   $$divU_15		; divisor == 15
	nop
;_____________________________________________________________________________
; Handle the case where the high bit is on in the divisor.
; Compute:	if( dividend>=divisor) quotient=1; else quotient=0;
; Note:		dividend>=divisor iff dividend-divisor does not borrow
; and		not borrow iff carry
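; For example, reading the three instructions below: the sub sets carry
; exactly when arg0 >= arg1, and the addc then materializes that carry as
; the 0-or-1 quotient; in C, retreg = (arg0 >= arg1).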
big_divisor:
	sub	arg0,arg1,r0
	bv    0(r31)
	addc	r0,r0,retreg
	.exit
	.procend
	.end

t2: .EQU	r1
; x2	.EQU	arg0	; r26
t1: .EQU	arg1	; r25
; x1	.EQU	ret1	; r29
;_____________________________________________________________________________

$$divide_by_constant:
	.PROC
	.CALLINFO millicode
	.entry

	.export $$divide_by_constant,millicode
; Provides a "nice" label for the code covered by the unwind descriptor
; for things like gprof.
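; The entry points below divide by small constants without a divide loop:
; powers of two reduce to a sign-corrected arithmetic shift, and the other
; divisors multiply by a fixed-point reciprocal built from shift-and-add
; folding steps (SHD/SH1ADD/SH2ADD/SH3ADD).  A sanity-check sketch in C,
; assuming a hypothetical C-callable wrapper around $$divU_5 with the
; millicode convention of dividend in arg0 and result in ret1:
;
;   extern unsigned divU_5(unsigned);   /* wrapper, for illustration */
;   for (unsigned a = 0; a < (1u << 20); a++)
;       assert(divU_5(a) == a / 5);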

$$divI_2:
        .EXPORT         $$divI_2,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            1,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,30,31,ret1

$$divI_4:
        .EXPORT         $$divI_4,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            3,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,29,30,ret1

$$divI_8:
        .EXPORT         $$divI_8,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            7,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,28,29,ret1

$$divI_16:
        .EXPORT         $$divI_16,MILLICODE
        COMCLR,>=       arg0,0,0
        ADDI            15,arg0,arg0
        bv    0(r31)
        EXTRS           arg0,27,28,ret1
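; In C terms, each power-of-two entry above is (illustrative sketch):
;
;   int divI_2k(int a, int k) {           /* k = 1, 2, 3, 4 */
;       if (a < 0) a += (1 << k) - 1;     /* bias so the shift truncates */
;       return a >> k;                    /* arithmetic shift (EXTRS) */
;   }
;
; The COMCLR,>= nullifies the bias ADDI for non-negative dividends.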

$$divI_3:
        .EXPORT         $$divI_3,MILLICODE
        COMB,<,N        arg0,0,$neg3
        ADDI            1,arg0,arg0
        EXTRU           arg0,1,2,ret1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,0,ret1

$neg3:
        SUBI            1,arg0,arg0
        EXTRU           arg0,1,2,ret1
        SH2ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_3:
        .EXPORT         $$divU_3,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,30,t1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,t1,ret1

$$divI_5:
        .EXPORT         $$divI_5,MILLICODE
        COMB,<,N        arg0,0,$neg5
        ADDI            3,arg0,t1
        SH1ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$neg5:
        SUB             0,arg0,arg0
        ADDI            1,arg0,arg0
        SHD             0,arg0,31,ret1
        SH1ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_5:
        .EXPORT         $$divU_5,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,31,t1
        SH1ADD          arg0,arg0,arg0
        B               $pos
        ADDC            t1,ret1,ret1

$$divI_6:
        .EXPORT         $$divI_6,MILLICODE
        COMB,<,N        arg0,0,$neg6
        EXTRU           arg0,30,31,arg0
        ADDI            5,arg0,t1
        SH2ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$neg6:
        SUBI            2,arg0,arg0
        EXTRU           arg0,30,31,arg0
        SHD             0,arg0,30,ret1
        SH2ADD          arg0,arg0,arg0
        B               $neg
        ADDC            ret1,0,ret1

$$divU_6:
        .EXPORT         $$divU_6,MILLICODE
        EXTRU           arg0,30,31,arg0
        ADDI            1,arg0,arg0
        SHD             0,arg0,30,ret1
        SH2ADD          arg0,arg0,arg0
        B               $pos
        ADDC            ret1,0,ret1

$$divU_10:
        .EXPORT         $$divU_10,MILLICODE
        EXTRU           arg0,30,31,arg0
        ADDI            3,arg0,t1
        SH1ADD          arg0,t1,arg0
        ADDC            0,0,ret1
$pos:
        SHD             ret1,arg0,28,t1
        SHD             arg0,0,28,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
$pos_for_17:
        SHD             ret1,arg0,24,t1
        SHD             arg0,0,24,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,16,t1
        SHD             arg0,0,16,t2
        ADD             arg0,t2,arg0
        bv    0(r31)
        ADDC            ret1,t1,ret1

$$divI_10:
        .EXPORT         $$divI_10,MILLICODE
        COMB,<          arg0,0,$neg10
        COPY            0,ret1
        EXTRU           arg0,30,31,arg0
        ADDIB,TR        1,arg0,$pos
        SH1ADD          arg0,arg0,arg0

$neg10:
        SUBI            2,arg0,arg0
        EXTRU           arg0,30,31,arg0
        SH1ADD          arg0,arg0,arg0
$neg:
        SHD             ret1,arg0,28,t1
        SHD             arg0,0,28,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
$neg_for_17:
        SHD             ret1,arg0,24,t1
        SHD             arg0,0,24,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,16,t1
        SHD             arg0,0,16,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1
        bv    0(r31)
        SUB             0,ret1,ret1

$$divI_12:
        .EXPORT         $$divI_12,MILLICODE
        COMB,<          arg0,0,$neg12
        COPY            0,ret1
        EXTRU           arg0,29,30,arg0
        ADDIB,TR        1,arg0,$pos
        SH2ADD          arg0,arg0,arg0

$neg12:
        SUBI            4,arg0,arg0
        EXTRU           arg0,29,30,arg0
        B               $neg
        SH2ADD          arg0,arg0,arg0

$$divU_12:
        .EXPORT         $$divU_12,MILLICODE
        EXTRU           arg0,29,30,arg0
        ADDI            5,arg0,t1
        SH2ADD          arg0,t1,arg0
        B               $pos
        ADDC            0,0,ret1

$$divI_15:
        .EXPORT         $$divI_15,MILLICODE
        COMB,<          arg0,0,$neg15
        COPY            0,ret1
        ADDIB,TR        1,arg0,$pos+4
        SHD             ret1,arg0,28,t1

$neg15:
        B               $neg
        SUBI            1,arg0,arg0

$$divU_15:
        .EXPORT         $$divU_15,MILLICODE
        ADDI            1,arg0,arg0
        B               $pos
        ADDC            0,0,ret1

$$divI_17:
        .EXPORT         $$divI_17,MILLICODE
        COMB,<,N        arg0,0,$neg17
        ADDI            1,arg0,arg0
        SHD             0,arg0,28,t1
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $pos_for_17
        SUBB            t1,0,ret1

$neg17:
        SUBI            1,arg0,arg0
        SHD             0,arg0,28,t1
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $neg_for_17
        SUBB            t1,0,ret1

$$divU_17:
        .EXPORT         $$divU_17,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,28,t1
$u17:
        SHD             arg0,0,28,t2
        SUB             t2,arg0,arg0
        B               $pos_for_17
        SUBB            t1,ret1,ret1

$$divI_7:
        .EXPORT         $$divI_7,MILLICODE
        COMB,<,N        arg0,0,$neg7
$7:
        ADDI            1,arg0,arg0
        SHD             0,arg0,29,ret1
        SH3ADD          arg0,arg0,arg0
        ADDC            ret1,0,ret1
$pos7:
        SHD             ret1,arg0,26,t1
        SHD             arg0,0,26,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,20,t1
        SHD             arg0,0,20,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,t1

        COPY            0,ret1
        SHD,=           t1,arg0,24,t1
$1:
        ADDB,TR         t1,ret1,$2
        EXTRU           arg0,31,24,arg0

        bv,n  0(r31)

$2:
        ADDB,TR         t1,arg0,$1
        EXTRU,=         arg0,7,8,t1

$neg7:
        SUBI            1,arg0,arg0
$8:
        SHD             0,arg0,29,ret1
        SH3ADD          arg0,arg0,arg0
        ADDC            ret1,0,ret1

$neg7_shift:
        SHD             ret1,arg0,26,t1
        SHD             arg0,0,26,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,ret1

        SHD             ret1,arg0,20,t1
        SHD             arg0,0,20,t2
        ADD             arg0,t2,arg0
        ADDC            ret1,t1,t1

        COPY            0,ret1
        SHD,=           t1,arg0,24,t1
$3:
        ADDB,TR         t1,ret1,$4
        EXTRU           arg0,31,24,arg0

        bv    0(r31)
        SUB             0,ret1,ret1

$4:
        ADDB,TR         t1,arg0,$3
        EXTRU,=         arg0,7,8,t1

$$divU_7:
        .EXPORT         $$divU_7,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,29,t1
        SH3ADD          arg0,arg0,arg0
        B               $pos7
        ADDC            t1,ret1,ret1

$$divI_9:
        .EXPORT         $$divI_9,MILLICODE
        COMB,<,N        arg0,0,$neg9
        ADDI            1,arg0,arg0
        SHD             0,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $pos7
        SUBB            t1,0,ret1

$neg9:
        SUBI            1,arg0,arg0
        SHD             0,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $neg7_shift
        SUBB            t1,0,ret1

$$divU_9:
        .EXPORT         $$divU_9,MILLICODE
        ADDI            1,arg0,arg0
        ADDC            0,0,ret1
        SHD             ret1,arg0,29,t1
        SHD             arg0,0,29,t2
        SUB             t2,arg0,arg0
        B               $pos7
        SUBB            t1,ret1,ret1

$$divI_14:
        .EXPORT         $$divI_14,MILLICODE
        COMB,<,N        arg0,0,$neg14
$$divU_14:
        .EXPORT         $$divU_14,MILLICODE
        B               $7
        EXTRU           arg0,30,31,arg0

$neg14:
        SUBI            2,arg0,arg0
        B               $8
        EXTRU           arg0,30,31,arg0

	.exit
        .PROCEND
	.END

rmndr: .EQU	ret1	; r29

	.export $$remU,millicode
$$remU:
	.proc
	.callinfo millicode
	.entry

	comib,>=,n  0,arg1,special_case
	sub	r0,arg1,rmndr		; clear carry, negate the divisor
	ds	r0,rmndr,r0		; set V-bit to 1
	add	arg0,arg0,temp		; shift msb bit into carry
	ds	r0,arg1,rmndr		; 1st divide step, if no carry
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 2nd divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 3rd divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 4th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 5th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 6th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 7th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 8th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 9th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 10th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 11th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 12th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 13th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 14th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 15th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 16th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 17th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 18th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 19th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 20th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 21st divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 22nd divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 23rd divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 24th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 25th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 26th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 27th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 28th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 29th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 30th divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 31st divide step
	addc	temp,temp,temp		; shift temp with/into carry
	ds	rmndr,arg1,rmndr	; 32nd divide step,
	comiclr,<= 0,rmndr,r0
	  add	rmndr,arg1,rmndr	; correction
;	.exit
	bv,n  0(r31)
	nop
; Putting >= on the last DS and deleting COMICLR does not work!
;_____________________________________________________________________________
special_case:
	addit,=	0,arg1,r0		; trap on div by zero
	sub,>>=	arg0,arg1,rmndr
	  copy	arg0,rmndr
	bv,n  0(r31)
	nop
	.exit
	.procend
	.end
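; Rough C equivalent of $$remU (illustrative sketch):
;
;   unsigned remU(unsigned a, unsigned b) { return a % b; }
;
; The special_case path covers b == 0 (trap) and b >= 2^31, where at most
; one subtraction suffices: a >= b ? a - b : a.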

; Use bv    0(r31) and bv,n  0(r31) instead.
; #define	return		bv	0(%mrp)
; #define	return_n	bv,n	0(%mrp)

	.subspa $MILLICODE$
	.align 16
$$mulI:
	.proc
	.callinfo millicode
	.entry
	.export $$mulI, millicode
	combt,<<=	%r25,%r26,l4	; swap args if unsigned %r25>%r26
	copy		0,%r29		; zero out the result
	xor		%r26,%r25,%r26	; swap %r26 & %r25 using the
	xor		%r26,%r25,%r25	;  old xor trick
	xor		%r26,%r25,%r26
l4: combt,<=	0,%r26,l3		; if %r26>=0 then proceed like unsigned
	zdep		%r25,30,8,%r1	; %r1 = (%r25&0xff)<<1 *********
	sub,>		0,%r25,%r1	; otherwise negate both and
	combt,<=,n	%r26,%r1,l2	;  swap back if |%r26|<|%r25|
	sub		0,%r26,%r25
	movb,tr,n	%r1,%r26,l2	; 10th inst.

l0: 	add	%r29,%r1,%r29		; add in this partial product
l1: zdep	%r26,23,24,%r26		; %r26 <<= 8 ******************
l2: zdep	%r25,30,8,%r1		; %r1 = (%r25&0xff)<<1 *********
l3: blr		%r1,0			; case on these 8 bits ******
	extru	%r25,23,24,%r25		; %r25 >>= 8 ******************

;16 insts before this.
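; A loose C model of the byte-at-a-time dispatch below (an illustrative
; assumption): each 8-bit slice of the multiplier %r25 selects one of the
; 256 shift-and-add recipes x0..x255, which folds that slice's partial
; product into %r29 before the operands are shifted for the next slice:
;
;   unsigned mulI(unsigned x, unsigned y) {   /* x in %r25, y in %r26 */
;       unsigned r = 0;
;       do {
;           r += mul_byte[x & 0xff](y);       /* hypothetical table for
;                                                one x0..x255 case */
;           x >>= 8;  y <<= 8;
;       } while (x != 0);
;       return r;
;   }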
;			  %r26 <<= 8 **************************
x0: comb,<>	%r25,0,l2	! zdep	%r26,23,24,%r26	! bv,n  0(r31)	! nop
x1: comb,<>	%r25,0,l1	! 	add	%r29,%r26,%r29	! bv,n  0(r31)	! nop
x2: comb,<>	%r25,0,l1	! sh1add	%r26,%r29,%r29	! bv,n  0(r31)	! nop
x3: comb,<>	%r25,0,l0	! 	sh1add	%r26,%r26,%r1	! bv    0(r31)	! 	add	%r29,%r1,%r29
x4: comb,<>	%r25,0,l1	! sh2add	%r26,%r29,%r29	! bv,n  0(r31)	! nop
x5: comb,<>	%r25,0,l0	! 	sh2add	%r26,%r26,%r1	! bv    0(r31)	! 	add	%r29,%r1,%r29
x6: 	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh1add	%r1,%r29,%r29	! bv,n  0(r31)
x7: 	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh2add	%r26,%r29,%r29	! b,n	ret_t0
x8: comb,<>	%r25,0,l1	! sh3add	%r26,%r29,%r29	! bv,n  0(r31)	! nop
x9: comb,<>	%r25,0,l0	! 	sh3add	%r26,%r26,%r1	! bv    0(r31)	! 	add	%r29,%r1,%r29
x10: 	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh1add	%r1,%r29,%r29	! bv,n  0(r31)
x11: 	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh3add	%r26,%r29,%r29	! b,n	ret_t0
x12: 	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh2add	%r1,%r29,%r29	! bv,n  0(r31)
x13: 	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh3add	%r26,%r29,%r29	! b,n	ret_t0
x14: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x15: 	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! 	sh1add	%r1,%r1,%r1	! b,n	ret_t0
x16: zdep	%r26,27,28,%r1	! comb,<>	%r25,0,l1	! 	add	%r29,%r1,%r29	! bv,n  0(r31)
x17: 	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh3add	%r26,%r1,%r1	! b,n	ret_t0
x18: 	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh1add	%r1,%r29,%r29	! bv,n  0(r31)
x19: 	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh1add	%r1,%r26,%r1	! b,n	ret_t0
x20: 	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh2add	%r1,%r29,%r29	! bv,n  0(r31)
x21: 	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh2add	%r1,%r26,%r1	! b,n	ret_t0
x22: 	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x23: 	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x24: 	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh3add	%r1,%r29,%r29	! bv,n  0(r31)
x25: 	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! 	sh2add	%r1,%r1,%r1	! b,n	ret_t0
x26: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x27: 	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! 	sh3add	%r1,%r1,%r1	! b,n	ret_t0
x28: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x29: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x30: 	sh2add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x31: zdep	%r26,26,27,%r1	! comb,<>	%r25,0,l0	! sub	%r1,%r26,%r1	! b,n	ret_t0
x32: zdep	%r26,26,27,%r1	! comb,<>	%r25,0,l1	! 	add	%r29,%r1,%r29	! bv,n  0(r31)
x33: 	sh3add	%r26,0,%r1		! comb,<>	%r25,0,l0	! sh2add	%r1,%r26,%r1	! b,n	ret_t0
x34: zdep	%r26,27,28,%r1	! add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x35: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh3add	%r26,%r1,%r1
x36: 	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh2add	%r1,%r29,%r29	! bv,n  0(r31)
x37: 	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh2add	%r1,%r26,%r1	! b,n	ret_t0
x38: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x39: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x40: 	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh3add	%r1,%r29,%r29	! bv,n  0(r31)
x41: 	sh2add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! sh3add	%r1,%r26,%r1	! b,n	ret_t0
x42: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x43: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x44: 	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x45: 	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! 	sh2add	%r1,%r1,%r1	! b,n	ret_t0
x46: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! add	%r1,%r26,%r1
x47: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sh1add	%r26,%r1,%r1
x48: 	sh1add	%r26,%r26,%r1		! comb,<>	%r25,0,l0	! zdep	%r1,27,28,%r1	! b,n	ret_t0
x49: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sh2add	%r26,%r1,%r1
x50: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x51: 	sh3add	%r26,%r26,%r1		! sh3add	%r26,%r1,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x52: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x53: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x54: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x55: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x56: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x57: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x58: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_2t0	! sh2add	%r1,%r26,%r1
x59: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_t02a0	! 	sh1add	%r1,%r1,%r1
x60: 	sh2add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x61: 	sh2add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x62: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x63: zdep	%r26,25,26,%r1	! comb,<>	%r25,0,l0	! sub	%r1,%r26,%r1	! b,n	ret_t0
x64: zdep	%r26,25,26,%r1	! comb,<>	%r25,0,l1	! 	add	%r29,%r1,%r29	! bv,n  0(r31)
x65: 	sh3add	%r26,0,%r1		! comb,<>	%r25,0,l0	! sh3add	%r1,%r26,%r1	! b,n	ret_t0
x66: zdep	%r26,26,27,%r1	! add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x67: 	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x68: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x69: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x70: zdep	%r26,25,26,%r1	! sh2add	%r26,%r1,%r1	! 	b	e_t0	! sh1add	%r26,%r1,%r1
x71: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,0,%r1	! 	b	e_t0	! sub	%r1,%r26,%r1
x72: 	sh3add	%r26,%r26,%r1		! comb,<>	%r25,0,l1	! sh3add	%r1,%r29,%r29	! bv,n  0(r31)
x73: 	sh3add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_shift	! 	add	%r29,%r1,%r29
x74: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x75: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x76: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x77: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x78: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_2t0	! sh1add	%r1,%r26,%r1
x79: zdep	%r26,27,28,%r1	! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sub	%r1,%r26,%r1
x80: zdep	%r26,27,28,%r1	! 	sh2add	%r1,%r1,%r1	! b	e_shift	! 	add	%r29,%r1,%r29
x81: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,%r1,%r1	! b	e_shift	! 	add	%r29,%r1,%r29
x82: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x83: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x84: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x85: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x86: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_2t0	! sh1add	%r1,%r26,%r1
x87: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,%r1,%r1	! b	e_t02a0	! sh2add	%r26,%r1,%r1
x88: 	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x89: 	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x90: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x91: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x92: 	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_4t0	! sh1add	%r1,%r26,%r1
x93: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x94: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_2t0	! sh1add	%r26,%r1,%r1
x95: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x96: 	sh3add	%r26,0,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x97: 	sh3add	%r26,0,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x98: zdep	%r26,26,27,%r1	! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh1add	%r26,%r1,%r1
x99: 	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x100: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x101: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x102: zdep	%r26,26,27,%r1	! sh1add	%r26,%r1,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x103: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_t02a0	! sh2add	%r1,%r26,%r1
x104: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x105: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x106: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_2t0	! sh2add	%r1,%r26,%r1
x107: 	sh3add	%r26,%r26,%r1		! sh2add	%r26,%r1,%r1	! b	e_t02a0	! sh3add	%r1,%r26,%r1
x108: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x109: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x110: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_2t0	! sh1add	%r1,%r26,%r1
x111: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x112: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! zdep	%r1,27,28,%r1
x113: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_t02a0	! 	sh1add	%r1,%r1,%r1
x114: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_2t0	! 	sh1add	%r1,%r1,%r1
x115: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_2t0a0	! 	sh1add	%r1,%r1,%r1
x116: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_4t0	! sh2add	%r1,%r26,%r1
x117: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! 	sh3add	%r1,%r1,%r1
x118: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_t0a0	! 	sh3add	%r1,%r1,%r1
x119: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_t02a0	! 	sh3add	%r1,%r1,%r1
x120: 	sh2add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x121: 	sh2add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x122: 	sh2add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_2t0	! sh2add	%r1,%r26,%r1
x123: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x124: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x125: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x126: zdep	%r26,25,26,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x127: zdep	%r26,24,25,%r1	! comb,<>	%r25,0,l0	! sub	%r1,%r26,%r1	! b,n	ret_t0
x128: zdep	%r26,24,25,%r1	! comb,<>	%r25,0,l1	! 	add	%r29,%r1,%r29	! bv,n  0(r31)
x129: zdep	%r26,24,25,%r1	! comb,<>	%r25,0,l0	! add	%r1,%r26,%r1	! b,n	ret_t0
x130: zdep	%r26,25,26,%r1	! add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x131: 	sh3add	%r26,0,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x132: 	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x133: 	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x134: 	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_2t0	! sh1add	%r1,%r26,%r1
x135: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x136: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x137: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x138: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_2t0	! sh2add	%r1,%r26,%r1
x139: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! b	e_2t0a0	! sh2add	%r1,%r26,%r1
x140: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_4t0	! 	sh2add	%r1,%r1,%r1
x141: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! b	e_4t0a0	! sh1add	%r1,%r26,%r1
x142: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,0,%r1	! 	b	e_2t0	! sub	%r1,%r26,%r1
x143: zdep	%r26,27,28,%r1	! 	sh3add	%r1,%r1,%r1	! 	b	e_t0	! sub	%r1,%r26,%r1
x144: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,0,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x145: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,0,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x146: 	sh3add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x147: 	sh3add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x148: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x149: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x150: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_2t0	! sh1add	%r1,%r26,%r1
x151: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	! sh1add	%r1,%r26,%r1
x152: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x153: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x154: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_2t0	! sh2add	%r1,%r26,%r1
x155: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x156: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_4t0	! sh1add	%r1,%r26,%r1
x157: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! b	e_t02a0	! 	sh2add	%r1,%r1,%r1
x158: zdep	%r26,27,28,%r1	! 	sh2add	%r1,%r1,%r1	! 	b	e_2t0	! sub	%r1,%r26,%r1
x159: zdep	%r26,26,27,%r1	! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sub	%r1,%r26,%r1
x160: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,0,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x161: 	sh3add	%r26,0,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x162: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,%r1,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x163: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,%r1,%r1	! 	b	e_t0	! sh1add	%r1,%r26,%r1
x164: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x165: 	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x166: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_2t0	! sh1add	%r1,%r26,%r1
x167: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_2t0a0	! sh1add	%r1,%r26,%r1
x168: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x169: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x170: zdep	%r26,26,27,%r1	! sh1add	%r26,%r1,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x171: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_t0	! 	sh3add	%r1,%r1,%r1
x172: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_4t0	! sh1add	%r1,%r26,%r1
x173: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_t02a0	! 	sh3add	%r1,%r1,%r1
x174: zdep	%r26,26,27,%r1	! sh1add	%r26,%r1,%r1	! b	e_t04a0	! 	sh2add	%r1,%r1,%r1
x175: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_5t0	! sh1add	%r1,%r26,%r1
x176: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_8t0	! add	%r1,%r26,%r1
x177: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_8t0a0	! add	%r1,%r26,%r1
x178: 	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_2t0	! sh3add	%r1,%r26,%r1
x179: 	sh2add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_2t0a0	! sh3add	%r1,%r26,%r1
x180: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x181: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x182: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_2t0	! sh1add	%r1,%r26,%r1
x183: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_2t0a0	! sh1add	%r1,%r26,%r1
x184: 	sh2add	%r26,%r26,%r1		! 	sh3add	%r1,%r1,%r1	! 	b	e_4t0	! add	%r1,%r26,%r1
x185: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x186: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! 	b	e_2t0	! 	sh1add	%r1,%r1,%r1
x187: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_t02a0	! 	sh2add	%r1,%r1,%r1
x188: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_4t0	! sh1add	%r26,%r1,%r1
x189: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_t0	! 	sh3add	%r1,%r1,%r1
x190: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_2t0	! 	sh2add	%r1,%r1,%r1
x191: zdep	%r26,25,26,%r1	! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sub	%r1,%r26,%r1
x192: 	sh3add	%r26,0,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x193: 	sh3add	%r26,0,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x194: 	sh3add	%r26,0,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_2t0	! sh2add	%r1,%r26,%r1
x195: 	sh3add	%r26,0,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x196: 	sh3add	%r26,0,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_4t0	! sh1add	%r1,%r26,%r1
x197: 	sh3add	%r26,0,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_4t0a0	! sh1add	%r1,%r26,%r1
x198: zdep	%r26,25,26,%r1	! sh1add	%r26,%r1,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x199: 	sh3add	%r26,0,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	! 	sh1add	%r1,%r1,%r1
x200: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x201: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x202: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_2t0	! sh2add	%r1,%r26,%r1
x203: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_2t0a0	! sh2add	%r1,%r26,%r1
x204: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_4t0	! 	sh1add	%r1,%r1,%r1
x205: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x206: zdep	%r26,25,26,%r1	! sh2add	%r26,%r1,%r1	! b	e_t02a0	! 	sh1add	%r1,%r1,%r1
x207: 	sh3add	%r26,0,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_3t0	! sh2add	%r1,%r26,%r1
x208: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_8t0	! add	%r1,%r26,%r1
x209: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_8t0a0	! add	%r1,%r26,%r1
x210: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_2t0	! 	sh2add	%r1,%r1,%r1
x211: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	! 	sh2add	%r1,%r1,%r1
x212: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_4t0	! sh2add	%r1,%r26,%r1
x213: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_4t0a0	! sh2add	%r1,%r26,%r1
x214: 	sh3add	%r26,%r26,%r1		! sh2add	%r26,%r1,%r1	! b	e2t04a0	! sh3add	%r1,%r26,%r1
x215: 	sh2add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_5t0	! sh1add	%r1,%r26,%r1
x216: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x217: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x218: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_2t0	! sh2add	%r1,%r26,%r1
x219: 	sh3add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x220: 	sh1add	%r26,%r26,%r1		! 	sh3add	%r1,%r1,%r1	! 	b	e_4t0	! sh1add	%r1,%r26,%r1
x221: 	sh1add	%r26,%r26,%r1		! 	sh3add	%r1,%r1,%r1	! b	e_4t0a0	! sh1add	%r1,%r26,%r1
x222: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_2t0	! 	sh1add	%r1,%r1,%r1
x223: 	sh3add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	! 	sh1add	%r1,%r1,%r1
x224: 	sh3add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_8t0	! add	%r1,%r26,%r1
x225: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_t0	! 	sh2add	%r1,%r1,%r1
x226: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_t02a0	! zdep	%r1,26,27,%r1
x227: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_t02a0	! 	sh2add	%r1,%r1,%r1
x228: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_4t0	! 	sh1add	%r1,%r1,%r1
x229: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_4t0a0	! 	sh1add	%r1,%r1,%r1
x230: 	sh3add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_5t0	! add	%r1,%r26,%r1
x231: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_3t0	! sh2add	%r1,%r26,%r1
x232: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! 	b	e_8t0	! sh2add	%r1,%r26,%r1
x233: 	sh1add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e_8t0a0	! sh2add	%r1,%r26,%r1
x234: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! 	b	e_2t0	! 	sh3add	%r1,%r1,%r1
x235: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e_2t0a0	! 	sh3add	%r1,%r1,%r1
x236: 	sh3add	%r26,%r26,%r1		! sh1add	%r1,%r26,%r1	! b	e4t08a0	! 	sh1add	%r1,%r1,%r1
x237: zdep	%r26,27,28,%r1	! 	sh2add	%r1,%r1,%r1	! 	b	e_3t0	! sub	%r1,%r26,%r1
x238: 	sh1add	%r26,%r26,%r1		! sh2add	%r1,%r26,%r1	! b	e2t04a0	! 	sh3add	%r1,%r1,%r1
x239: zdep	%r26,27,28,%r1	! 	sh2add	%r1,%r1,%r1	! b	e_t0ma0	! 	sh1add	%r1,%r1,%r1
x240: 	sh3add	%r26,%r26,%r1		! add	%r1,%r26,%r1	! 	b	e_8t0	! 	sh1add	%r1,%r1,%r1
x241: 	sh3add	%r26,%r26,%r1		! add	%r1,%r26,%r1	! b	e_8t0a0	! 	sh1add	%r1,%r1,%r1
x242: 	sh2add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_2t0	! sh3add	%r1,%r26,%r1
x243: 	sh3add	%r26,%r26,%r1		! 	sh3add	%r1,%r1,%r1	! 	b	e_t0	! 	sh1add	%r1,%r1,%r1
x244: 	sh2add	%r26,%r26,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_4t0	! sh2add	%r1,%r26,%r1
x245: 	sh3add	%r26,0,%r1		! 	sh1add	%r1,%r1,%r1	! 	b	e_5t0	! sh1add	%r1,%r26,%r1
x246: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! 	b	e_2t0	! 	sh1add	%r1,%r1,%r1
x247: 	sh2add	%r26,%r26,%r1		! sh3add	%r1,%r26,%r1	! b	e_2t0a0	! 	sh1add	%r1,%r1,%r1
x248: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh3add	%r1,%r29,%r29
x249: zdep	%r26,26,27,%r1	! sub	%r1,%r26,%r1	! 	b	e_t0	! sh3add	%r1,%r26,%r1
x250: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! 	b	e_2t0	! 	sh2add	%r1,%r1,%r1
x251: 	sh2add	%r26,%r26,%r1		! 	sh2add	%r1,%r1,%r1	! b	e_2t0a0	! 	sh2add	%r1,%r1,%r1
x252: zdep	%r26,25,26,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh2add	%r1,%r29,%r29
x253: zdep	%r26,25,26,%r1	! sub	%r1,%r26,%r1	! 	b	e_t0	! sh2add	%r1,%r26,%r1
x254: zdep	%r26,24,25,%r1	! sub	%r1,%r26,%r1	! b	e_shift	! sh1add	%r1,%r29,%r29
x255: zdep	%r26,23,24,%r1	! comb,<>	%r25,0,l0	! sub	%r1,%r26,%r1	! b,n	ret_t0
;1040 insts before this.
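; (The e_* tails below fold the partial product in %r1 into the result
; %r29, then either return or fall back into the l0/l1/l2 loop when more
; multiplier bytes remain; ret_t0 returns with e_t0's add executing in
; its delay slot.)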
ret_t0: bv    0(r31)
e_t0: 	add	%r29,%r1,%r29
e_shift: comb,<>	%r25,0,l2
	zdep	%r26,23,24,%r26	; %r26 <<= 8 ***********
	bv,n  0(r31)
e_t0ma0: comb,<>	%r25,0,l0
	sub	%r1,%r26,%r1
	bv    0(r31)
		add	%r29,%r1,%r29
e_t0a0: comb,<>	%r25,0,l0
	add	%r1,%r26,%r1
	bv    0(r31)
		add	%r29,%r1,%r29
e_t02a0: comb,<>	%r25,0,l0
	sh1add	%r26,%r1,%r1
	bv    0(r31)
		add	%r29,%r1,%r29
e_t04a0: comb,<>	%r25,0,l0
	sh2add	%r26,%r1,%r1
	bv    0(r31)
		add	%r29,%r1,%r29
e_2t0: comb,<>	%r25,0,l1
	sh1add	%r1,%r29,%r29
	bv,n  0(r31)
e_2t0a0: comb,<>	%r25,0,l0
	sh1add	%r1,%r26,%r1
	bv    0(r31)
		add	%r29,%r1,%r29
e2t04a0: sh1add	%r26,%r1,%r1
	comb,<>	%r25,0,l1
	sh1add	%r1,%r29,%r29
	bv,n  0(r31)
e_3t0: comb,<>	%r25,0,l0
		sh1add	%r1,%r1,%r1
	bv    0(r31)
		add	%r29,%r1,%r29
e_4t0: comb,<>	%r25,0,l1
	sh2add	%r1,%r29,%r29
	bv,n  0(r31)
e_4t0a0: comb,<>	%r25,0,l0
	sh2add	%r1,%r26,%r1
	bv    0(r31)
		add	%r29,%r1,%r29
e4t08a0: sh1add	%r26,%r1,%r1
	comb,<>	%r25,0,l1
	sh2add	%r1,%r29,%r29
	bv,n  0(r31)
e_5t0: comb,<>	%r25,0,l0
		sh2add	%r1,%r1,%r1
	bv    0(r31)
		add	%r29,%r1,%r29
e_8t0: comb,<>	%r25,0,l1
	sh3add	%r1,%r29,%r29
	bv,n  0(r31)
e_8t0a0: comb,<>	%r25,0,l0
	sh3add	%r1,%r26,%r1
	bv    0(r31)
		add	%r29,%r1,%r29

	.exit
	.procend
	.end

	.import $$divI_2,millicode
	.import $$divI_3,millicode
	.import $$divI_4,millicode
	.import $$divI_5,millicode
	.import $$divI_6,millicode
	.import $$divI_7,millicode
	.import $$divI_8,millicode
	.import $$divI_9,millicode
	.import $$divI_10,millicode
	.import $$divI_12,millicode
	.import $$divI_14,millicode
	.import $$divI_15,millicode
	.export $$divI,millicode
	.export	$$divoI,millicode
$$divoI:
	.proc
	.callinfo millicode
	.entry
	comib,=,n  -1,arg1,negative1	; when divisor == -1
$$divI:
	comib,>>=,n 15,arg1,small_divisor
	add,>=	0,arg0,retreg		; move dividend, if retreg < 0,
normal1:
	  sub	0,retreg,retreg		;   make it positive
	sub	0,arg1,temp		; clear carry,
					;   negate the divisor
	ds	0,temp,0		; set V-bit to the comple-
					;   ment of the divisor sign
	add	retreg,retreg,retreg	; shift msb bit into carry
	ds	r0,arg1,temp		; 1st divide step, if no carry
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 2nd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 3rd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 4th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 5th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 6th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 7th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 8th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 9th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 10th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 11th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 12th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 13th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 14th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 15th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 16th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 17th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 18th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 19th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 20th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 21st divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 22nd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 23rd divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 24th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 25th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 26th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 27th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 28th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 29th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 30th divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 31st divide step
	addc	retreg,retreg,retreg	; shift retreg with/into carry
	ds	temp,arg1,temp		; 32nd divide step,
	addc	retreg,retreg,retreg	; shift last retreg bit into retreg
	xor,>=	arg0,arg1,0		; get correct sign of quotient
	  sub	0,retreg,retreg		;   based on operand signs
	bv,n  0(r31)
	nop
;______________________________________________________________________
small_divisor:
	blr,n	arg1,r0
	nop
; table for divisor == 0,1, ... ,15
	addit,=	0,arg1,r0	; trap if divisor == 0
	nop
	bv    0(r31)		; divisor == 1
	copy	arg0,retreg
	 b,n   $$divI_2		; divisor == 2
	nop
	 b,n   $$divI_3		; divisor == 3
	nop
	 b,n   $$divI_4		; divisor == 4
	nop
	 b,n   $$divI_5		; divisor == 5
	nop
	 b,n   $$divI_6		; divisor == 6
	nop
	 b,n   $$divI_7		; divisor == 7
	nop
	 b,n   $$divI_8		; divisor == 8
	nop
	 b,n   $$divI_9		; divisor == 9
	nop
	 b,n   $$divI_10	; divisor == 10
	nop
	b	normal1		; divisor == 11
	add,>=	0,arg0,retreg
	 b,n   $$divI_12	; divisor == 12
	nop
	b	normal1		; divisor == 13
	add,>=	0,arg0,retreg
	 b,n   $$divI_14	; divisor == 14
	nop
	 b,n   $$divI_15	; divisor == 15
	nop
;______________________________________________________________________
negative1:
	sub	0,arg0,retreg	; result is negation of dividend
	bv    0(r31)
	addo	arg0,arg1,r0	; trap iff dividend==0x80000000 && divisor==-1
	.exit
	.procend
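; Rough C model of $$divI (illustrative sketch): truncating signed
; division, with a trap on a zero divisor.
;
;   int divI(int a, int b) { return a / b; }
;
; $$divoI is the overflow-trapping variant: the addo in negative1: above
; traps exactly when dividend == 0x80000000 and divisor == -1.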

	.subspa $LIT$
___hp_free_copyright:
	.export ___hp_free_copyright,data
	.align 4
	.string "(c) Copyright 1986 HEWLETT-PACKARD COMPANY\x0aTo anyone who acknowledges that this file is provided \"AS IS\"\x0awithout any express or implied warranty:\x0a    permission to use, copy, modify, and distribute this file\x0afor any purpose is hereby granted without fee, provided that\x0athe above copyright notice and this notice appears in all\x0acopies, and that the name of Hewlett-Packard Company not be\x0aused in advertising or publicity pertaining to distribution\x0aof the software without specific, written prior permission.\x0aHewlett-Packard Company makes no representations about the\x0asuitability of this software for any purpose.\x0a\x00"
	.align 4
	.end