1/*
2 * Copyright (c) 1980, 1986 Regents of the University of California.
3 * All rights reserved.  The Berkeley software License Agreement
4 * specifies the terms and conditions for redistribution.
5 *
6 *	@(#)locore.s	7.21 (Berkeley) 11/22/89
7 */
8
9#include "psl.h"
10#include "pte.h"
11
12#include "errno.h"
13#include "syscall.h"
14#include "cmap.h"
15
16#include "mtpr.h"
17#include "trap.h"
18#include "cpu.h"
19#include "nexus.h"
20#include "cons.h"
21#include "clock.h"
22#include "ioa.h"
23#include "ka630.h"
24#include "ka650.h"
25#include "ka820.h"
26#include "../vaxuba/ubareg.h"
27
28#include "dz.h"
29#include "uu.h"
30#include "ps.h"
31#include "mba.h"
32#include "uba.h"
33
34	.set	HIGH,0x1f	# mask for total disable
35	.set	MCKVEC,4	# offset into scb of machine check vector
36	.set	NBPG,512
37	.set	PGSHIFT,9
38	.set	SYSTEM,0x80000000	# virtual address of system start
39
40	.set	NISP,3		# number of interrupt stack pages
41
42/*
43 * User structure is UPAGES at top of user space.
44 */
45	.globl	_u
46	.set	_u,0x80000000 - UPAGES*NBPG
47
48	.globl	_intstack
49_intstack:
50	.space	NISP*NBPG
51eintstack:
52
53/*
54 * Do a dump.
55 * Called by auto-restart.
56 * May be called manually.
57 */
58	.align	2
59	.globl	_doadump
60	.globl	_msgbufmapped
61_doadump:
62	nop; nop				# .word 0x0101
63#define	_rpbmap	_Sysmap				# rpb, scb, UNIvec[], istack*4
64	bicl2	$PG_PROT,_rpbmap
65	bisl2	$PG_KW,_rpbmap
66	mtpr	$0,$TBIA
67	tstl	_rpb+RP_FLAG			# dump only once!
68	bneq	1f
69	incl	_rpb+RP_FLAG
70	movl	sp,erpb
71	movab	erpb,sp
72	mfpr	$PCBB,-(sp)
73	mfpr	$MAPEN,-(sp)
74	mfpr	$IPL,-(sp)
75	clrl	_msgbufmapped
76	mtpr	$0,$MAPEN
77	mtpr	$HIGH,$IPL
78	pushr	$0x3fff
79	calls	$0,_dumpsys
801:
81	clrl	r11				# boot flags
82	calls	$0,_vaxboot
83	halt
84
85/*
86 * Interrupt vector routines
87 */
88	.globl	_waittime
89
90#define	SCBVEC(name)	.align 2; .globl _X/**/name; _X/**/name
91#define	PANIC(msg)	clrl _waittime; pushab 1f; \
92			calls $1,_panic; 1: .asciz msg
93#define	PRINTF(n,msg)	pushab 1f; calls $n+1,_printf; MSG(msg)
94#define	MSG(msg)	.data; 1: .asciz msg; .text
95#define	PUSHR		pushr $0x3f
96#define	POPR		popr $0x3f
97
98	.data
nofault: .long	0	# where to go on an expected machine check
100	.text
101SCBVEC(machcheck):
102	tstl	nofault
103	bneq	1f
104	PUSHR; pushab 6*4(sp); calls $1,_machinecheck; POPR;
105	addl2 (sp)+,sp; rei
106	.align	2
1071:
108	casel	_cpu,$1,$VAX_MAX
1090:
110	.word	8f-0b		# 1 is 780
111	.word	5f-0b		# 2 is 750
112	.word	5f-0b		# 3 is 730
113	.word	7f-0b		# 4 is 8600
114	.word	5f-0b		# 5 is 8200
115	.word	1f-0b		# 6 is 8800 (unsupported)
116	.word	1f-0b		# 7 is 610  (unsupported)
117	.word	1f-0b		# 8 is 630
118	.word	1f-0b		# 9 is ???
119	.word	9f-0b		# 10 is 650
1205:
121#if defined(VAX8200) || defined(VAX750) || defined(VAX730)
122	mtpr	$0xf,$MCESR
123#endif
124	brb	1f
1257:
126#if VAX8600
127	mtpr	$0,$EHSR
128#endif
129	brb	1f
1308:
131#if VAX780
132	mtpr	$0,$SBIFS
133#endif
134	brb	1f
1359:
136#if VAX650
137	bitl	$PG_V,_KA650MERRmap
138	beql	1f	# don't bother clearing err reg if not mapped in
139	movl	$DSER_CLEAR,_ka650merr+4
140#endif
1411:
142	addl2	(sp)+,sp		# discard mchchk trash
143	movl	nofault,(sp)
144	rei
145
146SCBVEC(kspnotval):
147	PANIC("KSP not valid");
148SCBVEC(powfail):
149	halt
150SCBVEC(chme): SCBVEC(chms): SCBVEC(chmu):
151	PANIC("CHM? in kernel");
152
153SCBVEC(nex0zvec):
154	PUSHR
155	clrl	r0
156	brb	1f
157SCBVEC(nex1zvec):
158	PUSHR
159	movl	$1,r0
1601:
161	cmpl	_cpu,$VAX_8600		# this is a frill
162	beql	2f
163	mfpr	$IPL,-(sp)
164	PRINTF(1, "nexus stray intr ipl%x\n")
165	POPR
166	rei
1672:
168	pushl	r0
169	mfpr	$IPL,-(sp)
170	PRINTF(2, "nexus stray intr ipl%x sbia%d\n")
171	POPR
172	rei
173
174SCBVEC(cmrd):
175	PUSHR; calls $0,_memerr; POPR; rei
176
177SCBVEC(wtime):			/* sbi0err on 8600 */
178#if VAX8600
179	cmpl	_cpu,$VAX_8600
180	bneq	wtimo
181	PANIC("sbia0 error")
182wtimo:
183#endif
184	PUSHR; pushl 6*4(sp); PRINTF(1, "write timeout %x\n"); POPR
185	PANIC("wtimo")
186
187SCBVEC(sbi0fail):
188	PANIC("sbia0 fail")
189SCBVEC(sbi0alert):
190#if VAX8200
191	cmpl	_cpu,$VAX_8200
192	bneq	alert
193	PUSHR; calls $0,_rxcdintr; POPR; rei
194alert:
195#endif
196	PANIC("sbia0 alert")
197SCBVEC(sbi0fault):
198	PANIC("sbia0 fault")
199
200#ifdef notyet
201#if VAX8600
202SCBVEC(sbi1fail):
203	PANIC("sbia1 fail")
204SCBVEC(sbi1alert):
205	PANIC("sbia1 alert")
206SCBVEC(sbi1fault):
207	PANIC("sbia1 fault")
208SCBVEC(sbi1err):
209	PANIC("sbia1 error")
210#endif
211#endif
212
213/*
214 * BI 0 bus error (8200), or SBI silo compare error (others)
215 * VMS boot leaves us 1 BI error to ignore.
216 */
217#if VAX8200 && 0
218	.data
219	.align	2
220_ignorebi: .globl _ignorebi
221	.long	1
222	.text
223#endif VAX8200
224
225SCBVEC(sbisilo):
226#if VAX8200
227	cmpl	_cpu,$VAX_8200
228	bneq	sbisilo
229#if 0
230	blbs	_ignorebi,1f
231#else
232	blbs	_cold,1f
233#endif
234	PUSHR; pushl $0; calls $1,_bi_buserr; POPR
2351:
236	rei
237#endif
238sbisilo:
239	PANIC("sbi silo compare error")
240
241/*
242 * SCB stray interrupt catcher.  Compute and print the actual
243 * SCB vector (for fault diagnosis).
244 */
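/*
 * How the vector number is recovered (explanatory note only): each PJ
 * slot below is 8 bytes long, and the SCB entry for stray vector n is
 * pointed at slot n.  The jsb in slot n pushes the address of slot n+1,
 * so at label 1 below (sp) - (_scbstray+8) equals 8*n; the ashl $-1
 * halves that to 4*n, the byte offset of the vector within the SCB,
 * which is what the PRINTF reports.
 */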
245	.align	2
246_scbstray: .globl _scbstray
247#define	PJ	PUSHR;jsb 1f
248	/* 128 of 'em */
249	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
250	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
251	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
252	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
253	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
254	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
255	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
256	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
257#if VAX8600
258	/* and another 128, for the second SBIA's scb */
259	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
260	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
261	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
262	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
263	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
264	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
265	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
266	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
267#endif
268#undef PJ
2691:
270	subl3	$_scbstray+8,(sp)+,r0
271	mfpr	$IPL,-(sp)
272	ashl	$-1,r0,-(sp)
273/* call a C handler instead? perhaps later */
274	PRINTF(2, "stray scb intr vec 0x%x ipl%x\n")
275	POPR
276	rei
277
278#if NMBA > 0
279SCBVEC(mba3int):
280	PUSHR; incl _intrcnt+I_MBA3; pushl $3; brb 1f
281SCBVEC(mba2int):
282	PUSHR; incl _intrcnt+I_MBA2; pushl $2; brb 1f
283SCBVEC(mba1int):
284	PUSHR; incl _intrcnt+I_MBA1; pushl $1; brb 1f
285SCBVEC(mba0int):
286	PUSHR; incl _intrcnt+I_MBA0; pushl $0
2871:	calls $1,_mbintr
288	POPR
289	incl	_cnt+V_INTR
290	rei
291#endif
292
293#ifdef DW780
294/*
295 * Registers for the uba handling code
296 */
297#define	rUBANUM	r0
298#define	rUBAHD	r1
299#define	rUVEC	r3
300#define	rUBA	r4
301/* r2,r5 are scratch */
302
303#define	I_UBA	I_UBA0		/* base of UBA interrupt counters */
304
305#if NUBA > 4
306SCBVEC(ua7int):
307	PUSHR; movl $7,rUBANUM; moval _uba_hd+(7*UH_SIZE),rUBAHD; brb 1f
308SCBVEC(ua6int):
309	PUSHR; movl $6,rUBANUM; moval _uba_hd+(6*UH_SIZE),rUBAHD; brb 1f
310SCBVEC(ua5int):
311	PUSHR; movl $5,rUBANUM; moval _uba_hd+(5*UH_SIZE),rUBAHD; brb 1f
312SCBVEC(ua4int):
313	PUSHR; movl $4,rUBANUM; moval _uba_hd+(4*UH_SIZE),rUBAHD; brb 1f
314#endif
315SCBVEC(ua3int):
316	PUSHR; movl $3,rUBANUM; moval _uba_hd+(3*UH_SIZE),rUBAHD; brb 1f
317SCBVEC(ua2int):
318	PUSHR; movl $2,rUBANUM; moval _uba_hd+(2*UH_SIZE),rUBAHD; brb 1f
319SCBVEC(ua1int):
320	PUSHR; movl $1,rUBANUM; moval _uba_hd+(1*UH_SIZE),rUBAHD; brb 1f
321SCBVEC(ua0int):
322	PUSHR; movl $0,rUBANUM; moval _uba_hd+(0*UH_SIZE),rUBAHD;
3231:
324	mfpr	$IPL,r2				/* r2 = mfpr(IPL); */
325	movl	UH_UBA(rUBAHD),rUBA		/* uba = uhp->uh_uba; */
326	movl	UBA_BRRVR-0x14*4(rUBA)[r2],rUVEC
327					/* uvec = uba->uba_brrvr[r2-0x14] */
328ubanorm:
329	bleq	ubaerror
330	addl2	UH_VEC(rUBAHD),rUVEC		/* uvec += uh->uh_vec */
331	bicl3	$3,(rUVEC),r1
332	jmp	2(r1)				/* 2 skips ``pushr $0x3f'' */
333ubaerror:
334	PUSHR; calls $0,_ubaerror; POPR		/* ubaerror r/w's r0-r5 */
335	tstl rUVEC; jneq ubanorm		/* rUVEC contains result */
336	incl _intrcnt+I_UBA[rUBANUM]
337	incl	_cnt+V_INTR
338	POPR
339	rei
340#endif
341SCBVEC(cnrint):
342	PUSHR; calls $0,_cnrint; POPR
343	incl _cnt+V_INTR
344	incl _intrcnt+I_CNR
345	rei
346SCBVEC(cnxint):
347	PUSHR; calls $0,_cnxint; POPR
348	incl _cnt+V_INTR
349	incl _intrcnt+I_CNX
350	rei
351SCBVEC(hardclock):
352	PUSHR
353	mtpr $ICCS_RUN|ICCS_IE|ICCS_INT|ICCS_ERR,$ICCS
354#if NPS > 0
355	pushl	4+6*4(sp); pushl 4+6*4(sp);
356	calls	$2,_psextsync
357#endif
358	pushl 4+6*4(sp); pushl 4+6*4(sp);
359	calls $2,_hardclock			# hardclock(pc,psl)
360	POPR;
361	incl	_cnt+V_INTR
362	incl	_intrcnt+I_CLOCK
363	rei
364SCBVEC(softclock):
365	PUSHR
366	pushl	4+6*4(sp); pushl 4+6*4(sp);
367	calls	$2,_softclock			# softclock(pc,psl)
368	POPR;
369	incl	_cnt+V_SOFT
370	rei
371
372#include "../net/netisr.h"
373	.globl	_netisr
374SCBVEC(netintr):
375	PUSHR
376#include "imp.h"
377#if NIMP > 0
378	bbcc	$NETISR_IMP,_netisr,1f; calls $0,_impintr; 1:
379#endif
380#ifdef INET
381	bbcc	$NETISR_IP,_netisr,1f; calls $0,_ipintr; 1:
382#endif
383#ifdef NS
384	bbcc	$NETISR_NS,_netisr,1f; calls $0,_nsintr; 1:
385#endif
386#ifdef ISO
387	bbcc	$NETISR_ISO,_netisr,1f; calls $0,_clnlintr; 1:
388#endif
389	POPR
390	incl	_cnt+V_SOFT
391	rei
392
393SCBVEC(consdin):
394	PUSHR;
395	incl	_intrcnt+I_TUR
396	casel	_cpu,$VAX_750,$VAX_8200
3970:
398	.word	5f-0b		# 2 is VAX_750
399	.word	3f-0b		# 3 is VAX_730
400	.word	6f-0b		# 4 is VAX_8600
401	.word	7f-0b		# 5 is VAX_8200
402	halt
4035:
404#if defined(VAX750) && !defined(MRSP)
405	jsb	tudma
406#endif
4073:
408#if defined(VAX750) || defined(VAX730)
409	calls	$0,_turintr
410	brb	2f
411#else
412	halt
413#endif
4147:
415#if VAX8200
416	calls	$0,_rx50intr
417	brb	2f
418#else
419	halt
420#endif
4216:
422#if VAX8600
423	calls	$0,_crlintr
424#else
425	halt
426#endif
4272:
428	POPR
429	incl	_cnt+V_INTR
430	rei
431
432#if defined(VAX750) || defined(VAX730)
433SCBVEC(consdout):
434	PUSHR; calls $0,_tuxintr; POPR
435	incl _cnt+V_INTR
436	incl _intrcnt+I_TUX
437	rei
438#else
439SCBVEC(consdout):
440	halt
441#endif
442
443#if NDZ > 0
444/*
445 * DZ pseudo dma routine:
446 *	r0 - controller number
447 */
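/*
 * In C the loop below is roughly the following (a sketch only; p_mem
 * and p_end are the names used in the comments, while p_arg and p_fcn
 * are stand-ins for the last two longwords of each 20-byte pdma entry,
 * the tty pointer and the service routine):
 *
 *	dp = &dzpdma[8 * ctlr];
 *	for (;;) {
 *		c = high byte of the device csr;
 *		if ((c & 0x80) == 0)
 *			return from interrupt;		TRDY off, done
 *		lp = dp + (c & 7);			transmit line number
 *		if (lp->p_mem < lp->p_end)
 *			dztbuf = *lp->p_mem++;
 *		else
 *			(*lp->p_fcn)(lp->p_arg);	usually dzxint
 *	}
 */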
448	.align	1
449	.globl	dzdma
450dzdma:
451	mull2	$8*20,r0
452	movab	_dzpdma(r0),r3		# pdma structure base
453					# for this controller
454dzploop:
455	movl	r3,r0
456	movl	(r0)+,r1		# device register address
457	movzbl	1(r1),r2		# get line number
458	bitb	$0x80,r2		# TRDY on?
459	beql	dzprei			# no
460	bicb2	$0xf8,r2		# clear garbage bits
461	mull2	$20,r2
462	addl2	r2,r0			# point at line's pdma structure
463	movl	(r0)+,r2		# p_mem
464	cmpl	r2,(r0)+		# p_mem < p_end ?
465	bgequ	dzpcall			# no, go call dzxint
466	movb	(r2)+,6(r1)		# dztbuf = *p_mem++
467	movl	r2,-8(r0)
468	brb 	dzploop			# check for another line
469dzprei:
470	POPR
471	incl	_cnt+V_PDMA
472	rei
473
474dzpcall:
475	pushl	r3
476	pushl	(r0)+			# push tty address
477	calls	$1,*(r0)		# call interrupt rtn
478	movl	(sp)+,r3
479	brb 	dzploop			# check for another line
480#endif
481
482#if NUU > 0 && defined(UUDMA)
483/*
484 * Pseudo DMA routine for tu58 (on DL11)
485 *	r0 - controller number
486 */
487	.align	1
488	.globl	uudma
489uudma:
490	movl	_uudinfo[r0],r2
491	movl	16(r2),r2		# r2 = uuaddr
492	mull3	$48,r0,r3
493	movab	_uu_softc(r3),r5	# r5 = uuc
494
495	cvtwl	2(r2),r1		# c = uuaddr->rdb
496	bbc	$15,r1,1f		# if (c & UUDB_ERROR)
497	movl	$13,16(r5)		#	uuc->tu_state = TUC_RCVERR;
498	rsb				#	let uurintr handle it
4991:
500	tstl	4(r5)			# if (uuc->tu_rcnt) {
501	beql	1f
502	movb	r1,*0(r5)		#	*uuc->tu_rbptr++ = r1
503	incl	(r5)
504	decl	4(r5)			#	if (--uuc->tu_rcnt)
505	beql	2f			#		done
506	tstl	(sp)+
507	POPR				# 	registers saved in ubglue.s
508	rei				# }
5092:
510	cmpl	16(r5),$8		# if (uuc->tu_state != TUS_GETH)
511	beql	2f			# 	let uurintr handle it
5121:
513	rsb
5142:
515	mull2	$14,r0			# sizeof(uudata[ctlr]) = 14
516	movab	_uudata(r0),r4		# data = &uudata[ctlr];
517	cmpb	$1,(r4)			# if (data->pk_flag != TUF_DATA)
518	bneq	1b
519#ifdef notdef
520	/* this is for command packets */
521	beql	1f			# 	r0 = uuc->tu_rbptr
522	movl	(r5),r0
523	brb	2f
5241:					# else
525#endif
526	movl	24(r5),r0		# 	r0 = uuc->tu_addr
5272:
528	movzbl	1(r4),r3		# counter to r3 (data->pk_count)
529	movzwl	(r4),r1			# first word of checksum (=header)
530	mfpr	$IPL,-(sp)		# s = spl5();
531	mtpr	$0x15,$IPL		# to keep disk interrupts out
532	clrw	(r2)			# disable receiver interrupts
5333:	bbc	$7,(r2),3b		# while ((uuaddr->rcs & UUCS_READY)==0);
534	cvtwb	2(r2),(r0)+		# *buffer = uuaddr->rdb & 0xff
535	sobgtr	r3,1f			# continue with next byte ...
536	addw2	2(r2),r1		# unless this was the last (odd count)
537	brb	2f
538
5391:	bbc	$7,(r2),1b		# while ((uuaddr->rcs & UUCS_READY)==0);
540	cvtwb	2(r2),(r0)+		# *buffer = uuaddr->rdb & 0xff
541	addw2	-2(r0),r1		# add to checksum..
5422:
543	adwc	$0,r1			# get the carry
544	sobgtr	r3,3b			# loop while r3 > 0
545/*
546 * We're ready to get the checksum
547 */
5481:	bbc	$7,(r2),1b		# while ((uuaddr->rcs & UUCS_READY)==0);
549	cvtwb	2(r2),12(r4)		# get first (lower) byte
5501:	bbc	$7,(r2),1b
551	cvtwb	2(r2),13(r4)		# ..and second
552	cmpw	12(r4),r1		# is checksum ok?
553	beql	1f
554	movl	$14,16(r5)		# uuc->tu_state = TUS_CHKERR
555	brb	2f			# exit
5561:
557	movl	$11,16(r5)		# uuc->tu_state = TUS_GET (ok)
5582:
559	movw	$0x40,(r2)		# enable receiver interrupts
560	mtpr	(sp)+,$IPL		# splx(s);
561	rsb				# continue processing in uurintr
562#endif
563
564#if defined(VAX750) && !defined(MRSP)
565/*
566 * Pseudo DMA routine for VAX-11/750 console tu58
567 *   	    (without MRSP)
568 */
569	.align	1
570	.globl	tudma
571tudma:
572	movab	_tu,r5			# r5 = tu
573	tstl	4(r5)			# if (tu.tu_rcnt) {
574	beql	3f
575	mfpr	$CSRD,r1		# get data from tu58
576	movb	r1,*0(r5)		#	*tu.tu_rbptr++ = r1
577	incl	(r5)
578	decl	4(r5)			#	if (--tu.tu_rcnt)
579	beql	1f			#		done
580	tstl	(sp)+
581	POPR				# 	registers saved in ubglue.s
582	rei				# 	data handled, done
5831:					# }
584	cmpl	16(r5),$8		# if (tu.tu_state != TUS_GETH)
585	beql	2f			# 	let turintr handle it
5863:
587	rsb
5882:
589	movab	_tudata,r4		# r4 = tudata
590	cmpb	$1,(r4)			# if (tudata.pk_flag != TUF_DATA)
591	bneq	3b			# 	let turintr handle it
5921:					# else
593	movl	24(r5),r1		# get buffer pointer to r1
594	movzbl	1(r4),r3		# counter to r3
595	movzwl	(r4),r0			# first word of checksum (=header)
596	mtpr	$0,$CSRS		# disable receiver interrupts
5973:
598	bsbw	5f			# wait for next byte
599	mfpr	$CSRD,r5
600	movb	r5,(r1)+		# *buffer = rdb
601	sobgtr	r3,1f			# continue with next byte ...
602	mfpr	$CSRD,r2		# unless this was the last (odd count)
603	brb	2f
604
6051:	bsbw	5f			# wait for next byte
606	mfpr	$CSRD,r5
607	movb	r5,(r1)+		# *buffer = rdb
608	movzwl	-2(r1),r2		# get the last word back from memory
6092:
610	addw2	r2,r0			# add to checksum..
611	adwc	$0,r0			# get the carry
612	sobgtr	r3,3b			# loop while r3 > 0
613/*
614 * We're ready to get the checksum.
615 */
616	bsbw	5f
617	movab	_tudata,r4
618	mfpr	$CSRD,r5
619	movb	r5,12(r4)		# get first (lower) byte
620	bsbw	5f
621	mfpr	$CSRD,r5
622	movb	r5,13(r4)		# ..and second
623	movab	_tu,r5
624	cmpw	12(r4),r0		# is checksum ok?
625	beql	1f
626	movl	$14,16(r5)		# tu.tu_state = TUS_CHKERR
627	brb	2f			# exit
6281:
629	movl	$11,16(r5)		# tu.tu_state = TUS_GET
6302:
631	mtpr	$0x40,$CSRS		# enable receiver interrupts
632	rsb				# continue processing in turintr
633/*
634 * Loop until a new byte is ready from
635 * the tu58, make sure we don't loop forever
636 */
6375:
638	movl	$5000,r5		# loop max 5000 times
6391:
640	mfpr	$CSRS,r2
641	bbs	$7,r2,1f
642	sobgtr	r5,1b
643	movab	_tu,r5
644	movl	$13,16(r5)		# return TUS_RCVERR
645	tstl	(sp)+			# and let turintr handle it
6461:
647	rsb
648#endif
649
650/*
651 * BI passive release things.
652 */
653SCBVEC(passiverel):
654	rei				# well that was useless
655
656/*
657 * Stray UNIBUS interrupt catch routines
658 */
659	.data
660	.align	2
661#define	PJ	PUSHR;jsb _Xustray
662	.globl	_catcher
663_catcher:
664	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
665	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
666	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
667	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
668	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
669	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
670	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
671	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
672
673	.globl	_cold
674	.globl	_br
675	.globl	_cvec
676_cold:	.long	1
677_br:	.long	0
678_cvec:	.long	0
679
680	.text
681SCBVEC(ustray):
682	blbc	_cold,1f
683	mfpr	$IPL,r11
684	movl	r11,_br
685	subl3	$_catcher+8,(sp)+,r10
686	ashl	$-1,r10,r10
687	movl	r10,_cvec
688	POPR
689	rei
6901:
691	subl3	$_catcher+8,(sp)+,r0
692	ashl	$-1,r0,-(sp)
693	mfpr	$IPL,-(sp)
694	PRINTF(2, "uba?: stray intr ipl %x vec %o\n")
695	POPR
696	rei
697
698#if VAX630 || VAX650
699/*
700 * Emulation OpCode jump table:
701 *	ONLY GOES FROM 0xf8 (-8) TO 0x3B (59)
702 */
703#define EMUTABLE	0x43
704#define NOEMULATE	.long noemulate
705#define	EMULATE(a)	.long _EM/**/a
706	.globl	_emJUMPtable
707_emJUMPtable:
708/* f8 */	EMULATE(ashp);	EMULATE(cvtlp);	NOEMULATE;	NOEMULATE
709/* fc */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
710/* 00 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
711/* 04 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
712/* 08 */	EMULATE(cvtps);	EMULATE(cvtsp);	NOEMULATE;	EMULATE(crc)
713/* 0c */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
714/* 10 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
715/* 14 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
716/* 18 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
717/* 1c */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
718/* 20 */	EMULATE(addp4);	EMULATE(addp6);	EMULATE(subp4);	EMULATE(subp6)
719/* 24 */	EMULATE(cvtpt);	EMULATE(mulp);	EMULATE(cvttp);	EMULATE(divp)
720/* 28 */	NOEMULATE;	EMULATE(cmpc3);	EMULATE(scanc);	EMULATE(spanc)
721/* 2c */	NOEMULATE;	EMULATE(cmpc5);	EMULATE(movtc);	EMULATE(movtuc)
722/* 30 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
723/* 34 */	EMULATE(movp);	EMULATE(cmpp3);	EMULATE(cvtpl);	EMULATE(cmpp4)
724/* 38 */	EMULATE(editpc); EMULATE(matchc); EMULATE(locc); EMULATE(skpc)
725#endif
726
727/*
728 * Trap and fault vector routines
729 */
730#define	TRAP(a)	pushl $T_/**/a; jbr alltraps
731
732/*
733 * Ast delivery (profiling and/or reschedule)
734 */
735SCBVEC(astflt):
736	pushl $0; TRAP(ASTFLT)
737SCBVEC(privinflt):
738	pushl $0; TRAP(PRIVINFLT)
739SCBVEC(xfcflt):
740	pushl $0; TRAP(XFCFLT)
741SCBVEC(resopflt):
742	pushl $0; TRAP(RESOPFLT)
743SCBVEC(resadflt):
744	pushl $0; TRAP(RESADFLT)
745SCBVEC(bptflt):
746	pushl $0; TRAP(BPTFLT)
747SCBVEC(compatflt):
748	TRAP(COMPATFLT);
749SCBVEC(kdbintr):
750	pushl $0; TRAP(KDBTRAP)
751SCBVEC(tracep):
752	pushl $0; TRAP(TRCTRAP)
753SCBVEC(arithtrap):
754	TRAP(ARITHTRAP)
755SCBVEC(protflt):
756	blbs	(sp)+,segflt
757	TRAP(PROTFLT)
758segflt:
759	TRAP(SEGFLT)
760
761/*
762 * The following is called with the stack set up as follows:
763 *
764 *	  (sp):	Opcode
765 *	 4(sp):	Instruction PC
766 *	 8(sp):	Operand 1
767 *	12(sp):	Operand 2
768 *	16(sp):	Operand 3
769 *	20(sp):	Operand 4
770 *	24(sp):	Operand 5
771 *	28(sp):	Operand 6
772 *	32(sp):	Operand 7 (unused)
773 *	36(sp):	Operand 8 (unused)
774 *	40(sp):	Return PC
775 *	44(sp):	Return PSL
776 *	48(sp): TOS before instruction
777 *
778 * Each individual routine is called with the stack set up as follows:
779 *
780 *	  (sp):	Return address of trap handler
781 *	 4(sp):	Opcode (will get return PSL)
782 *	 8(sp):	Instruction PC
783 *	12(sp):	Operand 1
784 *	16(sp):	Operand 2
785 *	20(sp):	Operand 3
786 *	24(sp):	Operand 4
787 *	28(sp):	Operand 5
788 *	32(sp):	Operand 6
789 *	36(sp):	saved register 11
790 *	40(sp):	saved register 10
791 *	44(sp):	Return PC
792 *	48(sp):	Return PSL
793 *	52(sp): TOS before instruction
 */
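/*
 * The second layout, written as a C structure for reference (purely
 * illustrative; nothing in the kernel declares it this way):
 *
 *	struct emulframe {
 *		int	ef_rsb;		return address pushed by the jsb
 *		int	ef_opcode;	later overwritten with the PSL
 *		int	ef_pc;
 *		int	ef_operand[6];
 *		int	ef_r11, ef_r10;	stashed in the two unused operands
 *		int	ef_retpc;
 *		int	ef_psl;
 *	};
 */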
795
796SCBVEC(emulate):
797#if VAX630 || VAX650
798	movl	r11,32(sp)		# save register r11 in unused operand
799	movl	r10,36(sp)		# save register r10 in unused operand
800	cvtbl	(sp),r10		# get opcode
801	addl2	$8,r10			# shift negative opcodes
802	subl3	r10,$EMUTABLE,r11	# forget it if opcode is out of range
803	bcs	noemulate
804	movl	_emJUMPtable[r10],r10	# call appropriate emulation routine
805	jsb	(r10)		# routines put return values into regs 0-5
806	movl	32(sp),r11		# restore register r11
807	movl	36(sp),r10		# restore register r10
808	insv	(sp),$0,$4,44(sp)	# and condition codes in Opcode spot
809	addl2	$40,sp			# adjust stack for return
810	rei
811noemulate:
812	addl2	$48,sp			# adjust stack for
813#endif
814	.word	0xffff			# "reserved instruction fault"
815SCBVEC(emulateFPD):
816	.word	0xffff			# "reserved instruction fault"
817SCBVEC(transflt):
818	bitl	$2,(sp)+
819	bnequ	tableflt
	jsb	Fastreclaim		# try to avoid pagein
821	TRAP(PAGEFLT)
822tableflt:
823	TRAP(TABLEFLT)
824
825alltraps:
826	mfpr	$USP,-(sp); calls $0,_trap; mtpr (sp)+,$USP
827	incl	_cnt+V_TRAP
828	addl2	$8,sp			# pop type, code
	mtpr	$HIGH,$IPL		## don't go to a higher IPL (GROT)
830	rei
831
832SCBVEC(syscall):
833	pushl	$T_SYSCALL
834	mfpr	$USP,-(sp); calls $0,_syscall; mtpr (sp)+,$USP
835	incl	_cnt+V_SYSCALL
836	addl2	$8,sp			# pop type, code
	mtpr	$HIGH,$IPL		## don't go to a higher IPL (GROT)
838	rei
839
840/*
841 * System page table
842 * Mbmap and Usrptmap are enlarged by CLSIZE entries
843 * as they are managed by resource maps starting with index 1 or CLSIZE.
844 */
845#define	vaddr(x)	((((x)-_Sysmap)/4)*NBPG+0x80000000)
846#define	SYSMAP(mname, vname, npte)			\
847_/**/mname:	.globl	_/**/mname;		\
848	.space	(npte)*4;				\
849	.globl	_/**/vname;			\
850	.set	_/**/vname,vaddr(_/**/mname)
851#define	ADDMAP(npte)	.space	(npte)*4
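/*
 * For reference, SYSMAP(CMAP1, CADDR1, 1) below amounts to
 *
 *	_CMAP1:	.globl	_CMAP1
 *		.space	(1)*4
 *		.globl	_CADDR1
 *		.set	_CADDR1,vaddr(_CMAP1)
 *
 * i.e. one system pte named _CMAP1 plus _CADDR1, the kernel virtual
 * address which that pte maps.
 */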
852
853	.data
854	.align	2
855	SYSMAP(Sysmap	,Sysbase	,SYSPTSIZE	)
856	SYSMAP(Forkmap	,forkutl	,UPAGES		)
857	SYSMAP(Xswapmap	,xswaputl	,UPAGES		)
858	SYSMAP(Xswap2map,xswap2utl	,UPAGES		)
859	SYSMAP(Swapmap	,swaputl	,UPAGES		)
860	SYSMAP(Pushmap	,pushutl	,UPAGES		)
861	SYSMAP(Vfmap	,vfutl		,UPAGES		)
862	SYSMAP(CMAP1	,CADDR1		,1		)
863	SYSMAP(CMAP2	,CADDR2		,1		)
864	SYSMAP(mmap	,vmmap		,1		)
865	SYSMAP(alignmap	,alignutl	,1		)	/* XXX */
866	SYSMAP(msgbufmap,msgbuf		,MSGBUFPTECNT	)
867	SYSMAP(Mbmap	,mbutl		,NMBCLUSTERS*MCLBYTES/NBPG+CLSIZE )
868#ifdef MFS
869#include "../ufs/mfsiom.h"
870	/*
871	 * Used by the mfs_doio() routine for physical I/O
872	 */
873	SYSMAP(Mfsiomap	,mfsiobuf	,MFS_MAPREG )
874#endif /* MFS */
875#ifdef NFS
876#include "../nfs/nfsiom.h"
877	/*
878	 * Used by the nfs_doio() routine for physical I/O
879	 */
880	SYSMAP(Nfsiomap	,nfsiobuf	,NFS_MAPREG )
881#endif /* NFS */
882	/*
883	 * This is the map used by the kernel memory allocator.
884	 * It is expanded as necessary by the special features
885	 * that use it.
886	 *
887	 * XXX: NEED way to compute kmem size from maxusers,
888	 * device complement
889	 */
890	SYSMAP(kmempt	,kmembase	,300*CLSIZE 	)
891#ifdef	GPROF
892				ADDMAP( 600*CLSIZE	)
893#endif
894	SYSMAP(ekmempt	,kmemlimit	,0		)
895
896	SYSMAP(UMBAbeg	,umbabeg	,0		)
897	SYSMAP(Nexmap	,nexus		,16*MAXNNEXUS	)
898#ifdef QBA
899#if (QBAPAGES+UBAIOPAGES) > (UBAPAGES+UBAIOPAGES)*NUBA
900	SYSMAP(UMEMmap	,umem		,(QBAPAGES+UBAIOPAGES) )
901#else
902	SYSMAP(UMEMmap	,umem		,(UBAPAGES+UBAIOPAGES)*NUBA )
903#endif
904#else /* QBA */
905	SYSMAP(UMEMmap	,umem		,(UBAPAGES+UBAIOPAGES)*NUBA )
906#endif /* QBA */
907#if VAX8600
908	SYSMAP(Ioamap	,ioa		,MAXNIOA*IOAMAPSIZ/NBPG	)
909#endif
910#if VAX8200 || VAX630
911	SYSMAP(Clockmap	,ka630clock	,1		)
912#endif
913#if VAX8200
914	/* alas, the clocks on the 8200 and 630 are not quite identical */
915	/* they could be shared for now, but this seemed cleaner */
916	.globl _ka820clock; .set _ka820clock,_ka630clock
917	SYSMAP(Ka820map	,ka820port	,1		)
918	SYSMAP(RX50map	,rx50device	,1		)
919#ifdef notyet
920	SYSMAP(BRAMmap	,ka820bootram	,KA820_BRPAGES	)
921	SYSMAP(EEPROMmap,ka820eeprom	,KA820_EEPAGES	)
922#endif
923#endif
924#if VAX630
925	SYSMAP(Ka630map	,ka630cpu	,1		)
926#endif
927#if VAX650
928 	SYSMAP(KA650MERRmap	,ka650merr	,1		)
929 	SYSMAP(KA650CBDmap	,ka650cbd	,1		)
930 	SYSMAP(KA650SSCmap	,ka650ssc	,3		)
931 	SYSMAP(KA650IPCRmap	,ka650ipcr	,1		)
932 	SYSMAP(KA650CACHEmap	,ka650cache	,KA650_CACHESIZE/NBPG )
933#endif
934#ifdef QBA
935	/*
936	 * qvss and qdss don't coexist - one map will suffice
937	 * for either. qvss is 256K each and qdss is 64K each.
938	 */
939#include "qv.h"
940#include "qd.h"
941#if NQV > 0 || NQD > 0
942	SYSMAP(QVmap	,qvmem		,((512*NQV)+(128*NQD)))
943#endif
944#endif
945	SYSMAP(UMBAend	,umbaend	,0		)
946
947	SYSMAP(Usrptmap	,usrpt		,USRPTSIZE+CLSIZE )
948
949eSysmap:
950	.globl	_Syssize
951	.set	_Syssize,(eSysmap-_Sysmap)/4
952	.text
953
954/*
955 * Initialization
956 *
957 * ipl 0x1f; mapen 0; scbb, pcbb, sbr, slr, isp, ksp not set
958 */
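/*
 * Roughly, the code below:
 *	sets SCBB, SBR and SLR, and double maps the kernel through P0;
 *	reads the cpu type from the SID register;
 *	fills in the restart parameter block (rpb) and its checksum;
 *	sizes memory with badaddr() and clears bss and proc 0's pages;
 *	write protects the rpb and kernel text, then turns on MAPEN;
 *	hand builds proc 0's page table and pcb, and ldpctx/rei's onto it;
 *	copies the signal trampoline into the u. area and calls main().
 */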
959	.data
960	.globl	_cpu
961_cpu:	.long	0
962	.text
963	.globl	start
964start:
965	.word	0
966	mtpr	$0,$ICCS
967/* set system control block base and system page table params */
968	mtpr	$_scb-0x80000000,$SCBB
969	mtpr	$_Sysmap-0x80000000,$SBR
970	mtpr	$_Syssize,$SLR
971/* double map the kernel into the virtual user addresses of phys mem */
972	mtpr	$_Sysmap,$P0BR
973	mtpr	$_Syssize,$P0LR
974/* set ISP and get cpu type */
975	movl	$_intstack+NISP*NBPG,sp
976	mfpr	$SID,r0
977	movab	_cpu,r1
978	extzv	$24,$8,r0,(r1)
979/* init RPB */
980	movab	_rpb,r0
981	movl	r0,(r0)+			# rp_selfref
982	movab	_doadump,r1
983	movl	r1,(r0)+			# rp_dumprout
984	movl	$0x1f,r2
985	clrl	r3
9861:	addl2	(r1)+,r3; sobgtr r2,1b
987	movl	r3,(r0)+			# rp_chksum
988/* count up memory; _physmem contains limit */
989	clrl	r7
990	ashl	$PGSHIFT,_physmem,r8
991	decl	r8
9921:	pushl	$4; pushl r7; calls $2,_badaddr; tstl r0; bneq 9f
993	acbl	r8,$64*1024,r7,1b
9949:
995#if  VAX630 || VAX650
996/* reserve area at top of memory for processor specific use */
997	cmpb	_cpu,$VAX_630
998	beql	1f
999	cmpb	_cpu,$VAX_650
1000	bneq	2f
1001	subl2	$32768,r7	# space for Qbus map registers
1002	brb	2f
10031:
1004	subl2   $4096,r7	# space for console scratchpad
10052:
1006#endif
1007/* clear memory from kernel bss and pages for proc 0 u. and page table */
1008	movab	_edata,r6; bicl2 $SYSTEM,r6
1009	movab	_end,r5; bicl2 $SYSTEM,r5
1010#ifdef KADB
1011	subl2	$4,r5
10121:	clrl	(r6); acbl r5,$4,r6,1b		# clear just bss
1013	addl2	$4,r5
1014	bbc	$6,r11,0f			# check RB_KDB
1015	bicl3	$SYSTEM,r9,r5			# skip symbol & string tables
1016	bicl3	$SYSTEM,r9,r6			# r9 obtained from boot
1017#endif
10180:	bisl3	$SYSTEM,r5,r9			# convert to virtual address
1019	addl2	$NBPG-1,r9			# roundup to next page
1020	addl2	$(UPAGES*NBPG)+NBPG+NBPG,r5
10211:	clrq	(r6); acbl r5,$8,r6,1b
1022/* trap() and syscall() save r0-r11 in the entry mask (per ../h/reg.h) */
1023/* panic() is convenient place to save all for debugging */
1024	bisw2	$0x0fff,_trap
1025	bisw2	$0x0fff,_syscall
1026	bisw2	$0x0fff,_panic
1027	calls	$0,_fixctlrmask
1028/* initialize system page table: uba vectors and int stack writeable */
1029	clrl	r2
1030	movab	eintstack,r1; bbcc $31,r1,0f; 0: ashl $-PGSHIFT,r1,r1
10311:	bisl3	$PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
1032/*
1033 * make rpb read-only as red zone for interrupt stack
1034 * (scb(s) and UNIvec are write-protected later)
1035 */
1036	bicl2	$PG_PROT,_rpbmap
1037	bisl2	$PG_KR,_rpbmap
1038/* make kernel text space read-only */
1039	movab	_etext+NBPG-1,r1; bbcc $31,r1,0f; 0: ashl $-PGSHIFT,r1,r1
10401:	bisl3	$PG_V|PG_URKR,r2,_Sysmap[r2]; aoblss r1,r2,1b
1041/* make kernel data, bss, read-write */
1042	bicl3	$SYSTEM,r9,r1; ashl $-PGSHIFT,r1,r1
10431:	bisl3	$PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
1044/* now go to mapped mode */
1045	mtpr	$0,$TBIA; mtpr $1,$MAPEN; jmp *$0f; 0:
1046/* init mem sizes */
1047	ashl	$-PGSHIFT,r7,_physmem
1048/* setup context for proc[0] == Scheduler */
1049	bicl3	$SYSTEM|(NBPG-1),r9,r6	# make phys, page boundary
1050/* setup page table for proc[0] */
1051	ashl	$-PGSHIFT,r6,r3			# r3 = btoc(r6)
1052	bisl3	$PG_V|PG_KW,r3,_Usrptmap	# init first upt entry
1053	incl	r3
1054	movab	_usrpt,r0
1055	mtpr	r0,$TBIS
1056/* init p0br, p0lr */
1057	mtpr	r0,$P0BR
1058	mtpr	$0,$P0LR
1059/* init p1br, p1lr */
1060	movab	NBPG(r0),r0
1061	movl	$0x200000-UPAGES,r1
1062	mtpr	r1,$P1LR
1063	mnegl	r1,r1
1064	moval	-4*UPAGES(r0)[r1],r2
1065	mtpr	r2,$P1BR
1066/* setup mapping for UPAGES of _u */
1067	movl	$UPAGES,r2; movab _u+NBPG*UPAGES,r1; addl2 $UPAGES,r3; jbr 2f
10681:	decl	r3
1069	moval	-NBPG(r1),r1;
1070	bisl3	$PG_V|PG_URKW,r3,-(r0)
1071	mtpr	r1,$TBIS
10722:	sobgeq	r2,1b
1073/* initialize (slightly) the pcb */
1074	movab	UPAGES*NBPG(r1),PCB_KSP(r1)
1075	mnegl	$1,PCB_ESP(r1)
1076	mnegl	$1,PCB_SSP(r1)
1077	movl	r1,PCB_USP(r1)
1078	mfpr	$P0BR,PCB_P0BR(r1)
1079	mfpr	$P0LR,PCB_P0LR(r1)
1080	movb	$4,PCB_P0LR+3(r1)		# disable ast
1081	mfpr	$P1BR,PCB_P1BR(r1)
1082	mfpr	$P1LR,PCB_P1LR(r1)
1083	movl	$CLSIZE,PCB_SZPT(r1)		# init u.u_pcb.pcb_szpt
1084	movl	r9,PCB_R9(r1)
1085	movl	r10,PCB_R10(r1)
1086	movl	r11,PCB_R11(r1)
1087	movab	1f,PCB_PC(r1)			# initial pc
1088	clrl	PCB_PSL(r1)			# mode(k,k), ipl=0
1089	ashl	$PGSHIFT,r3,r3
1090	mtpr	r3,$PCBB			# first pcbb
1091/* set regs, p0br, p0lr, p1br, p1lr, astlvl, ksp and change to kernel mode */
1092	ldpctx
1093	rei
1094/* put signal trampoline code in u. area */
10951:	movab	_u,r0
1096	movc3	$19,sigcode,PCB_SIGC(r0)
1097/* save boot device in global _bootdev */
1098	movl	r10,_bootdev
1099/* save reboot flags in global _boothowto */
1100	movl	r11,_boothowto
1101#ifdef KADB
1102/* save end of symbol & string table in global _bootesym */
1103	subl3	$NBPG-1,r9,_bootesym
1104#endif
1105/* calculate firstaddr, and call main() */
1106	bicl3	$SYSTEM,r9,r0; ashl $-PGSHIFT,r0,-(sp)
1107	addl2	$UPAGES+1,(sp); calls $1,_main
1108/* proc[1] == /etc/init now running here; run icode */
1109	pushl	$PSL_CURMOD|PSL_PRVMOD; pushl $0; rei
1110
/* signal trampoline code: this code takes exactly 19 bytes; that size */
/* is assumed in ../vax/pcb.h and in the movc3 above */
1113sigcode:
1114	calls	$4,8(pc)	# params pushed by sendsig
1115	movl	sp,ap		# calls frame built by sendsig
1116	chmk	$SYS_sigreturn	# cleanup mask and onsigstack
1117	halt			# sigreturn() does not return!
1118	.word	0x3f		# registers 0-5
1119	callg	(ap),*16(ap)	# call the signal handler
1120	ret			# return to code above
1121
1122	.globl	_icode
1123	.globl	_initflags
1124	.globl	_szicode
1125/*
 * Icode is copied out to process 1 to exec /sbin/init.
1127 * If the exec fails, process 1 exits.
1128 */
1129_icode:
1130	pushab	b`argv-l0(pc)
1131l0:	pushab	b`init-l1(pc)
1132l1:	pushl	$2
1133	movl	sp,ap
1134	chmk	$SYS_execv
1135	pushl	r0
1136	chmk	$SYS_exit
1137
1138init:	.asciz	"/sbin/init"
1139	.align	2
1140_initflags:
1141	.long	0
1142argv:	.long	init+6-_icode
1143	.long	_initflags-_icode
1144	.long	0
1145_szicode:
1146	.long	_szicode-_icode
1147
1148/*
1149 * Primitives
1150 */
1151
1152#ifdef GPROF
1153#define	ENTRY(name, regs) \
1154	.globl _/**/name; .align 1; _/**/name: .word regs; jsb mcount
1155#define	JSBENTRY(name, regs) \
1156	.globl _/**/name; _/**/name: \
1157	movl fp,-(sp); movab -12(sp),fp; pushr $(regs); jsb mcount; \
1158	popr $(regs); movl (sp)+,fp
1159#else
1160#define	ENTRY(name, regs) \
1161	.globl _/**/name; .align 1; _/**/name: .word regs
1162#define	JSBENTRY(name, regs) \
1163	.globl _/**/name; _/**/name:
1164#endif GPROF
1165#define R0 0x01
1166#define R1 0x02
1167#define R2 0x04
1168#define R3 0x08
1169#define R4 0x10
1170#define R5 0x20
1171#define R6 0x40
1172
1173/*
1174 * badaddr(addr, len)
1175 *	see if access addr with a len type instruction causes a machine check
1176 *	len is length of access (1=byte, 2=short, 4=long)
1177 */
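/*
 * In C terms: int badaddr(addr, len) returns 0 if the location can be
 * accessed and 1 if the probe machine checks; nofault is set so that
 * the machine check vector above unwinds to 2f instead of treating the
 * fault as fatal.
 */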
1178	.globl	_badaddr
1179_badaddr:
1180	.word	0
1181	movl	$1,r0
1182	mfpr	$IPL,r1
1183	mtpr	$HIGH,$IPL
1184	movl	4(ap),r3
1185	movl	8(ap),r4
1186	movab	2f,nofault		# jump to 2f on machcheck
1187	bbc	$0,r4,1f; tstb	(r3)
11881:	bbc	$1,r4,1f; tstw	(r3)
11891:	bbc	$2,r4,1f; tstl	(r3)
11901:	clrl	r0			# made it w/o machine checks
11912:	clrl	nofault
1192	mtpr	r1,$IPL
1193	ret
1194
1195/*
1196 * update profiling information for the user
1197 * addupc(pc, &u.u_prof, ticks)
1198 */
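/*
 * Roughly, in C (pr points at the profiling structure passed as the
 * second argument; the pr_* names mirror the base, length, offset and
 * scale words the code indexes, and are descriptive only):
 *
 *	if (pc < pr->pr_off)
 *		return;
 *	i = (((pc - pr->pr_off) >> 1) * (pr->pr_scale >> 1)) >> 14;
 *	i &= ~1;
 *	if (i >= pr->pr_size)
 *		return;
 *	if (the short at pr->pr_base + i is writable)
 *		*(short *)((char *)pr->pr_base + i) += ticks;
 *	else
 *		pr->pr_scale = 0;	turn profiling off
 */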
1199ENTRY(addupc, 0)
1200	movl	8(ap),r2		# &u.u_prof
1201	subl3	8(r2),4(ap),r0		# corrected pc
1202	blss	9f
1203	extzv	$1,$31,r0,r0		# logical right shift
1204	extzv	$1,$31,12(r2),r1	# ditto for scale
1205	emul	r1,r0,$0,r0
1206	ashq	$-14,r0,r0
1207	tstl	r1
1208	bneq	9f
1209	bicl2	$1,r0
1210	cmpl	r0,4(r2)		# length
1211	bgequ	9f
1212	addl2	(r2),r0			# base
1213	probew	$3,$2,(r0)
1214	beql	8f
1215	addw2	12(ap),(r0)
12169:
1217	ret
12188:
1219	clrl	12(r2)
1220	ret
1221
1222/*
1223 * Copy a null terminated string from the user address space into
1224 * the kernel address space.
1225 *
1226 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
1227 */
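/*
 * Returns 0 on success, EFAULT if the user string is not readable, or
 * ENOENT if maxlength bytes are scanned without finding a null byte;
 * on success, if &lencopied is non-zero, the count copied (including
 * the null) is stored through it.
 */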
1228ENTRY(copyinstr, R6)
1229	movl	12(ap),r6		# r6 = max length
1230	jlss	8f
1231	movl	4(ap),r1		# r1 = user address
1232	bicl3	$~(NBPG*CLSIZE-1),r1,r2	# r2 = bytes on first page
1233	subl3	r2,$NBPG*CLSIZE,r2
1234	movl	8(ap),r3		# r3 = kernel address
12351:
1236	cmpl	r6,r2			# r2 = min(bytes on page, length left);
1237	jgeq	2f
1238	movl	r6,r2
12392:
1240	prober	$3,r2,(r1)		# bytes accessible?
1241	jeql	8f
1242	subl2	r2,r6			# update bytes left count
1243#ifdef NOSUBSINST
1244	# fake the locc instr. for processors that don't have it
1245	movl	r2,r0
12466:
1247	tstb	(r1)+
1248	jeql	5f
1249	sobgtr	r0,6b
1250	jbr	7f
12515:
1252	decl	r1
1253	jbr	3f
12547:
1255#else
1256	locc	$0,r2,(r1)		# null byte found?
1257	jneq	3f
1258#endif
1259	subl2	r2,r1			# back up pointer updated by `locc'
1260	movc3	r2,(r1),(r3)		# copy in next piece
1261	movl	$(NBPG*CLSIZE),r2	# check next page
1262	tstl	r6			# run out of space?
1263	jneq	1b
1264	movl	$ENOENT,r0		# set error code and return
1265	jbr	9f
12663:
1267	tstl	16(ap)			# return length?
1268	beql	4f
1269	subl3	r6,12(ap),r6		# actual len = maxlen - unused pages
1270	subl2	r0,r6			#	- unused on this page
1271	addl3	$1,r6,*16(ap)		#	+ the null byte
12724:
1273	subl2	r0,r2			# r2 = number of bytes to move
1274	subl2	r2,r1			# back up pointer updated by `locc'
1275	incl	r2			# copy null byte as well
1276	movc3	r2,(r1),(r3)		# copy in last piece
1277	clrl	r0			# redundant
1278	ret
12798:
1280	movl	$EFAULT,r0
12819:
1282	tstl	16(ap)
1283	beql	1f
1284	subl3	r6,12(ap),*16(ap)
12851:
1286	ret
1287
1288/*
1289 * Copy a null terminated string from the kernel
1290 * address space to the user address space.
1291 *
1292 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
1293 */
1294ENTRY(copyoutstr, R6)
1295	movl	12(ap),r6		# r6 = max length
1296	jlss	8b
1297	movl	4(ap),r1		# r1 = kernel address
1298	movl	8(ap),r3		# r3 = user address
1299	bicl3	$~(NBPG*CLSIZE-1),r3,r2	# r2 = bytes on first page
1300	subl3	r2,$NBPG*CLSIZE,r2
13011:
1302	cmpl	r6,r2			# r2 = min(bytes on page, length left);
1303	jgeq	2f
1304	movl	r6,r2
13052:
1306	probew	$3,r2,(r3)		# bytes accessible?
1307	jeql	8b
1308	subl2	r2,r6			# update bytes left count
1309#ifdef NOSUBSINST
1310	# fake the locc instr. for processors that don't have it
1311	movl	r2,r0
13126:
1313	tstb	(r1)+
1314	jeql	5f
1315	sobgtr	r0,6b
1316	jbr	7f
13175:
1318	decl	r1
1319	jbr	3b
13207:
1321#else
1322	locc	$0,r2,(r1)		# null byte found?
1323	jneq	3b
1324#endif
1325	subl2	r2,r1			# back up pointer updated by `locc'
1326	movc3	r2,(r1),(r3)		# copy in next piece
1327	movl	$(NBPG*CLSIZE),r2	# check next page
1328	tstl	r6			# run out of space?
1329	jneq	1b
1330	movl	$ENOENT,r0		# set error code and return
1331	jbr	9b
1332
1333/*
1334 * Copy a null terminated string from one point to another in
1335 * the kernel address space.
1336 *
1337 * copystr(fromaddr, toaddr, maxlength, &lencopied)
1338 */
1339ENTRY(copystr, R6)
1340	movl	12(ap),r6		# r6 = max length
1341	jlss	8b
1342	movl	4(ap),r1		# r1 = src address
1343	movl	8(ap),r3		# r3 = dest address
13441:
1345	movzwl	$65535,r2		# r2 = bytes in first chunk
1346	cmpl	r6,r2			# r2 = min(bytes in chunk, length left);
1347	jgeq	2f
1348	movl	r6,r2
13492:
1350	subl2	r2,r6			# update bytes left count
1351#ifdef NOSUBSINST
1352	# fake the locc instr. for processors that don't have it
1353	movl	r2,r0
13546:
1355	tstb	(r1)+
1356	jeql	5f
1357	sobgtr	r0,6b
1358	jbr	7f
13595:
1360	decl	r1
1361	jbr	3b
13627:
1363#else
1364	locc	$0,r2,(r1)		# null byte found?
1365	jneq	3b
1366#endif
1367	subl2	r2,r1			# back up pointer updated by `locc'
1368	movc3	r2,(r1),(r3)		# copy in next piece
1369	tstl	r6			# run out of space?
1370	jneq	1b
1371	movl	$ENOENT,r0		# set error code and return
1372	jbr	9b
1373
1374/*
1375 * Copy specified amount of data from user space into the kernel
1376 * Copyin(from, to, len)
1377 *	r1 == from (user source address)
1378 *	r3 == to (kernel destination address)
1379 *	r5 == length
1380 */
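/*
 * The chunking below, as a C sketch (illustrative only; Copyout uses
 * the same pattern on the destination address):
 *
 *	chunk = NBPG*CLSIZE - (from & (NBPG*CLSIZE - 1)) + NBPG*CLSIZE;
 *	while (len > 0) {
 *		if (chunk > len)
 *			chunk = len;
 *		if (prober fails on chunk bytes at from)
 *			return (EFAULT);
 *		copy chunk bytes, advance from and to, len -= chunk;
 *		chunk = 2 * NBPG*CLSIZE;
 *	}
 *	return (0);
 *
 * so each prober spans at most two page clusters and the first/last
 * byte check it performs falls in every cluster a chunk touches.
 */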
1381	.align	1
1382JSBENTRY(Copyin, R1|R3|R5)
1383	cmpl	r5,$(NBPG*CLSIZE)	# probing one page or less ?
1384	bgtru	1f			# no
1385	prober	$3,r5,(r1)		# bytes accessible ?
1386	beql	ersb			# no
1387	movc3	r5,(r1),(r3)
1388/*	clrl	r0			# redundant */
1389	rsb
13901:
1391	blss	ersb			# negative length?
1392	pushl	r6			# r6 = length
1393	movl	r5,r6
1394	bicl3	$~(NBPG*CLSIZE-1),r1,r0	# r0 = bytes on first page
1395	subl3	r0,$(NBPG*CLSIZE),r0
1396	addl2	$(NBPG*CLSIZE),r0	# plus one additional full page
1397	jbr	2f
1398
1399ciloop:
1400	movc3	r0,(r1),(r3)
1401	movl	$(2*NBPG*CLSIZE),r0	# next amount to move
14022:
1403	cmpl	r0,r6
1404	bleq	3f
1405	movl	r6,r0
14063:
1407	prober	$3,r0,(r1)		# bytes accessible ?
1408	beql	ersb1			# no
1409	subl2	r0,r6			# last move?
1410	bneq	ciloop			# no
1411
1412	movc3	r0,(r1),(r3)
1413/*	clrl	r0			# redundant */
1414	movl	(sp)+,r6		# restore r6
1415	rsb
1416
1417ersb1:
1418	movl	(sp)+,r6		# restore r6
1419ersb:
1420	movl	$EFAULT,r0
1421	rsb
1422
1423/*
1424 * Copy specified amount of data from kernel to the user space
1425 * Copyout(from, to, len)
1426 *	r1 == from (kernel source address)
1427 *	r3 == to (user destination address)
1428 *	r5 == length
1429 */
1430	.align	1
1431JSBENTRY(Copyout, R1|R3|R5)
1432	cmpl	r5,$(NBPG*CLSIZE)	# moving one page or less ?
1433	bgtru	1f			# no
1434	probew	$3,r5,(r3)		# bytes writeable?
1435	beql	ersb			# no
1436	movc3	r5,(r1),(r3)
1437/*	clrl	r0			# redundant */
1438	rsb
14391:
1440	blss	ersb			# negative length?
1441	pushl	r6			# r6 = length
1442	movl	r5,r6
1443	bicl3	$~(NBPG*CLSIZE-1),r3,r0	# r0 = bytes on first page
1444	subl3	r0,$(NBPG*CLSIZE),r0
1445	addl2	$(NBPG*CLSIZE),r0	# plus one additional full page
1446	jbr	2f
1447
1448coloop:
1449	movc3	r0,(r1),(r3)
1450	movl	$(2*NBPG*CLSIZE),r0	# next amount to move
14512:
1452	cmpl	r0,r6
1453	bleq	3f
1454	movl	r6,r0
14553:
1456	probew	$3,r0,(r3)		# bytes writeable?
1457	beql	ersb1			# no
1458	subl2	r0,r6			# last move?
1459	bneq	coloop			# no
1460
1461	movc3	r0,(r1),(r3)
1462/*	clrl	r0			# redundant */
1463	movl	(sp)+,r6		# restore r6
1464	rsb
1465
1466/*
1467 * non-local goto's
1468 */
1469#ifdef notdef		/* this is now expanded completely inline */
1470	.align	1
1471JSBENTRY(Setjmp, R0)
1472	movl	fp,(r0)+	# current stack frame
1473	movl	(sp),(r0)	# resuming pc
1474	clrl	r0
1475	rsb
1476#endif
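/*
 * Longjmp below unwinds the stack by pointing the saved pc of each
 * intervening calls frame back at itself and executing ret, so the
 * hardware pops one frame per iteration until fp reaches the frame
 * recorded by the matching setjmp; it then jumps to the saved pc.
 * If fp overshoots that frame the state is inconsistent and it panics.
 */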
1477
1478#define PCLOC 16	/* location of pc in calls frame */
1479#define APLOC 8		/* location of ap,fp in calls frame */
1480	.align	1
1481JSBENTRY(Longjmp, R0)
1482	movl	(r0)+,newfp	# must save parameters in memory as all
1483	movl	(r0),newpc	# registers may be clobbered.
14841:
1485	cmpl	fp,newfp	# are we there yet?
1486	bgequ	2f		# yes
1487	moval	1b,PCLOC(fp)	# redirect return pc to us!
1488	ret			# pop next frame
14892:
1490	beql	3f		# did we miss our frame?
1491	pushab	4f		# yep ?!?
1492	calls	$1,_panic
14933:
1494	movl	newpc,r0	# all done, just return to the `setjmp'
1495	jmp	(r0)		# ``rsb''
1496
1497	.data
1498newpc:	.space	4
1499newfp:	.space	4
15004:	.asciz	"longjmp"
1501	.text
1502/*
 * setjmp that saves all registers, as the call frame may not
 * be available to recover them in the usual manner by longjmp.
1505 * Called before swapping out the u. area, restored by resume()
1506 * below.
1507 */
1508ENTRY(savectx, 0)
1509	movl	4(ap),r0
1510	movq	r6,(r0)+
1511	movq	r8,(r0)+
1512	movq	r10,(r0)+
1513	movq	APLOC(fp),(r0)+	# save ap, fp
1514	addl3	$8,ap,(r0)+	# save sp
1515	movl	PCLOC(fp),(r0)	# save pc
1516	clrl	r0
1517	ret
1518
1519#ifdef KADB
1520/*
1521 * C library -- reset, setexit
1522 *
1523 *	reset(x)
1524 * will generate a "return" from
1525 * the last call to
1526 *	setexit()
1527 * by restoring r6 - r12, ap, fp
1528 * and doing a return.
1529 * The returned value is x; on the original
1530 * call the returned value is 0.
1531 */
1532ENTRY(setexit, 0)
1533	movab	setsav,r0
1534	movq	r6,(r0)+
1535	movq	r8,(r0)+
1536	movq	r10,(r0)+
1537	movq	8(fp),(r0)+		# ap, fp
1538	movab	4(ap),(r0)+		# sp
1539	movl	16(fp),(r0)		# pc
1540	clrl	r0
1541	ret
1542
1543ENTRY(reset, 0)
1544	movl	4(ap),r0	# returned value
1545	movab	setsav,r1
1546	movq	(r1)+,r6
1547	movq	(r1)+,r8
1548	movq	(r1)+,r10
1549	movq	(r1)+,r12
1550	movl	(r1)+,sp
1551	jmp 	*(r1)
1552
1553	.data
1554	.align  2
1555setsav:	.space	10*4
1556	.text
1557#endif
1558
1559	.globl	_whichqs
1560	.globl	_qs
1561	.globl	_cnt
1562
1563	.globl	_noproc
1564	.comm	_noproc,4
1565	.globl	_runrun
1566	.comm	_runrun,4
1567
1568/*
1569 * The following primitives use the fancy VAX instructions
1570 * much like VMS does.  _whichqs tells which of the 32 queues _qs
1571 * have processes in them.  Setrq puts processes into queues, Remrq
1572 * removes them from queues.  The running process is on no queue,
 * other processes are on a queue related to p->p_pri divided by 4,
 * which shrinks the 0-127 range of priorities into the 32 available
 * queues.
1576 */
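/*
 * In C terms (a sketch; qs[i] is a two-pointer circular queue header):
 *
 *	Setrq:	i = p->p_pri >> 2;
 *		insert p at the tail of qs[i];  whichqs |= 1 << i;
 *	Remrq:	i = p->p_pri >> 2;
 *		remove p from qs[i];
 *		if (qs[i] is now empty)  whichqs &= ~(1 << i);
 *	Swtch:	i = ffs(whichqs) - 1 (idle until some bit is set);
 *		take the process at the head of qs[i];
 *		if (qs[i] is now empty)  whichqs &= ~(1 << i);
 *		resume it;
 *
 * insque/remque/ffs/bbss/bbcc/bbsc do this without explicit loops.
 */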
1577
1578/*
1579 * Setrq(p), using fancy VAX instructions.
1580 *
1581 * Call should be made at splclock(), and p->p_stat should be SRUN
1582 */
1583	.align	1
1584JSBENTRY(Setrq, R0)
1585	tstl	P_RLINK(r0)		## firewall: p->p_rlink must be 0
1586	beql	set1			##
1587	pushab	set3			##
1588	calls	$1,_panic		##
1589set1:
1590	movzbl	P_PRI(r0),r1		# put on queue which is p->p_pri / 4
1591	ashl	$-2,r1,r1
1592	movaq	_qs[r1],r2
1593	insque	(r0),*4(r2)		# at end of queue
1594	bbss	r1,_whichqs,set2	# mark queue non-empty
1595set2:
1596	rsb
1597
1598set3:	.asciz	"setrq"
1599
1600/*
1601 * Remrq(p), using fancy VAX instructions
1602 *
1603 * Call should be made at splclock().
1604 */
1605	.align	1
1606JSBENTRY(Remrq, R0)
1607	movzbl	P_PRI(r0),r1
1608	ashl	$-2,r1,r1
1609	bbsc	r1,_whichqs,rem1
1610	pushab	rem3			# it wasn't recorded to be on its q
1611	calls	$1,_panic
1612rem1:
1613	remque	(r0),r2
1614	beql	rem2
1615	bbss	r1,_whichqs,rem2
1616rem2:
1617	clrl	P_RLINK(r0)		## for firewall checking
1618	rsb
1619
1620rem3:	.asciz	"remrq"
1621
1622/*
1623 * Masterpaddr is the p->p_addr of the running process on the master
 * processor.  On a multiprocessor system, the slave processors will have
1625 * an array of slavepaddr's.
1626 */
1627	.globl	_masterpaddr
1628	.data
1629_masterpaddr:
1630	.long	0
1631
1632	.text
1633sw0:	.asciz	"swtch"
1634
1635/*
1636 * When no processes are on the runq, Swtch branches to idle
1637 * to wait for something to come ready.
1638 */
1639	.globl	Idle
1640Idle: idle:
1641	movl	$1,_noproc
1642	mtpr	$0,$IPL			# must allow interrupts here
16431:
1644	tstl	_whichqs		# look for non-empty queue
1645	bneq	sw1
1646	brb	1b
1647
1648badsw:	pushab	sw0
1649	calls	$1,_panic
1650	/*NOTREACHED*/
1651
1652/*
1653 * Swtch(), using fancy VAX instructions
1654 */
1655	.align	1
1656JSBENTRY(Swtch, 0)
1657	incl	_cnt+V_SWTCH
1658sw1:	ffs	$0,$32,_whichqs,r0	# look for non-empty queue
1659	beql	idle			# if none, idle
1660	mtpr	$0x18,$IPL		# lock out all so _whichqs==_qs
1661	bbcc	r0,_whichqs,sw1		# proc moved via interrupt
1662	movaq	_qs[r0],r1
1663	remque	*(r1),r2		# r2 = p = highest pri process
1664	bvs	badsw			# make sure something was there
1665	beql	sw2
1666	insv	$1,r0,$1,_whichqs	# still more procs in this queue
1667sw2:
1668	clrl	_noproc
1669	clrl	_runrun
1670#ifdef notdef
1671	tstl	P_WCHAN(r2)		## firewalls
1672	bneq	badsw			##
1673	cmpb	P_STAT(r2),$SRUN	##
1674	bneq	badsw			##
1675#endif
1676	clrl	P_RLINK(r2)		##
1677	movl	*P_ADDR(r2),r0
1678#ifdef notdef
1679	cmpl	r0,_masterpaddr		# resume of current proc is easy
1680	beql	res0
1681#endif
1682	movl	r0,_masterpaddr
1683	ashl	$PGSHIFT,r0,r0		# r0 = pcbb(p)
1684/* fall into... */
1685
1686/*
1687 * Resume(pf)
1688 */
1689JSBENTRY(Resume, R0)
1690	mtpr	$HIGH,$IPL			# no interrupts, please
1691	movl	_CMAP2,_u+PCB_CMAP2	# yech
1692	svpctx
1693	mtpr	r0,$PCBB
1694	ldpctx
1695	movl	_u+PCB_CMAP2,_CMAP2	# yech
1696	mtpr	$_CADDR2,$TBIS
1697res0:
1698	tstl	_u+PCB_SSWAP
1699	bneq	res1
1700	rei
1701res1:
1702	movl	_u+PCB_SSWAP,r0			# longjmp to saved context
1703	clrl	_u+PCB_SSWAP
1704	movq	(r0)+,r6			# restore r6, r7
1705	movq	(r0)+,r8			# restore r8, r9
1706	movq	(r0)+,r10			# restore r10, r11
1707	movq	(r0)+,r12			# restore ap, fp
1708	movl	(r0)+,r1			# saved sp
1709	cmpl	r1,sp				# must be a pop
1710	bgequ	1f
1711	pushab	2f
1712	calls	$1,_panic
1713	/* NOTREACHED */
17141:
1715	movl	r1,sp				# restore sp
1716	pushl	$PSL_PRVMOD			# return psl
1717	pushl	(r0)				# address to return to
1718	rei
1719
17202:	.asciz	"ldctx"
1721
1722/*
1723 * {fu,su},{byte,word}, all massaged by asm.sed to jsb's
1724 */
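/*
 * Calling convention (address in r0, value to store in r1): the fu
 * routines return the fetched value, or -1 if the address is not
 * readable; the su routines return 0, or -1 if it is not writable.
 */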
1725	.align	1
1726JSBENTRY(Fuword, R0)
1727	prober	$3,$4,(r0)
1728	beql	fserr
1729	movl	(r0),r0
1730	rsb
1731fserr:
1732	mnegl	$1,r0
1733	rsb
1734
1735	.align	1
1736JSBENTRY(Fubyte, R0)
1737	prober	$3,$1,(r0)
1738	beql	fserr
1739	movzbl	(r0),r0
1740	rsb
1741
1742	.align	1
1743JSBENTRY(Suword, R0|R1)
1744	probew	$3,$4,(r0)
1745	beql	fserr
1746	movl	r1,(r0)
1747	clrl	r0
1748	rsb
1749
1750	.align	1
1751JSBENTRY(Subyte, R0|R1)
1752	probew	$3,$1,(r0)
1753	beql	fserr
1754	movb	r1,(r0)
1755	clrl	r0
1756	rsb
1757
1758/*
1759 * Copy 1 relocation unit (NBPG bytes)
1760 * from user virtual address to physical address
1761 */
1762ENTRY(copyseg, 0)
1763	bisl3	$PG_V|PG_KW,8(ap),_CMAP2
1764	mtpr	$_CADDR2,$TBIS	# invalidate entry for copy
1765	movc3	$NBPG,*4(ap),_CADDR2
1766	ret
1767
1768/*
1769 * zero out physical memory
1770 * specified in relocation units (NBPG bytes)
1771 */
1772ENTRY(clearseg, 0)
1773	bisl3	$PG_V|PG_KW,4(ap),_CMAP1
1774	mtpr	$_CADDR1,$TBIS
1775	movc5	$0,(sp),$0,$NBPG,_CADDR1
1776	ret
1777
1778/*
1779 * Check address.
1780 * Given virtual address, byte count, and rw flag
1781 * returns 0 on no access.
1782 */
1783ENTRY(useracc, 0)
1784	movl	4(ap),r0		# get va
1785	movl	8(ap),r1		# count
1786	tstl	12(ap)			# test for read access ?
1787	bneq	userar			# yes
1788	cmpl	$NBPG,r1			# can we do it in one probe ?
1789	bgeq	uaw2			# yes
1790uaw1:
1791	probew	$3,$NBPG,(r0)
1792	beql	uaerr			# no access
1793	addl2	$NBPG,r0
1794	acbl	$NBPG+1,$-NBPG,r1,uaw1
1795uaw2:
1796	probew	$3,r1,(r0)
1797	beql	uaerr
1798	movl	$1,r0
1799	ret
1800
1801userar:
1802	cmpl	$NBPG,r1
1803	bgeq	uar2
1804uar1:
1805	prober	$3,$NBPG,(r0)
1806	beql	uaerr
1807	addl2	$NBPG,r0
1808	acbl	$NBPG+1,$-NBPG,r1,uar1
1809uar2:
1810	prober	$3,r1,(r0)
1811	beql	uaerr
1812	movl	$1,r0
1813	ret
1814uaerr:
1815	clrl	r0
1816	ret
1817
1818/*
1819 * kernacc - check for kernel access privileges
1820 *
1821 * We can't use the probe instruction directly because
1822 * it ors together current and previous mode.
1823 */
1824 ENTRY(kernacc, 0)
1825	movl	4(ap),r0	# virtual address
1826	bbcc	$31,r0,kacc1
1827	bbs	$30,r0,kacerr
1828	mfpr	$SBR,r2		# address and length of page table (system)
1829	bbss	$31,r2,0f; 0:
1830	mfpr	$SLR,r3
1831	brb	kacc2
1832kacc1:
1833	bbsc	$30,r0,kacc3
1834	mfpr	$P0BR,r2	# user P0
1835	mfpr	$P0LR,r3
1836	brb	kacc2
1837kacc3:
1838	mfpr	$P1BR,r2	# user P1 (stack)
1839	mfpr	$P1LR,r3
1840kacc2:
1841	addl3	8(ap),r0,r1	# ending virtual address
1842	addl2	$NBPG-1,r1
1843	ashl	$-PGSHIFT,r0,r0
1844	ashl	$-PGSHIFT,r1,r1
1845	bbs	$31,4(ap),kacc6
1846	bbc	$30,4(ap),kacc6
1847	cmpl	r0,r3		# user stack
1848	blss	kacerr		# address too low
1849	brb	kacc4
1850kacc6:
1851	cmpl	r1,r3		# compare last page to P0LR or SLR
1852	bgtr	kacerr		# address too high
1853kacc4:
1854	movl	(r2)[r0],r3
1855	bbc	$31,4(ap),kacc4a
1856	bbc	$31,r3,kacerr	# valid bit is off
1857kacc4a:
1858	cmpzv	$27,$4,r3,$1	# check protection code
1859	bleq	kacerr		# no access allowed
1860	tstb	12(ap)
1861	bneq	kacc5		# only check read access
1862	cmpzv	$27,$2,r3,$3	# check low 2 bits of prot code
1863	beql	kacerr		# no write access
1864kacc5:
1865	aoblss	r1,r0,kacc4	# next page
1866	movl	$1,r0		# no errors
1867	ret
1868kacerr:
1869	clrl	r0		# error
1870	ret
1871/*
1872 * Extracted and unrolled most common case of pagein (hopefully):
1873 *	resident and not on free list (reclaim of page is purely
1874 *	for the purpose of simulating a reference bit)
1875 *
1876 * Built in constants:
1877 *	CLSIZE of 2, any bit fields in pte's
1878 */
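/*
 * Control flow in C terms (a sketch; the names are the ones used in
 * the comments below, and anything unusual is punted to pagein):
 *
 *	v = clbase(btop(fault address));  p = u.u_procp;
 *	pte = text, data or stack pte for v, found by hand;
 *	if (pte->pg_v || pte->pg_fod || pte->pg_pfnum == 0)
 *		goto pagein;
 *	c = &cmap[pgtocm(pte->pg_pfnum)];
 *	if (c->c_free || (text && c->c_intrans))
 *		goto pagein;
 *	pte->pg_v = 1;  if (anycl(pte, pg_m)) pte->pg_m = 1;
 *	distcl(pte);  tbiscl(v);
 *	if (text)
 *		copy the pte to every process sharing the text segment;
 *	u.u_ru.ru_minflt++;  bump the cnt statistics;  rei;
 */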
1879	.text
1880	.globl	Fastreclaim
1881Fastreclaim:
1882	PUSHR
1883#ifdef GPROF
1884	movl	fp,-(sp)
1885	movab	12(sp),fp
1886	jsb	mcount
1887	movl	(sp)+,fp
1888#endif GPROF
1889	extzv	$9,$23,28(sp),r3	# virtual address
1890	bicl2	$1,r3			# v = clbase(btop(virtaddr));
1891	movl	_u+U_PROCP,r5		# p = u.u_procp
1892					# from vtopte(p, v) ...
1893	movl	$1,r2			# type = CTEXT;
1894	cmpl	r3,P_TSIZE(r5)
1895	jlssu	1f			# if (isatsv(p, v)) {
1896	addl3	P_TSIZE(r5),P_DSIZE(r5),r0
1897	cmpl	r3,r0
1898	jgequ	2f
1899	clrl	r2			#	type = !CTEXT;
19001:
1901	ashl	$2,r3,r4
1902	addl2	P_P0BR(r5),r4		#	tptopte(p, vtotp(p, v));
1903	jbr	3f
19042:
1905	cvtwl	P_SZPT(r5),r4		# } else (isassv(p, v)) {
1906	ashl	$7,r4,r4
1907	subl2	$0x400000,r4
1908	addl2	r3,r4
1909	ashl	$2,r4,r4
1910	addl2	P_P0BR(r5),r4		#	sptopte(p, vtosp(p, v));
1911	clrl	r2			# 	type = !CTEXT;
19123:					# }
1913	bitb	$0x82,3(r4)
1914	beql	2f			# if (pte->pg_v || pte->pg_fod)
1915	POPR; rsb			#	let pagein handle it
19162:
1917	bicl3	$0xffe00000,(r4),r0
1918	jneq	2f			# if (pte->pg_pfnum == 0)
1919	POPR; rsb			# 	let pagein handle it
19202:
1921	subl2	_firstfree,r0
1922	ashl	$-1,r0,r0
1923	incl	r0			# pgtocm(pte->pg_pfnum)
1924	mull2	$SZ_CMAP,r0
1925	addl2	_cmap,r0		# &cmap[pgtocm(pte->pg_pfnum)]
1926	tstl	r2
1927	jeql	2f			# if (type == CTEXT &&
1928	jbc	$C_INTRANS,(r0),2f	#     c_intrans)
1929	POPR; rsb			# 	let pagein handle it
19302:
1931	jbc	$C_FREE,(r0),2f		# if (c_free)
1932	POPR; rsb			# 	let pagein handle it
19332:
1934	bisb2	$0x80,3(r4)		# pte->pg_v = 1;
1935	jbc	$26,4(r4),2f		# if (anycl(pte, pg_m)
1936	bisb2	$0x04,3(r4)		#	pte->pg_m = 1;
19372:
1938	bicw3	$0x7f,2(r4),r0
1939	bicw3	$0xff80,6(r4),r1
1940	bisw3	r0,r1,6(r4)		# distcl(pte);
1941	ashl	$PGSHIFT,r3,r0
1942	mtpr	r0,$TBIS
1943	addl2	$NBPG,r0
1944	mtpr	r0,$TBIS		# tbiscl(v);
1945	tstl	r2
1946	jeql	2f			# if (type == CTEXT)
1947	movl	P_TEXTP(r5),r0
1948	movl	X_CADDR(r0),r5		# for (p = p->p_textp->x_caddr; p; ) {
1949	jeql	2f
1950	ashl	$2,r3,r3
19513:
1952	addl3	P_P0BR(r5),r3,r0	#	tpte = tptopte(p, tp);
1953	bisb2	$1,P_FLAG+3(r5)		#	p->p_flag |= SPTECHG;
1954	movl	(r4),(r0)+		#	for (i = 0; i < CLSIZE; i++)
1955	movl	4(r4),(r0)		#		tpte[i] = pte[i];
1956	movl	P_XLINK(r5),r5		#	p = p->p_xlink;
1957	jneq	3b			# }
19582:					# collect a few statistics...
1959	incl	_u+U_RU+RU_MINFLT	# u.u_ru.ru_minflt++;
1960	moval	_cnt,r0
1961	incl	V_FAULTS(r0)		# cnt.v_faults++;
1962	incl	V_PGREC(r0)		# cnt.v_pgrec++;
1963	incl	V_FASTPGREC(r0)		# cnt.v_fastpgrec++;
1964	incl	V_TRAP(r0)		# cnt.v_trap++;
1965	POPR
1966	addl2	$8,sp			# pop pc, code
	mtpr	$HIGH,$IPL		## don't go to a higher IPL (GROT)
1968	rei
1969