xref: /original-bsd/sys/vax/vax/locore.s (revision 58db4230)
1/*
2 * Copyright (c) 1980, 1986 Regents of the University of California.
3 * All rights reserved.  The Berkeley software License Agreement
4 * specifies the terms and conditions for redistribution.
5 *
6 *	@(#)locore.s	7.28 (Berkeley) 12/16/90
7 */
8
9#include "vax/include/psl.h"
10#include "vax/include/pte.h"
11
12#include "sys/errno.h"
13#include "sys/syscall.h"
14#include "sys/cmap.h"
15
16#include "vax/include/mtpr.h"
17#include "vax/include/trap.h"
18#include "vax/include/cpu.h"
19#include "vax/vax/nexus.h"
20#include "vax/vax/cons.h"
21#include "vax/include/clock.h"
22#include "vax/vax/ioa.h"
23#include "vax/vax/ka630.h"
24#include "vax/vax/ka650.h"
25#include "vax/vax/ka820.h"
26#include "vax/uba/ubareg.h"
27
28#include "dz.h"
29#include "dp.h"
30#include "uu.h"
31#include "ps.h"
32#include "mba.h"
33#include "uba.h"
34#include "imp.h"
35
36	.set	HIGH,0x1f	# mask for total disable
37	.set	MCKVEC,4	# offset into scb of machine check vector
38	.set	NBPG,512
39	.set	PGSHIFT,9
40	.set	SYSTEM,0x80000000	# virtual address of system start
41
42	.set	NISP,3		# number of interrupt stack pages
43
44/*
45 * User structure is UPAGES at top of user space.
46 */
47	.globl	_u
48	.set	_u,0x80000000 - UPAGES*NBPG
49
50	.globl	_intstack
51_intstack:
52	.space	NISP*NBPG
53eintstack:
54
55/*
56 * Do a dump.
57 * Called by auto-restart.
58 * May be called manually.
59 */
60	.align	2
61	.globl	_doadump
62	.globl	_msgbufmapped
63_doadump:
64	nop; nop				# .word 0x0101
65#define	_rpbmap	_Sysmap				# rpb, scb, UNIvec[], istack*4
66	bicl2	$PG_PROT,_rpbmap
67	bisl2	$PG_KW,_rpbmap
68	mtpr	$0,$TBIA
69	tstl	_rpb+RP_FLAG			# dump only once!
70	bneq	1f
71	incl	_rpb+RP_FLAG
72	movl	sp,erpb
73	movab	erpb,sp
74	mfpr	$PCBB,-(sp)
75	mfpr	$MAPEN,-(sp)
76	mfpr	$IPL,-(sp)
77	clrl	_msgbufmapped
78	mtpr	$0,$MAPEN
79	mtpr	$HIGH,$IPL
80	pushr	$0x3fff
81	calls	$0,_dumpsys
821:
83	clrl	r11				# boot flags
84	calls	$0,_vaxboot
85	halt
86
87/*
88 * Interrupt vector routines
89 */
90	.globl	_waittime
91
92#define	SCBVEC(name)	.align 2; .globl _X/**/name; _X/**/name
93#define	PANIC(msg)	clrl _waittime; pushab 1f; \
94			calls $1,_panic; 1: .asciz msg
95#define	PRINTF(n,msg)	pushab 1f; calls $n+1,_printf; MSG(msg)
96#define	MSG(msg)	.data; 1: .asciz msg; .text
97#define	PUSHR		pushr $0x3f
98#define	POPR		popr $0x3f
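/*
 * For reference, a rough expansion of these macros (the empty comment in
 * SCBVEC is the old C preprocessor's token-pasting idiom), written one
 * directive per line; e.g. SCBVEC(kspnotval): PANIC("KSP not valid")
 * below becomes approximately:
 *
 *	.align	2
 *	.globl	_Xkspnotval
 *_Xkspnotval:
 *	clrl	_waittime
 *	pushab	1f
 *	calls	$1,_panic
 *1:	.asciz	"KSP not valid"
 */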
99
100	.data
101nofault: .long	0	# where to go on an anticipated machine check (see badaddr)
102	.text
103SCBVEC(machcheck):
104	tstl	nofault
105	bneq	1f
106	PUSHR; pushab 6*4(sp); calls $1,_machinecheck; POPR;
107	addl2 (sp)+,sp; rei
108	.align	2
1091:
110	casel	_cpu,$1,$VAX_MAX
1110:
112	.word	8f-0b		# 1 is 780
113	.word	5f-0b		# 2 is 750
114	.word	5f-0b		# 3 is 730
115	.word	7f-0b		# 4 is 8600
116	.word	5f-0b		# 5 is 8200
117	.word	1f-0b		# 6 is 8800 (unsupported)
118	.word	1f-0b		# 7 is 610  (unsupported)
119	.word	1f-0b		# 8 is 630
120	.word	1f-0b		# 9 is ???
121	.word	9f-0b		# 10 is 650
1225:
123#if defined(VAX8200) || defined(VAX750) || defined(VAX730)
124	mtpr	$0xf,$MCESR
125#endif
126	brb	1f
1277:
128#if VAX8600
129	mtpr	$0,$EHSR
130#endif
131	brb	1f
1328:
133#if VAX780
134	mtpr	$0,$SBIFS
135#endif
136	brb	1f
1379:
138#if VAX650
139	bitl	$PG_V,_KA650MERRmap
140	beql	1f	# don't bother clearing err reg if not mapped in
141	movl	$DSER_CLEAR,_ka650merr+4
142#endif
1431:
144	addl2	(sp)+,sp		# discard mchchk trash
145	movl	nofault,(sp)
146	rei
147
148SCBVEC(kspnotval):
149	PANIC("KSP not valid");
150SCBVEC(powfail):
151	halt
152SCBVEC(chme): SCBVEC(chms): SCBVEC(chmu):
153	PANIC("CHM? in kernel");
154
155SCBVEC(nex0zvec):
156	PUSHR
157	clrl	r0
158	brb	1f
159SCBVEC(nex1zvec):
160	PUSHR
161	movl	$1,r0
1621:
163	cmpl	_cpu,$VAX_8600		# this is a frill
164	beql	2f
165	mfpr	$IPL,-(sp)
166	PRINTF(1, "nexus stray intr ipl%x\n")
167	POPR
168	rei
1692:
170	pushl	r0
171	mfpr	$IPL,-(sp)
172	PRINTF(2, "nexus stray intr ipl%x sbia%d\n")
173	POPR
174	rei
175
176SCBVEC(cmrd):
177	PUSHR; calls $0,_memerr; POPR; rei
178
179SCBVEC(wtime):			/* sbi0err on 8600 */
180#if VAX8600
181	cmpl	_cpu,$VAX_8600
182	bneq	wtimo
183	PANIC("sbia0 error")
184wtimo:
185#endif
186	PUSHR; pushl 6*4(sp); PRINTF(1, "write timeout %x\n"); POPR
187	PANIC("wtimo")
188
189SCBVEC(sbi0fail):
190	PANIC("sbia0 fail")
191SCBVEC(sbi0alert):
192#if VAX8200
193	cmpl	_cpu,$VAX_8200
194	bneq	alert
195	PUSHR; calls $0,_rxcdintr; POPR; rei
196alert:
197#endif
198	PANIC("sbia0 alert")
199SCBVEC(sbi0fault):
200	PANIC("sbia0 fault")
201
202#ifdef notyet
203#if VAX8600
204SCBVEC(sbi1fail):
205	PANIC("sbia1 fail")
206SCBVEC(sbi1alert):
207	PANIC("sbia1 alert")
208SCBVEC(sbi1fault):
209	PANIC("sbia1 fault")
210SCBVEC(sbi1err):
211	PANIC("sbia1 error")
212#endif
213#endif
214
215/*
216 * BI 0 bus error (8200), or SBI silo compare error (others)
217 * VMS boot leaves us 1 BI error to ignore.
218 */
219#if VAX8200 && 0
220	.data
221	.align	2
222_ignorebi: .globl _ignorebi
223	.long	1
224	.text
225#endif VAX8200
226
227SCBVEC(sbisilo):
228#if VAX8200
229	cmpl	_cpu,$VAX_8200
230	bneq	sbisilo
231#if 0
232	blbs	_ignorebi,1f
233#else
234	blbs	_cold,1f
235#endif
236	PUSHR; pushl $0; calls $1,_bi_buserr; POPR
2371:
238	rei
239#endif
240sbisilo:
241	PANIC("sbi silo compare error")
242
243/*
244 * SCB stray interrupt catcher.  Compute and print the actual
245 * SCB vector (for fault diagnosis).
246 */
247	.align	2
248_scbstray: .globl _scbstray
249#define	PJ	PUSHR;jsb 1f
250	/* 128 of 'em */
251	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
252	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
253	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
254	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
255	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
256	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
257	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
258	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
259#if VAX8600
260	/* and another 128, for the second SBIA's scb */
261	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
262	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
263	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
264	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
265	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
266	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
267	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
268	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
269#endif
270#undef PJ
2711:
272	subl3	$_scbstray+8,(sp)+,r0
273	mfpr	$IPL,-(sp)
274	ashl	$-1,r0,-(sp)
275/* call a C handler instead? perhaps later */
276	PRINTF(2, "stray scb intr vec 0x%x ipl%x\n")
277	POPR
278	rei
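/*
 * A hypothetical C rendering of the arithmetic above, assuming each PJ
 * slot is 8 bytes (2-byte pushr plus 6-byte jsb) while SCB vectors are
 * 4 bytes apart, so halving the byte offset into _scbstray recovers the
 * SCB vector offset:
 *
 *	unsigned
 *	scb_vector(char *jsb_return_pc)
 *	{
 *		unsigned off = jsb_return_pc - ((char *)scbstray + 8);
 *
 *		return (off / 2);	// printed as "stray scb intr vec 0x%x"
 *	}
 */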
279
280#if NMBA > 0
281SCBVEC(mba3int):
282	PUSHR; incl _intrcnt+I_MBA3; pushl $3; brb 1f
283SCBVEC(mba2int):
284	PUSHR; incl _intrcnt+I_MBA2; pushl $2; brb 1f
285SCBVEC(mba1int):
286	PUSHR; incl _intrcnt+I_MBA1; pushl $1; brb 1f
287SCBVEC(mba0int):
288	PUSHR; incl _intrcnt+I_MBA0; pushl $0
2891:	calls $1,_mbintr
290	POPR
291	incl	_cnt+V_INTR
292	rei
293#endif
294
295#ifdef DW780
296/*
297 * Registers for the uba handling code
298 */
299#define	rUBANUM	r0
300#define	rUBAHD	r1
301#define	rUVEC	r3
302#define	rUBA	r4
303/* r2,r5 are scratch */
304
305#define	I_UBA	I_UBA0		/* base of UBA interrupt counters */
306
307#if NUBA > 4
308SCBVEC(ua7int):
309	PUSHR; movl $7,rUBANUM; moval _uba_hd+(7*UH_SIZE),rUBAHD; brb 1f
310SCBVEC(ua6int):
311	PUSHR; movl $6,rUBANUM; moval _uba_hd+(6*UH_SIZE),rUBAHD; brb 1f
312SCBVEC(ua5int):
313	PUSHR; movl $5,rUBANUM; moval _uba_hd+(5*UH_SIZE),rUBAHD; brb 1f
314SCBVEC(ua4int):
315	PUSHR; movl $4,rUBANUM; moval _uba_hd+(4*UH_SIZE),rUBAHD; brb 1f
316#endif
317SCBVEC(ua3int):
318	PUSHR; movl $3,rUBANUM; moval _uba_hd+(3*UH_SIZE),rUBAHD; brb 1f
319SCBVEC(ua2int):
320	PUSHR; movl $2,rUBANUM; moval _uba_hd+(2*UH_SIZE),rUBAHD; brb 1f
321SCBVEC(ua1int):
322	PUSHR; movl $1,rUBANUM; moval _uba_hd+(1*UH_SIZE),rUBAHD; brb 1f
323SCBVEC(ua0int):
324	PUSHR; movl $0,rUBANUM; moval _uba_hd+(0*UH_SIZE),rUBAHD;
3251:
326	mfpr	$IPL,r2				/* r2 = mfpr(IPL); */
327	movl	UH_UBA(rUBAHD),rUBA		/* uba = uhp->uh_uba; */
328	movl	UBA_BRRVR-0x14*4(rUBA)[r2],rUVEC
329					/* uvec = uba->uba_brrvr[r2-0x14] */
330ubanorm:
331	bleq	ubaerror
332	addl2	UH_VEC(rUBAHD),rUVEC		/* uvec += uh->uh_vec */
333	bicl3	$3,(rUVEC),r1
334	jmp	2(r1)				/* 2 skips ``pushr $0x3f'' */
335ubaerror:
336	PUSHR; calls $0,_ubaerror; POPR		/* ubaerror r/w's r0-r5 */
337	tstl rUVEC; jneq ubanorm		/* rUVEC contains result */
338	incl _intrcnt+I_UBA[rUBANUM]
339	incl	_cnt+V_INTR
340	POPR
341	rei
342#endif
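/*
 * A hypothetical C rendering of the dispatch above (field names follow
 * the uba_hd/uba_regs declarations used by the UBA support code;
 * ubaerror's register-based argument passing is omitted):
 *
 *	int br, uvec, *vecp, stub;
 *
 *	br = mfpr(IPL);				// 0x14..0x17 for BR4..BR7
 *	uvec = uhp->uh_uba->uba_brrvr[br - 0x14];	// who interrupted?
 *	if (uvec <= 0) {			// error bit or passive release
 *		ubaerror();			// may leave a usable vector in uvec
 *		if (uvec == 0)
 *			return;			// count it and dismiss
 *	}
 *	vecp = (int *)(uvec + uhp->uh_vec);	// this uba's vector table entry
 *	stub = *vecp & ~3;			// strip SCB stack-select bits
 *	// jump to stub + 2, skipping the stub's leading ``pushr $0x3f''
 *	// (r0-r5 are already saved here)
 */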
343SCBVEC(cnrint):
344	PUSHR; calls $0,_cnrint; POPR
345	incl _cnt+V_INTR
346	incl _intrcnt+I_CNR
347	rei
348SCBVEC(cnxint):
349	PUSHR; calls $0,_cnxint; POPR
350	incl _cnt+V_INTR
351	incl _intrcnt+I_CNX
352	rei
353SCBVEC(hardclock):
354	PUSHR
355	mtpr $ICCS_RUN|ICCS_IE|ICCS_INT|ICCS_ERR,$ICCS
356#if NPS > 0
357	pushl	4+6*4(sp); pushl 4+6*4(sp);
358	calls	$2,_psextsync
359#endif
360	pushl 4+6*4(sp); pushl 4+6*4(sp);
361	calls $2,_hardclock			# hardclock(pc,psl)
362	POPR;
363	incl	_cnt+V_INTR
364	incl	_intrcnt+I_CLOCK
365	rei
366SCBVEC(softclock):
367	PUSHR
368	pushl	4+6*4(sp); pushl 4+6*4(sp);
369	calls	$2,_softclock			# softclock(pc,psl)
370	POPR;
371	incl	_cnt+V_SOFT
372	rei
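/*
 * A sketch of the offset arithmetic used by the two clock handlers above:
 * the interrupt pushes PC and PSL, and PUSHR then saves r0-r5 (24 bytes),
 * so relative to sp the frame is
 *
 *	 0(sp)..20(sp)	r0-r5
 *	24(sp)		interrupt PC
 *	28(sp)		interrupt PSL
 *
 * The first ``pushl 4+6*4(sp)'' therefore pushes 28(sp), the PSL; after sp
 * drops by 4 the identical operand names what is now 28(sp) = the PC, so
 * the calls see (pc, psl) as their two arguments.
 */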
373
374#include "net/netisr.h"
375	.globl	_netisr
376SCBVEC(netintr):
377	PUSHR
378#if NIMP > 0
379	bbcc	$NETISR_IMP,_netisr,1f; calls $0,_impintr; 1:
380#endif
381#ifdef INET
382	bbcc	$NETISR_IP,_netisr,1f; calls $0,_ipintr; 1:
383#endif
384#ifdef NS
385	bbcc	$NETISR_NS,_netisr,1f; calls $0,_nsintr; 1:
386#endif
387#ifdef ISO
388	bbcc	$NETISR_ISO,_netisr,1f; calls $0,_clnlintr; 1:
389#endif
390#ifdef CCITT
391	bbcc	$NETISR_CCITT,_netisr,1f; calls $0,_hdintr; 1:
392#endif
393	POPR
394	incl	_cnt+V_SOFT
395	rei
396
397SCBVEC(consdin):
398	PUSHR;
399	incl	_intrcnt+I_TUR
400	casel	_cpu,$VAX_750,$VAX_8200
4010:
402	.word	5f-0b		# 2 is VAX_750
403	.word	3f-0b		# 3 is VAX_730
404	.word	6f-0b		# 4 is VAX_8600
405	.word	7f-0b		# 5 is VAX_8200
406	halt
4075:
408#if defined(VAX750) && !defined(MRSP)
409	jsb	tudma
410#endif
4113:
412#if defined(VAX750) || defined(VAX730)
413	calls	$0,_turintr
414	brb	2f
415#else
416	halt
417#endif
4187:
419#if VAX8200
420	calls	$0,_rx50intr
421	brb	2f
422#else
423	halt
424#endif
4256:
426#if VAX8600
427	calls	$0,_crlintr
428#else
429	halt
430#endif
4312:
432	POPR
433	incl	_cnt+V_INTR
434	rei
435
436#if defined(VAX750) || defined(VAX730)
437SCBVEC(consdout):
438	PUSHR; calls $0,_tuxintr; POPR
439	incl _cnt+V_INTR
440	incl _intrcnt+I_TUX
441	rei
442#else
443SCBVEC(consdout):
444	halt
445#endif
446
447#if NDZ > 0
448/*
449 * DZ pseudo dma routine:
450 *	r0 - controller number
451 */
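/*
 * A hypothetical C rendering of the loop below.  It assumes the per-line
 * pdma record implied by the 20-byte stride (device csr address, p_mem,
 * p_end, a tty pointer, and the interrupt routine); the dzcsr and dztbuf
 * register names are likewise assumptions here.
 *
 *	struct pdma *base = &dzpdma[ctlr * 8], *dp;	// 8 lines per DZ11
 *
 *	for (;;) {
 *		int csrhi = ((char *)base->p_addr)[1];	// high byte of dzcsr
 *
 *		if ((csrhi & 0x80) == 0)		// TRDY off: nothing to send
 *			break;				// dismiss the interrupt
 *		dp = &base[csrhi & 0x07];		// TLINE selects the line
 *		if (dp->p_mem < dp->p_end)
 *			base->p_addr->dztbuf = *dp->p_mem++;	// feed a character
 *		else
 *			(*dp->p_fcn)(dp->p_arg);	// ring empty: call dzxint
 *	}
 */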
452	.align	1
453	.globl	dzdma
454dzdma:
455	mull2	$8*20,r0
456	movab	_dzpdma(r0),r3		# pdma structure base
457					# for this controller
458dzploop:
459	movl	r3,r0
460	movl	(r0)+,r1		# device register address
461	movzbl	1(r1),r2		# get line number
462	bitb	$0x80,r2		# TRDY on?
463	beql	dzprei			# no
464	bicb2	$0xf8,r2		# clear garbage bits
465	mull2	$20,r2
466	addl2	r2,r0			# point at line's pdma structure
467	movl	(r0)+,r2		# p_mem
468	cmpl	r2,(r0)+		# p_mem < p_end ?
469	bgequ	dzpcall			# no, go call dzxint
470	movb	(r2)+,6(r1)		# dztbuf = *p_mem++
471	movl	r2,-8(r0)
472	brb 	dzploop			# check for another line
473dzprei:
474	POPR
475	incl	_cnt+V_PDMA
476	rei
477
478dzpcall:
479	pushl	r3
480	pushl	(r0)+			# push tty address
481	calls	$1,*(r0)		# call interrupt rtn
482	movl	(sp)+,r3
483	brb 	dzploop			# check for another line
484#endif
485
486#if NDP > 0
487/*
488 * DPV-11 pseudo dma routine:
489 *	r0 - controller number
490 */
491	.align	1
492	.globl	dprdma
493	.globl	dpxdma
494dprdma:
495	mull3	$2*20,r0,r3
496	movab	_dppdma+20(r3),r3	# pdma structure base
497	movl	(r3),r1			# device register address
498	movw	(r1),r2			# get dprcsr
499	bitw	$0x400,r2		# Attention on?
500	bneq	dprcall			# yes
501	bitw	$0x80,r2		# Data Ready?
502	beql	dprcall			# no
503	movl	4(r3),r4
504	cmpl	r4,8(r3)		# p_mem < p_end ?
505	bgequ	dprcall			# no, go call dprint
506	movb	2(r1),(r4)+		# *p_mem++ = dptbuf
507	movl	r4,4(r3)		# put back adjusted count
508					# Since we've been interrupted
509	#bitw	$0x4,4(r1)		# check if we can send
510	#beql	dpprei			# no, return
511	#subl2	$20,r3			# point to send pdma
512	#movl	4(r3),r4		# check if
513	#cmpl	r4,8(r3)		# p_mem < p_end ?
514	#bgequ	dpxcall			# no, go call dpxint
515	#tstw	6(r1)			# get dptdsr, sender starved ?
516	#blss	dpxcall			# yes, go call dpxint
517	#movb	(r4)+,6(r1)		# dptbuf = *p_mem++
518	#movl	r4,4(r3)		# put back adjusted count
519dpprei:
520	POPR
521	incl	_cnt+V_PDMA
522	rei
523dprcall:
524	movw	r2,12(r3)
525dpxcall:
526	pushl	r1			# push csr address
527	pushl	r3			# push pdma address
528	pushl	r0			# push unit number
529	calls	$3,*16(r3)		# call interrupt rtn
530	brb	dpprei
531	.globl	dpxdma
532dpxdma:
533	mull3	$2*20,r0,r3
534	movab	_dppdma(r3),r3		# pdma structure base
535	movl	(r3),r1			# device register address
536dpxcheck:
537	movl	4(r3),r4
538	cmpl	r4,8(r3)		# p_mem < p_end ?
539	bgequ	dpxcall			# no, go call dpxint
540	bitw	$0x4,4(r1)		# ok to send
541	beql	dpxcall			# no, go call dpxint
542	tstw	6(r1)			# get dptdsr, sender starved ?
543	blss	dpxcall			# yes, go call dpxint
544	movzbw	(r4)+,6(r1)		# dptbuf = *p_mem++, turn off XSM.
545	movl	r4,4(r3)		# put back adjusted count
546	incl	12(r3)		# positive indication we did everything
547	#addl2	$20,r3			# check if input ready
548	#movw	(r1),r2			# get dprcsr
549	#bitw	$0x400,r2		# Attention on?
550	#bneq	dprcall			# yes
551	#bitw	$0x80,r2		# Data Ready?
552	#beql	dpprei			# no, just return
553	#movl	4(r3),r4
554	#cmpl	r4,8(r3)		# p_mem < p_end ?
555	#bgequ	dprcall			# no, go call dprint
556	#movb	2(r1),(r4)+		# dptbuf = *p_mem++
557	#movl	r4,4(r3)		# put back adjusted count
558	brb	dpprei
559#endif
560
561#if NUU > 0 && defined(UUDMA)
562/*
563 * Pseudo DMA routine for tu58 (on DL11)
564 *	r0 - controller number
565 */
566	.align	1
567	.globl	uudma
568uudma:
569	movl	_uudinfo[r0],r2
570	movl	16(r2),r2		# r2 = uuaddr
571	mull3	$48,r0,r3
572	movab	_uu_softc(r3),r5	# r5 = uuc
573
574	cvtwl	2(r2),r1		# c = uuaddr->rdb
575	bbc	$15,r1,1f		# if (c & UUDB_ERROR)
576	movl	$13,16(r5)		#	uuc->tu_state = TUC_RCVERR;
577	rsb				#	let uurintr handle it
5781:
579	tstl	4(r5)			# if (uuc->tu_rcnt) {
580	beql	1f
581	movb	r1,*0(r5)		#	*uuc->tu_rbptr++ = r1
582	incl	(r5)
583	decl	4(r5)			#	if (--uuc->tu_rcnt)
584	beql	2f			#		done
585	tstl	(sp)+
586	POPR				# 	registers saved in ubglue.s
587	rei				# }
5882:
589	cmpl	16(r5),$8		# if (uuc->tu_state != TUS_GETH)
590	beql	2f			# 	let uurintr handle it
5911:
592	rsb
5932:
594	mull2	$14,r0			# sizeof(uudata[ctlr]) = 14
595	movab	_uudata(r0),r4		# data = &uudata[ctlr];
596	cmpb	$1,(r4)			# if (data->pk_flag != TUF_DATA)
597	bneq	1b
598#ifdef notdef
599	/* this is for command packets */
600	beql	1f			# 	r0 = uuc->tu_rbptr
601	movl	(r5),r0
602	brb	2f
6031:					# else
604#endif
605	movl	24(r5),r0		# 	r0 = uuc->tu_addr
6062:
607	movzbl	1(r4),r3		# counter to r3 (data->pk_count)
608	movzwl	(r4),r1			# first word of checksum (=header)
609	mfpr	$IPL,-(sp)		# s = spl5();
610	mtpr	$0x15,$IPL		# to keep disk interrupts out
611	clrw	(r2)			# disable receiver interrupts
6123:	bbc	$7,(r2),3b		# while ((uuaddr->rcs & UUCS_READY)==0);
613	cvtwb	2(r2),(r0)+		# *buffer = uuaddr->rdb & 0xff
614	sobgtr	r3,1f			# continue with next byte ...
615	addw2	2(r2),r1		# unless this was the last (odd count)
616	brb	2f
617
6181:	bbc	$7,(r2),1b		# while ((uuaddr->rcs & UUCS_READY)==0);
619	cvtwb	2(r2),(r0)+		# *buffer = uuaddr->rdb & 0xff
620	addw2	-2(r0),r1		# add to checksum..
6212:
622	adwc	$0,r1			# get the carry
623	sobgtr	r3,3b			# loop while r3 > 0
624/*
625 * We're ready to get the checksum
626 */
6271:	bbc	$7,(r2),1b		# while ((uuaddr->rcs & UUCS_READY)==0);
628	cvtwb	2(r2),12(r4)		# get first (lower) byte
6291:	bbc	$7,(r2),1b
630	cvtwb	2(r2),13(r4)		# ..and second
631	cmpw	12(r4),r1		# is checksum ok?
632	beql	1f
633	movl	$14,16(r5)		# uuc->tu_state = TUS_CHKERR
634	brb	2f			# exit
6351:
636	movl	$11,16(r5)		# uuc->tu_state = TUS_GET (ok)
6372:
638	movw	$0x40,(r2)		# enable receiver interrupts
639	mtpr	(sp)+,$IPL		# splx(s);
640	rsb				# continue processing in uurintr
641#endif
642
643#if defined(VAX750) && !defined(MRSP)
644/*
645 * Pseudo DMA routine for VAX-11/750 console tu58
646 *   	    (without MRSP)
647 */
648	.align	1
649	.globl	tudma
650tudma:
651	movab	_tu,r5			# r5 = tu
652	tstl	4(r5)			# if (tu.tu_rcnt) {
653	beql	3f
654	mfpr	$CSRD,r1		# get data from tu58
655	movb	r1,*0(r5)		#	*tu.tu_rbptr++ = r1
656	incl	(r5)
657	decl	4(r5)			#	if (--tu.tu_rcnt)
658	beql	1f			#		done
659	tstl	(sp)+
660	POPR				# 	registers saved in ubglue.s
661	rei				# 	data handled, done
6621:					# }
663	cmpl	16(r5),$8		# if (tu.tu_state != TUS_GETH)
664	beql	2f			# 	let turintr handle it
6653:
666	rsb
6672:
668	movab	_tudata,r4		# r4 = tudata
669	cmpb	$1,(r4)			# if (tudata.pk_flag != TUF_DATA)
670	bneq	3b			# 	let turintr handle it
6711:					# else
672	movl	24(r5),r1		# get buffer pointer to r1
673	movzbl	1(r4),r3		# counter to r3
674	movzwl	(r4),r0			# first word of checksum (=header)
675	mtpr	$0,$CSRS		# disable receiver interrupts
6763:
677	bsbw	5f			# wait for next byte
678	mfpr	$CSRD,r5
679	movb	r5,(r1)+		# *buffer = rdb
680	sobgtr	r3,1f			# continue with next byte ...
681	mfpr	$CSRD,r2		# unless this was the last (odd count)
682	brb	2f
683
6841:	bsbw	5f			# wait for next byte
685	mfpr	$CSRD,r5
686	movb	r5,(r1)+		# *buffer = rdb
687	movzwl	-2(r1),r2		# get the last word back from memory
6882:
689	addw2	r2,r0			# add to checksum..
690	adwc	$0,r0			# get the carry
691	sobgtr	r3,3b			# loop while r3 > 0
692/*
693 * We're ready to get the checksum.
694 */
695	bsbw	5f
696	movab	_tudata,r4
697	mfpr	$CSRD,r5
698	movb	r5,12(r4)		# get first (lower) byte
699	bsbw	5f
700	mfpr	$CSRD,r5
701	movb	r5,13(r4)		# ..and second
702	movab	_tu,r5
703	cmpw	12(r4),r0		# is checksum ok?
704	beql	1f
705	movl	$14,16(r5)		# tu.tu_state = TUS_CHKERR
706	brb	2f			# exit
7071:
708	movl	$11,16(r5)		# tu.tu_state = TUS_GET
7092:
710	mtpr	$0x40,$CSRS		# enable receiver interrupts
711	rsb				# continue processing in turintr
712/*
713 * Loop until a new byte is ready from
714 * the tu58, make sure we don't loop forever
715 */
7165:
717	movl	$5000,r5		# loop max 5000 times
7181:
719	mfpr	$CSRS,r2
720	bbs	$7,r2,1f
721	sobgtr	r5,1b
722	movab	_tu,r5
723	movl	$13,16(r5)		# return TUS_RCVERR
724	tstl	(sp)+			# and let turintr handle it
7251:
726	rsb
727#endif
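/*
 * The checksum loops in uudma and tudma above compute the TU58 RSP
 * checksum: successive byte pairs of the packet (header first, low byte
 * first) are summed as 16-bit words with end-around carry.  A hypothetical
 * C rendering:
 *
 *	unsigned short
 *	rspsum(unsigned char *p, int nbytes)
 *	{
 *		unsigned long sum = 0;
 *		int i;
 *
 *		for (i = 0; i < nbytes; i += 2) {
 *			sum += p[i] | (i + 1 < nbytes ? p[i + 1] << 8 : 0);
 *			sum = (sum & 0xffff) + (sum >> 16);	// fold carry back in
 *		}
 *		return ((unsigned short)sum);
 *	}
 */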
728
729/*
730 * BI passive release things.
731 */
732SCBVEC(passiverel):
733	rei				# well that was useless
734
735/*
736 * Stray UNIBUS interrupt catch routines
737 */
738	.data
739	.align	2
740#define	PJ	PUSHR;jsb _Xustray
741	.globl	_catcher
742_catcher:
743	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
744	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
745	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
746	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
747	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
748	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
749	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
750	PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ;PJ
751
752	.globl	_cold
753	.globl	_br
754	.globl	_cvec
755_cold:	.long	1
756_br:	.long	0
757_cvec:	.long	0
758
759	.text
760SCBVEC(ustray):
761	blbc	_cold,1f
762	mfpr	$IPL,r11
763	movl	r11,_br
764	subl3	$_catcher+8,(sp)+,r10
765	ashl	$-1,r10,r10
766	movl	r10,_cvec
767	POPR
768	rei
7691:
770	subl3	$_catcher+8,(sp)+,r0
771	ashl	$-1,r0,-(sp)
772	mfpr	$IPL,-(sp)
773	PRINTF(2, "uba?: stray intr ipl %x vec %o\n")
774	POPR
775	rei
776
777#if VAX630 || VAX650
778/*
779 * Emulation OpCode jump table:
780 *	ONLY GOES FROM 0xf8 (-8) TO 0x3B (59)
781 */
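/*
 * A hypothetical C rendering of the dispatch done by SCBVEC(emulate)
 * below (EMUTABLE is 0x3b + 8):
 *
 *	int idx = (char)opcode + 8;	// 0xf8..0xff sign-extend to -8..-1
 *
 *	if (idx < 0 || idx > EMUTABLE)	// outside 0xf8..0x3b
 *		goto noemulate;		// reserved instruction fault
 *	(*emJUMPtable[idx])();		// results come back in r0-r5
 */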
782#define EMUTABLE	0x43
783#define NOEMULATE	.long noemulate
784#define	EMULATE(a)	.long _EM/**/a
785	.globl	_emJUMPtable
786_emJUMPtable:
787/* f8 */	EMULATE(ashp);	EMULATE(cvtlp);	NOEMULATE;	NOEMULATE
788/* fc */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
789/* 00 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
790/* 04 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
791/* 08 */	EMULATE(cvtps);	EMULATE(cvtsp);	NOEMULATE;	EMULATE(crc)
792/* 0c */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
793/* 10 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
794/* 14 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
795/* 18 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
796/* 1c */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
797/* 20 */	EMULATE(addp4);	EMULATE(addp6);	EMULATE(subp4);	EMULATE(subp6)
798/* 24 */	EMULATE(cvtpt);	EMULATE(mulp);	EMULATE(cvttp);	EMULATE(divp)
799/* 28 */	NOEMULATE;	EMULATE(cmpc3);	EMULATE(scanc);	EMULATE(spanc)
800/* 2c */	NOEMULATE;	EMULATE(cmpc5);	EMULATE(movtc);	EMULATE(movtuc)
801/* 30 */	NOEMULATE;	NOEMULATE;	NOEMULATE;	NOEMULATE
802/* 34 */	EMULATE(movp);	EMULATE(cmpp3);	EMULATE(cvtpl);	EMULATE(cmpp4)
803/* 38 */	EMULATE(editpc); EMULATE(matchc); EMULATE(locc); EMULATE(skpc)
804#endif
805
806/*
807 * Trap and fault vector routines
808 */
809#define	TRAP(a)	pushl $T_/**/a; jbr alltraps
810
811/*
812 * Ast delivery (profiling and/or reschedule)
813 */
814SCBVEC(astflt):
815	pushl $0; TRAP(ASTFLT)
816SCBVEC(privinflt):
817	pushl $0; TRAP(PRIVINFLT)
818SCBVEC(xfcflt):
819	pushl $0; TRAP(XFCFLT)
820SCBVEC(resopflt):
821	pushl $0; TRAP(RESOPFLT)
822SCBVEC(resadflt):
823	pushl $0; TRAP(RESADFLT)
824SCBVEC(bptflt):
825	pushl $0; TRAP(BPTFLT)
826SCBVEC(compatflt):
827	TRAP(COMPATFLT);
828SCBVEC(kdbintr):
829	pushl $0; TRAP(KDBTRAP)
830SCBVEC(tracep):
831	pushl $0; TRAP(TRCTRAP)
832SCBVEC(arithtrap):
833	TRAP(ARITHTRAP)
834SCBVEC(protflt):
835	blbs	(sp)+,segflt
836	TRAP(PROTFLT)
837segflt:
838	TRAP(SEGFLT)
839
840/*
841 * The following is called with the stack set up as follows:
842 *
843 *	  (sp):	Opcode
844 *	 4(sp):	Instruction PC
845 *	 8(sp):	Operand 1
846 *	12(sp):	Operand 2
847 *	16(sp):	Operand 3
848 *	20(sp):	Operand 4
849 *	24(sp):	Operand 5
850 *	28(sp):	Operand 6
851 *	32(sp):	Operand 7 (unused)
852 *	36(sp):	Operand 8 (unused)
853 *	40(sp):	Return PC
854 *	44(sp):	Return PSL
855 *	48(sp): TOS before instruction
856 *
857 * Each individual routine is called with the stack set up as follows:
858 *
859 *	  (sp):	Return address of trap handler
860 *	 4(sp):	Opcode (will get return PSL)
861 *	 8(sp):	Instruction PC
862 *	12(sp):	Operand 1
863 *	16(sp):	Operand 2
864 *	20(sp):	Operand 3
865 *	24(sp):	Operand 4
866 *	28(sp):	Operand 5
867 *	32(sp):	Operand 6
868 *	36(sp):	saved register 11
869 *	40(sp):	saved register 10
870 *	44(sp):	Return PC
871 *	48(sp):	Return PSL
872 *	52(sp): TOS before instruction
873 */
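/*
 * A hypothetical C view of the second layout above, i.e. what each
 * individual emulation routine sees at sp after the jsb (field names
 * are illustrative only):
 *
 *	struct emframe {
 *		int	(*ef_ret)();	//  0: return to SCBVEC(emulate)
 *		long	ef_opcode;	//  4: later overwritten with the PSL
 *		long	ef_instpc;	//  8: PC of the emulated instruction
 *		long	ef_operand[6];	// 12: operands 1-6
 *		long	ef_r11;		// 36: saved r11
 *		long	ef_r10;		// 40: saved r10
 *		long	ef_retpc;	// 44: return PC
 *		long	ef_retpsl;	// 48: return PSL
 *	};
 */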
874
875SCBVEC(emulate):
876#if VAX630 || VAX650
877	movl	r11,32(sp)		# save register r11 in unused operand
878	movl	r10,36(sp)		# save register r10 in unused operand
879	cvtbl	(sp),r10		# get opcode
880	addl2	$8,r10			# shift negative opcodes
881	subl3	r10,$EMUTABLE,r11	# forget it if opcode is out of range
882	bcs	noemulate
883	movl	_emJUMPtable[r10],r10	# call appropriate emulation routine
884	jsb	(r10)		# routines put return values into regs 0-5
885	movl	32(sp),r11		# restore register r11
886	movl	36(sp),r10		# restore register r10
887	insv	(sp),$0,$4,44(sp)	# and condition codes in Opcode spot
888	addl2	$40,sp			# adjust stack for return
889	rei
890noemulate:
891	addl2	$48,sp			# adjust stack for
892#endif
893	.word	0xffff			# "reserved instruction fault"
894SCBVEC(emulateFPD):
895	.word	0xffff			# "reserved instruction fault"
896SCBVEC(transflt):
897	bitl	$2,(sp)+
898	bnequ	tableflt
899 	jsb	Fastreclaim		# try and avoid pagein
900	TRAP(PAGEFLT)
901tableflt:
902	TRAP(TABLEFLT)
903
904alltraps:
905	mfpr	$USP,-(sp); calls $0,_trap; mtpr (sp)+,$USP
906	incl	_cnt+V_TRAP
907	addl2	$8,sp			# pop type, code
908	mtpr	$HIGH,$IPL		## don't go to a higher IPL (GROT)
909	rei
910
911SCBVEC(syscall):
912	pushl	$T_SYSCALL
913	mfpr	$USP,-(sp); calls $0,_syscall; mtpr (sp)+,$USP
914	incl	_cnt+V_SYSCALL
915	addl2	$8,sp			# pop type, code
916	mtpr	$HIGH,$IPL		## don't go to a higher IPL (GROT)
917	rei
918
919/*
920 * System page table
921 * Mbmap and Usrptmap are enlarged by CLSIZE entries
922 * because they are managed by resource maps whose indices start at 1 or CLSIZE.
923 */
924#define	vaddr(x)	((((x)-_Sysmap)/4)*NBPG+0x80000000)
925#define	SYSMAP(mname, vname, npte)			\
926_/**/mname:	.globl	_/**/mname;		\
927	.space	(npte)*4;				\
928	.globl	_/**/vname;			\
929	.set	_/**/vname,vaddr(_/**/mname)
930#define	ADDMAP(npte)	.space	(npte)*4
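/*
 * For reference, a rough expansion of one use of these macros:
 * SYSMAP(mmap, vmmap, 1) becomes approximately
 *
 *	_mmap:	.globl	_mmap
 *		.space	1*4
 *		.globl	_vmmap
 *		.set	_vmmap,((_mmap-_Sysmap)/4)*NBPG+0x80000000
 *
 * i.e. one system pte is reserved in Sysmap and vmmap is the kernel
 * virtual address which that pte maps.
 */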
931
932	.data
933	.align	2
934	SYSMAP(Sysmap	,Sysbase	,SYSPTSIZE	)
935	SYSMAP(Forkmap	,forkutl	,UPAGES		)
936	SYSMAP(Xswapmap	,xswaputl	,UPAGES		)
937	SYSMAP(Xswap2map,xswap2utl	,UPAGES		)
938	SYSMAP(Swapmap	,swaputl	,UPAGES		)
939	SYSMAP(Pushmap	,pushutl	,UPAGES		)
940	SYSMAP(Vfmap	,vfutl		,UPAGES		)
941	SYSMAP(CMAP1	,CADDR1		,1		)
942	SYSMAP(CMAP2	,CADDR2		,1		)
943	SYSMAP(mmap	,vmmap		,1		)
944	SYSMAP(alignmap	,alignutl	,1		)	/* XXX */
945	SYSMAP(msgbufmap,msgbuf		,MSGBUFPTECNT	)
946	SYSMAP(Mbmap	,mbutl		,NMBCLUSTERS*MCLBYTES/NBPG+CLSIZE )
947#ifdef MFS
948#include "ufs/mfsiom.h"
949	/*
950	 * Used by the mfs_doio() routine for physical I/O
951	 */
952	SYSMAP(Mfsiomap	,mfsiobuf	,MFS_MAPREG )
953#endif /* MFS */
954#ifdef NFS
955#include "nfs/nfsiom.h"
956	/*
957	 * Used by the nfs_doio() routine for physical I/O
958	 */
959	SYSMAP(Nfsiomap	,nfsiobuf	,NFS_MAPREG )
960#endif /* NFS */
961	/*
962	 * This is the map used by the kernel memory allocator.
963	 * It is expanded as necessary by the special features
964	 * that use it.
965	 *
966	 * XXX: NEED way to compute kmem size from maxusers,
967	 * device complement
968	 */
969	SYSMAP(kmempt	,kmembase	,NKMEMCLUSTERS*CLSIZE )
970#ifdef	SYSVSHM
971				ADDMAP(	SHMMAXPGS	)
972#endif
973#ifdef	GPROF
974				ADDMAP( 600*CLSIZE	)
975#endif
976	SYSMAP(ekmempt	,kmemlimit	,0		)
977
978	SYSMAP(UMBAbeg	,umbabeg	,0		)
979	SYSMAP(Nexmap	,nexus		,16*MAXNNEXUS	)
980#ifdef QBA
981#if (QBAPAGES+UBAIOPAGES) > (UBAPAGES+UBAIOPAGES)*NUBA
982	SYSMAP(UMEMmap	,umem		,(QBAPAGES+UBAIOPAGES) )
983#else
984	SYSMAP(UMEMmap	,umem		,(UBAPAGES+UBAIOPAGES)*NUBA )
985#endif
986#else /* QBA */
987	SYSMAP(UMEMmap	,umem		,(UBAPAGES+UBAIOPAGES)*NUBA )
988#endif /* QBA */
989#if VAX8600
990	SYSMAP(Ioamap	,ioa		,MAXNIOA*IOAMAPSIZ/NBPG	)
991#endif
992#if VAX8200 || VAX630
993	SYSMAP(Clockmap	,ka630clock	,1		)
994#endif
995#if VAX8200
996	/* alas, the clocks on the 8200 and 630 are not quite identical */
997	/* they could be shared for now, but this seemed cleaner */
998	.globl _ka820clock; .set _ka820clock,_ka630clock
999	SYSMAP(Ka820map	,ka820port	,1		)
1000	SYSMAP(RX50map	,rx50device	,1		)
1001#ifdef notyet
1002	SYSMAP(BRAMmap	,ka820bootram	,KA820_BRPAGES	)
1003	SYSMAP(EEPROMmap,ka820eeprom	,KA820_EEPAGES	)
1004#endif
1005#endif
1006#if VAX630
1007	SYSMAP(Ka630map	,ka630cpu	,1		)
1008#endif
1009#if VAX650
1010 	SYSMAP(KA650MERRmap	,ka650merr	,1		)
1011 	SYSMAP(KA650CBDmap	,ka650cbd	,1		)
1012 	SYSMAP(KA650SSCmap	,ka650ssc	,3		)
1013 	SYSMAP(KA650IPCRmap	,ka650ipcr	,1		)
1014 	SYSMAP(KA650CACHEmap	,ka650cache	,KA650_CACHESIZE/NBPG )
1015#endif
1016#ifdef QBA
1017	/*
1018	 * qvss and qdss don't coexist - one map will suffice
1019	 * for either. qvss is 256K each and qdss is 64K each.
1020	 */
1021#include "qv.h"
1022#include "qd.h"
1023#if NQV > 0 || NQD > 0
1024	SYSMAP(QVmap	,qvmem		,((512*NQV)+(128*NQD)))
1025#endif
1026#endif
1027	SYSMAP(UMBAend	,umbaend	,0		)
1028
1029	SYSMAP(Usrptmap	,usrpt		,USRPTSIZE+CLSIZE )
1030
1031eSysmap:
1032	.globl	_Syssize
1033	.set	_Syssize,(eSysmap-_Sysmap)/4
1034	.text
1035
1036/*
1037 * Initialization
1038 *
1039 * ipl 0x1f; mapen 0; scbb, pcbb, sbr, slr, isp, ksp not set
1040 */
1041	.data
1042	.globl	_cpu
1043_cpu:	.long	0
1044	.text
1045	.globl	start
1046start:
1047	.word	0
1048	mtpr	$0,$ICCS
1049/* set system control block base and system page table params */
1050	mtpr	$_scb-0x80000000,$SCBB
1051	mtpr	$_Sysmap-0x80000000,$SBR
1052	mtpr	$_Syssize,$SLR
1053/* double map the kernel into the virtual user addresses of phys mem */
1054	mtpr	$_Sysmap,$P0BR
1055	mtpr	$_Syssize,$P0LR
1056/* set ISP and get cpu type */
1057	movl	$_intstack+NISP*NBPG,sp
1058	mfpr	$SID,r0
1059	movab	_cpu,r1
1060	extzv	$24,$8,r0,(r1)
1061/* init RPB */
1062	movab	_rpb,r0
1063	movl	r0,(r0)+			# rp_selfref
1064	movab	_doadump,r1
1065	movl	r1,(r0)+			# rp_dumprout
1066	movl	$0x1f,r2
1067	clrl	r3
10681:	addl2	(r1)+,r3; sobgtr r2,1b
1069	movl	r3,(r0)+			# rp_chksum
1070/* count up memory; _physmem contains limit */
1071	clrl	r7
1072	ashl	$PGSHIFT,_physmem,r8
1073	decl	r8
10741:	pushl	$4; pushl r7; calls $2,_badaddr; tstl r0; bneq 9f
1075	acbl	r8,$64*1024,r7,1b
10769:
1077#if  VAX630 || VAX650
1078/* reserve area at top of memory for processor specific use */
1079	cmpb	_cpu,$VAX_630
1080	beql	1f
1081	cmpb	_cpu,$VAX_650
1082	bneq	2f
1083	subl2	$32768,r7	# space for Qbus map registers
1084	brb	2f
10851:
1086	subl2   $4096,r7	# space for console scratchpad
10872:
1088#endif
1089/* clear memory from kernel bss and pages for proc 0 u. and page table */
1090	movab	_edata,r6; bicl2 $SYSTEM,r6
1091	movab	_end,r5; bicl2 $SYSTEM,r5
1092#ifdef KADB
1093	subl2	$4,r5
10941:	clrl	(r6); acbl r5,$4,r6,1b		# clear just bss
1095	addl2	$4,r5
1096	bbc	$6,r11,0f			# check RB_KDB
1097	bicl3	$SYSTEM,r9,r5			# skip symbol & string tables
1098	bicl3	$SYSTEM,r9,r6			# r9 obtained from boot
1099#endif
11000:	bisl3	$SYSTEM,r5,r9			# convert to virtual address
1101	addl2	$NBPG-1,r9			# roundup to next page
1102	addl2	$(UPAGES*NBPG)+NBPG+NBPG,r5
11031:	clrq	(r6); acbl r5,$8,r6,1b
1104/* trap() and syscall() save r0-r11 in the entry mask (per ../h/reg.h) */
1105/* panic() is convenient place to save all for debugging */
1106	bisw2	$0x0fff,_trap
1107	bisw2	$0x0fff,_syscall
1108	bisw2	$0x0fff,_panic
1109	calls	$0,_fixctlrmask
1110/* initialize system page table: uba vectors and int stack writeable */
1111	clrl	r2
1112	movab	eintstack,r1; bbcc $31,r1,0f; 0: ashl $-PGSHIFT,r1,r1
11131:	bisl3	$PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
1114/*
1115 * make rpb read-only as red zone for interrupt stack
1116 * (scb(s) and UNIvec are write-protected later)
1117 */
1118	bicl2	$PG_PROT,_rpbmap
1119	bisl2	$PG_KR,_rpbmap
1120/* make kernel text space read-only */
1121	movab	_etext+NBPG-1,r1; bbcc $31,r1,0f; 0: ashl $-PGSHIFT,r1,r1
11221:	bisl3	$PG_V|PG_URKR,r2,_Sysmap[r2]; aoblss r1,r2,1b
1123/* make kernel data, bss, read-write */
1124	bicl3	$SYSTEM,r9,r1; ashl $-PGSHIFT,r1,r1
11251:	bisl3	$PG_V|PG_KW,r2,_Sysmap[r2]; aoblss r1,r2,1b
1126/* now go to mapped mode */
1127	mtpr	$0,$TBIA; mtpr $1,$MAPEN; jmp *$0f; 0:
1128/* init mem sizes */
1129	ashl	$-PGSHIFT,r7,_physmem
1130/* setup context for proc[0] == Scheduler */
1131	bicl3	$SYSTEM|(NBPG-1),r9,r6	# make phys, page boundary
1132/* setup page table for proc[0] */
1133	ashl	$-PGSHIFT,r6,r3			# r3 = btoc(r6)
1134	bisl3	$PG_V|PG_KW,r3,_Usrptmap	# init first upt entry
1135	incl	r3
1136	movab	_usrpt,r0
1137	mtpr	r0,$TBIS
1138/* init p0br, p0lr */
1139	mtpr	r0,$P0BR
1140	mtpr	$0,$P0LR
1141/* init p1br, p1lr */
1142	movab	NBPG(r0),r0
1143	movl	$0x200000-UPAGES,r1
1144	mtpr	r1,$P1LR
1145	mnegl	r1,r1
1146	moval	-4*UPAGES(r0)[r1],r2
1147	mtpr	r2,$P1BR
1148/* setup mapping for UPAGES of _u */
1149	movl	$UPAGES,r2; movab _u+NBPG*UPAGES,r1; addl2 $UPAGES,r3; jbr 2f
11501:	decl	r3
1151	moval	-NBPG(r1),r1;
1152	bisl3	$PG_V|PG_URKW,r3,-(r0)
1153	mtpr	r1,$TBIS
11542:	sobgeq	r2,1b
1155/* initialize (slightly) the pcb */
1156	movab	UPAGES*NBPG(r1),PCB_KSP(r1)
1157	mnegl	$1,PCB_ESP(r1)
1158	mnegl	$1,PCB_SSP(r1)
1159	movl	r1,PCB_USP(r1)
1160	mfpr	$P0BR,PCB_P0BR(r1)
1161	mfpr	$P0LR,PCB_P0LR(r1)
1162	movb	$4,PCB_P0LR+3(r1)		# disable ast
1163	mfpr	$P1BR,PCB_P1BR(r1)
1164	mfpr	$P1LR,PCB_P1LR(r1)
1165	movl	$CLSIZE,PCB_SZPT(r1)		# init u.u_pcb.pcb_szpt
1166	movl	r9,PCB_R9(r1)
1167	movl	r10,PCB_R10(r1)
1168	movl	r11,PCB_R11(r1)
1169	movab	1f,PCB_PC(r1)			# initial pc
1170	clrl	PCB_PSL(r1)			# mode(k,k), ipl=0
1171	ashl	$PGSHIFT,r3,r3
1172	mtpr	r3,$PCBB			# first pcbb
1173/* set regs, p0br, p0lr, p1br, p1lr, astlvl, ksp and change to kernel mode */
1174	ldpctx
1175	rei
1176/* put signal trampoline code in u. area */
11771:	movab	_u,r0
1178	movc3	$19,sigcode,PCB_SIGC(r0)
1179/* save boot device in global _bootdev */
1180	movl	r10,_bootdev
1181/* save reboot flags in global _boothowto */
1182	movl	r11,_boothowto
1183#ifdef KADB
1184/* save end of symbol & string table in global _bootesym */
1185	subl3	$NBPG-1,r9,_bootesym
1186#endif
1187/* calculate firstaddr, and call main() */
1188	bicl3	$SYSTEM,r9,r0; ashl $-PGSHIFT,r0,-(sp)
1189	addl2	$UPAGES+1,(sp); calls $1,_main
1190/* proc[1] == /etc/init now running here; run icode */
1191	pushl	$PSL_CURMOD|PSL_PRVMOD; pushl $0; rei
1192
1193/* signal trampoline code: this code is assumed to be exactly 19 bytes long, */
1194/* both in ../vax/pcb.h and in the movc3 above */
1195sigcode:
1196	calls	$4,8(pc)	# params pushed by sendsig
1197	movl	sp,ap		# calls frame built by sendsig
1198	chmk	$SYS_sigreturn	# cleanup mask and onsigstack
1199	halt			# sigreturn() does not return!
1200	.word	0x3f		# registers 0-5
1201	callg	(ap),*16(ap)	# call the signal handler
1202	ret			# return to code above
1203
1204	.globl	_icode
1205	.globl	_initflags
1206	.globl	_szicode
1207/*
1208 * Icode is copied out to process 1 to exec /etc/init.
1209 * If the exec fails, process 1 exits.
1210 */
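/*
 * Roughly, once copied to address 0 of process 1 the code below does
 * (with initflags defaulting to the empty string, and the error value
 * coming back from the failed execv in r0):
 *
 *	char *argv[] = { "init", initflags, 0 };
 *
 *	execv("/sbin/init", argv);
 *	_exit(error);			// only reached if the exec fails
 */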
1211_icode:
1212	pushab	b`argv-l0(pc)
1213l0:	pushab	b`init-l1(pc)
1214l1:	pushl	$2
1215	movl	sp,ap
1216	chmk	$SYS_execv
1217	pushl	r0
1218	chmk	$SYS_exit
1219
1220init:	.asciz	"/sbin/init"
1221	.align	2
1222_initflags:
1223	.long	0
1224argv:	.long	init+6-_icode
1225	.long	_initflags-_icode
1226	.long	0
1227_szicode:
1228	.long	_szicode-_icode
1229
1230/*
1231 * Primitives
1232 */
1233
1234#ifdef GPROF
1235#define	ENTRY(name, regs) \
1236	.globl _/**/name; .align 1; _/**/name: .word regs; jsb mcount
1237#define	JSBENTRY(name, regs) \
1238	.globl _/**/name; _/**/name: \
1239	movl fp,-(sp); movab -12(sp),fp; pushr $(regs); jsb mcount; \
1240	popr $(regs); movl (sp)+,fp
1241#else
1242#define	ENTRY(name, regs) \
1243	.globl _/**/name; .align 1; _/**/name: .word regs
1244#define	JSBENTRY(name, regs) \
1245	.globl _/**/name; _/**/name:
1246#endif GPROF
1247#define R0 0x01
1248#define R1 0x02
1249#define R2 0x04
1250#define R3 0x08
1251#define R4 0x10
1252#define R5 0x20
1253#define R6 0x40
1254
1255/*
1256 * badaddr(addr, len)
1257 *	see if access addr with a len type instruction causes a machine check
1258 *	len is length of access (1=byte, 2=short, 4=long)
1259 */
1260	.globl	_badaddr
1261_badaddr:
1262	.word	0
1263	movl	$1,r0
1264	mfpr	$IPL,r1
1265	mtpr	$HIGH,$IPL
1266	movl	4(ap),r3
1267	movl	8(ap),r4
1268	movab	2f,nofault		# jump to 2f on machcheck
1269	bbc	$0,r4,1f; tstb	(r3)
12701:	bbc	$1,r4,1f; tstw	(r3)
12711:	bbc	$2,r4,1f; tstl	(r3)
12721:	clrl	r0			# made it w/o machine checks
12732:	clrl	nofault
1274	mtpr	r1,$IPL
1275	ret
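/*
 * For example, the memory-sizing loop in start above is essentially:
 *
 *	for (addr = 0; addr < (physmem << PGSHIFT); addr += 64 * 1024)
 *		if (badaddr((caddr_t)addr, 4))	// longword probe machine-checked
 *			break;			// addr is the top of real memory
 *
 * (a sketch only; start keeps the running address in r7 and later
 * converts it back to a page count for _physmem).
 */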
1276
1277/*
1278 * update profiling information for the user
1279 * addupc(pc, &u.u_prof, ticks)
1280 */
1281ENTRY(addupc, 0)
1282	movl	8(ap),r2		# &u.u_prof
1283	subl3	8(r2),4(ap),r0		# corrected pc
1284	blss	9f
1285	extzv	$1,$31,r0,r0		# logical right shift
1286	extzv	$1,$31,12(r2),r1	# ditto for scale
1287	emul	r1,r0,$0,r0
1288	ashq	$-14,r0,r0
1289	tstl	r1
1290	bneq	9f
1291	bicl2	$1,r0
1292	cmpl	r0,4(r2)		# length
1293	bgequ	9f
1294	addl2	(r2),r0			# base
1295	probew	$3,$2,(r0)
1296	beql	8f
1297	addw2	12(ap),(r0)
12989:
1299	ret
13008:
1301	clrl	12(r2)
1302	ret
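/*
 * A hypothetical C rendering of the scaling above (field names per the
 * profil(2) convention: pr_base, pr_size, pr_off, pr_scale):
 *
 *	unsigned off, i;
 *
 *	off = (pc - prof->pr_off) >> 1;			// pc below pr_off: ignore
 *	i = (off * (prof->pr_scale >> 1)) >> 14;	// == (pc-pr_off)*scale/65536
 *	i &= ~1;					// halfword buckets
 *	if (i < prof->pr_size) {
 *		// probew checks that the bucket is writable; if not,
 *		// pr_scale is cleared to turn profiling off
 *		*(short *)((char *)prof->pr_base + i) += ticks;
 *	}
 */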
1303
1304/*
1305 * Copy a null terminated string from the user address space into
1306 * the kernel address space.
1307 *
1308 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
1309 */
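/*
 * A typical use from C (the calling context here is illustrative only):
 *
 *	error = copyinstr(uap->fname, kbuf, MAXPATHLEN, &len);
 *	// error == 0:            string copied, len includes the null byte
 *	// error == ENAMETOOLONG: no null byte within maxlength bytes
 *	// error == EFAULT:       the user address was not readable
 */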
1310ENTRY(copyinstr, R6)
1311	movl	12(ap),r6		# r6 = max length
1312	jlss	8f
1313	movl	4(ap),r1		# r1 = user address
1314	bicl3	$~(NBPG*CLSIZE-1),r1,r2	# r2 = bytes on first page
1315	subl3	r2,$NBPG*CLSIZE,r2
1316	movl	8(ap),r3		# r3 = kernel address
13171:
1318	cmpl	r6,r2			# r2 = min(bytes on page, length left);
1319	jgeq	2f
1320	movl	r6,r2
13212:
1322	prober	$3,r2,(r1)		# bytes accessible?
1323	jeql	8f
1324	subl2	r2,r6			# update bytes left count
1325#ifdef NOSUBSINST
1326	# fake the locc instr. for processors that don't have it
1327	movl	r2,r0
13286:
1329	tstb	(r1)+
1330	jeql	5f
1331	sobgtr	r0,6b
1332	jbr	7f
13335:
1334	decl	r1
1335	jbr	3f
13367:
1337#else
1338	locc	$0,r2,(r1)		# null byte found?
1339	jneq	3f
1340#endif
1341	subl2	r2,r1			# back up pointer updated by `locc'
1342	movc3	r2,(r1),(r3)		# copy in next piece
1343	movl	$(NBPG*CLSIZE),r2	# check next page
1344	tstl	r6			# run out of space?
1345	jneq	1b
1346	movl	$ENAMETOOLONG,r0	# set error code and return
1347	jbr	9f
13483:
1349	tstl	16(ap)			# return length?
1350	beql	4f
1351	subl3	r6,12(ap),r6		# actual len = maxlen - unused pages
1352	subl2	r0,r6			#	- unused on this page
1353	addl3	$1,r6,*16(ap)		#	+ the null byte
13544:
1355	subl2	r0,r2			# r2 = number of bytes to move
1356	subl2	r2,r1			# back up pointer updated by `locc'
1357	incl	r2			# copy null byte as well
1358	movc3	r2,(r1),(r3)		# copy in last piece
1359	clrl	r0			# redundant
1360	ret
13618:
1362	movl	$EFAULT,r0
13639:
1364	tstl	16(ap)
1365	beql	1f
1366	subl3	r6,12(ap),*16(ap)
13671:
1368	ret
1369
1370/*
1371 * Copy a null terminated string from the kernel
1372 * address space to the user address space.
1373 *
1374 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
1375 */
1376ENTRY(copyoutstr, R6)
1377	movl	12(ap),r6		# r6 = max length
1378	jlss	8b
1379	movl	4(ap),r1		# r1 = kernel address
1380	movl	8(ap),r3		# r3 = user address
1381	bicl3	$~(NBPG*CLSIZE-1),r3,r2	# r2 = bytes on first page
1382	subl3	r2,$NBPG*CLSIZE,r2
13831:
1384	cmpl	r6,r2			# r2 = min(bytes on page, length left);
1385	jgeq	2f
1386	movl	r6,r2
13872:
1388	probew	$3,r2,(r3)		# bytes accessible?
1389	jeql	8b
1390	subl2	r2,r6			# update bytes left count
1391#ifdef NOSUBSINST
1392	# fake the locc instr. for processors that don't have it
1393	movl	r2,r0
13946:
1395	tstb	(r1)+
1396	jeql	5f
1397	sobgtr	r0,6b
1398	jbr	7f
13995:
1400	decl	r1
1401	jbr	3b
14027:
1403#else
1404	locc	$0,r2,(r1)		# null byte found?
1405	jneq	3b
1406#endif
1407	subl2	r2,r1			# back up pointer updated by `locc'
1408	movc3	r2,(r1),(r3)		# copy in next piece
1409	movl	$(NBPG*CLSIZE),r2	# check next page
1410	tstl	r6			# run out of space?
1411	jneq	1b
1412	movl	$ENAMETOOLONG,r0	# set error code and return
1413	jbr	9b
1414
1415/*
1416 * Copy a null terminated string from one point to another in
1417 * the kernel address space.
1418 *
1419 * copystr(fromaddr, toaddr, maxlength, &lencopied)
1420 */
1421ENTRY(copystr, R6)
1422	movl	12(ap),r6		# r6 = max length
1423	jlss	8b
1424	movl	4(ap),r1		# r1 = src address
1425	movl	8(ap),r3		# r3 = dest address
14261:
1427	movzwl	$65535,r2		# r2 = bytes in first chunk
1428	cmpl	r6,r2			# r2 = min(bytes in chunk, length left);
1429	jgeq	2f
1430	movl	r6,r2
14312:
1432	subl2	r2,r6			# update bytes left count
1433#ifdef NOSUBSINST
1434	# fake the locc instr. for processors that don't have it
1435	movl	r2,r0
14366:
1437	tstb	(r1)+
1438	jeql	5f
1439	sobgtr	r0,6b
1440	jbr	7f
14415:
1442	decl	r1
1443	jbr	3b
14447:
1445#else
1446	locc	$0,r2,(r1)		# null byte found?
1447	jneq	3b
1448#endif
1449	subl2	r2,r1			# back up pointer updated by `locc'
1450	movc3	r2,(r1),(r3)		# copy in next piece
1451	tstl	r6			# run out of space?
1452	jneq	1b
1453	movl	$ENAMETOOLONG,r0	# set error code and return
1454	jbr	9b
1455
1456/*
1457 * Copy specified amount of data from user space into the kernel
1458 * Copyin(from, to, len)
1459 *	r1 == from (user source address)
1460 *	r3 == to (kernel destination address)
1461 *	r5 == length
1462 */
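/*
 * Like the fu/su routines further down, this is reached by jsb with its
 * arguments already in registers; a call such as copyin(from, to, len)
 * from C is assumed to be rewritten (by asm.sed) into roughly:
 *
 *	movl	from,r1
 *	movl	to,r3
 *	movl	len,r5
 *	jsb	_Copyin			# 0 or EFAULT comes back in r0
 */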
1463	.align	1
1464JSBENTRY(Copyin, R1|R3|R5)
1465	cmpl	r5,$(NBPG*CLSIZE)	# probing one page or less ?
1466	bgtru	1f			# no
1467	prober	$3,r5,(r1)		# bytes accessible ?
1468	beql	ersb			# no
1469	movc3	r5,(r1),(r3)
1470/*	clrl	r0			# redundant */
1471	rsb
14721:
1473	blss	ersb			# negative length?
1474	pushl	r6			# r6 = length
1475	movl	r5,r6
1476	bicl3	$~(NBPG*CLSIZE-1),r1,r0	# r0 = bytes on first page
1477	subl3	r0,$(NBPG*CLSIZE),r0
1478	addl2	$(NBPG*CLSIZE),r0	# plus one additional full page
1479	jbr	2f
1480
1481ciloop:
1482	movc3	r0,(r1),(r3)
1483	movl	$(2*NBPG*CLSIZE),r0	# next amount to move
14842:
1485	cmpl	r0,r6
1486	bleq	3f
1487	movl	r6,r0
14883:
1489	prober	$3,r0,(r1)		# bytes accessible ?
1490	beql	ersb1			# no
1491	subl2	r0,r6			# last move?
1492	bneq	ciloop			# no
1493
1494	movc3	r0,(r1),(r3)
1495/*	clrl	r0			# redundant */
1496	movl	(sp)+,r6		# restore r6
1497	rsb
1498
1499ersb1:
1500	movl	(sp)+,r6		# restore r6
1501ersb:
1502	movl	$EFAULT,r0
1503	rsb
1504
1505/*
1506 * Copy specified amount of data from kernel to the user space
1507 * Copyout(from, to, len)
1508 *	r1 == from (kernel source address)
1509 *	r3 == to (user destination address)
1510 *	r5 == length
1511 */
1512	.align	1
1513JSBENTRY(Copyout, R1|R3|R5)
1514	cmpl	r5,$(NBPG*CLSIZE)	# moving one page or less ?
1515	bgtru	1f			# no
1516	probew	$3,r5,(r3)		# bytes writeable?
1517	beql	ersb			# no
1518	movc3	r5,(r1),(r3)
1519/*	clrl	r0			# redundant */
1520	rsb
15211:
1522	blss	ersb			# negative length?
1523	pushl	r6			# r6 = length
1524	movl	r5,r6
1525	bicl3	$~(NBPG*CLSIZE-1),r3,r0	# r0 = bytes on first page
1526	subl3	r0,$(NBPG*CLSIZE),r0
1527	addl2	$(NBPG*CLSIZE),r0	# plus one additional full page
1528	jbr	2f
1529
1530coloop:
1531	movc3	r0,(r1),(r3)
1532	movl	$(2*NBPG*CLSIZE),r0	# next amount to move
15332:
1534	cmpl	r0,r6
1535	bleq	3f
1536	movl	r6,r0
15373:
1538	probew	$3,r0,(r3)		# bytes writeable?
1539	beql	ersb1			# no
1540	subl2	r0,r6			# last move?
1541	bneq	coloop			# no
1542
1543	movc3	r0,(r1),(r3)
1544/*	clrl	r0			# redundant */
1545	movl	(sp)+,r6		# restore r6
1546	rsb
1547
1548/*
1549 * savectx is like setjmp but saves all registers.
1550 * Called before swapping out the u. area, restored by resume()
1551 * below.
1552 */
1553#define PCLOC 16	/* location of pc in calls frame */
1554#define APLOC 8		/* location of ap,fp in calls frame */
1555
1556ENTRY(savectx, 0)
1557	movl	4(ap),r0
1558	movq	r6,(r0)+
1559	movq	r8,(r0)+
1560	movq	r10,(r0)+
1561	movq	APLOC(fp),(r0)+	# save ap, fp
1562	addl3	$8,ap,(r0)+	# save sp
1563	movl	PCLOC(fp),(r0)	# save pc
1564	clrl	r0
1565	ret
1566
1567#ifdef KADB
1568/*
1569 * C library -- reset, setexit
1570 *
1571 *	reset(x)
1572 * will generate a "return" from
1573 * the last call to
1574 *	setexit()
1575 * by restoring r6 - r12, ap, fp
1576 * and doing a return.
1577 * The returned value is x; on the original
1578 * call the returned value is 0.
1579 */
1580ENTRY(setexit, 0)
1581	movab	setsav,r0
1582	movq	r6,(r0)+
1583	movq	r8,(r0)+
1584	movq	r10,(r0)+
1585	movq	8(fp),(r0)+		# ap, fp
1586	movab	4(ap),(r0)+		# sp
1587	movl	16(fp),(r0)		# pc
1588	clrl	r0
1589	ret
1590
1591ENTRY(reset, 0)
1592	movl	4(ap),r0	# returned value
1593	movab	setsav,r1
1594	movq	(r1)+,r6
1595	movq	(r1)+,r8
1596	movq	(r1)+,r10
1597	movq	(r1)+,r12
1598	movl	(r1)+,sp
1599	jmp 	*(r1)
1600
1601	.data
1602	.align  2
1603setsav:	.space	10*4
1604	.text
1605#endif
1606
1607	.globl	_whichqs
1608	.globl	_qs
1609	.globl	_cnt
1610
1611	.globl	_noproc
1612	.comm	_noproc,4
1613	.globl	_runrun
1614	.comm	_runrun,4
1615
1616/*
1617 * The following primitives use the fancy VAX instructions
1618 * much like VMS does.  _whichqs tells which of the 32 queues _qs
1619 * have processes in them.  Setrq puts processes into queues, Remrq
1620 * removes them from queues.  The running process is on no queue,
1621 * other processes are on a queue indexed by p->p_pri divided by 4,
1622 * which shrinks the 0-127 range of priorities into the 32 available
1623 * queues.
1624 */
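/*
 * A hypothetical C rendering of the queue bookkeeping done by Setrq and
 * Remrq below (qs[] entries assumed to be the usual two-pointer queue
 * heads, ph_link/ph_rlink):
 *
 *	int qi = p->p_pri >> 2;			// fold 0-127 into 32 queues
 *
 *	// Setrq: insert at the tail and mark the queue non-empty
 *	insque(p, qs[qi].ph_rlink);
 *	whichqs |= 1 << qi;
 *
 *	// Remrq: remove p, leaving the whichqs bit set only if the
 *	// queue still has entries
 *	remque(p);
 *	if (qs[qi].ph_link == (struct proc *)&qs[qi])
 *		whichqs &= ~(1 << qi);
 */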
1625
1626/*
1627 * Setrq(p), using fancy VAX instructions.
1628 *
1629 * Call should be made at splclock(), and p->p_stat should be SRUN
1630 */
1631	.align	1
1632JSBENTRY(Setrq, R0)
1633	tstl	P_RLINK(r0)		## firewall: p->p_rlink must be 0
1634	beql	set1			##
1635	pushab	set3			##
1636	calls	$1,_panic		##
1637set1:
1638	movzbl	P_PRI(r0),r1		# put on queue which is p->p_pri / 4
1639	ashl	$-2,r1,r1
1640	movaq	_qs[r1],r2
1641	insque	(r0),*4(r2)		# at end of queue
1642	bbss	r1,_whichqs,set2	# mark queue non-empty
1643set2:
1644	rsb
1645
1646set3:	.asciz	"setrq"
1647
1648/*
1649 * Remrq(p), using fancy VAX instructions
1650 *
1651 * Call should be made at splclock().
1652 */
1653	.align	1
1654JSBENTRY(Remrq, R0)
1655	movzbl	P_PRI(r0),r1
1656	ashl	$-2,r1,r1
1657	bbsc	r1,_whichqs,rem1
1658	pushab	rem3			# it wasn't recorded to be on its q
1659	calls	$1,_panic
1660rem1:
1661	remque	(r0),r2
1662	beql	rem2
1663	bbss	r1,_whichqs,rem2
1664rem2:
1665	clrl	P_RLINK(r0)		## for firewall checking
1666	rsb
1667
1668rem3:	.asciz	"remrq"
1669
1670/*
1671 * Masterpaddr is the p->p_addr of the running process on the master
1672 * processor.  On a multiprocessor system, the slave processors will have
1673 * an array of slavepaddr's.
1674 */
1675	.globl	_masterpaddr
1676	.data
1677_masterpaddr:
1678	.long	0
1679
1680	.text
1681sw0:	.asciz	"swtch"
1682
1683/*
1684 * When no processes are on the runq, Swtch branches to idle
1685 * to wait for something to come ready.
1686 */
1687	.globl	Idle
1688Idle: idle:
1689	movl	$1,_noproc
1690	mtpr	$0,$IPL			# must allow interrupts here
16911:
1692	tstl	_whichqs		# look for non-empty queue
1693	bneq	sw1
1694	brb	1b
1695
1696badsw:	pushab	sw0
1697	calls	$1,_panic
1698	/*NOTREACHED*/
1699
1700/*
1701 * Swtch(), using fancy VAX instructions
1702 */
1703	.align	1
1704JSBENTRY(Swtch, 0)
1705	incl	_cnt+V_SWTCH
1706sw1:	ffs	$0,$32,_whichqs,r0	# look for non-empty queue
1707	beql	idle			# if none, idle
1708	mtpr	$0x18,$IPL		# lock out all so _whichqs==_qs
1709	bbcc	r0,_whichqs,sw1		# proc moved via interrupt
1710	movaq	_qs[r0],r1
1711	remque	*(r1),r2		# r2 = p = highest pri process
1712	bvs	badsw			# make sure something was there
1713	beql	sw2
1714	insv	$1,r0,$1,_whichqs	# still more procs in this queue
1715sw2:
1716	clrl	_noproc
1717	clrl	_runrun
1718#ifdef notdef
1719	tstl	P_WCHAN(r2)		## firewalls
1720	bneq	badsw			##
1721	cmpb	P_STAT(r2),$SRUN	##
1722	bneq	badsw			##
1723#endif
1724	clrl	P_RLINK(r2)		##
1725	movl	*P_ADDR(r2),r0
1726#ifdef notdef
1727	cmpl	r0,_masterpaddr		# resume of current proc is easy
1728	beql	res0
1729#endif
1730	movl	r0,_masterpaddr
1731	ashl	$PGSHIFT,r0,r0		# r0 = pcbb(p)
1732/* fall into... */
1733
1734/*
1735 * Resume(pf)
1736 */
1737JSBENTRY(Resume, R0)
1738	mtpr	$HIGH,$IPL			# no interrupts, please
1739	movl	_CMAP2,_u+PCB_CMAP2	# yech
1740	svpctx
1741	mtpr	r0,$PCBB
1742	ldpctx
1743	movl	_u+PCB_CMAP2,_CMAP2	# yech
1744	mtpr	$_CADDR2,$TBIS
1745res0:
1746	tstl	_u+PCB_SSWAP
1747	bneq	res1
1748	rei
1749res1:
1750	movl	_u+PCB_SSWAP,r0		# restore alternate saved context
1751	clrl	_u+PCB_SSWAP
1752	movq	(r0)+,r6			# restore r6, r7
1753	movq	(r0)+,r8			# restore r8, r9
1754	movq	(r0)+,r10			# restore r10, r11
1755	movq	(r0)+,r12			# restore ap, fp
1756	movl	(r0)+,r1			# saved sp
1757	cmpl	r1,sp				# must be a pop
1758	bgequ	1f
1759	pushab	2f
1760	calls	$1,_panic
1761	/* NOTREACHED */
17621:
1763	movl	r1,sp				# restore sp
1764	pushl	$PSL_PRVMOD			# return psl
1765	pushl	(r0)				# address to return to
1766	rei
1767
17682:	.asciz	"ldctx"
1769
1770/*
1771 * {fu,su},{byte,word}, all massaged by asm.sed to jsb's
1772 */
1773	.align	1
1774JSBENTRY(Fuword, R0)
1775	prober	$3,$4,(r0)
1776	beql	fserr
1777	movl	(r0),r0
1778	rsb
1779fserr:
1780	mnegl	$1,r0
1781	rsb
1782
1783	.align	1
1784JSBENTRY(Fubyte, R0)
1785	prober	$3,$1,(r0)
1786	beql	fserr
1787	movzbl	(r0),r0
1788	rsb
1789
1790	.align	1
1791JSBENTRY(Suword, R0|R1)
1792	probew	$3,$4,(r0)
1793	beql	fserr
1794	movl	r1,(r0)
1795	clrl	r0
1796	rsb
1797
1798	.align	1
1799JSBENTRY(Subyte, R0|R1)
1800	probew	$3,$1,(r0)
1801	beql	fserr
1802	movb	r1,(r0)
1803	clrl	r0
1804	rsb
1805
1806/*
1807 * Copy 1 relocation unit (NBPG bytes)
1808 * from user virtual address to physical address
1809 */
1810ENTRY(copyseg, 0)
1811	bisl3	$PG_V|PG_KW,8(ap),_CMAP2
1812	mtpr	$_CADDR2,$TBIS	# invalidate entry for copy
1813	movc3	$NBPG,*4(ap),_CADDR2
1814	ret
1815
1816/*
1817 * zero out physical memory
1818 * specified in relocation units (NBPG bytes)
1819 */
1820ENTRY(clearseg, 0)
1821	bisl3	$PG_V|PG_KW,4(ap),_CMAP1
1822	mtpr	$_CADDR1,$TBIS
1823	movc5	$0,(sp),$0,$NBPG,_CADDR1
1824	ret
1825
1826/*
1827 * Check address.
1828 * Given virtual address, byte count, and rw flag
1829 * returns 0 on no access.
1830 */
1831ENTRY(useracc, 0)
1832	movl	4(ap),r0		# get va
1833	movl	8(ap),r1		# count
1834	tstl	12(ap)			# test for read access ?
1835	bneq	userar			# yes
1836	cmpl	$NBPG,r1			# can we do it in one probe ?
1837	bgeq	uaw2			# yes
1838uaw1:
1839	probew	$3,$NBPG,(r0)
1840	beql	uaerr			# no access
1841	addl2	$NBPG,r0
1842	acbl	$NBPG+1,$-NBPG,r1,uaw1
1843uaw2:
1844	probew	$3,r1,(r0)
1845	beql	uaerr
1846	movl	$1,r0
1847	ret
1848
1849userar:
1850	cmpl	$NBPG,r1
1851	bgeq	uar2
1852uar1:
1853	prober	$3,$NBPG,(r0)
1854	beql	uaerr
1855	addl2	$NBPG,r0
1856	acbl	$NBPG+1,$-NBPG,r1,uar1
1857uar2:
1858	prober	$3,r1,(r0)
1859	beql	uaerr
1860	movl	$1,r0
1861	ret
1862uaerr:
1863	clrl	r0
1864	ret
1865
1866/*
1867 * kernacc - check for kernel access privileges
1868 *
1869 * We can't use the probe instruction directly because
1870 * it ors together current and previous mode.
1871 */
1872 ENTRY(kernacc, 0)
1873	movl	4(ap),r0	# virtual address
1874	bbcc	$31,r0,kacc1
1875	bbs	$30,r0,kacerr
1876	mfpr	$SBR,r2		# address and length of page table (system)
1877	bbss	$31,r2,0f; 0:
1878	mfpr	$SLR,r3
1879	brb	kacc2
1880kacc1:
1881	bbsc	$30,r0,kacc3
1882	mfpr	$P0BR,r2	# user P0
1883	mfpr	$P0LR,r3
1884	brb	kacc2
1885kacc3:
1886	mfpr	$P1BR,r2	# user P1 (stack)
1887	mfpr	$P1LR,r3
1888kacc2:
1889	addl3	8(ap),r0,r1	# ending virtual address
1890	addl2	$NBPG-1,r1
1891	ashl	$-PGSHIFT,r0,r0
1892	ashl	$-PGSHIFT,r1,r1
1893	bbs	$31,4(ap),kacc6
1894	bbc	$30,4(ap),kacc6
1895	cmpl	r0,r3		# user stack
1896	blss	kacerr		# address too low
1897	brb	kacc4
1898kacc6:
1899	cmpl	r1,r3		# compare last page to P0LR or SLR
1900	bgtr	kacerr		# address too high
1901kacc4:
1902	movl	(r2)[r0],r3
1903	bbc	$31,4(ap),kacc4a
1904	bbc	$31,r3,kacerr	# valid bit is off
1905kacc4a:
1906	cmpzv	$27,$4,r3,$1	# check protection code
1907	bleq	kacerr		# no access allowed
1908	tstb	12(ap)
1909	bneq	kacc5		# only check read access
1910	cmpzv	$27,$2,r3,$3	# check low 2 bits of prot code
1911	beql	kacerr		# no write access
1912kacc5:
1913	aoblss	r1,r0,kacc4	# next page
1914	movl	$1,r0		# no errors
1915	ret
1916kacerr:
1917	clrl	r0		# error
1918	ret
1919/*
1920 * Extracted and unrolled most common case of pagein (hopefully):
1921 *	resident and not on free list (reclaim of page is purely
1922 *	for the purpose of simulating a reference bit)
1923 *
1924 * Built in constants:
1925 *	CLSIZE of 2, any bit fields in pte's
1926 */
1927	.text
1928	.globl	Fastreclaim
1929Fastreclaim:
1930	PUSHR
1931#ifdef GPROF
1932	movl	fp,-(sp)
1933	movab	12(sp),fp
1934	jsb	mcount
1935	movl	(sp)+,fp
1936#endif GPROF
1937	extzv	$9,$23,28(sp),r3	# virtual address
1938	bicl2	$1,r3			# v = clbase(btop(virtaddr));
1939	movl	_u+U_PROCP,r5		# p = u.u_procp
1940					# from vtopte(p, v) ...
1941	movl	$1,r2			# type = CTEXT;
1942	cmpl	r3,P_TSIZE(r5)
1943	jlssu	1f			# if (isatsv(p, v)) {
1944	addl3	P_TSIZE(r5),P_DSIZE(r5),r0
1945	cmpl	r3,r0
1946	jgequ	2f
1947	clrl	r2			#	type = !CTEXT;
19481:
1949	ashl	$2,r3,r4
1950	addl2	P_P0BR(r5),r4		#	tptopte(p, vtotp(p, v));
1951	jbr	3f
19522:
1953	cvtwl	P_SZPT(r5),r4		# } else (isassv(p, v)) {
1954	ashl	$7,r4,r4
1955	subl2	$0x400000,r4
1956	addl2	r3,r4
1957	ashl	$2,r4,r4
1958	addl2	P_P0BR(r5),r4		#	sptopte(p, vtosp(p, v));
1959	clrl	r2			# 	type = !CTEXT;
19603:					# }
1961	bitb	$0x82,3(r4)
1962	beql	2f			# if (pte->pg_v || pte->pg_fod)
1963	POPR; rsb			#	let pagein handle it
19642:
1965	bicl3	$0xffe00000,(r4),r0
1966	jneq	2f			# if (pte->pg_pfnum == 0)
1967	POPR; rsb			# 	let pagein handle it
19682:
1969	subl2	_firstfree,r0
1970	ashl	$-1,r0,r0
1971	incl	r0			# pgtocm(pte->pg_pfnum)
1972	mull2	$SZ_CMAP,r0
1973	addl2	_cmap,r0		# &cmap[pgtocm(pte->pg_pfnum)]
1974	tstl	r2
1975	jeql	2f			# if (type == CTEXT &&
1976	jbc	$C_INTRANS,(r0),2f	#     c_intrans)
1977	POPR; rsb			# 	let pagein handle it
19782:
1979	jbc	$C_FREE,(r0),2f		# if (c_free)
1980	POPR; rsb			# 	let pagein handle it
19812:
1982	bisb2	$0x80,3(r4)		# pte->pg_v = 1;
1983	jbc	$26,4(r4),2f		# if (anycl(pte, pg_m)
1984	bisb2	$0x04,3(r4)		#	pte->pg_m = 1;
19852:
1986	bicw3	$0x7f,2(r4),r0
1987	bicw3	$0xff80,6(r4),r1
1988	bisw3	r0,r1,6(r4)		# distcl(pte);
1989	ashl	$PGSHIFT,r3,r0
1990	mtpr	r0,$TBIS
1991	addl2	$NBPG,r0
1992	mtpr	r0,$TBIS		# tbiscl(v);
1993	tstl	r2
1994	jeql	2f			# if (type == CTEXT)
1995	movl	P_TEXTP(r5),r0
1996	movl	X_CADDR(r0),r5		# for (p = p->p_textp->x_caddr; p; ) {
1997	jeql	2f
1998	ashl	$2,r3,r3
19993:
2000	addl3	P_P0BR(r5),r3,r0	#	tpte = tptopte(p, tp);
2001	bisb2	$1,P_FLAG+3(r5)		#	p->p_flag |= SPTECHG;
2002	movl	(r4),(r0)+		#	for (i = 0; i < CLSIZE; i++)
2003	movl	4(r4),(r0)		#		tpte[i] = pte[i];
2004	movl	P_XLINK(r5),r5		#	p = p->p_xlink;
2005	jneq	3b			# }
20062:					# collect a few statistics...
2007	incl	_u+U_RU+RU_MINFLT	# u.u_ru.ru_minflt++;
2008	moval	_cnt,r0
2009	incl	V_FAULTS(r0)		# cnt.v_faults++;
2010	incl	V_PGREC(r0)		# cnt.v_pgrec++;
2011	incl	V_FASTPGREC(r0)		# cnt.v_fastpgrec++;
2012	incl	V_TRAP(r0)		# cnt.v_trap++;
2013	POPR
2014	addl2	$8,sp			# pop pc, code
2015	mtpr	$HIGH,$IPL		## don't go to a higher IPL (GROT)
2016	rei
2017