1/*	$NetBSD: locore.s,v 1.67 2001/12/05 18:04:35 atatat Exp $	*/
2
3/*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by the University of
23 *	California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 *    may be used to endorse or promote products derived from this software
26 *    without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 *
42 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
43 */
44
45#include "opt_compat_netbsd.h"
46#include "opt_compat_svr4.h"
47#include "opt_compat_sunos.h"
48#include "opt_ddb.h"
49#include "opt_fpsp.h"
50#include "opt_kgdb.h"
51#include "opt_lockdebug.h"
52
53#include "ite.h"
54#include "fd.h"
55#include "par.h"
56#include "assym.h"
57
58#include <machine/asm.h>
59
60| This is for kvm_mkdb, and should be the address of the beginning
61| of the kernel text segment (not necessarily the same as kernbase).
62	.text
63GLOBAL(kernel_text)
64
65/*
66 * Temporary stack for a variety of purposes.
67 * Try and make this the first thing in the data segment so it
68 * is page aligned.  Note that if we overflow here, we run into
69 * our text segment.
70 */
71	.data
72	.space	NBPG
73ASLOCAL(tmpstk)
74
75#include <x68k/x68k/vectors.s>
76
77	.text
78/*
79 * This is where we wind up if the kernel jumps to location 0.
80 * (i.e. a bogus PC)  This is known to immediately follow the vector
81 * table and is hence at 0x400 (see reset vector in vectors.s).
82 */
83	PANIC("kernel jump to zero")
84	/* NOTREACHED */
85
86/*
87 * Trap/interrupt vector routines
88 */
89#include <m68k/m68k/trap_subr.s>
90
91ENTRY_NOPROFILE(buserr)
92ENTRY_NOPROFILE(buserr60)		| XXX
93	tstl	_C_LABEL(nofault)	| device probe?
94	jeq	Lberr			| no, handle as usual
95	movl	_C_LABEL(nofault),%sp@-	| yes,
96	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
97Lberr:
98#if defined(M68040) || defined(M68060)
99	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040/060?
100	jne	_C_LABEL(addrerr)	| no, skip
101	clrl	%sp@-			| stack adjust count
102	moveml	#0xFFFF,%sp@-		| save user registers
103	movl	%usp,%a0		| save the user SP
104	movl	%a0,%sp@(FR_SP)		|   in the savearea
105	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
106#if defined(M68060)
107	cmpl	#CPU_68060,_C_LABEL(cputype) | 68060?
108	jne	Lbenot060
109	movel	%a1@(12),%d0		| grab FSLW
110	btst	#2,%d0			| branch prediction error?
111	jeq	Lnobpe			| no, skip
112	movc	%cacr,%d1
113	orl	#IC60_CABC,%d1		| clear all branch cache entries
114	movc	%d1,%cacr
115	movl	%d0,%d1
116	andl	#0x7ffd,%d1		| check other faults
117	jeq	_ASM_LABEL(faultstkadjnotrap)
118Lnobpe:
119| XXX this is not needed.
120|	movl	%d0,%sp@		| code is FSLW now.
121
122| we need to adjust for misaligned addresses
123	movl	%a1@(8),%d1		| grab VA
124	btst	#27,%d0			| check for mis-aligned access
125	jeq	Lberr3			| no, skip
126	addl	#28,%d1			| yes, get into next page
127					| operand case: 3,
128					| instruction case: 4+12+12
129					| XXX instr. case not done yet
130	andl	#PG_FRAME,%d1		| and truncate
131Lberr3:
132	movl	%d1,%sp@-		| push fault VA
133	movl	%d0,%sp@-		| and FSLW
134	andw	#0x1f80,%d0
135	jeq	Lisberr
136	jra	Lismerr
137Lbenot060:
138#endif
139	moveq	#0,%d0
140	movw	%a1@(12),%d0		| grab SSW
141	movl	%a1@(20),%d1		| and fault VA
142	btst	#11,%d0			| check for mis-aligned access
143	jeq	Lberr2			| no, skip
144	addl	#3,%d1			| yes, get into next page
145	andl	#PG_FRAME,%d1		| and truncate
146Lberr2:
147	movl	%d1,%sp@-		| push fault VA
148	movl	%d0,%sp@-		| and padded SSW
149	btst	#10,%d0			| ATC bit set?
150	jeq	Lisberr			| no, must be a real bus error
151	movc	%dfc,%d1		| yes, get MMU fault
152	movc	%d0,%dfc		| store faulting function code
153	movl	%sp@(4),%a0		| get faulting address
154	.word	0xf568			| ptestr a0@
155	movc	%d1,%dfc
156	.long	0x4e7a0805		| movc mmusr,d0
157	movw	%d0,%sp@		| save (ONLY LOW 16 BITS!)
158	jra	Lismerr
159#endif
160ENTRY_NOPROFILE(addrerr)
161	clrl	%sp@-			| stack adjust count
162	moveml	#0xFFFF,%sp@-		| save user registers
163	movl	%usp,%a0		| save the user SP
164	movl	%a0,%sp@(FR_SP)		|   in the savearea
165	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
166#if defined(M68040) || defined(M68060)
167	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
168	jne	Lbenot040		| no, skip
169	movl	%a1@(8),%sp@-		| yes, push fault address
170	clrl	%sp@-			| no SSW for address fault
171	jra	Lisaerr			| go deal with it
172Lbenot040:
173#endif
174	moveq	#0,%d0
175	movw	%a1@(10),%d0		| grab SSW for fault processing
176	btst	#12,%d0			| RB set?
177	jeq	LbeX0			| no, test RC
178	bset	#14,%d0			| yes, must set FB
179	movw	%d0,%a1@(10)		| for hardware too
180LbeX0:
181	btst	#13,%d0			| RC set?
182	jeq	LbeX1			| no, skip
183	bset	#15,%d0			| yes, must set FC
184	movw	%d0,%a1@(10)		| for hardware too
185LbeX1:
186	btst	#8,%d0			| data fault?
187	jeq	Lbe0			| no, check for hard cases
188	movl	%a1@(16),%d1		| fault address is as given in frame
189	jra	Lbe10			| that's it
190Lbe0:
191	btst	#4,%a1@(6)		| long (type B) stack frame?
192	jne	Lbe4			| yes, go handle
193	movl	%a1@(2),%d1		| no, can use saved PC
194	btst	#14,%d0			| FB set?
195	jeq	Lbe3			| no, try FC
196	addql	#4,%d1			| yes, adjust address
197	jra	Lbe10			| done
198Lbe3:
199	btst	#15,%d0			| FC set?
200	jeq	Lbe10			| no, done
201	addql	#2,%d1			| yes, adjust address
202	jra	Lbe10			| done
203Lbe4:
204	movl	%a1@(36),%d1		| long format, use stage B address
205	btst	#15,%d0			| FC set?
206	jeq	Lbe10			| no, all done
207	subql	#2,%d1			| yes, adjust address
208Lbe10:
209	movl	%d1,%sp@-		| push fault VA
210	movl	%d0,%sp@-		| and padded SSW
211	movw	%a1@(6),%d0		| get frame format/vector offset
212	andw	#0x0FFF,%d0		| clear out frame format
213	cmpw	#12,%d0			| address error vector?
214	jeq	Lisaerr			| yes, go to it
215	movl	%d1,%a0			| fault address
216	movl	%sp@,%d0		| function code from ssw
217	btst	#8,%d0			| data fault?
218	jne	Lbe10a
219	movql	#1,%d0			| user program access FC
220					| (we don't separate data/program)
221	btst	#5,%a1@			| supervisor mode?
222	jeq	Lbe10a			| if no, done
223	movql	#5,%d0			| else supervisor program access
224Lbe10a:
225	ptestr	%d0,%a0@,#7		| do a table search
226	pmove	%psr,%sp@		| save result
227	movb	%sp@,%d1
228	btst	#2,%d1			| invalid? (incl. limit viol and berr)
229	jeq	Lmightnotbemerr		| no -> wp check
230	btst	#7,%d1			| is it MMU table berr?
231	jeq	Lismerr			| no, must be fast
232	jra	Lisberr1		| real bus err needs not be fast
233Lmightnotbemerr:
234	btst	#3,%d1			| write protect bit set?
235	jeq	Lisberr1		| no, must be bus error
236	movl	%sp@,%d0		| ssw into low word of d0
237	andw	#0xc0,%d0		| write protect is set on page:
238	cmpw	#0x40,%d0		| was it read cycle?
239	jeq	Lisberr1		| yes, was not WPE, must be bus err
240Lismerr:
241	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
242	jra	_ASM_LABEL(faultstkadj)	| and deal with it
243Lisaerr:
244	movl	#T_ADDRERR,%sp@-	| mark address error
245	jra	_ASM_LABEL(faultstkadj)	| and deal with it
246Lisberr1:
247	clrw	%sp@			| re-clear pad word
248Lisberr:
249	movl	#T_BUSERR,%sp@-		| mark bus error
250	jra	_ASM_LABEL(faultstkadj)	| and deal with it
251
252/*
253 * FP exceptions.
254 */
255#include "opt_fpu_emulate.h"
256ENTRY_NOPROFILE(fpfline)
257#if defined(M68040)
258	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
259	jne	Lfp_unimp		| no, skip FPSP
260	cmpw	#0x202c,%sp@(6)		| format type 2?
261	jne	_C_LABEL(illinst)	| no, not an FP emulation
262#ifdef FPSP
263	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
264#else
265	clrl	%sp@-			| stack adjust count
266	moveml	#0xFFFF,%sp@-		| save registers
267	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
268	jra	_ASM_LABEL(fault)	| do it
269#endif
270Lfp_unimp:
271#endif
272#ifdef FPU_EMULATE
273	clrl	%sp@-			| stack adjust count
274	moveml	#0xFFFF,%sp@-		| save registers
275	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
276	jra	_ASM_LABEL(fault)	| do it
277#else
278	jra	_C_LABEL(illinst)
279#endif
280
281ENTRY_NOPROFILE(fpunsupp)
282#if defined(M68040)
283	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040?
284	jne	Lfp_unsupp		| no, skip FPSP
285#ifdef FPSP
286	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
287#else
288	clrl	%sp@-			| stack adjust count
289	moveml	#0xFFFF,%sp@-		| save registers
290	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
291	jra	_ASM_LABEL(fault)	| do it
292#endif
293Lfp_unsupp:
294#endif
295#ifdef FPU_EMULATE
296	clrl	%sp@-			| stack adjust count
297	moveml	#0xFFFF,%sp@-		| save registers
298	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
299	jra	_ASM_LABEL(fault)	| do it
300#else
301	jra	_C_LABEL(illinst)
302#endif
303
304/*
305 * Handles all other FP coprocessor exceptions.
306 * Note that since some FP exceptions generate mid-instruction frames
307 * and may cause signal delivery, we need to test for stack adjustment
308 * after the trap call.
309 */
310ENTRY_NOPROFILE(fpfault)
311	clrl	%sp@-		| stack adjust count
312	moveml	#0xFFFF,%sp@-	| save user registers
313	movl	%usp,%a0	| and save
314	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
315	clrl	%sp@-		| no VA arg
316	movl	_C_LABEL(curpcb),%a0 | current pcb
317	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
318	fsave	%a0@		| save state
319#if defined(M68040) || defined(M68060)
320	/* always null state frame on 68040, 68060 */
321	cmpl	#FPU_68040,_C_LABEL(fputype)
322	jle	Lfptnull
323#endif
324	tstb	%a0@		| null state frame?
325	jeq	Lfptnull	| yes, safe
326	clrw	%d0		| no, need to tweak BIU
327	movb	%a0@(1),%d0	| get frame size
328	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
329Lfptnull:
330	fmovem	%fpsr,%sp@-	| push fpsr as code argument
331	frestore %a0@		| restore state
332	movl	#T_FPERR,%sp@-	| push type arg
333	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup
334
335/*
336 * Other exceptions only cause four- and six-word stack frames and require
337 * no post-trap stack adjustment.
338 */
339
340ENTRY_NOPROFILE(badtrap)
341	moveml	#0xC0C0,%sp@-		| save scratch regs
342	movw	%sp@(22),%sp@-		| push exception vector info
343	clrw	%sp@-
344	movl	%sp@(22),%sp@-		| and PC
345	jbsr	_C_LABEL(straytrap)	| report
346	addql	#8,%sp			| pop args
347	moveml	%sp@+,#0x0303		| restore regs
348	jra	_ASM_LABEL(rei)		| all done
349
350ENTRY_NOPROFILE(trap0)
351	clrl	%sp@-			| stack adjust count
352	moveml	#0xFFFF,%sp@-		| save user registers
353	movl	%usp,%a0		| save the user SP
354	movl	%a0,%sp@(FR_SP)		|   in the savearea
355	movl	%d0,%sp@-		| push syscall number
356	jbsr	_C_LABEL(syscall)	| handle it
357	addql	#4,%sp			| pop syscall arg
358	tstl	_C_LABEL(astpending)
359	jne	Lrei2
360	tstb	_C_LABEL(ssir)
361	jeq	Ltrap1
362	movw	#SPL1,%sr
363	tstb	_C_LABEL(ssir)
364	jne	Lsir1
365Ltrap1:
366	movl	%sp@(FR_SP),%a0		| grab and restore
367	movl	%a0,%usp		|   user SP
368	moveml	%sp@+,#0x7FFF		| restore most registers
369	addql	#8,%sp			| pop SP and stack adjust
370	rte
371
372/*
373 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
374 *	cachectl(command, addr, length)
375 * command in d0, addr in a1, length in d1
376 */
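/*
 * At the C level this stub amounts to the call
 *	cachectl1(command, addr, length, curproc)
 * with the fourth argument (curproc) supplied here rather than by the caller.
 */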
377ENTRY_NOPROFILE(trap12)
378	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
379	movl	%d1,%sp@-		| push length
380	movl	%a1,%sp@-		| push addr
381	movl	%d0,%sp@-		| push command
382	jbsr	_C_LABEL(cachectl1)	| do it
383	lea	%sp@(16),%sp		| pop args
384	jra	_ASM_LABEL(rei)		| all done
385
386/*
387 * Trace (single-step) trap.  Kernel-mode is special.
388 * User mode traps are simply passed on to trap().
389 */
390ENTRY_NOPROFILE(trace)
391	clrl	%sp@-			| stack adjust count
392	moveml	#0xFFFF,%sp@-
393	moveq	#T_TRACE,%d0
394
395	| Check PSW and see what happened.
396	|   T=0 S=0	(should not happen)
397	|   T=1 S=0	trace trap from user mode
398	|   T=0 S=1	trace trap on a trap instruction
399	|   T=1 S=1	trace trap from system mode (kernel breakpoint)
400
401	movw	%sp@(FR_HW),%d1		| get PSW
402	notw	%d1			| XXX no support for T0 on 680[234]0
403	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
404	jeq	Lkbrkpt			| yes, kernel breakpoint
405	jra	_ASM_LABEL(fault)	| no, user-mode fault
406
407/*
408 * Trap 15 is used for:
409 *	- GDB breakpoints (in user programs)
410 *	- KGDB breakpoints (in the kernel)
411 *	- trace traps for SUN binaries (not fully supported yet)
412 * User mode traps are simply passed to trap().
413 */
414ENTRY_NOPROFILE(trap15)
415	clrl	%sp@-			| stack adjust count
416	moveml	#0xFFFF,%sp@-
417	moveq	#T_TRAP15,%d0
418	movw	%sp@(FR_HW),%d1		| get PSW
419	andw	#PSL_S,%d1		| from system mode?
420	jne	Lkbrkpt			| yes, kernel breakpoint
421	jra	_ASM_LABEL(fault)	| no, user-mode fault
422
423Lkbrkpt: | Kernel-mode breakpoint or trace trap. (d0=trap_type)
424	| Save the system sp rather than the user sp.
425	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
426	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
427	movl	%a6,%sp@(FR_SP)		|  from before trap
428
429	| If we are not on tmpstk, switch to it.
430	| (so debugger can change the stack pointer)
431	movl	%a6,%d1
432	cmpl	#_ASM_LABEL(tmpstk),%d1
433	jls	Lbrkpt2			| already on tmpstk
434	| Copy frame to the temporary stack
435	movl	%sp,%a0			| a0=src
436	lea	_ASM_LABEL(tmpstk)-96,%a1 | a1=dst
437	movl	%a1,%sp			| sp=new frame
438	moveq	#FR_SIZE,%d1
439Lbrkpt1:
440	movl	%a0@+,%a1@+
441	subql	#4,%d1
442	jgt	Lbrkpt1
443
444Lbrkpt2:
445	| Call the trap handler for the kernel debugger.
446	| Do not call trap() to do it, so that we can
447	| set breakpoints in trap() if we want.  We know
448	| the trap type is either T_TRACE or T_BREAKPOINT.
449	| If we have both DDB and KGDB, let KGDB see it first,
450	| because KGDB will just return 0 if not connected.
451	| Save args in d2, a2
452	movl	%d0,%d2			| trap type
453	movl	%sp,%a2			| frame ptr
454#ifdef KGDB
455	| Let KGDB handle it (if connected)
456	movl	%a2,%sp@-		| push frame ptr
457	movl	%d2,%sp@-		| push trap type
458	jbsr	_C_LABEL(kgdb_trap)	| handle the trap
459	addql	#8,%sp			| pop args
460	cmpl	#0,%d0			| did kgdb handle it?
461	jne	Lbrkpt3			| yes, done
462#endif
463#ifdef DDB
464	| Let DDB handle it
465	movl	%a2,%sp@-		| push frame ptr
466	movl	%d2,%sp@-		| push trap type
467	jbsr	_C_LABEL(kdb_trap)	| handle the trap
468	addql	#8,%sp			| pop args
469#if 0	/* not needed on hp300 */
470	cmpl	#0,%d0			| did ddb handle it?
471	jne	Lbrkpt3			| yes, done
472#endif
473#endif
474	/* Sun 3 drops into PROM here. */
475Lbrkpt3:
476	| The stack pointer may have been modified, or
477	| data below it modified (by kgdb push call),
478	| so push the hardware frame at the current sp
479	| before restoring registers and returning.
480
481	movl	%sp@(FR_SP),%a0		| modified sp
482	lea	%sp@(FR_SIZE),%a1	| end of our frame
483	movl	%a1@-,%a0@-		| copy 2 longs with
484	movl	%a1@-,%a0@-		| ... predecrement
485	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
486	moveml	%sp@+,#0x7FFF		| restore all but sp
487	movl	%sp@,%sp		| ... and sp
488	rte				| all done
489
490/* Use common m68k sigreturn */
491#include <m68k/m68k/sigreturn.s>
492
493/*
494 * Interrupt handlers. (auto vector.... not used)
495 * original(amiga) routines:
496 *	Level 0:	Spurious: ignored.
497 *	Level 1:	builtin-RS232 TBE, softint (not used yet)
498 *	Level 2:	keyboard (CIA-A) + DMA + SCSI
499 *	Level 3:	VBL
500 *	Level 4:	not used
501 *	Level 5:	builtin-RS232 RBF
502 *	Level 6:	Clock (CIA-B-Timers)
503 *	Level 7:	Non-maskable: shouldn't be possible. ignore.
504 */
505
506/* Provide a generic interrupt dispatcher; only hardclock (int6) is handled
507 * specially, to improve performance.
508 */
509
510#define INTERRUPT_SAVEREG	moveml	#0xC0C0,%sp@-
511#define INTERRUPT_RESTOREREG	moveml	%sp@+,#0x0303
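/*
 * Note on the register masks: for a moveml to a predecrement destination the
 * mask is bit-reversed, so #0xC0C0 pushed with %sp@- and #0x0303 popped with
 * %sp@+ both name the same scratch registers, %d0-%d1/%a0-%a1.
 */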
512
513ENTRY_NOPROFILE(spurintr)	/* level 0 */
514	addql	#1,_C_LABEL(intrcnt)+0
515	rte				| XXX mfpcure (x680x0 hardware bug)
516
517ENTRY_NOPROFILE(kbdtimer)
518	rte
519
520ENTRY_NOPROFILE(powtrap)
521#include "pow.h"
522#if NPOW > 0
523	INTERRUPT_SAVEREG
524	jbsr	_C_LABEL(powintr)
525	INTERRUPT_RESTOREREG
526#endif
527	addql	#1,_C_LABEL(intrcnt)+48
528	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
529	jra	rei
530
531ENTRY_NOPROFILE(com0trap)
532#include "com.h"
533#if NXCOM > 0
534	INTERRUPT_SAVEREG
535	movel	#0,%sp@-
536	jbsr	_C_LABEL(comintr)
537	addql	#4,%sp
538	INTERRUPT_RESTOREREG
539#endif
540	addql	#1,_C_LABEL(intrcnt)+52
541	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
542	jra	rei
543
544ENTRY_NOPROFILE(com1trap)
545#if NXCOM > 1
546	INTERRUPT_SAVEREG
547	movel	#1,%sp@-
548	jbsr	_C_LABEL(comintr)
549	addql	#4,%sp
550	INTERRUPT_RESTOREREG
551#endif
552	addql	#1,_C_LABEL(intrcnt)+52
553	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
554	jra	rei
555
556ENTRY_NOPROFILE(intiotrap)
557	INTERRUPT_SAVEREG
558#if 0
559	movw	#PSL_HIGHIPL,%sr	| XXX
560#endif
561	pea	%sp@(16-(FR_HW))	| XXX
562	jbsr	_C_LABEL(intio_intr)
563	addql	#4,%sp
564	INTERRUPT_RESTOREREG
565	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
566	jra	rei
567
568ENTRY_NOPROFILE(lev1intr)
569ENTRY_NOPROFILE(lev2intr)
570ENTRY_NOPROFILE(lev3intr)
571ENTRY_NOPROFILE(lev4intr)
572ENTRY_NOPROFILE(lev5intr)
573ENTRY_NOPROFILE(lev6intr)
574	INTERRUPT_SAVEREG
575Lnotdma:
576	lea	_C_LABEL(intrcnt),%a0
577	movw	%sp@(22),%d0		| use vector offset
578	andw	#0xfff,%d0		|   sans frame type
579	addql	#1,%a0@(-0x60,%d0:w)	|     to increment apropos counter
580	movw	%sr,%sp@-		| push current SR value
581	clrw	%sp@-			|    padded to longword
582	jbsr	_C_LABEL(intrhand)	| handle interrupt
583	addql	#4,%sp			| pop SR
584	INTERRUPT_RESTOREREG
585	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
586	jra	_ASM_LABEL(rei)
587
588ENTRY_NOPROFILE(timertrap)
589	moveml	#0xC0C0,%sp@-		| save scratch registers
590	addql	#1,_C_LABEL(intrcnt)+36	| count hardclock interrupts
591	lea	%sp@(16),%a1		| a1 = &clockframe
592	movl	%a1,%sp@-
593	jbsr	_C_LABEL(hardclock)	| hardclock(&frame)
594	addql	#4,%sp
595	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS | chalk up another interrupt
596	moveml	%sp@+,#0x0303		| restore scratch registers
597	jra	_ASM_LABEL(rei)		| all done
598
599ENTRY_NOPROFILE(lev7intr)
600	addql	#1,_C_LABEL(intrcnt)+28
601	clrl	%sp@-
602	moveml	#0xFFFF,%sp@-		| save registers
603	movl	%usp,%a0		| and save
604	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
605	jbsr	_C_LABEL(nmihand)	| call handler
606	movl	%sp@(FR_SP),%a0		| restore
607	movl	%a0,%usp		|   user SP
608	moveml	%sp@+,#0x7FFF		| and remaining registers
609	addql	#8,%sp			| pop SP and stack adjust
610	jra	_ASM_LABEL(rei)		| all done
611
612/*
613 * floppy ejection trap
614 */
615
616ENTRY_NOPROFILE(fdeject)
617	jra	_ASM_LABEL(rei)
618
619/*
620 * Emulation of VAX REI instruction.
621 *
622 * This code deals with checking for and servicing ASTs
623 * (profiling, scheduling) and software interrupts (network, softclock).
624 * We check for ASTs first, just like the VAX.  To avoid excess overhead
625 * the T_ASTFLT handling code will also check for software interrupts so we
626 * do not have to do it here.  After identifying that we need an AST we
627 * drop the IPL to allow device interrupts.
628 *
629 * This code is complicated by the fact that sendsig may have been called
630 * necessitating a stack cleanup.
631 */
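/*
 * `ssir' is a one-byte "software interrupt requested" flag; it is tested
 * here, in spl0 and on the trap0 (syscall) return path, and serviced at
 * SPL1 via a T_SSIR trap.
 */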
632BSS(ssir,1)
633ASENTRY_NOPROFILE(rei)
634	tstl	_C_LABEL(astpending)	| AST pending?
635	jeq	Lchksir			| no, go check for SIR
636Lrei1:
637	btst	#5,%sp@			| yes, are we returning to user mode?
638	jne	Lchksir			| no, go check for SIR
639	movw	#PSL_LOWIPL,%sr		| lower SPL
640	clrl	%sp@-			| stack adjust
641	moveml	#0xFFFF,%sp@-		| save all registers
642	movl	%usp,%a1		| including
643	movl	%a1,%sp@(FR_SP)		|    the user's SP
644Lrei2:
645	clrl	%sp@-			| VA == none
646	clrl	%sp@-			| code == none
647	movl	#T_ASTFLT,%sp@-		| type == async system trap
648	jbsr	_C_LABEL(trap)		| go handle it
649	lea	%sp@(12),%sp		| pop value args
650	movl	%sp@(FR_SP),%a0		| restore user SP
651	movl	%a0,%usp		|   from save area
652	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
653	jne	Laststkadj		| yes, go to it
654	moveml	%sp@+,#0x7FFF		| no, restore most user regs
655	addql	#8,%sp			| toss SP and stack adjust
656	rte				| and do real RTE
657Laststkadj:
658	lea	%sp@(FR_HW),%a1		| pointer to HW frame
659	addql	#8,%a1			| source pointer
660	movl	%a1,%a0			| source
661	addw	%d0,%a0			|  + hole size = dest pointer
662	movl	%a1@-,%a0@-		| copy
663	movl	%a1@-,%a0@-		|  8 bytes
664	movl	%a0,%sp@(FR_SP)		| new SSP
665	moveml	%sp@+,#0x7FFF		| restore user registers
666	movl	%sp@,%sp		| and our SP
667	rte				| and do real RTE
668Lchksir:
669	tstb	_C_LABEL(ssir)		| SIR pending?
670	jeq	Ldorte			| no, all done
671	movl	%d0,%sp@-		| need a scratch register
672	movw	%sp@(4),%d0		| get SR
673	andw	#PSL_IPL7,%d0		| mask all but IPL
674	jne	Lnosir			| came from interrupt, no can do
675	movl	%sp@+,%d0		| restore scratch register
676Lgotsir:
677	movw	#SPL1,%sr		| prevent others from servicing int
678	tstb	_C_LABEL(ssir)		| too late?
679	jeq	Ldorte			| yes, oh well...
680	clrl	%sp@-			| stack adjust
681	moveml	#0xFFFF,%sp@-		| save all registers
682	movl	%usp,%a1		| including
683	movl	%a1,%sp@(FR_SP)		|    the user's SP
684Lsir1:
685	clrl	%sp@-			| VA == none
686	clrl	%sp@-			| code == none
687	movl	#T_SSIR,%sp@-		| type == software interrupt
688	jbsr	_C_LABEL(trap)		| go handle it
689	lea	%sp@(12),%sp		| pop value args
690	movl	%sp@(FR_SP),%a0		| restore
691	movl	%a0,%usp		|   user SP
692	moveml	%sp@+,#0x7FFF		| and all remaining registers
693	addql	#8,%sp			| pop SP and stack adjust
694	rte
695Lnosir:
696	movl	%sp@+,%d0		| restore scratch register
697Ldorte:
698	rte				| real return
699
700/*
701 * Macro to relocate a symbol, used before MMU is enabled.
702 */
703#define	_RELOC(var, ar)	\
704	lea	var,ar;	\
705	addl	%a5,ar
706
707#define	RELOC(var, ar)		_RELOC(_C_LABEL(var), ar)
708#define	ASRELOC(var, ar)	_RELOC(_ASM_LABEL(var), ar)
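/*
 * Example: until the MMU is enabled the kernel runs at its physical load
 * address (passed in %a5) while symbols are linked at virtual addresses, so
 * RELOC(lowram, %a0) leaves the physical address of `lowram' in %a0.  Plain
 * symbol references become usable only once the MMU is turned on at Lenab1.
 */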
709
710/*
711 * Initialization
712 *
713 * A4 contains the address of the end of the symtab
714 * A5 contains physical load point from boot
715 * VBR contains zero from ROM.  Exceptions will continue to vector
716 * through ROM until MMU is turned on at which time they will vector
717 * through our table (vectors.s).
718 */
719BSS(lowram,4)
720BSS(esym,4)
721
722GLOBAL(_verspad)
723	.word	0
724GLOBAL(boot_version)
725	.word	X68K_BOOTIF_VERS
726
727ASENTRY_NOPROFILE(start)
728	movw	#PSL_HIGHIPL,%sr	| no interrupts
729
730	addql	#4,%sp
731	movel	%sp@+,%a5		| firstpa
732	movel	%sp@+,%d5		| fphysize -- last page
733	movel	%sp@,%a4		| esym
734
735	RELOC(vectab,%a0)		| set Vector Base Register temporarily
736	movc	%a0,%vbr
737
738#if 0	/* XXX this should be done by the boot loader */
739	RELOC(edata, %a0)		| clear out BSS
740	movl	#_C_LABEL(end)-4,%d0	| (must be <= 256 kB)
741	subl	#_C_LABEL(edata),%d0
742	lsrl	#2,%d0
7431:	clrl	%a0@+
744	dbra	%d0,1b
745#endif
746
747	ASRELOC(tmpstk, %a0)
748	movl	%a0,%sp			| give ourselves a temporary stack
749	RELOC(esym, %a0)
750#if 1
751	movl	%a4,%a0@		| store end of symbol table
752#else
753	clrl	%a0@			| no symbol table, yet
754#endif
755	RELOC(lowram, %a0)
756	movl	%a5,%a0@		| store start of physical memory
757
758	RELOC(intr_reset, %a0)
759	jbsr	%a0@			| XXX
760
761	movl	#CACHE_OFF,%d0
762	movc	%d0,%cacr		| clear and disable on-chip cache(s)
763
764/* determine our CPU/MMU combo - check for all regardless of kernel config */
765	movl	#0x200,%d0		| data freeze bit
766	movc	%d0,%cacr		|   only exists on 68030
767	movc	%cacr,%d0		| read it back
768	tstl	%d0			| zero?
769	jeq	Lnot68030		| yes, we have 68020/68040/68060
770	jra	Lstart1			| no, we have 68030
771Lnot68030:
772	bset	#31,%d0			| data cache enable bit
773	movc	%d0,%cacr		|   only exists on 68040/68060
774	movc	%cacr,%d0		| read it back
775	tstl	%d0			| zero?
776	jeq	Lis68020		| yes, we have 68020
777	moveq	#0,%d0			| now turn it back off
778	movec	%d0,%cacr		|   before we access any data
779	.word	0xf4d8			| cinva bc - invalidate caches XXX
780	bset	#30,%d0			| data cache no allocate mode bit
781	movc	%d0,%cacr		|   only exists on 68060
782	movc	%cacr,%d0		| read it back
783	tstl	%d0			| zero?
784	jeq	Lis68040		| yes, we have 68040
785	RELOC(mmutype, %a0)		| no, we have 68060
786	movl	#MMU_68040,%a0@		| with a 68040 compatible MMU
787	RELOC(cputype, %a0)
788	movl	#CPU_68060,%a0@		| and a 68060 CPU
789	jra	Lstart1
790Lis68040:
791	RELOC(mmutype, %a0)
792	movl	#MMU_68040,%a0@		| with a 68040 MMU
793	RELOC(cputype, %a0)
794	movl	#CPU_68040,%a0@		| and a 68040 CPU
795	jra	Lstart1
796Lis68020:
797	RELOC(mmutype, %a0)
798	movl	#MMU_68851,%a0@		| we have PMMU
799	RELOC(cputype, %a0)
800	movl	#CPU_68020,%a0@		| and a 68020 CPU
801
802Lstart1:
803/* initialize source/destination control registers for movs */
804	moveq	#FC_USERD,%d0		| user space
805	movc	%d0,%sfc		|   as source
806	movc	%d0,%dfc		|   and destination of transfers
807/* initialize memory sizes (for pmap_bootstrap) */
808	movl	%d5,%d1			| last page
809	moveq	#PGSHIFT,%d2
810	lsrl	%d2,%d1			| convert to page (click) number
811	RELOC(maxmem, %a0)
812	movl	%d1,%a0@		| save as maxmem
813	movl	%a5,%d0			| lowram value from ROM via boot
814	lsrl	%d2,%d0			| convert to page number
815	subl	%d0,%d1			| compute amount of RAM present
816	RELOC(physmem, %a0)
817	movl	%d1,%a0@		| and physmem
818/* configure kernel and proc0 VA space so we can get going */
819#ifdef DDB
820	RELOC(esym,%a0)			| end of static kernel text/data/syms
821	movl	%a0@,%d5
822	jne	Lstart2
823#endif
824	movl	#_C_LABEL(end),%d5	| end of static kernel text/data
825Lstart2:
826	addl	#NBPG-1,%d5
827	andl	#PG_FRAME,%d5		| round to a page
828	movl	%d5,%a4
829	addl	%a5,%a4			| convert to PA
830	pea	%a5@			| firstpa
831	pea	%a4@			| nextpa
832	RELOC(pmap_bootstrap,%a0)
833	jbsr	%a0@			| pmap_bootstrap(firstpa, nextpa)
834	addql	#8,%sp
835
836/*
837 * Prepare to enable MMU.
838 * Since the kernel is not mapped logical == physical we must ensure
839 * that when the MMU is turned on, all prefetched addresses (including
840 * the PC) are valid.  In order to guarantee that, we use the last physical
841 * page (which is conveniently mapped == VA) and load it up with enough
842 * code to defeat the prefetch, then we execute the jump back to here.
843 *
844 * Is this all really necessary, or am I paranoid??
845 */
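/*
 * The .word/.long constants below are hand-assembled 68040/68060 control
 * instructions (movc to SRP/TC/ITT/DTT/PCR, cinva, pflusha), presumably
 * because the assembler in use did not accept the mnemonics; the intended
 * instruction is given in the comment beside each one.
 */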
846	RELOC(Sysseg, %a0)		| system segment table addr
847	movl	%a0@,%d1		| read value (a KVA)
848	addl	%a5,%d1			| convert to PA
849	RELOC(mmutype, %a0)
850	cmpl	#MMU_68040,%a0@		| 68040?
851	jne	Lmotommu1		| no, skip
852	.long	0x4e7b1807		| movc d1,srp
853	jra	Lstploaddone
854Lmotommu1:
855	RELOC(protorp, %a0)
856	movl	#0x80000202,%a0@	| nolimit + share global + 4 byte PTEs
857	movl	%d1,%a0@(4)		| + segtable address
858	pmove	%a0@,%srp		| load the supervisor root pointer
859	movl	#0x80000002,%a0@	| reinit upper half for CRP loads
860Lstploaddone:
861	RELOC(mmutype, %a0)
862	cmpl	#MMU_68040,%a0@		| 68040?
863	jne	Lmotommu2		| no, skip
864#include "opt_jupiter.h"
865#ifdef JUPITER
866	/* JUPITER-X: set system register "SUPER" bit */
867	movl	#0x0200a240,%d0		| translate DRAM area transparently
868	.long	0x4e7b0006		| movc d0,dtt0
869	lea	0x00c00000,%a0		| a0: graphic VRAM
870	lea	0x02c00000,%a1		| a1: graphic VRAM ( not JUPITER-X )
871					|     DRAM ( JUPITER-X )
872	movw	%a0@,%d0
873	movw	%d0,%d1
874	notw	%d1
875	movw	%d1,%a1@
876	movw	%d0,%a0@
877	cmpw	%a1@,%d1		| JUPITER-X?
878	jne	Ljupiterdone		| no, skip
879	movl	#0x0100a240,%d0		| to access system register
880	.long	0x4e7b0006		| movc d0,dtt0
881	movb	#0x01,0x01800003	| set "SUPER" bit
882Ljupiterdone:
883#endif /* JUPITER */
884	moveq	#0,%d0			| ensure TT regs are disabled
885	.long	0x4e7b0004		| movc d0,itt0
886	.long	0x4e7b0005		| movc d0,itt1
887	.long	0x4e7b0006		| movc d0,dtt0
888	.long	0x4e7b0007		| movc d0,dtt1
889	.word	0xf4d8			| cinva bc
890	.word	0xf518			| pflusha
891	movl	#0x8000,%d0
892	.long	0x4e7b0003		| movc d0,tc
893#ifdef M68060
894	RELOC(cputype, %a0)
895	cmpl	#CPU_68060,%a0@		| 68060?
896	jne	Lnot060cache
897	movl	#1,%d0
898	.long	0x4e7b0808		| movcl d0,pcr
899	movl	#0xa0808000,%d0
900	movc	%d0,%cacr		| enable store buffer, both caches
901	jmp	Lenab1
902Lnot060cache:
903#endif
904	movl	#0x80008000,%d0
905	movc	%d0,%cacr		| turn on both caches
906	jmp	Lenab1
907Lmotommu2:
908	movl	#0x82c0aa00,%sp@-	| value to load TC with
909	pmove	%sp@,%tc		| load it
910
911/*
912 * Should be running mapped from this point on
913 */
914Lenab1:
915/* set vector base in virtual address */
916	movl	#_C_LABEL(vectab),%d0	| set Vector Base Register
917	movc	%d0,%vbr
918/* select the software page size now */
919	lea	_ASM_LABEL(tmpstk),%sp	| temporary stack
920	jbsr	_C_LABEL(uvm_setpagesize)  | select software page size
921/* detect FPU type */
922	jbsr	_C_LABEL(fpu_probe)
923	movl	%d0,_C_LABEL(fputype)
924/* set kernel stack, user SP, and initial pcb */
925	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
926	lea	%a1@(USPACE-4),%sp	| set kernel stack to end of area
927	lea	_C_LABEL(proc0),%a2	| initialize proc0.p_addr so that
928	movl	%a1,%a2@(P_ADDR)	|   we don't deref NULL in trap()
929	movl	#USRSTACK-4,%a2
930	movl	%a2,%usp		| init user SP
931	movl	%a1,_C_LABEL(curpcb)	| proc0 is running
932
933	tstl	_C_LABEL(fputype)	| Have an FPU?
934	jeq	Lenab2			| No, skip.
935	clrl	%a1@(PCB_FPCTX)		| ensure null FP context
936	movl	%a1,%sp@-
937	jbsr	_C_LABEL(m68881_restore) | restore it (does not kill a1)
938	addql	#4,%sp
939Lenab2:
940	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
941	jeq	Ltbia040		| yes, cache already on
942	pflusha
943	tstl	_C_LABEL(mmutype)
944	jpl	Lenab3			| 68851 implies no d-cache
945	movl	#CACHE_ON,%d0
946	movc	%d0,%cacr		| clear cache(s)
947	jra	Lenab3
948Ltbia040:
949	.word	0xf518
950Lenab3:
951/* final setup for C code */
952	movl	%d7,_C_LABEL(boothowto)	| save reboot flags
953	movl	%d6,_C_LABEL(bootdev)	|   and boot device
954
955/*
956 * Create a fake exception frame so that cpu_fork() can copy it.
957 * main() never returns; we exit to user mode from a forked process
958 * later on.
959 */
960	clrw	%sp@-			| vector offset/frame type
961	clrl	%sp@-			| PC - filled in by "execve"
962	movw	#PSL_USER,%sp@-		| in user mode
963	clrl	%sp@-			| stack adjust count and padding
964	lea	%sp@(-64),%sp		| construct space for D0-D7/A0-A7
965	lea	_C_LABEL(proc0),%a0	| save pointer to frame
966	movl	%sp,%a0@(P_MD_REGS)	|   in proc0.p_md.md_regs
967
968	jra	_C_LABEL(main)		| main()
969
970	PANIC("main() returned")	| Yow!  Main returned!
971	/* NOTREACHED */
972
973/*
974 * proc_trampoline: call function in register a2 with a3 as an arg
975 * and then rei.
976 */
977GLOBAL(proc_trampoline)
978	movl	%a3,%sp@-		| push function arg
979	jbsr	%a2@			| call function
980	addql	#4,%sp			| pop arg
981	movl	%sp@(FR_SP),%a0		| grab and load
982	movl	%a0,%usp		|   user SP
983	moveml	%sp@+,#0x7FFF		| restore most user regs
984	addql	#8,%sp			| toss SP and stack adjust
985	jra	_ASM_LABEL(rei)		| and return
986
987/*
988 * Use common m68k sigcode.
989 */
990#include <m68k/m68k/sigcode.s>
991#ifdef COMPAT_SUNOS
992#include <m68k/m68k/sunos_sigcode.s>
993#endif
994#ifdef COMPAT_SVR4
995#include <m68k/m68k/svr4_sigcode.s>
996#endif
997
998/*
999 * Primitives
1000 */
1001
1002/*
1003 * Use common m68k support routines.
1004 */
1005#include <m68k/m68k/support.s>
1006
1007/*
1008 * Use common m68k process manipulation routines.
1009 */
1010#include <m68k/m68k/proc_subr.s>
1011
1012	.data
1013GLOBAL(curpcb)
1014GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
1015	.long	0
1016ASLOCAL(mdpflag)
1017	.byte	0		| copy of proc md_flags low byte
1018	.align	2
1019
1020ASBSS(nullpcb,SIZEOF_PCB)
1021
1022/*
1023 * At exit of a process, do a switch for the last time.
1024 * Switch to a safe stack and PCB, and select a new process to run.  The
1025 * old stack and u-area will be freed by the reaper.
1026 *
1027 * MUST BE CALLED AT SPLHIGH!
1028 */
1029ENTRY(switch_exit)
1030	movl	%sp@(4),%a0
1031	/* save state into garbage pcb */
1032	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
1033	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack
1034
1035	/* Schedule the vmspace and stack to be freed. */
1036	movl	%a0,%sp@-		| exit2(p)
1037	jbsr	_C_LABEL(exit2)
1038	lea	%sp@(4),%sp		| pop args
1039
1040#if defined(LOCKDEBUG)
1041	/* Acquire sched_lock */
1042	jbsr	_C_LABEL(sched_lock_idle)
1043#endif
1044
1045	jra	_C_LABEL(cpu_switch)
1046
1047/*
1048 * When no processes are on the runq, cpu_switch branches to Idle
1049 * to wait for something to come ready.
1050 */
1051ASENTRY_NOPROFILE(Idle)
1052#if defined(LOCKDEBUG)
1053	/* Release sched_lock */
1054	jbsr	 _C_LABEL(sched_unlock_idle)
1055#endif
1056	movw	#PSL_LOWIPL,%sr
1057
1058	/* Try to zero some pages. */
1059	movl	_C_LABEL(uvm)+UVM_PAGE_IDLE_ZERO,%d0
1060	jeq	1f
1061	jbsr	_C_LABEL(uvm_pageidlezero)
1062	jra	2f
10631:
1064	stop	#PSL_LOWIPL
10652:	movw	#PSL_HIGHIPL,%sr
1066#if defined(LOCKDEBUG)
1067	/* Acquire sched_lock */
1068	jbsr	_C_LABEL(sched_lock_idle)
1069#endif
1070	movl	_C_LABEL(sched_whichqs),%d0
1071	jeq	_ASM_LABEL(Idle)
1072	jra	Lsw1
1073
1074Lbadsw:
1075	PANIC("switch")
1076	/*NOTREACHED*/
1077
1078/*
1079 * cpu_switch()
1080 *
1081 * NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the
1082 * entire ATC.  The effort involved in selective flushing may not be
1083 * worth it, maybe we should just flush the whole thing?
1084 *
1085 * NOTE 2: With the new VM layout we now no longer know if an inactive
1086 * user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
1087 * bit).  For now, we just always flush the full ATC.
1088 */
1089ENTRY(cpu_switch)
1090	movl	_C_LABEL(curpcb),%a0	| current pcb
1091	movw	%sr,%a0@(PCB_PS)	| save sr before changing ipl
1092#ifdef notyet
1093	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
1094#endif
1095	clrl	_C_LABEL(curproc)
1096
1097	/*
1098	 * Find the highest-priority queue that isn't empty,
1099	 * then take the first proc from that queue.
1100	 */
1101	movl	_C_LABEL(sched_whichqs),%d0
1102	jeq	_ASM_LABEL(Idle)
1103Lsw1:
1104	/*
1105	 * Interrupts are blocked, sched_lock is held.  If
1106	 * we come here via Idle, %d0 contains the contents
1107	 * of a non-zero sched_whichqs.
1108	 */
1109	movl	%d0,%d1			| make a copy of whichqs
1110	negl	%d0			| d0 = -whichqs
1111	andl	%d1,%d0			| isolate lowest set bit
1112					|   (highest-priority non-empty queue)
1113	bfffo	%d0{#0:#32},%d1		| find its offset from the MSB
1114	eorib	#31,%d1			| convert bit-field offset to queue number
1114
1115	movl	%d1,%d0
1116	lslb	#3,%d1			| convert queue number to index
1117	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
1118	movl	%d1,%a1
1119	movl	%a1@(P_FORW),%a0	| p = q->p_forw
1120	cmpal	%d1,%a0			| anyone on queue?
1121	jeq	Lbadsw			| no, panic
1122#ifdef DIAGNOSTIC
1123	tstl	%a0@(P_WCHAN)
1124	jne	Lbadsw
1125	cmpb	#SRUN,%a0@(P_STAT)
1126	jne	Lbadsw
1127#endif
1128	movl	%a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
1129	movl	%a0@(P_FORW),%a1	| n = p->p_forw
1130	movl	%d1,%a1@(P_BACK)	| n->p_back = q
1131	cmpal	%d1,%a1			| anyone left on queue?
1132	jne	Lsw2			| yes, skip
1133	movl	_C_LABEL(sched_whichqs),%d1
1134	bclr	%d0,%d1			| no, clear bit
1135	movl	%d1,_C_LABEL(sched_whichqs)
1136Lsw2:
1137	/* p->p_cpu initialized in fork1() for single-processor */
1138	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
1139	movl	%a0,_C_LABEL(curproc)
1140	clrl	_C_LABEL(want_resched)
1141#ifdef notyet
1142	movl	%sp@+,%a1
1143	cmpl	%a0,%a1			| switching to same proc?
1144	jeq	Lswdone			| yes, skip save and restore
1145#endif
1146	/*
1147	 * Save state of previous process in its pcb.
1148	 */
1149	movl	_C_LABEL(curpcb),%a1
1150	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
1151	movl	%usp,%a2		| grab USP (a2 has been saved)
1152	movl	%a2,%a1@(PCB_USP)	| and save it
1153
1154	tstl	_C_LABEL(fputype)	| Do we have an FPU?
1155	jeq	Lswnofpsave		| No?  Then don't attempt save.
1156	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
1157	fsave	%a2@			| save FP state
1158#if defined(M68020) || defined(M68030) || defined(M68040)
1159#if defined(M68060)
1160	cmpl	#FPU_68060,_C_LABEL(fputype)
1161	jeq	Lsavfp60
1162#endif
1163	tstb	%a2@			| null state frame?
1164	jeq	Lswnofpsave		| yes, all done
1165	fmovem	%fp0-%fp7,%a2@(FPF_REGS) | save FP general registers
1166	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR) | save FP control registers
1167#if defined(M68060)
1168	jra	Lswnofpsave
1169Lsavfp60:
1170#endif
1171#endif
1172#if defined(M68060)
1173	tstb	%a2@(2)			| null state frame?
1174	jeq	Lswnofpsave		| yes, all done
1175	fmovem	%fp0-%fp7,%a2@(FPF_REGS) | save FP general registers
1176	fmovem	%fpcr,%a2@(FPF_FPCR)	| save FP control registers
1177	fmovem	%fpsr,%a2@(FPF_FPSR)
1178	fmovem	%fpi,%a2@(FPF_FPI)
1179#endif
1180Lswnofpsave:
1181
1182	clrl	%a0@(P_BACK)		| clear back link
1183	movb	%a0@(P_MD_FLAGS+3),mdpflag | low byte of p_md.md_flags
1184	movl	%a0@(P_ADDR),%a1	| get p_addr
1185	movl	%a1,_C_LABEL(curpcb)
1186
1187#if defined(LOCKDEBUG)
1188	/*
1189	 * Done mucking with the run queues, release the
1190	 * scheduler lock, but keep interrupts out.
1191	 */
1192	movl	%a0,%sp@-		| not args...
1193	movl	%a1,%sp@-		| ...just saving
1194	jbsr	_C_LABEL(sched_unlock_idle)
1195	movl	%sp@+,%a1
1196	movl	%sp@+,%a0
1197#endif
1198
1199	/*
1200	 * Activate process's address space.
1201	 * XXX Should remember the last USTP value loaded, and call this
1202	 * XXX only if it has changed.
1203	 */
1204	pea	%a0@			| push proc
1205	jbsr	_C_LABEL(pmap_activate)	| pmap_activate(p)
1206	addql	#4,%sp
1207	movl	_C_LABEL(curpcb),%a1	| restore p_addr
1208
1209	lea	_ASM_LABEL(tmpstk),%sp	| now goto a tmp stack for NMI
1210
1211	moveml	%a1@(PCB_REGS),#0xFCFC	| and registers
1212	movl	%a1@(PCB_USP),%a0
1213	movl	%a0,%usp		| and USP
1214
1215	tstl	_C_LABEL(fputype)	| If we don't have an FPU,
1216	jeq	Lnofprest		|  don't try to restore it.
1217	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
1218#if defined(M68020) || defined(M68030) || defined(M68040)
1219#if defined(M68060)
1220	cmpl	#FPU_68060,_C_LABEL(fputype)
1221	jeq	Lresfp60rest1
1222#endif
1223	tstb	%a0@			| null state frame?
1224	jeq	Lresfprest		| yes, easy
1225#if defined(M68040)
1226	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040?
1227	jne	Lresnot040		| no, skip
1228	clrl	%sp@-			| yes...
1229	frestore %sp@+			| ...magic!
1230Lresnot040:
1231#endif
1232	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control registers
1233	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1234#if defined(M68060)
1235	jra	Lresfprest
1236Lresfp60rest1:
1237#endif
1238#endif
1239#if defined(M68060)
1240	tstb	%a0@(2)			| null state frame?
1241	jeq	Lresfprest		| yes, easy
1242	fmovem	%a0@(FPF_FPCR),%fpcr	| restore FP control registers
1243	fmovem	%a0@(FPF_FPSR),%fpsr
1244	fmovem	%a0@(FPF_FPI),%fpi
1245	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1246#endif
1247Lresfprest:
1248	frestore %a0@			| restore state
1249Lnofprest:
1250	movw	%a1@(PCB_PS),%sr	| no, restore PS
1251	moveq	#1,%d0			| return 1 (for alternate returns)
1252	rts
1253
1254/*
1255 * savectx(pcb)
1256 * Update pcb, saving current processor state.
1257 */
1258ENTRY(savectx)
1259	movl	%sp@(4),%a1
1260	movw	%sr,%a1@(PCB_PS)
1261	movl	%usp,%a0		| grab USP
1262	movl	%a0,%a1@(PCB_USP)	| and save it
1263	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
1264
1265	tstl	_C_LABEL(fputype)	| Do we have FPU?
1266	jeq	Lsvnofpsave		| No?  Then don't save state.
1267	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
1268	fsave	%a0@			| save FP state
1269#if defined(M68020) || defined(M68030) || defined(M68040)
1270#if defined(M68060)
1271	cmpl	#FPU_68060,_C_LABEL(fputype)
1272	jeq	Lsvsavfp60
1273#endif
1274	tstb	%a0@			| null state frame?
1275	jeq	Lsvnofpsave		| yes, all done
1276	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1277	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control registers
1278#if defined(M68060)
1279	jra	Lsvnofpsave
1280Lsvsavfp60:
1281#endif
1282#endif
1283#if defined(M68060)
1284	tstb	%a0@(2)			| null state frame?
1285	jeq	Lsvnofpsave		| yes, all done
1286	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1287	fmovem	%fpcr,%a0@(FPF_FPCR)	| save FP control registers
1288	fmovem	%fpsr,%a0@(FPF_FPSR)
1289	fmovem	%fpi,%a0@(FPF_FPI)
1290#endif
1291Lsvnofpsave:
1292	moveq	#0,%d0			| return 0
1293	rts
1294
1295#if defined(M68040) || defined(M68060)
1296ENTRY(suline)
1297	movl	%sp@(4),%a0		| address to write
1298	movl	_C_LABEL(curpcb),%a1	| current pcb
1299	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
1300	movl	%sp@(8),%a1		| address of line
1301	movl	%a1@+,%d0		| get lword
1302	movsl	%d0,%a0@+		| put lword
1303	nop				| sync
1304	movl	%a1@+,%d0		| get lword
1305	movsl	%d0,%a0@+		| put lword
1306	nop				| sync
1307	movl	%a1@+,%d0		| get lword
1308	movsl	%d0,%a0@+		| put lword
1309	nop				| sync
1310	movl	%a1@+,%d0		| get lword
1311	movsl	%d0,%a0@+		| put lword
1312	nop				| sync
1313	moveq	#0,%d0			| indicate no fault
1314	jra	Lsldone
1315Lslerr:
1316	moveq	#-1,%d0
1317Lsldone:
1318	movl	_C_LABEL(curpcb),%a1	| current pcb
1319	clrl	%a1@(PCB_ONFAULT) 	| clear fault address
1320	rts
1321#endif
1322
1323ENTRY(ecacheon)
1324	rts
1325
1326ENTRY(ecacheoff)
1327	rts
1328
1329ENTRY_NOPROFILE(getsfc)
1330	movc	%sfc,%d0
1331	rts
1332
1333ENTRY_NOPROFILE(getdfc)
1334	movc	%dfc,%d0
1335	rts
1336
1337/*
1338 * Load a new user segment table pointer.
1339 */
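/*
 * The argument (USTP) is the user segment table's page frame number;
 * shifting it left by PGSHIFT recovers the physical address, which is then
 * loaded into the URP (68040/060) or, via the CRP prototype, the CRP
 * (68030/68851).
 */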
1340ENTRY(loadustp)
1341	movl	%sp@(4),%d0		| new USTP
1342	moveq	#PGSHIFT,%d1
1343	lsll	%d1,%d0			| convert to addr
1344#if defined(M68040) || defined(M68060)
1345	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1346	jne	LmotommuC		| no, skip
1347	.word	0xf518			| pflusha
1348	.long	0x4e7b0806		| movc d0,urp
1349#ifdef M68060
1350	cmpl	#CPU_68060,_C_LABEL(cputype)
1351	jne	Lldno60
1352	movc	%cacr,%d0
1353	orl	#IC60_CUBC,%d0		| clear user branch cache entries
1354	movc	%d0,%cacr
1355Lldno60:
1356#endif
1357	rts
1358LmotommuC:
1359#endif
1360	pflusha				| flush entire TLB
1361	lea	_C_LABEL(protorp),%a0	| CRP prototype
1362	movl	%d0,%a0@(4)		| stash USTP
1363	pmove	%a0@,%crp		| load root pointer
1364	movl	#CACHE_CLR,%d0
1365	movc	%d0,%cacr		| invalidate cache(s)
1366	rts
1367
1368ENTRY(ploadw)
1369#if defined(M68030)
1370	movl	%sp@(4),%a0		| address to load
1371#if defined(M68040) || defined(M68060)
1372	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1373	jeq	Lploadwskp		| yes, skip
1374#endif
1375	ploadw	#1,%a0@			| pre-load translation
1376Lploadwskp:
1377#endif
1378	rts
1379
1380/*
1381 * Set processor priority level calls.  Most are implemented with
1382 * inline asm expansions.  However, spl0 requires special handling
1383 * as we need to check for our emulated software interrupts.
1384 */
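/*
 * If a software interrupt is pending, spl0 cannot simply rts: it builds a
 * format-0 exception frame (new SR, return PC, frame-type word) on its own
 * stack so that the rte reached via Lgotsir services the interrupt and then
 * returns straight to spl0's caller at the new IPL.
 */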
1385
1386ENTRY(spl0)
1387	moveq	#0,%d0
1388	movw	%sr,%d0			| get old SR for return
1389	movw	#PSL_LOWIPL,%sr		| restore new SR
1390	tstb	_C_LABEL(ssir)		| software interrupt pending?
1391	jeq	Lspldone		| no, all done
1392	subql	#4,%sp			| make room for RTE frame
1393	movl	%sp@(4),%sp@(2)		| position return address
1394	clrw	%sp@(6)			| set frame type 0
1395	movw	#PSL_LOWIPL,%sp@	| and new SR
1396	jra	Lgotsir			| go handle it
1397Lspldone:
1398	rts
1399
1400/*
1401 * _delay(u_int N)
1402 *
1403 * Delay for at least (N/256) microseconds.
1404 * This routine depends on the variable:  delay_divisor
1405 * which should be set based on the CPU clock rate.
1406 */
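/*
 * Arithmetic sketch: the loop subtracts delay_divisor from N (= usecs * 256)
 * until it goes non-positive, i.e. about (usecs * 256) / delay_divisor
 * iterations, so delay_divisor should be set to roughly 256 times the time
 * one subl/jgt pair takes in microseconds.  For example, on a hypothetical
 * CPU where that pair takes 1/8 us, delay_divisor would be about 32.
 */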
1407ENTRY_NOPROFILE(_delay)
1408	| d0 = arg = (usecs << 8)
1409	movl	%sp@(4),%d0
1410	| d1 = delay_divisor
1411	movl	_C_LABEL(delay_divisor),%d1
1412L_delay:
1413	subl	%d1,%d0
1414	jgt	L_delay
1415	rts
1416
1417/*
1418 * Save and restore 68881 state.
1419 */
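/*
 * The "null state frame?" tests below rely on the fsave image layout: a zero
 * byte at the start of the frame marks a NULL frame (no FP state to
 * transfer), so the fmovem transfers can be skipped.  On the 68060 the frame
 * format byte sits at offset 2, which is why the '060 paths test %a0@(2).
 */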
1420ENTRY(m68881_save)
1421	movl	%sp@(4),%a0		| save area pointer
1422	fsave	%a0@			| save state
1423#if defined(M68020) || defined(M68030) || defined(M68040)
1424#if defined(M68060)
1425	cmpl	#FPU_68060,_C_LABEL(fputype)
1426	jeq	Lm68060fpsave
1427#endif
1428Lm68881fpsave:
1429	tstb	%a0@			| null state frame?
1430	jeq	Lm68881sdone		| yes, all done
1431	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1432	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control registers
1433Lm68881sdone:
1434	rts
1435#endif
1436#if defined(M68060)
1437Lm68060fpsave:
1438	tstb	%a0@(2)			| null state frame?
1439	jeq	Lm68060sdone		| yes, all done
1440	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1441	fmovem	%fpcr,%a0@(FPF_FPCR)	| save FP control registers
1442	fmovem	%fpsr,%a0@(FPF_FPSR)
1443	fmovem	%fpi,%a0@(FPF_FPI)
1444Lm68060sdone:
1445	rts
1446#endif
1447
1448ENTRY(m68881_restore)
1449	movl	%sp@(4),%a0		| save area pointer
1450#if defined(M68020) || defined(M68030) || defined(M68040)
1451#if defined(M68060)
1452	cmpl	#FPU_68060,_C_LABEL(fputype)
1453	jeq	Lm68060fprestore
1454#endif
1455Lm68881fprestore:
1456	tstb	%a0@			| null state frame?
1457	jeq	Lm68881rdone		| yes, easy
1458	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control registers
1459	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1460Lm68881rdone:
1461	frestore %a0@			| restore state
1462	rts
1463#endif
1464#if defined(M68060)
1465Lm68060fprestore:
1466	tstb	%a0@(2)			| null state frame?
1467	jeq	Lm68060fprdone		| yes, easy
1468	fmovem	%a0@(FPF_FPCR),%fpcr	| restore FP control registers
1469	fmovem	%a0@(FPF_FPSR),%fpsr
1470	fmovem	%a0@(FPF_FPI),%fpi
1471	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1472Lm68060fprdone:
1473	frestore %a0@			| restore state
1474	rts
1475#endif
1476
1477/*
1478 * Handle the nitty-gritty of rebooting the machine.
1479 * Basically we just turn off the MMU and jump to the appropriate ROM routine.
1480 * Note that we must be running in an address range that is mapped one-to-one
1481 * logical to physical so that the PC is still valid immediately after the MMU
1482 * is turned off.  We have conveniently mapped the last page of physical
1483 * memory this way.
1484 */
1485ENTRY_NOPROFILE(doboot)
1486	movw	#PSL_HIGHIPL,%sr	| cut off any interrupts
1487	subal	%a1,%a1			| a1 = 0
1488
1489	movl	#CACHE_OFF,%d0
1490#if defined(M68040) || defined(M68060)
1491	movl	_C_LABEL(mmutype),%d2	| d2 = mmutype
1492	addl	#-MMU_68040,%d2		| 68040?
1493	jne	Ldoboot0		| no, skip
1494	.word	0xf4f8			| cpusha bc - push and invalidate caches
1495	nop
1496	movl	#CACHE40_OFF,%d0
1497Ldoboot0:
1498#endif
1499	movc	%d0,%cacr		| disable on-chip cache(s)
1500
1501	| ok, turn off MMU..
1502Ldoreboot:
1503#if defined(M68040) || defined(M68060)
1504	tstl	%d2			| 68040?
1505	jne	LmotommuF		| no, skip
1506	movc	%a1,%cacr		| caches off
1507	.long	0x4e7b9003		| movc a1(=0),tc ; disable MMU
1508	jra	Ldoreboot1
1509LmotommuF:
1510#endif
1511	clrl	%sp@
1512	pmove	%sp@,%tc		| disable MMU
1513Ldoreboot1:
1514	moveml	0x00ff0000,#0x0101	| get RESET vectors in ROM
1515					|	(d0: ssp, a0: pc)
1516	moveml	#0x0101,%a1@		| put them at 0x0000 (for Xellent30)
1517	movc	%a1,%vbr		| reset Vector Base Register
1518	jmp	%a0@			| reboot X680x0
1519Lebootcode:
1520
1521/*
1522 * Misc. global variables.
1523 */
1524	.data
1525GLOBAL(machineid)
1526	.long	0		| default to X68030
1527
1528GLOBAL(mmutype)
1529	.long	MMU_68030	| default to 030 internal MMU
1530
1531GLOBAL(cputype)
1532	.long	CPU_68030	| default to 68030 CPU
1533
1534#ifdef M68K_MMU_HP
1535GLOBAL(ectype)
1536	.long	EC_NONE		| external cache type, default to none
1537#endif
1538
1539GLOBAL(fputype)
1540	.long	FPU_NONE
1541
1542GLOBAL(protorp)
1543	.long	0,0		| prototype root pointer
1544
1545GLOBAL(want_resched)
1546	.long	0
1547
1548GLOBAL(proc0paddr)
1549	.long	0		| KVA of proc0 u-area
1550
1551GLOBAL(intiolimit)
1552	.long	0		| KVA of end of internal IO space
1553
1554GLOBAL(extiobase)
1555	.long	0		| KVA of base of external IO space
1556#ifdef DEBUG
1557ASGLOBAL(fulltflush)
1558	.long	0
1559
1560ASGLOBAL(fullcflush)
1561	.long	0
1562#endif
1563
1564/* interrupt counters */
1565
1566GLOBAL(intrnames)
1567	.asciz	"spur"
1568	.asciz	"lev1"
1569	.asciz	"lev2"
1570	.asciz	"lev3"
1571	.asciz	"lev4"
1572	.asciz	"lev5"
1573	.asciz	"lev6"
1574	.asciz	"nmi"
1575	.asciz	"audioerr"
1576	.asciz	"clock"
1577	.asciz	"scsi"
1578	.asciz	"audio"
1579	.asciz	"pow"
1580	.asciz	"com"
1581	.space	200
1582GLOBAL(eintrnames)
1583	.even
1584
1585GLOBAL(intrcnt)
1586	.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
1587	.space	50
1588GLOBAL(eintrcnt)
1589