xref: /netbsd/sys/arch/x68k/x68k/locore.s (revision 6550d01e)
1/*	$NetBSD: locore.s,v 1.100 2010/12/27 15:39:07 tsutsui Exp $	*/
2
3/*
4 * Copyright (c) 1980, 1990, 1993
5 *	The Regents of the University of California.  All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its contributors
20 *    may be used to endorse or promote products derived from this software
21 *    without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * from: Utah $Hdr: locore.s 1.66 92/12/22$
36 *
37 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
38 */
39/*
40 * Copyright (c) 1988 University of Utah.
41 *
42 * This code is derived from software contributed to Berkeley by
43 * the Systems Programming Group of the University of Utah Computer
44 * Science Department.
45 *
46 * Redistribution and use in source and binary forms, with or without
47 * modification, are permitted provided that the following conditions
48 * are met:
49 * 1. Redistributions of source code must retain the above copyright
50 *    notice, this list of conditions and the following disclaimer.
51 * 2. Redistributions in binary form must reproduce the above copyright
52 *    notice, this list of conditions and the following disclaimer in the
53 *    documentation and/or other materials provided with the distribution.
54 * 3. All advertising materials mentioning features or use of this software
55 *    must display the following acknowledgement:
56 *	This product includes software developed by the University of
57 *	California, Berkeley and its contributors.
58 * 4. Neither the name of the University nor the names of its contributors
59 *    may be used to endorse or promote products derived from this software
60 *    without specific prior written permission.
61 *
62 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * SUCH DAMAGE.
73 *
74 * from: Utah $Hdr: locore.s 1.66 92/12/22$
75 *
76 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
77 */
78
79#include "opt_compat_netbsd.h"
80#include "opt_compat_svr4.h"
81#include "opt_compat_sunos.h"
82#include "opt_ddb.h"
83#include "opt_fpsp.h"
84#include "opt_kgdb.h"
85#include "opt_lockdebug.h"
86#include "opt_m68k_arch.h"
87
88#include "ite.h"
89#include "fd.h"
90#include "par.h"
91#include "assym.h"
92#include "ksyms.h"
93
94#include <machine/asm.h>
95
96| This is for kvm_mkdb, and should be the address of the beginning
97| of the kernel text segment (not necessarily the same as kernbase).
98	.text
99GLOBAL(kernel_text)			| marks start of kernel text; no code emitted here
100
101/*
102 * Temporary stack for a variety of purposes.
103 * Try and make this the first thing in the data segment so it
104 * is page aligned.  Note that if we overflow here, we run into
105 * our text segment.
106 */
107	.data
108	.space	PAGE_SIZE
109ASLOCAL(tmpstk)			| label is at the END of the page; stack grows down into it
110
111#include <x68k/x68k/vectors.s>
112
113	.text
114/*
115 * This is where we wind up if the kernel jumps to location 0.
116 * (i.e. a bogus PC)  This is known to immediately follow the vector
117 * table and is hence at 0x400 (see reset vector in vectors.s).
118 */
119	PANIC("kernel jump to zero")	| panic with this message; never returns
120	/* NOTREACHED */
121
122/*
123 * Trap/interrupt vector routines
124 */
125#include <m68k/m68k/trap_subr.s>
126
| Bus error handler.  A fault during a device probe longjmps back via
| nofault; otherwise build a trap frame, decode the 68040/68060 bus
| error frame and classify the fault as an MMU fault or a real bus
| error.  Non-040/060 CPUs fall through to addrerr for decoding.
127ENTRY_NOPROFILE(buserr)
128ENTRY_NOPROFILE(buserr60)		| XXX
129	tstl	_C_LABEL(nofault)	| device probe?
130	jeq	Lberr			| no, handle as usual
131	movl	_C_LABEL(nofault),%sp@-	| yes,
132	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
133Lberr:
134#if defined(M68040) || defined(M68060)
135	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040/060?
136	jne	_C_LABEL(addrerr)	| no, skip
137	clrl	%sp@-			| stack adjust count
138	moveml	#0xFFFF,%sp@-		| save user registers
139	movl	%usp,%a0		| save the user SP
140	movl	%a0,%sp@(FR_SP)		|   in the savearea
141	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
142#if defined(M68060)
143	cmpl	#CPU_68060,_C_LABEL(cputype) | 68060?
144	jne	Lbenot060
145	movel	%a1@(12),%d0		| grab FSLW
146	btst	#2,%d0			| branch prediction error?
147	jeq	Lnobpe			| no, skip
148	movc	%cacr,%d1		| read cache control register
149	orl	#IC60_CABC,%d1		| clear all branch cache entries
150	movc	%d1,%cacr		| and write it back
151	movl	%d0,%d1			| copy the FSLW
152	andl	#0x7ffd,%d1		| check other faults
153	jeq	_ASM_LABEL(faultstkadjnotrap2)
154Lnobpe:
155| XXX this is not needed.
156|	movl	%d0,%sp@		| code is FSLW now.
157
158| we need to adjust for misaligned addresses
159	movl	%a1@(8),%d1		| grab VA
160	btst	#27,%d0			| check for mis-aligned access
161	jeq	Lberr3			| no, skip
162	addl	#28,%d1			| yes, get into next page
163					| operand case: 3,
164					| instruction case: 4+12+12
165					| XXX instr. case not done yet
166	andl	#PG_FRAME,%d1		| and truncate
167Lberr3:
168	movl	%d1,%sp@-		| push fault VA
169	movl	%d0,%sp@-		| and FSLW
170	andw	#0x1f80,%d0		| mask fault-type bits of FSLW
171	jeq	Lisberr			| none set, a real bus error
172	jra	Lismerr			| else handle as MMU fault
173Lbenot060:
174#endif
175	moveq	#0,%d0			| clear d0 for padded SSW
176	movw	%a1@(12),%d0		| grab SSW
177	movl	%a1@(20),%d1		| and fault VA
178	btst	#11,%d0			| check for mis-aligned access
179	jeq	Lberr2			| no, skip
180	addl	#3,%d1			| yes, get into next page
181	andl	#PG_FRAME,%d1		| and truncate
182Lberr2:
183	movl	%d1,%sp@-		| push fault VA
184	movl	%d0,%sp@-		| and padded SSW
185	btst	#10,%d0			| ATC bit set?
186	jeq	Lisberr			| no, must be a real bus error
187	movc	%dfc,%d1		| yes, get MMU fault
188	movc	%d0,%dfc		| store faulting function code
189	movl	%sp@(4),%a0		| get faulting address
190	.word	0xf568			| ptestr a0@
191	movc	%d1,%dfc		| restore previous DFC
192	.long	0x4e7a0805		| movc mmusr,d0
193	movw	%d0,%sp@		| save (ONLY LOW 16 BITS!)
194	jra	Lismerr
195#endif
| Address error handler; also the 68020/030 bus error tail (entered
| from Lberr above).  Builds a trap frame, reconstructs the fault VA
| from the special status word, then classifies the fault via an MMU
| table search (ptestr) into T_MMUFLT, T_ADDRERR or T_BUSERR.
196ENTRY_NOPROFILE(addrerr)
197	clrl	%sp@-			| stack adjust count
198	moveml	#0xFFFF,%sp@-		| save user registers
199	movl	%usp,%a0		| save the user SP
200	movl	%a0,%sp@(FR_SP)		|   in the savearea
201	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
202#if defined(M68040) || defined(M68060)
203	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
204	jne	Lbenot040		| no, skip
205	movl	%a1@(8),%sp@-		| yes, push fault address
206	clrl	%sp@-			| no SSW for address fault
207	jra	Lisaerr			| go deal with it
208Lbenot040:
209#endif
210	moveq	#0,%d0			| clear d0 for padded SSW
211	movw	%a1@(10),%d0		| grab SSW for fault processing
212	btst	#12,%d0			| RB set?
213	jeq	LbeX0			| no, test RC
214	bset	#14,%d0			| yes, must set FB
215	movw	%d0,%a1@(10)		| for hardware too
216LbeX0:
217	btst	#13,%d0			| RC set?
218	jeq	LbeX1			| no, skip
219	bset	#15,%d0			| yes, must set FC
220	movw	%d0,%a1@(10)		| for hardware too
221LbeX1:
222	btst	#8,%d0			| data fault?
223	jeq	Lbe0			| no, check for hard cases
224	movl	%a1@(16),%d1		| fault address is as given in frame
225	jra	Lbe10			| that's it
226Lbe0:
227	btst	#4,%a1@(6)		| long (type B) stack frame?
228	jne	Lbe4			| yes, go handle
229	movl	%a1@(2),%d1		| no, can use save PC
230	btst	#14,%d0			| FB set?
231	jeq	Lbe3			| no, try FC
232	addql	#4,%d1			| yes, adjust address
233	jra	Lbe10			| done
234Lbe3:
235	btst	#15,%d0			| FC set?
236	jeq	Lbe10			| no, done
237	addql	#2,%d1			| yes, adjust address
238	jra	Lbe10			| done
239Lbe4:
240	movl	%a1@(36),%d1		| long format, use stage B address
241	btst	#15,%d0			| FC set?
242	jeq	Lbe10			| no, all done
243	subql	#2,%d1			| yes, adjust address
244Lbe10:
245	movl	%d1,%sp@-		| push fault VA
246	movl	%d0,%sp@-		| and padded SSW
247	movw	%a1@(6),%d0		| get frame format/vector offset
248	andw	#0x0FFF,%d0		| clear out frame format
249	cmpw	#12,%d0			| address error vector?
250	jeq	Lisaerr			| yes, go to it
251	movl	%d1,%a0			| fault address
252	movl	%sp@,%d0		| function code from ssw
253	btst	#8,%d0			| data fault?
254	jne	Lbe10a
255	movql	#1,%d0			| user program access FC
256					| (we don't separate data/program)
257	btst	#5,%a1@			| supervisor mode?
258	jeq	Lbe10a			| if no, done
259	movql	#5,%d0			| else supervisor program access
260Lbe10a:
261	ptestr	%d0,%a0@,#7		| do a table search
262	pmove	%psr,%sp@		| save result
263	movb	%sp@,%d1		| high byte of MMU status
264	btst	#2,%d1			| invalid? (incl. limit viol and berr)
265	jeq	Lmightnotbemerr		| no -> wp check
266	btst	#7,%d1			| is it MMU table berr?
267	jeq	Lismerr			| no, must be fast
268	jra	Lisberr1		| real bus err needs not be fast
269Lmightnotbemerr:
270	btst	#3,%d1			| write protect bit set?
271	jeq	Lisberr1		| no, must be bus error
272	movl	%sp@,%d0		| ssw into low word of d0
273	andw	#0xc0,%d0		| write protect is set on page:
274	cmpw	#0x40,%d0		| was it read cycle?
275	jeq	Lisberr1		| yes, was not WPE, must be bus err
276Lismerr:
277	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
278	jra	_ASM_LABEL(faultstkadj)	| and deal with it
279Lisaerr:
280	movl	#T_ADDRERR,%sp@-	| mark address error
281	jra	_ASM_LABEL(faultstkadj)	| and deal with it
282Lisberr1:
283	clrw	%sp@			| re-clear pad word
284Lisberr:
285	movl	#T_BUSERR,%sp@-		| mark bus error
286	jra	_ASM_LABEL(faultstkadj)	| and deal with it
287
288/*
289 * FP exceptions.
290 */
291#include "opt_fpu_emulate.h"
| FP unimplemented-instruction (F-line) trap: dispatch to the 68040
| FPSP package or the software FP emulator; otherwise treat the
| instruction as illegal.
292ENTRY_NOPROFILE(fpfline)
293#if defined(M68040)
294	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
295	jne	Lfp_unimp		| no, skip FPSP
296	cmpw	#0x202c,%sp@(6)		| format type 2?
297	jne	_C_LABEL(illinst)	| no, not an FP emulation
298#ifdef FPSP
299	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
300#else
301	clrl	%sp@-			| stack adjust count
302	moveml	#0xFFFF,%sp@-		| save registers
303	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
304	jra	_ASM_LABEL(fault)	| do it
305#endif
306Lfp_unimp:
307#endif
308#ifdef FPU_EMULATE
309	clrl	%sp@-			| stack adjust count
310	moveml	#0xFFFF,%sp@-		| save registers
311	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
312	jra	_ASM_LABEL(fault)	| do it
313#else
314	jra	_C_LABEL(illinst)	| no emulator configured
315#endif
316
| FP unsupported-datatype trap: same dispatch scheme as fpfline.
317ENTRY_NOPROFILE(fpunsupp)
318#if defined(M68040)
319	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040?
320	jne	Lfp_unsupp		| no, skip FPSP
321#ifdef FPSP
322	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
323#else
324	clrl	%sp@-			| stack adjust count
325	moveml	#0xFFFF,%sp@-		| save registers
326	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
327	jra	_ASM_LABEL(fault)	| do it
328#endif
329Lfp_unsupp:
330#endif
331#ifdef FPU_EMULATE
332	clrl	%sp@-			| stack adjust count
333	moveml	#0xFFFF,%sp@-		| save registers
334	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
335	jra	_ASM_LABEL(fault)	| do it
336#else
337	jra	_C_LABEL(illinst)	| no emulator configured
338#endif
339
340/*
341 * Handles all other FP coprocessor exceptions.
342 * Note that since some FP exceptions generate mid-instruction frames
343 * and may cause signal delivery, we need to test for stack adjustment
344 * after the trap call.
345 */
346ENTRY_NOPROFILE(fpfault)
347	clrl	%sp@-		| stack adjust count
348	moveml	#0xFFFF,%sp@-	| save user registers
349	movl	%usp,%a0	| and save
350	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
351	clrl	%sp@-		| no VA arg
352	movl	_C_LABEL(curpcb),%a0 | current pcb
353	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
354	fsave	%a0@		| save state
355#if defined(M68040) || defined(M68060)
356	/* always null state frame on 68040, 68060 */
357	cmpl	#FPU_68040,_C_LABEL(fputype)
358	jge	Lfptnull	| 040/060: skip the BIU tweak below
359#endif
360	tstb	%a0@		| null state frame?
361	jeq	Lfptnull	| yes, safe
362	clrw	%d0		| no, need to tweak BIU
363	movb	%a0@(1),%d0	| get frame size
364	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
365Lfptnull:
366	fmovem	%fpsr,%sp@-	| push fpsr as code argument
367	frestore %a0@		| restore state
368	movl	#T_FPERR,%sp@-	| push type arg
369	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup
370
371/*
372 * Other exceptions only cause four and six word stack frame and require
373 * no post-trap stack adjustment.
374 */
375
| Unexpected (stray) trap: report via straytrap(pc, vector) and return.
376ENTRY_NOPROFILE(badtrap)
377	moveml	#0xC0C0,%sp@-		| save scratch regs
378	movw	%sp@(22),%sp@-		| push exception vector info
379	clrw	%sp@-			|    padded to longword
380	movl	%sp@(22),%sp@-		| and PC
381	jbsr	_C_LABEL(straytrap)	| report
382	addql	#8,%sp			| pop args
383	moveml	%sp@+,#0x0303		| restore regs
384	jra	_ASM_LABEL(rei)		| all done
385
| System call entry (trap #0): build a frame, call syscall(code), then
| check for pending ASTs/software interrupts before returning to user
| mode (the Lrei2/Lsir1 targets are in the rei code below).
386ENTRY_NOPROFILE(trap0)
387	clrl	%sp@-			| stack adjust count
388	moveml	#0xFFFF,%sp@-		| save user registers
389	movl	%usp,%a0		| save the user SP
390	movl	%a0,%sp@(FR_SP)		|   in the savearea
391	movl	%d0,%sp@-		| push syscall number
392	jbsr	_C_LABEL(syscall)	| handle it
393	addql	#4,%sp			| pop syscall arg
394	tstl	_C_LABEL(astpending)	| AST pending?
395	jne	Lrei2			| yes, take the trap(T_ASTFLT) path
396	tstb	_C_LABEL(ssir)		| software interrupt pending?
397	jeq	Ltrap1			| no, do a quick return
398	movw	#SPL1,%sr		| raise IPL to recheck atomically
399	tstb	_C_LABEL(ssir)		| still pending?
400	jne	Lsir1			| yes, go service it
401Ltrap1:
402	movl	%sp@(FR_SP),%a0		| grab and restore
403	movl	%a0,%usp		|   user SP
404	moveml	%sp@+,#0x7FFF		| restore most registers
405	addql	#8,%sp			| pop SP and stack adjust
406	rte
407
408/*
409 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
410 *	cachectl(command, addr, length)
411 * command in d0, addr in a1, length in d1
412 */
413ENTRY_NOPROFILE(trap12)
414	movl	_C_LABEL(curlwp),%a0	| cachectl1 needs the current proc
415	movl	%a0@(L_PROC),%sp@-	| push curproc pointer
416	movl	%d1,%sp@-		| push length
417	movl	%a1,%sp@-		| push addr
418	movl	%d0,%sp@-		| push command
419	jbsr	_C_LABEL(cachectl1)	| do it
420	lea	%sp@(16),%sp		| pop args
421	jra	_ASM_LABEL(rei)		| all done
422
423/*
424 * Trace (single-step) trap.  Kernel-mode is special.
425 * User mode traps are simply passed on to trap().
426 */
427ENTRY_NOPROFILE(trace)
428	clrl	%sp@-			| stack adjust count
429	moveml	#0xFFFF,%sp@-		| save all registers
430	moveq	#T_TRACE,%d0		| trap type for fault()/kbrkpt

431	| Check PSW and see what happened.
432	|   T=0 S=0	(should not happen)
433	|   T=1 S=0	trace trap from user mode
434	|   T=0 S=1	trace trap on a trap instruction
435	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

436	movw	%sp@(FR_HW),%d1		| get PSW
437	notw	%d1			| XXX no support for T0 on 680[234]0
438	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
439	jeq	Lkbrkpt			| yes, kernel breakpoint
440	jra	_ASM_LABEL(fault)	| no, user-mode fault
443
444/*
445 * Trap 15 is used for:
446 *	- GDB breakpoints (in user programs)
447 *	- KGDB breakpoints (in the kernel)
448 *	- trace traps for SUN binaries (not fully supported yet)
449 * User mode traps are simply passed to trap().
450 */
451ENTRY_NOPROFILE(trap15)
452	clrl	%sp@-			| stack adjust count
453	moveml	#0xFFFF,%sp@-		| save all registers
454	moveq	#T_TRAP15,%d0		| trap type for fault()/kbrkpt
455	movw	%sp@(FR_HW),%d1		| get PSW
456	andw	#PSL_S,%d1		| from system mode?
457	jne	Lkbrkpt			| yes, kernel breakpoint
458	jra	_ASM_LABEL(fault)	| no, user-mode fault

459Lkbrkpt: | Kernel-mode breakpoint or trace trap. (d0=trap_type)
460	| Save the system sp rather than the user sp.
461	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
462	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
463	movl	%a6,%sp@(FR_SP)		|  from before trap

464	| If we are not on tmpstk switch to it.
465	| (so debugger can change the stack pointer)
466	movl	%a6,%d1
467	cmpl	#_ASM_LABEL(tmpstk),%d1
468	jls	Lbrkpt2			| already on tmpstk
469	| Copy frame to the temporary stack
470	movl	%sp,%a0			| a0=src
471	lea	_ASM_LABEL(tmpstk)-96,%a1 | a1=dst
472	movl	%a1,%sp			| sp=new frame
473	moveq	#FR_SIZE,%d1
474Lbrkpt1:
475	movl	%a0@+,%a1@+		| copy a longword
476	subql	#4,%d1
477	jgt	Lbrkpt1

478Lbrkpt2:
479	| Call the trap handler for the kernel debugger.
480	| Do not call trap() to do it, so that we can
481	| set breakpoints in trap() if we want.  We know
482	| the trap type is either T_TRACE or T_BREAKPOINT.
483	| If we have both DDB and KGDB, let KGDB see it first,
484	| because KGDB will just return 0 if not connected.
485	| Save args in d2, a2
486	movl	%d0,%d2			| trap type
487	movl	%sp,%a2			| frame ptr
488#ifdef KGDB
489	| Let KGDB handle it (if connected)
490	movl	%a2,%sp@-		| push frame ptr
491	movl	%d2,%sp@-		| push trap type
492	jbsr	_C_LABEL(kgdb_trap)	| handle the trap
493	addql	#8,%sp			| pop args
494	cmpl	#0,%d0			| did kgdb handle it?
495	jne	Lbrkpt3			| yes, done
496#endif
497#ifdef DDB
498	| Let DDB handle it
499	movl	%a2,%sp@-		| push frame ptr
500	movl	%d2,%sp@-		| push trap type
501	jbsr	_C_LABEL(kdb_trap)	| handle the trap
502	addql	#8,%sp			| pop args
503#if 0	/* not needed on hp300 */
504	cmpl	#0,%d0			| did ddb handle it?
505	jne	Lbrkpt3			| yes, done
506#endif
507#endif
508	/* Sun 3 drops into PROM here. */
509Lbrkpt3:
510	| The stack pointer may have been modified, or
511	| data below it modified (by kgdb push call),
512	| so push the hardware frame at the current sp
513	| before restoring registers and returning.

514	movl	%sp@(FR_SP),%a0		| modified sp
515	lea	%sp@(FR_SIZE),%a1	| end of our frame
516	movl	%a1@-,%a0@-		| copy 2 longs with
517	movl	%a1@-,%a0@-		| ... predecrement
518	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
519	moveml	%sp@+,#0x7FFF		| restore all but sp
520	movl	%sp@,%sp		| ... and sp
521	rte				| all done
526
527/* Use common m68k sigreturn */
528#include <m68k/m68k/sigreturn.s>
529
530/*
531 * Provide a generic interrupt dispatcher, only handle hardclock (int6)
532 * specially, to improve performance
533 */
534
535ENTRY_NOPROFILE(spurintr)	/* level 0 */
536	addql	#1,_C_LABEL(intrcnt)+0	| count spurious interrupts
537	INTERRUPT_SAVEREG
538	CPUINFO_INCREMENT(CI_NINTR)
539	INTERRUPT_RESTOREREG
540	rte				| XXX mfpcure (x680x0 hardware bug)

541ENTRY_NOPROFILE(kbdtimer)
542	rte				| nothing to do, just return
544
| Serial (com unit 0) interrupt: bump idepth around the C handler and
| account the interrupt.  The handler is only present if a com device
| is configured (NXCOM > 0).
545ENTRY_NOPROFILE(com0trap)
546#include "com.h"
547	INTERRUPT_SAVEREG
548#if NXCOM > 0
549	addql	#1,_C_LABEL(idepth)	| entering interrupt context
550	movel	#0,%sp@-		| unit 0
551	jbsr	_C_LABEL(comintr)
552	addql	#4,%sp			| pop unit arg
553	subql	#1,_C_LABEL(idepth)
554#endif
555	CPUINFO_INCREMENT(CI_NINTR)
556	INTERRUPT_RESTOREREG
557	addql	#1,_C_LABEL(intrcnt)+36
558	jra	_ASM_LABEL(rei)		| all done (consistent with other handlers)
559
| Serial (com unit 1) interrupt: same scheme as com0trap.
560ENTRY_NOPROFILE(com1trap)
561	INTERRUPT_SAVEREG
562#if NXCOM > 1
563	addql	#1,_C_LABEL(idepth)	| entering interrupt context
564	movel	#1,%sp@-		| unit 1
565	jbsr	_C_LABEL(comintr)
566	addql	#4,%sp			| pop unit arg
567	subql	#1,_C_LABEL(idepth)
568#endif
569	CPUINFO_INCREMENT(CI_NINTR)
570	INTERRUPT_RESTOREREG
571	addql	#1,_C_LABEL(intrcnt)+36
572	jra	_ASM_LABEL(rei)		| all done (consistent with other handlers)
573
| intio bus interrupt: pass the hardware frame address to intio_intr().
574ENTRY_NOPROFILE(intiotrap)
575	addql	#1,_C_LABEL(idepth)	| entering interrupt context
576	INTERRUPT_SAVEREG
577	pea	%sp@(16-(FR_HW))	| XXX
578	jbsr	_C_LABEL(intio_intr)
579	addql	#4,%sp			| pop frame arg
580	CPUINFO_INCREMENT(CI_NINTR)
581	INTERRUPT_RESTOREREG
582	subql	#1,_C_LABEL(idepth)
583	jra	_ASM_LABEL(rei)		| all done (consistent with other handlers)
584
| Generic auto-vectored interrupt dispatcher for levels 1-6: count the
| interrupt by vector offset, then hand the saved SR to intrhand().
585ENTRY_NOPROFILE(lev1intr)
586ENTRY_NOPROFILE(lev2intr)
587ENTRY_NOPROFILE(lev3intr)
588ENTRY_NOPROFILE(lev4intr)
589ENTRY_NOPROFILE(lev5intr)
590ENTRY_NOPROFILE(lev6intr)
591	addql	#1,_C_LABEL(idepth)	| entering interrupt context
592	INTERRUPT_SAVEREG
593Lnotdma:
594	lea	_C_LABEL(intrcnt),%a0
595	movw	%sp@(22),%d0		| use vector offset
596	andw	#0xfff,%d0		|   sans frame type
597	addql	#1,%a0@(-0x60,%d0:w)	|     to increment apropos counter
598	movw	%sr,%sp@-		| push current SR value
599	clrw	%sp@-			|    padded to longword
600	jbsr	_C_LABEL(intrhand)	| handle interrupt
601	addql	#4,%sp			| pop SR
602	CPUINFO_INCREMENT(CI_NINTR)
603	INTERRUPT_RESTOREREG
604	subql	#1,_C_LABEL(idepth)
605	jra	_ASM_LABEL(rei)		| all done
606
| Clock interrupt: call hardclock() with a pointer to the HW frame.
607ENTRY_NOPROFILE(timertrap)
608	addql	#1,_C_LABEL(idepth)	| entering interrupt context
609	INTERRUPT_SAVEREG		| save scratch registers
610	addql	#1,_C_LABEL(intrcnt)+32	| count hardclock interrupts
611	lea	%sp@(16),%a1		| a1 = &clockframe
612	movl	%a1,%sp@-
613	jbsr	_C_LABEL(hardclock)	| hardclock(&frame)
614	addql	#4,%sp			| pop frame arg
615	CPUINFO_INCREMENT(CI_NINTR)	| chalk up another interrupt
616	INTERRUPT_RESTOREREG		| restore scratch registers
617	subql	#1,_C_LABEL(idepth)
618	jra	_ASM_LABEL(rei)		| all done
619
| Level 7 (non-maskable) interrupt: full register save, then nmihand().
620ENTRY_NOPROFILE(lev7intr)
621	addql	#1,_C_LABEL(idepth)	| entering interrupt context
622	addql	#1,_C_LABEL(intrcnt)+28	| count NMIs
623	clrl	%sp@-			| stack adjust count
624	moveml	#0xFFFF,%sp@-		| save registers
625	movl	%usp,%a0		| and save
626	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
627	jbsr	_C_LABEL(nmihand)	| call handler
628	movl	%sp@(FR_SP),%a0		| restore
629	movl	%a0,%usp		|   user SP
630	moveml	%sp@+,#0x7FFF		| and remaining registers
631	addql	#8,%sp			| pop SP and stack adjust
632	subql	#1,_C_LABEL(idepth)
633	jra	_ASM_LABEL(rei)		| all done
634
635/*
636 * floppy ejection trap
637 */
638
639ENTRY_NOPROFILE(fdeject)
640	jra	_ASM_LABEL(rei)		| nothing to do here, just return
641
642/*
643 * Emulation of VAX REI instruction.
644 *
645 * This code deals with checking for and servicing ASTs
646 * (profiling, scheduling) and software interrupts (network, softclock).
647 * We check for ASTs first, just like the VAX.  To avoid excess overhead
648 * the T_ASTFLT handling code will also check for software interrupts so we
649 * do not have to do it here.  After identifying that we need an AST we
650 * drop the IPL to allow device interrupts.
651 *
652 * This code is complicated by the fact that sendsig may have been called
653 * necessitating a stack cleanup.
654 */
655ASENTRY_NOPROFILE(rei)
656	tstl	_C_LABEL(astpending)	| AST pending?
657	jeq	Lchksir			| no, go check for SIR
658Lrei1:
659	btst	#5,%sp@			| yes, are we returning to user mode?
660	jne	Lchksir			| no, go check for SIR
661	movw	#PSL_LOWIPL,%sr		| lower SPL
662	clrl	%sp@-			| stack adjust
663	moveml	#0xFFFF,%sp@-		| save all registers
664	movl	%usp,%a1		| including
665	movl	%a1,%sp@(FR_SP)		|    the users SP
666Lrei2:
667	clrl	%sp@-			| VA == none
668	clrl	%sp@-			| code == none
669	movl	#T_ASTFLT,%sp@-		| type == async system trap
670	pea	%sp@(12)		| fp = trap frame address
671	jbsr	_C_LABEL(trap)		| go handle it
672	lea	%sp@(16),%sp		| pop value args
673	movl	%sp@(FR_SP),%a0		| restore user SP
674	movl	%a0,%usp		|   from save area
675	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
676	jne	Laststkadj		| yes, go to it
677	moveml	%sp@+,#0x7FFF		| no, restore most user regs
678	addql	#8,%sp			| toss SP and stack adjust
679	rte				| and do real RTE
680Laststkadj:
681	lea	%sp@(FR_HW),%a1		| pointer to HW frame
682	addql	#8,%a1			| source pointer
683	movl	%a1,%a0			| source
684	addw	%d0,%a0			|  + hole size = dest pointer
685	movl	%a1@-,%a0@-		| copy
686	movl	%a1@-,%a0@-		|  8 bytes
687	movl	%a0,%sp@(FR_SP)		| new SSP
688	moveml	%sp@+,#0x7FFF		| restore user registers
689	movl	%sp@,%sp		| and our SP
690	rte				| and do real RTE
691Lchksir:
692	tstb	_C_LABEL(ssir)		| SIR pending?
693	jeq	Ldorte			| no, all done
694	movl	%d0,%sp@-		| need a scratch register
695	movw	%sp@(4),%d0		| get SR
696	andw	#PSL_IPL7,%d0		| mask all but IPL
697	jne	Lnosir			| came from interrupt, no can do
698	movl	%sp@+,%d0		| restore scratch register
699Lgotsir:
700	movw	#SPL1,%sr		| prevent others from servicing int
701	tstb	_C_LABEL(ssir)		| too late?
702	jeq	Ldorte			| yes, oh well...
703	clrl	%sp@-			| stack adjust
704	moveml	#0xFFFF,%sp@-		| save all registers
705	movl	%usp,%a1		| including
706	movl	%a1,%sp@(FR_SP)		|    the users SP
707Lsir1:
708	clrl	%sp@-			| VA == none
709	clrl	%sp@-			| code == none
710	movl	#T_SSIR,%sp@-		| type == software interrupt
711	pea	%sp@(12)		| fp = trap frame address
712	jbsr	_C_LABEL(trap)		| go handle it
713	lea	%sp@(16),%sp		| pop value args
714	movl	%sp@(FR_SP),%a0		| restore
715	movl	%a0,%usp		|   user SP
716	moveml	%sp@+,#0x7FFF		| and all remaining registers
717	addql	#8,%sp			| pop SP and stack adjust
718	rte				| return to interrupted context
719Lnosir:
720	movl	%sp@+,%d0		| restore scratch register
721Ldorte:
722	rte				| real return
723
724/*
725 * Macro to relocate a symbol, used before MMU is enabled.
726 */
/*
 * a5 holds the physical load address passed in by the boot loader,
 * so "var + a5" yields var's physical address before the MMU is on.
 */
727#define	_RELOC(var, ar)	\
728	lea	var,ar;	\
729	addl	%a5,ar

730#define	RELOC(var, ar)		_RELOC(_C_LABEL(var), ar)
731#define	ASRELOC(var, ar)	_RELOC(_ASM_LABEL(var), ar)
733
734/*
735 * Initialization
736 *
737 * A4 contains the address of the end of the symtab
738 * A5 contains physical load point from boot
739 * VBR contains zero from ROM.  Exceptions will continue to vector
740 * through ROM until MMU is turned on at which time they will vector
741 * through our table (vectors.s).
742 */
743BSS(lowram,4)			| start of physical memory (set by start)
744BSS(esym,4)			| end of kernel symbol table (set by start)

745GLOBAL(_verspad)
746	.word	0		| pad so boot_version is longword-aligned
747GLOBAL(boot_version)
748	.word	X68K_BOOTIF_VERS	| boot interface version for the loader
750
751ASENTRY_NOPROFILE(start)
752	movw	#PSL_HIGHIPL,%sr	| no interrupts
753
754	addql	#4,%sp
755	movel	%sp@+,%a5		| firstpa
756	movel	%sp@+,%d5		| fphysize -- last page
757	movel	%sp@,%a4		| esym
758
759	RELOC(vectab,%a0)		| set Vector Base Register temporaly
760	movc	%a0,%vbr
761
762#if 0	/* XXX this should be done by the boot loader */
763	RELOC(edata, %a0)		| clear out BSS
764	movl	#_C_LABEL(end)-4,%d0	| (must be <= 256 kB)
765	subl	#_C_LABEL(edata),%d0
766	lsrl	#2,%d0
7671:	clrl	%a0@+
768	dbra	%d0,1b
769#endif
770
771	ASRELOC(tmpstk, %a0)
772	movl	%a0,%sp			| give ourselves a temporary stack
773	RELOC(esym, %a0)
774#if 1
775	movl	%a4,%a0@		| store end of symbol table
776#else
777	clrl	%a0@			| no symbol table, yet
778#endif
779	RELOC(lowram, %a0)
780	movl	%a5,%a0@		| store start of physical memory
781
782	movl	#CACHE_OFF,%d0
783	movc	%d0,%cacr		| clear and disable on-chip cache(s)
784
785/* determine our CPU/MMU combo - check for all regardless of kernel config */
786	movl	#0x200,%d0		| data freeze bit
787	movc	%d0,%cacr		|   only exists on 68030
788	movc	%cacr,%d0		| read it back
789	tstl	%d0			| zero?
790	jeq	Lnot68030		| yes, we have 68020/68040/68060
791	jra	Lstart1			| no, we have 68030
792Lnot68030:
793	bset	#31,%d0			| data cache enable bit
794	movc	%d0,%cacr		|   only exists on 68040/68060
795	movc	%cacr,%d0		| read it back
796	tstl	%d0			| zero?
797	jeq	Lis68020		| yes, we have 68020
798	moveq	#0,%d0			| now turn it back off
799	movec	%d0,%cacr		|   before we access any data
800	.word	0xf4d8			| cinva bc - invalidate caches XXX
801	bset	#30,%d0			| data cache no allocate mode bit
802	movc	%d0,%cacr		|   only exists on 68060
803	movc	%cacr,%d0		| read it back
804	tstl	%d0			| zero?
805	jeq	Lis68040		| yes, we have 68040
806	RELOC(mmutype, %a0)		| no, we have 68060
807	movl	#MMU_68040,%a0@		| with a 68040 compatible MMU
808	RELOC(cputype, %a0)
809	movl	#CPU_68060,%a0@		| and a 68060 CPU
810	jra	Lstart1
811Lis68040:
812	RELOC(mmutype, %a0)
813	movl	#MMU_68040,%a0@		| with a 68040 MMU
814	RELOC(cputype, %a0)
815	movl	#CPU_68040,%a0@		| and a 68040 CPU
816	jra	Lstart1
817Lis68020:
818	RELOC(mmutype, %a0)
819	movl	#MMU_68851,%a0@		| we have PMMU
820	RELOC(cputype, %a0)
821	movl	#CPU_68020,%a0@		| and a 68020 CPU
822
823Lstart1:
824/* initialize source/destination control registers for movs */
825	moveq	#FC_USERD,%d0		| user space
826	movc	%d0,%sfc		|   as source
827	movc	%d0,%dfc		|   and destination of transfers
828/* initialize memory sizes (for pmap_bootstrap) */
829	movl	%d5,%d1			| last page
830	moveq	#PGSHIFT,%d2
831	lsrl	%d2,%d1			| convert to page (click) number
832	RELOC(maxmem, %a0)
833	movl	%d1,%a0@		| save as maxmem
834	movl	%a5,%d0			| lowram value from ROM via boot
835	lsrl	%d2,%d0			| convert to page number
836	subl	%d0,%d1			| compute amount of RAM present
837	RELOC(physmem, %a0)
838	movl	%d1,%a0@		| and physmem
839/* configure kernel and lwp0 VA space so we can get going */
840#if NKSYMS || defined(DDB) || defined(LKM)
841	RELOC(esym,%a0)			| end of static kernel test/data/syms
842	movl	%a0@,%d5
843	jne	Lstart2
844#endif
845	movl	#_C_LABEL(end),%d5	| end of static kernel text/data
846Lstart2:
847	addl	#PAGE_SIZE-1,%d5
848	andl	#PG_FRAME,%d5		| round to a page
849	movl	%d5,%a4
850	addl	%a5,%a4			| convert to PA
851	pea	%a5@			| firstpa
852	pea	%a4@			| nextpa
853	RELOC(pmap_bootstrap,%a0)
854	jbsr	%a0@			| pmap_bootstrap(firstpa, nextpa)
855	addql	#8,%sp
856
857/*
858 * Prepare to enable MMU.
859 * Since the kernel is not mapped logical == physical we must insure
860 * that when the MMU is turned on, all prefetched addresses (including
861 * the PC) are valid.  In order guarantee that, we use the last physical
862 * page (which is conveniently mapped == VA) and load it up with enough
863 * code to defeat the prefetch, then we execute the jump back to here.
864 *
865 * Is this all really necessary, or am I paranoid??
866 */
867	RELOC(Sysseg_pa, %a0)		| system segment table addr
868	movl	%a0@,%d1		| read value (a PA)
869	RELOC(mmutype, %a0)
870	cmpl	#MMU_68040,%a0@		| 68040?
871	jne	Lmotommu1		| no, skip
872	.long	0x4e7b1807		| movc d1,srp
873	jra	Lstploaddone
874Lmotommu1:
875	RELOC(protorp, %a0)
876	movl	#0x80000202,%a0@	| nolimit + share global + 4 byte PTEs
877	movl	%d1,%a0@(4)		| + segtable address
878	pmove	%a0@,%srp		| load the supervisor root pointer
879	movl	#0x80000002,%a0@	| reinit upper half for CRP loads
880Lstploaddone:
881	RELOC(mmutype, %a0)
882	cmpl	#MMU_68040,%a0@		| 68040?
883	jne	Lmotommu2		| no, skip
884#include "opt_jupiter.h"
885#ifdef JUPITER
886	/* JUPITER-X: set system register "SUPER" bit */
887	movl	#0x0200a240,%d0		| translate DRAM area transparently
888	.long	0x4e7b0006		| movc d0,dtt0
889	lea	0x00c00000,%a0		| a0: graphic VRAM
890	lea	0x02c00000,%a1		| a1: graphic VRAM ( not JUPITER-X )
891					|     DRAM ( JUPITER-X )
892	movw	%a0@,%d0
893	movw	%d0,%d1
894	notw	%d1
895	movw	%d1,%a1@
896	movw	%d0,%a0@
897	cmpw	%a1@,%d1		| JUPITER-X?
898	jne	Ljupiterdone		| no, skip
899	movl	#0x0100a240,%d0		| to access system register
900	.long	0x4e7b0006		| movc d0,dtt0
901	movb	#0x01,0x01800003	| set "SUPER" bit
902Ljupiterdone:
903#endif /* JUPITER */
904	moveq	#0,%d0			| ensure TT regs are disabled
905	.long	0x4e7b0004		| movc d0,itt0
906	.long	0x4e7b0005		| movc d0,itt1
907	.long	0x4e7b0006		| movc d0,dtt0
908	.long	0x4e7b0007		| movc d0,dtt1
909	.word	0xf4d8			| cinva bc
910	.word	0xf518			| pflusha
911	movl	#0x8000,%d0
912	.long	0x4e7b0003		| movc d0,tc
913#ifdef M68060
914	RELOC(cputype, %a0)
915	cmpl	#CPU_68060,%a0@		| 68060?
916	jne	Lnot060cache
917	movl	#1,%d0
918	.long	0x4e7b0808		| movcl d0,pcr
919	movl	#0xa0808000,%d0
920	movc	%d0,%cacr		| enable store buffer, both caches
921	jmp	Lenab1
922Lnot060cache:
923#endif
924	movl	#0x80008000,%d0
925	movc	%d0,%cacr		| turn on both caches
926	jmp	Lenab1
927Lmotommu2:
928	movl	#0x82c0aa00,%sp@-	| value to load TC with
929	pmove	%sp@,%tc		| load it
930
931/*
932 * Should be running mapped from this point on
933 */
934Lenab1:
935/* set vector base in virtual address */
936	movl	#_C_LABEL(vectab),%d0	| set Vector Base Register
937	movc	%d0,%vbr
938	lea	_ASM_LABEL(tmpstk),%sp	| temporary stack
939/* detect FPU type */
940	jbsr	_C_LABEL(fpu_probe)
941	movl	%d0,_C_LABEL(fputype)
942/* call final pmap setup */
943	jbsr	_C_LABEL(pmap_bootstrap_finalize)
944/* set kernel stack, user SP */
945	movl	_C_LABEL(lwp0uarea),%a1	| grab lwp0 uarea
946	lea	%a1@(USPACE-4),%sp	| set kernel stack to end of area
947	movl	#USRSTACK-4,%a2
948	movl	%a2,%usp		| init user SP
949
950	tstl	_C_LABEL(fputype)	| Have an FPU?
951	jeq	Lenab2			| No, skip.
952	clrl	%a1@(PCB_FPCTX)		| ensure null FP context
953	movl	%a1,%sp@-
954	jbsr	_C_LABEL(m68881_restore) | restore it (does not kill a1)
955	addql	#4,%sp
956Lenab2:
957	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
958	jeq	Ltbia040		| yes, cache already on
959	pflusha
960	tstl	_C_LABEL(mmutype)
961	jpl	Lenab3			| 68851 implies no d-cache
962	movl	#CACHE_ON,%d0
963	movc	%d0,%cacr		| clear cache(s)
964	jra	Lenab3
965Ltbia040:
966	.word	0xf518
967Lenab3:
968/* final setup for C code */
969	movl	%d7,_C_LABEL(boothowto)	| save reboot flags
970	movl	%d6,_C_LABEL(bootdev)	|   and boot device
971
972/*
973 * Create a fake exception frame so that cpu_lwp_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
975 * later on.
976 */
977	clrw	%sp@-			| vector offset/frame type
978	clrl	%sp@-			| PC - filled in by "execve"
979	movw	#PSL_USER,%sp@-		| in user mode
980	clrl	%sp@-			| stack adjust count and padding
981	lea	%sp@(-64),%sp		| construct space for D0-D7/A0-A7
982	lea	_C_LABEL(lwp0),%a0	| save pointer to frame
983	movl	%sp,%a0@(L_MD_REGS)	|   in lwp0.p_md.md_regs
984
985	jra	_C_LABEL(main)		| main()
986
987	PANIC("main() returned")	| Yow!  Main returned!
988	/* NOTREACHED */
989
990/*
991 * Use common m68k sigcode.
992 */
993#include <m68k/m68k/sigcode.s>
994#ifdef COMPAT_SUNOS
995#include <m68k/m68k/sunos_sigcode.s>
996#endif
997#ifdef COMPAT_SVR4
998#include <m68k/m68k/svr4_sigcode.s>
999#endif
1000
1001/*
1002 * Primitives
1003 */
1004
1005/*
1006 * Use common m68k support routines.
1007 */
1008#include <m68k/m68k/support.s>
1009
1010/*
1011 * Use common m68k process/lwp switch and context save subroutines.
1012 */
1013#define FPCOPROC	/* XXX: Temp. Reqd. */
1014#include <m68k/m68k/switch_subr.s>
1015
1016
#if defined(M68040) || defined(M68060)
/*
 * suline(void *uaddr, void *kbuf)
 *
 * Copy one 16-byte cache line (four longwords) from the kernel
 * buffer (second arg) to user space (first arg), writing through the
 * alternate (user) address space with "movsl".  A fault handler is
 * registered in curpcb->pcb_onfault for the duration of the copy.
 * Returns 0 in d0 on success, -1 if any of the writes faulted.
 * The "nop" after each movsl serializes the pipeline so a write
 * fault is taken before the next transfer (see "sync" comments).
 */
ENTRY(suline)
	movl	%sp@(4),%a0		| address to write
	movl	_C_LABEL(curpcb),%a1	| current pcb
	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
	movl	%sp@(8),%a1		| address of line
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	moveq	#0,%d0			| indicate no fault
	jra	Lsldone
Lslerr:
	moveq	#-1,%d0			| fault: return -1
Lsldone:
	movl	_C_LABEL(curpcb),%a1	| current pcb
	clrl	%a1@(PCB_ONFAULT)	| clear fault address
	rts
#endif
1044
/*
 * ecacheon() -- enable external cache.
 * No-op here; presumably this machine has no external cache
 * (body is just "rts") -- the entry exists for MI code to call.
 */
ENTRY(ecacheon)
	rts
1047
/*
 * ecacheoff() -- disable external cache.
 * No-op here, matching ecacheon() above; kept so MI callers link.
 */
ENTRY(ecacheoff)
	rts
1050
/*
 * getsfc() -- return the source function code register (SFC) in d0.
 */
ENTRY_NOPROFILE(getsfc)
	movc	%sfc,%d0
	rts
1054
/*
 * getdfc() -- return the destination function code register (DFC) in d0.
 */
ENTRY_NOPROFILE(getdfc)
	movc	%dfc,%d0
	rts
1058
1059/*
1060 * Load a new user segment table pointer.
1061 */
ENTRY(loadustp)
	movl	%sp@(4),%d0		| new USTP (in page-frame units)
	moveq	#PGSHIFT,%d1
	lsll	%d1,%d0			| convert to addr (frame << PGSHIFT)
#if defined(M68040) || defined(M68060)
	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
	jne	LmotommuC		| no, skip
	.word	0xf518			| pflusha (flush ATC first)
	.long	0x4e7b0806		| movc d0,urp (load user root ptr)
#ifdef M68060
	cmpl	#CPU_68060,_C_LABEL(cputype)
	jne	Lldno60			| 060 only: flush its branch cache
	movc	%cacr,%d0
	orl	#IC60_CUBC,%d0		| clear user branch cache entries
	movc	%d0,%cacr
Lldno60:
#endif
	rts
LmotommuC:
#endif
	| 68851/68030 path: load via the CPU root pointer (CRP)
	pflusha				| flush entire TLB
	lea	_C_LABEL(protorp),%a0	| CRP prototype
	movl	%d0,%a0@(4)		| stash USTP in its second long
	pmove	%a0@,%crp		| load root pointer
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	rts
1089
/*
 * ploadw(void *addr)
 *
 * Pre-load the ATC with the write translation for the given address
 * on the 68030 MMU.  Skipped on the 68040/060 (the ploadw
 * instruction is specific to the 851/030 MMUs); a no-op if M68030
 * is not configured.
 */
ENTRY(ploadw)
#if defined(M68030)
	movl	%sp@(4),%a0		| address to load
#if defined(M68040) || defined(M68060)
	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
	jeq	Lploadwskp		| yes, skip
#endif
	ploadw	#1,%a0@			| pre-load translation
Lploadwskp:
#endif
	rts
1101
1102/*
1103 * Set processor priority level calls.  Most are implemented with
1104 * inline asm expansions.  However, spl0 requires special handling
1105 * as we need to check for our emulated software interrupts.
1106 */
1107
/*
 * spl0() -- lower IPL to 0; returns the previous SR value in d0.
 *
 * If a software interrupt is pending (ssir), build a fake
 * exception (RTE) frame on the stack -- return address, SR, and a
 * type-0 frame word -- and jump into the interrupt dispatch path at
 * Lgotsir (defined elsewhere in this file) so the soft interrupt is
 * serviced before the caller resumes.
 */
ENTRY(spl0)
	moveq	#0,%d0
	movw	%sr,%d0			| get old SR for return
	movw	#PSL_LOWIPL,%sr		| restore new SR
	tstb	_C_LABEL(ssir)		| software interrupt pending?
	jeq	Lspldone		| no, all done
	subql	#4,%sp			| make room for RTE frame
	movl	%sp@(4),%sp@(2)		| position return address
	clrw	%sp@(6)			| set frame type 0
	movw	#PSL_LOWIPL,%sp@	| and new SR
	jra	Lgotsir			| go handle it
Lspldone:
	rts
1121
1122/*
1123 * _delay(u_int N)
1124 *
1125 * Delay for at least (N/256) microseconds.
1126 * This routine depends on the variable:  delay_divisor
1127 * which should be set based on the CPU clock rate.
1128 */
ENTRY_NOPROFILE(_delay)
	| d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| d1 = delay_divisor (set per CPU clock rate, see header above)
	movl	_C_LABEL(delay_divisor),%d1
L_delay:
	subl	%d1,%d0			| burn one divisor unit per pass
	jgt	L_delay			| spin until count goes non-positive
	rts
1138
1139/*
1140 * Save and restore 68881 state.
1141 */
/*
 * m68881_save(struct fpframe *fp)
 *
 * Save FPU state into *fp.  fsave dumps the FPU's internal state
 * frame; if that frame is non-null (FPU has live state) the
 * programmer-visible registers are saved too.  The 68060 path
 * differs: its null-frame flag lives at offset 2 of the frame, and
 * its control registers are moved one at a time.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
#if defined(M68020) || defined(M68030) || defined(M68040)
#if defined(M68060)
	cmpl	#FPU_68060,_C_LABEL(fputype)
	jeq	Lm68060fpsave		| 060 uses its own layout below
#endif
Lm68881fpsave:
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control registers
Lm68881sdone:
	rts
#endif
#if defined(M68060)
Lm68060fpsave:
	tstb	%a0@(2)			| null state frame? (060 flag at +2)
	jeq	Lm68060sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
	fmovem	%fpcr,%a0@(FPF_FPCR)	| save FP control registers
	fmovem	%fpsr,%a0@(FPF_FPSR)	|   (one register per fmovem on 060)
	fmovem	%fpi,%a0@(FPF_FPI)
Lm68060sdone:
	rts
#endif
1169
/*
 * m68881_restore(struct fpframe *fp)
 *
 * Restore FPU state from *fp: the inverse of m68881_save() above.
 * Programmer-visible registers are reloaded first (only if the
 * saved frame is non-null), then frestore reloads the internal
 * state frame.  The 68060 path mirrors its save path: null-frame
 * flag at offset 2, control registers moved one at a time.
 */
ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
#if defined(M68020) || defined(M68030) || defined(M68040)
#if defined(M68060)
	cmpl	#FPU_68060,_C_LABEL(fputype)
	jeq	Lm68060fprestore	| 060 uses its own layout below
#endif
Lm68881fprestore:
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control registers
	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
Lm68881rdone:
	frestore %a0@			| restore state
	rts
#endif
#if defined(M68060)
Lm68060fprestore:
	tstb	%a0@(2)			| null state frame? (060 flag at +2)
	jeq	Lm68060fprdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr	| restore FP control registers
	fmovem	%a0@(FPF_FPSR),%fpsr	|   (one register per fmovem on 060)
	fmovem	%a0@(FPF_FPI),%fpi
	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
Lm68060fprdone:
	frestore %a0@			| restore state
	rts
#endif
1198
1199/*
1200 * Handle the nitty-gritty of rebooting the machine.
1201 * Basically we just turn off the MMU and jump to the appropriate ROM routine.
1202 * Note that we must be running in an address range that is mapped one-to-one
1203 * logical to physical so that the PC is still valid immediately after the MMU
1204 * is turned off.  We have conveniently mapped the last page of physical
1205 * memory this way.
1206 */
ENTRY_NOPROFILE(doboot)
	movw	#PSL_HIGHIPL,%sr	| cut off any interrupts
	subal	%a1,%a1			| a1 = 0

	movl	#CACHE_OFF,%d0
#if defined(M68040) || defined(M68060)
	movl	_C_LABEL(mmutype),%d2	| d2 = mmutype
	addl	#(-1 * MMU_68040),%d2		| 68040? (d2 == 0 iff 040 MMU)
	jne	Ldoboot0		| no, skip
	.word	0xf4f8			| cpusha bc - push and invalidate caches
	nop
	movl	#CACHE40_OFF,%d0
Ldoboot0:
#endif
	movc	%d0,%cacr		| disable on-chip cache(s)

	| ok, turn off MMU..
Ldoreboot:
#if defined(M68040) || defined(M68060)
	tstl	%d2			| 68040? (flag computed above)
	jne	LmotommuF		| no, skip
	movc	%a1,%cacr		| caches off
	.long	0x4e7b9003		| movc a1(=0),tc ; disable MMU
	jra	Ldoreboot1
LmotommuF:
#endif
	| 851/030 MMU: disable by loading a zero TC via pmove
	clrl	%sp@
	pmove	%sp@,%tc		| disable MMU
Ldoreboot1:
	moveml	0x00ff0000,#0x0101	| get RESET vectors in ROM
					|	(d0: ssp, a0: pc)
	moveml	#0x0101,%a1@		| put them at 0x0000 (for Xellent30)
	movc	%a1,%vbr		| reset Vector Base Register
	jmp	%a0@			| reboot X680x0
Lebootcode:
1242
1243/*
1244 * Misc. global variables.
1245 */
	.data
GLOBAL(machineid)
	.long	0		| default to X68030

GLOBAL(mmutype)
	.long	MMU_68030	| default to 030 internal MMU

GLOBAL(cputype)
	.long	CPU_68030	| default to 68030 CPU

#ifdef M68K_MMU_HP
GLOBAL(ectype)
	.long	EC_NONE		| external cache type, default to none
#endif

GLOBAL(fputype)
	.long	FPU_NONE	| set at boot by fpu_probe() (see Lenab1)

GLOBAL(protorp)
	.long	0,0		| prototype root pointer (USTP goes in 2nd long)

GLOBAL(intiobase)
	.long	0		| KVA of base of internal IO space

GLOBAL(intiolimit)
	.long	0		| KVA of end of internal IO space

#ifdef DEBUG
ASGLOBAL(fulltflush)
	.long	0		| debug: force full TLB flushes
ASGLOBAL(fullcflush)
	.long	0		| debug: force full cache flushes
#endif

/* interrupt counters */

| NUL-terminated name strings; order matches the intrcnt[] slots below
GLOBAL(intrnames)
	.asciz	"spur"
	.asciz	"lev1"
	.asciz	"lev2"
	.asciz	"lev3"
	.asciz	"lev4"
	.asciz	"lev5"
	.asciz	"lev6"
	.asciz	"nmi"
	.asciz	"clock"
	.asciz	"com"
GLOBAL(eintrnames)
	.even			| realign after odd-length strings

| ten counters, one per name above
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
1300