1/* $NetBSD: locore.s,v 1.15 2002/05/14 02:03:00 matt Exp $ */
2
3/*
4 * Copyright (c) 1988 University of Utah.
5 * Copyright (c) 1980, 1990, 1993
6 *	The Regents of the University of California.  All rights reserved.
7 *
8 * This code is derived from software contributed to Berkeley by
9 * the Systems Programming Group of the University of Utah Computer
10 * Science Department.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by the University of
23 *	California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 *    may be used to endorse or promote products derived from this software
26 *    without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * from: Utah $Hdr: locore.s 1.66 92/12/22$
41 *
42 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
43 */
44
45#include "opt_compat_netbsd.h"
46#include "opt_ddb.h"
47#include "opt_fpsp.h"
48#include "opt_kgdb.h"
49#include "opt_lockdebug.h"
50
51#include "assym.h"
52#include <machine/asm.h>
53#include <machine/trap.h>
54
55#define	PRINT(msg) \
56	pea	9f		; \
57	jbsr	_C_LABEL(printf); \
58	addl	#4,%sp		; \
59	.data			; \
60     9:	.asciz	msg		; \
61	.text
62#undef	PRINT
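/*
 * Note: the PRINT(msg) macro above is a small debugging aid that pushes
 * the address of an inline .asciz string and calls printf(); it is
 * #undef'd right away and unused in this file, apparently kept only as
 * a reference for hand-made printf() calls from assembly.
 */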
63
64/*
65 * Temporary stack for a variety of purposes.
66 * Try and make this the first thing in the data segment so it
67 * is page aligned.  Note that if we overflow here, we run into
68 * our text segment.
69 */
70	.data
71	.space	NBPG
72ASLOCAL(tmpstk)
73
74#include <luna68k/luna68k/vectors.s>
75
76/*
77 * Macro to relocate a symbol, used before MMU is enabled.
78 */
79#define	_RELOC(var,ar)		\
80	lea	var,ar;		\
81	addl	%a5,ar
82#define	RELOC(var,ar)		_RELOC(_C_LABEL(var), ar)
83#define	ASRELOC(var,ar)	_RELOC(_ASM_LABEL(var), ar)
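/*
 * For example, RELOC(lowram,%a0) expands to
 *	lea	_C_LABEL(lowram),%a0 ; addl %a5,%a0
 * i.e. the link-time (virtual) address of the symbol plus the load
 * offset kept in %a5, which is 0 here since RAM starts at address 0.
 */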
84
85BSS(lowram,4)
86BSS(esym,4)
87
88/*
89 * This is for kvm_mkdb, and should be the address of the beginning
90 * of the kernel text segment (not necessarily the same as kernbase).
91 */
92	.text
93GLOBAL(kernel_text)
94
95/*
96 * start of kernel and .text!
97 */
98ASENTRY_NOPROFILE(start)
99	movw	#PSL_HIGHIPL,%sr	| no interrupts
100	movl	#0,%a5			| RAM starts at 0
101	ASRELOC(tmpstk,%a0)
102	movl	%a0,%sp			| give ourselves a temporary stack
103
104#if 0 /* not sure these are useful values; we need a bootloader tailored for us */
105	RELOC(boothowto,%a0)
106	movl	%d7,%a0@		| save boothowto
107	RELOC(bootdev,%a0)
108	movl	%d6,%a0@		| save bootdev
109	RELOC(esym,%a0)
110	movl	%d5,%a0@		| save esym
111#endif
112	RELOC(edata,%a0)		| clear out BSS
113	movl	#_C_LABEL(end)-4,%d0	| (must be <= 256 kB)
114	subl	#_C_LABEL(edata),%d0
115	lsrl	#2,%d0
1161:	clrl	%a0@+
117	dbra	%d0,1b
118
119#if 1
120	RELOC(esym,%a0)
121	clrl	%a0@			| store end of symbol table XXX
122#endif
123	RELOC(lowram,%a0)
124	movl	%a5,%a0@		| store start of physical memory
125
126	movl	#0x41000000,%a0		| available memory in bytes
127	movl	%a0@(12),%a0		| (int *)base[3]
128	movl	%a0@,%d5
129	RELOC(memavail,%a0)
130	movl	%d5,%a0@		| save memavail
131
132	movl	#0x41000000,%a0		| planemask; 0x0f or 0xff
133	movl	%a0@(184),%a0		| (int *)base[46]
134	movl	%a0@,%d5
135	RELOC(hwplanemask,%a0)
136	movl	%d5,%a0@		| save hwplanemask
137
138	movl	#0x41000000,%a0		| argument of 'x' command on boot
139	movl	%a0@(212),%a0		| (char *)base[53]
140	RELOC(bootarg,%a1)
141	movl	#63,%d0
1421:	movb	%a0@+,%a1@+		| copy to bootarg
143	dbra	%d0,1b			| up to 63 characters
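/*
 * A rough C view of the ROM monitor work-area reads above, using the
 * base[] indices noted in the comments (base[3], base[46], base[53]);
 * this is only a sketch, not the monitor's real data layout:
 *
 *	int **base = (int **)0x41000000;
 *	memavail    = *base[3];			-- available memory in bytes
 *	hwplanemask = *base[46];		-- planemask, 0x0f or 0xff
 *	memcpy(bootarg, (char *)base[53], 64);	-- 'x' command argument
 */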
144
145	movl	#CACHE_OFF,%d0
146	movc	%d0,%cacr		| clear and disable on-chip cache(s)
147
148/* determine our CPU/MMU combo - check for all regardless of kernel config */
149	movl	#0x200,%d0		| data freeze bit
150	movc	%d0,%cacr		|   only exists on 68030
151	movc	%cacr,%d0		| read it back
152	tstl	%d0			| zero?
153	jeq	Lnot68030		| yes, we have 68040
154	movl	#CPU_68030,%d0
155	movl	#MMU_68030,%d1
156	movl	#FPU_68881,%d2
157	jra	Lstart0
158Lnot68030:
159	movl	#CPU_68040,%d0
160	movl	#MMU_68040,%d1
161	movl	#FPU_68040,%d2
162Lstart0:
163	RELOC(cputype,%a0)
164	movl	%d0,%a0@
165	RELOC(mmutype,%a0)
166	movl	%d1,%a0@
167	RELOC(fputype,%a0)
168	movl	%d2,%a0@
169
170	/*
171	 * Now that we know what CPU we have, initialize the address error
172	 * and bus error handlers in the vector table:
173	 *
174	 *	vectab+8	bus error
175	 *	vectab+12	address error
176	 */
177	lea	_C_LABEL(cputype),%a0
178	lea	_C_LABEL(vectab),%a2
179#if defined(M68040)
180	cmpl	#CPU_68040,%a0@		| 68040?
181	jne	1f			| no, skip
182	movl	#_C_LABEL(buserr40),%a2@(8)
183	movl	#_C_LABEL(addrerr4060),%a2@(12)
184	jra	Lstart2
1851:
186#endif
187	cmpl	#CPU_68030,%a0@		| 68030?
188	jne	1f			| no, skip
189	movl	#_C_LABEL(busaddrerr2030),%a2@(8)
190	movl	#_C_LABEL(busaddrerr2030),%a2@(12)
191	jra	Lstart2
1921:
193	/* Config botch; no hope. */
194	PANIC("Config botch in locore")
195
196Lstart2:
197/* initialize source/destination control registers for movs */
198	moveq	#FC_USERD,%d0		| user space
199	movc	%d0,%sfc			|   as source
200	movc	%d0,%dfc			|   and destination of transfers
201/* initialize memory sizes (for pmap_bootstrap) */
202	RELOC(memavail,%a0)
203	movl	%a0@,%d1
204	moveq	#PGSHIFT,%d2
205	lsrl	%d2,%d1			| convert to page (click) number
206	RELOC(maxmem,%a0)
207	movl	%d1,%a0@		| save as maxmem
208	RELOC(physmem,%a0)
209	movl	%d1,%a0@		| and physmem
210/* configure kernel and proc0 VA space so we can get going */
211#ifdef DDB
212	RELOC(esym,%a0)			| end of static kernel text/data/syms
213	movl	%a0@,%d2
214	jne	Lstart3
215#endif
216	RELOC(end,%a0)
217	movl	%a0,%d2			| end of static kernel text/data
218Lstart3:
219	addl	#NBPG-1,%d2
220	andl	#PG_FRAME,%d2		| round to a page
221	movl	%d2,%a4
222	addl	%a5,%a4			| convert to PA
223	pea	%a5@			| firstpa
224	pea	%a4@			| nextpa
225	RELOC(pmap_bootstrap,%a0)
226	jbsr	%a0@			| pmap_bootstrap(firstpa, nextpa)
227	addql	#8,%sp
228/*
229 * Enable the MMU.
230 * Since the kernel is mapped logical == physical, we just turn it on.
231 */
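/*
 * The .long/.word constants in the 68040 path below are hand-assembled
 * instructions (the intended mnemonic is given beside each one),
 * presumably because the assembler did not accept the 68040-only
 * control registers and cache/ATC operations directly.
 */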
232	RELOC(Sysseg,%a0)		| system segment table addr
233	movl	%a0@,%d1		| read value (a KVA)
234	addl	%a5,%d1			| convert to PA (%a5 == 0, indeed)
235#if defined(M68040)
236	RELOC(mmutype,%a0)
237	cmpl	#MMU_68040,%a0@		| 68040?
238	jne	Lmotommu1		| no, skip
239Lmotommu0:
240	.long	0x4e7b1807		| movc %d1,%srp
241	RELOC(proto040tt0,%a0)
242	movl	%a0@,%d0		| tt0 range 4000.0000-7fff.ffff
243	.long	0x4e7b0004		| movc %d0,%itt0
244	.long	0x4e7b0006		| movc %d0,%dtt0
245	RELOC(proto040tt1,%a0)
246	movl	%a0@,%d0		| tt1 range 8000.0000-feff.ffff
247	.long	0x4e7b0005		| movc %d0,%itt1
248	.long	0x4e7b0007		| movc %d0,%dtt1
249	.word	0xf4d8			| cinva bc
250	pflusha				| flush entire ATC
251	RELOC(proto040tc,%a0)
252	movl	%a0@,%d0
253	.long	0x4e7b0003		| movc %d0,%tc
254	movl	#0x80008000,%d0
255	movc	%d0,%cacr		| turn on both caches
256	jmp	Lenab1
257Lmotommu1:
258#endif
259	RELOC(protosrp,%a0)		| nolimit + share global + 4 byte PTEs
260	movl	%d1,%a0@(4)		| + segtable address
261	RELOC(protocrp,%a1)
262	movl	%d1,%a1@(4)		| set lower half of %CRP
263	pmove	%a0@,%srp		| load the supervisor root pointer
264	RELOC(protott0,%a0)		| tt0 range 4000.0000-7fff.ffff
265	.long	0xf0100800		| pmove %a0@,mmutt0
266	RELOC(protott1,%a0)		| tt1 range 8000.0000-ffff.ffff
267	.long	0xf0100c00		| pmove %a0@,mmutt1
268	RELOC(prototc,%a0)		| %tc: SRP,CRP,4KB page,A=10bit,B=10bit
269	pmove	%a0@,%tc
270/*
271 * Should be running mapped from this point on
272 */
273Lenab1:
274/* select the software page size now */
275	lea	_ASM_LABEL(tmpstk),%sp	| temporary stack
276	jbsr	_C_LABEL(uvm_setpagesize) | select software page size
277
278/* set kernel stack, user SP, proc0, and initial pcb */
279	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
280	lea	%a1@(USPACE-4),%sp	| set kernel stack to end of area
281	lea	_C_LABEL(proc0),%a2	| initialize proc0.p_addr so that
282	movl	%a1,%a2@(P_ADDR)	|   we don't deref NULL in trap()
283	movl	#USRSTACK-4,%a2
284	movl	%a2,%usp		| init user SP
285	movl	%a1,_C_LABEL(curpcb)	| proc0 is running
286
287	tstl	_C_LABEL(fputype)	| Have an FPU?
288	jeq	Lenab2			| No, skip.
289	clrl	%a1@(PCB_FPCTX)		| ensure null FP context
290	movl	%a1,%sp@-
291	jbsr	_C_LABEL(m68881_restore) | restore it (does not kill %a1)
292	addql	#4,%sp
293Lenab2:
294	pflusha				| flush entire ATC
295	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
296	jeq	Lenab3			| yes, cache already on
297	tstl	_C_LABEL(mmutype)
298	jpl	Lenab3			| 68851 implies no d-cache
299	movl	#CACHE_ON,%d0
300	movc	%d0,%cacr		| clear cache(s)
301Lenab3:
302
303/* final setup for C code */
304	movl	#_C_LABEL(vectab),%d0	| get our %vbr address
305	movc	%d0,%vbr
306	jbsr	_C_LABEL(luna68k_init)	| additional pre-main initialization
307
308/*
309 * Create a fake exception frame that returns to user mode,
310 * and save its address in p->p_md.md_regs for cpu_fork().
311 * The new frames for process 1 and 2 will be adjusted by
312 * cpu_set_kpc() to arrange for a call to a kernel function
313 * before the new process does its rte out to user mode.
314 */
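/*
 * Layout of the frame built below, from the lowest address up: the 16
 * saved registers %d0-%d7/%a0-%a7 (64 bytes), the stack adjust count
 * plus padding, an SR of PSL_USER, a zero PC to be filled in by execve,
 * and the format/vector word -- i.e. a normal trap frame as trap() and
 * the rei path expect it.
 */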
315	clrw	%sp@-			| vector offset/frame type
316	clrl	%sp@-			| PC - filled in by "execve"
317	movw	#PSL_USER,%sp@-		| in user mode
318	clrl	%sp@-			| stack adjust count and padding
319	lea	%sp@(-64),%sp		| construct space for %D0-%D7/%A0-%A7
320	lea	_C_LABEL(proc0),%a0	| save pointer to frame
321	movl	%sp,%a0@(P_MD_REGS)	|   in proc0.p_md.md_regs
322	jra	_C_LABEL(main)		| main()
323	PANIC("main() returned")
324	/* NOTREACHED */
325
326/*
327 * proc_trampoline: call function in register %a2 with %a3 as an arg
328 * and then rei.
329 */
330GLOBAL(proc_trampoline)
331	movl	%a3,%sp@-		| process' frame pointer in sp
332	jbsr	%a2@
333	addql	#4,%sp
334	movl	%sp@(FR_SP),%a0		| grab and load
335	movl	%a0,%usp		|   user SP
336	moveml	%sp@+,#0x7FFF		| restore most user regs
337	addql	#8,%sp			| toss SP and stack adjust
338	jra	_ASM_LABEL(rei)		| and return
339
340/*
341 * Trap/interrupt vector routines
342 */
343#include <m68k/m68k/trap_subr.s>
344
345	.data
346GLOBAL(m68k_fault_addr)
347	.long	0
348
349#if defined(M68040) || defined(M68060)
350ENTRY_NOPROFILE(addrerr4060)
351	clrl	%sp@-			| stack adjust count
352	moveml	#0xFFFF,%sp@-		| save user registers
353	movl	%usp,%a0		| save the user SP
354	movl	%a0,%sp@(FR_SP)		|   in the savearea
355	movl	%sp@(FR_HW+8),%sp@-
356	clrl	%sp@-			| dummy code
357	movl	#T_ADDRERR,%sp@-	| mark address error
358	jra	_ASM_LABEL(faultstkadj)	| and deal with it
359#endif
360
361#if defined(M68060)
362ENTRY_NOPROFILE(buserr60)
363	clrl	%sp@-			| stack adjust count
364	moveml	#0xFFFF,%sp@-		| save user registers
365	movl	%usp,%a0		| save the user SP
366	movl	%a0,%sp@(FR_SP)		|   in the savearea
367	movel	%sp@(FR_HW+12),%d0	| FSLW
368	btst	#2,%d0			| branch prediction error?
369	jeq	Lnobpe
370	movc	%cacr,%d2
371	orl	#IC60_CABC,%d2		| clear all branch cache entries
372	movc	%d2,%cacr
373	movl	%d0,%d1
374	addql	#1,L60bpe
375	andl	#0x7ffd,%d1
376	jeq	_ASM_LABEL(faultstkadjnotrap2)
377Lnobpe:
378| we need to adjust for misaligned addresses
379	movl	%sp@(FR_HW+8),%d1	| grab VA
380	btst	#27,%d0			| check for mis-aligned access
381	jeq	Lberr3			| no, skip
382	addl	#28,%d1			| yes, get into next page
383					| operand case: 3,
384					| instruction case: 4+12+12
385	andl	#PG_FRAME,%d1           | and truncate
386Lberr3:
387	movl	%d1,%sp@-
388	movl	%d0,%sp@-		| code is FSLW now.
389	andw	#0x1f80,%d0
390	jeq	Lberr60			| it is a bus error
391	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
392	jra	_ASM_LABEL(faultstkadj)	| and deal with it
393Lberr60:
394	tstl	_C_LABEL(nofault)	| catch bus error?
395	jeq	Lisberr			| no, handle as usual
396	movl	%sp@(FR_HW+8+8),_C_LABEL(m68k_fault_addr) | save fault addr
397	movl	_C_LABEL(nofault),%sp@-	| yes,
398	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
399	/* NOTREACHED */
400#endif
401#if defined(M68040)
402ENTRY_NOPROFILE(buserr40)
403	clrl	%sp@-			| stack adjust count
404	moveml	#0xFFFF,%sp@-		| save user registers
405	movl	%usp,%a0		| save the user SP
406	movl	%a0,%sp@(FR_SP)		|   in the savearea
407	movl	%sp@(FR_HW+20),%d1	| get fault address
408	moveq	#0,%d0
409	movw	%sp@(FR_HW+12),%d0	| get SSW
410	btst	#11,%d0			| check for mis-aligned
411	jeq	Lbe1stpg		| no, skip
412	addl	#3,%d1			| get into next page
413	andl	#PG_FRAME,%d1		| and truncate
414Lbe1stpg:
415	movl	%d1,%sp@-		| pass fault address.
416	movl	%d0,%sp@-		| pass SSW as code
417	btst	#10,%d0			| test ATC
418	jeq	Lberr40			| it is a bus error
419	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
420	jra	_ASM_LABEL(faultstkadj)	| and deal with it
421Lberr40:
422	tstl	_C_LABEL(nofault)	| catch bus error?
423	jeq	Lisberr			| no, handle as usual
424	movl	%sp@(FR_HW+8+20),_C_LABEL(m68k_fault_addr) | save fault addr
425	movl	_C_LABEL(nofault),%sp@-	| yes,
426	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
427	/* NOTREACHED */
428#endif
429
430ENTRY_NOPROFILE(busaddrerr2030)
431	clrl	%sp@-			| stack adjust count
432	moveml	#0xFFFF,%sp@-		| save user registers
433	movl	%usp,%a0		| save the user SP
434	movl	%a0,%sp@(FR_SP)		|   in the savearea
435	moveq	#0,%d0
436	movw	%sp@(FR_HW+10),%d0	| grab SSW for fault processing
437	btst	#12,%d0			| RB set?
438	jeq	LbeX0			| no, test RC
439	bset	#14,%d0			| yes, must set FB
440	movw	%d0,%sp@(FR_HW+10)	| for hardware too
441LbeX0:
442	btst	#13,%d0			| RC set?
443	jeq	LbeX1			| no, skip
444	bset	#15,%d0			| yes, must set FC
445	movw	%d0,%sp@(FR_HW+10)	| for hardware too
446LbeX1:
447	btst	#8,%d0			| data fault?
448	jeq	Lbe0			| no, check for hard cases
449	movl	%sp@(FR_HW+16),%d1	| fault address is as given in frame
450	jra	Lbe10			| that's it
451Lbe0:
452	btst	#4,%sp@(FR_HW+6)	| long (type B) stack frame?
453	jne	Lbe4			| yes, go handle
454	movl	%sp@(FR_HW+2),%d1	| no, can use saved PC
455	btst	#14,%d0			| FB set?
456	jeq	Lbe3			| no, try FC
457	addql	#4,%d1			| yes, adjust address
458	jra	Lbe10			| done
459Lbe3:
460	btst	#15,%d0			| FC set?
461	jeq	Lbe10			| no, done
462	addql	#2,%d1			| yes, adjust address
463	jra	Lbe10			| done
464Lbe4:
465	movl	%sp@(FR_HW+36),%d1	| long format, use stage B address
466	btst	#15,%d0			| FC set?
467	jeq	Lbe10			| no, all done
468	subql	#2,%d1			| yes, adjust address
469Lbe10:
470	movl	%d1,%sp@-		| push fault VA
471	movl	%d0,%sp@-		| and padded SSW
472	movw	%sp@(FR_HW+8+6),%d0	| get frame format/vector offset
473	andw	#0x0FFF,%d0		| clear out frame format
474	cmpw	#12,%d0			| address error vector?
475	jeq	Lisaerr			| yes, go to it
476	movl	%d1,%a0			| fault address
477	movl	%sp@,%d0		| function code from ssw
478	btst	#8,%d0			| data fault?
479	jne	Lbe10a
480	movql	#1,%d0			| user program access FC
481					| (we don't separate data/program)
482	btst	#5,%sp@(FR_HW+8)	| supervisor mode?
483	jeq	Lbe10a			| if no, done
484	movql	#5,%d0			| else supervisor program access
485Lbe10a:
486	ptestr	%d0,%a0@,#7		| do a table search
487	pmove	%psr,%sp@		| save result
488	movb	%sp@,%d1
489	btst	#2,%d1			| invalid (incl. limit viol. and berr)?
490	jeq	Lmightnotbemerr		| no -> wp check
491	btst	#7,%d1			| is it MMU table berr?
492	jne	Lisberr1		| yes, need not be fast.
493Lismerr:
494	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
495	jra	_ASM_LABEL(faultstkadj)	| and deal with it
496Lmightnotbemerr:
497	btst	#3,%d1			| write protect bit set?
498	jeq	Lisberr1		| no: must be bus error
499	movl	%sp@,%d0		| ssw into low word of %d0
500	andw	#0xc0,%d0		| Write protect is set on page:
501	cmpw	#0x40,%d0		| was it read cycle?
502	jne	Lismerr			| no, was not WPE, must be MMU fault
503	jra	Lisberr1		| real bus err need not be fast.
504Lisaerr:
505	movl	#T_ADDRERR,%sp@-	| mark address error
506	jra	_ASM_LABEL(faultstkadj)	| and deal with it
507Lisberr1:
508	clrw	%sp@			| re-clear pad word
509	tstl	_C_LABEL(nofault)	| catch bus error?
510	jeq	Lisberr			| no, handle as usual
511	movl	%sp@(FR_HW+8+16),_C_LABEL(m68k_fault_addr) | save fault addr
512	movl	_C_LABEL(nofault),%sp@-	| yes,
513	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
514	/* NOTREACHED */
515	.even
516Lisberr:				| also used by M68040/60
517	movl	#T_BUSERR,%sp@-		| mark bus error
518	jra	_ASM_LABEL(faultstkadj)	| and deal with it
519
520/*
521 * FP exceptions.
522 */
523ENTRY_NOPROFILE(fpfline)
524#if defined(M68040)
525	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
526	jne	Lfp_unimp		| no, skip FPSP
527	cmpw	#0x202c,%sp@(6)		| format type 2?
528	jne	_C_LABEL(illinst)	| no, not an FP emulation
529Ldofp_unimp:
530#ifdef FPSP
531	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
532#endif
533Lfp_unimp:
534#endif /* M68040 */
535#ifdef FPU_EMULATE
536	clrl	%sp@-			| stack adjust count
537	moveml	#0xFFFF,%sp@-		| save registers
538	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
539	jra	_ASM_LABEL(fault)	| do it
540#else
541	jra	_C_LABEL(illinst)
542#endif
543
544ENTRY_NOPROFILE(fpunsupp)
545#if defined(M68040)
546	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
547	jne	_C_LABEL(illinst)	| no, treat as illinst
548#ifdef FPSP
549	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
550#endif
551Lfp_unsupp:
552#endif /* M68040 */
553#ifdef FPU_EMULATE
554	clrl	%sp@-			| stack adjust count
555	moveml	#0xFFFF,%sp@-		| save registers
556	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
557	jra	_ASM_LABEL(fault)	| do it
558#else
559	jra	_C_LABEL(illinst)
560#endif
561
562/*
563 * Handles all other FP coprocessor exceptions.
564 * Note that since some FP exceptions generate mid-instruction frames
565 * and may cause signal delivery, we need to test for stack adjustment
566 * after the trap call.
567 */
568ENTRY_NOPROFILE(fpfault)
569	clrl	%sp@-		| stack adjust count
570	moveml	#0xFFFF,%sp@-	| save user registers
571	movl	%usp,%a0	| and save
572	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
573	clrl	%sp@-		| no VA arg
574	movl	_C_LABEL(curpcb),%a0 | current pcb
575	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
576	fsave	%a0@		| save state
577#if defined(M68040) || defined(M68060)
578	/* always null state frame on 68040, 68060 */
579	cmpl	#FPU_68040,_C_LABEL(fputype)
580	jle	Lfptnull
581#endif
582	tstb	%a0@		| null state frame?
583	jeq	Lfptnull	| yes, safe
584	clrw	%d0		| no, need to tweak BIU
585	movb	%a0@(1),%d0	| get frame size
586	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
587Lfptnull:
588	fmovem	%fpsr,%sp@-	| push fpsr as code argument
589	frestore %a0@		| restore state
590	movl	#T_FPERR,%sp@-	| push type arg
591	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
592
593/*
594 * Other exceptions only cause four and six word stack frame and require
595 * no post-trap stack adjustment.
596 */
597
598ENTRY_NOPROFILE(badtrap)
599	moveml	#0xC0C0,%sp@-		| save scratch regs
600	movw	%sp@(22),%sp@-		| push exception vector info
601	clrw	%sp@-
602	movl	%sp@(22),%sp@-		| and PC
603	jbsr	_C_LABEL(straytrap)	| report
604	addql	#8,%sp			| pop args
605	moveml	%sp@+,#0x0303		| restore regs
606	jra	_ASM_LABEL(rei)		| all done
607
608ENTRY_NOPROFILE(trap0)
609	clrl	%sp@-			| stack adjust count
610	moveml	#0xFFFF,%sp@-		| save user registers
611	movl	%usp,%a0		| save the user SP
612	movl	%a0,%sp@(FR_SP)		|   in the savearea
613	movl	%d0,%sp@-		| push syscall number
614	jbsr	_C_LABEL(syscall)	| handle it
615	addql	#4,%sp			| pop syscall arg
616	tstl	_C_LABEL(astpending)
617	jne	Lrei2
618	tstb	_C_LABEL(ssir)
619	jeq	Ltrap1
620	movw	#SPL1,%sr
621	tstb	_C_LABEL(ssir)
622	jne	Lsir1
623Ltrap1:
624	movl	%sp@(FR_SP),%a0		| grab and restore
625	movl	%a0,%usp		|   user SP
626	moveml	%sp@+,#0x7FFF		| restore most registers
627	addql	#8,%sp			| pop SP and stack adjust
628	rte
629
630/*
631 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
632 *	cachectl(command, addr, length)
633 * command in %d0, addr in %a1, length in %d1
634 */
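/*
 * The pushes below amount to the C call (a sketch; the argument order
 * follows from pushing the last argument first):
 *
 *	cachectl1(command, addr, length, curproc);
 */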
635ENTRY_NOPROFILE(trap12)
636	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
637	movl	%d1,%sp@-		| push length
638	movl	%a1,%sp@-		| push addr
639	movl	%d0,%sp@-		| push command
640	jbsr	_C_LABEL(cachectl1)	| do it
641	lea	%sp@(16),%sp		| pop args
642	jra	_ASM_LABEL(rei)		| all done
643
644/*
645 * Trace (single-step) trap.  Kernel-mode is special.
646 * User mode traps are simply passed on to trap().
647 */
648ENTRY_NOPROFILE(trace)
649	clrl	%sp@-			| stack adjust count
650	moveml	#0xFFFF,%sp@-
651	moveq	#T_TRACE,%d0
652
653	| Check PSW and see what happened.
654	|   T=0 S=0	(should not happen)
655	|   T=1 S=0	trace trap from user mode
656	|   T=0 S=1	trace trap on a trap instruction
657	|   T=1 S=1	trace trap from system mode (kernel breakpoint)
658
659	movw	%sp@(FR_HW),%d1		| get PSW
660	notw	%d1			| XXX no support for T0 on 680[234]0
661	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
662	jeq	Lkbrkpt			| yes, kernel breakpoint
663	jra	_ASM_LABEL(fault)	| no, user-mode fault
664
665/*
666 * Trap 15 is used for:
667 *	- GDB breakpoints (in user programs)
668 *	- KGDB breakpoints (in the kernel)
669 *	- trace traps for SUN binaries (not fully supported yet)
670 * User mode traps are simply passed to trap().
671 */
672ENTRY_NOPROFILE(trap15)
673	clrl	%sp@-			| stack adjust count
674	moveml	#0xFFFF,%sp@-
675	moveq	#T_TRAP15,%d0
676	movw	%sp@(FR_HW),%d1		| get PSW
677	andw	#PSL_S,%d1		| from system mode?
678	jne	Lkbrkpt			| yes, kernel breakpoint
679	jra	_ASM_LABEL(fault)	| no, user-mode fault
680
681Lkbrkpt: | Kernel-mode breakpoint or trace trap. (%d0=trap_type)
682	| Save the system sp rather than the user sp.
683	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
684	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
685	movl	%a6,%sp@(FR_SP)		|  from before trap
686
687	| If we are not on tmpstk, switch to it.
688	| (so debugger can change the stack pointer)
689	movl	%a6,%d1
690	cmpl	#_ASM_LABEL(tmpstk),%d1
691	jls	Lbrkpt2			| already on tmpstk
692	| Copy frame to the temporary stack
693	movl	%sp,%a0			| %a0=src
694	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
695	movl	%a1,%sp			| sp=new frame
696	moveq	#FR_SIZE,%d1
697Lbrkpt1:
698	movl	%a0@+,%a1@+
699	subql	#4,%d1
700	jgt	Lbrkpt1
701
702Lbrkpt2:
703	| Call the trap handler for the kernel debugger.
704	| Do not call trap() to do it, so that we can
705	| set breakpoints in trap() if we want.  We know
706	| the trap type is either T_TRACE or T_BREAKPOINT.
707	| If we have both DDB and KGDB, let KGDB see it first,
708	| because KGDB will just return 0 if not connected.
709	| Save args in %d2, %a2
710	movl	%d0,%d2			| trap type
711	movl	%sp,%a2			| frame ptr
712#ifdef KGDB
713	| Let KGDB handle it (if connected)
714	movl	%a2,%sp@-		| push frame ptr
715	movl	%d2,%sp@-		| push trap type
716	jbsr	_C_LABEL(kgdb_trap)	| handle the trap
717	addql	#8,%sp			| pop args
718	cmpl	#0,%d0			| did kgdb handle it?
719	jne	Lbrkpt3			| yes, done
720#endif
721#ifdef DDB
722	| Let DDB handle it
723	movl	%a2,%sp@-		| push frame ptr
724	movl	%d2,%sp@-		| push trap type
725	jbsr	_C_LABEL(kdb_trap)	| handle the trap
726	addql	#8,%sp			| pop args
727#if 0	/* not needed on hp300 */
728	cmpl	#0,%d0			| did ddb handle it?
729	jne	Lbrkpt3			| yes, done
730#endif
731#endif
732	/* Sun 3 drops into PROM here. */
733Lbrkpt3:
734	| The stack pointer may have been modified, or
735	| data below it modified (by kgdb push call),
736	| so push the hardware frame at the current sp
737	| before restoring registers and returning.
738
739	movl	%sp@(FR_SP),%a0		| modified sp
740	lea	%sp@(FR_SIZE),%a1	| end of our frame
741	movl	%a1@-,%a0@-		| copy 2 longs with
742	movl	%a1@-,%a0@-		| ... predecrement
743	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
744	moveml	%sp@+,#0x7FFF		| restore all but sp
745	movl	%sp@,%sp		| ... and sp
746	rte				| all done
747
748/* Use common m68k sigreturn */
749#include <m68k/m68k/sigreturn.s>
750
751/*
752 * Interrupt handlers.
753 *
754 * For auto-vectored interrupts, the CPU provides the
755 * vector 0x18+level.  Note we count spurious interrupts,
756 * but don't do anything else with them.
757 *
758 * _intrhand_autovec is the entry point for auto-vectored
759 * interrupts.
760 *
761 * For vectored interrupts, we pull the pc, evec, and exception frame
762 * and pass them to the vectored interrupt dispatcher.  The vectored
763 * interrupt dispatcher will deal with strays.
764 *
765 * _intrhand_vectored is the entry point for vectored interrupts.
766 */
767
768#define INTERRUPT_SAVEREG	moveml	#0xC0C0,%sp@-
769#define INTERRUPT_RESTOREREG	moveml	%sp@+,#0x0303
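/*
 * The mask #0xC0C0 with the predecrement form (bit 15 = %d0 ... bit 0 =
 * %a7) and #0x0303 with the postincrement form (bit 0 = %d0 ... bit 15 =
 * %a7) both name the caller-saved scratch registers %d0-%d1/%a0-%a1.
 */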
770
771ENTRY_NOPROFILE(spurintr)		/* Level 0 */
772	addql	#1,_C_LABEL(intrcnt)+0
773	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
774	jra	_ASM_LABEL(rei)
775
776ENTRY_NOPROFILE(intrhand_autovec)	/* Levels 1 through 6 */
777	INTERRUPT_SAVEREG
778	movw	%sp@(22),%sp@-		| push exception vector
779	clrw	%sp@-
780	jbsr	_C_LABEL(isrdispatch_autovec)	| call dispatcher
781	addql	#4,%sp
782	INTERRUPT_RESTOREREG
783	jra	_ASM_LABEL(rei)		| all done
784
785ENTRY_NOPROFILE(lev7intr)		/* Level 7: NMI */
786	addql	#1,_C_LABEL(intrcnt)+32
787	clrl	%sp@-
788	moveml	#0xFFFF,%sp@-		| save registers
789	movl	%usp,%a0		| and save
790	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
791	jbsr	_C_LABEL(nmihand)	| call handler
792	movl	%sp@(FR_SP),%a0		| restore
793	movl	%a0,%usp		|   user SP
794	moveml	%sp@+,#0x7FFF		| and remaining registers
795	addql	#8,%sp			| pop SP and stack adjust
796	jra	_ASM_LABEL(rei)		| all done
797
798ENTRY_NOPROFILE(intrhand_vectored)
799	INTERRUPT_SAVEREG
800	lea	%sp@(16),%a1		| get pointer to frame
801	movl	%a1,%sp@-
802	movw	%sp@(26),%d0
803	movl	%d0,%sp@-		| push exception vector info
804	movl	%sp@(26),%sp@-		| and PC
805	jbsr	_C_LABEL(isrdispatch_vectored)	| call dispatcher
806	lea	%sp@(12),%sp		| pop value args
807	INTERRUPT_RESTOREREG
808	jra	_ASM_LABEL(rei)		| all done
809
810#if 1	/* XXX wild timer -- how can I disable/enable the interrupt? */
811ENTRY_NOPROFILE(lev5intr)
812	btst	#7,0x63000000		| check whether system clock
813	beq	1f
814	movb	#1,0x63000000		| clear the interrupt
815	tstl	_C_LABEL(clock_enable)	| is hardclock() available?
816	jeq	1f
817	INTERRUPT_SAVEREG
818	lea	%sp@(16),%a1		| %a1 = &clockframe
819	movl	%a1,%sp@-
820	jbsr	_C_LABEL(hardclock)	| hardclock(&frame)
821	addql	#4,%sp
822	addql	#1,_C_LABEL(intrcnt)+20
823	INTERRUPT_RESTOREREG
8241:
825	jra	_ASM_LABEL(rei)		| all done
826#endif
827
828#undef INTERRUPT_SAVEREG
829#undef INTERRUPT_RESTOREREG
830
831/*
832 * Emulation of VAX REI instruction.
833 *
834 * This code deals with checking for and servicing ASTs
835 * (profiling, scheduling) and software interrupts (network, softclock).
836 * We check for ASTs first, just like the VAX.  To avoid excess overhead
837 * the T_ASTFLT handling code will also check for software interrupts so we
838 * do not have to do it here.  After identifying that we need an AST we
839 * drop the IPL to allow device interrupts.
840 *
841 * This code is complicated by the fact that sendsig may have been called
842 * necessitating a stack cleanup.
843 */
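/*
 * Roughly, in C-like pseudocode (a sketch of the flow below, with
 * illustrative condition names, not a literal translation):
 *
 *	if (astpending && frame_returns_to_user_mode) {
 *		lower IPL;
 *		trap(T_ASTFLT, 0, 0);
 *	} else if (ssir && interrupted_ipl_was_zero) {
 *		raise IPL to SPL1;
 *		trap(T_SSIR, 0, 0);
 *	}
 *	rte;
 */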
844BSS(ssir,1)
845
846ASENTRY_NOPROFILE(rei)
847	tstl	_C_LABEL(astpending)	| AST pending?
848	jeq	Lchksir			| no, go check for SIR
849Lrei1:
850	btst	#5,%sp@			| yes, are we returning to user mode?
851	jne	Lchksir			| no, go check for SIR
852	movw	#PSL_LOWIPL,%sr		| lower SPL
853	clrl	%sp@-			| stack adjust
854	moveml	#0xFFFF,%sp@-		| save all registers
855	movl	%usp,%a1		| including
856	movl	%a1,%sp@(FR_SP)		|    the user's SP
857Lrei2:
858	clrl	%sp@-			| VA == none
859	clrl	%sp@-			| code == none
860	movl	#T_ASTFLT,%sp@-		| type == async system trap
861	jbsr	_C_LABEL(trap)		| go handle it
862	lea	%sp@(12),%sp		| pop value args
863	movl	%sp@(FR_SP),%a0		| restore user SP
864	movl	%a0,%usp		|   from save area
865	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
866	jne	Laststkadj		| yes, go to it
867	moveml	%sp@+,#0x7FFF		| no, restore most user regs
868	addql	#8,%sp			| toss SP and stack adjust
869	rte				| and do real RTE
870Laststkadj:
871	lea	%sp@(FR_HW),%a1		| pointer to HW frame
872	addql	#8,%a1			| source pointer
873	movl	%a1,%a0			| source
874	addw	%d0,%a0			|  + hole size = dest pointer
875	movl	%a1@-,%a0@-		| copy
876	movl	%a1@-,%a0@-		|  8 bytes
877	movl	%a0,%sp@(FR_SP)		| new SSP
878	moveml	%sp@+,#0x7FFF		| restore user registers
879	movl	%sp@,%sp		| and our SP
880	rte				| and do real RTE
881Lchksir:
882	tstb	_C_LABEL(ssir)		| SIR pending?
883	jeq	Ldorte			| no, all done
884	movl	%d0,%sp@-		| need a scratch register
885	movw	%sp@(4),%d0		| get SR
886	andw	#PSL_IPL7,%d0		| mask all but IPL
887	jne	Lnosir			| came from interrupt, no can do
888	movl	%sp@+,%d0		| restore scratch register
889Lgotsir:
890	movw	#SPL1,%sr		| prevent others from servicing int
891	tstb	_C_LABEL(ssir)		| too late?
892	jeq	Ldorte			| yes, oh well...
893	clrl	%sp@-			| stack adjust
894	moveml	#0xFFFF,%sp@-		| save all registers
895	movl	%usp,%a1		| including
896	movl	%a1,%sp@(FR_SP)		|    the user's SP
897Lsir1:
898	clrl	%sp@-			| VA == none
899	clrl	%sp@-			| code == none
900	movl	#T_SSIR,%sp@-		| type == software interrupt
901	jbsr	_C_LABEL(trap)		| go handle it
902	lea	%sp@(12),%sp		| pop value args
903	movl	%sp@(FR_SP),%a0		| restore
904	movl	%a0,%usp		|   user SP
905	moveml	%sp@+,#0x7FFF		| and all remaining registers
906	addql	#8,%sp			| pop SP and stack adjust
907	rte
908Lnosir:
909	movl	%sp@+,%d0		| restore scratch register
910Ldorte:
911	rte				| real return
912
913/*
914 * Use common m68k sigcode.
915 */
916#include <m68k/m68k/sigcode.s>
917#ifdef COMPAT_SUNOS
918#include <m68k/m68k/sunos_sigcode.s>
919#endif
920#ifdef COMPAT_SVR4
921#include <m68k/m68k/svr4_sigcode.s>
922#endif
923
924/*
925 * Primitives
926 */
927
928/*
929 * Use common m68k support routines.
930 */
931#include <m68k/m68k/support.s>
932
933/*
934 * Use common m68k process manipulation routines.
935 */
936#include <m68k/m68k/proc_subr.s>
937
938	.data
939GLOBAL(curpcb)
940GLOBAL(masterpaddr)
941	.long	0
942
943ASLOCAL(mdpflag)
944	.byte	0		| copy of proc md_flags low byte
945	.align	2
946
947ASBSS(nullpcb, SIZEOF_PCB)
948
949/*
950 * At exit of a process, do a switch for the last time.
951 * Switch to a safe stack and PCB, and select a new process to run.  The
952 * old stack and u-area will be freed by the reaper.
953 *
954 * MUST BE CALLED AT SPLHIGH!
955 */
956ENTRY(switch_exit)
957	movl	%sp@(4),%a0
958	/* save state into garbage pcb */
959	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
960	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack
961
962	/* Schedule the vmspace and stack to be freed. */
963	movl	%a0,%sp@-		| exit2(p)
964	jbsr	_C_LABEL(exit2)
965	lea	%sp@(4),%sp		| pop args
966
967#if defined(LOCKDEBUG)
968	/* Acquire sched_lock */
969	jbsr	_C_LABEL(sched_lock_idle)
970#endif
971
972	jra	_C_LABEL(cpu_switch)
973
974/*
975 * When no processes are on the runq, Swtch branches to Idle
976 * to wait for something to come ready.
977 */
978ASENTRY_NOPROFILE(Idle)
979#if defined(LOCKDEBUG)
980	/* Release sched_lock */
981	jbsr	_C_LABEL(sched_unlock_idle)
982#endif
983	stop	#PSL_LOWIPL
984	movw	#PSL_HIGHIPL,%sr
985#if defined(LOCKDEBUG)
986	/* Acquire sched_lock */
987	jbsr	_C_LABEL(sched_lock_idle)
988#endif
989	movl    _C_LABEL(sched_whichqs),%d0
990	jeq     _ASM_LABEL(Idle)
991	jra	Lsw1
992
993Lbadsw:
994	PANIC("switch")
995	/*NOTREACHED*/
996
997/*
998 * cpu_switch()
999 *
1000 * NOTE: On the mc68851 we attempt to avoid flushing the
1001 * entire ATC.  The effort involved in selective flushing may not be
1002 * worth it; maybe we should just flush the whole thing?
1003 *
1004 * NOTE 2: With the new VM layout we no longer know if an inactive
1005 * user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
1006 * bit).  For now, we just always flush the full ATC.
1007 */
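/*
 * The bit fiddling at Lsw1 below isolates the lowest set bit of
 * sched_whichqs (d1 & -d1), locates it with bfffo and converts that to
 * a byte offset into sched_qs[] (8-byte queue heads).  A C sketch of
 * the same selection, with "first" standing for the queue's head link:
 *
 *	int qnum = ffs(sched_whichqs) - 1;	-- lowest set bit wins
 *	p = sched_qs[qnum].first;		-- "first" is illustrative
 */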
1008ENTRY(cpu_switch)
1009	movl	_C_LABEL(curpcb),%a0	| current pcb
1010	movw	%sr,%a0@(PCB_PS)	| save sr before changing ipl
1011#ifdef notyet
1012	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
1013#endif
1014	clrl	_C_LABEL(curproc)
1015
1016	/*
1017	 * Find the highest-priority queue that isn't empty,
1018	 * then take the first proc from that queue.
1019	 */
1020	movl	_C_LABEL(sched_whichqs),%d0
1021	jeq	_ASM_LABEL(Idle)
1022Lsw1:
1023	/*
1024	 * Interrupts are blocked, sched_lock is held.  If
1025	 * we come here via Idle, %d0 contains the contents
1026	 * of a non-zero sched_whichqs.
1027	 */
1028	movl	%d0,%d1
1029	negl	%d0
1030	andl	%d1,%d0
1031	bfffo	%d0{#0:#32},%d1
1032	eorib	#31,%d1
1033
1034	movl	%d1,%d0
1035	lslb	#3,%d1			| convert queue number to index
1036	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
1037	movl	%d1,%a1
1038	movl	%a1@(P_FORW),%a0	| p = q->p_forw
1039	cmpal	%d1,%a0			| anyone on queue?
1040	jeq	Lbadsw			| no, panic
1041#ifdef DIAGNOSTIC
1042	tstl	%a0@(P_WCHAN)
1043	jne	Lbadsw
1044	cmpb	#SRUN,%a0@(P_STAT)
1045	jne	Lbadsw
1046#endif
1047	movl	%a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
1048	movl	%a0@(P_FORW),%a1	| n = p->p_forw
1049	movl	%d1,%a1@(P_BACK)	| n->p_back = q
1050	cmpal	%d1,%a1			| anyone left on queue?
1051	jne	Lsw2			| yes, skip
1052	movl	_C_LABEL(sched_whichqs),%d1
1053	bclr	%d0,%d1			| no, clear bit
1054	movl	%d1,_C_LABEL(sched_whichqs)
1055Lsw2:
1056	/* p->p_cpu initialized in fork1() for single-processor */
1057	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
1058	movl	%a0,_C_LABEL(curproc)
1059	clrl	_C_LABEL(want_resched)
1060#ifdef notyet
1061	movl	%sp@+,%a1
1062	cmpl	%a0,%a1			| switching to same proc?
1063	jeq	Lswdone			| yes, skip save and restore
1064#endif
1065	/*
1066	 * Save state of previous process in its pcb.
1067	 */
1068	movl	_C_LABEL(curpcb),%a1
1069	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
1070	movl	%usp,%a2		| grab %USP (%a2 has been saved)
1071	movl	%a2,%a1@(PCB_USP)	| and save it
1072
1073	tstl	_C_LABEL(fputype)	| Do we have an FPU?
1074	jeq	Lswnofpsave		| No?  Then don't attempt save.
1075	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
1076	fsave	%a2@			| save FP state
1077#if defined(M68030) || defined(M68040)
1078	tstb	%a2@			| null state frame?
1079	jeq	Lswnofpsave		| yes, all done
1080	fmovem	%fp0-%fp7,%a2@(FPF_REGS) | save FP general registers
1081	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR) | save FP control registers
1082#endif
1083Lswnofpsave:
1084
1085
1086	clrl	%a0@(P_BACK)		| clear back link
1087	movb	%a0@(P_MD_FLAGS+3),mdpflag | low byte of p_md.md_flags
1088	movl	%a0@(P_ADDR),%a1	| get p_addr
1089	movl	%a1,_C_LABEL(curpcb)
1090
1091#if defined(LOCKDEBUG)
1092	/*
1093	 * Done mucking with the run queues, release the
1094	 * scheduler lock, but keep interrupts out.
1095	 */
1096	movl	%a0,%sp@-		| not args...
1097	movl	%a1,%sp@-		| ...just saving
1098	jbsr	_C_LABEL(sched_unlock_idle)
1099	movl	%sp@+,%a1
1100	movl	%sp@+,%a0
1101#endif
1102
1103	/*
1104	 * Activate process's address space.
1105	 * XXX Should remember the last USTP value loaded, and call this
1106	 * XXX only if it has changed.
1107	 */
1108	pea	%a0@			| push proc
1109	jbsr	_C_LABEL(pmap_activate)	| pmap_activate(p)
1110	addql	#4,%sp
1111	movl	_C_LABEL(curpcb),%a1	| restore p_addr
1112
1113	lea     _ASM_LABEL(tmpstk),%sp  | now goto a tmp stack for NMI
1114
1115	moveml	%a1@(PCB_REGS),#0xFCFC	| and registers
1116	movl	%a1@(PCB_USP),%a0
1117	movl	%a0,%usp		| and %usp
1118
1119	tstl	_C_LABEL(fputype)	| If we don't have an FPU,
1120	jeq	Lnofprest		|  don't try to restore it.
1121	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
1122	tstb	%a0@			| null state frame?
1123	jeq	Lresfprest		| yes, easy
1124#if defined(M68030) || defined(M68040)
1125#if defined(M68040)
1126	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040?
1127	jne	Lresnot040		| no, skip
1128	clrl	%sp@-			| yes...
1129	frestore %sp@+			| ...magic!
1130Lresnot040:
1131#endif
1132	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control registers
1133	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1134#endif
1135Lresfprest:
1136	frestore %a0@			| restore state
1137Lnofprest:
1138	movw	%a1@(PCB_PS),%sr	| no, restore PS
1139	moveq	#1,%d0			| return 1 (for alternate returns)
1140	rts
1141
1142/*
1143 * savectx(pcb)
1144 * Update pcb, saving current processor state.
1145 */
1146ENTRY(savectx)
1147	movl	%sp@(4),%a1
1148	movw	%sr,%a1@(PCB_PS)
1149	movl	%usp,%a0		| grab %usp
1150	movl	%a0,%a1@(PCB_USP)	| and save it
1151	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
1152
1153	tstl	_C_LABEL(fputype)	| Do we have FPU?
1154	jeq	Lsvnofpsave		| No?  Then don't save state.
1155	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
1156	fsave	%a0@			| save FP state
1157	tstb	%a0@			| null state frame?
1158	jeq	Lsvnofpsave		| yes, all done
1159	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1160	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control registers
1161Lsvnofpsave:
1162	moveq	#0,%d0			| return 0
1163	rts
1164
1165#if defined(M68040)
1166ENTRY(suline)
1167	movl	%sp@(4),%a0		| address to write
1168	movl	_C_LABEL(curpcb),%a1	| current pcb
1169	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
1170	movl	%sp@(8),%a1		| address of line
1171	movl	%a1@+,%d0		| get lword
1172	movsl	%d0,%a0@+		| put lword
1173	nop				| sync
1174	movl	%a1@+,%d0		| get lword
1175	movsl	%d0,%a0@+		| put lword
1176	nop				| sync
1177	movl	%a1@+,%d0		| get lword
1178	movsl	%d0,%a0@+		| put lword
1179	nop				| sync
1180	movl	%a1@+,%d0		| get lword
1181	movsl	%d0,%a0@+		| put lword
1182	nop				| sync
1183	moveq	#0,%d0			| indicate no fault
1184	jra	Lsldone
1185Lslerr:
1186	moveq	#-1,%d0
1187Lsldone:
1188	movl	_C_LABEL(curpcb),%a1	| current pcb
1189	clrl	%a1@(PCB_ONFAULT)	| clear fault address
1190	rts
1191#endif
1192
1193
1194ENTRY(ecacheon)
1195	rts
1196
1197ENTRY(ecacheoff)
1198	rts
1199
1200/*
1201 * Get callers current SP value.
1202 * Note that simply taking the address of a local variable in a C function
1203 * doesn't work because callee saved registers may be outside the stack frame
1204 * defined by %A6 (e.g. GCC generated code).
1205 */
1206ENTRY(getsp)
1207	movl	%sp,%d0			| get current SP
1208	addql	#4,%d0			| compensate for return address
1209	rts
1210
1211ENTRY_NOPROFILE(getsfc)
1212	movc	%sfc,%d0
1213	rts
1214
1215ENTRY_NOPROFILE(getdfc)
1216	movc	%dfc,%d0
1217	rts
1218
1219/*
1220 * Load a new user segment table pointer.
1221 */
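/*
 * The argument below is a page frame number; it is shifted left by
 * PGSHIFT to form the physical segment table address, then loaded into
 * %urp on the 68040 or stashed in the protocrp prototype and pmoved
 * into %crp on the 68030.
 */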
1222ENTRY(loadustp)
1223	movl	%sp@(4),%d0		| new USTP
1224	moveq	#PGSHIFT, %d1
1225	lsll	%d1,%d0			| convert to addr
1226#if defined(M68040)
1227	cmpl    #MMU_68040,_C_LABEL(mmutype) | 68040?
1228	jne     LmotommuC               | no, skip
1229	.long   0x4e7b0806              | movc %d0,%urp
1230	rts
1231LmotommuC:
1232#endif
1233	lea	_C_LABEL(protocrp),%a0	| %crp prototype
1234	movl	%d0,%a0@(4)		| stash USTP
1235	pmove	%a0@,%crp		| load root pointer
1236	movl	#DC_CLEAR,%d0
1237	movc	%d0,%cacr		| invalidate on-chip d-cache
1238	rts				|   since pmove flushes ATC
1239
1240ENTRY(ploadw)
1241#if defined(M68040)
1242	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1243	jeq	Lploadwskp		| yes, skip
1244#endif
1245	movl	%sp@(4),%a0		| address to load
1246	ploadw	#1,%a0@			| pre-load translation
1247#if defined(M68040)
1248Lploadwskp:
1249#endif
1250	rts
1251
1252/*
1253 * Set processor priority level calls.  Most are implemented with
1254 * inline asm expansions.  However, spl0 requires special handling
1255 * as we need to check for our emulated software interrupts.
1256 */
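/*
 * When a software interrupt is pending, spl0 below rewrites its own
 * return address into a format-0 exception frame (SR, PC, format/vector
 * word) and jumps to Lgotsir, so the rte at the end of the rei path
 * returns directly to spl0's caller.
 */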
1257
1258ENTRY(spl0)
1259	moveq	#0,%d0
1260	movw	%sr,%d0			| get old SR for return
1261	movw	#PSL_LOWIPL,%sr		| restore new SR
1262	tstb	_C_LABEL(ssir)		| software interrupt pending?
1263	jeq	Lspldone		| no, all done
1264	subql	#4,%sp			| make room for RTE frame
1265	movl	%sp@(4),%sp@(2)		| position return address
1266	clrw	%sp@(6)			| set frame type 0
1267	movw	#PSL_LOWIPL,%sp@	| and new SR
1268	jra	Lgotsir			| go handle it
1269Lspldone:
1270	rts
1271
1272ENTRY(getsr)
1273	moveq	#0,%d0
1274	movw	%sr,%d0
1275	rts
1276
1277/*
1278 * _delay(unsigned N)
1279 *
1280 * Delay for at least (N/256) microseconds.
1281 * This routine depends on the variable:  delay_divisor
1282 * which should be set based on the CPU clock rate.
1283 */
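/*
 * A C sketch of the loop below, where n is the caller's (usecs << 8)
 * argument and delay_divisor is set from the CPU clock rate:
 *
 *	void _delay(unsigned int n)
 *	{
 *		int d = n;
 *		do {
 *			d -= delay_divisor;
 *		} while (d > 0);
 *	}
 */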
1284GLOBAL(_delay)
1285	| %d0 = arg = (usecs << 8)
1286	movl	%sp@(4),%d0
1287	| %d1 = delay_divisor
1288	movl	_C_LABEL(delay_divisor),%d1
1289L_delay:
1290	subl	%d1,%d0
1291	jgt	L_delay
1292	rts
1293
1294/*
1295 * Save and restore 68881 state.
1296 */
1297ENTRY(m68881_save)
1298	movl	%sp@(4),%a0		| save area pointer
1299	fsave	%a0@			| save state
1300	tstb	%a0@			| null state frame?
1301	jeq	Lm68881sdone		| yes, all done
1302	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1303	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR) | save FP control registers
1304Lm68881sdone:
1305	rts
1306
1307ENTRY(m68881_restore)
1308	movl	%sp@(4),%a0		| save area pointer
1309	tstb	%a0@			| null state frame?
1310	jeq	Lm68881rdone		| yes, easy
1311	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi | restore FP control registers
1312	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1313Lm68881rdone:
1314	frestore %a0@			| restore state
1315	rts
1316
1317/*
1318 * Do a dump.
1319 * Called by auto-restart.
1320 */
1321GLOBAL(doadump)
1322	jbsr	_C_LABEL(dumpsys)
1323	jbsr	_C_LABEL(doboot)
1324	/*NOTREACHED*/
1325
1326/*
1327 * Handle the nitty-gritty of rebooting the machine.
1328 * Basically we just turn off the MMU, restore the initial %vbr
1329 * and return to the monitor.
1330 */
1331ENTRY_NOPROFILE(doboot)
1332        movw	#PSL_HIGHIPL,%sr	| no interrupts
1333#if defined(M68040)
1334	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1335	jeq	Lnocache5		| yes, skip
1336#endif
1337	movl	#CACHE_OFF,%d0
1338	movc	%d0,%cacr		| disable on-chip cache(s)
1339	movl	#0,%a7@			| value for pmove to %tc
1340	pmove	%a7@,%tc		| disable MMU
1341	lea	_ASM_LABEL(nullrp),%a0
1342	pmove	%a0@,%crp		| Invalidate CPU root pointer
1343	pmove	%a0@,%srp		| and the Supervisor root pointer
1344	jra	Lbootcommon
1345#if defined(M68040)
1346Lnocache5:
1347	.word	0xf4f8			| cpusha bc
1348	movql	#0,%d0
1349	movc	%d0,%cacr
1350	.long	0x4e7b0003		| movc %d0,%tc (disable MMU)
1351	.long	0x4e7b0806		| movc %d0,%urp
1352	.long	0x4e7b0807		| movc %d0,%srp
1353#endif /* M68040 */
1354Lbootcommon:
1355	movl	_C_LABEL(boothowto),%d0	| load howto
1356	movl	%sp@(4),%d2		| arg
1357	lea	_ASM_LABEL(tmpstk),%sp	| physical SP in case of NMI
1358	movl	#0,%d3
1359	movc	%d3,%vbr		| monitor %vbr
1360#if 0
1361	andl	#0,%d0			| mask off
1362	tstl	%d0			|
1363	bne	Lsboot			| sboot?
1364	tstl	%d2
1365	beq	Ldoreset
1366#endif
1367
1368	movl	#0x41000000,%a0		| base = (int **)0x4100.0000
1369	movl	%a0@,%d0		| *((int *)base[0])
1370	movc	%d0,%isp		| set initial stack pointer
1371	movc	%d0,%msp		| set initial stack pointer
1372	movl	%a0@(4),%a0		| *((int (*)(void))base[1])
1373	jmp	%a0@			| go cold boot!
1374
1375	.data
1376GLOBAL(cputype)
1377	.long	CPU_68030	| default to 68030
1378GLOBAL(mmutype)
1379	.long	MMU_68030	| default to 68030
1380GLOBAL(fputype)
1381	.long	FPU_68881	| default to 68881
1382
1383GLOBAL(protosrp)
1384	.long	0x80000202,0	| prototype supervisor root pointer
1385GLOBAL(protocrp)
1386	.long	0x80000002,0	| prototype cpu root pointer
1387
1388GLOBAL(prototc)
1389	.long	0x82c0aa00	| %tc (SRP,CRP,4KB page, TIA/TIB=10/10bits)
1390GLOBAL(protott0)		| tt0 0x4000.0000-0x7fff.ffff
1391	.long	0x403f8543	|
1392GLOBAL(protott1)		| tt1 0x8000.0000-0xffff.ffff
1393	.long	0x807f8543	|
1394GLOBAL(proto040tc)
1395	.long	0x8000		| %tc (4KB page)
1396GLOBAL(proto040tt0)		| tt0 0x4000.0000-0x7fff.ffff
1397	.long	0x403fa040	| kernel only, cache inhibit, serialized
1398GLOBAL(proto040tt1)		| tt1 0x8000.0000-0xfeff.ffff
1399	.long	0x807ea040	| kernel only, cache inhibit, serialized
1400nullrp:
1401	.long	0x7fff0001	| do-nothing MMU root pointer
1402
1403GLOBAL(memavail)
1404	.long	0
1405GLOBAL(want_resched)
1406	.long   0
1407GLOBAL(proc0paddr)
1408	.long	0
1409GLOBAL(bootdev)
1410	.long	0
1411GLOBAL(hwplanemask)
1412	.long	0
1413GLOBAL(bootarg)
1414	.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
1415
1416#ifdef DEBUG
1417ASGLOBAL(fulltflush)
1418	.long	0
1419
1420ASGLOBAL(fullcflush)
1421	.long	0
1422#endif
1423
1424GLOBAL(intiobase)
1425	.long	0		| KVA of base of internal IO space
1426GLOBAL(intiolimit)
1427	.long	0		| KVA of end of internal IO space
1428GLOBAL(intiobase_phys)
1429	.long	0		| PA of board's I/O registers
1430GLOBAL(intiotop_phys)
1431	.long	0		| PA of top of board's I/O registers
1432
1433GLOBAL(intrnames)
1434	.asciz	"spur"
1435	.asciz	"lev1"
1436	.asciz	"scsi"
1437	.asciz	"network"
1438	.asciz	"lev4"
1439	.asciz	"clock"
1440	.asciz	"serial"
1441	.asciz	"nmi"
1442	.asciz	"statclock"
1443GLOBAL(eintrnames)
1444	.even
1445GLOBAL(intrcnt)
1446	.long	0,0,0,0,0,0,0,0,0,0
1447GLOBAL(eintrcnt)
1448