1/*	$NetBSD: locore.s,v 1.34 2002/05/17 18:03:50 jdolecek Exp $	*/
2
3/*
4 * Copyright (c) 1998 Darrin B. Jewell
5 * Copyright (c) 1994, 1995 Gordon W. Ross
6 * Copyright (c) 1988 University of Utah.
7 * Copyright (c) 1980, 1990, 1993
8 *	The Regents of the University of California.  All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * the Systems Programming Group of the University of Utah Computer
12 * Science Department.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 *    notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 *    notice, this list of conditions and the following disclaimer in the
21 *    documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 *    must display the following acknowledgement:
24 *	This product includes software developed by the University of
25 *	California, Berkeley and its contributors.
26 * 4. Neither the name of the University nor the names of its contributors
27 *    may be used to endorse or promote products derived from this software
28 *    without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 * from: Utah $Hdr: locore.s 1.66 92/12/22$
43 *
44 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
45 */
46
47/* This is currently under development by
48 * Darrin Jewell <jewell@mit.edu>  Fri Jan  2 14:36:47 1998
49 * for the next68k port
50 */
51
52#include "opt_compat_netbsd.h"
53#include "opt_compat_svr4.h"
54#include "opt_compat_sunos.h"
55#include "opt_ddb.h"
56#include "opt_fpsp.h"
57#include "opt_kgdb.h"
58#include "opt_lockdebug.h"
59
60#include "assym.h"
61#include <machine/asm.h>
62#include <machine/trap.h>
63
64#if (!defined(M68040))
65#error "M68040 is not defined! (check that the generated assym.h is not empty)"
66#endif
67
68/*
69 * This is for kvm_mkdb, and should be the address of the beginning
70 * of the kernel text segment (not necessarily the same as kernbase).
71 */
72	.text
73GLOBAL(kernel_text)
74
75 /*
76  * Leave page zero empty so it can be unmapped
77  */
78	.space	NBPG
79
80/*
81 * Temporary stack for a variety of purposes.
82 */
83	.data
84GLOBAL(endstack)
85	.space	NBPG
86GLOBAL(bgnstack)
87ASLOCAL(tmpstk)
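/*
 * tmpstk labels the high end of the NBPG of space above; the stack
 * grows downward from tmpstk toward endstack.
 */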
88
89#include <next68k/next68k/vectors.s>
90
91/*
92 * Macro to relocate a symbol, used before MMU is enabled.
93 * On the NeXT, memory is laid out as in the mach header,
94 * so we need to relocate symbols by hand until the MMU
95 * is turned on.
96 */
97#define	_RELOC(var, ar)		\
98	lea	var,ar;		\
99	addl	%a5,ar
100
101#define	RELOC(var, ar)		_RELOC(_C_LABEL(var), ar)
102#define	ASRELOC(var, ar)	_RELOC(_ASM_LABEL(var), ar)
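
/*
 * For example, before the MMU is on, "RELOC(lowram,%a0)" expands to
 *	lea	_C_LABEL(lowram),%a0 ; addl %a5,%a0
 * i.e. the link-time (virtual) address of lowram plus the NEXT_RAMBASE
 * offset kept in %a5, which gives the symbol's current physical address.
 */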
103
104/*
105 * Initialization info as per grossman's boot loader:
106 *
107 * We are called from the boot prom, not the boot loader. We have the
108 * prom's stack initialized for us and we were called like this:
109 * start(mg, mg->mg_console_i, mg->mg_console_o,
110 *       mg->mg_boot_dev, mg->mg_boot_arg, mg->mg_boot_info,
111 *       mg->mg_sid, mg->mg_pagesize, 4, mg->mg_region,
112 *       etheraddr, mg->mg_boot_file);
113 * so we really only need the first parameter from the stack.
114 * Exceptions will be handled by the prom until we feel ready to handle
115 * them ourselves.
116 * By the way, we get loaded at our final address i.e. PA==VA for the kernel.
117 */
118 /* I suspect the PA==VA statement above is not true, but I have yet to verify it.
119  * Darrin B Jewell <jewell@mit.edu>  Sun Jan 11 01:05:54 1998
120 */
121BSS(lowram,4)
122BSS(esym,4)
123
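/*
 * The code below saves the PROM's %sp, switches to tmpstk, and hands
 * that saved %sp to next68k_bootargs() as its single argument: a
 * pointer to the parameter words listed above, of which only the
 * first (mg) really matters.
 */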
124ASENTRY_NOPROFILE(start)
125	movw	#PSL_HIGHIPL,%sr	| no interrupts
126	movl	#CACHE_OFF,%d0
127	movc	%d0,%cacr		| clear and disable on-chip cache(s)
128
129	moveal	#NEXT_RAMBASE,%a5	| amount to RELOC by.
130	RELOC(lowram,%a0)		| store base of memory.
131	movl    %a5,%a0@
132
133	| Create a new stack at address tmpstk, and push
134	| the existing sp onto it as an arg for next68k_bootargs.
135	ASRELOC(tmpstk, %a0)
136	movel	%sp,%a0@-
137	moveal  %a0,%sp
138	moveal  #0,%a6
139
140	/* Read the header to get our segment list */
141	RELOC(next68k_bootargs,%a0)
142	jbsr	%a0@			| next68k_bootargs(args)
143	addqw	#4,%sp			| clear arg from stack.
144
145	/*
146	 * All data registers are now free.  All address registers
147	 * except %a5 are free.  %a5 is used by the RELOC() macro on hp300
148	 * and cannot be used until after the MMU is enabled.
149	 */
150
151/* determine our CPU/MMU combo - check for all regardless of kernel config */
152	movl	#0x200,%d0		| data freeze bit
153	movc	%d0,%cacr		|   only exists on 68030
154	movc	%cacr,%d0		| read it back
155	tstl	%d0			| zero?
156	jeq	Lnot68030		| yes, we have 68020/68040
157
158	/*
159	 * 68030 models
160	 */
161
162	RELOC(mmutype, %a0)		| no, we have 68030
163	movl	#MMU_68030,%a0@		| set to reflect 68030 PMMU
164	RELOC(cputype, %a0)
165	movl	#CPU_68030,%a0@		| and 68030 CPU
166	RELOC(machineid, %a0)
167	movl	#30,%a0@		| @@@ useless
168	jra	Lstart1
169
170	/*
171	 * End of 68030 section
172	 */
173
174Lnot68030:
175	bset	#31,%d0			| data cache enable bit
176	movc	%d0,%cacr		|   only exists on 68040
177	movc	%cacr,%d0		| read it back
178	tstl	%d0			| zero?
179	beq	Lis68020		| yes, we have 68020
180	moveq	#0,%d0			| now turn it back off
181	movec	%d0,%cacr		|   before we access any data
182
183	/*
184	 * 68040 models
185	 */
186
187	RELOC(mmutype, %a0)
188	movl	#MMU_68040,%a0@		| with a 68040 MMU
189	RELOC(cputype, %a0)
190	movl	#CPU_68040,%a0@		| and a 68040 CPU
191	RELOC(fputype, %a0)
192	movl	#FPU_68040,%a0@		| ...and FPU
193#if defined(ENABLE_HP_CODE)
194	RELOC(ectype, %a0)
195	movl	#EC_NONE,%a0@		| and no cache (for now XXX)
196#endif
197	RELOC(machineid, %a0)
198	movl	#40,%a0@		| @@@ useless
199	jra	Lstart1
200
201	/*
202	 * End of 68040 section
203	 */
204
205	/*
206	 * 68020 models
207	 * (There are no 68020 models of NeXT, but we'll pretend)
208	 */
209
210Lis68020:
211	RELOC(mmutype, %a0)
212	movl	#MMU_68851,%a0@		| no, we have PMMU
213	RELOC(fputype, %a0)		| all of the 68020 systems
214	movl	#FPU_68881,%a0@		|   have a 68881 FPU
215	RELOC(cputype, %a0)
216	movl	#CPU_68020,%a0@		| and a 68020 CPU
217	RELOC(machineid, %a0)
218	movl	#20,%a0@			| @@@ useless
219	jra	Lstart1
220
221	/*
222	 * End of 68020 section
223	 */
224
225Lstart1:
226	/*
227	 * Now that we know what CPU we have, initialize the address error
228	 * and bus error handlers in the vector table:
229	 *
230	 *	vectab+8	bus error
231	 *	vectab+12	address error
232	 */
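	/*
	 * (Bus error and address error are m68k exception vectors 2 and 3;
	 * each vectab entry is a 4-byte pointer, hence offsets 8 and 12.)
	 */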
233	RELOC(cputype, %a0)
234#if 0
235	/* XXX assembler/linker feature/bug */
236	RELOC(vectab, %a2)
237#else
238	movl	#_C_LABEL(vectab),%a2
239	addl	%a5,%a2
240#endif
241#if defined(M68040)
242	cmpl	#CPU_68040,%a0@		| 68040?
243	jne	1f			| no, skip
244	movl	#_C_LABEL(buserr40),%a2@(8)
245	movl	#_C_LABEL(addrerr4060),%a2@(12)
246	jra	Lstart2
2471:
248#endif
249#if defined(M68020) || defined(M68030)
250	cmpl	#CPU_68040,%a0@		| 68040?
251	jeq	1f			| yes, skip
252	movl	#_C_LABEL(busaddrerr2030),%a2@(8)
253	movl	#_C_LABEL(busaddrerr2030),%a2@(12)
254	jra	Lstart2
2551:
256#endif
257	/* Config botch; no hope. */
258	PANIC("Config botch in locore")
259
260Lstart2:
261/* initialize source/destination control registers for movs */
262	moveq	#FC_USERD,%d0		| user space
263	movc	%d0,%sfc		|   as source
264	movc	%d0,%dfc		|   and destination of transfers
265/* configure kernel and proc0 VA space so we can get going */
266#ifdef DDB
267	RELOC(esym,%a0)			| end of static kernel text/data/syms
268	movl	%a0@,%d5
269	jne	Lstart3
270#endif
271	movl	#_C_LABEL(end),%d5	| end of static kernel text/data
272
273Lstart3:
274	addl	#NBPG-1,%d5
275	andl	#PG_FRAME,%d5		| round to a page
276	movl	%d5,%a4
277	addl	%a5,%a4			| convert to PA
278	pea	%a5@			| firstpa
279	pea	%a4@			| nextpa
280	RELOC(pmap_bootstrap,%a0)
281	jbsr	%a0@			| pmap_bootstrap(firstpa,nextpa)
282	addql	#8,%sp
283
284/*
285 * Prepare to enable MMU.
286 * Since the kernel is not mapped logical == physical we must ensure
287 * that when the MMU is turned on, all prefetched addresses (including
288 * the PC) are valid.  In order to guarantee that, we use the last physical
289 * page (which is conveniently mapped == VA) and load it up with enough
290 * code to defeat the prefetch, then we execute the jump back to here.
291 *
292 * Is this all really necessary, or am I paranoid??
293 */
294	RELOC(Sysseg, %a0)		| system segment table addr
295	movl	%a0@,%d1		| read value (a KVA)
296	addl	%a5,%d1			| convert to PA
297
298	RELOC(mmutype, %a0)
299#if defined(ENABLE_HP_CODE)
300	tstl	%a0@			| HP MMU?
301	jeq	Lhpmmu2			| yes, skip
302#endif
303	cmpl	#MMU_68040,%a0@		| 68040?
304	jne	Lmotommu1		| no, skip
305	.long	0x4e7b1807		| movc %d1,%srp
306	jra	Lstploaddone
307Lmotommu1:
308	RELOC(protorp, %a0)
309	movl	#0x80000202,%a0@	| nolimit + share global + 4 byte PTEs
310	movl	%d1,%a0@(4)		| + segtable address
311	pmove	%a0@,%srp		| load the supervisor root pointer
312	movl	#0x80000002,%a0@	| reinit upper half for CRP loads
313
314#if defined(ENABLE_HP_CODE)
315	jra	Lstploaddone		| done
316Lhpmmu2:
317	moveq	#PGSHIFT,%d2
318	lsrl	%d2,%d1			| convert to page frame
319	movl	%d1,INTIOBASE+MMUBASE+MMUSSTP | load in sysseg table register
320#endif
321Lstploaddone:
322#if defined(ENABLE_MAXADDR_TRAMPOLINE)
323	lea	MAXADDR,%a2		| PA of last RAM page
324	ASRELOC(Lhighcode, %a1)		| addr of high code
325	ASRELOC(Lehighcode, %a3)	| end addr
326Lcodecopy:
327	movw	%a1@+,%a2@+		| copy a word
328	cmpl	%a3,%a1			| done yet?
329	jcs	Lcodecopy		| no, keep going
330	jmp	MAXADDR			| go for it!
331	/*
332	 * BEGIN MMU TRAMPOLINE.  This section of code is not
333	 * executed in-place.  It's copied to the last page
334	 * of RAM (mapped va == pa) and executed there.
335	 */
336
337Lhighcode:
338#endif /* ENABLE_MAXADDR_TRAMPOLINE */
339
340	/*
341	 * Set up the vector table, and race to get the MMU
342	 * enabled.
343	 */
344
345	movc    %vbr,%d0		| Keep copy of ROM VBR
346	ASRELOC(save_vbr,%a0)
347	movl    %d0,%a0@
348	movl	#_C_LABEL(vectab),%d0	| set Vector Base Register
349	movc	%d0,%vbr
350
351	RELOC(mmutype, %a0)
352#if defined(ENABLE_HP_CODE)
353	tstl	%a0@			| HP MMU?
354	jeq	Lhpmmu3			| yes, skip
355#endif
356	cmpl	#MMU_68040,%a0@		| 68040?
357	jne	Lmotommu2		| no, skip
358#if defined(ENABLE_HP_CODE)
359	movw	#0,INTIOBASE+MMUBASE+MMUCMD+2
360	movw	#MMU_IEN+MMU_CEN+MMU_FPE,INTIOBASE+MMUBASE+MMUCMD+2
361					| enable FPU and caches
362#endif
363
364	| This is a hack to get PA=KVA when turning on MMU
365	| it will only work on 68040's.  We should fix something
366	| to boot 68030's later.
367	movel	#0x0200c040,%d0		| intio devices are at 0x02000000
368	.long	0x4e7b0004		| movc %d0,%itt0
369	.long	0x4e7b0006		| movc %d0,%dtt0
370	movel	#0x0403c000,%d0		| kernel text and data at 0x04000000
371	.long	0x4e7b0005		| movc %d0,%itt1
372	.long	0x4e7b0007		| movc %d0,%dtt1
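	| A rough decode of the two TTR values above (MC68040 TTR layout:
	| address base/mask in the top two bytes, E bit set, S-field set to
	| match both user and supervisor accesses): 0x0200c040 transparently
	| maps 0x02000000-0x02ffffff cache-inhibited (serialized) for the
	| intio devices, and 0x0403c000 maps 0x04000000-0x07ffffff cachable
	| write-through for the kernel text and data.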
373
374	.word	0xf4d8			| cinva bc
375	.word	0xf518			| pflusha
376	movl	#0x8000,%d0
377	.long	0x4e7b0003		| movc %d0,tc
378	movl	#0x80008000,%d0
379	movc	%d0,%cacr		| turn on both caches
380
381	jmp     Lturnoffttr:l		| global jump into mapped memory.
382Lturnoffttr:
383	moveq	#0,%d0			| ensure TT regs are disabled
384	.long	0x4e7b0004		| movc %d0,%itt0
385	.long	0x4e7b0006		| movc %d0,%dtt0
386	.long	0x4e7b0005		| movc %d0,%itt1
387	.long	0x4e7b0007		| movc %d0,%dtt1
388	jmp	Lenab1
389Lmotommu2:
390#if defined(ENABLE_HP_CODE)
391	movl	#MMU_IEN+MMU_FPE,INTIOBASE+MMUBASE+MMUCMD
392					| enable 68881 and i-cache
393#endif
394	RELOC(prototc, %a2)
395	movl	#0x82c0aa00,%a2@	| value to load TC with
396	pmove	%a2@,%tc		| load it
397	jmp	Lenab1:l		| force absolute (not pc-relative) jmp
398#if defined(ENABLE_HP_CODE)
399Lhpmmu3:
400	movl	#0,INTIOBASE+MMUBASE+MMUCMD	| clear external cache
401	movl	#MMU_ENAB,INTIOBASE+MMUBASE+MMUCMD | turn on MMU
402	jmp	Lenab1:l			| jmp to mapped code
403#endif
404#if defined(ENABLE_MAXADDR_TRAMPOLINE)
405Lehighcode:
406
407	/*
408	 * END MMU TRAMPOLINE.  Address register %a5 is now free.
409	 */
410#endif
411
412/*
413 * Should be running mapped from this point on
414 */
415Lenab1:
416/* select the software page size now */
417	lea	_ASM_LABEL(tmpstk),%sp	| temporary stack
418	jbsr	_C_LABEL(uvm_setpagesize) | select software page size
419	bsr     Lpushpc			| Push the PC on the stack.
420Lpushpc:
421
422
423/* set kernel stack, user %SP, and initial pcb */
424	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
425	lea	%a1@(USPACE-4),%sp	| set kernel stack to end of area
426	lea	_C_LABEL(proc0),%a2	| initialize proc0.p_addr so that
427	movl	%a1,%a2@(P_ADDR)	|   we don't deref NULL in trap()
428	movl	#USRSTACK-4,%a2
429	movl	%a2,%usp		| init user SP
430	movl	%a1,_C_LABEL(curpcb)	| proc0 is running
431
432	tstl	_C_LABEL(fputype)	| Have an FPU?
433	jeq	Lenab2			| No, skip.
434	clrl	%a1@(PCB_FPCTX)		| ensure null FP context
435	movl	%a1,%sp@-
436	jbsr	_C_LABEL(m68881_restore) | restore it (does not kill %a1)
437	addql	#4,%sp
438Lenab2:
439	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
440	jeq	Ltbia040		| yes, cache already on
441	pflusha
442	movl	#CACHE_ON,%d0
443	movc	%d0,%cacr		| clear cache(s)
444	jra	Lenab3
445Ltbia040:
446	.word	0xf518
447Lenab3:
448
449	jbsr	_C_LABEL(next68k_init)
450
451/* Final setup for call to main(). */
452/*
453 * Create a fake exception frame so that cpu_fork() can copy it.
454 * main() never returns; we exit to user mode from a forked process
455 * later on.
456 */
457	clrw	%sp@-			| vector offset/frame type
458	clrl	%sp@-			| PC - filled in by "execve"
459	movw	#PSL_USER,%sp@-		| in user mode
460	clrl	%sp@-			| stack adjust count and padding
461	lea	%sp@(-64),%sp		| construct space for %D0-%D7/%A0-%A7
462	lea	_C_LABEL(proc0),%a0	| save pointer to frame
463	movl	%sp,%a0@(P_MD_REGS)	|   in proc0.p_md.md_regs
464
465	jra	_C_LABEL(main)		| main()
466	PANIC("main() returned")
467	/* NOTREACHED */
468
469/*
470 * proc_trampoline: call function in register %a2 with %a3 as an arg
471 * and then rei.
472 */
473GLOBAL(proc_trampoline)
474	movl	%a3,%sp@-		| push function arg
475	jbsr	%a2@			| call function
476	addql	#4,%sp			| pop arg
477	movl	%sp@(FR_SP),%a0		| grab and load
478	movl	%a0,%usp		|   user SP
479	moveml	%sp@+,#0x7FFF		| restore most user regs
480	addql	#8,%sp			| toss SP and stack adjust
481	jra	_ASM_LABEL(rei)		| and return
482
483
484/*
485 * Trap/interrupt vector routines
486 */
487#include <m68k/m68k/trap_subr.s>
488
489	.data
490GLOBAL(m68k_fault_addr)
491	.long	0
492
493#if defined(M68040) || defined(M68060)
494ENTRY_NOPROFILE(addrerr4060)
495	clrl	%sp@-			| stack adjust count
496	moveml	#0xFFFF,%sp@-		| save user registers
497	movl	%usp,%a0		| save the user SP
498	movl	%a0,%sp@(FR_SP)		|   in the savearea
499	movl	%sp@(FR_HW+8),%sp@-
500	clrl	%sp@-			| dummy code
501	movl	#T_ADDRERR,%sp@-	| mark address error
502	jra	_ASM_LABEL(faultstkadj)	| and deal with it
503#endif
504
505#if defined(M68060)
506ENTRY_NOPROFILE(buserr60)
507	clrl	%sp@-			| stack adjust count
508	moveml	#0xFFFF,%sp@-		| save user registers
509	movl	%usp,%a0		| save the user SP
510	movl	%a0,%sp@(FR_SP)		|   in the savearea
511	movel	%sp@(FR_HW+12),%d0	| FSLW
512	btst	#2,%d0			| branch prediction error?
513	jeq	Lnobpe
514	movc	%cacr,%d2
515	orl	#IC60_CABC,%d2		| clear all branch cache entries
516	movc	%d2,%cacr
517	movl	%d0,%d1
518	addql	#1,L60bpe
519	andl	#0x7ffd,%d1
520	jeq	_ASM_LABEL(faultstkadjnotrap2)
521Lnobpe:
522| we need to adjust for misaligned addresses
523	movl	%sp@(FR_HW+8),%d1	| grab VA
524	btst	#27,%d0			| check for mis-aligned access
525	jeq	Lberr3			| no, skip
526	addl	#28,%d1			| yes, get into next page
527					| operand case: 3,
528					| instruction case: 4+12+12
529	andl	#PG_FRAME,%d1		| and truncate
530Lberr3:
531	movl	%d1,%sp@-
532	movl	%d0,%sp@-		| code is FSLW now.
533	andw	#0x1f80,%d0
534	jeq	Lberr60			| it is a bus error
535	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
536	jra	_ASM_LABEL(faultstkadj)	| and deal with it
537Lberr60:
538	tstl	_C_LABEL(nofault)	| catch bus error?
539	jeq	Lisberr			| no, handle as usual
540	movl	%sp@(FR_HW+8+8),_C_LABEL(m68k_fault_addr) | save fault addr
541	movl	_C_LABEL(nofault),%sp@-	| yes,
542	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
543	/* NOTREACHED */
544#endif
545#if defined(M68040)
546ENTRY_NOPROFILE(buserr40)
547	clrl	%sp@-			| stack adjust count
548	moveml	#0xFFFF,%sp@-		| save user registers
549	movl	%usp,%a0		| save the user SP
550	movl	%a0,%sp@(FR_SP)		|   in the savearea
551	movl	%sp@(FR_HW+20),%d1	| get fault address
552	moveq	#0,%d0
553	movw	%sp@(FR_HW+12),%d0	| get SSW
554	btst	#11,%d0			| check for mis-aligned
555	jeq	Lbe1stpg		| no skip
556	addl	#3,%d1			| get into next page
557	andl	#PG_FRAME,%d1		| and truncate
558Lbe1stpg:
559	movl	%d1,%sp@-		| pass fault address.
560	movl	%d0,%sp@-		| pass SSW as code
561	btst	#10,%d0			| test ATC
562	jeq	Lberr40			| it is a bus error
563	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
564	jra	_ASM_LABEL(faultstkadj)	| and deal with it
565Lberr40:
566	tstl	_C_LABEL(nofault)	| catch bus error?
567	jeq	Lisberr			| no, handle as usual
568	movl	%sp@(FR_HW+8+20),_C_LABEL(m68k_fault_addr) | save fault addr
569	movl	_C_LABEL(nofault),%sp@-	| yes,
570	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
571	/* NOTREACHED */
572#endif
573
574#if defined(M68020) || defined(M68030)
575ENTRY_NOPROFILE(busaddrerr2030)
576	clrl	%sp@-			| stack adjust count
577	moveml	#0xFFFF,%sp@-		| save user registers
578	movl	%usp,%a0		| save the user SP
579	movl	%a0,%sp@(FR_SP)		|   in the savearea
580	moveq	#0,%d0
581	movw	%sp@(FR_HW+10),%d0	| grab SSW for fault processing
582	btst	#12,%d0			| RB set?
583	jeq	LbeX0			| no, test RC
584	bset	#14,%d0			| yes, must set FB
585	movw	%d0,%sp@(FR_HW+10)	| for hardware too
586LbeX0:
587	btst	#13,%d0			| RC set?
588	jeq	LbeX1			| no, skip
589	bset	#15,%d0			| yes, must set FC
590	movw	%d0,%sp@(FR_HW+10)	| for hardware too
591LbeX1:
592	btst	#8,%d0			| data fault?
593	jeq	Lbe0			| no, check for hard cases
594	movl	%sp@(FR_HW+16),%d1	| fault address is as given in frame
595	jra	Lbe10			| thats it
596Lbe0:
597	btst	#4,%sp@(FR_HW+6)	| long (type B) stack frame?
598	jne	Lbe4			| yes, go handle
599	movl	%sp@(FR_HW+2),%d1	| no, can use save PC
600	btst	#14,%d0			| FB set?
601	jeq	Lbe3			| no, try FC
602	addql	#4,%d1			| yes, adjust address
603	jra	Lbe10			| done
604Lbe3:
605	btst	#15,%d0			| FC set?
606	jeq	Lbe10			| no, done
607	addql	#2,%d1			| yes, adjust address
608	jra	Lbe10			| done
609Lbe4:
610	movl	%sp@(FR_HW+36),%d1	| long format, use stage B address
611	btst	#15,%d0			| FC set?
612	jeq	Lbe10			| no, all done
613	subql	#2,%d1			| yes, adjust address
614Lbe10:
615	movl	%d1,%sp@-		| push fault VA
616	movl	%d0,%sp@-		| and padded SSW
617	movw	%sp@(FR_HW+8+6),%d0	| get frame format/vector offset
618	andw	#0x0FFF,%d0		| clear out frame format
619	cmpw	#12,%d0			| address error vector?
620	jeq	Lisaerr			| yes, go to it
621#if defined(M68K_MMU_MOTOROLA)
622#if defined(M68K_MMU_HP)
623	tstl	_C_LABEL(mmutype)	| HP MMU?
624	jeq	Lbehpmmu		| yes, different MMU fault handler
625#endif
626	movl	%d1,%a0			| fault address
627	movl	%sp@,%d0		| function code from ssw
628	btst	#8,%d0			| data fault?
629	jne	Lbe10a
630	movql	#1,%d0			| user program access FC
631					| (we dont separate data/program)
632	btst	#5,%sp@(FR_HW+8)	| supervisor mode?
633	jeq	Lbe10a			| if no, done
634	movql	#5,%d0			| else supervisor program access
635Lbe10a:
636	ptestr	%d0,%a0@,#7		| do a table search
637	pmove	%psr,%sp@		| save result
638	movb	%sp@,%d1
639	btst	#2,%d1			| invalid (incl. limit viol. and berr)?
640	jeq	Lmightnotbemerr		| no -> wp check
641	btst	#7,%d1			| is it MMU table berr?
642	jne	Lisberr1		| yes, needs not be fast.
643#endif /* M68K_MMU_MOTOROLA */
644Lismerr:
645	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
646	jra	_ASM_LABEL(faultstkadj)	| and deal with it
647#if defined(M68K_MMU_MOTOROLA)
648Lmightnotbemerr:
649	btst	#3,%d1			| write protect bit set?
650	jeq	Lisberr1		| no: must be bus error
651	movl	%sp@,%d0			| ssw into low word of %d0
652	andw	#0xc0,%d0		| Write protect is set on page:
653	cmpw	#0x40,%d0		| was it read cycle?
654	jne	Lismerr			| no, was not WPE, must be MMU fault
655	jra	Lisberr1		| real bus err needs not be fast.
656#endif /* M68K_MMU_MOTOROLA */
657#if defined(M68K_MMU_HP)
658Lbehpmmu:
659	MMUADDR(%a0)
660	movl	%a0@(MMUSTAT),%d0	| read MMU status
661	btst	#3,%d0			| MMU fault?
662	jeq	Lisberr1		| no, just a non-MMU bus error
663	andl	#~MMU_FAULT,%a0@(MMUSTAT)| yes, clear fault bits
664	movw	%d0,%sp@		| pass MMU stat in upper half of code
665	jra	Lismerr			| and handle it
666#endif
667Lisaerr:
668	movl	#T_ADDRERR,%sp@-	| mark address error
669	jra	_ASM_LABEL(faultstkadj)	| and deal with it
670Lisberr1:
671	clrw	%sp@			| re-clear pad word
672	tstl	_C_LABEL(nofault)	| catch bus error?
673	jeq	Lisberr			| no, handle as usual
674	movl	%sp@(FR_HW+8+16),_C_LABEL(m68k_fault_addr) | save fault addr
675	movl	_C_LABEL(nofault),%sp@-	| yes,
676	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
677	/* NOTREACHED */
678#endif /* M68020 || M68030 */
679
680Lisberr:				| also used by M68040/60
681	movl	#T_BUSERR,%sp@-		| mark bus error
682	jra	_ASM_LABEL(faultstkadj)	| and deal with it
683
684/*
685 * FP exceptions.
686 */
687ENTRY_NOPROFILE(fpfline)
688#if defined(M68040)
689	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
690	jne	Lfp_unimp		| no, skip FPSP
691	cmpw	#0x202c,%sp@(6)		| format type 2?
692	jne	_C_LABEL(illinst)	| no, not an FP emulation
693Ldofp_unimp:
694#ifdef FPSP
695	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
696#endif
697Lfp_unimp:
698#endif /* M68040 */
699#ifdef FPU_EMULATE
700	clrl	%sp@-			| stack adjust count
701	moveml	#0xFFFF,%sp@-		| save registers
702	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
703	jra	_ASM_LABEL(fault)	| do it
704#else
705	jra	_C_LABEL(illinst)
706#endif
707
708ENTRY_NOPROFILE(fpunsupp)
709#if defined(M68040)
710	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
711	jne	_C_LABEL(illinst)	| no, treat as illinst
712#ifdef FPSP
713	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
714#endif
715Lfp_unsupp:
716#endif /* M68040 */
717#ifdef FPU_EMULATE
718	clrl	%sp@-			| stack adjust count
719	moveml	#0xFFFF,%sp@-		| save registers
720	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
721	jra	_ASM_LABEL(fault)	| do it
722#else
723	jra	_C_LABEL(illinst)
724#endif
725
726/*
727 * Handles all other FP coprocessor exceptions.
728 * Note that since some FP exceptions generate mid-instruction frames
729 * and may cause signal delivery, we need to test for stack adjustment
730 * after the trap call.
731 */
732ENTRY_NOPROFILE(fpfault)
733	clrl	%sp@-		| stack adjust count
734	moveml	#0xFFFF,%sp@-	| save user registers
735	movl	%usp,%a0		| and save
736	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
737	clrl	%sp@-		| no VA arg
738	movl	_C_LABEL(curpcb),%a0 | current pcb
739	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
740	fsave	%a0@		| save state
741#if defined(M68040) || defined(M68060)
742	/* always null state frame on 68040, 68060 */
743	cmpl	#FPU_68040,_C_LABEL(fputype)
744	jle	Lfptnull
745#endif
746	tstb	%a0@		| null state frame?
747	jeq	Lfptnull	| yes, safe
748	clrw	%d0		| no, need to tweak BIU
749	movb	%a0@(1),%d0	| get frame size
750	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
751Lfptnull:
752	fmovem	%fpsr,%sp@-	| push %fpsr as code argument
753	frestore %a0@		| restore state
754	movl	#T_FPERR,%sp@-	| push type arg
755	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
756
757/*
758 * Other exceptions only cause four and six word stack frame and require
759 * no post-trap stack adjustment.
760 */
761
762ENTRY_NOPROFILE(badtrap)
763	moveml	#0xC0C0,%sp@-		| save scratch regs
764	movw	%sp@(22),%sp@-		| push exception vector info
765	clrw	%sp@-
766	movl	%sp@(22),%sp@-		| and PC
767	jbsr	_C_LABEL(straytrap)	| report
768	addql	#8,%sp			| pop args
769	moveml	%sp@+,#0x0303		| restore regs
770	jra	_ASM_LABEL(rei)		| all done
771
772ENTRY_NOPROFILE(trap0)
773	clrl	%sp@-			| stack adjust count
774	moveml	#0xFFFF,%sp@-		| save user registers
775	movl	%usp,%a0		| save the user SP
776	movl	%a0,%sp@(FR_SP)		|   in the savearea
777	movl	%d0,%sp@-		| push syscall number
778	jbsr	_C_LABEL(syscall)	| handle it
779	addql	#4,%sp			| pop syscall arg
780	tstl	_C_LABEL(astpending)
781	jne	Lrei2
782	tstb	_C_LABEL(ssir)
783	jeq	Ltrap1
784	movw	#SPL1,%sr
785	tstb	_C_LABEL(ssir)
786	jne	Lsir1
787Ltrap1:
788	movl	%sp@(FR_SP),%a0		| grab and restore
789	movl	%a0,%usp		|   user SP
790	moveml	%sp@+,#0x7FFF		| restore most registers
791	addql	#8,%sp			| pop SP and stack adjust
792	rte
793
794/*
795 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
796 *	cachectl(command, addr, length)
797 * command in %d0, addr in %a1, length in %d1
798 */
799ENTRY_NOPROFILE(trap12)
800	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
801	movl	%d1,%sp@-		| push length
802	movl	%a1,%sp@-		| push addr
803	movl	%d0,%sp@-		| push command
804	jbsr	_C_LABEL(cachectl1)	| do it
805	lea	%sp@(16),%sp		| pop args
806	jra	_ASM_LABEL(rei)		| all done
807
808/*
809 * Trace (single-step) trap.  Kernel-mode is special.
810 * User mode traps are simply passed on to trap().
811 */
812ENTRY_NOPROFILE(trace)
813	clrl	%sp@-			| stack adjust count
814	moveml	#0xFFFF,%sp@-
815	moveq	#T_TRACE,%d0
816
817	| Check PSW and see what happened.
818	|   T=0 S=0	(should not happen)
819	|   T=1 S=0	trace trap from user mode
820	|   T=0 S=1	trace trap on a trap instruction
821	|   T=1 S=1	trace trap from system mode (kernel breakpoint)
822
823	movw	%sp@(FR_HW),%d1		| get PSW
824	notw	%d1			| XXX no support for T0 on 680[234]0
825	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
826	jeq	Lkbrkpt			| yes, kernel breakpoint
827	jra	_ASM_LABEL(fault)	| no, user-mode fault
828
829/*
830 * Trap 15 is used for:
831 *	- GDB breakpoints (in user programs)
832 *	- KGDB breakpoints (in the kernel)
833 *	- trace traps for SUN binaries (not fully supported yet)
834 * User mode traps are simply passed to trap().
835 */
836ENTRY_NOPROFILE(trap15)
837	clrl	%sp@-			| stack adjust count
838	moveml	#0xFFFF,%sp@-
839	moveq	#T_TRAP15,%d0
840	movw	%sp@(FR_HW),%d1		| get PSW
841	andw	#PSL_S,%d1		| from system mode?
842	jne	Lkbrkpt			| yes, kernel breakpoint
843	jra	_ASM_LABEL(fault)	| no, user-mode fault
844
845Lkbrkpt: | Kernel-mode breakpoint or trace trap. (%d0=trap_type)
846	| Save the system %sp rather than the user %sp.
847	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
848	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
849	movl	%a6,%sp@(FR_SP)		|  from before trap
850
851	| If we are not on tmpstk, switch to it.
852	| (so debugger can change the stack pointer)
853	movl	%a6,%d1
854	cmpl	#_ASM_LABEL(tmpstk),%d1
855	jls	Lbrkpt2			| already on tmpstk
856	| Copy frame to the temporary stack
857	movl	%sp,%a0			| %a0=src
858	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
859	movl	%a1,%sp			| %sp=new frame
860	moveq	#FR_SIZE,%d1
861Lbrkpt1:
862	movl	%a0@+,%a1@+
863	subql	#4,%d1
864	bgt	Lbrkpt1
865
866Lbrkpt2:
867	| Call the trap handler for the kernel debugger.
868	| Do not call trap() to do it, so that we can
869	| set breakpoints in trap() if we want.  We know
870	| the trap type is either T_TRACE or T_BREAKPOINT.
871	| If we have both DDB and KGDB, let KGDB see it first,
872	| because KGDB will just return 0 if not connected.
873	| Save args in %d2, %a2
874	movl	%d0,%d2			| trap type
875	movl	%sp,%a2			| frame ptr
876#ifdef KGDB
877	| Let KGDB handle it (if connected)
878	movl	%a2,%sp@-		| push frame ptr
879	movl	%d2,%sp@-		| push trap type
880	jbsr	_C_LABEL(kgdb_trap)	| handle the trap
881	addql	#8,%sp			| pop args
882	cmpl	#0,%d0			| did kgdb handle it?
883	jne	Lbrkpt3			| yes, done
884#endif
885#ifdef DDB
886	| Let DDB handle it
887	movl	%a2,%sp@-		| push frame ptr
888	movl	%d2,%sp@-		| push trap type
889	jbsr	_C_LABEL(kdb_trap)	| handle the trap
890	addql	#8,%sp			| pop args
891#if 0	/* not needed on hp300 */
892	cmpl	#0,%d0			| did ddb handle it?
893	jne	Lbrkpt3			| yes, done
894#endif
895#endif
896	/* Sun 3 drops into PROM here. */
897Lbrkpt3:
898	| The stack pointer may have been modified, or
899	| data below it modified (by kgdb push call),
900	| so push the hardware frame at the current %sp
901	| before restoring registers and returning.
902
903	movl	%sp@(FR_SP),%a0		| modified %sp
904	lea	%sp@(FR_SIZE),%a1	| end of our frame
905	movl	%a1@-,%a0@-		| copy 2 longs with
906	movl	%a1@-,%a0@-		| ... predecrement
907	movl	%a0,%sp@(FR_SP)		| %sp = h/w frame
908	moveml	%sp@+,#0x7FFF		| restore all but %sp
909	movl	%sp@,%sp		| ... and %sp
910	rte				| all done
911
912/* Use common m68k sigreturn */
913#include <m68k/m68k/sigreturn.s>
914
915/*
916 * Interrupt handlers.
917 *
918 * For auto-vectored interrupts, the CPU provides the
919 * vector 0x18+level.  Note we count spurious interrupts,
920 * but don't do anything else with them.
921 *
922 * _intrhand_autovec is the entry point for auto-vectored
923 * interrupts.
924 *
925 * For vectored interrupts, we pull the pc, evec, and exception frame
926 * and pass them to the vectored interrupt dispatcher.  The vectored
927 * interrupt dispatcher will deal with strays.
928 *
929 * _intrhand_vectored is the entry point for vectored interrupts.
930 */
931
932#define INTERRUPT_SAVEREG	moveml  #0xC0C0,%sp@-
933#define INTERRUPT_RESTOREREG	moveml  %sp@+,#0x0303
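/*
 * moveml masks are bit-reversed in the predecrement form, so #0xC0C0
 * pushes the scratch registers %d0-%d1/%a0-%a1; the postincrement mask
 * #0x0303 pops the same four registers.
 */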
934
935ENTRY_NOPROFILE(spurintr)	/* Level 0 */
936	addql	#1,_C_LABEL(intrcnt)+0
937	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
938	jra	_ASM_LABEL(rei)
939
940ENTRY_NOPROFILE(intrhand_autovec)	/* Levels 1 through 6 */
941	INTERRUPT_SAVEREG
942	lea	%sp@(16),%a1		| get pointer to frame
943	movl	%a1,%sp@-
944	movw	%sp@(26),%d0
945	movl	%d0,%sp@-		| push exception vector info
946	movl	%sp@(26),%sp@-		| and PC
947	jbsr	_C_LABEL(isrdispatch_autovec)	| call dispatcher
948	lea	%sp@(12),%sp		| pop value args
949	INTERRUPT_RESTOREREG
950	jra	_ASM_LABEL(rei)		| all done
951
952ENTRY_NOPROFILE(lev7intr)	/* level 7: parity errors, reset key */
953	addql	#1,_C_LABEL(intrcnt)+32
954	clrl	%sp@-
955	moveml	#0xFFFF,%sp@-		| save registers
956	movl	%usp,%a0		| and save
957	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
958	jbsr	_C_LABEL(nmihand)	| call handler
959	movl	%sp@(FR_SP),%a0		| restore
960	movl	%a0,%usp			|   user SP
961	moveml	%sp@+,#0x7FFF		| and remaining registers
962	addql	#8,%sp			| pop SP and stack adjust
963	jra	_ASM_LABEL(rei)		| all done
964
965ENTRY_NOPROFILE(intrhand_vectored)
966	INTERRUPT_SAVEREG
967	lea	%sp@(16),%a1		| get pointer to frame
968	movl	%a1,%sp@-
969	movw	%sp@(26),%d0
970	movl	%d0,%sp@-		| push exception vector info
971	movl	%sp@(26),%sp@-		| and PC
972	jbsr	_C_LABEL(isrdispatch_vectored)	| call dispatcher
973	lea	%sp@(12),%sp		| pop value args
974	INTERRUPT_RESTOREREG
975	jra	_ASM_LABEL(rei)		| all done
976
977#undef INTERRUPT_SAVEREG
978#undef INTERRUPT_RESTOREREG
979
980/*
981 * Emulation of VAX REI instruction.
982 *
983 * This code deals with checking for and servicing ASTs
984 * (profiling, scheduling) and software interrupts (network, softclock).
985 * We check for ASTs first, just like the VAX.  To avoid excess overhead
986 * the T_ASTFLT handling code will also check for software interrupts so we
987 * do not have to do it here.  After identifying that we need an AST we
988 * drop the IPL to allow device interrupts.
989 *
990 * This code is complicated by the fact that sendsig may have been called
991 * necessitating a stack cleanup.
992 */
993
994BSS(ssir,1)
995
996ASENTRY_NOPROFILE(rei)
997	tstl	_C_LABEL(astpending)	| AST pending?
998	jeq	Lchksir			| no, go check for SIR
999Lrei1:
1000	btst	#5,%sp@			| yes, are we returning to user mode?
1001	jne	Lchksir			| no, go check for SIR
1002	movw	#PSL_LOWIPL,%sr		| lower SPL
1003	clrl	%sp@-			| stack adjust
1004	moveml	#0xFFFF,%sp@-		| save all registers
1005	movl	%usp,%a1		| including
1006	movl	%a1,%sp@(FR_SP)		|    the users SP
1007Lrei2:
1008	clrl	%sp@-			| VA == none
1009	clrl	%sp@-			| code == none
1010	movl	#T_ASTFLT,%sp@-		| type == async system trap
1011	jbsr	_C_LABEL(trap)		| go handle it
1012	lea	%sp@(12),%sp		| pop value args
1013	movl	%sp@(FR_SP),%a0		| restore user SP
1014	movl	%a0,%usp		|   from save area
1015	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
1016	jne	Laststkadj		| yes, go to it
1017	moveml	%sp@+,#0x7FFF		| no, restore most user regs
1018	addql	#8,%sp			| toss SP and stack adjust
1019	rte				| and do real RTE
1020Laststkadj:
1021	lea	%sp@(FR_HW),%a1		| pointer to HW frame
1022	addql	#8,%a1			| source pointer
1023	movl	%a1,%a0			| source
1024	addw	%d0,%a0			|  + hole size = dest pointer
1025	movl	%a1@-,%a0@-		| copy
1026	movl	%a1@-,%a0@-		|  8 bytes
1027	movl	%a0,%sp@(FR_SP)		| new SSP
1028	moveml	%sp@+,#0x7FFF		| restore user registers
1029	movl	%sp@,%sp		| and our SP
1030	rte				| and do real RTE
1031Lchksir:
1032	tstb	_C_LABEL(ssir)		| SIR pending?
1033	jeq	Ldorte			| no, all done
1034	movl	%d0,%sp@-		| need a scratch register
1035	movw	%sp@(4),%d0		| get SR
1036	andw	#PSL_IPL7,%d0		| mask all but IPL
1037	jne	Lnosir			| came from interrupt, no can do
1038	movl	%sp@+,%d0		| restore scratch register
1039Lgotsir:
1040	movw	#SPL1,%sr		| prevent others from servicing int
1041	tstb	_C_LABEL(ssir)		| too late?
1042	jeq	Ldorte			| yes, oh well...
1043	clrl	%sp@-			| stack adjust
1044	moveml	#0xFFFF,%sp@-		| save all registers
1045	movl	%usp,%a1		| including
1046	movl	%a1,%sp@(FR_SP)		|    the users SP
1047Lsir1:
1048	clrl	%sp@-			| VA == none
1049	clrl	%sp@-			| code == none
1050	movl	#T_SSIR,%sp@-		| type == software interrupt
1051	jbsr	_C_LABEL(trap)		| go handle it
1052	lea	%sp@(12),%sp		| pop value args
1053	movl	%sp@(FR_SP),%a0		| restore
1054	movl	%a0,%usp		|   user SP
1055	moveml	%sp@+,#0x7FFF		| and all remaining registers
1056	addql	#8,%sp			| pop SP and stack adjust
1057	rte
1058Lnosir:
1059	movl	%sp@+,%d0		| restore scratch register
1060Ldorte:
1061	rte				| real return
1062
1063/*
1064 * Use common m68k sigcode.
1065 */
1066#include <m68k/m68k/sigcode.s>
1067#ifdef COMPAT_SUNOS
1068#include <m68k/m68k/sunos_sigcode.s>
1069#endif
1070#ifdef COMPAT_SVR4
1071#include <m68k/m68k/svr4_sigcode.s>
1072#endif
1073
1074/*
1075 * Primitives
1076 */
1077
1078/*
1079 * Use common m68k support routines.
1080 */
1081#include <m68k/m68k/support.s>
1082
1083/*
1084 * Use common m68k process manipulation routines.
1085 */
1086#include <m68k/m68k/proc_subr.s>
1087
1088	.data
1089GLOBAL(curpcb)
1090GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
1091	.long	0
1092
1093ASLOCAL(mdpflag)
1094	.byte	0		| copy of proc md_flags low byte
1095	.align	4
1096
1097ASBSS(nullpcb,SIZEOF_PCB)
1098
1099/*
1100 * At exit of a process, do a switch for the last time.
1101 * Switch to a safe stack and PCB, and select a new process to run.  The
1102 * old stack and u-area will be freed by the reaper.
1103 *
1104 * MUST BE CALLED AT SPLHIGH!
1105 */
1106ENTRY(switch_exit)
1107	movl	%sp@(4),%a0
1108	/* save state into garbage pcb */
1109	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
1110	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack
1111
1112	/* Schedule the vmspace and stack to be freed. */
1113	movl	%a0,%sp@-		| exit2(p)
1114	jbsr	_C_LABEL(exit2)
1115	lea	%sp@(4),%sp		| pop args
1116
1117#if defined(LOCKDEBUG)
1118	/* Acquire sched_lock */
1119	jbsr	_C_LABEL(sched_lock_idle)
1120#endif
1121
1122	jra	_C_LABEL(cpu_switch)
1123
1124/*
1125 * When no processes are on the runq, cpu_switch() branches to Idle
1126 * to wait for something to come ready.
1127 */
1128ASENTRY_NOPROFILE(Idle)
1129#if defined(LOCKDEBUG)
1130	/* Release sched_lock */
1131	jbsr	_C_LABEL(sched_unlock_idle)
1132#endif
1133	stop	#PSL_LOWIPL
1134	movw	#PSL_HIGHIPL,%sr
1135#if defined(LOCKDEBUG)
1136	/* Acquire sched_lock */
1137	jbsr	_C_LABEL(sched_lock_idle)
1138#endif
1139	movl	_C_LABEL(sched_whichqs),%d0
1140	jeq	_ASM_LABEL(Idle)
1141	jra	Lsw1
1142
1143Lbadsw:
1144	PANIC("switch")
1145	/*NOTREACHED*/
1146
1147/*
1148 * cpu_switch()
1149 *
1150 * NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the
1151 * entire ATC.  The effort involved in selective flushing may not be
1152 * worth it, maybe we should just flush the whole thing?
1153 *
1154 * NOTE 2: With the new VM layout we now no longer know if an inactive
1155 * user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
1156 * bit).  For now, we just always flush the full ATC.
1157 */
1158ENTRY(cpu_switch)
1159	movl	_C_LABEL(curpcb),%a0	| current pcb
1160	movw	%sr,%a0@(PCB_PS)	| save sr before changing ipl
1161#ifdef notyet
1162	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
1163#endif
1164	clrl	_C_LABEL(curproc)
1165
1166	/*
1167	 * Find the highest-priority queue that isn't empty,
1168	 * then take the first proc from that queue.
1169	 */
1170	movl	_C_LABEL(sched_whichqs),%d0
1171	jeq	_ASM_LABEL(Idle)
1172Lsw1:
1173	/*
1174	 * Interrupts are blocked, sched_lock is held.  If
1175	 * we come here via Idle, %d0 contains the contents
1176	 * of a non-zero sched_whichqs.
1177	 */
1178	movl	%d0,%d1
1179	negl	%d0
1180	andl	%d1,%d0
1181	bfffo	%d0{#0:#32},%d1
1182	eorib	#31,%d1
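	/*
	 * The instructions above isolate the lowest set bit of
	 * sched_whichqs (%d0 & -%d0); bfffo counts bit offsets from the
	 * MSB, so the exclusive-or with 31 converts that offset back to a
	 * conventional bit number, leaving the index of the highest-
	 * priority non-empty run queue in %d1.
	 */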
1183
1184	movl	%d1,%d0
1185	lslb	#3,%d1			| convert queue number to index
1186	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
1187	movl	%d1,%a1
1188	movl	%a1@(P_FORW),%a0	| p = q->p_forw
1189	cmpal	%d1,%a0			| anyone on queue?
1190	jeq	Lbadsw			| no, panic
1191#ifdef DIAGNOSTIC
1192	tstl	%a0@(P_WCHAN)
1193	jne	Lbadsw
1194	cmpb	#SRUN,%a0@(P_STAT)
1195	jne	Lbadsw
1196#endif
1197	movl	%a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
1198	movl	%a0@(P_FORW),%a1	| n = p->p_forw
1199	movl	%d1,%a1@(P_BACK)	| n->p_back = q
1200	cmpal	%d1,%a1			| anyone left on queue?
1201	jne	Lsw2			| yes, skip
1202	movl	_C_LABEL(sched_whichqs),%d1
1203	bclr	%d0,%d1			| no, clear bit
1204	movl	%d1,_C_LABEL(sched_whichqs)
1205Lsw2:
1206	/* p->p_cpu initialized in fork1() for single-processor */
1207	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
1208	movl	%a0,_C_LABEL(curproc)
1209	clrl	_C_LABEL(want_resched)
1210#ifdef notyet
1211	movl	%sp@+,%a1
1212	cmpl	%a0,%a1			| switching to same proc?
1213	jeq	Lswdone			| yes, skip save and restore
1214#endif
1215	/*
1216	 * Save state of previous process in its pcb.
1217	 */
1218	movl	_C_LABEL(curpcb),%a1
1219	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
1220	movl	%usp,%a2		| grab %USP (%a2 has been saved)
1221	movl	%a2,%a1@(PCB_USP)	| and save it
1222
1223	tstl	_C_LABEL(fputype)	| Do we have an FPU?
1224	jeq	Lswnofpsave		| No?  Then don't attempt save.
1225	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
1226	fsave	%a2@			| save FP state
1227	tstb	%a2@			| null state frame?
1228	jeq	Lswnofpsave		| yes, all done
1229	fmovem	%fp0-%fp7,%a2@(FPF_REGS) | save FP general registers
1230	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR)	| save FP control registers
1231Lswnofpsave:
1232
1233	clrl	%a0@(P_BACK)		| clear back link
1234	movb	%a0@(P_MD_FLAGS+3),mdpflag | low byte of p_md.md_flags
1235	movl	%a0@(P_ADDR),%a1	| get p_addr
1236	movl	%a1,_C_LABEL(curpcb)
1237
1238#if defined(LOCKDEBUG)
1239	/*
1240	 * Done mucking with the run queues, release the
1241	 * scheduler lock, but keep interrupts out.
1242	 */
1243	movl	%a0,sp@-		| not args...
1244	movl	%a1,sp@-		| ...just saving
1245	jbsr	_C_LABEL(sched_unlock_idle)
1246	movl	sp@+,%a1
1247	movl	sp@+,%a0
1248#endif
1249
1250	/*
1251	 * Activate process's address space.
1252	 * XXX Should remember the last USTP value loaded, and call this
1253	 * XXX only if it has changed.
1254	 */
1255	pea	%a0@			| push proc
1256	jbsr	_C_LABEL(pmap_activate)	| pmap_activate(p)
1257	addql	#4,%sp
1258	movl	_C_LABEL(curpcb),%a1	| restore p_addr
1259
1260	lea	_ASM_LABEL(tmpstk),%sp	| now goto a tmp stack for NMI
1261
1262	moveml	%a1@(PCB_REGS),#0xFCFC	| and registers
1263	movl	%a1@(PCB_USP),%a0
1264	movl	%a0,%usp		| and %USP
1265
1266	tstl	_C_LABEL(fputype)	| If we don't have an FPU,
1267	jeq	Lnofprest		|  don't try to restore it.
1268	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
1269	tstb	%a0@			| null state frame?
1270	jeq	Lresfprest		| yes, easy
1271#if defined(M68040)
1272#if defined(M68020) || defined(M68030)
1273	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1274	jne	Lresnot040		| no, skip
1275#endif
1276	clrl	%sp@-			| yes...
1277	frestore %sp@+			| ...magic!
1278Lresnot040:
1279#endif
1280	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control registers
1281	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1282Lresfprest:
1283	frestore %a0@			| restore state
1284
1285Lnofprest:
1286	movw	%a1@(PCB_PS),%sr	| no, restore PS
1287	moveq	#1,%d0			| return 1 (for alternate returns)
1288	rts
1289
1290/*
1291 * savectx(pcb)
1292 * Update pcb, saving current processor state.
1293 */
1294ENTRY(savectx)
1295	movl	%sp@(4),%a1
1296	movw	%sr,%a1@(PCB_PS)
1297	movl	%usp,%a0		| grab %USP
1298	movl	%a0,%a1@(PCB_USP)	| and save it
1299	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
1300
1301	tstl	_C_LABEL(fputype)	| Do we have FPU?
1302	jeq	Lsvnofpsave		| No?  Then don't save state.
1303	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
1304	fsave	%a0@			| save FP state
1305	tstb	%a0@			| null state frame?
1306	jeq	Lsvnofpsave		| yes, all done
1307	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1308	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control registers
1309Lsvnofpsave:
1310	moveq	#0,%d0			| return 0
1311	rts
1312
1313#if defined(M68040)
1314ENTRY(suline)
1315	movl	%sp@(4),%a0		| address to write
1316	movl	_C_LABEL(curpcb),%a1	| current pcb
1317	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
1318	movl	%sp@(8),%a1		| address of line
1319	movl	%a1@+,%d0		| get lword
1320	movsl	%d0,%a0@+		| put lword
1321	nop				| sync
1322	movl	%a1@+,%d0		| get lword
1323	movsl	%d0,%a0@+		| put lword
1324	nop				| sync
1325	movl	%a1@+,%d0		| get lword
1326	movsl	%d0,%a0@+		| put lword
1327	nop				| sync
1328	movl	%a1@+,%d0		| get lword
1329	movsl	%d0,%a0@+		| put lword
1330	nop				| sync
1331	moveq	#0,%d0			| indicate no fault
1332	jra	Lsldone
1333Lslerr:
1334	moveq	#-1,%d0
1335Lsldone:
1336	movl	_C_LABEL(curpcb),%a1	| current pcb
1337	clrl	%a1@(PCB_ONFAULT) 	| clear fault address
1338	rts
1339#endif
1340
1341#if defined(ENABLE_HP_CODE)
1342ENTRY(ecacheon)
1343	tstl	_C_LABEL(ectype)
1344	jeq	Lnocache7
1345	MMUADDR(%a0)
1346	orl	#MMU_CEN,%a0@(MMUCMD)
1347Lnocache7:
1348	rts
1349
1350ENTRY(ecacheoff)
1351	tstl	_C_LABEL(ectype)
1352	jeq	Lnocache8
1353	MMUADDR(%a0)
1354	andl	#~MMU_CEN,%a0@(MMUCMD)
1355Lnocache8:
1356	rts
1357#endif
1358
1359ENTRY_NOPROFILE(getsfc)
1360	movc	%sfc,%d0
1361	rts
1362
1363ENTRY_NOPROFILE(getdfc)
1364	movc	%dfc,%d0
1365	rts
1366
1367/*
1368 * Load a new user segment table pointer.
1369 */
1370ENTRY(loadustp)
1371#if defined(M68K_MMU_MOTOROLA)
1372	tstl	_C_LABEL(mmutype)	| HP MMU?
1373	jeq	Lhpmmu9			| yes, skip
1374	movl	%sp@(4),%d0		| new USTP
1375	moveq	#PGSHIFT,%d1
1376	lsll	%d1,%d0			| convert to addr
1377#if defined(M68040)
1378	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1379	jne	LmotommuC		| no, skip
1380	.word	0xf518			| yes, pflusha
1381	.long	0x4e7b0806		| movc %d0,urp
1382	rts
1383LmotommuC:
1384#endif
1385	pflusha				| flush entire TLB
1386	lea	_C_LABEL(protorp),%a0	| CRP prototype
1387	movl	%d0,%a0@(4)		| stash USTP
1388	pmove	%a0@,%crp		| load root pointer
1389	movl	#CACHE_CLR,%d0
1390	movc	%d0,%cacr		| invalidate cache(s)
1391	rts
1392Lhpmmu9:
1393#endif
1394#if defined(M68K_MMU_HP)
1395	movl	#CACHE_CLR,%d0
1396	movc	%d0,%cacr		| invalidate cache(s)
1397	MMUADDR(%a0)
1398	movl	%a0@(MMUTBINVAL),%d1	| invalidate TLB
1399	tstl	_C_LABEL(ectype)	| have external VAC?
1400	jle	1f			| no, skip
1401	andl	#~MMU_CEN,%a0@(MMUCMD)	| toggle cache enable
1402	orl	#MMU_CEN,%a0@(MMUCMD)	| to clear data cache
14031:
1404	movl	%sp@(4),%a0@(MMUUSTP)	| load a new USTP
1405#endif
1406	rts
1407
1408ENTRY(ploadw)
1409#if defined(M68K_MMU_MOTOROLA)
1410	movl	%sp@(4),%a0		| address to load
1411#if defined(M68K_MMU_HP)
1412	tstl	_C_LABEL(mmutype)	| HP MMU?
1413	jeq	Lploadwskp		| yes, skip
1414#endif
1415#if defined(M68040)
1416	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1417	jeq	Lploadwskp		| yes, skip
1418#endif
1419	ploadw	#1,%a0@			| pre-load translation
1420Lploadwskp:
1421#endif
1422	rts
1423
1424/*
1425 * Set processor priority level calls.  Most are implemented with
1426 * inline asm expansions.  However, spl0 requires special handling
1427 * as we need to check for our emulated software interrupts.
1428 */
1429
1430ENTRY(spl0)
1431	moveq	#0,%d0
1432	movw	%sr,%d0			| get old SR for return
1433	movw	#PSL_LOWIPL,%sr		| restore new SR
1434	tstb	_C_LABEL(ssir)		| software interrupt pending?
1435	jeq	Lspldone		| no, all done
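	| The next four instructions rebuild our caller's return address
	| into a format-0 exception frame (SR word, PC, frame type word)
	| so that the rte eventually executed via Lgotsir returns there.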
1436	subql	#4,%sp			| make room for RTE frame
1437	movl	%sp@(4),%sp@(2)		| position return address
1438	clrw	%sp@(6)			| set frame type 0
1439	movw	#PSL_LOWIPL,%sp@	| and new SR
1440	jra	Lgotsir			| go handle it
1441Lspldone:
1442	rts
1443
1444ENTRY(getsr)
1445	moveq	#0,%d0
1446	movw	%sr,%d0
1447	rts
1448
1449/*
1450 * _delay(u_int N)
1451 *
1452 * Delay for at least (N/256) microseconds.
1453 * This routine depends on the variable:  delay_divisor
1454 * which should be set based on the CPU clock rate.
1455 */
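/*
 * A worked example: a request for 100us is passed in as N = 100 << 8 =
 * 25600, so the loop below runs roughly 25600/delay_divisor times;
 * delay_divisor is expected to be calibrated so that one iteration
 * costs about delay_divisor/256 microseconds.
 */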
1456ENTRY_NOPROFILE(_delay)
1457	| %d0 = arg = (usecs << 8)
1458	movl	%sp@(4),%d0
1459	| %d1 = delay_divisor
1460	movl	_C_LABEL(delay_divisor),%d1
1461	jra	L_delay			/* Jump into the loop! */
1462
1463	/*
1464	 * Align the branch target of the loop to a half-line (8-byte)
1465	 * boundary to minimize cache effects.  This guarantees both
1466	 * that there will be no prefetch stalls due to cache line burst
1467	 * operations and that the loop will run from a single cache
1468	 * half-line.
1469	 */
1470	.align	8
1471L_delay:
1472	subl	%d1,%d0
1473	jgt	L_delay
1474	rts
1475
1476/*
1477 * Save and restore 68881 state.
1478 */
1479ENTRY(m68881_save)
1480	movl	%sp@(4),%a0		| save area pointer
1481	fsave	%a0@			| save state
1482	tstb	%a0@			| null state frame?
1483	jeq	Lm68881sdone		| yes, all done
1484	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1485	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control registers
1486Lm68881sdone:
1487	rts
1488
1489ENTRY(m68881_restore)
1490	movl	%sp@(4),%a0		| save area pointer
1491	tstb	%a0@			| null state frame?
1492	jeq	Lm68881rdone		| yes, easy
1493	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control registers
1494	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1495Lm68881rdone:
1496	frestore %a0@			| restore state
1497	rts
1498
1499/*
1500 * Handle the nitty-gritty of rebooting the machine.
1501 * Basically we just turn off the MMU and jump to the appropriate ROM routine.
1502 * Note that we must be running in an address range that is mapped one-to-one
1503 * logical to physical so that the PC is still valid immediately after the MMU
1504 * is turned off.  We have conveniently mapped the last page of physical
1505 * memory this way.
1506 */
1507ENTRY_NOPROFILE(doboot)
1508	movw	#PSL_HIGHIPL,%sr	| no interrupts
1509
1510	movl	#CACHE_OFF,%d0
1511	movc	%d0,%cacr		| clear and disable on-chip cache(s)
1512
1513	| Turn on physical memory mapping.
1514	| @@@ This is also 68040 specific and needs fixing.
1515	movel	#0x0200c040,%d0		| intio devices are at 0x02000000
1516	.long	0x4e7b0004		| movc %d0,%itt0
1517	.long	0x4e7b0006		| movc %d0,%dtt0
1518	movel	#0x0403c000,%d0		| kernel text and data at 0x04000000
1519	.long	0x4e7b0005		| movc %d0,%itt1
1520	.long	0x4e7b0007		| movc %d0,%dtt1
1521
1522	moveal   #NEXT_RAMBASE,%a5	| amount to RELOC by.
1523
1524	| Create a new stack at address tmpstk, and push
1525	| the existing sp onto it for kicks.
1526	ASRELOC(tmpstk, %a0)
1527	movel	%sp,%a0@-
1528	moveal  %a0,%sp
1529	moveal  #0,%a6
1530
1531	ASRELOC(Ldoboot1, %a0)
1532	jmp     %a0@			| jump into physical address space.
1533Ldoboot1:
1534	ASRELOC(save_vbr, %a0)
1535	movl    %a0@,%d0
1536	movc    %d0,%vbr
1537
1538	| reset the registers as the boot rom likes them:
1539	movel	#0x0200c040,%d0		|
1540	.long	0x4e7b0004		| movc %d0,%itt0
1541	.long	0x4e7b0006		| movc %d0,%dtt0
1542	movel	#0x00ffc000,%d0		|
1543	.long	0x4e7b0005		| movc %d0,%itt1
1544	.long	0x4e7b0007		| movc %d0,%dtt1
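	| (0x00ffc000: base 0x00, mask 0xff, i.e. the whole 4GB address
	| space transparently mapped, cachable write-through, the way the
	| boot ROM expects to find it.)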
1545
1546	RELOC(monbootflag, %a0)
1547	movel %a0,%d0			| "-h" halts instead of reboot.
1548	trap #13
1549
1550hloop:
1551	bra hloop			| This shouldn't be reached.
1552/*
1553 * Misc. global variables.
1554 */
1555	.data
1556GLOBAL(machineid)
1557	.long	0xdeadbeef	| default to @@@
1558
1559GLOBAL(mmuid)
1560	.long	0		| default to nothing
1561
1562GLOBAL(mmutype)
1563	.long	0xdeadbeef	| set during startup (0xdeadbeef = unset)
1564
1565GLOBAL(cputype)
1566	.long	0xdeadbeef	| set during startup (0xdeadbeef = unset)
1567
1568#if defined(ENABLE_HP_CODE)
1569GLOBAL(ectype)
1570	.long	EC_NONE		| external cache type, default to none
1571#endif
1572
1573GLOBAL(fputype)
1574	.long	0xdeadbeef	| set during startup (0xdeadbeef = unset)
1575
1576GLOBAL(protorp)
1577	.long	0,0		| prototype root pointer
1578
1579GLOBAL(prototc)
1580	.long	0		| prototype translation control
1581
1582GLOBAL(want_resched)
1583	.long	0
1584
1585GLOBAL(proc0paddr)
1586	.long	0		| KVA of proc0 u-area
1587
1588GLOBAL(intiobase)
1589	.long	INTIOBASE	| KVA of base of internal IO space
1590
1591GLOBAL(intiolimit)
1592	.long	INTIOTOP	| KVA of end of internal IO space
1593
1594GLOBAL(monobase)
1595	.long	MONOBASE	| KVA of base of mono FB
1596
1597GLOBAL(monolimit)
1598	.long	MONOTOP		| KVA of end of mono FB
1599
1600GLOBAL(colorbase)
1601	.long	COLORBASE	| KVA of base of color FB
1602
1603GLOBAL(colorlimit)
1604	.long	COLORTOP	| KVA of end of color FB
1605
1606ASLOCAL(save_vbr)		| VBR from ROM
1607	.long 0xdeadbeef
1608
1609GLOBAL(monbootflag)
1610	.long 0
1611
1612#if defined(ENABLE_HP_CODE)
1613GLOBAL(extiobase)
1614	.long	0		| KVA of base of external IO space
1615
1616GLOBAL(CLKbase)
1617	.long	0		| KVA of base of clock registers
1618
1619GLOBAL(MMUbase)
1620	.long	0		| KVA of base of HP MMU registers
1621
1622GLOBAL(pagezero)
1623	.long	0		| PA of first page of kernel text
1624#endif
1625
1626#ifdef USELEDS
1627ASLOCAL(heartbeat)
1628	.long	0		| clock ticks since last pulse of heartbeat
1629
1630ASLOCAL(beatstatus)
1631	.long	0		| for determining a fast or slow throb
1632#endif
1633
1634#ifdef DEBUG
1635ASGLOBAL(fulltflush)
1636	.long	0
1637
1638ASGLOBAL(fullcflush)
1639	.long	0
1640#endif
1641
1642/* interrupt counters */
1643GLOBAL(intrnames)
1644	.asciz	"spur"
1645	.asciz	"lev1"
1646	.asciz	"lev2"
1647	.asciz	"lev3"
1648	.asciz	"lev4"
1649	.asciz	"lev5"
1650	.asciz	"lev6"
1651	.asciz  "lev7"
1652	.asciz	"nmi"
1653GLOBAL(eintrnames)
1654	.even
1655GLOBAL(intrcnt)
1656	.long	0,0,0,0,0,0,0,0,0
1657GLOBAL(eintrcnt)
1658
1659