/* xref: /netbsd/sys/arch/next68k/next68k/locore.s (revision c4a72b64) */
/*	$NetBSD: locore.s,v 1.36 2002/09/11 01:46:34 mycroft Exp $	*/

/*
 * Copyright (c) 1998 Darrin B. Jewell
 * Copyright (c) 1994, 1995 Gordon W. Ross
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: locore.s 1.66 92/12/22$
 *
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

/* This is currently amid development by
 * Darrin Jewell <jewell@mit.edu>  Fri Jan  2 14:36:47 1998
 * for the next68k port
 */
51
52#include "opt_compat_netbsd.h"
53#include "opt_compat_svr4.h"
54#include "opt_compat_sunos.h"
55#include "opt_ddb.h"
56#include "opt_fpsp.h"
57#include "opt_kgdb.h"
58#include "opt_lockdebug.h"
59
60#include "assym.h"
61#include <machine/asm.h>
62#include <machine/trap.h>
63
64#if (!defined(M68040))
65#error "M68040 is not defined! (check that the generated assym.h is not empty)"
66#endif
67
/*
 * This is for kvm_mkdb, and should be the address of the beginning
 * of the kernel text segment (not necessarily the same as kernbase).
 */
	.text
GLOBAL(kernel_text)

 /*
  * Leave page zero empty so it can be unmapped
  */
	.space	NBPG

/*
 * Temporary stack for a variety of purposes.
 * The stack grows downward, so tmpstk/bgnstack label the high end
 * and endstack the low end (used with predecrement pushes below and
 * by the kernel-debugger entry path).
 */
	.data
GLOBAL(endstack)
	.space	NBPG
GLOBAL(bgnstack)
ASLOCAL(tmpstk)

#include <next68k/next68k/vectors.s>

/*
 * Macro to relocate a symbol, used before MMU is enabled.
 * On the NeXT, memory is laid out as in the mach header
 * so therefore we need to relocate symbols until the MMU
 * is turned on.
 * %a5 carries the relocation offset (NEXT_RAMBASE, set in start below)
 * for the whole pre-MMU section; "ar" receives the relocated address.
 */
#define	_RELOC(var, ar)		\
	lea	var,ar;		\
	addl	%a5,ar

#define	RELOC(var, ar)		_RELOC(_C_LABEL(var), ar)
#define	ASRELOC(var, ar)	_RELOC(_ASM_LABEL(var), ar)
103
104/*
105 * Initialization info as per grossman's boot loader:
106 *
107 * We are called from the boot prom, not the boot loader. We have the
108 * prom's stack initialized for us and we were called like this:
109 * start(mg, mg->mg_console_i, mg->mg_console_o,
110 *       mg->mg_boot_dev, mg->mg_boot_arg, mg->mg_boot_info,
111 *       mg->mg_sid, mg->mg_pagesize, 4, mg->mg_region,
112 *       etheraddr, mg->mg_boot_file);
113 * so we actually only really need the first parameter from the stack.
114 * Exceptions will be handled by the prom until we feel ready to handle
115 * them ourselves.
116 * By the way, we get loaded at our final address i.e. PA==VA for the kernel.
117 */
118 /* I think the PA==VA comment to be a lie, but I have yet to verify it.
119  * Darrin B Jewell <jewell@mit.edu>  Sun Jan 11 01:05:54 1998
120 */
121BSS(lowram,4)
122BSS(esym,4)
123
ASENTRY_NOPROFILE(start)
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	movl	#CACHE_OFF,%d0
	movc	%d0,%cacr		| clear and disable on-chip cache(s)

	moveal	#NEXT_RAMBASE,%a5	| amount to RELOC by.
	RELOC(lowram,%a0)		| store base of memory.
	movl    %a5,%a0@

	| Create a new stack at address tmpstk, and push
	| the existing sp onto it as an arg for next68k_bootargs.
	ASRELOC(tmpstk, %a0)
	movel	%sp,%a0@-
	moveal  %a0,%sp
	moveal  #0,%a6			| %a6 = 0 (no frame pointer yet)

	/* Read the header to get our segment list */
	RELOC(next68k_bootargs,%a0)
	jbsr	%a0@			| next68k_bootargs(args)
	addqw	#4,%sp			| clear arg from stack.

	/*
	 * All data registers are now free.  All address registers
	 * except %a5 are free.  %a5 is used by the RELOC() macro on hp300
	 * and cannot be used until after the MMU is enabled.
	 */
150
/* determine our CPU/MMU combo - check for all regardless of kernel config */
/* Probe technique: write a %cacr bit that only exists on one CPU model
 * and read it back; if it stuck, that is the CPU we are running on. */
	movl	#0x200,%d0		| data freeze bit
	movc	%d0,%cacr		|   only exists on 68030
	movc	%cacr,%d0		| read it back
	tstl	%d0			| zero?
	jeq	Lnot68030		| yes, we have 68020/68040

	/*
	 * 68030 models
	 */

	RELOC(mmutype, %a0)		| no, we have 68030
	movl	#MMU_68030,%a0@		| set to reflect 68030 PMMU
	RELOC(cputype, %a0)
	movl	#CPU_68030,%a0@		| and 68030 CPU
	RELOC(machineid, %a0)
	movl	#30,%a0@		| @@@ useless
	jra	Lstart1

	/*
	 * End of 68030 section
	 */

Lnot68030:
	bset	#31,%d0			| data cache enable bit
	movc	%d0,%cacr		|   only exists on 68040
	movc	%cacr,%d0		| read it back
	tstl	%d0			| zero?
	beq	Lis68020		| yes, we have 68020
	moveq	#0,%d0			| now turn it back off
	movec	%d0,%cacr		|   before we access any data

	/*
	 * 68040 models
	 */

	RELOC(mmutype, %a0)
	movl	#MMU_68040,%a0@		| with a 68040 MMU
	RELOC(cputype, %a0)
	movl	#CPU_68040,%a0@		| and a 68040 CPU
	RELOC(fputype, %a0)
	movl	#FPU_68040,%a0@		| ...and FPU
#if defined(ENABLE_HP_CODE)
	RELOC(ectype, %a0)
	movl	#EC_NONE,%a0@		| and no cache (for now XXX)
#endif
	RELOC(machineid, %a0)
	movl	#40,%a0@		| @@@ useless
	jra	Lstart1

	/*
	 * End of 68040 section
	 */

	/*
	 * 68020 models
	 * (There are no 68020 models of NeXT, but we'll pretend)
	 */

Lis68020:
	RELOC(mmutype, %a0)
	movl	#MMU_68851,%a0@		| no, we have PMMU
	RELOC(fputype, %a0)		| all of the 68020 systems
	movl	#FPU_68881,%a0@		|   have a 68881 FPU
	RELOC(cputype, %a0)
	movl	#CPU_68020,%a0@		| and a 68020 CPU
	RELOC(machineid, %a0)
	movl	#20,%a0@			| @@@ useless
	jra	Lstart1

	/*
	 * End of 68020 section
	 */
224
Lstart1:
	/*
	 * Now that we know what CPU we have, initialize the address error
	 * and bus error handlers in the vector table:
	 *
	 *	vectab+8	bus error
	 *	vectab+12	address error
	 */
	RELOC(cputype, %a0)
#if 0
	/* XXX assembler/linker feature/bug */
	RELOC(vectab, %a2)
#else
	/* open-coded RELOC() of vectab (see XXX above) */
	movl	#_C_LABEL(vectab),%a2
	addl	%a5,%a2
#endif
#if defined(M68040)
	cmpl	#CPU_68040,%a0@		| 68040?
	jne	1f			| no, skip
	movl	#_C_LABEL(buserr40),%a2@(8)
	movl	#_C_LABEL(addrerr4060),%a2@(12)
	jra	Lstart2
1:
#endif
#if defined(M68020) || defined(M68030)
	cmpl	#CPU_68040,%a0@		| 68040?
	jeq	1f			| yes, skip
	movl	#_C_LABEL(busaddrerr2030),%a2@(8)
	movl	#_C_LABEL(busaddrerr2030),%a2@(12)
	jra	Lstart2
1:
#endif
	/* Config botch; no hope. */
	PANIC("Config botch in locore")
259
Lstart2:
/* initialize source/destination control registers for movs */
	moveq	#FC_USERD,%d0		| user space
	movc	%d0,%sfc		|   as source
	movc	%d0,%dfc		|   and destination of transfers
/* configure kernel and proc0 VA space so we can get going */
#ifdef DDB
	RELOC(esym,%a0)			| end of static kernel text/data/syms
	movl	%a0@,%d5
	jne	Lstart3			| non-zero esym? use it as end of kernel
#endif
	movl	#_C_LABEL(end),%d5	| end of static kernel text/data

Lstart3:
	addl	#NBPG-1,%d5
	andl	#PG_FRAME,%d5		| round up to a page boundary
	movl	%d5,%a4
	addl	%a5,%a4			| convert to PA
	pea	%a5@			| firstpa
	pea	%a4@			| nextpa
	RELOC(pmap_bootstrap,%a0)
	jbsr	%a0@			| pmap_bootstrap(firstpa,nextpa)
	addql	#8,%sp
283
/*
 * Prepare to enable MMU.
 * Since the kernel is not mapped logical == physical we must ensure
 * that when the MMU is turned on, all prefetched addresses (including
 * the PC) are valid.  In order to guarantee that, we use the last physical
 * page (which is conveniently mapped == VA) and load it up with enough
 * code to defeat the prefetch, then we execute the jump back to here.
 *
 * Is this all really necessary, or am I paranoid??
 */
	RELOC(Sysseg, %a0)		| system segment table addr
	movl	%a0@,%d1		| read value (a KVA)
	addl	%a5,%d1			| convert to PA

	RELOC(mmutype, %a0)
#if defined(ENABLE_HP_CODE)
	tstl	%a0@			| HP MMU?
	jeq	Lhpmmu2			| yes, skip
#endif
	cmpl	#MMU_68040,%a0@		| 68040?
	jne	Lmotommu1		| no, skip
	.long	0x4e7b1807		| movc %d1,%srp (hand-assembled opcode)
	jra	Lstploaddone
Lmotommu1:
	RELOC(protorp, %a0)
	movl	#0x80000202,%a0@	| nolimit + share global + 4 byte PTEs
	movl	%d1,%a0@(4)		| + segtable address
	pmove	%a0@,%srp		| load the supervisor root pointer
	movl	#0x80000002,%a0@	| reinit upper half for CRP loads

#if defined(ENABLE_HP_CODE)
	jra	Lstploaddone		| done
Lhpmmu2:
	moveq	#PGSHIFT,%d2
	lsrl	%d2,%d1			| convert to page frame
	movl	%d1,INTIOBASE+MMUBASE+MMUSSTP | load in sysseg table register
#endif
Lstploaddone:
#if defined(ENABLE_MAXADDR_TRAMPOLINE)
	lea	MAXADDR,%a2		| PA of last RAM page
	ASRELOC(Lhighcode, %a1)		| addr of high code
	ASRELOC(Lehighcode, %a3)	| end addr
Lcodecopy:
	movw	%a1@+,%a2@+		| copy a word
	cmpl	%a3,%a1			| done yet?
	jcs	Lcodecopy		| no, keep going
	jmp	MAXADDR			| go for it!
	/*
	 * BEGIN MMU TRAMPOLINE.  This section of code is not
	 * executed in-place.  It's copied to the last page
	 * of RAM (mapped va == pa) and executed there.
	 */

Lhighcode:
#endif /* ENABLE_MAXADDR_TRAMPOLINE */

	/*
	 * Set up the vector table, and race to get the MMU
	 * enabled.
	 */

	movc    %vbr,%d0		| Keep copy of ROM VBR
	ASRELOC(save_vbr,%a0)
	movl    %d0,%a0@
	movl	#_C_LABEL(vectab),%d0	| set Vector Base Register
	movc	%d0,%vbr

	RELOC(mmutype, %a0)
#if defined(ENABLE_HP_CODE)
	tstl	%a0@			| HP MMU?
	jeq	Lhpmmu3			| yes, skip
#endif
	cmpl	#MMU_68040,%a0@		| 68040?
	jne	Lmotommu2		| no, skip
#if defined(ENABLE_HP_CODE)
	movw	#0,INTIOBASE+MMUBASE+MMUCMD+2
	movw	#MMU_IEN+MMU_CEN+MMU_FPE,INTIOBASE+MMUBASE+MMUCMD+2
					| enable FPU and caches
#endif

	| This is a hack to get PA=KVA when turning on MMU
	| it will only work on 68040's.  We should fix something
	| to boot 68030's later.
	| (Transparent-translation regs cover us while the MMU comes up;
	|  they are disabled again at Lturnoffttr once we run mapped.)
	movel	#0x0200c040,%d0		| intio devices are at 0x02000000
	.long	0x4e7b0004		| movc %d0,%itt0
	.long	0x4e7b0006		| movc %d0,%dtt0
	movel	#0x0403c000,%d0		| kernel text and data at 0x04000000
	.long	0x4e7b0005		| movc %d0,%itt1
	.long	0x4e7b0007		| movc %d0,%dtt1

	.word	0xf4d8			| cinva bc
	.word	0xf518			| pflusha
	movl	#0x8000,%d0
	.long	0x4e7b0003		| movc %d0,tc (enable translation)
	movl	#0x80008000,%d0
	movc	%d0,%cacr		| turn on both caches

	jmp     Lturnoffttr:l		| global jump into mapped memory.
Lturnoffttr:
	moveq	#0,%d0			| ensure TT regs are disabled
	.long	0x4e7b0004		| movc %d0,%itt0
	.long	0x4e7b0006		| movc %d0,%dtt0
	.long	0x4e7b0005		| movc %d0,%itt1
	.long	0x4e7b0007		| movc %d0,%dtt1
	jmp	Lenab1
Lmotommu2:
#if defined(ENABLE_HP_CODE)
	movl	#MMU_IEN+MMU_FPE,INTIOBASE+MMUBASE+MMUCMD
					| enable 68881 and i-cache
#endif
	RELOC(prototc, %a2)
	movl	#0x82c0aa00,%a2@	| value to load TC with
	pmove	%a2@,%tc		| load it
	jmp	Lenab1:l		| force absolute (not pc-relative) jmp
#if defined(ENABLE_HP_CODE)
Lhpmmu3:
	movl	#0,INTIOBASE+MMUBASE+MMUCMD	| clear external cache
	movl	#MMU_ENAB,INTIOBASE+MMUBASE+MMUCMD | turn on MMU
	jmp	Lenab1:l			| jmp to mapped code
#endif
#if defined(ENABLE_MAXADDR_TRAMPOLINE)
Lehighcode:

	/*
	 * END MMU TRAMPOLINE.  Address register %a5 is now free.
	 */
#endif
411
/*
 * Should be running mapped from this point on
 */
Lenab1:
/* select the software page size now */
	lea	_ASM_LABEL(tmpstk),%sp	| temporary stack
	jbsr	_C_LABEL(uvm_setpagesize) | select software page size
	| (A leftover "bsr Lpushpc" / "Lpushpc:" pair was removed here:
	|  it pushed the return PC onto tmpstk and never popped or used
	|  it, and %sp is reloaded from proc0paddr just below, so the
	|  push was dead code.)

/* set kernel stack, user %SP, and initial pcb */
	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set kernel stack to end of area
	lea	_C_LABEL(proc0),%a2	| initialize proc0.p_addr so that
	movl	%a1,%a2@(P_ADDR)	|   we don't deref NULL in trap()
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP
	movl	%a1,_C_LABEL(curpcb)	| proc0 is running

	tstl	_C_LABEL(fputype)	| Have an FPU?
	jeq	Lenab2			| No, skip.
	clrl	%a1@(PCB_FPCTX)		| ensure null FP context
	movl	%a1,%sp@-
	jbsr	_C_LABEL(m68881_restore) | restore it (does not kill %a1)
	addql	#4,%sp
Lenab2:
	cmpl	#MMU_68040,_C_LABEL(mmutype)	| 68040?
	jeq	Ltbia040		| yes, cache already on
	pflusha
	movl	#CACHE_ON,%d0
	movc	%d0,%cacr		| clear cache(s)
	jra	Lenab3
Ltbia040:
	.word	0xf518			| pflusha (68040 opcode)
Lenab3:

	jbsr	_C_LABEL(next68k_init)
450
/* Final setup for call to main(). */
/*
 * Create a fake exception frame so that cpu_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| vector offset/frame type
	clrl	%sp@-			| PC - filled in by "execve"
	movw	#PSL_USER,%sp@-		| in user mode
	clrl	%sp@-			| stack adjust count and padding
	lea	%sp@(-64),%sp		| construct space for %D0-%D7/%A0-%A7
	lea	_C_LABEL(proc0),%a0	| save pointer to frame
	movl	%sp,%a0@(P_MD_REGS)	|   in proc0.p_md.md_regs

	jra	_C_LABEL(main)		| main()
	PANIC("main() returned")
	/* NOTREACHED */
468
/*
 * proc_trampoline: call function in register %a2 with %a3 as an arg
 * and then rei.
 * NOTE(review): appears to be the first code run in a newly-forked
 * process, on the frame built by cpu_fork() (see fake-frame setup
 * above) -- confirm against cpu_fork().
 */
GLOBAL(proc_trampoline)
	movl	%a3,%sp@-		| push function arg
	jbsr	%a2@			| call function
	addql	#4,%sp			| pop arg
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	jra	_ASM_LABEL(rei)		| and return
482
483
/*
 * Trap/interrupt vector routines
 */
487#include <m68k/m68k/trap_subr.s>
488
	.data
GLOBAL(m68k_fault_addr)		| fault VA saved by the nofault bus-error paths below
	.long	0
492
#if defined(M68040) || defined(M68060)
/*
 * Address error handler for 68040/68060: build a trap frame and
 * hand off to trap() via faultstkadj with type T_ADDRERR.
 */
ENTRY_NOPROFILE(addrerr4060)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%sp@(FR_HW+8),%sp@-	| push VA from the hardware frame
	clrl	%sp@-			| dummy code
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
#endif
504
#if defined(M68060)
/*
 * Bus error handler for 68060: decode the FSLW to distinguish
 * branch-prediction errors, MMU faults, and true bus errors.
 */
ENTRY_NOPROFILE(buserr60)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movel	%sp@(FR_HW+12),%d0	| FSLW
	btst	#2,%d0			| branch prediction error?
	jeq	Lnobpe
	movc	%cacr,%d2
	orl	#IC60_CABC,%d2		| clear all branch cache entries
	movc	%d2,%cacr
	movl	%d0,%d1
	addql	#1,L60bpe		| count it
					| NOTE(review): L60bpe is not defined
					|   in this part of the file -- verify
	andl	#0x7ffd,%d1
	jeq	_ASM_LABEL(faultstkadjnotrap2)
Lnobpe:
| we need to adjust for misaligned addresses
	movl	%sp@(FR_HW+8),%d1	| grab VA
	btst	#27,%d0			| check for mis-aligned access
	jeq	Lberr3			| no, skip
	addl	#28,%d1			| yes, get into next page
					| operand case: 3,
					| instruction case: 4+12+12
	andl	#PG_FRAME,%d1		| and truncate
Lberr3:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| code is FSLW now.
	andw	#0x1f80,%d0
	jeq	Lberr60			| it is a bus error
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lberr60:
	tstl	_C_LABEL(nofault)	| catch bus error?
	jeq	Lisberr			| no, handle as usual
	movl	%sp@(FR_HW+8+8),_C_LABEL(m68k_fault_addr) | save fault addr
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
	/* NOTREACHED */
#endif
#if defined(M68040)
/*
 * Bus error handler for 68040: use the SSW to distinguish MMU faults
 * (ATC bit set) from true bus errors.
 */
ENTRY_NOPROFILE(buserr40)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%sp@(FR_HW+20),%d1	| get fault address
	moveq	#0,%d0
	movw	%sp@(FR_HW+12),%d0	| get SSW
	btst	#11,%d0			| check for mis-aligned
	jeq	Lbe1stpg		| no, skip
	addl	#3,%d1			| get into next page
	andl	#PG_FRAME,%d1		| and truncate
Lbe1stpg:
	movl	%d1,%sp@-		| pass fault address.
	movl	%d0,%sp@-		| pass SSW as code
	btst	#10,%d0			| test ATC
	jeq	Lberr40			| it is a bus error
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lberr40:
	tstl	_C_LABEL(nofault)	| catch bus error?
	jeq	Lisberr			| no, handle as usual
	movl	%sp@(FR_HW+8+20),_C_LABEL(m68k_fault_addr) | save fault addr
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
	/* NOTREACHED */
#endif
573
#if defined(M68020) || defined(M68030)
/*
 * Combined bus/address error handler for 68020/68030.  Most of the
 * complexity is decoding the 020/030 SSW and stack-frame formats to
 * recover the faulting address, then classifying the fault with a
 * PMMU table search (ptestr).
 */
ENTRY_NOPROFILE(busaddrerr2030)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	moveq	#0,%d0
	movw	%sp@(FR_HW+10),%d0	| grab SSW for fault processing
	btst	#12,%d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,%d0			| yes, must set FB
	movw	%d0,%sp@(FR_HW+10)	| for hardware too
LbeX0:
	btst	#13,%d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,%d0			| yes, must set FC
	movw	%d0,%sp@(FR_HW+10)	| for hardware too
LbeX1:
	btst	#8,%d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	%sp@(FR_HW+16),%d1	| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,%sp@(FR_HW+6)	| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	%sp@(FR_HW+2),%d1	| no, can use save PC
	btst	#14,%d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,%d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	%sp@(FR_HW+36),%d1	| long format, use stage B address
	btst	#15,%d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,%d1			| yes, adjust address
Lbe10:
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%sp@(FR_HW+8+6),%d0	| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it
#if defined(M68K_MMU_MOTOROLA)
#if defined(M68K_MMU_HP)
	tstl	_C_LABEL(mmutype)	| HP MMU?
	jeq	Lbehpmmu		| yes, different MMU fault handler
#endif
	movl	%d1,%a0			| fault address
	movl	%sp@,%d0		| function code from ssw
	btst	#8,%d0			| data fault?
	jne	Lbe10a
	movql	#1,%d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,%sp@(FR_HW+8)	| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,%d0			| else supervisor program access
Lbe10a:
	ptestr	%d0,%a0@,#7		| do a table search
	pmove	%psr,%sp@		| save result
	movb	%sp@,%d1
	btst	#2,%d1			| invalid (incl. limit viol. and berr)?
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,%d1			| is it MMU table berr?
	jne	Lisberr1		| yes, needs not be fast.
#endif /* M68K_MMU_MOTOROLA */
Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
#if defined(M68K_MMU_MOTOROLA)
Lmightnotbemerr:
	btst	#3,%d1			| write protect bit set?
	jeq	Lisberr1		| no: must be bus error
	movl	%sp@,%d0			| ssw into low word of %d0
	andw	#0xc0,%d0		| Write protect is set on page:
	cmpw	#0x40,%d0		| was it read cycle?
	jne	Lismerr			| no, was not WPE, must be MMU fault
	jra	Lisberr1		| real bus err needs not be fast.
#endif /* M68K_MMU_MOTOROLA */
#if defined(M68K_MMU_HP)
Lbehpmmu:
	MMUADDR(%a0)
	movl	%a0@(MMUSTAT),%d0	| read MMU status
	btst	#3,%d0			| MMU fault?
	jeq	Lisberr1		| no, just a non-MMU bus error
	andl	#~MMU_FAULT,%a0@(MMUSTAT)| yes, clear fault bits
	movw	%d0,%sp@		| pass MMU stat in upper half of code
	jra	Lismerr			| and handle it
#endif
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	%sp@			| re-clear pad word
	tstl	_C_LABEL(nofault)	| catch bus error?
	jeq	Lisberr			| no, handle as usual
	movl	%sp@(FR_HW+8+16),_C_LABEL(m68k_fault_addr) | save fault addr
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
	/* NOTREACHED */
#endif /* M68020 || M68030 */
679
/* Common tail: push T_BUSERR and let faultstkadj/trap() handle it. */
Lisberr:				| also used by M68040/60
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
683
/*
 * FP exceptions.
 */
/* Unimplemented FP instruction (F-line) trap. */
ENTRY_NOPROFILE(fpfline)
#if defined(M68040)
	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
	jne	Lfp_unimp		| no, skip FPSP
	cmpw	#0x202c,%sp@(6)		| format type 2?
	jne	_C_LABEL(illinst)	| no, not an FP emulation
Ldofp_unimp:
#ifdef FPSP
	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
#endif
Lfp_unimp:
#endif /* M68040 */
#ifdef FPU_EMULATE
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
#else
	jra	_C_LABEL(illinst)
#endif
707
/* Unsupported FP data type trap. */
ENTRY_NOPROFILE(fpunsupp)
#if defined(M68040)
	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
	jne	_C_LABEL(illinst)	| no, treat as illinst
#ifdef FPSP
	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
#endif
Lfp_unsupp:
#endif /* M68040 */
#ifdef FPU_EMULATE
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it
#else
	jra	_C_LABEL(illinst)
#endif
725
/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
ENTRY_NOPROFILE(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0 | current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
#if defined(M68040) || defined(M68060)
	/* always null state frame on 68040, 68060 */
	cmpl	#FPU_68040,_C_LABEL(fputype)
	jle	Lfptnull
#endif
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push %fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
756
/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.
 */

/* Unexpected (stray) trap: report it via straytrap() and return. */
ENTRY_NOPROFILE(badtrap)
	moveml	#0xC0C0,%sp@-		| save scratch regs %d0-%d1/%a0-%a1
	movw	%sp@(22),%sp@-		| push exception vector info
	clrw	%sp@-
	movl	%sp@(22),%sp@-		| and PC
	jbsr	_C_LABEL(straytrap)	| report
	addql	#8,%sp			| pop args
	moveml	%sp@+,#0x0303		| restore regs
	jra	_ASM_LABEL(rei)		| all done
771
/* Trap 0: system call entry; %d0 holds the syscall number. */
ENTRY_NOPROFILE(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	tstl	_C_LABEL(astpending)	| AST pending?
	jne	Lrei2			| yes, handle it through the rei path
	tstb	_C_LABEL(ssir)		| software interrupt pending?
	jeq	Ltrap1			| no, fast path out
	movw	#SPL1,%sr		| drop IPL and re-check
	tstb	_C_LABEL(ssir)
	jne	Lsir1			| still pending, go service it
Ltrap1:
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	rte
793
/*
 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
 *	cachectl(command, addr, length)
 * command in %d0, addr in %a1, length in %d1
 */
ENTRY_NOPROFILE(trap12)
	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done
807
/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
ENTRY_NOPROFILE(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save all registers
	moveq	#T_TRACE,%d0		| trap type for the handlers below

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	Lkbrkpt			| yes, kernel breakpoint
	jra	_ASM_LABEL(fault)	| no, user-mode fault
828
/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
ENTRY_NOPROFILE(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save all registers
	moveq	#T_TRAP15,%d0		| trap type
	movw	%sp@(FR_HW),%d1		| get PSW
	andw	#PSL_S,%d1		| from system mode?
	jne	Lkbrkpt			| yes, kernel breakpoint
	jra	_ASM_LABEL(fault)	| no, user-mode fault
844
Lkbrkpt: | Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system %sp rather than the user %sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1 | %a1=dst
	movl	%a1,%sp			| %sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+		| copy a longword at a time
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to do it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	| If we have both DDB and KGDB, let KGDB see it first,
	| because KGDB will just return 0 if not connected.
	| Save args in %d2, %a2
	movl	%d0,%d2			| trap type
	movl	%sp,%a2			| frame ptr
#ifdef KGDB
	| Let KGDB handle it (if connected)
	movl	%a2,%sp@-		| push frame ptr
	movl	%d2,%sp@-		| push trap type
	jbsr	_C_LABEL(kgdb_trap)	| handle the trap
	addql	#8,%sp			| pop args
	cmpl	#0,%d0			| did kgdb handle it?
	jne	Lbrkpt3			| yes, done
#endif
#ifdef DDB
	| Let DDB handle it
	movl	%a2,%sp@-		| push frame ptr
	movl	%d2,%sp@-		| push trap type
	jbsr	_C_LABEL(kdb_trap)	| handle the trap
	addql	#8,%sp			| pop args
#if 0	/* not needed on hp300 */
	cmpl	#0,%d0			| did ddb handle it?
	jne	Lbrkpt3			| yes, done
#endif
#endif
	/* Sun 3 drops into PROM here. */
Lbrkpt3:
	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current %sp
	| before restoring registers and returning.

	movl	%sp@(FR_SP),%a0		| modified %sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| %sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but %sp
	movl	%sp@,%sp		| ... and %sp
	rte				| all done
911
/* Use common m68k sigreturn */
913#include <m68k/m68k/sigreturn.s>
914
/*
 * Interrupt handlers.
 *
 * For auto-vectored interrupts, the CPU provides the
 * vector 0x18+level.  Note we count spurious interrupts,
 * but don't do anything else with them.
 *
 * intrhand_autovec is the entry point for auto-vectored
 * interrupts.
 *
 * For vectored interrupts, we pull the pc, evec, and exception frame
 * and pass them to the vectored interrupt dispatcher.  The vectored
 * interrupt dispatcher will deal with strays.
 *
 * intrhand_vectored is the entry point for vectored interrupts.
 */

/* Save/restore the scratch registers %d0-%d1/%a0-%a1 around dispatch. */
#define INTERRUPT_SAVEREG	moveml  #0xC0C0,%sp@-
#define INTERRUPT_RESTOREREG	moveml  %sp@+,#0x0303

ENTRY_NOPROFILE(spurintr)	/* Level 0 */
	addql	#1,_C_LABEL(intrcnt)+0
	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
	jra	_ASM_LABEL(rei)

ENTRY_NOPROFILE(intrhand_autovec)	/* Levels 1 through 6 */
	addql	#1,_C_LABEL(interrupt_depth)
	INTERRUPT_SAVEREG
	lea	%sp@(16),%a1		| get pointer to frame
	movl	%a1,%sp@-
	jbsr	_C_LABEL(isrdispatch_autovec)	| call dispatcher
	addql	#4,%sp
	jbra	Lintrhand_exit

ENTRY_NOPROFILE(lev7intr)	/* level 7: parity errors, reset key */
	addql	#1,_C_LABEL(intrcnt)+32
	clrl	%sp@-
	moveml	#0xFFFF,%sp@-		| save registers
	movl	%usp,%a0		| and save
	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
	jbsr	_C_LABEL(nmihand)	| call handler
	movl	%sp@(FR_SP),%a0		| restore
	movl	%a0,%usp			|   user SP
	moveml	%sp@+,#0x7FFF		| and remaining registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

ENTRY_NOPROFILE(intrhand_vectored)
	addql	#1,_C_LABEL(interrupt_depth)
	INTERRUPT_SAVEREG
	lea	%sp@(16),%a1		| get pointer to frame
	movl	%a1,%sp@-
	movw	%sr,%d0
	bfextu	%d0,21,3,%d0		| Get current ipl
	movl	%d0,%sp@-		| Push it
	jbsr	_C_LABEL(isrdispatch_vectored)	| call dispatcher
	addql	#8,%sp
Lintrhand_exit:
	INTERRUPT_RESTOREREG
	subql	#1,_C_LABEL(interrupt_depth)

	jra	_ASM_LABEL(rei)		| tail-jump to rei for AST processing

#undef INTERRUPT_SAVEREG
#undef INTERRUPT_RESTOREREG
981
982/*
983 * Emulation of VAX REI instruction.
984 *
985 * This code deals with checking for and servicing ASTs
986 * (profiling, scheduling) and software interrupts (network, softclock).
987 * We check for ASTs first, just like the VAX.  To avoid excess overhead
988 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
990 * drop the IPL to allow device interrupts.
991 *
992 * This code is complicated by the fact that sendsig may have been called
993 * necessitating a stack cleanup.
994 */
995
BSS(ssir,1)			| software interrupt request flag

/*
 * rei: common exception-return path.  Checks for a pending AST
 * (only when returning to user mode) and for pending software
 * interrupts, dispatching either through trap() before the final rte.
 */
ASENTRY_NOPROFILE(rei)
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Lchksir			| no, go check for SIR
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Lchksir			| no (S bit set), go check for SIR
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
Lrei2:
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack (sendsig)?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
	rte				| and do real RTE
Lchksir:
	tstb	_C_LABEL(ssir)		| SIR pending?
	jeq	Ldorte			| no, all done
	movl	%d0,%sp@-		| need a scratch register
	movw	%sp@(4),%d0		| get SR
	andw	#PSL_IPL7,%d0		| mask all but IPL
	jne	Lnosir			| came from interrupt, no can do
	movl	%sp@+,%d0		| restore scratch register
Lgotsir:
	movw	#SPL1,%sr		| prevent others from servicing int
	tstb	_C_LABEL(ssir)		| too late?
	jeq	Ldorte			| yes, oh well...
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
Lsir1:
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_SSIR,%sp@-		| type == software interrupt
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| and all remaining registers
	addql	#8,%sp			| pop SP and stack adjust
	rte				| return from the SIR dispatch
Lnosir:
	movl	%sp@+,%d0		| restore scratch register
Ldorte:
	rte				| real return
1064
1065/*
1066 * Use common m68k sigcode.
1067 */
1068#include <m68k/m68k/sigcode.s>
1069#ifdef COMPAT_SUNOS
1070#include <m68k/m68k/sunos_sigcode.s>
1071#endif
1072#ifdef COMPAT_SVR4
1073#include <m68k/m68k/svr4_sigcode.s>
1074#endif
1075
1076/*
1077 * Primitives
1078 */
1079
1080/*
1081 * Use common m68k support routines.
1082 */
1083#include <m68k/m68k/support.s>
1084
1085/*
1086 * Use common m68k process manipulation routines.
1087 */
1088#include <m68k/m68k/proc_subr.s>
1089
	.data
GLOBAL(curpcb)
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
	.long	0		| pointer to the current process's pcb

ASLOCAL(mdpflag)
	.byte	0		| copy of proc md_flags low byte
	.align	4

ASBSS(nullpcb,SIZEOF_PCB)	| throwaway pcb used by switch_exit
1100
1101/*
1102 * At exit of a process, do a switch for the last time.
1103 * Switch to a safe stack and PCB, and select a new process to run.  The
1104 * old stack and u-area will be freed by the reaper.
1105 *
1106 * MUST BE CALLED AT SPLHIGH!
1107 */
ENTRY(switch_exit)
	movl	%sp@(4),%a0		| %a0 = p, the exiting process
	/* save state into garbage pcb */
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	%a0,%sp@-		| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	%sp@(4),%sp		| pop args

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)	| pick the next process to run
1125
1126/*
1127 * When no processes are on the runq, Swtch branches to Idle
1128 * to wait for something to come ready.
1129 */
ASENTRY_NOPROFILE(Idle)
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL		| wait for an interrupt at low IPL
	movw	#PSL_HIGHIPL,%sr	| block interrupts while we look
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0	| any run queues non-empty?
	jeq	_ASM_LABEL(Idle)	| no, keep waiting
	jra	Lsw1			| yes, go pick a process
1144
Lbadsw:
	PANIC("switch")			| run queue inconsistency: panic
	/*NOTREACHED*/
1148
1149/*
1150 * cpu_switch()
1151 *
1152 * NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the
1153 * entire ATC.  The effort involved in selective flushing may not be
1154 * worth it, maybe we should just flush the whole thing?
1155 *
1156 * NOTE 2: With the new VM layout we now no longer know if an inactive
1157 * user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
1158 * bit).  For now, we just always flush the full ATC.
1159 */
/*
 * Fix: the LOCKDEBUG register save/restore used bare `sp@-'/`sp@+'
 * while the rest of this file consistently uses the `%' register
 * prefix; with %-prefixed register syntax, bare `sp' is treated as an
 * undefined symbol, so LOCKDEBUG kernels would fail to assemble.
 * Changed to %sp.  No other instruction is altered.
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),%a0	| current pcb
	movw	%sr,%a0@(PCB_PS)	| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	_ASM_LABEL(Idle)
Lsw1:
	/*
	 * Interrupts are blocked, sched_lock is held.  If
	 * we come here via Idle, %d0 contains the contents
	 * of a non-zero sched_whichqs.
	 */
	movl	%d0,%d1
	negl	%d0
	andl	%d1,%d0			| %d0 = lowest set bit (d1 & -d1)
	bfffo	%d0{#0:#32},%d1		| find it; bfffo counts from the MSB
	eorib	#31,%d1			| so flip to a conventional bit number

	movl	%d1,%d0
	lslb	#3,%d1			| convert queue number to index
	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
	movl	%d1,%a1
	movl	%a1@(P_FORW),%a0	| p = q->p_forw
	cmpal	%d1,%a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
#ifdef DIAGNOSTIC
	tstl	%a0@(P_WCHAN)		| a runnable proc must not be waiting
	jne	Lbadsw
	cmpb	#SRUN,%a0@(P_STAT)	| and must be marked SRUN
	jne	Lbadsw
#endif
	movl	%a0@(P_FORW),%a1@(P_FORW) | q->p_forw = p->p_forw
	movl	%a0@(P_FORW),%a1	| n = p->p_forw
	movl	%d1,%a1@(P_BACK)	| n->p_back = q
	cmpal	%d1,%a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(sched_whichqs),%d1
	bclr	%d0,%d1			| no, clear bit
	movl	%d1,_C_LABEL(sched_whichqs)
Lsw2:
	/* p->p_cpu initialized in fork1() for single-processor */
	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
	movl	%a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	%sp@+,%a1
	cmpl	%a0,%a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),%a1
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
	movl	%usp,%a2		| grab %USP (%a2 has been saved)
	movl	%a2,%a1@(PCB_USP)	| and save it

	tstl	_C_LABEL(fputype)	| Do we have an FPU?
	jeq	Lswnofpsave		| No  Then don't attempt save.
	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
	fsave	%a2@			| save FP state
	tstb	%a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	%fp0-%fp7,%a2@(FPF_REGS) | save FP general registers
	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR)	| save FP control registers
Lswnofpsave:

	clrl	%a0@(P_BACK)		| clear back link
	movb	%a0@(P_MD_FLAGS+3),mdpflag | low byte of p_md.md_flags
	movl	%a0@(P_ADDR),%a1	| get p_addr
	movl	%a1,_C_LABEL(curpcb)

#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	movl	%a0,%sp@-		| not args...
	movl	%a1,%sp@-		| ...just saving
	jbsr	_C_LABEL(sched_unlock_idle)
	movl	%sp@+,%a1
	movl	%sp@+,%a0
#endif

	/*
	 * Activate process's address space.
	 * XXX Should remember the last USTP value loaded, and call this
	 * XXX only if it has changed.
	 */
	pea	%a0@			| push proc
	jbsr	_C_LABEL(pmap_activate)	| pmap_activate(p)
	addql	#4,%sp
	movl	_C_LABEL(curpcb),%a1	| restore p_addr

	lea	_ASM_LABEL(tmpstk),%sp	| now goto a tmp stack for NMI

	moveml	%a1@(PCB_REGS),#0xFCFC	| and registers
	movl	%a1@(PCB_USP),%a0
	movl	%a0,%usp		| and %USP

	tstl	_C_LABEL(fputype)	| If we don't have an FPU,
	jeq	Lnofprest		|  don't try to restore it.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	tstb	%a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
#if defined(M68040)
#if defined(M68020) || defined(M68030)
	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
	jne	Lresnot040		| no, skip
#endif
	clrl	%sp@-			| push a null FP frame and restore it
	frestore %sp@+			|  to reset the FPU first (040 only)
Lresnot040:
#endif
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control registers
	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
Lresfprest:
	frestore %a0@			| restore state

Lnofprest:
	movw	%a1@(PCB_PS),%sr	| no, restore PS
	moveq	#1,%d0			| return 1 (for alternate returns)
	rts
1291
1292/*
1293 * savectx(pcb)
1294 * Update pcb, saving current processor state.
1295 */
ENTRY(savectx)
	movl	%sp@(4),%a1		| %a1 = pcb argument
	movw	%sr,%a1@(PCB_PS)	| save status register
	movl	%usp,%a0		| grab %USP
	movl	%a0,%a1@(PCB_USP)	| and save it
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsvnofpsave		| No?  Then don't save state.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	fsave	%a0@			| save FP state
	tstb	%a0@			| null state frame?
	jeq	Lsvnofpsave		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control registers
Lsvnofpsave:
	moveq	#0,%d0			| return 0
	rts
1314
#if defined(M68040)
/*
 * suline(dst, src) -- 68040 only.
 * Copy one 16-byte line (4 longwords) from src to dst, writing with
 * movs so stores go to the address space selected by %dfc (presumably
 * user data space -- confirm against callers).  Faults are caught via
 * pcb_onfault.  Returns 0 on success, -1 if a fault occurred.
 */
ENTRY(suline)
	movl	%sp@(4),%a0		| address to write
	movl	_C_LABEL(curpcb),%a1	| current pcb
	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
	movl	%sp@(8),%a1		| address of line
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	movl	%a1@+,%d0		| get lword
	movsl	%d0,%a0@+		| put lword
	nop				| sync
	moveq	#0,%d0			| indicate no fault
	jra	Lsldone
Lslerr:
	moveq	#-1,%d0			| fault: return failure
Lsldone:
	movl	_C_LABEL(curpcb),%a1	| current pcb
	clrl	%a1@(PCB_ONFAULT) 	| clear fault address
	rts
#endif
1342
#if defined(ENABLE_HP_CODE)
/* Enable the external cache, if one is present (ectype != 0). */
ENTRY(ecacheon)
	tstl	_C_LABEL(ectype)	| have an external cache?
	jeq	Lnocache7		| no, nothing to do
	MMUADDR(%a0)
	orl	#MMU_CEN,%a0@(MMUCMD)	| set the cache-enable bit
Lnocache7:
	rts

/* Disable the external cache, if one is present (ectype != 0). */
ENTRY(ecacheoff)
	tstl	_C_LABEL(ectype)	| have an external cache?
	jeq	Lnocache8		| no, nothing to do
	MMUADDR(%a0)
	andl	#~MMU_CEN,%a0@(MMUCMD)	| clear the cache-enable bit
Lnocache8:
	rts
#endif
1360
ENTRY_NOPROFILE(getsfc)
	movc	%sfc,%d0		| return the source function code reg
	rts
1364
ENTRY_NOPROFILE(getdfc)
	movc	%dfc,%d0		| return the dest function code reg
	rts
1368
1369/*
1370 * Load a new user segment table pointer.
1371 */
ENTRY(loadustp)
#if defined(M68K_MMU_MOTOROLA)
	tstl	_C_LABEL(mmutype)	| HP MMU?
	jeq	Lhpmmu9			| yes, skip
	movl	%sp@(4),%d0		| new USTP (page frame number)
	moveq	#PGSHIFT,%d1
	lsll	%d1,%d0			| convert to addr
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
	jne	LmotommuC		| no, skip
	.word	0xf518			| yes, pflusha (040 encoding)
	.long	0x4e7b0806		| movc %d0,urp (load user root ptr)
	rts
LmotommuC:
#endif
	pflusha				| flush entire TLB
	lea	_C_LABEL(protorp),%a0	| CRP prototype
	movl	%d0,%a0@(4)		| stash USTP
	pmove	%a0@,%crp		| load root pointer
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	rts
Lhpmmu9:
#endif
#if defined(M68K_MMU_HP)
	movl	#CACHE_CLR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
	MMUADDR(%a0)
	movl	%a0@(MMUTBINVAL),%d1	| invalidate TLB
	tstl	_C_LABEL(ectype)	| have external VAC?
	jle	1f			| no, skip
	andl	#~MMU_CEN,%a0@(MMUCMD)	| toggle cache enable
	orl	#MMU_CEN,%a0@(MMUCMD)	| to clear data cache
1:
	movl	%sp@(4),%a0@(MMUUSTP)	| load a new USTP
#endif
	rts
1409
ENTRY(ploadw)
#if defined(M68K_MMU_MOTOROLA)
	movl	%sp@(4),%a0		| address to load
#if defined(M68K_MMU_HP)
	tstl	_C_LABEL(mmutype)	| HP MMU?
	jeq	Lploadwskp		| yes, skip (no ploadw instruction)
#endif
#if defined(M68040)
	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
	jeq	Lploadwskp		| yes, skip (no ploadw on 040)
#endif
	ploadw	#1,%a0@			| pre-load translation
Lploadwskp:
#endif
	rts
1425
1426/*
1427 * Set processor priority level calls.  Most are implemented with
1428 * inline asm expansions.  However, spl0 requires special handling
1429 * as we need to check for our emulated software interrupts.
1430 */
1431
ENTRY(spl0)
	moveq	#0,%d0
	movw	%sr,%d0			| get old SR for return
	movw	#PSL_LOWIPL,%sr		| restore new SR
	tstb	_C_LABEL(ssir)		| software interrupt pending?
	jeq	Lspldone		| no, all done
	| Fabricate a format-0 exception frame (SR, PC, format word) so
	| that the rte at the end of the Lgotsir path returns to our caller.
	subql	#4,%sp			| make room for RTE frame
	movl	%sp@(4),%sp@(2)		| position return address
	clrw	%sp@(6)			| set frame type 0
	movw	#PSL_LOWIPL,%sp@	| and new SR
	jra	Lgotsir			| go handle it
Lspldone:
	rts
1445
ENTRY(getsr)
	moveq	#0,%d0			| clear high bits of the result
	movw	%sr,%d0			| return the status register
	rts
1450
1451/*
1452 * _delay(u_int N)
1453 *
 * Delay for at least (N/256) microseconds.
1455 * This routine depends on the variable:  delay_divisor
1456 * which should be set based on the CPU clock rate.
1457 */
ENTRY_NOPROFILE(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor (calibrated per CPU clock rate)
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
	.align	8
L_delay:
	subl	%d1,%d0			| burn delay_divisor units per pass
	jgt	L_delay			| until the count is exhausted
	rts
1477
1478/*
1479 * Save and restore 68881 state.
1480 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done (FPU was idle)
	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control registers
Lm68881sdone:
	rts
1490
ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy (nothing but the frame)
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control registers
	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
Lm68881rdone:
	frestore %a0@			| restore state
	rts
1500
1501/*
1502 * Handle the nitty-gritty of rebooting the machine.
1503 * Basically we just turn off the MMU and jump to the appropriate ROM routine.
1504 * Note that we must be running in an address range that is mapped one-to-one
1505 * logical to physical so that the PC is still valid immediately after the MMU
1506 * is turned off.  We have conveniently mapped the last page of physical
1507 * memory this way.
1508 */
ENTRY_NOPROFILE(doboot)
	movw	#PSL_HIGHIPL,%sr	| no interrupts

	movl	#CACHE_OFF,%d0
	movc	%d0,%cacr		| clear and disable on-chip cache(s)

	| Turn on physical memory mapping.
	| @@@ This is also 68040 specific and needs fixing.
	movel	#0x0200c040,%d0		| intio devices are at 0x02000000
	.long	0x4e7b0004		| movc %d0,%itt0
	.long	0x4e7b0006		| movc %d0,%dtt0
	movel	#0x0403c000,%d0		| kernel text and data at 0x04000000
	.long	0x4e7b0005		| movc %d0,%itt1
	.long	0x4e7b0007		| movc %d0,%dtt1

	moveal   #NEXT_RAMBASE,%a5	| amount to RELOC by.

	| Create a new stack at address tmpstk, and push
	| the existing sp onto it for kicks.
	ASRELOC(tmpstk, %a0)
	movel	%sp,%a0@-
	moveal  %a0,%sp
	moveal  #0,%a6			| no frame pointer

	ASRELOC(Ldoboot1, %a0)
	jmp     %a0@			| jump into physical address space.
Ldoboot1:
	ASRELOC(save_vbr, %a0)
	movl    %a0@,%d0		| recover the VBR saved from the ROM
	movc    %d0,%vbr

	| reset the registers as the boot rom likes them:
	movel	#0x0200c040,%d0		|
	.long	0x4e7b0004		| movc %d0,%itt0
	.long	0x4e7b0006		| movc %d0,%dtt0
	movel	#0x00ffc000,%d0		|
	.long	0x4e7b0005		| movc %d0,%itt1
	.long	0x4e7b0007		| movc %d0,%dtt1

	RELOC(monbootflag, %a0)
	movel %a0,%d0			| "-h" halts instead of reboot.
	trap #13			| enter the boot ROM

hloop:
	bra hloop			| This shouldn't be reached.
1554/*
1555 * Misc. global variables.
1556 */
	.data
/*
 * NOTE(review): the 0xdeadbeef values below are poison -- they are
 * expected to be overwritten before use (presumably during early
 * bootstrap; the initialization is not in this chunk).  The old
 * "default to ..." comments were misleading and have been reworded.
 */
GLOBAL(machineid)
	.long	0xdeadbeef	| machine ID (poison until set)

GLOBAL(mmuid)
	.long	0		| MMU ID, default to nothing

GLOBAL(mmutype)
	.long	0xdeadbeef	| MMU type (poison until set)

GLOBAL(cputype)
	.long	0xdeadbeef	| CPU type (poison until set)

#if defined(ENABLE_HP_CODE)
GLOBAL(ectype)
	.long	EC_NONE		| external cache type, default to none
#endif

GLOBAL(fputype)
	.long	0xdeadbeef	| FPU type (poison until set)

GLOBAL(protorp)
	.long	0,0		| prototype root pointer

GLOBAL(prototc)
	.long	0		| prototype translation control

GLOBAL(want_resched)
	.long	0		| nonzero when a reschedule is wanted

GLOBAL(proc0paddr)
	.long	0		| KVA of proc0 u-area

GLOBAL(intiobase)
	.long	INTIOBASE	| KVA of base of internal IO space

GLOBAL(intiolimit)
	.long	INTIOTOP	| KVA of end of internal IO space

GLOBAL(monobase)
	.long	MONOBASE	| KVA of base of mono FB

GLOBAL(monolimit)
	.long	MONOTOP		| KVA of end of mono FB

GLOBAL(colorbase)
	.long	COLORBASE	| KVA of base of color FB

GLOBAL(colorlimit)
	.long	COLORTOP	| KVA of end of color FB

ASLOCAL(save_vbr)		| VBR from ROM (restored by doboot)
	.long 0xdeadbeef

GLOBAL(monbootflag)
	.long 0			| passed to the ROM monitor by doboot
1613
#if defined(ENABLE_HP_CODE)
GLOBAL(extiobase)
	.long	0		| KVA of base of external IO space

GLOBAL(CLKbase)
	.long	0		| KVA of base of clock registers

GLOBAL(MMUbase)
	.long	0		| KVA of base of HP MMU registers

GLOBAL(pagezero)
	.long	0		| PA of first page of kernel text
#endif

#ifdef USELEDS
ASLOCAL(heartbeat)
	.long	0		| clock ticks since last pulse of heartbeat

ASLOCAL(beatstatus)
	.long	0		| for determining a fast or slow throb
#endif

#ifdef DEBUG
ASGLOBAL(fulltflush)
	.long	0		| force full TLB flushes when nonzero
ASGLOBAL(fullcflush)
	.long	0		| force full cache flushes when nonzero
#endif
1643
/*
 * Interrupt counters: intrcnt[] is an array of longs counted in
 * parallel with the NUL-terminated names in intrnames[] (so, e.g.,
 * intrcnt+0 is "spur" and intrcnt+32 is "nmi").
 */
GLOBAL(intrnames)
	.asciz	"spur"
	.asciz	"lev1"
	.asciz	"lev2"
	.asciz	"lev3"
	.asciz	"lev4"
	.asciz	"lev5"
	.asciz	"lev6"
	.asciz  "lev7"
	.asciz	"nmi"
	.asciz	"statclock"
GLOBAL(eintrnames)
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0	| one counter per name above
GLOBAL(eintrcnt)
1661
1662