/*	$NetBSD: locore.s,v 1.11 2001/12/07 05:24:56 fredette Exp $	*/

/*
 * Copyright (c) 2001 Matthew Fredette
 * Copyright (c) 1994, 1995 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (two 2K pages ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| As opposed to the sun3, on the sun2 the kernel is linked low.  The
| boot loader loads us exactly where we are linked, so we don't have
| to worry about writing position-independent code or moving the
| kernel around.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	moveq	#FC_CONTROL,%d0		| make movs access "control"
	movc	%d0,%sfc		| space where the sun2 designers
	movc	%d0,%dfc		| put all the "useful" stuff

| Set context zero and stay there until pmap_bootstrap.
	moveq	#0,%d0
	movsb	%d0,CONTEXT_REG
	movsb	%d0,SCONTEXT_REG

| Jump around the g0 and g4 entry points.
	jra	L_high_code

| These entry points are here in pretty low memory, so that they
| can be reached from virtual address zero using the classic,
| old-school "g0" and "g4" commands from the monitor.  (I.e.,
| they need to be reachable using 16-bit displacements from PCs
| 0 and 4).
ENTRY(g0_entry)
	jra	_C_LABEL(g0_handler)
ENTRY(g4_entry)
	jra	_C_LABEL(g4_handler)

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.

| Disable interrupts, and initialize the soft copy of the
| enable register.
	movsw	SYSTEM_ENAB, %d0	| read the enable register
	moveq	#ENA_INTS, %d1
	notw	%d1
	andw	%d1, %d0
	movsw	%d0, SYSTEM_ENAB	| disable all interrupts
	movw	%d0, _C_LABEL(enable_reg_soft)

| Do bootstrap stuff needed before main() gets called.
| Make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk),%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the %sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Set up process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP
| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Final preparation for calling main.
 *
 * Create a fake exception frame that returns to user mode,
 * and save its address in p->p_md.md_regs for cpu_fork().
 * The new frames for process 1 and 2 will be adjusted by
 * cpu_set_kpc() to arrange for a call to a kernel function
 * before the new process does its rte out to user mode.
 */
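| A sketch of the trapframe the pushes below build (lowest address,
| the final %sp, first; field names per struct trapframe):
|	%sp ->	tf_regs[16]	64 bytes, %d0-%d7/%a0-%a7
|		tf_stackadj	long, zero
|		tf_sr		word, PSL_USER
|		tf_pc		long, filled in later
|		tf_format/tf_vector	word, zero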
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%sp,%a1			| %a1=trapframe
	lea	_C_LABEL(proc0),%a0	| proc0.p_md.md_regs =
	movl	%a1,%a0@(P_MDREGS)	|   trapframe
	movl	%a2,%a1@(FR_SP)		| a2 == usp (from above)
	pea	%a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	addql	#4,%sp			| help DDB backtrace
	trap	#15			| should not get here

| This is used by cpu_fork() to return to user mode.
| It is called with SP pointing to a struct trapframe.
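| (A note on the mask: in moveml's postincrement form, mask bit 0 is
| %d0 and bit 15 is %a7, so #0x7FFF reloads %d0-%d7/%a0-%a6 and skips
| %a7, the SSP, which the addql below steps past instead.)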
GLOBAL(proc_do_uret)
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| load most registers (all but SSP)
	addql	#8,%sp			| pop SSP and stack adjust count
	rte

/*
 * proc_trampoline:
 * This is used by cpu_set_kpc() to "push" a function call onto the
 * kernel stack of some process, very much like a signal delivery.
 * When we get here, the stack has:
 *
 * SP+8:	switchframe from before cpu_set_kpc
 * SP+4:	void *arg;
 * SP:  	u_long func;
 *
 * On entry, the switchframe pushed by cpu_set_kpc has already been
 * popped off the stack, so all this needs to do is pop the function
 * pointer into a register, call it, then pop the arg, and finally
 * return using the switchframe that remains on the stack.
 */
GLOBAL(proc_trampoline)
	movl	%sp@+,%a0		| function pointer
	jbsr	%a0@			| (*func)(arg)
	addql	#4,%sp			| toss the arg
	rts				| as cpu_switch would do

| That is all the assembly startup code we need on the sun2!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(8),%d0		| grab SSW for fault processing
	movl	%a1@(10),%d1		| fault address is as given in frame
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/*
 * the sun2-specific code
 *
 * our mission: figure out whether what we are looking at is
 *              a bus error in the UNIX sense, or
 *	        a memory error, i.e., a page fault
 *
 * [this code replaces similarly MMU-specific code in the hp300 code]
 */
sun2_mmu_specific:
	clrl %d0			| make sure top bits are cleared too
	movl %d1,%sp@-			| save %d1
	movc %sfc,%d1			| save %sfc to %d1
	moveq #FC_CONTROL,%d0		| %sfc = FC_CONTROL
	movc %d0,%sfc
	movsw BUSERR_REG,%d0		| get value of bus error register
	movc %d1,%sfc			| restore %sfc
	movl %sp@+,%d1			| restore %d1
#ifdef	DEBUG
	movw %d0, _C_LABEL(buserr_reg)	| save bus error register value
#endif
	andb #BUSERR_PROTERR, %d0 	| is this an MMU (protection *or* page unavailable) fault?
	jeq Lisberr			| non-MMU bus error
/* End of sun2 specific code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

| Message for fpfault panic
Lfp0:
	.asciz	"fpfault"
	.even

/*
 * Handles all other FP coprocessor exceptions.
 * Since we can never have an FP coprocessor, this just panics.
 */
GLOBAL(fpfault)
	movl	#Lfp0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/

/*
 * Other exceptions cause only four- and six-word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in %d0, addr in %a1, length in %d1
 */
GLOBAL(trap12)
	jra	_ASM_LABEL(rei)		| all done
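	| (The sun2 has no cache to control, so this is a no-op.)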

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2 		| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1	| %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun2 models.
 * Format on the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303
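
| A note on the masks above: moveml's predecrement form mirrors the
| mask (bit 15 is %d0, bit 0 is %a7), so #0xC0C0 pushes just the
| scratch registers %d0-%d1/%a0-%a1, and #0x0303 names the same four
| registers in the postincrement encoding used on the way out.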

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the user's SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif
#ifdef COMPAT_SVR4
#include <m68k/m68k/svr4_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
ASBSS(nullpcb,SIZEOF_PCB)
	.text

/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 */
ENTRY(switch_exit)
	movl	%sp@(4),%a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	%a0,%sp@-		| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	%sp@(4),%sp		| pop args

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)

/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
Lidle:
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,%sr
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
	jra	Lsw1

Lbadsw:
	movl	#Lsw0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/

/*
 * cpu_switch()
 * Hacked for the sun2
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),%a0	| current pcb
	movw	%sr,%a0@(PCB_PS)	| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
Lsw1:
	/*
	 * Interrupts are blocked, sched_lock is held.  If
	 * we come here via Idle, %d0 contains the contents
	 * of a non-zero sched_whichqs.
	 */
	moveq	#31,%d1
1:	lsrl	#1,%d0
	dbcs	%d1,1b
	eorib	#31,%d1
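
	/*
	 * A worked example of the scan above: with sched_whichqs =
	 * 0x28 (queues 3 and 5 ready), lsrl shifts one bit at a time
	 * into the carry, dbcs decrements %d1 once per clear bit
	 * (31 -> 28) and stops on the first set bit, and eorib #31
	 * turns 28 into 3: the lowest-numbered, highest-priority
	 * non-empty queue.
	 */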

	movl	%d1,%d0
	lslb	#3,%d1			| convert queue number to index
	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
	movl	%d1,%a1
	movl	%a1@(P_FORW),%a0	| p = q->p_forw
	cmpal	%d1,%a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
#ifdef DIAGNOSTIC
	tstl	%a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,%a0@(P_STAT)
	jne	Lbadsw
#endif
	movl	%a0@(P_FORW),%a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	%a0@(P_FORW),%a1		| n = p->p_forw
	movl	%a0@(P_BACK),%a1@(P_BACK)	| n->p_back = q
	cmpal	%d1,%a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(sched_whichqs),%d1
	bclr	%d0,%d1			| no, clear bit
	movl	%d1,_C_LABEL(sched_whichqs)
Lsw2:
	/* p->p_cpu initialized in fork1() for single-processor */
	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
	movl	%a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	%sp@+,%a1		| XXX - Make this work!
	cmpl	%a0,%a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),%a1
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
	movl	%usp,%a2		| grab USP (a2 has been saved)
	movl	%a2,%a1@(PCB_USP)	| and save it

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  %a0=curproc, %a1=curpcb
	 */

	clrl	%a0@(P_BACK)		| clear back link
	movl	%a0@(P_ADDR),%a1	| get p_addr
	movl	%a1,_C_LABEL(curpcb)

#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	movl	%a0,%sp@-		| not args...
	movl	%a1,%sp@-		| ...just saving
	jbsr	_C_LABEL(sched_unlock_idle)
	movl	%sp@+,%a1
	movl	%sp@+,%a0
#endif

	/*
	 * Load the new VM context (new MMU root pointer)
	 */
	movl	%a0@(P_VMSPACE),%a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
| XXX fredette - tstl with an address register EA not supported
| on the 68010, too lazy to fix this instance now.
#if 0
	tstl	%a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#endif
	/*
	 * Call _pmap_switch().
	 */
	movl	%a2@(VM_PMAP),%a2 	| pmap = vm->vm_map.pmap
	pea	%a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,%sp
	movl	_C_LABEL(curpcb),%a1	| restore p_addr
| Note: pmap_switch will clear the cache if needed.

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use %d0,%d1,%a0,%a1
	 */
	moveml	%a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	%a1@(PCB_USP),%a0
	movl	%a0,%usp		| and USP

	movw	%a1@(PCB_PS),%d0	| fetch saved PS
#ifdef DIAGNOSTIC
	btst	#13,%d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	%d0,%sr			| OK, restore PS
	moveq	#1,%d0			| return 1 (for alternate returns)
	rts

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	%sp@(4),%a1
	movw	%sr,%a1@(PCB_PS)
	movl	%usp,%a0		| grab USP
	movl	%a0,%a1@(PCB_USP)	| and save it
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers

	moveq	#0,%d0			| return 0
	rts

/*
 * Get caller's current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee-saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	rts

ENTRY(getsfc)
	movc	%sfc,%d0
	rts

ENTRY(getdfc)
	movc	%dfc,%d0
	rts

ENTRY(getvbr)
	movc	%vbr,%d0
#ifdef __ELF__
	movl	%d0, %a0
#endif /* __ELF__ */
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/* loadustp, ptest_addr */

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1 	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts
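
| Note that _splraise never lowers the IPL: when the current level is
| already at or above the requested one, the write to %sr is skipped
| and the old value is simply returned in %d0.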

#ifdef DIAGNOSTIC
| Message for 68881 save/restore panic
Lsr0:
	.asciz	"m68881 save/restore"
	.even
#endif

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
ENTRY(m68881_restore)
#ifdef	DIAGNOSTIC
	movl	#Lsr0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/
#else
	rts
#endif

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts
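
| The arithmetic, sketched: the argument is in units of 1/256 usec,
| and each trip through L_delay is calibrated (via delay_divisor) to
| cost about delay_divisor/256 usec, so the loop runs roughly
| (usecs << 8) / delay_divisor times; e.g., with a hypothetical
| delay_divisor of 20, _delay(10 << 8) spins 128 times for ~10 usec.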

/*
 * Set or clear bits in the enable register.  This mimics the
 * strange behavior in SunOS' locore.o, where they keep a soft
 * copy of what they think is in the enable register and loop
 * making a change until it sticks.  This is apparently done to
 * be concurrent-safe without disabling interrupts.  Why you
 * can't just disable interrupts while mucking with the register
 * I dunno, but it may jibe with sun3/intreg.c using single-instruction
 * bit operations on a memory-addressable sun3 intreg; i.e., once the
 * sun2 was designed they realized the enable register had to be
 * treated this way, so on the sun3 they made it memory-addressable
 * so that the single-instruction operations would work.
 */
ENTRY(enable_reg_and)
	movc	%dfc,%a1		| save current dfc
	moveq	#FC_CONTROL, %d1
	movc	%d1, %dfc		| make movs access "control"
	movl	%sp@(4), %d1		| get our AND mask
	clrl	%d0
1:	andw	%d1, _C_LABEL(enable_reg_soft)	| do our AND
	movw	_C_LABEL(enable_reg_soft), %d0	| get the result
	movsw	%d0, SYSTEM_ENAB		| install the result
	cmpw	_C_LABEL(enable_reg_soft), %d0
	bne	1b				| install it again if the soft value changed
	movc	%a1,%dfc		| restore dfc
	rts

ENTRY(enable_reg_or)
	movc	%dfc,%a1		| save current dfc
	moveq	#FC_CONTROL, %d1
	movc	%d1, %dfc		| make movs access "control"
	movl	%sp@(4), %d1		| get our OR mask
	clrl	%d0
1:	orw	%d1, _C_LABEL(enable_reg_soft)	| do our OR
	movw	_C_LABEL(enable_reg_soft), %d0	| get the result
	movsw	%d0, SYSTEM_ENAB		| install the result
	cmpw	_C_LABEL(enable_reg_soft), %d0
	bne	1b				| install it again if the soft value changed
	movc	%a1,%dfc		| restore dfc
	rts
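
| A usage sketch: interrupts, for example, could be enabled with
| enable_reg_or(ENA_INTS) and masked again with
| enable_reg_and(~ENA_INTS), mirroring the by-hand ENA_INTS masking
| done at L_high_code above.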

/*
 * Use common m68k 16-bit aligned copy routines.
 */
#include <m68k/m68k/w16copy.s>

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN2_MONSTART
	.set	_PROM_BASE,SUN2_PROM_BASE
	.set	_MONEND,SUN2_MONEND

| The end!