/*	$NetBSD: locore.s,v 1.12 2002/10/20 02:37:33 chs Exp $	*/

/*
 * Copyright (c) 2001 Matthew Fredette
 * Copyright (c) 1994, 1995 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| Remember this is a fun project!

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (two 2K pages ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| As opposed to the sun3, on the sun2 the kernel is linked low.  The
| boot loader loads us exactly where we are linked, so we don't have
| to worry about writing position independent code or moving the
| kernel around.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	moveq	#FC_CONTROL,%d0		| make movs access "control"
	movc	%d0,%sfc		| space where the sun2 designers
	movc	%d0,%dfc		| put all the "useful" stuff

| Set context zero and stay there until pmap_bootstrap.
	moveq	#0,%d0
	movsb	%d0,CONTEXT_REG
	movsb	%d0,SCONTEXT_REG

| Jump around the g0 and g4 entry points.
	jra	L_high_code

| These entry points are here in pretty low memory, so that they
| can be reached from virtual address zero using the classic,
| old-school "g0" and "g4" commands from the monitor.  (I.e.,
| they need to be reachable using 16-bit displacements from PCs
| 0 and 4).
ENTRY(g0_entry)
	jra	_C_LABEL(g0_handler)
ENTRY(g4_entry)
	jra	_C_LABEL(g4_handler)

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.

| Disable interrupts, and initialize the soft copy of the
| enable register.
	movsw	SYSTEM_ENAB, %d0	| read the enable register
	moveq	#ENA_INTS, %d1
	notw	%d1
	andw	%d1, %d0
	movsw	%d0, SYSTEM_ENAB	| disable all interrupts
	movw	%d0, _C_LABEL(enable_reg_soft)
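
| Roughly, in C (a sketch only; SYSTEM_ENAB is the control-space
| address of the enable register, accessed above with movs):
|
|	u_short v = SYSTEM_ENAB;	/* read enable register */
|	v &= ~ENA_INTS;			/* clear interrupt-enable bit */
|	SYSTEM_ENAB = v;		/* disable all interrupts */
|	enable_reg_soft = v;		/* initialize the soft copy */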

| Do bootstrap stuff needed before main() gets called.
| Make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk),%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the %sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Set up process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	movl	%sp,%a1			| %a1=trapframe
	lea	_C_LABEL(proc0),%a0	| proc0.p_md.md_regs =
	movl	%a1,%a0@(P_MDREGS)	|   trapframe
	pea	%a1@			| push &trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")
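
/*
 * The fake frame pushed above corresponds, roughly, to the common
 * m68k trapframe (a sketch using the tf_* names from the comments
 * above; the authoritative layout lives in <machine/frame.h>):
 *
 *	struct trapframe {
 *		int	tf_regs[16];		D0-D7/A0-A7
 *		short	tf_stackadj;
 *		u_short	tf_sr;
 *		u_int	tf_pc;
 *		u_short	tf_format:4, tf_vector:12;
 *	};
 */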

/*
 * proc_trampoline: call function in register %a2 with %a3 as an arg
 * and then rei.
 */
GLOBAL(proc_trampoline)
	movl	%a3,%sp@-		| push function arg
	jbsr	%a2@			| call function
	addql	#4,%sp			| pop arg
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	jra	_ASM_LABEL(rei)		| and return
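
| A sketch of the convention assumed here: cpu_fork() builds a switch
| frame so that a newly created process first resumes in
| proc_trampoline with %a2 = function and %a3 = argument; for a forked
| user process that function is (roughly) child_return(p), after which
| we fall into the user trapframe and rei back out to user mode.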

| That is all the assembly startup code we need on the sun2!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

GLOBAL(buserr)
	tstl	_C_LABEL(nofault)	| device probe?
	jeq	_C_LABEL(addrerr)	| no, handle as usual
	movl	_C_LABEL(nofault),%sp@-	| yes,
	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
GLOBAL(addrerr)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
	moveq	#0,%d0
	movw	%a1@(8),%d0		| grab SSW for fault processing
	movl	%a1@(10),%d1		| fault address is as given in frame
	movl	%d1,%sp@-		| push fault VA
	movl	%d0,%sp@-		| and padded SSW
	movw	%a1@(6),%d0		| get frame format/vector offset
	andw	#0x0FFF,%d0		| clear out frame format
	cmpw	#12,%d0			| address error vector?
	jeq	Lisaerr			| yes, go to it

/*
 * the sun2 specific code
 *
 * our mission: figure out whether what we are looking at is
 *              a bus error in the UNIX sense, or
 *	        a memory error, i.e. a page fault
 *
 * [this code replaces similarly mmu specific code in the hp300 code]
 */
sun2_mmu_specific:
	clrl %d0			| make sure top bits are cleared too
	movl %d1,%sp@-			| save %d1
	movc %sfc,%d1			| save %sfc to %d1
	moveq #FC_CONTROL,%d0		| %sfc = FC_CONTROL
	movc %d0,%sfc
	movsw BUSERR_REG,%d0		| get value of bus error register
	movc %d1,%sfc			| restore %sfc
	movl %sp@+,%d1			| restore %d1
#ifdef	DEBUG
	movw %d0, _C_LABEL(buserr_reg)	| save bus error register value
#endif
	andb #BUSERR_PROTERR, %d0	| is this an MMU (protection *or* page unavailable) fault?
	jeq Lisberr			| non-MMU bus error
/* End of sun2 specific code. */

Lismerr:
	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,%sp@-	| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr:
	movl	#T_BUSERR,%sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
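
/*
 * The sun2-specific decision above, sketched in C (a sketch only;
 * BUSERR_REG is read through FC_CONTROL space, BUSERR_PROTERR comes
 * from the sun2 bus error register definitions):
 *
 *	u_short ber = BUSERR_REG;
 *	if ((ber & BUSERR_PROTERR) == 0)
 *		goto Lisberr;		plain bus error, T_BUSERR
 *	goto Lismerr;			MMU fault, T_MMUFLT
 */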

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

| Message for fpfault panic
Lfp0:
	.asciz	"fpfault"
	.even

/*
 * Handles all other FP coprocessor exceptions.
 * Since we can never have an FP coprocessor, this just panics.
 */
GLOBAL(fpfault)
	movl	#Lfp0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/

/*
 * Other exceptions only cause four- and six-word stack frames and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done
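
| A sketch of the convention assumed here: user code puts the syscall
| number in %d0 and its arguments on its own stack, then executes
| "trap #0"; the kernel lands above and calls syscall(code), which
| fetches the arguments through the saved user SP in the trapframe.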

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in %d0, addr in %a1, length in %d1
 */
GLOBAL(trap12)
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault
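
| The not/and pair above implements, in effect:
|	if ((sr & PSL_TS) == PSL_TS)	both T and S set
|		goto kbrkpt;		kernel breakpoint
|	else
|		goto fault;		hand off to trap()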

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2			| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1	| %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it may have been modified (by a kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun2 models.
 * Format on the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303
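
| A note on the moveml masks: for a predecrement store the register
| mask is bit-reversed, so #0xC0C0 pushes %d0,%d1,%a0,%a1 (the C
| scratch registers), and #0x0303 pops the same four on the way out.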

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called,
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the user's SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return
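
/*
 * The AST check above, as a rough C sketch (USERMODE() tests the S
 * bit of the saved SR, just as the btst #5 above does):
 *
 *	if (panicstr == NULL && astpending && USERMODE(frame.sr)) {
 *		spl(PSL_LOWIPL);
 *		trap(T_ASTFLT, 0, 0, frame);	may adjust the frame
 *	}
 *	rte();
 */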

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM.)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif
#ifdef COMPAT_SVR4
#include <m68k/m68k/svr4_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
ASBSS(nullpcb,SIZEOF_PCB)
	.text

/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 */
ENTRY(switch_exit)
	movl	%sp@(4),%a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	%a0,%sp@-		| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	%sp@(4),%sp		| pop args

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)

/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
Lidle:
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,%sr
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
	jra	Lsw1

Lbadsw:
	movl	#Lsw0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/

/*
 * cpu_switch()
 * Hacked for sun2
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),%a0	| current pcb
	movw	%sr,%a0@(PCB_PS)	| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
Lsw1:
	/*
	 * Interrupts are blocked, sched_lock is held.  If
	 * we come here via Idle, %d0 contains the contents
	 * of a non-zero sched_whichqs.
	 */
	moveq	#31,%d1
1:	lsrl	#1,%d0
	dbcs	%d1,1b
	eorib	#31,%d1
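
	/*
	 * The lsrl/dbcs/eorib sequence above computes the index of
	 * the lowest set bit in sched_whichqs, i.e. roughly:
	 *
	 *	for (i = 0; (whichqs & (1 << i)) == 0; i++)
	 *		continue;
	 *	... leaving %d1 = i, the highest-priority non-empty queue
	 */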

	movl	%d1,%d0
	lslb	#3,%d1			| convert queue number to index
	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
	movl	%d1,%a1
	movl	%a1@(P_FORW),%a0	| p = q->p_forw
	cmpal	%d1,%a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
#ifdef DIAGNOSTIC
	tstl	%a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,%a0@(P_STAT)
	jne	Lbadsw
#endif
	movl	%a0@(P_FORW),%a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	%a0@(P_FORW),%a1		| n = p->p_forw
	movl	%a0@(P_BACK),%a1@(P_BACK)	| n->p_back = q
	cmpal	%d1,%a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(sched_whichqs),%d1
	bclr	%d0,%d1			| no, clear bit
	movl	%d1,_C_LABEL(sched_whichqs)
Lsw2:
	/* p->p_cpu initialized in fork1() for single-processor */
	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
	movl	%a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	%sp@+,%a1		| XXX - Make this work!
	cmpl	%a0,%a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),%a1
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
	movl	%usp,%a2		| grab USP (a2 has been saved)
	movl	%a2,%a1@(PCB_USP)	| and save it

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  %a0=curproc, %a1=curpcb
	 */

	clrl	%a0@(P_BACK)		| clear back link
	movl	%a0@(P_ADDR),%a1	| get p_addr
	movl	%a1,_C_LABEL(curpcb)

#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	movl	%a0,%sp@-		| not args...
	movl	%a1,%sp@-		| ...just saving
	jbsr	_C_LABEL(sched_unlock_idle)
	movl	%sp@+,%a1
	movl	%sp@+,%a0
#endif

	/*
	 * Load the new VM context (new MMU root pointer)
	 */
	movl	%a0@(P_VMSPACE),%a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
| XXX fredette - tstl with an address register EA not supported
| on the 68010, too lazy to fix this instance now.
#if 0
	tstl	%a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#endif
	/*
	 * Call _pmap_switch().
	 */
	movl	%a2@(VM_PMAP),%a2	| pmap = vm->vm_map.pmap
	pea	%a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,%sp
	movl	_C_LABEL(curpcb),%a1	| restore p_addr
| Note: pmap_switch will clear the cache if needed.

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use %d0,%d1,%a0,%a1
	 */
	moveml	%a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	%a1@(PCB_USP),%a0
	movl	%a0,%usp		| and USP

	movw	%a1@(PCB_PS),%d0	| fetch the saved PS
#ifdef DIAGNOSTIC
	btst	#13,%d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	%d0,%sr			| OK, restore PS
	moveq	#1,%d0			| return 1 (for alternate returns)
	rts

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	%sp@(4),%a1
	movw	%sr,%a1@(PCB_PS)
	movl	%usp,%a0		| grab USP
	movl	%a0,%a1@(PCB_USP)	| and save it
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers

	moveq	#0,%d0			| return 0
	rts

/*
 * Get caller's current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee-saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	rts

ENTRY(getsfc)
	movc	%sfc,%d0
	rts

ENTRY(getdfc)
	movc	%dfc,%d0
	rts

ENTRY(getvbr)
	movc	%vbr,%d0
#ifdef __ELF__
	movl	%d0, %a0
#endif /* __ELF__ */
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/* loadustp, ptest_addr */

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts

#ifdef DIAGNOSTIC
| Message for 68881 save/restore panic
Lsr0:
	.asciz	"m68881 save/restore"
	.even
#endif

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
ENTRY(m68881_restore)
#ifdef	DIAGNOSTIC
	movl	#Lsr0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/
#else
	rts
#endif

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable delay_divisor,
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts
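
/*
 * Roughly, in C (a sketch only; delay_divisor is the per-model
 * tuning value described above):
 *
 *	void _delay(unsigned n)		n = usecs << 8
 *	{
 *		int d = n;
 *		do {
 *			d -= delay_divisor;
 *		} while (d > 0);
 *	}
 */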

/*
 * Set or clear bits in the enable register.  This mimics the
 * strange behavior in SunOS' locore.o, where they keep a soft
 * copy of what they think is in the enable register and loop
 * making a change until it sticks.  This is apparently done to
 * be concurrent-safe without disabling interrupts.  Why you
 * can't just disable interrupts while mucking with the register
 * is unclear, but it may jibe with sun3/intreg.c using the
 * single-instruction bit operations on the memory-addressable
 * sun3 intreg; i.e., once the sun2 was designed they realized the
 * enable register had to be treated this way, so on the sun3 they
 * made it memory-addressable so you could just use the single
 * instructions.
 */
ENTRY(enable_reg_and)
	movc	%dfc,%a1		| save current dfc
	moveq	#FC_CONTROL, %d1
	movc	%d1, %dfc		| make movs access "control"
	movl	%sp@(4), %d1		| get our AND mask
	clrl	%d0
1:	andw	%d1, _C_LABEL(enable_reg_soft)	| do our AND
	movew	_C_LABEL(enable_reg_soft), %d0	| get the result
	movsw	%d0, SYSTEM_ENAB		| install the result
	cmpw	_C_LABEL(enable_reg_soft), %d0
	bne	1b				| install it again if the soft value changed
	movc	%a1,%dfc		| restore dfc
	rts

ENTRY(enable_reg_or)
	movc	%dfc,%a1		| save current dfc
	moveq	#FC_CONTROL, %d1
	movc	%d1, %dfc		| make movs access "control"
	movl	%sp@(4), %d1		| get our OR mask
	clrl	%d0
1:	orw	%d1, _C_LABEL(enable_reg_soft)	| do our OR
	movew	_C_LABEL(enable_reg_soft), %d0	| get the result
	movsw	%d0, SYSTEM_ENAB		| install the result
	cmpw	_C_LABEL(enable_reg_soft), %d0
	bne	1b				| install it again if the soft value changed
	movc	%a1,%dfc		| restore dfc
	rts
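
/*
 * The retry loop in both routines, sketched in C (soft stands for
 * enable_reg_soft; the movsw store goes to the real register in
 * control space):
 *
 *	do {
 *		soft &= mask;		or: soft |= mask;
 *		v = soft;
 *		SYSTEM_ENAB = v;	install in the real register
 *	} while (v != soft);		retry if we raced an update
 */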

/*
 * Use common m68k 16-bit aligned copy routines.
 */
#include <m68k/m68k/w16copy.s>

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN2_MONSTART
	.set	_PROM_BASE,SUN2_PROM_BASE
	.set	_MONEND,SUN2_MONEND

| The end!