/*	$NetBSD: locore.s,v 1.82 2002/10/20 02:37:35 chs Exp $	*/

/*
 * Copyright (c) 1994, 1995 Gordon W. Ross
 * Copyright (c) 1993 Adam Glass
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: Utah $Hdr: locore.s 1.66 92/12/22$
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "opt_compat_netbsd.h"
#include "opt_compat_svr4.h"
#include "opt_compat_sunos.h"
#include "opt_kgdb.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
	.text
GLOBAL(kernel_text)

| This is the entry point, as well as the end of the temporary stack
| used during process switch (one 8K page ending at start)
ASGLOBAL(tmpstk)
ASGLOBAL(start)

| First we need to set it up so we can access the sun MMU, and be otherwise
| undisturbed.  Until otherwise noted, all code must be position independent
| as the boot loader put us low in memory, but we are linked high.
	movw	#PSL_HIGHIPL,%sr	| no interrupts
	moveq	#FC_CONTROL,%d0		| make movs access "control"
	movc	%d0,%sfc		| space where the sun3 designers
	movc	%d0,%dfc		| put all the "useful" stuff

| Set context zero and stay there until pmap_bootstrap.
	moveq	#0,%d0
	movsb	%d0,CONTEXT_REG

| In order to "move" the kernel to high memory, we are going to copy the
| first 4 Mb of pmegs such that we will be mapped at the linked address.
| This is all done by copying in the segment map (top-level MMU table).
| We will unscramble which PMEGs we actually need later.

	movl	#(SEGMAP_BASE+0),%a0		| src
	movl	#(SEGMAP_BASE+KERNBASE),%a1	| dst
	movl	#(0x400000/NBSG),%d0		| count

L_per_pmeg:
	movsb	%a0@,%d1		| copy segmap entry
	movsb	%d1,%a1@
	addl	#NBSG,%a0		| increment pointers
	addl	#NBSG,%a1
	subql	#1,%d0			| decrement count
	bgt	L_per_pmeg

| Kernel is now double mapped at zero and KERNBASE.
| Force a long jump to the relocated code (high VA).
	movl	#IC_CLEAR,%d0		| Flush the I-cache
	movc	%d0,%cacr
	jmp	L_high_code:l		| long jump

L_high_code:
| We are now running in the correctly relocated kernel, so
| we are no longer restricted to position-independent code.

| Do bootstrap stuff needed before main() gets called.
| Make sure the initial frame pointer is zero so that
| the backtrace algorithm used by KGDB terminates nicely.
	lea	_ASM_LABEL(tmpstk),%sp
	movl	#0,%a6
	jsr	_C_LABEL(_bootstrap)	| See locore2.c

| Now that _bootstrap() is done using the PROM functions,
| we can safely set the %sfc/dfc to something != FC_CONTROL
	moveq	#FC_USERD,%d0		| make movs access "user data"
	movc	%d0,%sfc		| space for copyin/copyout
	movc	%d0,%dfc

| Setup process zero user/kernel stacks.
	movl	_C_LABEL(proc0paddr),%a1 | get proc0 pcb addr
	lea	%a1@(USPACE-4),%sp	| set SSP to last word
	movl	#USRSTACK-4,%a2
	movl	%a2,%usp		| init user SP

| Note curpcb was already set in _bootstrap().
| Will do fpu initialization during autoconfig (see fpu.c)
| The interrupt vector table and stack are now ready.
| Interrupts will be enabled later, AFTER autoconfiguration
| is finished, to avoid spurious interrupts.

/*
 * Create a fake exception frame so that cpu_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	%sp@-			| tf_format,tf_vector
	clrl	%sp@-			| tf_pc (filled in later)
	movw	#PSL_USER,%sp@-		| tf_sr for user mode
	clrl	%sp@-			| tf_stackadj
	lea	%sp@(-64),%sp		| tf_regs[16]
	lea	_C_LABEL(proc0),%a0	| proc0.p_md.md_regs =
	movl	%a1,%a0@(P_MDREGS)	|   trapframe
	jbsr	_C_LABEL(main)		| main(&trapframe)
	PANIC("main() returned")

/*
 * proc_trampoline: call function in register %a2 with %a3 as an arg
 * and then rei.
 */
GLOBAL(proc_trampoline)
	movl	%a3,%sp@-		| push function arg
	jbsr	%a2@			| call function
	addql	#4,%sp			| pop arg
	movl	%sp@(FR_SP),%a0		| grab and load
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	jra	_ASM_LABEL(rei)		| and return

| That is all the assembly startup code we need on the sun3!
| The rest of this is like the hp300/locore.s where possible.

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>
168
169GLOBAL(buserr)
170	tstl	_C_LABEL(nofault)	| device probe?
171	jeq	_C_LABEL(addrerr)	| no, handle as usual
172	movl	_C_LABEL(nofault),%sp@-	| yes,
173	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
174GLOBAL(addrerr)
175	clrl	%sp@-			| stack adjust count
176	moveml	#0xFFFF,%sp@-		| save user registers
177	movl	%usp,%a0		| save the user SP
178	movl	%a0,%sp@(FR_SP)		|   in the savearea
179	lea	%sp@(FR_HW),%a1		| grab base of HW berr frame
180	moveq	#0,%d0
181	movw	%a1@(10),%d0		| grab SSW for fault processing
182	btst	#12,%d0			| RB set?
183	jeq	LbeX0			| no, test RC
184	bset	#14,%d0			| yes, must set FB
185	movw	%d0,%a1@(10)		| for hardware too
186LbeX0:
187	btst	#13,%d0			| RC set?
188	jeq	LbeX1			| no, skip
189	bset	#15,%d0			| yes, must set FC
190	movw	%d0,%a1@(10)		| for hardware too
191LbeX1:
192	btst	#8,%d0			| data fault?
193	jeq	Lbe0			| no, check for hard cases
194	movl	%a1@(16),%d1		| fault address is as given in frame
195	jra	Lbe10			| thats it
196Lbe0:
197	btst	#4,%a1@(6)		| long (type B) stack frame?
198	jne	Lbe4			| yes, go handle
199	movl	%a1@(2),%d1		| no, can use save PC
200	btst	#14,%d0			| FB set?
201	jeq	Lbe3			| no, try FC
202	addql	#4,%d1			| yes, adjust address
203	jra	Lbe10			| done
204Lbe3:
205	btst	#15,%d0			| FC set?
206	jeq	Lbe10			| no, done
207	addql	#2,%d1			| yes, adjust address
208	jra	Lbe10			| done
209Lbe4:
210	movl	%a1@(36),%d1		| long format, use stage B address
211	btst	#15,%d0			| FC set?
212	jeq	Lbe10			| no, all done
213	subql	#2,%d1			| yes, adjust address
214Lbe10:
215	movl	%d1,%sp@-		| push fault VA
216	movl	%d0,%sp@-		| and padded SSW
217	movw	%a1@(6),%d0		| get frame format/vector offset
218	andw	#0x0FFF,%d0		| clear out frame format
219	cmpw	#12,%d0			| address error vector?
220	jeq	Lisaerr			| yes, go to it
221
222/*
223 * the sun3 specific code
224 *
225 * our mission: figure out whether what we are looking at is
226 *              bus error in the UNIX sense, or
227 *	        a memory error i.e a page fault
228 *
229 * [this code replaces similarly mmu specific code in the hp300 code]
230 */
231sun3_mmu_specific:
232	clrl %d0			| make sure top bits are cleard too
233	movl %d1,%sp@-			| save %d1
234	movc %sfc,%d1			| save %sfc to %d1
235	moveq #FC_CONTROL,%d0		| %sfc = FC_CONTROL
236	movc %d0,%sfc
237	movsb BUSERR_REG,%d0		| get value of bus error register
238	movc %d1,%sfc			| restore %sfc
239	movl %sp@+,%d1			| restore %d1
240	andb #BUSERR_MMU,%d0 		| is this an MMU fault?
241	jeq Lisberr			| non-MMU bus error
242/* End of sun3 specific code. */
243
244Lismerr:
245	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
246	jra	_ASM_LABEL(faultstkadj)	| and deal with it
247Lisaerr:
248	movl	#T_ADDRERR,%sp@-	| mark address error
249	jra	_ASM_LABEL(faultstkadj)	| and deal with it
250Lisberr:
251	movl	#T_BUSERR,%sp@-		| mark bus error
252	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
GLOBAL(fpfline)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

GLOBAL(fpunsupp)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save registers
	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
	jra	_ASM_LABEL(fault)	| do it

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
GLOBAL(fpfault)
	clrl	%sp@-		| stack adjust count
	moveml	#0xFFFF,%sp@-	| save user registers
	movl	%usp,%a0	| and save
	movl	%a0,%sp@(FR_SP)	|   the user stack pointer
	clrl	%sp@-		| no VA arg
	movl	_C_LABEL(curpcb),%a0	| current pcb
	lea	%a0@(PCB_FPCTX),%a0 | address of FP savearea
	fsave	%a0@		| save state
	tstb	%a0@		| null state frame?
	jeq	Lfptnull	| yes, safe
	clrw	%d0		| no, need to tweak BIU
	movb	%a0@(1),%d0	| get frame size
	bset	#3,%a0@(0,%d0:w) | set exc_pend bit of BIU
Lfptnull:
	fmovem	%fpsr,%sp@-	| push fpsr as code argument
	frestore %a0@		| restore state
	movl	#T_FPERR,%sp@-	| push type arg
	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup

/*
 * Other exceptions only cause four and six word stack frame and require
 * no post-trap stack adjustment.
 */
GLOBAL(badtrap)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save std frame regs
	jbsr	_C_LABEL(straytrap)	| report
	moveml	%sp@+,#0xFFFF		| restore regs
	addql	#4,%sp			| stack adjust count
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 0 is for system calls
 */
GLOBAL(trap0)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-		| save user registers
	movl	%usp,%a0		| save the user SP
	movl	%a0,%sp@(FR_SP)		|   in the savearea
	movl	%d0,%sp@-		| push syscall number
	jbsr	_C_LABEL(syscall)	| handle it
	addql	#4,%sp			| pop syscall arg
	movl	%sp@(FR_SP),%a0		| grab and restore
	movl	%a0,%usp		|   user SP
	moveml	%sp@+,#0x7FFF		| restore most registers
	addql	#8,%sp			| pop SP and stack adjust
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trap 12 is the entry point for the cachectl "syscall"
 *	cachectl(command, addr, length)
 * command in %d0, addr in %a1, length in %d1
 */
GLOBAL(trap12)
	movl	_C_LABEL(curproc),%sp@-	| push curproc pointer
	movl	%d1,%sp@-		| push length
	movl	%a1,%sp@-		| push addr
	movl	%d0,%sp@-		| push command
	jbsr	_C_LABEL(cachectl1)	| do it
	lea	%sp@(16),%sp		| pop args
	jra	_ASM_LABEL(rei)		| all done

/*
 * Trace (single-step) trap.  Kernel-mode is special.
 * User mode traps are simply passed on to trap().
 */
GLOBAL(trace)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRACE,%d0

	| Check PSW and see what happened.
	|   T=0 S=0	(should not happen)
	|   T=1 S=0	trace trap from user mode
	|   T=0 S=1	trace trap on a trap instruction
	|   T=1 S=1	trace trap from system mode (kernel breakpoint)

	movw	%sp@(FR_HW),%d1		| get PSW
	notw	%d1			| XXX no support for T0 on 680[234]0
	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
	jeq	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

/*
 * Trap 15 is used for:
 *	- GDB breakpoints (in user programs)
 *	- KGDB breakpoints (in the kernel)
 *	- trace traps for SUN binaries (not fully supported yet)
 * User mode traps are simply passed to trap().
 */
GLOBAL(trap15)
	clrl	%sp@-			| stack adjust count
	moveml	#0xFFFF,%sp@-
	moveq	#T_TRAP15,%d0
	btst	#5,%sp@(FR_HW)		| was supervisor mode?
	jne	_ASM_LABEL(kbrkpt)	|  yes, kernel brkpt
	jra	_ASM_LABEL(fault)	| no, user-mode fault

ASLOCAL(kbrkpt)
	| Kernel-mode breakpoint or trace trap. (%d0=trap_type)
	| Save the system sp rather than the user sp.
	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
	movl	%a6,%sp@(FR_SP)		|  from before trap

	| If we are not on tmpstk switch to it.
	| (so debugger can change the stack pointer)
	movl	%a6,%d1
	cmpl	#_ASM_LABEL(tmpstk),%d1
	jls	Lbrkpt2 		| already on tmpstk
	| Copy frame to the temporary stack
	movl	%sp,%a0			| %a0=src
	lea	_ASM_LABEL(tmpstk)-96,%a1	| %a1=dst
	movl	%a1,%sp			| sp=new frame
	moveq	#FR_SIZE,%d1
Lbrkpt1:
	movl	%a0@+,%a1@+
	subql	#4,%d1
	bgt	Lbrkpt1

Lbrkpt2:
	| Call the trap handler for the kernel debugger.
	| Do not call trap() to handle it, so that we can
	| set breakpoints in trap() if we want.  We know
	| the trap type is either T_TRACE or T_BREAKPOINT.
	movl	%d0,%sp@-		| push trap type
	jbsr	_C_LABEL(trap_kdebug)
	addql	#4,%sp			| pop args

	| The stack pointer may have been modified, or
	| data below it modified (by kgdb push call),
	| so push the hardware frame at the current sp
	| before restoring registers and returning.
	movl	%sp@(FR_SP),%a0		| modified sp
	lea	%sp@(FR_SIZE),%a1	| end of our frame
	movl	%a1@-,%a0@-		| copy 2 longs with
	movl	%a1@-,%a0@-		| ... predecrement
	movl	%a0,%sp@(FR_SP)		| sp = h/w frame
	moveml	%sp@+,#0x7FFF		| restore all but sp
	movl	%sp@,%sp		| ... and sp
	rte				| all done

/* Use common m68k sigreturn */
#include <m68k/m68k/sigreturn.s>

/*
 * Interrupt handlers.  Most are auto-vectored,
 * and hard-wired the same way on all sun3 models.
 * Format in the stack is:
 *   %d0,%d1,%a0,%a1, sr, pc, vo
 */

#define INTERRUPT_SAVEREG \
	moveml	#0xC0C0,%sp@-

#define INTERRUPT_RESTORE \
	moveml	%sp@+,#0x0303

/*
 * This is the common auto-vector interrupt handler,
 * for which the CPU provides the vector=0x18+level.
 * These are installed in the interrupt vector table.
 */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_autovec)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_autovec)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

/* clock: see clock.c */
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_clock)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(clock_intr)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

| Handler for all vectored interrupts (i.e. VME interrupts)
#ifdef __ELF__
	.align	4
#else
	.align	2
#endif
GLOBAL(_isr_vectored)
	INTERRUPT_SAVEREG
	jbsr	_C_LABEL(isr_vectored)
	INTERRUPT_RESTORE
	jra	_ASM_LABEL(rei)

#undef	INTERRUPT_SAVEREG
#undef	INTERRUPT_RESTORE

/* interrupt counters (needed by vmstat) */
GLOBAL(intrnames)
	.asciz	"spur"	| 0
	.asciz	"lev1"	| 1
	.asciz	"lev2"	| 2
	.asciz	"lev3"	| 3
	.asciz	"lev4"	| 4
	.asciz	"clock"	| 5
	.asciz	"lev6"	| 6
	.asciz	"nmi"	| 7
GLOBAL(eintrnames)

	.data
	.even
GLOBAL(intrcnt)
	.long	0,0,0,0,0,0,0,0,0,0
GLOBAL(eintrcnt)
	.text

/*
 * Emulation of VAX REI instruction.
 *
 * This code is (mostly) un-altered from the hp300 code,
 * except that sun machines do not need a simulated SIR
 * because they have a real software interrupt register.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */

ASGLOBAL(rei)
#ifdef	DIAGNOSTIC
	tstl	_C_LABEL(panicstr)	| have we panicked?
	jne	Ldorte			| yes, do not make matters worse
#endif
	tstl	_C_LABEL(astpending)	| AST pending?
	jeq	Ldorte			| no, done
Lrei1:
	btst	#5,%sp@			| yes, are we returning to user mode?
	jne	Ldorte			| no, done
	movw	#PSL_LOWIPL,%sr		| lower SPL
	clrl	%sp@-			| stack adjust
	moveml	#0xFFFF,%sp@-		| save all registers
	movl	%usp,%a1		| including
	movl	%a1,%sp@(FR_SP)		|    the users SP
	clrl	%sp@-			| VA == none
	clrl	%sp@-			| code == none
	movl	#T_ASTFLT,%sp@-		| type == async system trap
	jbsr	_C_LABEL(trap)		| go handle it
	lea	%sp@(12),%sp		| pop value args
	movl	%sp@(FR_SP),%a0		| restore user SP
	movl	%a0,%usp		|   from save area
	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	%sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,%sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	%sp@(FR_HW),%a1		| pointer to HW frame
	addql	#8,%a1			| source pointer
	movl	%a1,%a0			| source
	addw	%d0,%a0			|  + hole size = dest pointer
	movl	%a1@-,%a0@-		| copy
	movl	%a1@-,%a0@-		|  8 bytes
	movl	%a0,%sp@(FR_SP)		| new SSP
	moveml	%sp@+,#0x7FFF		| restore user registers
	movl	%sp@,%sp		| and our SP
Ldorte:
	rte				| real return

/*
 * Initialization is at the beginning of this file, because the
 * kernel entry point needs to be at zero for compatibility with
 * the Sun boot loader.  This works on Sun machines because the
 * interrupt vector table for reset is NOT at address zero.
 * (The MMU has a "boot" bit that forces access to the PROM)
 */

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>
#ifdef COMPAT_SUNOS
#include <m68k/m68k/sunos_sigcode.s>
#endif
#ifdef COMPAT_SVR4
#include <m68k/m68k/svr4_sigcode.s>
#endif

	.text

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

BSS(want_resched,4)

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

| Message for Lbadsw panic
Lsw0:
	.asciz	"cpu_switch"
	.even

	.data
GLOBAL(masterpaddr)		| XXX compatibility (debuggers)
GLOBAL(curpcb)
	.long	0
ASBSS(nullpcb,SIZEOF_PCB)
	.text

/*
 * At exit of a process, do a cpu_switch for the last time.
 * Switch to a safe stack and PCB, and select a new process to run.  The
 * old stack and u-area will be freed by the reaper.
 */
ENTRY(switch_exit)
	movl	%sp@(4),%a0		| struct proc *p
					| save state into garbage pcb
	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack

	/* Schedule the vmspace and stack to be freed. */
	movl	%a0,%sp@-		| exit2(p)
	jbsr	_C_LABEL(exit2)
	lea	%sp@(4),%sp		| pop args

#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif

	jra	_C_LABEL(cpu_switch)

/*
 * When no processes are on the runq, cpu_switch() branches to idle
 * to wait for something to come ready.
 */
Lidle:
#if defined(LOCKDEBUG)
	/* Release sched_lock */
	jbsr	_C_LABEL(sched_unlock_idle)
#endif
	stop	#PSL_LOWIPL
GLOBAL(_Idle)				| See clock.c
	movw	#PSL_HIGHIPL,%sr
#if defined(LOCKDEBUG)
	/* Acquire sched_lock */
	jbsr	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
	jra	Lsw1

Lbadsw:
	movl	#Lsw0,%sp@-
	jbsr	_C_LABEL(panic)
	/*NOTREACHED*/

/*
 * cpu_switch()
 * Hacked for sun3
 */
ENTRY(cpu_switch)
	movl	_C_LABEL(curpcb),%a0	| current pcb
	movw	%sr,%a0@(PCB_PS)	| save sr before changing ipl
#ifdef notyet
	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
#endif
	clrl	_C_LABEL(curproc)

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movl	_C_LABEL(sched_whichqs),%d0
	jeq	Lidle
Lsw1:
	/*
	 * Interrupts are blocked, sched_lock is held.  If
	 * we come here via Idle, %d0 contains the contents
	 * of a non-zero sched_whichqs.
	 */
	movl	%d0,%d1
	negl	%d0
	andl	%d1,%d0			| isolate lowest set bit
	bfffo	%d0{#0:#32},%d1
	eorib	#31,%d1

	movl	%d1,%d0
	lslb	#3,%d1			| convert queue number to index
	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
	movl	%d1,%a1
	movl	%a1@(P_FORW),%a0	| p = q->p_forw
	cmpal	%d1,%a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
#ifdef DIAGNOSTIC
	tstl	%a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,%a0@(P_STAT)
	jne	Lbadsw
#endif
	movl	%a0@(P_FORW),%a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	%a0@(P_FORW),%a1		| n = p->p_forw
	movl	%a0@(P_BACK),%a1@(P_BACK)	| n->p_back = q
	cmpal	%d1,%a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_C_LABEL(sched_whichqs),%d1
	bclr	%d0,%d1			| no, clear bit
	movl	%d1,_C_LABEL(sched_whichqs)
Lsw2:
	/* p->p_cpu initialized in fork1() for single-processor */
	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
	movl	%a0,_C_LABEL(curproc)
	clrl	_C_LABEL(want_resched)
#ifdef notyet
	movl	%sp@+,%a1		| XXX - Make this work!
	cmpl	%a0,%a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_C_LABEL(curpcb),%a1
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
	movl	%usp,%a2		| grab USP (a2 has been saved)
	movl	%a2,%a1@(PCB_USP)	| and save it

	tstl	_C_LABEL(fputype)	| Do we have an fpu?
	jeq	Lswnofpsave		| No?  Then don't try save.
	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
	fsave	%a2@			| save FP state
	tstb	%a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	%fp0-%fp7,%a2@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR)	| save FP control regs
Lswnofpsave:

	/*
	 * Now that we have saved all the registers that must be
	 * preserved, we are free to use those registers until
	 * we load the registers for the switched-to process.
	 * In this section, keep:  %a0=curproc, %a1=curpcb
	 */

	clrl	%a0@(P_BACK)		| clear back link
	movl	%a0@(P_ADDR),%a1	| get p_addr
	movl	%a1,_C_LABEL(curpcb)

#if defined(LOCKDEBUG)
	/*
	 * Done mucking with the run queues, release the
	 * scheduler lock, but keep interrupts out.
	 */
	movl	%a0,%sp@-		| not args...
	movl	%a1,%sp@-		| ...just saving
	jbsr	_C_LABEL(sched_unlock_idle)
	movl	%sp@+,%a1
	movl	%sp@+,%a0
#endif

	/*
	 * Load the new VM context (new MMU root pointer)
	 */
	movl	%a0@(P_VMSPACE),%a2	| vm = p->p_vmspace
#ifdef DIAGNOSTIC
	tstl	%a2			| vm == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
#if 1	/* XXX: PMAP_DEBUG */
	/*
	 * Just call _pmap_switch() for now.  Later on,
	 * use the in-line version below (for speed).
	 */
	movl	%a2@(VM_PMAP),%a2 	| pmap = vm->vm_map.pmap
	pea	%a2@			| push pmap
	jbsr	_C_LABEL(_pmap_switch)	| _pmap_switch(pmap)
	addql	#4,%sp
	movl	_C_LABEL(curpcb),%a1	| restore p_addr
| Note: pmap_switch will clear the cache if needed.
#else
	/* XXX - Later, use this unfinished inline.. */
	XXX	XXX	(PM_CTXNUM)
	movl	#IC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate cache(s)
#endif

	/*
	 * Reload the registers for the new process.
	 * After this point we can only use %d0,%d1,%a0,%a1
	 */
	moveml	%a1@(PCB_REGS),#0xFCFC	| reload registers
	movl	%a1@(PCB_USP),%a0
	movl	%a0,%usp		| and USP

	tstl	_C_LABEL(fputype)	| If we don't have an fpu,
	jeq	Lres_skip		|  don't try to restore it.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	tstb	%a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lresfprest:
	frestore %a0@			| restore state
Lres_skip:
	movw	%a1@(PCB_PS),%d0	| no, restore PS
#ifdef DIAGNOSTIC
	btst	#13,%d0			| supervisor mode?
	jeq	Lbadsw			| no? panic!
#endif
	movw	%d0,%sr			| OK, restore PS
	moveq	#1,%d0			| return 1 (for alternate returns)
	movl	%d0,%a0
	rts

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	%sp@(4),%a1
	movw	%sr,%a1@(PCB_PS)
	movl	%usp,%a0		| grab USP
	movl	%a0,%a1@(PCB_USP)	| and save it
	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers

	tstl	_C_LABEL(fputype)	| Do we have FPU?
	jeq	Lsavedone		| No?  Then don't save state.
	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
	fsave	%a0@			| save FP state
	tstb	%a0@			| null state frame?
	jeq	Lsavedone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lsavedone:
	moveq	#0,%d0			| return 0
	movl	%d0,%a0
	rts

/* suline() */
/* TBIA, TBIS, TBIAS, TBIAU */

/*
 * Invalidate instruction cache
 */
ENTRY(ICIA)
	movl	#IC_CLEAR,%d0
	movc	%d0,%cacr		| invalidate i-cache
	rts

/* DCIA, DCIS */

/*
 * Invalidate data cache.
 */
ENTRY(DCIU)
	rts

/* ICPL, ICPP, DCPL, DCPP, DCPA, DCFL, DCFP */
/* PCIA, ecacheon, ecacheoff */

/*
 * Get callers current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 *
 * [I don't think the ENTRY() macro will do the right thing with this -- glass]
 */
GLOBAL(getsp)
	movl	%sp,%d0			| get current SP
	addql	#4,%d0			| compensate for return address
	movl	%d0,%a0
	rts

ENTRY(getsfc)
	movc	%sfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getdfc)
	movc	%dfc,%d0
	movl	%d0,%a0
	rts

ENTRY(getvbr)
	movc	%vbr,%a0
	rts

ENTRY(setvbr)
	movl	%sp@(4),%d0
	movc	%d0,%vbr
	rts

/* loadustp, ptest_addr */

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, we need one instantiation here
 * in case some non-optimized code makes external references.
 * Most places will use the inlined functions param.h supplies.
 */

ENTRY(_getsr)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%a0
	rts

ENTRY(_spl)
	clrl	%d0
	movw	%sr,%d0
	movl	%sp@(4),%d1
	movw	%d1,%sr
	rts

ENTRY(_splraise)
	clrl	%d0
	movw	%sr,%d0
	movl	%d0,%d1
	andl	#PSL_HIGHIPL,%d1 	| old &= PSL_HIGHIPL
	cmpl	%sp@(4),%d1		| (old - new)
	bge	Lsplr
	movl	%sp@(4),%d1
	movw	%d1,%sr
Lsplr:
	rts

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
	movl	%sp@(4),%a0		| save area pointer
	fsave	%a0@			| save state
	tstb	%a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	%fp0-%fp7,%a0@(FPF_REGS)	| save FP general regs
	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control regs
Lm68881sdone:
	rts

ENTRY(m68881_restore)
	movl	%sp@(4),%a0		| save area pointer
	tstb	%a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control regs
	fmovem	%a0@(FPF_REGS),%fp0-%fp7	| restore FP general regs
Lm68881rdone:
	frestore %a0@			| restore state
	rts

/*
 * _delay(unsigned N)
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 * XXX: Currently this is set based on the CPU model,
 * XXX: but this should be determined at run time...
 */
GLOBAL(_delay)
	| %d0 = arg = (usecs << 8)
	movl	%sp@(4),%d0
	| %d1 = delay_divisor;
	movl	_C_LABEL(delay_divisor),%d1
	jra	L_delay			/* Jump into the loop! */

	/*
	 * Align the branch target of the loop to a half-line (8-byte)
	 * boundary to minimize cache effects.  This guarantees both
	 * that there will be no prefetch stalls due to cache line burst
	 * operations and that the loop will run from a single cache
	 * half-line.
	 */
#ifdef __ELF__
	.align	8
#else
	.align	3
#endif
L_delay:
	subl	%d1,%d0
	jgt	L_delay
	rts

/*
 * void set_segmap_allctx(vaddr_t va, int sme)
 */
ENTRY(set_segmap_allctx)
	linkw	%fp,#0
	moveml	#0x3000,%sp@-		| save %d2/%d3
	movl	8(%fp),%d3		| d3 = va
	andl	#0xffffffc,%d3
	bset	#29,%d3
	movl	%d3,%a1			| a1 = ctrladdr, d3 avail
	movl	12(%fp),%d1		| d1 = sme
	moveq	#FC_CONTROL,%d0
	movl	#CONTEXT_REG,%a0	| a0 = ctxreg
	movc	%sfc,%d3		| d3 = oldsfc
	movc	%d0,%sfc
	movsb	%a0@,%d2
	andi	#7,%d2			| d2 = oldctx
	movc	%d3,%sfc		| restore sfc, d3 avail
	movc	%dfc,%d3		| d3 = olddfc
	movc	%d0,%dfc
	movl	#(CONTEXT_NUM - 1),%d0	| d0 = ctx number
1:
	movsb	%d0,%a0@		| change to ctx
	movsb	%d1,%a1@		| set segmap
	dbf	%d0,1b			| loop setting each ctx
	movsb	%d2,%a0@		| restore ctx
	movc	%d3,%dfc		| restore dfc
	moveml	%sp@+,#0x000c		| restore %d2/%d3
	unlk	%fp
	rts

| Define some addresses, mostly so DDB can print useful info.
| Not using _C_LABEL() here because these symbols are never
| referenced by any C code, and if the leading underscore
| ever goes away, these lines turn into syntax errors...
	.set	_KERNBASE,KERNBASE
	.set	_MONSTART,SUN3_MONSTART
	.set	_PROM_BASE,SUN3_PROM_BASE
	.set	_MONEND,SUN3_MONEND

|The end!
1016