1/*	$NetBSD: locore.s,v 1.125 2002/11/02 20:03:05 chs Exp $	*/
2
3/*
4 * Copyright (c) 1994, 1995 Gordon W. Ross
5 * Copyright (c) 1988 University of Utah.
6 * Copyright (c) 1980, 1990, 1993
7 *	The Regents of the University of California.  All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 *    must display the following acknowledgement:
23 *	This product includes software developed by the University of
24 *	California, Berkeley and its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 *    may be used to endorse or promote products derived from this software
27 *    without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * from: Utah $Hdr: locore.s 1.66 92/12/22$
42 *
43 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
44 */
45
46#include "opt_compat_netbsd.h"
47#include "opt_compat_svr4.h"
48#include "opt_compat_sunos.h"
49#include "opt_ddb.h"
50#include "opt_fpsp.h"
51#include "opt_kgdb.h"
52#include "opt_lockdebug.h"
53
54#include "assym.h"
55#include <machine/asm.h>
56#include <machine/trap.h>
57
58#include "opt_useleds.h"
59#ifdef USELEDS
60#include <hp300/hp300/leds.h>
61#endif
62
63#define MMUADDR(ar)	movl	_C_LABEL(MMUbase),ar
64#define CLKADDR(ar)	movl	_C_LABEL(CLKbase),ar
65
66/*
67 * This is for kvm_mkdb, and should be the address of the beginning
68 * of the kernel text segment (not necessarily the same as kernbase).
69 */
70	.text
71GLOBAL(kernel_text)
72
73/*
74 * Clear and skip the first page of text; it will not be mapped at
75 * VA 0.
76 *
77 * The bootloader places the bootinfo in this page, and we allocate
78 * a VA for it and map it in pmap_bootstrap().
79 */
80	.fill	NBPG/4,4,0
81
82/*
83 * Temporary stack for a variety of purposes.
84 * Try to make this the first thing in the data segment so it
85 * is page aligned.  Note that if we overflow here, we run into
86 * our text segment.
87 */
88	.data
89	.space	NBPG
90ASLOCAL(tmpstk)
91
92#include <hp300/hp300/vectors.s>
93
94/*
95 * Macro to relocate a symbol, used before MMU is enabled.
96 */
97#ifdef __STDC__
98#define	IMMEDIATE		#
99#define	_RELOC(var, ar)			\
100	movel	IMMEDIATE var,ar;	\
101	addl	%a5,ar
102#else
103#define	_RELOC(var, ar)			\
104	movel	#var,ar;		\
105	addl	%a5,ar
106#endif /* __STDC__ */
107
108#define	RELOC(var, ar)		_RELOC(_C_LABEL(var), ar)
109#define	ASRELOC(var, ar)	_RELOC(_ASM_LABEL(var), ar)
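
/*
 * For illustration: with the kernel loaded at physical offset %a5,
 * RELOC(esym, %a0) expands to roughly
 *
 *	movel	#_C_LABEL(esym),%a0	| link-time (virtual) address
 *	addl	%a5,%a0			| + load offset = physical address
 *
 * so the symbol can be dereferenced before the MMU is enabled.
 */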
110
111/*
112 * Final bits of grunt work required to reboot the system.  The MMU
113 * must be disabled when this is invoked.
114 */
115#define DOREBOOT						\
116	/* Reset Vector Base Register to what PROM expects. */  \
117	movl	#0,%d0;						\
118	movc	%d0,%vbr;					\
119	/* Jump to REQ_REBOOT */				\
120	jmp	0x1A4;
121
122/*
123 * Initialization
124 *
125 * A4 contains the address of the end of the symtab
126 * A5 contains physical load point from boot
127 * VBR contains zero from ROM.  Exceptions will continue to vector
128 * through ROM until MMU is turned on at which time they will vector
129 * through our table (vectors.s).
130 */
131
132BSS(lowram,4)
133BSS(esym,4)
134
135ASENTRY_NOPROFILE(start)
136	movw	#PSL_HIGHIPL,%sr	| no interrupts
137	ASRELOC(tmpstk, %a0)
138	movl	%a0,%sp			| give ourselves a temporary stack
139	RELOC(esym, %a0)
140#if 1
141	movl	%a4,%a0@		| store end of symbol table
142#else
143	clrl	%a0@			| no symbol table, yet
144#endif
145	RELOC(lowram, %a0)
146	movl	%a5,%a0@		| store start of physical memory
147	movl	#CACHE_OFF,%d0
148	movc	%d0,%cacr		| clear and disable on-chip cache(s)
149
150/* check for internal HP-IB in SYSFLAG */
151	btst	#5,0xfffffed2		| internal HP-IB?
152	jeq	Lhaveihpib		| yes, have HP-IB just continue
153	RELOC(internalhpib, %a0)
154	movl	#0,%a0@			| no, clear associated address
155Lhaveihpib:
156
157	RELOC(boothowto, %a0)		| save reboot flags
158	movl	%d7,%a0@
159	RELOC(bootdev, %a0)		|   and boot device
160	movl	%d6,%a0@
161
162	/*
163	 * All data registers are now free.  All address registers
164	 * except %a5 are free.  %a5 is used by the RELOC() macro,
165	 * and cannot be used until after the MMU is enabled.
166	 */
167
168/* determine our CPU/MMU combo - check for all regardless of kernel config */
169	movl	#INTIOBASE+MMUBASE,%a1
170	movl	#0x200,%d0		| data freeze bit
171	movc	%d0,%cacr		|   only exists on 68030
172	movc	%cacr,%d0		| read it back
173	tstl	%d0			| zero?
174	jeq	Lnot68030		| yes, we have 68020/68040
175
176	/*
177	 * 68030 models
178	 */
179
180	RELOC(mmutype, %a0)		| no, we have 68030
181	movl	#MMU_68030,%a0@		| set to reflect 68030 PMMU
182	RELOC(cputype, %a0)
183	movl	#CPU_68030,%a0@		| and 68030 CPU
184	RELOC(machineid, %a0)
185	movl	#0x80,%a1@(MMUCMD)	| set magic cookie
186	movl	%a1@(MMUCMD),%d0	| read it back
187	btst	#7,%d0			| cookie still on?
188	jeq	Lnot370			| no, 360 or 375
189	movl	#0,%a1@(MMUCMD)		| clear magic cookie
190	movl	%a1@(MMUCMD),%d0	| read it back
191	btst	#7,%d0			| still on?
192	jeq	Lisa370			| no, must be a 370
193	movl	#HP_340,%a0@		| yes, must be a 340
194	jra	Lstart1
195Lnot370:
196	movl	#HP_360,%a0@		| type is at least a 360
197	movl	#0,%a1@(MMUCMD)		| clear magic cookie2
198	movl	%a1@(MMUCMD),%d0	| read it back
199	btst	#16,%d0			| still on?
200	jeq	Lstart1			| no, must be a 360
201	RELOC(mmuid, %a0)		| save MMU ID
202	lsrl	#MMUID_SHIFT,%d0
203	andl	#MMUID_MASK,%d0
204	movl	%d0,%a0@
205	RELOC(machineid, %a0)
206	cmpb	#MMUID_345,%d0		| are we a 345?
207	beq	Lisa345
208	cmpb	#MMUID_375,%d0		| how about a 375?
209	beq	Lisa375
210	movl	#HP_400,%a0@		| must be a 400
211	jra	Lhaspac
212Lisa345:
213	movl	#HP_345,%a0@
214	jra	Lhaspac
215Lisa375:
216	movl	#HP_375,%a0@
217	jra	Lhaspac
218Lisa370:
219	movl	#HP_370,%a0@		| set to 370
220Lhaspac:
221	RELOC(ectype, %a0)
222	movl	#EC_PHYS,%a0@		| also has a physical address cache
223	jra	Lstart1
224
225	/*
226	 * End of 68030 section
227	 */
228
229Lnot68030:
230	bset	#31,%d0			| data cache enable bit
231	movc	%d0,%cacr		|   only exists on 68040
232	movc	%cacr,%d0		| read it back
233	tstl	%d0			| zero?
234	beq	Lis68020		| yes, we have 68020
235	moveq	#0,%d0			| now turn it back off
236	movec	%d0,%cacr		|   before we access any data
237
238	/*
239	 * 68040 models
240	 */
241
242	RELOC(mmutype, %a0)
243	movl	#MMU_68040,%a0@		| with a 68040 MMU
244	RELOC(cputype, %a0)
245	movl	#CPU_68040,%a0@		| and a 68040 CPU
246	RELOC(fputype, %a0)
247	movl	#FPU_68040,%a0@		| ...and FPU
248	RELOC(ectype, %a0)
249	movl	#EC_NONE,%a0@		| and no cache (for now XXX)
250	RELOC(mmuid,%a0)		| save MMU ID
251	movl	%a1@(MMUCMD),%d0
252	lsrl	#MMUID_SHIFT,%d0
253	andl	#MMUID_MASK,%d0
254	movl	%d0,%a0@
255	RELOC(machineid, %a0)
256	cmpb	#MMUID_425_T,%d0	| are we a 425t?
257	jeq	Lisa425
258	cmpb	#MMUID_425_S,%d0	| how about 425s?
259	jeq	Lisa425
260	cmpb	#MMUID_425_E,%d0	| or maybe a 425e?
261	jeq	Lisa425
262	cmpb	#MMUID_433_T,%d0	| or a 433t?
263	jeq	Lisa433
264	cmpb	#MMUID_433_S,%d0	| or a 433s?
265	jeq	Lisa433
266	cmpb	#MMUID_385,%d0		| or a 385?
267	jeq	Lisa385
268	movl	#HP_380,%a0@		| guess we're a 380
269	jra	Lstart1
270Lisa425:
271	movl	#HP_425,%a0@
272	jra	Lstart1
273Lisa433:
274	movl	#HP_433,%a0@
275	jra	Lstart1
276Lisa385:
277	movl	#HP_385,%a0@
278	jra	Lstart1
279
280	/*
281	 * End of 68040 section
282	 */
283
284	/*
285	 * 68020 models
286	 */
287
288Lis68020:
289	RELOC(fputype, %a0)		| all of the 68020 systems
290	movl	#FPU_68881,%a0@		|   have a 68881 FPU
291	movl	#1,%a1@(MMUCMD)		| a 68020, write HP MMU location
292	movl	%a1@(MMUCMD),%d0	| read it back
293	btst	#0,%d0			| non-zero?
294	jne	Lishpmmu		| yes, we have HP MMU
295	RELOC(mmutype, %a0)
296	movl	#MMU_68851,%a0@		| no, we have PMMU
297	RELOC(machineid, %a0)
298	movl	#HP_330,%a0@		| and 330 CPU
299	jra	Lstart1
300Lishpmmu:
301	RELOC(ectype, %a0)		| 320 or 350
302	movl	#EC_VIRT,%a0@		| both have a virtual address cache
303	movl	#0x80,%a1@(MMUCMD)	| set magic cookie
304	movl	%a1@(MMUCMD),%d0	| read it back
305	btst	#7,%d0			| cookie still on?
306	jeq	Lis320			| no, just a 320
307	RELOC(machineid, %a0)
308	movl	#HP_350,%a0@		| yes, a 350
309	jra	Lstart1
310Lis320:
311	RELOC(machineid, %a0)
312	movl	#HP_320,%a0@
313
314	/*
315	 * End of 68020 section
316	 */
317
318Lstart1:
319	/*
320	 * Now that we know what CPU we have, initialize the address error
321	 * and bus error handlers in the vector table:
322	 *
323	 *	vectab+8	bus error
324	 *	vectab+12	address error
325	 */
326	RELOC(cputype, %a0)
327#if 0
328	/* XXX assembler/linker feature/bug */
329	RELOC(vectab, %a2)
330#else
331	movl	#_C_LABEL(vectab),%a2
332	addl	%a5,%a2
333#endif
334#if defined(M68040)
335	cmpl	#CPU_68040,%a0@		| 68040?
336	jne	1f			| no, skip
337	movl	#_C_LABEL(buserr40),%a2@(8)
338	movl	#_C_LABEL(addrerr4060),%a2@(12)
339	jra	Lstart2
3401:
341#endif
342#if defined(M68020) || defined(M68030)
343	cmpl	#CPU_68040,%a0@		| 68040?
344	jeq	1f			| yes, skip
345	movl	#_C_LABEL(busaddrerr2030),%a2@(8)
346	movl	#_C_LABEL(busaddrerr2030),%a2@(12)
347	jra	Lstart2
3481:
349#endif
350	/* Config botch; no hope. */
351	DOREBOOT
352
353Lstart2:
354	movl	#0,%a1@(MMUCMD)		| clear out MMU again
355/* initialize source/destination control registers for movs */
356	moveq	#FC_USERD,%d0		| user space
357	movc	%d0,%sfc		|   as source
358	movc	%d0,%dfc		|   and destination of transfers
359/* initialize memory sizes (for pmap_bootstrap) */
360	movl	#MAXADDR,%d1		| last page
361	moveq	#PGSHIFT,%d2
362	lsrl	%d2,%d1			| convert to page (click) number
363	RELOC(maxmem, %a0)
364	movl	%d1,%a0@		| save as maxmem
365	movl	%a5,%d0			| lowram value from ROM via boot
366	lsrl	%d2,%d0			| convert to page number
367	subl	%d0,%d1			| compute amount of RAM present
368	RELOC(physmem, %a0)
369	movl	%d1,%a0@		| and physmem
370
371/* configure kernel and proc0 VA space so we can get going */
372#ifdef DDB
373	RELOC(esym,%a0)			| end of static kernel text/data/syms
374	movl	%a0@,%d5
375	jne	Lstart3
376#endif
377	movl	#_C_LABEL(end),%d5	| end of static kernel text/data
378Lstart3:
379	addl	#NBPG-1,%d5
380	andl	#PG_FRAME,%d5		| round to a page
381	movl	%d5,%a4
382	addl	%a5,%a4			| convert to PA
383	pea	%a5@			| firstpa
384	pea	%a4@			| nextpa
385	RELOC(pmap_bootstrap,%a0)
386	jbsr	%a0@			| pmap_bootstrap(firstpa, nextpa)
387	addql	#8,%sp
388
389/*
390 * Prepare to enable MMU.
391 * Since the kernel is not mapped logical == physical, we must ensure
392 * that when the MMU is turned on, all prefetched addresses (including
393 * the PC) are valid.  In order to guarantee that, we use the last physical
394 * page (which is conveniently mapped == VA) and load it up with enough
395 * code to defeat the prefetch, then we execute the jump back to here.
396 *
397 * Is this all really necessary, or am I paranoid??
398 */
399	RELOC(Sysseg, %a0)		| system segment table addr
400	movl	%a0@,%d1		| read value (a KVA)
401	addl	%a5,%d1			| convert to PA
402	RELOC(mmutype, %a0)
403	tstl	%a0@			| HP MMU?
404	jeq	Lhpmmu2			| yes, skip
405	cmpl	#MMU_68040,%a0@		| 68040?
406	jne	Lmotommu1		| no, skip
407	.long	0x4e7b1807		| movc %d1,%srp
408	jra	Lstploaddone
409Lmotommu1:
410	RELOC(protorp, %a0)
411	movl	#0x80000202,%a0@	| nolimit + share global + 4 byte PTEs
412	movl	%d1,%a0@(4)		| + segtable address
413	pmove	%a0@,%srp		| load the supervisor root pointer
414	movl	#0x80000002,%a0@	| reinit upper half for CRP loads
415	jra	Lstploaddone		| done
416Lhpmmu2:
417	moveq	#PGSHIFT,%d2
418	lsrl	%d2,%d1			| convert to page frame
419	movl	%d1,INTIOBASE+MMUBASE+MMUSSTP | load in sysseg table register
420Lstploaddone:
421	lea	MAXADDR,%a2		| PA of last RAM page
422#if 0
423	ASRELOC(Lhighcode, %a1)		| addr of high code
424	ASRELOC(Lehighcode, %a3)	| end addr
425#else
426	/* don't want pc-relative addressing */
427	.word	0x43f9			| lea Lhighcode, %a1
428	.long	Lhighcode
429	addl	%a5, %a1
430	.word	0x47f9			| lea Lehighcode, %a3
431	.long	Lehighcode
432	addl	%a5, %a3
433#endif
434Lcodecopy:
435	movw	%a1@+,%a2@+		| copy a word
436	cmpl	%a3,%a1			| done yet?
437	jcs	Lcodecopy		| no, keep going
438	jmp	MAXADDR			| go for it!
439
440	/*
441	 * BEGIN MMU TRAMPOLINE.  This section of code is not
442	 * executed in-place.  It's copied to the last page
443	 * of RAM (mapped va == pa) and executed there.
444	 */
445
446Lhighcode:
447	/*
448	 * Set up the vector table, and race to get the MMU
449	 * enabled.
450	 */
451	movl	#_C_LABEL(vectab),%d0	| set Vector Base Register
452	movc	%d0,%vbr
453
454	RELOC(mmutype, %a0)
455	tstl	%a0@			| HP MMU?
456	jeq	Lhpmmu3			| yes, skip
457	cmpl	#MMU_68040,%a0@		| 68040?
458	jne	Lmotommu2		| no, skip
459	movw	#0,INTIOBASE+MMUBASE+MMUCMD+2
460	movw	#MMU_IEN+MMU_CEN+MMU_FPE,INTIOBASE+MMUBASE+MMUCMD+2
461					| enable FPU and caches
462	moveq	#0,%d0			| ensure TT regs are disabled
463	.long	0x4e7b0004		| movc %d0,%itt0
464	.long	0x4e7b0005		| movc %d0,%itt1
465	.long	0x4e7b0006		| movc %d0,%dtt0
466	.long	0x4e7b0007		| movc %d0,%dtt1
467	.word	0xf4d8			| cinva bc
468	.word	0xf518			| pflusha
469	movl	#0x8000,%d0
470	.long	0x4e7b0003		| movc %d0,%tc
471	movl	#0x80008000,%d0
472	movc	%d0,%cacr		| turn on both caches
473	jmp	Lenab1:l		| force absolute addressing (not pc-relative)
474Lmotommu2:
475	movl	#MMU_IEN+MMU_FPE,INTIOBASE+MMUBASE+MMUCMD
476					| enable 68881 and i-cache
477	RELOC(prototc, %a2)
478	movl	#0x82c0aa00,%a2@	| value to load TC with
479	pmove	%a2@,%tc		| load it
480	jmp	Lenab1:l		| force absolute addressing (not pc-relative)
481Lhpmmu3:
482	movl	#0,INTIOBASE+MMUBASE+MMUCMD		| clear external cache
483	movl	#MMU_ENAB,INTIOBASE+MMUBASE+MMUCMD	| turn on MMU
484	jmp	Lenab1:l		| force absolute addressing (not pc-relative)
485Lehighcode:
486
487	/*
488	 * END MMU TRAMPOLINE.  Address register %a5 is now free.
489	 */
490
491/*
492 * Should be running mapped from this point on
493 */
494Lenab1:
495/* select the software page size now */
496	lea	_ASM_LABEL(tmpstk),%sp		| temporary stack
497	jbsr	_C_LABEL(uvm_setpagesize)  	| select software page size
498/* set kernel stack, user SP, and initial pcb */
499	movl	_C_LABEL(proc0paddr),%a1	| get proc0 pcb addr
500	lea	%a1@(USPACE-4),%sp	| set kernel stack to end of area
501	lea	_C_LABEL(proc0),%a2	| initialize proc0.p_addr so that
502	movl	%a1,%a2@(P_ADDR)	|   we don't deref NULL in trap()
503	movl	#USRSTACK-4,%a2
504	movl	%a2,%usp		| init user SP
505	movl	%a1,_C_LABEL(curpcb)	| proc0 is running
506
507	tstl	_C_LABEL(fputype)	| Have an FPU?
508	jeq	Lenab2			| No, skip.
509	clrl	%a1@(PCB_FPCTX)		| ensure null FP context
510	movl	%a1,%sp@-
511	jbsr	_C_LABEL(m68881_restore) | restore it (does not kill %a1)
512	addql	#4,%sp
513Lenab2:
514/* flush TLB and turn on caches */
515	jbsr	_C_LABEL(_TBIA)		| invalidate TLB
516	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
517	jeq	Lnocache0		| yes, cache already on
518	movl	#CACHE_ON,%d0
519	movc	%d0,%cacr		| clear cache(s)
520	tstl	_C_LABEL(ectype)
521	jeq	Lnocache0
522	MMUADDR(%a0)
523	orl	#MMU_CEN,%a0@(MMUCMD)	| turn on external cache
524Lnocache0:
525/* Final setup for call to main(). */
526	jbsr	_C_LABEL(hp300_init)
527
528/*
529 * Create a fake exception frame so that cpu_fork() can copy it.
530 * main() never returns; we exit to user mode from a forked process
531 * later on.
532 */
533	clrw	%sp@-			| vector offset/frame type
534	clrl	%sp@-			| PC - filled in by "execve"
535	movw	#PSL_USER,%sp@-		| in user mode
536	clrl	%sp@-			| stack adjust count and padding
537	lea	%sp@(-64),%sp		| construct space for D0-D7/A0-A7
538	lea	_C_LABEL(proc0),%a0	| save pointer to frame
539	movl	%sp,%a0@(P_MD_REGS)	|   in proc0.p_md.md_regs
540
541	jra	_C_LABEL(main)		| main()
542	PANIC("main() returned")
543	/* NOTREACHED */
544
545/*
546 * proc_trampoline: call function in register %a2 with %a3 as an arg
547 * and then rei.
548 */
549GLOBAL(proc_trampoline)
550	movl	%a3,%sp@-		| push function arg
551	jbsr	%a2@			| call function
552	addql	#4,%sp			| pop arg
553	movl	%sp@(FR_SP),%a0		| grab and load
554	movl	%a0,%usp		|   user SP
555	moveml	%sp@+,#0x7FFF		| restore most user regs
556	addql	#8,%sp			| toss SP and stack adjust
557	jra	_ASM_LABEL(rei)		| and return
558
559
560/*
561 * Trap/interrupt vector routines
562 */
563#include <m68k/m68k/trap_subr.s>
564
565	.data
566GLOBAL(m68k_fault_addr)
567	.long	0
568
569#if defined(M68040) || defined(M68060)
570ENTRY_NOPROFILE(addrerr4060)
571	clrl	%sp@-			| stack adjust count
572	moveml	#0xFFFF,%sp@-		| save user registers
573	movl	%usp,%a0		| save the user SP
574	movl	%a0,%sp@(FR_SP)		|   in the savearea
575	movl	%sp@(FR_HW+8),%sp@-
576	clrl	%sp@-			| dummy code
577	movl	#T_ADDRERR,%sp@-		| mark address error
578	jra	_ASM_LABEL(faultstkadj)	| and deal with it
579#endif
580
581#if defined(M68060)
582ENTRY_NOPROFILE(buserr60)
583	clrl	%sp@-			| stack adjust count
584	moveml	#0xFFFF,%sp@-		| save user registers
585	movl	%usp,%a0		| save the user SP
586	movl	%a0,%sp@(FR_SP)		|   in the savearea
587	movel	%sp@(FR_HW+12),%d0	| FSLW
588	btst	#2,%d0			| branch prediction error?
589	jeq	Lnobpe
590	movc	%cacr,%d2
591	orl	#IC60_CABC,%d2		| clear all branch cache entries
592	movc	%d2,%cacr
593	movl	%d0,%d1
594	addql	#1,L60bpe
595	andl	#0x7ffd,%d1
596	jeq	_ASM_LABEL(faultstkadjnotrap2)
597Lnobpe:
598| we need to adjust for misaligned addresses
599	movl	%sp@(FR_HW+8),%d1	| grab VA
600	btst	#27,%d0			| check for mis-aligned access
601	jeq	Lberr3			| no, skip
602	addl	#28,%d1			| yes, get into next page
603					| operand case: 3,
604					| instruction case: 4+12+12
605	andl	#PG_FRAME,%d1           | and truncate
606Lberr3:
607	movl	%d1,%sp@-
608	movl	%d0,%sp@-		| code is FSLW now.
609	andw	#0x1f80,%d0
610	jeq	Lberr60			| it is a bus error
611	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
612	jra	_ASM_LABEL(faultstkadj)	| and deal with it
613Lberr60:
614	tstl	_C_LABEL(nofault)	| catch bus error?
615	jeq	Lisberr			| no, handle as usual
616	movl	%sp@(FR_HW+8+8),_C_LABEL(m68k_fault_addr) | save fault addr
617	movl	_C_LABEL(nofault),%sp@-	| yes,
618	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
619	/* NOTREACHED */
620#endif
621#if defined(M68040)
622ENTRY_NOPROFILE(buserr40)
623	clrl	%sp@-			| stack adjust count
624	moveml	#0xFFFF,%sp@-		| save user registers
625	movl	%usp,%a0		| save the user SP
626	movl	%a0,%sp@(FR_SP)		|   in the savearea
627	movl	%sp@(FR_HW+20),%d1	| get fault address
628	moveq	#0,%d0
629	movw	%sp@(FR_HW+12),%d0	| get SSW
630	btst	#11,%d0			| check for mis-aligned
631	jeq	Lbe1stpg		| no, skip
632	addl	#3,%d1			| get into next page
633	andl	#PG_FRAME,%d1		| and truncate
634Lbe1stpg:
635	movl	%d1,%sp@-		| pass fault address.
636	movl	%d0,%sp@-		| pass SSW as code
637	btst	#10,%d0			| test ATC
638	jeq	Lberr40			| it is a bus error
639	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
640	jra	_ASM_LABEL(faultstkadj)	| and deal with it
641Lberr40:
642	tstl	_C_LABEL(nofault)	| catch bus error?
643	jeq	Lisberr			| no, handle as usual
644	movl	%sp@(FR_HW+8+20),_C_LABEL(m68k_fault_addr) | save fault addr
645	movl	_C_LABEL(nofault),%sp@-	| yes,
646	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
647	/* NOTREACHED */
648#endif
649
650#if defined(M68020) || defined(M68030)
651ENTRY_NOPROFILE(busaddrerr2030)
652	clrl	%sp@-			| stack adjust count
653	moveml	#0xFFFF,%sp@-		| save user registers
654	movl	%usp,%a0		| save the user SP
655	movl	%a0,%sp@(FR_SP)		|   in the savearea
656	moveq	#0,%d0
657	movw	%sp@(FR_HW+10),%d0	| grab SSW for fault processing
658	btst	#12,%d0			| RB set?
659	jeq	LbeX0			| no, test RC
660	bset	#14,%d0			| yes, must set FB
661	movw	%d0,%sp@(FR_HW+10)	| for hardware too
662LbeX0:
663	btst	#13,%d0			| RC set?
664	jeq	LbeX1			| no, skip
665	bset	#15,%d0			| yes, must set FC
666	movw	%d0,%sp@(FR_HW+10)	| for hardware too
667LbeX1:
668	btst	#8,%d0			| data fault?
669	jeq	Lbe0			| no, check for hard cases
670	movl	%sp@(FR_HW+16),%d1	| fault address is as given in frame
671	jra	Lbe10			| that's it
672Lbe0:
673	btst	#4,%sp@(FR_HW+6)	| long (type B) stack frame?
674	jne	Lbe4			| yes, go handle
675	movl	%sp@(FR_HW+2),%d1	| no, can use save PC
676	btst	#14,%d0			| FB set?
677	jeq	Lbe3			| no, try FC
678	addql	#4,%d1			| yes, adjust address
679	jra	Lbe10			| done
680Lbe3:
681	btst	#15,%d0			| FC set?
682	jeq	Lbe10			| no, done
683	addql	#2,%d1			| yes, adjust address
684	jra	Lbe10			| done
685Lbe4:
686	movl	%sp@(FR_HW+36),%d1	| long format, use stage B address
687	btst	#15,%d0			| FC set?
688	jeq	Lbe10			| no, all done
689	subql	#2,%d1			| yes, adjust address
690Lbe10:
691	movl	%d1,%sp@-		| push fault VA
692	movl	%d0,%sp@-		| and padded SSW
693	movw	%sp@(FR_HW+8+6),%d0	| get frame format/vector offset
694	andw	#0x0FFF,%d0		| clear out frame format
695	cmpw	#12,%d0			| address error vector?
696	jeq	Lisaerr			| yes, go to it
697#if defined(M68K_MMU_MOTOROLA)
698#if defined(M68K_MMU_HP)
699	tstl	_C_LABEL(mmutype)	| HP MMU?
700	jeq	Lbehpmmu		| yes, different MMU fault handler
701#endif
702	movl	%d1,%a0			| fault address
703	movl	%sp@,%d0		| function code from ssw
704	btst	#8,%d0			| data fault?
705	jne	Lbe10a
706	movql	#1,%d0			| user program access FC
707					| (we don't separate data/program)
708	btst	#5,%sp@(FR_HW+8)	| supervisor mode?
709	jeq	Lbe10a			| if no, done
710	movql	#5,%d0			| else supervisor program access
711Lbe10a:
712	ptestr	%d0,%a0@,#7		| do a table search
713	pmove	%psr,%sp@		| save result
714	movb	%sp@,%d1
715	btst	#2,%d1			| invalid (incl. limit viol. and berr)?
716	jeq	Lmightnotbemerr		| no -> wp check
717	btst	#7,%d1			| is it MMU table berr?
718	jne	Lisberr1		| yes, need not be fast.
719#endif /* M68K_MMU_MOTOROLA */
720Lismerr:
721	movl	#T_MMUFLT,%sp@-		| show that we are an MMU fault
722	jra	_ASM_LABEL(faultstkadj)	| and deal with it
723#if defined(M68K_MMU_MOTOROLA)
724Lmightnotbemerr:
725	btst	#3,%d1			| write protect bit set?
726	jeq	Lisberr1		| no: must be bus error
727	movl	%sp@,%d0			| ssw into low word of %d0
728	andw	#0xc0,%d0		| Write protect is set on page:
729	cmpw	#0x40,%d0		| was it read cycle?
730	jne	Lismerr			| no, was not WPE, must be MMU fault
731	jra	Lisberr1		| real bus err need not be fast.
732#endif /* M68K_MMU_MOTOROLA */
733#if defined(M68K_MMU_HP)
734Lbehpmmu:
735	MMUADDR(%a0)
736	movl	%a0@(MMUSTAT),%d0	| read MMU status
737	btst	#3,%d0			| MMU fault?
738	jeq	Lisberr1		| no, just a non-MMU bus error
739	andl	#~MMU_FAULT,%a0@(MMUSTAT)| yes, clear fault bits
740	movw	%d0,%sp@		| pass MMU stat in upper half of code
741	jra	Lismerr			| and handle it
742#endif
743Lisaerr:
744	movl	#T_ADDRERR,%sp@-	| mark address error
745	jra	_ASM_LABEL(faultstkadj)	| and deal with it
746Lisberr1:
747	clrw	%sp@			| re-clear pad word
748	tstl	_C_LABEL(nofault)	| catch bus error?
749	jeq	Lisberr			| no, handle as usual
750	movl	%sp@(FR_HW+8+16),_C_LABEL(m68k_fault_addr) | save fault addr
751	movl	_C_LABEL(nofault),%sp@-	| yes,
752	jbsr	_C_LABEL(longjmp)	|  longjmp(nofault)
753	/* NOTREACHED */
754#endif /* M68020 || M68030 */
755
756Lisberr:				| also used by M68040/60
757	movl	#T_BUSERR,%sp@-		| mark bus error
758	jra	_ASM_LABEL(faultstkadj)	| and deal with it
759
760/*
761 * FP exceptions.
762 */
763ENTRY_NOPROFILE(fpfline)
764#if defined(M68040)
765	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
766	jne	Lfp_unimp		| no, skip FPSP
767	cmpw	#0x202c,%sp@(6)		| format type 2?
768	jne	_C_LABEL(illinst)	| no, not an FP emulation
769Ldofp_unimp:
770#ifdef FPSP
771	jmp	_ASM_LABEL(fpsp_unimp)	| yes, go handle it
772#endif
773Lfp_unimp:
774#endif /* M68040 */
775#ifdef FPU_EMULATE
776	clrl	%sp@-			| stack adjust count
777	moveml	#0xFFFF,%sp@-		| save registers
778	moveq	#T_FPEMULI,%d0		| denote as FP emulation trap
779	jra	_ASM_LABEL(fault)	| do it
780#else
781	jra	_C_LABEL(illinst)
782#endif
783
784ENTRY_NOPROFILE(fpunsupp)
785#if defined(M68040)
786	cmpl	#FPU_68040,_C_LABEL(fputype) | 68040 FPU?
787	jne	_C_LABEL(illinst)	| no, treat as illinst
788#ifdef FPSP
789	jmp	_ASM_LABEL(fpsp_unsupp)	| yes, go handle it
790#endif
791Lfp_unsupp:
792#endif /* M68040 */
793#ifdef FPU_EMULATE
794	clrl	%sp@-			| stack adjust count
795	moveml	#0xFFFF,%sp@-		| save registers
796	moveq	#T_FPEMULD,%d0		| denote as FP emulation trap
797	jra	_ASM_LABEL(fault)	| do it
798#else
799	jra	_C_LABEL(illinst)
800#endif
801
802/*
803 * Handles all other FP coprocessor exceptions.
804 * Note that since some FP exceptions generate mid-instruction frames
805 * and may cause signal delivery, we need to test for stack adjustment
806 * after the trap call.
807 */
808ENTRY_NOPROFILE(fpfault)
809	clrl	%sp@-			| stack adjust count
810	moveml	#0xFFFF,%sp@-		| save user registers
811	movl	%usp,%a0		| and save
812	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
813	clrl	%sp@-			| no VA arg
814	movl	_C_LABEL(curpcb),%a0	| current pcb
815	lea	%a0@(PCB_FPCTX),%a0	| address of FP savearea
816	fsave	%a0@			| save state
817#if defined(M68040) || defined(M68060)
818	/* always null state frame on 68040, 68060 */
819	cmpl	#FPU_68040,_C_LABEL(fputype)
820	jle	Lfptnull
821#endif
822	tstb	%a0@			| null state frame?
823	jeq	Lfptnull		| yes, safe
824	clrw	%d0			| no, need to tweak BIU
825	movb	%a0@(1),%d0		| get frame size
826	bset	#3,%a0@(0,%d0:w)	| set exc_pend bit of BIU
827Lfptnull:
828	fmovem	%fpsr,%sp@-		| push %fpsr as code argument
829	frestore %a0@			| restore state
830	movl	#T_FPERR,%sp@-		| push type arg
831	jra	_ASM_LABEL(faultstkadj) | call trap and deal with stack cleanup
832
833/*
834 * Other exceptions only cause four and six word stack frame and require
835 * no post-trap stack adjustment.
836 */
837
838ENTRY_NOPROFILE(badtrap)
839	moveml	#0xC0C0,%sp@-		| save scratch regs
840	movw	%sp@(22),%sp@-		| push exception vector info
841	clrw	%sp@-
842	movl	%sp@(22),%sp@-		| and PC
843	jbsr	_C_LABEL(straytrap)	| report
844	addql	#8,%sp			| pop args
845	moveml	%sp@+,#0x0303		| restore regs
846	jra	_ASM_LABEL(rei)		| all done
847
848ENTRY_NOPROFILE(trap0)
849	clrl	%sp@-			| stack adjust count
850	moveml	#0xFFFF,%sp@-		| save user registers
851	movl	%usp,%a0		| save the user SP
852	movl	%a0,%sp@(FR_SP)		|   in the savearea
853	movl	%d0,%sp@-		| push syscall number
854	jbsr	_C_LABEL(syscall)	| handle it
855	addql	#4,%sp			| pop syscall arg
856	tstl	_C_LABEL(astpending)
857	jne	Lrei2
858	tstb	_C_LABEL(ssir)
859	jeq	Ltrap1
860	movw	#SPL1,%sr
861	tstb	_C_LABEL(ssir)
862	jne	Lsir1
863Ltrap1:
864	movl	%sp@(FR_SP),%a0		| grab and restore
865	movl	%a0,%usp			|   user SP
866	moveml	%sp@+,#0x7FFF		| restore most registers
867	addql	#8,%sp			| pop SP and stack adjust
868	rte
869
870/*
871 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
872 *	cachectl(command, addr, length)
873 * command in %d0, addr in %a1, length in %d1
874 */
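/*
 * Illustrative call sequence (hypothetical; "command", "addr" and
 * "length" below are placeholders, not real symbols):
 *
 *	movl	#command,%d0		| cachectl command
 *	movl	#addr,%a1		| start address
 *	movl	#length,%d1		| length
 *	trap	#12
 */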
875ENTRY_NOPROFILE(trap12)
876	movl	_C_LABEL(curproc),%sp@-	| push current proc pointer
877	movl	%d1,%sp@-		| push length
878	movl	%a1,%sp@-		| push addr
879	movl	%d0,%sp@-		| push command
880	jbsr	_C_LABEL(cachectl1)	| do it
881	lea	%sp@(16),%sp		| pop args
882	jra	_ASM_LABEL(rei)		| all done
883
884/*
885 * Trace (single-step) trap.  Kernel-mode is special.
886 * User mode traps are simply passed on to trap().
887 */
888ENTRY_NOPROFILE(trace)
889	clrl	%sp@-			| stack adjust count
890	moveml	#0xFFFF,%sp@-
891	moveq	#T_TRACE,%d0
892
893	| Check PSW and see what happened.
894	|   T=0 S=0	(should not happen)
895	|   T=1 S=0	trace trap from user mode
896	|   T=0 S=1	trace trap on a trap instruction
897	|   T=1 S=1	trace trap from system mode (kernel breakpoint)
898
899	movw	%sp@(FR_HW),%d1		| get PSW
900	notw	%d1			| XXX no support for T0 on 680[234]0
901	andw	#PSL_TS,%d1		| from system mode (T=1, S=1)?
902	jeq	Lkbrkpt			| yes, kernel breakpoint
903	jra	_ASM_LABEL(fault)	| no, user-mode fault
904
905/*
906 * Trap 15 is used for:
907 *	- GDB breakpoints (in user programs)
908 *	- KGDB breakpoints (in the kernel)
909 *	- trace traps for SUN binaries (not fully supported yet)
910 * User mode traps are simply passed to trap().
911 */
912ENTRY_NOPROFILE(trap15)
913	clrl	%sp@-			| stack adjust count
914	moveml	#0xFFFF,%sp@-
915	moveq	#T_TRAP15,%d0
916	movw	%sp@(FR_HW),%d1		| get PSW
917	andw	#PSL_S,%d1		| from system mode?
918	jne	Lkbrkpt			| yes, kernel breakpoint
919	jra	_ASM_LABEL(fault)	| no, user-mode fault
920
921Lkbrkpt: | Kernel-mode breakpoint or trace trap. (%d0=trap_type)
922	| Save the system sp rather than the user sp.
923	movw	#PSL_HIGHIPL,%sr	| lock out interrupts
924	lea	%sp@(FR_SIZE),%a6	| Save stack pointer
925	movl	%a6,%sp@(FR_SP)		|  from before trap
926
927	| If we are not on tmpstk, switch to it.
928	| (so debugger can change the stack pointer)
929	movl	%a6,%d1
930	cmpl	#_ASM_LABEL(tmpstk),%d1
931	jls	Lbrkpt2			| already on tmpstk
932	| Copy frame to the temporary stack
933	movl	%sp,%a0			| %a0=src
934	lea	_ASM_LABEL(tmpstk)-96,%a1 | a1=dst
935	movl	%a1,%sp			| %sp=new frame
936	moveq	#FR_SIZE,%d1
937Lbrkpt1:
938	movl	%a0@+,%a1@+
939	subql	#4,%d1
940	bgt	Lbrkpt1
941
942Lbrkpt2:
943	| Call the trap handler for the kernel debugger.
944	| Do not call trap() to do it, so that we can
945	| set breakpoints in trap() if we want.  We know
946	| the trap type is either T_TRACE or T_BREAKPOINT.
947	| If we have both DDB and KGDB, let KGDB see it first,
948	| because KGDB will just return 0 if not connected.
949	| Save args in %d2, %a2
950	movl	%d0,%d2			| trap type
951	movl	%sp,%a2			| frame ptr
952#ifdef KGDB
953	| Let KGDB handle it (if connected)
954	movl	%a2,%sp@-		| push frame ptr
955	movl	%d2,%sp@-		| push trap type
956	jbsr	_C_LABEL(kgdb_trap)	| handle the trap
957	addql	#8,%sp			| pop args
958	cmpl	#0,%d0			| did kgdb handle it?
959	jne	Lbrkpt3			| yes, done
960#endif
961#ifdef DDB
962	| Let DDB handle it
963	movl	%a2,%sp@-		| push frame ptr
964	movl	%d2,%sp@-		| push trap type
965	jbsr	_C_LABEL(kdb_trap)	| handle the trap
966	addql	#8,%sp			| pop args
967#if 0	/* not needed on hp300 */
968	cmpl	#0,%d0			| did ddb handle it?
969	jne	Lbrkpt3			| yes, done
970#endif
971#endif
972	/* Sun 3 drops into PROM here. */
973Lbrkpt3:
974	| The stack pointer may have been modified, or
975	| data below it modified (by kgdb push call),
976	| so push the hardware frame at the current sp
977	| before restoring registers and returning.
978
979	movl	%sp@(FR_SP),%a0		| modified %sp
980	lea	%sp@(FR_SIZE),%a1	| end of our frame
981	movl	%a1@-,%a0@-		| copy 2 longs with
982	movl	%a1@-,%a0@-		| ... predecrement
983	movl	%a0,%sp@(FR_SP)		| %sp = h/w frame
984	moveml	%sp@+,#0x7FFF		| restore all but %sp
985	movl	%sp@,%sp		| ... and %sp
986	rte				| all done
987
988/* Use common m68k sigreturn */
989#include <m68k/m68k/sigreturn.s>
990
991/*
992 * Interrupt handlers.
993 * All device interrupts are auto-vectored.  The CPU provides
994 * the vector 0x18+level.  Note we count spurious interrupts, but
995 * we don't do anything else with them.
996 */
997
998#define INTERRUPT_SAVEREG	moveml	#0xC0C0,%sp@-
999#define INTERRUPT_RESTOREREG	moveml	%sp@+,#0x0303
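
/*
 * The movem masks above select the interrupt-time scratch registers
 * %d0-%d1/%a0-%a1: 0xC0C0 for the predecrement (save) form and the
 * bit-reversed 0x0303 for the postincrement (restore) form.
 */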
1000
1001/* 64-bit evcnt counter increments */
1002#define EVCNT_COUNTER(ipl)					\
1003	_C_LABEL(hp300_intr_list) + (ipl)*SIZEOF_HI + HI_EVCNT
1004#define EVCNT_INCREMENT(ipl)					\
1005	movel	%d2,-(%sp);					\
1006	clrl	%d0;						\
1007	moveql	#1,%d1;						\
1008	addl	%d1,EVCNT_COUNTER(ipl)+4;			\
1009	movel	EVCNT_COUNTER(ipl),%d2;				\
1010	addxl	%d0,%d2;					\
1011	movel	%d2,EVCNT_COUNTER(ipl);				\
1012	movel	(%sp)+,%d2
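
/*
 * Sketch of the 64-bit increment above: the evcnt counter is stored
 * high longword first, so the macro adds 1 to the low longword at
 * offset 4 and then folds the carry into the high longword with addx,
 * in effect
 *
 *	ev_count += 1;			(a 64-bit increment in C terms)
 *
 * using %d0/%d1 as scratch and preserving %d2 across the sequence.
 */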
1013
1014ENTRY_NOPROFILE(spurintr)	/* level 0 */
1015	EVCNT_INCREMENT(0)
1016	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS
1017	jra	_ASM_LABEL(rei)
1018
1019ENTRY_NOPROFILE(intrhand)	/* levels 1 through 5 */
1020	INTERRUPT_SAVEREG
1021	movw	%sp@(22),%sp@-		| push exception vector info
1022	clrw	%sp@-
1023	jbsr	_C_LABEL(intr_dispatch)	| call dispatch routine
1024	addql	#4,%sp
1025	INTERRUPT_RESTOREREG
1026	jra	_ASM_LABEL(rei)		| all done
1027
1028ENTRY_NOPROFILE(lev6intr)	/* level 6: clock */
1029	INTERRUPT_SAVEREG
1030	CLKADDR(%a0)
1031	movb	%a0@(CLKSR),%d0		| read clock status
1032Lclkagain:
1033	btst	#0,%d0			| clear timer1 int immediately to
1034	jeq	Lnotim1			|  minimize chance of losing another
1035	movpw	%a0@(CLKMSB1),%d1	|  due to statintr processing delay
1036Lnotim1:
1037	btst	#2,%d0			| timer3 interrupt?
1038	jeq	Lnotim3			| no, skip statclock
1039	movpw	%a0@(CLKMSB3),%d1	| clear timer3 interrupt
1040	lea	%sp@(16),%a1		| a1 = &clockframe
1041	movl	%d0,%sp@-		| save status
1042	movl	%a1,%sp@-
1043	jbsr	_C_LABEL(statintr)	| statintr(&frame)
1044	addql	#4,%sp
1045	movl	%sp@+,%d0		| restore pre-statintr status
1046	CLKADDR(%a0)
1047Lnotim3:
1048	btst	#0,%d0			| timer1 interrupt?
1049	jeq	Lrecheck		| no, skip hardclock
1050	EVCNT_INCREMENT(6)
1051	lea	%sp@(16),%a1		| a1 = &clockframe
1052	movl	%a1,%sp@-
1053#ifdef USELEDS
1054	tstl	_C_LABEL(ledaddr)	| using LEDs?
1055	jeq	Lnoleds0		| no, skip this code
1056	movl	_ASM_LABEL(heartbeat),%d0 | get tick count
1057	addql	#1,%d0			|  increment
1058	movl	_C_LABEL(hz),%d1
1059	addl	#50,%d1			| get the timing a little closer
1060	tstb	_ASM_LABEL(beatstatus)	| time to slow down?
1061	jeq	Lslowthrob		| yes, slow down
1062	lsrl	#3,%d1			| no, fast throb
1063Lslowthrob:
1064	lsrl	#1,%d1			| slow throb
1065	cmpl	%d0,%d1			| are we there yet?
1066	jne	Lnoleds1		| no, nothing to do
1067	addqb	#1,_ASM_LABEL(beatstatus) | incr beat status
1068	cmpb	#3,_ASM_LABEL(beatstatus) | time to reset?
1069	jle	Ltwinkle		  | no, twinkle the lights
1070	movb	#0,_ASM_LABEL(beatstatus) | reset the status indicator
1071Ltwinkle:
1072	movl	#LED_PULSE,%sp@-
1073	movl	#LED_DISK+LED_LANRCV+LED_LANXMT,%sp@-
1074	clrl	%sp@-
1075	jbsr	_C_LABEL(ledcontrol)	| toggle pulse, turn all others off
1076	lea	%sp@(12),%sp
1077	movql	#0,%d0
1078Lnoleds1:
1079	movl	%d0,_ASM_LABEL(heartbeat)
1080Lnoleds0:
1081#endif /* USELEDS */
1082	jbsr	_C_LABEL(hardclock)	| hardclock(&frame)
1083	addql	#4,%sp
1084	CLKADDR(%a0)
1085Lrecheck:
1086	addql	#1,_C_LABEL(uvmexp)+UVMEXP_INTRS | chalk up another interrupt
1087	movb	%a0@(CLKSR),%d0		| see if anything happened
1088	jmi	Lclkagain		|  while we were in hardclock/statintr
1089	INTERRUPT_RESTOREREG
1090	jra	_ASM_LABEL(rei)		| all done
1091
1092ENTRY_NOPROFILE(lev7intr)	/* level 7: parity errors, reset key */
1093	EVCNT_INCREMENT(7)
1094	clrl	%sp@-
1095	moveml	#0xFFFF,%sp@-		| save registers
1096	movl	%usp,%a0		| and save
1097	movl	%a0,%sp@(FR_SP)		|   the user stack pointer
1098	jbsr	_C_LABEL(nmihand)	| call handler
1099	movl	%sp@(FR_SP),%a0		| restore
1100	movl	%a0,%usp		|   user SP
1101	moveml	%sp@+,#0x7FFF		| and remaining registers
1102	addql	#8,%sp			| pop SP and stack adjust
1103	jra	_ASM_LABEL(rei)		| all done
1104
1105/*
1106 * Emulation of VAX REI instruction.
1107 *
1108 * This code deals with checking for and servicing ASTs (profiling,
1109 * scheduling) and software interrupts.  We check for ASTs first, just
1110 * like the VAX.  After identifying that we need an AST we
1111 * drop the IPL to allow device interrupts.
1112 *
1113 * This code is complicated by the fact that sendsig may have been called
1114 * necessitating a stack cleanup.
1115 */
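
/*
 * Rough outline of the flow below:
 *
 *	if (astpending and the saved PS shows a return to user mode) {
 *		lower IPL, save a full frame, trap(T_ASTFLT);
 *	} else if (ssir and we did not interrupt an interrupt handler) {
 *		raise IPL to SPL1, save a full frame, trap(T_SSIR);
 *	}
 *	rte;
 */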
1116
1117ASENTRY_NOPROFILE(rei)
1118	tstl	_C_LABEL(astpending)	| AST pending?
1119	jeq	Lchksir			| no, go check for SIR
1120Lrei1:
1121	btst	#5,%sp@			| yes, are we returning to user mode?
1122	jne	Lchksir			| no, go check for SIR
1123	movw	#PSL_LOWIPL,%sr		| lower SPL
1124	clrl	%sp@-			| stack adjust
1125	moveml	#0xFFFF,%sp@-		| save all registers
1126	movl	%usp,%a1		| including
1127	movl	%a1,%sp@(FR_SP)		|    the users SP
1128Lrei2:
1129	clrl	%sp@-			| VA == none
1130	clrl	%sp@-			| code == none
1131	movl	#T_ASTFLT,%sp@-		| type == async system trap
1132	jbsr	_C_LABEL(trap)		| go handle it
1133	lea	%sp@(12),%sp		| pop value args
1134	movl	%sp@(FR_SP),%a0		| restore user SP
1135	movl	%a0,%usp		|   from save area
1136	movw	%sp@(FR_ADJ),%d0	| need to adjust stack?
1137	jne	Laststkadj		| yes, go to it
1138	moveml	%sp@+,#0x7FFF		| no, restore most user regs
1139	addql	#8,%sp			| toss SP and stack adjust
1140	rte				| and do real RTE
1141Laststkadj:
1142	lea	%sp@(FR_HW),%a1		| pointer to HW frame
1143	addql	#8,%a1			| source pointer
1144	movl	%a1,%a0			| source
1145	addw	%d0,%a0			|  + hole size = dest pointer
1146	movl	%a1@-,%a0@-		| copy
1147	movl	%a1@-,%a0@-		|  8 bytes
1148	movl	%a0,%sp@(FR_SP)		| new SSP
1149	moveml	%sp@+,#0x7FFF		| restore user registers
1150	movl	%sp@,%sp		| and our SP
1151	rte				| and do real RTE
1152Lchksir:
1153	tstb	_C_LABEL(ssir)		| SIR pending?
1154	jeq	Ldorte			| no, all done
1155	movl	%d0,%sp@-		| need a scratch register
1156	movw	%sp@(4),%d0		| get SR
1157	andw	#PSL_IPL7,%d0		| mask all but IPL
1158	jne	Lnosir			| came from interrupt, no can do
1159	movl	%sp@+,%d0		| restore scratch register
1160Lgotsir:
1161	movw	#SPL1,%sr		| prevent others from servicing int
1162	tstb	_C_LABEL(ssir)		| too late?
1163	jeq	Ldorte			| yes, oh well...
1164	clrl	%sp@-			| stack adjust
1165	moveml	#0xFFFF,%sp@-		| save all registers
1166	movl	%usp,%a1		| including
1167	movl	%a1,%sp@(FR_SP)		|    the users SP
1168Lsir1:
1169	clrl	%sp@-			| VA == none
1170	clrl	%sp@-			| code == none
1171	movl	#T_SSIR,%sp@-		| type == software interrupt
1172	jbsr	_C_LABEL(trap)		| go handle it
1173	lea	%sp@(12),%sp		| pop value args
1174	movl	%sp@(FR_SP),%a0		| restore
1175	movl	%a0,%usp		|   user SP
1176	moveml	%sp@+,#0x7FFF		| and all remaining registers
1177	addql	#8,%sp			| pop SP and stack adjust
1178	rte
1179Lnosir:
1180	movl	%sp@+,%d0		| restore scratch register
1181Ldorte:
1182	rte				| real return
1183
1184/*
1185 * Use common m68k sigcode.
1186 */
1187#include <m68k/m68k/sigcode.s>
1188#ifdef COMPAT_SUNOS
1189#include <m68k/m68k/sunos_sigcode.s>
1190#endif
1191#ifdef COMPAT_SVR4
1192#include <m68k/m68k/svr4_sigcode.s>
1193#endif
1194
1195/*
1196 * Primitives
1197 */
1198
1199/*
1200 * Use common m68k support routines.
1201 */
1202#include <m68k/m68k/support.s>
1203
1204/*
1205 * Use common m68k process manipulation routines.
1206 */
1207#include <m68k/m68k/proc_subr.s>
1208
1209	.data
1210GLOBAL(curpcb)
1211GLOBAL(masterpaddr)			| XXX compatibility (debuggers)
1212	.long	0
1213
1214ASLOCAL(mdpflag)
1215	.byte	0			| copy of proc md_flags low byte
1216#ifdef __ELF__
1217	.align	4
1218#else
1219	.align	2
1220#endif
1221
1222ASBSS(nullpcb,SIZEOF_PCB)
1223
1224/*
1225 * At exit of a process, do a switch for the last time.
1226 * Switch to a safe stack and PCB, and select a new process to run.  The
1227 * old stack and u-area will be freed by the reaper.
1228 *
1229 * MUST BE CALLED AT SPLHIGH!
1230 */
1231ENTRY(switch_exit)
1232	movl	%sp@(4),%a0
1233	/* save state into garbage pcb */
1234	movl	#_ASM_LABEL(nullpcb),_C_LABEL(curpcb)
1235	lea	_ASM_LABEL(tmpstk),%sp	| goto a tmp stack
1236
1237	/* Schedule the vmspace and stack to be freed. */
1238	movl	%a0,%sp@-		| exit2(p)
1239	jbsr	_C_LABEL(exit2)
1240	lea	%sp@(4),%sp		| pop args
1241
1242#if defined(LOCKDEBUG)
1243	/* Acquire sched_lock */
1244	jbsr	_C_LABEL(sched_lock_idle)
1245#endif
1246
1247	jra	_C_LABEL(cpu_switch)
1248
1249/*
1250 * When no processes are on the runq, Swtch branches to Idle
1251 * to wait for something to come ready.
1252 */
1253ASENTRY_NOPROFILE(Idle)
1254#if defined(LOCKDEBUG)
1255	/* Release sched_lock */
1256	jbsr	_C_LABEL(sched_unlock_idle)
1257#endif
1258	stop	#PSL_LOWIPL
1259	movw	#PSL_HIGHIPL,%sr
1260#if defined(LOCKDEBUG)
1261	/* Acquire sched_lock */
1262	jbsr	_C_LABEL(sched_lock_idle)
1263#endif
1264	movl	_C_LABEL(sched_whichqs),%d0
1265	jeq	_ASM_LABEL(Idle)
1266	jra	Lsw1
1267
1268Lbadsw:
1269	PANIC("switch")
1270	/*NOTREACHED*/
1271
1272/*
1273 * cpu_switch()
1274 *
1275 * NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the
1276 * entire ATC.  The effort involved in selective flushing may not be
1277 * worth it, maybe we should just flush the whole thing?
1278 *
1279 * NOTE 2: With the new VM layout we now no longer know if an inactive
1280 * user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
1281 * bit).  For now, we just always flush the full ATC.
1282 */
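/*
 * In outline, the code below:
 *
 *	1. saves %sr in the outgoing pcb and clears curproc;
 *	2. picks the highest-priority non-empty run queue from
 *	   sched_whichqs and unlinks its first proc;
 *	3. saves the outgoing registers, USP and FP state in the old pcb;
 *	4. calls pmap_activate() for the new proc and checks for
 *	   restartable atomic sequences (RAS);
 *	5. reloads registers, USP and FP state from the new pcb and
 *	   restores %sr.
 */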
1283ENTRY(cpu_switch)
1284	movl	_C_LABEL(curpcb),%a0	| current pcb
1285	movw	%sr,%a0@(PCB_PS)	| save %sr before changing ipl
1286#ifdef notyet
1287	movl	_C_LABEL(curproc),%sp@-	| remember last proc running
1288#endif
1289	clrl	_C_LABEL(curproc)
1290
1291	/*
1292	 * Find the highest-priority queue that isn't empty,
1293	 * then take the first proc from that queue.
1294	 */
1295	movl	_C_LABEL(sched_whichqs),%d0
1296	jeq	_ASM_LABEL(Idle)
1297Lsw1:
1298	/*
1299	 * Interrupts are blocked, sched_lock is held.  If
1300	 * we come here via Idle, %d0 contains the contents
1301	 * of a non-zero sched_whichqs.
1302	 */
1303	movl	%d0,%d1			| copy sched_whichqs bits
1304	negl	%d0			| %d0 = -%d0
1305	andl	%d1,%d0			| %d0 & -%d0 isolates the lowest set bit
1306	bfffo	%d0{#0:#32},%d1		| find its offset from the MSB
1307	eorib	#31,%d1			| convert offset to queue (bit) number
1308
1309	movl	%d1,%d0
1310	lslb	#3,%d1			| convert queue number to index
1311	addl	#_C_LABEL(sched_qs),%d1	| locate queue (q)
1312	movl	%d1,%a1
1313	movl	%a1@(P_FORW),%a0	| p = q->p_forw
1314	cmpal	%d1,%a0			| anyone on queue?
1315	jeq	Lbadsw			| no, panic
1316#ifdef DIAGNOSTIC
1317	tstl	%a0@(P_WCHAN)
1318	jne	Lbadsw
1319	cmpb	#SRUN,%a0@(P_STAT)
1320	jne	Lbadsw
1321#endif
1322	movl	%a0@(P_FORW),%a1@(P_FORW)	| q->p_forw = p->p_forw
1323	movl	%a0@(P_FORW),%a1	| n = p->p_forw
1324	movl	%d1,%a1@(P_BACK)	| n->p_back = q
1325	cmpal	%d1,%a1			| anyone left on queue?
1326	jne	Lsw2			| yes, skip
1327	movl	_C_LABEL(sched_whichqs),%d1
1328	bclr	%d0,%d1			| no, clear bit
1329	movl	%d1,_C_LABEL(sched_whichqs)
1330Lsw2:
1331	/* p->p_cpu initialized in fork1() for single-processor */
1332	movb	#SONPROC,%a0@(P_STAT)	| p->p_stat = SONPROC
1333	movl	%a0,_C_LABEL(curproc)
1334	clrl	_C_LABEL(want_resched)
1335#ifdef notyet
1336	movl	%sp@+,%a1
1337	cmpl	%a0,%a1			| switching to same proc?
1338	jeq	Lswdone			| yes, skip save and restore
1339#endif
1340	/*
1341	 * Save state of previous process in its pcb.
1342	 */
1343	movl	_C_LABEL(curpcb),%a1
1344	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
1345	movl	%usp,%a2		| grab USP (%a2 has been saved)
1346	movl	%a2,%a1@(PCB_USP)	| and save it
1347
1348	tstl	_C_LABEL(fputype)	| Do we have an FPU?
1349	jeq	Lswnofpsave		| No?  Then don't attempt save.
1350	lea	%a1@(PCB_FPCTX),%a2	| pointer to FP save area
1351	fsave	%a2@			| save FP state
1352	tstb	%a2@			| null state frame?
1353	jeq	Lswnofpsave		| yes, all done
1354	fmovem	%fp0-%fp7,%a2@(FPF_REGS) | save FP general registers
1355	fmovem	%fpcr/%fpsr/%fpi,%a2@(FPF_FPCR)	| save FP control registers
1356Lswnofpsave:
1357
1358	clrl	%a0@(P_BACK)		| clear back link
1359	movb	%a0@(P_MD_FLAGS+3),mdpflag | low byte of p_md.md_flags
1360	movl	%a0@(P_ADDR),%a1	| get p_addr
1361	movl	%a1,_C_LABEL(curpcb)
1362
1363#if defined(LOCKDEBUG)
1364	/*
1365	 * Done mucking with the run queues, release the
1366	 * scheduler lock, but keep interrupts out.
1367	 */
1368	movl	%a0,%sp@-		| not args...
1369	movl	%a1,%sp@-		| ...just saving
1370	jbsr	_C_LABEL(sched_unlock_idle)
1371	movl	%sp@+,%a1
1372	movl	%sp@+,%a0
1373#endif
1374
1375	/*
1376	 * Activate process's address space.
1377	 * XXX Should remember the last USTP value loaded, and call this
1378	 * XXX only if it has changed.
1379	 */
1380	pea	%a0@			| push proc
1381	jbsr	_C_LABEL(pmap_activate)	| pmap_activate(p)
1382	addql	#4,%sp
1383
1384/*
1385 *  Check for restartable atomic sequences (RAS)
1386 */
1387	movl	_C_LABEL(curproc),%a0
1388	tstl	%a0@(P_NRAS)
1389	jeq	1f
1390	movl	%a0@(P_MD_REGS),%a1
1391	movl	%a1@(TF_PC),%sp@-
1392	movl	%a0,%sp@-
1393	jbsr	_C_LABEL(ras_lookup)
1394	addql	#8,%sp
1395	movql	#-1,%d0
1396	cmpl	%a0,%d0
1397	jeq	1f
1398	movl	_C_LABEL(curproc),%a1
1399	movl	%a1@(P_MD_REGS),%a1
1400	movel	%a0,%a1@(TF_PC)
14011:
1402	movl	_C_LABEL(curpcb),%a1	| restore p_addr
1403	lea	_ASM_LABEL(tmpstk),%sp	| now goto a tmp stack for NMI
1404
1405	moveml	%a1@(PCB_REGS),#0xFCFC	| and registers
1406	movl	%a1@(PCB_USP),%a0
1407	movl	%a0,%usp		| and USP
1408
1409	tstl	_C_LABEL(fputype)	| If we don't have an FPU,
1410	jeq	Lnofprest		|  don't try to restore it.
1411	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
1412	tstb	%a0@			| null state frame?
1413	jeq	Lresfprest		| yes, easy
1414#if defined(M68040)
1415#if defined(M68020) || defined(M68030)
1416	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1417	jne	Lresnot040		| no, skip
1418#endif
1419	clrl	%sp@-			| yes...
1420	frestore %sp@+			| ...magic!
1421Lresnot040:
1422#endif
1423	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control registers
1424	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1425Lresfprest:
1426	frestore %a0@			| restore state
1427
1428Lnofprest:
1429	movw	%a1@(PCB_PS),%sr	| no, restore PS
1430	moveq	#1,%d0			| return 1 (for alternate returns)
1431	rts
1432
1433/*
1434 * savectx(pcb)
1435 * Update pcb, saving current processor state.
1436 */
1437ENTRY(savectx)
1438	movl	%sp@(4),%a1
1439	movw	%sr,%a1@(PCB_PS)
1440	movl	%usp,%a0		| grab USP
1441	movl	%a0,%a1@(PCB_USP)	| and save it
1442	moveml	#0xFCFC,%a1@(PCB_REGS)	| save non-scratch registers
1443
1444	tstl	_C_LABEL(fputype)	| Do we have FPU?
1445	jeq	Lsvnofpsave		| No?  Then don't save state.
1446	lea	%a1@(PCB_FPCTX),%a0	| pointer to FP save area
1447	fsave	%a0@			| save FP state
1448	tstb	%a0@			| null state frame?
1449	jeq	Lsvnofpsave		| yes, all done
1450	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1451	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control registers
1452Lsvnofpsave:
1453	moveq	#0,%d0			| return 0
1454	rts
1455
1456#if defined(M68040)
1457ENTRY(suline)
1458	movl	%sp@(4),%a0		| address to write
1459	movl	_C_LABEL(curpcb),%a1	| current pcb
1460	movl	#Lslerr,%a1@(PCB_ONFAULT) | where to return to on a fault
1461	movl	%sp@(8),%a1		| address of line
1462	movl	%a1@+,%d0		| get lword
1463	movsl	%d0,%a0@+		| put lword
1464	nop				| sync
1465	movl	%a1@+,%d0		| get lword
1466	movsl	%d0,%a0@+		| put lword
1467	nop				| sync
1468	movl	%a1@+,%d0		| get lword
1469	movsl	%d0,%a0@+		| put lword
1470	nop				| sync
1471	movl	%a1@+,%d0		| get lword
1472	movsl	%d0,%a0@+		| put lword
1473	nop				| sync
1474	moveq	#0,%d0			| indicate no fault
1475	jra	Lsldone
1476Lslerr:
1477	moveq	#-1,%d0
1478Lsldone:
1479	movl	_C_LABEL(curpcb),%a1	| current pcb
1480	clrl	%a1@(PCB_ONFAULT) 	| clear fault address
1481	rts
1482#endif
1483
1484ENTRY(ecacheon)
1485	tstl	_C_LABEL(ectype)
1486	jeq	Lnocache7
1487	MMUADDR(%a0)
1488	orl	#MMU_CEN,%a0@(MMUCMD)
1489Lnocache7:
1490	rts
1491
1492ENTRY(ecacheoff)
1493	tstl	_C_LABEL(ectype)
1494	jeq	Lnocache8
1495	MMUADDR(%a0)
1496	andl	#~MMU_CEN,%a0@(MMUCMD)
1497Lnocache8:
1498	rts
1499
1500ENTRY_NOPROFILE(getsfc)
1501	movc	%sfc,%d0
1502	rts
1503
1504ENTRY_NOPROFILE(getdfc)
1505	movc	%dfc,%d0
1506	rts
1507
1508/*
1509 * Load a new user segment table pointer.
1510 */
1511ENTRY(loadustp)
1512#if defined(M68K_MMU_MOTOROLA)
1513	tstl	_C_LABEL(mmutype)	| HP MMU?
1514	jeq	Lhpmmu9			| yes, skip
1515	movl	%sp@(4),%d0		| new USTP
1516	moveq	#PGSHIFT,%d1
1517	lsll	%d1,%d0			| convert to addr
1518#if defined(M68040)
1519	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1520	jne	LmotommuC		| no, skip
1521	.word	0xf518			| yes, pflusha
1522	.long	0x4e7b0806		| movc %d0,%urp
1523	rts
1524LmotommuC:
1525#endif
1526	pflusha				| flush entire TLB
1527	lea	_C_LABEL(protorp),%a0	| CRP prototype
1528	movl	%d0,%a0@(4)		| stash USTP
1529	pmove	%a0@,%crp		| load root pointer
1530	movl	#CACHE_CLR,%d0
1531	movc	%d0,%cacr		| invalidate cache(s)
1532	rts
1533Lhpmmu9:
1534#endif
1535#if defined(M68K_MMU_HP)
1536	movl	#CACHE_CLR,%d0
1537	movc	%d0,%cacr		| invalidate cache(s)
1538	MMUADDR(%a0)
1539	movl	%a0@(MMUTBINVAL),%d1	| invalidate TLB
1540	tstl	_C_LABEL(ectype)	| have external VAC?
1541	jle	1f			| no, skip
1542	andl	#~MMU_CEN,%a0@(MMUCMD)	| toggle cache enable
1543	orl	#MMU_CEN,%a0@(MMUCMD)	| to clear data cache
15441:
1545	movl	%sp@(4),%a0@(MMUUSTP)	| load a new USTP
1546#endif
1547	rts
1548
1549ENTRY(ploadw)
1550#if defined(M68K_MMU_MOTOROLA)
1551	movl	%sp@(4),%a0		| address to load
1552#if defined(M68K_MMU_HP)
1553	tstl	_C_LABEL(mmutype)	| HP MMU?
1554	jeq	Lploadwskp		| yes, skip
1555#endif
1556#if defined(M68040)
1557	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1558	jeq	Lploadwskp		| yes, skip
1559#endif
1560	ploadw	#1,%a0@			| pre-load translation
1561Lploadwskp:
1562#endif
1563	rts
1564
1565/*
1566 * Set processor priority level calls.  Most are implemented with
1567 * inline asm expansions.  However, spl0 requires special handling
1568 * as we need to check for our emulated software interrupts.
1569 */
1570
1571ENTRY(spl0)
1572	moveq	#0,%d0
1573	movw	%sr,%d0			| get old SR for return
1574	movw	#PSL_LOWIPL,%sr		| restore new SR
1575	tstb	_C_LABEL(ssir)		| software interrupt pending?
1576	jeq	Lspldone		| no, all done
1577	subql	#4,%sp			| make room for RTE frame
1578	movl	%sp@(4),%sp@(2)		| position return address
1579	clrw	%sp@(6)			| set frame type 0
1580	movw	#PSL_LOWIPL,%sp@	| and new SR
1581	jra	Lgotsir			| go handle it
1582Lspldone:
1583	rts
1584
1585/*
1586 * _delay(u_int N)
1587 *
1588 * Delay for at least (N/256) microseconds.
1589 * This routine depends on the variable:  delay_divisor
1590 * which should be set based on the CPU clock rate.
1591 */
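/*
 * Worked example: to busy-wait roughly 100 microseconds a caller passes
 * N = 100 * 256 = 25600; the loop below then runs about
 * N / delay_divisor iterations, delay_divisor having been chosen so
 * that one iteration costs about delay_divisor/256 microseconds.
 */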
1592ENTRY_NOPROFILE(_delay)
1593	| %d0 = arg = (usecs << 8)
1594	movl	%sp@(4),%d0
1595	| %d1 = delay_divisor
1596	movl	_C_LABEL(delay_divisor),%d1
1597	jra	L_delay			/* Jump into the loop! */
1598
1599	/*
1600	 * Align the branch target of the loop to a half-line (8-byte)
1601	 * boundary to minimize cache effects.  This guarantees both
1602	 * that there will be no prefetch stalls due to cache line burst
1603	 * operations and that the loop will run from a single cache
1604	 * half-line.
1605	 */
1606#ifdef __ELF__
1607	.align	8
1608#else
1609	.align	3
1610#endif
1611L_delay:
1612	subl	%d1,%d0
1613	jgt	L_delay
1614	rts
1615
1616/*
1617 * Save and restore 68881 state.
1618 */
1619ENTRY(m68881_save)
1620	movl	%sp@(4),%a0		| save area pointer
1621	fsave	%a0@			| save state
1622	tstb	%a0@			| null state frame?
1623	jeq	Lm68881sdone		| yes, all done
1624	fmovem	%fp0-%fp7,%a0@(FPF_REGS) | save FP general registers
1625	fmovem	%fpcr/%fpsr/%fpi,%a0@(FPF_FPCR)	| save FP control registers
1626Lm68881sdone:
1627	rts
1628
1629ENTRY(m68881_restore)
1630	movl	%sp@(4),%a0		| save area pointer
1631	tstb	%a0@			| null state frame?
1632	jeq	Lm68881rdone		| yes, easy
1633	fmovem	%a0@(FPF_FPCR),%fpcr/%fpsr/%fpi	| restore FP control registers
1634	fmovem	%a0@(FPF_REGS),%fp0-%fp7 | restore FP general registers
1635Lm68881rdone:
1636	frestore %a0@			| restore state
1637	rts
1638
1639/*
1640 * Handle the nitty-gritty of rebooting the machine.
1641 * Basically we just turn off the MMU and jump to the appropriate ROM routine.
1642 * Note that we must be running in an address range that is mapped one-to-one
1643 * logical to physical so that the PC is still valid immediately after the MMU
1644 * is turned off.  We have conveniently mapped the last page of physical
1645 * memory this way.
1646 */
1647ENTRY_NOPROFILE(doboot)
1648#if defined(M68040)
1649	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1650	jeq	Lnocache5		| yes, skip
1651#endif
1652	movl	#CACHE_OFF,%d0
1653	movc	%d0,%cacr		| disable on-chip cache(s)
1654	tstl	_C_LABEL(ectype)	| external cache?
1655	jeq	Lnocache5		| no, skip
1656	MMUADDR(%a0)
1657	andl	#~MMU_CEN,%a0@(MMUCMD)	| disable external cache
1658Lnocache5:
1659	lea	MAXADDR,%a0		| last page of physical memory
1660	movl	_C_LABEL(boothowto),%a0@+ | store howto
1661	movl	_C_LABEL(bootdev),%a0@+	| and devtype
1662	lea	Lbootcode,%a1		| start of boot code
1663	lea	Lebootcode,%a3		| end of boot code
1664Lbootcopy:
1665	movw	%a1@+,%a0@+		| copy a word
1666	cmpl	%a3,%a1			| done yet?
1667	jcs	Lbootcopy		| no, keep going
1668#if defined(M68040)
1669	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1670	jne	LmotommuE		| no, skip
1671	.word	0xf4f8			| cpusha bc
1672LmotommuE:
1673#endif
1674	jmp	MAXADDR+8		| jump to last page
1675
1676Lbootcode:
1677	lea	MAXADDR+0x800,%sp	| physical SP in case of NMI
1678#if defined(M68040)
1679	cmpl	#MMU_68040,_C_LABEL(mmutype) | 68040?
1680	jne	LmotommuF		| no, skip
1681	movl	#0,%d0
1682	movc	%d0,%cacr		| caches off
1683	.long	0x4e7b0003		| movc %d0,%tc
1684	movl	%d2,MAXADDR+NBPG-4	| restore old high page contents
1685	DOREBOOT
1686LmotommuF:
1687#endif
1688#if defined(M68K_MMU_MOTOROLA)
1689	tstl	_C_LABEL(mmutype)	| HP MMU?
1690	jeq	LhpmmuB			| yes, skip
1691	movl	#0,%a0@			| value for pmove to TC (turn off MMU)
1692	pmove	%a0@,%tc		| disable MMU
1693	DOREBOOT
1694LhpmmuB:
1695#endif
1696#if defined(M68K_MMU_HP)
1697	MMUADDR(%a0)
1698	movl	#0xFFFF0000,%a0@(MMUCMD)	| totally disable MMU
1699	movl	%d2,MAXADDR+NBPG-4	| restore old high page contents
1700	DOREBOOT
1701#endif
1702Lebootcode:
1703
1704/*
1705 * Misc. global variables.
1706 */
1707	.data
1708GLOBAL(machineid)
1709	.long	HP_320			| default to 320
1710
1711GLOBAL(mmuid)
1712	.long	0			| default to nothing
1713
1714GLOBAL(mmutype)
1715	.long	MMU_HP			| default to HP MMU
1716
1717GLOBAL(cputype)
1718	.long	CPU_68020		| default to 68020 CPU
1719
1720GLOBAL(ectype)
1721	.long	EC_NONE			| external cache type, default to none
1722
1723GLOBAL(fputype)
1724	.long	FPU_68882		| default to 68882 FPU
1725
1726GLOBAL(protorp)
1727	.long	0,0			| prototype root pointer
1728
1729GLOBAL(prototc)
1730	.long	0			| prototype translation control
1731
1732GLOBAL(internalhpib)
1733	.long	1			| has internal HP-IB, default to yes
1734
1735GLOBAL(want_resched)
1736	.long	0
1737
1738GLOBAL(proc0paddr)
1739	.long	0			| KVA of proc0 u-area
1740
1741GLOBAL(intiobase)
1742	.long	0			| KVA of base of internal IO space
1743
1744GLOBAL(intiolimit)
1745	.long	0			| KVA of end of internal IO space
1746
1747GLOBAL(extiobase)
1748	.long	0			| KVA of base of external IO space
1749
1750GLOBAL(CLKbase)
1751	.long	0			| KVA of base of clock registers
1752
1753GLOBAL(MMUbase)
1754	.long	0			| KVA of base of HP MMU registers
1755
1756#ifdef USELEDS
1757ASLOCAL(heartbeat)
1758	.long	0			| clock ticks since last heartbeat
1759
1760ASLOCAL(beatstatus)
1761	.long	0			| for determining a fast or slow throb
1762#endif
1763
1764#ifdef DEBUG
1765ASGLOBAL(fulltflush)
1766	.long	0
1767
1768ASGLOBAL(fullcflush)
1769	.long	0
1770#endif
1771
1772/*
1773 * Interrupt Counters
1774 *
1775 * We now use the evcnt(9) subsystem, but these are provided to keep
1776 * vmstat(8) happy.
1777 */
1778
1779GLOBAL(intrnames)
1780GLOBAL(eintrnames)
1781GLOBAL(intrcnt)
1782GLOBAL(eintrcnt)
1783	.long 0
1784