xref: /netbsd/sys/arch/amiga/amiga/amiga_init.c (revision 6550d01e)
1 /*	$NetBSD: amiga_init.c,v 1.122 2011/01/15 21:56:53 phx Exp $	*/
2 
3 /*
4  * Copyright (c) 1994 Michael L. Hitch
5  * Copyright (c) 1993 Markus Wild
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Markus Wild.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "opt_amigaccgrf.h"
35 #include "opt_p5ppc68kboard.h"
36 #include "opt_devreload.h"
37 #include "opt_m68k_arch.h"
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: amiga_init.c,v 1.122 2011/01/15 21:56:53 phx Exp $");
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/ioctl.h>
45 #include <sys/select.h>
46 #include <sys/tty.h>
47 #include <sys/buf.h>
48 #include <sys/msgbuf.h>
49 #include <sys/mbuf.h>
50 #include <sys/protosw.h>
51 #include <sys/domain.h>
52 #include <sys/dkbad.h>
53 #include <sys/reboot.h>
54 #include <sys/exec.h>
55 
56 #include <uvm/uvm_extern.h>
57 
58 #include <machine/pte.h>
59 #include <machine/cpu.h>
60 #include <amiga/amiga/cc.h>
61 #include <amiga/amiga/cia.h>
62 #include <amiga/amiga/custom.h>
63 #include <amiga/amiga/cfdev.h>
64 #include <amiga/amiga/drcustom.h>
65 #include <amiga/amiga/gayle.h>
66 #include <amiga/amiga/memlist.h>
67 #include <amiga/dev/zbusvar.h>
68 
69 #define RELOC(v, t)	*((t*)((u_int)&(v) + loadbase))
70 
71 extern u_int	lowram;
72 extern u_int	Umap;
73 extern u_long boot_partition;
74 vaddr_t		amiga_uptbase;
75 #ifdef P5PPC68KBOARD
76 extern int	p5ppc;
77 #endif
78 
79 extern char *esym;
80 
81 #ifdef GRF_AGA
82 extern u_long aga_enable;
83 #endif
84 
85 extern u_long noncontig_enable;
86 
87 /*
88  * some addresses used in locore
89  */
90 vaddr_t INTREQRaddr;
91 vaddr_t INTREQWaddr;
92 
93 /*
94  * these are used by the extended spl?() macros.
95  */
96 volatile unsigned short *amiga_intena_read, *amiga_intena_write;
97 
98 vaddr_t CHIPMEMADDR;
99 vaddr_t chipmem_start;
100 vaddr_t chipmem_end;
101 
102 vaddr_t z2mem_start;		/* XXX */
103 static vaddr_t z2mem_end;		/* XXX */
104 int use_z2_mem = 1;			/* XXX */
105 
106 u_long boot_fphystart, boot_fphysize, boot_cphysize;
107 static u_int start_c_fphystart;
108 static u_int start_c_pstart;
109 
110 static u_long boot_flags;
111 
112 struct boot_memlist *memlist;
113 
114 struct cfdev *cfdev;
115 int ncfdev;
116 
117 u_long scsi_nosync;
118 int shift_nosync;
119 
120 void  start_c(int, u_int, u_int, u_int, char *, u_int, u_long, u_long, u_int);
121 void rollcolor(int);
122 #ifdef DEVRELOAD
123 static int kernel_image_magic_size(void);
124 static void kernel_image_magic_copy(u_char *);
125 int kernel_reload_write(struct uio *);
126 extern void kernel_reload(char *, u_long, u_long, u_long, u_long,
127 	u_long, u_long, u_long, u_long, u_long, u_long);
128 #endif
129 extern void etext(void);
130 void start_c_finish(void);
131 
132 void *
133 chipmem_steal(long amount)
134 {
135 	/*
136 	 * steal from top of chipmem, so we don't collide with
137 	 * the kernel loaded into chipmem in the not-yet-mapped state.
138 	 */
139 	vaddr_t p = chipmem_end - amount;
140 	if (p & 1)
141 		p = p - 1;
142 	chipmem_end = p;
143 	if(chipmem_start > chipmem_end)
144 		panic("not enough chip memory");
145 	return((void *)p);
146 }
147 
148 /*
149  * XXX
150  * used by certain drivers currently to allocate zorro II memory
151  * for bounce buffers, if use_z2_mem is NULL, chipmem will be
152  * returned instead.
153  * XXX
154  */
155 void *
156 alloc_z2mem(long amount)
157 {
158 	if (use_z2_mem && z2mem_end && (z2mem_end - amount) >= z2mem_start) {
159 		z2mem_end -= amount;
160 		return ((void *)z2mem_end);
161 	}
162 	return (alloc_chipmem(amount));
163 }
164 
165 
166 /*
167  * this is the C-level entry function, it's called from locore.s.
168  * Preconditions:
169  *	Interrupts are disabled
170  *	PA may not be == VA, so we may have to relocate addresses
171  *		before enabling the MMU
172  * 	Exec is no longer available (because we're loaded all over
173  *		low memory, no ExecBase is available anymore)
174  *
 * Its purpose is:
176  *	Do the things that are done in locore.s in the hp300 version,
177  *		this includes allocation of kernel maps and enabling the MMU.
178  *
179  * Some of the code in here is `stolen' from Amiga MACH, and was
180  * written by Bryan Ford and Niklas Hallqvist.
181  *
182  * Very crude 68040 support by Michael L. Hitch.
183  *
184  */
185 
186 int kernel_copyback = 1;
187 
/*
 * First-stage C entry point, called from locore.s with the MMU still off
 * and interrupts disabled.
 *
 * Arguments (all loader-provided):
 *	id	  - machine id word, stored into `machineid'; the top byte
 *		    equal to 0x7D selects the DraCo code paths
 *	fphystart - physical start of the fast memory the kernel will run from
 *	fphysize  - size of that fast memory segment
 *	cphysize  - size of chip memory
 *	esym_addr - end of loaded kernel symbols (NULL if none)
 *	flags	  - boot flags: bit 0 enables AGA (GRF_AGA), bits 1-2 set
 *		    noncontig_enable
 *	inh_sync  - SCSI sync-negotiation inhibit mask (stored in scsi_nosync)
 *	boot_part - boot partition from the loader
 *	loadbase  - current load offset of the kernel; 0 means we are still
 *		    running from chip memory and must copy ourselves to
 *		    fastram near the end of this function
 *
 * Because PA may differ from VA until the MMU is enabled, every store to
 * a global variable must go through the RELOC() macro, which rebases the
 * symbol address by `loadbase'.
 */
__attribute__ ((no_instrument_function))
void
start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync,
	boot_part, loadbase)
	int id;
	u_int fphystart, fphysize, cphysize;
	char *esym_addr;
	u_int flags;
	u_long inh_sync;
	u_long boot_part;
	u_int loadbase;
{
	extern char end[];
	extern u_int protorp[2];
	struct cfdev *cd;
	paddr_t pstart, pend;
	vaddr_t vstart, vend;
	psize_t avail;
	paddr_t ptpa;
	psize_t ptsize;
	u_int ptextra, kstsize;
	paddr_t Sysptmap_pa;
	register st_entry_t sg_proto, *sg;
#if defined(M68040) || defined(M68060)
	register st_entry_t *esg;
#endif
	register pt_entry_t pg_proto, *pg, *epg;
	vaddr_t end_loaded;
	u_int ncd;
#if defined(M68040) || defined(M68060)
	u_int i, nl1desc, nl2desc;
#endif
	vaddr_t kva;
	struct boot_memlist *ml;

#ifdef DEBUG_KERNEL_START
	/* XXX this only is valid if Altais is in slot 0 */
	volatile u_int8_t *altaiscolpt = (u_int8_t *)0x200003c8;
	volatile u_int8_t *altaiscol = (u_int8_t *)0x200003c9;
#endif

#ifdef DEBUG_KERNEL_START
	/* progress indication: DraCo uses the Altais DAC, Amiga color0 */
	if ((id>>24)==0x7D) {
		*altaiscolpt = 0;
		*altaiscol = 40;
		*altaiscol = 0;
		*altaiscol = 0;
	} else
((volatile struct Custom *)0xdff000)->color[0] = 0xa00;		/* RED */
#endif

#ifdef LIMITMEM
	/* optionally clamp usable fast memory (debug/test aid) */
	if (fphysize > LIMITMEM*1024*1024)
		fphysize = LIMITMEM*1024*1024;
#endif

	RELOC(boot_fphystart, u_long) = fphystart;
	RELOC(boot_fphysize, u_long) = fphysize;
	RELOC(boot_cphysize, u_long) = cphysize;

	RELOC(machineid, int) = id;
	RELOC(chipmem_end, vaddr_t) = cphysize;
	RELOC(esym, char *) = esym_addr;
	RELOC(boot_flags, u_long) = flags;
	RELOC(boot_partition, u_long) = boot_part;
#ifdef GRF_AGA
	if (flags & 1)
		RELOC(aga_enable, u_long) |= 1;
#endif
	if (flags & (3 << 1))
		RELOC(noncontig_enable, u_long) = (flags >> 1) & 3;

	RELOC(scsi_nosync, u_long) = inh_sync;

	/*
	 * the kernel ends at end(), plus the cfdev and memlist structures
	 * we placed there in the loader.  Correct for this now.  Also,
	 * account for kernel symbols if they are present.
	 */
	if (esym_addr == NULL)
		end_loaded = (vaddr_t)&end;
	else
		end_loaded = (vaddr_t)esym_addr;
	/* leading int is the ConfigDev count, followed by the cfdev array */
	RELOC(ncfdev, int) = *(int *)(&RELOC(*(u_int *)end_loaded, u_int));
	RELOC(cfdev, struct cfdev *) = (struct cfdev *) ((int)end_loaded + 4);
	end_loaded += 4 + RELOC(ncfdev, int) * sizeof(struct cfdev);

	/* the boot memory list follows the cfdev array */
	RELOC(memlist, struct boot_memlist *) =
	    (struct boot_memlist *)end_loaded;
	ml = &RELOC(*(struct boot_memlist *)end_loaded, struct boot_memlist);
	end_loaded = (vaddr_t)&((RELOC(memlist, struct boot_memlist *))->
	    m_seg[ml->m_nseg]);

	/*
	 * Get ZorroII (16-bit) memory if there is any and it's not where the
	 * kernel is loaded.
	 */
	if (ml->m_nseg > 0 && ml->m_nseg < 16 && RELOC(use_z2_mem, int)) {
		struct boot_memseg *sp, *esp;

		sp = ml->m_seg;
		esp = sp + ml->m_nseg;
		for (; sp < esp; sp++) {
			/* only fast, 24-bit-DMA-capable segments qualify */
			if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA))
			    != (MEMF_FAST|MEMF_24BITDMA))
				continue;
			if (sp->ms_start == fphystart)
				continue;
			/*
			 * take up to use_z2_mem * 7 * MAXPHYS bytes from
			 * the top of the segment, capped at segment size
			 */
			RELOC(z2mem_end, paddr_t) =
			    sp->ms_start + sp->ms_size;
			RELOC(z2mem_start, paddr_t) =
			    RELOC(z2mem_end, paddr_t) - MAXPHYS *
			    RELOC(use_z2_mem, int) * 7;
			RELOC(NZTWOMEMPG, u_int) =
			    (RELOC(z2mem_end, paddr_t) -
			    RELOC(z2mem_start, paddr_t)) / PAGE_SIZE;
			if ((RELOC(z2mem_end, paddr_t) -
			    RELOC(z2mem_start, paddr_t)) > sp->ms_size) {
				RELOC(NZTWOMEMPG, u_int) = sp->ms_size /
				    PAGE_SIZE;
				RELOC(z2mem_start, paddr_t) =
				    RELOC(z2mem_end, paddr_t) - sp->ms_size;
			}
			break;
		}
	}

	/*
	 * Scan ConfigDev list and get size of Zorro I/O boards that are
	 * outside the Zorro II I/O area.
	 */
	for (RELOC(ZBUSAVAIL, u_int) = 0, cd =
	    &RELOC(*RELOC(cfdev, struct cfdev *),struct cfdev),
	    ncd = RELOC(ncfdev, int); ncd > 0; ncd--, cd++) {
		int bd_type = cd->rom.type & (ERT_TYPEMASK | ERTF_MEMLIST);

		if (bd_type != ERT_ZORROIII &&
		    (bd_type != ERT_ZORROII || isztwopa(cd->addr)))
			continue;	/* It's not Z2 or Z3 I/O board */
		/*
		 *  Hack to adjust board size for Zorro III boards that
		 *  do not specify an extended size or subsize.  This is
		 *  specifically for the GVP Spectrum and hopefully won't
		 *  break with other boards that configure like this.
		 */
		if (bd_type == ERT_ZORROIII &&
		    !(cd->rom.flags & ERFF_EXTENDED) &&
		    (cd->rom.flags & ERT_Z3_SSMASK) == 0)
			cd->size = 0x10000 <<
			    ((cd->rom.type - 1) & ERT_MEMMASK);
		RELOC(ZBUSAVAIL, u_int) += m68k_round_page(cd->size);
	}

	/*
	 * assume KVA_MIN == 0.  We subtract the kernel code (and
	 * the configdev's and memlists) from the virtual and
	 * physical starts and ends.
	 */
	vend   = fphysize;
	avail  = vend;
	vstart = end_loaded;
	vstart = m68k_round_page(vstart);
	pstart = (paddr_t)vstart + fphystart;
	pend   = vend   + fphystart;
	avail -= vstart;

	/*
	 * save KVA of lwp0 u-area and allocate it.
	 */
	RELOC(lwp0uarea, vaddr_t) = vstart;
	pstart += USPACE;
	vstart += USPACE;
	avail -= USPACE;

#if defined(M68040) || defined(M68060)
	/* 68040 MMU needs a multi-page segment table (3-level tables) */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;

	/*
	 * allocate the kernel segment table
	 */
	RELOC(Sysseg_pa, u_int) = pstart;
	RELOC(Sysseg, u_int) = vstart;
	vstart += PAGE_SIZE * kstsize;
	pstart += PAGE_SIZE * kstsize;
	avail -= PAGE_SIZE * kstsize;

	/*
	 * allocate kernel page table map
	 */
	RELOC(Sysptmap, u_int) = vstart;
	Sysptmap_pa = pstart;
	vstart += PAGE_SIZE;
	pstart += PAGE_SIZE;
	avail -= PAGE_SIZE;

	/*
	 * allocate initial page table pages
	 */
	ptpa = pstart;
#ifdef DRACO
	if ((id>>24)==0x7D) {
		/* DraCo: CC regs + Z2 memory + Zorro bus I/O pages */
		ptextra = NDRCCPG
		    + RELOC(NZTWOMEMPG, u_int)
		    + btoc(RELOC(ZBUSAVAIL, u_int));
	} else
#endif
	/* Amiga: chipmem + CIA + Z2 ROM + Z2 memory + Zorro I/O + PCMCIA */
	ptextra = NCHIPMEMPG + NCIAPG + NZTWOROMPG + RELOC(NZTWOMEMPG, u_int) +
	    btoc(RELOC(ZBUSAVAIL, u_int)) + NPCMCIAPG;

	ptsize = (RELOC(Sysptsize, u_int) +
	    howmany(ptextra, NPTEPG)) << PGSHIFT;

	vstart += ptsize;
	pstart += ptsize;
	avail -= ptsize;

	/*
	 * Sysmap is now placed at the end of Supervisor virtual address space.
	 */
	RELOC(Sysmap, u_int *) = (u_int *)SYSMAP_VA;

	/*
	 * initialize segment table and page table map
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" values).
		 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		esg = &sg[kstsize * NPTEPG];
		while (sg < esg)
			*sg++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SG4_LEV1SIZE];
		esg = &sg[nl2desc];
		sg_proto = ptpa | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the 'nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		esg = &sg[nl1desc];
		sg_proto = (paddr_t)&sg[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/* Sysmap is last entry in level 1 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SG4_LEV1SIZE - 1];
		*sg = sg_proto;

		/*
		 * Kernel segment table at end of next level 2 table
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
		esg = &sg[NPTEPG / SG4_LEV3SIZE];
		sg_proto = Sysptmap_pa | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
		}

		/* Include additional level 2 table for Sysmap in protostfree */
		RELOC(protostfree, u_int) =
		    (~0 << (1 + nl1desc + 1)) /* & ~(~0 << MAXKL2SIZE) */;

		/*
		 * Initialize Sysptmap
		 */
		pg = (pt_entry_t *)Sysptmap_pa;
		epg = &pg[ptsize >> PGSHIFT];
		pg_proto = ptpa | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}
		/*
		 * Invalidate rest of Sysptmap page
		 */
		epg = (pt_entry_t *)(Sysptmap_pa + PAGE_SIZE - sizeof(st_entry_t));
		while (pg < epg)
			*pg++ = SG_NV;
		/* map Sysptmap itself at SYSMAP_VA */
		pg = (pt_entry_t *)Sysptmap_pa;
		pg = &pg[SYSMAP_VA >> SEGSHIFT];
		*pg = Sysptmap_pa | PG_RW | PG_CI | PG_V;
	} else
#endif /* M68040 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		pg = (pt_entry_t *)Sysptmap_pa;
		epg = &pg[ptsize >> PGSHIFT];
		sg_proto = ptpa | SG_RW | SG_V;
		pg_proto = ptpa | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*sg++ = sg_proto;
			*pg++ = pg_proto;
			sg_proto += PAGE_SIZE;
			pg_proto += PAGE_SIZE;
		}
		/*
		 * invalidate the remainder of each table
		 */
		epg = (pt_entry_t *)Sysptmap_pa;
		epg = &epg[TIA_SIZE];
		while (pg < epg) {
			*sg++ = SG_NV;
			*pg++ = PG_NV;
		}
		/* map Sysptmap itself at SYSMAP_VA in both tables */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SYSMAP_VA >> SEGSHIFT];
		pg = (pt_entry_t *)Sysptmap_pa;
		pg = &pg[SYSMAP_VA >> SEGSHIFT];
		*sg = Sysptmap_pa | SG_RW | SG_V;
		*pg = Sysptmap_pa | PG_RW | PG_CI | PG_V;
		/* XXX zero out rest of page? */
	}

	/*
	 * initialize kernel page table page(s) (assume load at VA 0)
	 */
	pg_proto = fphystart | PG_RO | PG_V;	/* text pages are RO */
	pg       = (pt_entry_t *)ptpa;
	*pg++ = PG_NV;				/* Make page 0 invalid */
	pg_proto += PAGE_SIZE;
	for (kva = PAGE_SIZE; kva < (vaddr_t)etext;
	     kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 *  for the 68060 mandatory)
	 */
	if (RELOC(mmutype, int) == MMU_68040) {

		if (RELOC(kernel_copyback, int))
			pg_proto |= PG_CCB;

		/*
		 * ASSUME: segment table and statically allocated page tables
		 * of the kernel are contiguously allocated, start at
		 * Sysseg and end at the current value of vstart.
		 */
		for (; kva < RELOC(Sysseg, u_int);
		     kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;

		/* the MMU tables themselves are mapped cache-inhibited */
		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;

		/* back to cacheable (copyback if enabled) for the rest */
		pg_proto = (pg_proto & ~PG_CI);
		if (RELOC(kernel_copyback, int))
			pg_proto |= PG_CCB;
	}
#endif
	/*
	 * go till end of data allocated so far
	 * plus lwp0 u-area (to be allocated)
	 */
	for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;
	/*
	 * invalidate remainder of kernel PT
	 */
	while (pg < (pt_entry_t *) (ptpa + ptsize))
		*pg++ = PG_NV;

	/*
	 * validate internal IO PTEs following current vstart
	 */
	pg = &((u_int *)ptpa)[vstart >> PGSHIFT];
#ifdef DRACO
	if ((id >> 24) == 0x7D) {
		RELOC(DRCCADDR, u_int) = vstart;
		RELOC(CIAADDR, vaddr_t) =
		    RELOC(DRCCADDR, u_int) + DRCIAPG * PAGE_SIZE;
		if (RELOC(z2mem_end, vaddr_t) == 0)
			RELOC(ZBUSADDR, vaddr_t) =
			   RELOC(DRCCADDR, u_int) + NDRCCPG * PAGE_SIZE;
		pg_proto = DRCCBASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < DRZ2BASE) {
			*pg++ = pg_proto;
			pg_proto += DRCCSTRIDE;
			vstart += PAGE_SIZE;
		}

		/* NCR 53C710 chip */
		*pg++ = DRSCSIBASE | PG_RW | PG_CI | PG_V;
		vstart += PAGE_SIZE;

#ifdef DEBUG_KERNEL_START
		/*
		 * early rollcolor Altais mapping
		 * XXX (only works if in slot 0)
		 */
		*pg++ = 0x20000000 | PG_RW | PG_CI | PG_V;
		vstart += PAGE_SIZE;
#endif
	} else
#endif
	{
		RELOC(CHIPMEMADDR, vaddr_t) = vstart;
		pg_proto = CHIPMEMBASE | PG_RW | PG_CI | PG_V;
						/* CI needed here?? */
		while (pg_proto < CHIPMEMTOP) {
			*pg++     = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart   += PAGE_SIZE;
		}
	}
	if (RELOC(z2mem_end, paddr_t)) {			/* XXX */
		RELOC(ZTWOMEMADDR, vaddr_t) = vstart;
		RELOC(ZBUSADDR, vaddr_t) = RELOC(ZTWOMEMADDR, vaddr_t) +
		    RELOC(NZTWOMEMPG, u_int) * PAGE_SIZE;
		pg_proto = RELOC(z2mem_start, paddr_t) |	/* XXX */
		    PG_RW | PG_V;				/* XXX */
		while (pg_proto < RELOC(z2mem_end, paddr_t)) { /* XXX */
			*pg++ = pg_proto;			/* XXX */
			pg_proto += PAGE_SIZE;			/* XXX */
			vstart   += PAGE_SIZE;
		}						/* XXX */
	}							/* XXX */
#ifdef DRACO
	if ((id >> 24) != 0x7D)
#endif
	{
		/* map CIA chips, Zorro II ROM space and the Zorro bus */
		RELOC(CIAADDR, vaddr_t) = vstart;
		pg_proto = CIABASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < CIATOP) {
			*pg++     = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart   += PAGE_SIZE;
		}
		RELOC(ZTWOROMADDR, vaddr_t) = vstart;
		pg_proto  = ZTWOROMBASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < ZTWOROMTOP) {
			*pg++     = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart   += PAGE_SIZE;
		}
		RELOC(ZBUSADDR, vaddr_t) = vstart;
		/* not on 8k boundary :-( */
		RELOC(CIAADDR, vaddr_t) += PAGE_SIZE/2;
		RELOC(CUSTOMADDR, vaddr_t)  =
		    RELOC(ZTWOROMADDR, vaddr_t) - ZTWOROMBASE + CUSTOMBASE;
	}

	/*
	 *[ following page tables MAY be allocated to ZORRO3 space,
	 * but they're then later mapped in autoconf.c ]
	 */
	vstart += RELOC(ZBUSAVAIL, u_int);

	/*
	 * init mem sizes
	 */
	RELOC(maxmem, u_int)  = pend >> PGSHIFT;
	RELOC(lowram, u_int)  = fphystart;
	RELOC(physmem, u_int) = fphysize >> PGSHIFT;

	RELOC(virtual_avail, u_int) = vstart;

	/*
	 * Put user page tables starting at next 256MB boundary, to make
	 * kernel dumps more readable, with a guaranteed gap after the
	 * kernel image.
	 * XXX 16 MB instead of 256 MB should be enough, but...
	 * we need to fix the fastmem loading first. (see comment at line 375)
	 */
	RELOC(amiga_uptbase, vaddr_t) =
	    roundup(vstart + 0x10000000, 0x10000000);

	/*
	 * set this before copying the kernel, so the variable is updated in
	 * the `real' place too. protorp[0] is already preset to the
	 * CRP setting.
	 */
	RELOC(protorp[1], u_int) = RELOC(Sysseg_pa, u_int);

	/* saved for start_c_finish(), which runs after the MMU is on */
	RELOC(start_c_fphystart, u_int) = fphystart;
	RELOC(start_c_pstart, u_int) = pstart;

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram.  DONT use bcopy(), this beast is much larger
	 * than 128k !
	 */
	if (loadbase == 0) {
		register paddr_t *lp, *le, *fp;

		lp = (paddr_t *)0;
		le = (paddr_t *)end_loaded;
		fp = (paddr_t *)fphystart;
		while (lp < le)
			*fp++ = *lp++;
	}

#ifdef DEBUG_KERNEL_START
	if ((id>>24)==0x7D) {
		*altaiscolpt = 0;
		*altaiscol = 40;
		*altaiscol = 40;
		*altaiscol = 0;
	} else
((volatile struct Custom *)0xdff000)->color[0] = 0xAA0;		/* YELLOW */
#endif
	/*
	 * prepare to enable the MMU
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		if (id & AMIGA_68060) {
			/* do i need to clear the branch cache? */
			__asm volatile (	".word 0x4e7a,0x0002;"
					"orl #0x400000,%%d0;"
					".word 0x4e7b,0x0002" : : : "d0");
		}

		/*
		 * movel Sysseg_pa,%a0;
		 * movec %a0,%srp;
		 */

		__asm volatile ("movel %0,%%a0; .word 0x4e7b,0x8807"
		    : : "a" (RELOC(Sysseg_pa, u_int)) : "a0");

#ifdef DEBUG_KERNEL_START
		if ((id>>24)==0x7D) {
			*altaiscolpt = 0;
			*altaiscol = 40;
			*altaiscol = 33;
			*altaiscol = 0;
		} else
((volatile struct Custom *)0xdff000)->color[0] = 0xA70;		/* ORANGE */
#endif
	} else
#endif
	{
		/*
		 * setup and load SRP
		 * nolimit, share global, 4 byte PTE's
		 */
		(RELOC(protorp[0], u_int)) = 0x80000202;
		__asm volatile ("pmove %0@,%%srp":: "a" (&RELOC(protorp, u_int)));
	}
}
770 
/*
 * Second-stage startup, called (from locore.s) after the MMU has been
 * enabled by start_c(): finish pmap bootstrap, fix up the now-mapped
 * hardware register addresses, quiesce interrupt sources, and compute
 * the preliminary delay divisor.
 */
void
start_c_finish(void)
{
	extern u_int32_t delaydivisor;
#ifdef	P5PPC68KBOARD
        struct cfdev *cdp, *ecdp;
#endif

#ifdef DEBUG_KERNEL_START
#ifdef DRACO
	if ((id >> 24) == 0x7D) { /* mapping on, is_draco() is valid */
		int i;
		/* XXX experimental Altais register mapping only */
		altaiscolpt = (volatile u_int8_t *)(DRCCADDR+PAGE_SIZE*9+0x3c8);
		altaiscol = altaiscolpt + 1;
		for (i=0; i<140000; i++) {
			*altaiscolpt = 0;
			*altaiscol = 0;
			*altaiscol = 40;
			*altaiscol = 0;
		}
	} else
#endif
((volatile struct Custom *)CUSTOMADDR)->color[0] = 0x0a0;	/* GREEN */
#endif

	/* values saved by start_c() before the kernel copy to fastram */
	pmap_bootstrap(start_c_pstart, start_c_fphystart);
	pmap_bootstrap_finalize();

	/*
	 * to make life easier in locore.s, set these addresses explicitly
	 */
	CIAAbase = CIAADDR + 0x1001;	/* CIA-A at odd addresses ! */
	CIABbase = CIAADDR;
	CUSTOMbase = CUSTOMADDR;
#ifdef DRACO
	if (is_draco()) {
		/* DraCo interrupt controller registers, one per page */
		draco_intena = (volatile u_int8_t *)DRCCADDR+1;
		draco_intpen = draco_intena + PAGE_SIZE;
		draco_intfrc = draco_intpen + PAGE_SIZE;
		draco_misc = draco_intfrc + PAGE_SIZE;
		draco_ioct = (struct drioct *)(DRCCADDR + DRIOCTLPG*PAGE_SIZE);
	} else
#endif
	{
		INTREQRaddr = (vaddr_t)&custom.intreqr;
		INTREQWaddr = (vaddr_t)&custom.intreq;
	}
	/*
	 * Get our chip memory allocation system working
	 */
	chipmem_start += CHIPMEMADDR;
	chipmem_end   += CHIPMEMADDR;

	/* XXX is: this MUST NOT BE DONE before the pmap_bootstrap() call */
	if (z2mem_end) {
		/* rebase the Zorro II pool to its mapped virtual addresses */
		z2mem_end = ZTWOMEMADDR + NZTWOMEMPG * PAGE_SIZE;
		z2mem_start = ZTWOMEMADDR;
	}

	/*
	 * disable all interrupts but allow them to be enabled
	 * by specific driver code (global int enable bit)
	 */
#ifdef DRACO
	if (is_draco()) {
		/* XXX to be done. For now, just: */
		*draco_intena = 0;
		*draco_intpen = 0;
		*draco_intfrc = 0;
		ciaa.icr = 0x7f;			/* and keyboard */
		ciab.icr = 0x7f;			/* and again */

		draco_ioct->io_control &=
		    ~(DRCNTRL_KBDINTENA|DRCNTRL_FDCINTENA); /* and another */

		draco_ioct->io_status2 &=
		    ~(DRSTAT2_PARIRQENA|DRSTAT2_TMRINTENA); /* some more */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*PAGE_SIZE + 4*(0x3F8 + 1)) = 0; /* and com0 */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*PAGE_SIZE + 4*(0x2F8 + 1)) = 0; /* and com1 */

		draco_ioct->io_control |= DRCNTRL_WDOGDIS; /* stop Fido */
		*draco_misc &= ~1/*DRMISC_FASTZ2*/;

	} else
#endif
	{
		custom.intena = 0x7fff;			/* disable ints */
		custom.intena = INTF_SETCLR | INTF_INTEN;
							/* but allow them */
		custom.intreq = 0x7fff;			/* clear any current */
		ciaa.icr = 0x7f;			/* and keyboard */
		ciab.icr = 0x7f;			/* and again */

		/*
		 * remember address of read and write intena register for use
		 * by extended spl?() macros.
		 */
		amiga_intena_read  = &custom.intenar;
		amiga_intena_write = &custom.intena;
	}

	/*
	 * This is needed for 3000's with superkick ROM's. Bit 7 of
	 * 0xde0002 enables the ROM if set. If this isn't set the machine
	 * has to be powercycled in order for it to boot again. ICKA! RFH
	 */
	if (is_a3000()) {
		volatile unsigned char *a3000_magic_reset;

		a3000_magic_reset = (volatile unsigned char *)ztwomap(0xde0002);

		/* Turn SuperKick ROM (V36) back on */
		*a3000_magic_reset |= 0x80;
	}

#ifdef	P5PPC68KBOARD
	/*
	 * Are we an P5 PPC/68K board? install different reset
	 * routine.
	 */

        for (cdp = cfdev, ecdp = &cfdev[ncfdev]; cdp < ecdp; cdp++) {
		/* manufacturer 8512 (Phase5), product 100 or 110 */
		if (cdp->rom.manid == 8512 &&
		    (cdp->rom.prodid == 100 || cdp->rom.prodid == 110)) {
		    		p5ppc = 1;
				break;
			}
        }
#endif
	/*
	 * preliminary delay divisor value
	 */

	if (machineid & AMIGA_68060)
		delaydivisor = (1024 * 1) / 80;	/* 80 MHz 68060 w. BTC */

	else if (machineid & AMIGA_68040)
		delaydivisor = (1024 * 3) / 40;	/* 40 MHz 68040 */

	else if (machineid & AMIGA_68030)
		delaydivisor = (1024 * 8) / 50;	/* 50 MHz 68030 */

	else
		delaydivisor = (1024 * 8) / 33; /* 33 MHz 68020 */
}
921 
922 void
923 rollcolor(int color)
924 {
925 	int s, i;
926 
927 	s = splhigh();
928 	/*
929 	 * need to adjust count -
930 	 * too slow when cache off, too fast when cache on
931 	 */
932 	for (i = 0; i < 400000; i++)
933 		((volatile struct Custom *)CUSTOMbase)->color[0] = color;
934 	splx(s);
935 }
936 
937 #ifdef DEVRELOAD
938 /*
939  * Kernel reloading code
940  */
941 
942 static struct exec kernel_exec;
943 static u_char *kernel_image;
944 static u_long kernel_text_size, kernel_load_ofs;
945 static u_long kernel_load_phase;
946 static u_long kernel_load_endseg;
947 static u_long kernel_symbol_size, kernel_symbol_esym;
948 
949 /* This supports the /dev/reload device, major 2, minor 20,
950    hooked into mem.c.  Author: Bryan Ford.  */
951 
952 /*
953  * This is called below to find out how much magic storage
954  * will be needed after a kernel image to be reloaded.
955  */
956 static int
957 kernel_image_magic_size(void)
958 {
959 	int sz;
960 
961 	/* 4 + cfdev's + Mem_Seg's + 4 */
962 	sz = 8 + ncfdev * sizeof(struct cfdev)
963 	    + memlist->m_nseg * sizeof(struct boot_memseg);
964 	return(sz);
965 }
966 
967 /* This actually copies the magic information.  */
968 static void
969 kernel_image_magic_copy(u_char *dest)
970 {
971 	*((int*)dest) = ncfdev;
972 	dest += 4;
973 	memcpy(dest, cfdev, ncfdev * sizeof(struct cfdev)
974 	    + memlist->m_nseg * sizeof(struct boot_memseg) + 4);
975 }
976 
977 #undef AOUT_LDPGSZ
978 #define AOUT_LDPGSZ 8192 /* XXX ??? */
979 
/*
 * Write handler for the /dev/reload device: accept an a.out kernel image
 * across multiple write(2) calls, staging it into a malloc'd buffer, and
 * finally hand it to kernel_reload() in locore.s.
 *
 * State machine (kernel_load_phase):
 *	0 - loading kernel text
 *	1 - loading kernel data
 *	3 - loading the symbol table (only if esym != NULL)
 *	2 - image complete; append magic info and call kernel_reload()
 *
 * Returns 0 while more data is expected; an errno on failure.  On a
 * successful reload kernel_reload() never returns; if it does return,
 * ENODEV is reported to the caller.
 */
int
kernel_reload_write(struct uio *uio)
{
	extern int eclockfreq;
	struct iovec *iov;
	int error, c;

	iov = uio->uio_iov;

	/* first write: no image buffer allocated yet */
	if (kernel_image == 0) {
		/*
		 * We have to get at least the whole exec header
		 * in the first write.
		 */
		if (iov->iov_len < sizeof(kernel_exec))
			return ENOEXEC;		/* XXX */

		/*
		 * Pull in the exec header and check it.
		 */
		if ((error = uiomove((void *)&kernel_exec, sizeof(kernel_exec),
		     uio)) != 0)
			return(error);
		printf("loading kernel %ld+%ld+%ld+%ld\n", kernel_exec.a_text,
		    kernel_exec.a_data, kernel_exec.a_bss,
		    esym == NULL ? 0 : kernel_exec.a_syms);
		/*
		 * Looks good - allocate memory for a kernel image.
		 */
		kernel_text_size = (kernel_exec.a_text
			+ AOUT_LDPGSZ - 1) & (-AOUT_LDPGSZ);
		/*
		 * Estimate space needed for symbol names, since we don't
		 * know how big it really is.
		 */
		if (esym != NULL) {
			kernel_symbol_size = kernel_exec.a_syms;
			kernel_symbol_size += 16 * (kernel_symbol_size / 12);
		}
		/*
		 * XXX - should check that image will fit in CHIP memory
		 * XXX return an error if it doesn't
		 */
		if ((kernel_text_size + kernel_exec.a_data +
		    kernel_exec.a_bss + kernel_symbol_size +
		    kernel_image_magic_size()) > boot_cphysize)
			return (EFBIG);
		kernel_image = malloc(kernel_text_size + kernel_exec.a_data
			+ kernel_exec.a_bss
			+ kernel_symbol_size
			+ kernel_image_magic_size(),
			M_TEMP, M_WAITOK);
		kernel_load_ofs = 0;
		kernel_load_phase = 0;
		kernel_load_endseg = kernel_exec.a_text;
		return(0);
	}
	/*
	 * Continue loading in the kernel image.
	 */
	c = min(iov->iov_len, kernel_load_endseg - kernel_load_ofs);
	c = min(c, MAXPHYS);
	if ((error = uiomove(kernel_image + kernel_load_ofs, (int)c, uio)) != 0)
		return(error);
	kernel_load_ofs += c;

	/*
	 * Fun and games to handle loading symbols - the length of the
	 * string table isn't know until after the symbol table has
	 * been loaded.  We have to load the kernel text, data, and
	 * the symbol table, then get the size of the strings.  A
	 * new kernel image is then allocated and the data currently
	 * loaded moved to the new image.  Then continue reading the
	 * string table.  This has problems if there isn't enough
	 * room to allocate space for the two copies of the kernel
	 * image.  So the approach I took is to guess at the size
	 * of the symbol strings.  If the guess is wrong, the symbol
	 * table is ignored.
	 */

	/* current segment not yet complete: wait for more writes */
	if (kernel_load_ofs != kernel_load_endseg)
		return(0);

	switch (kernel_load_phase) {
	case 0:		/* done loading kernel text */
		kernel_load_ofs = kernel_text_size;
		kernel_load_endseg = kernel_load_ofs + kernel_exec.a_data;
		kernel_load_phase = 1;
		break;
	case 1:		/* done loading kernel data */
		/* zero-fill bss ourselves; it is not in the image */
		for(c = 0; c < kernel_exec.a_bss; c++)
			kernel_image[kernel_load_ofs + c] = 0;
		kernel_load_ofs += kernel_exec.a_bss;
		if (esym) {
			kernel_load_endseg = kernel_load_ofs
			    + kernel_exec.a_syms + 8;
			*((u_long *)(kernel_image + kernel_load_ofs)) =
			    kernel_exec.a_syms;
			kernel_load_ofs += 4;
			kernel_load_phase = 3;
			break;
		}
		/*FALLTHROUGH*/
	case 2:		/* done loading kernel */

		/*
		 * Put the finishing touches on the kernel image.
		 */
		kernel_image_magic_copy(kernel_image + kernel_load_ofs);
		/*
		 * Start the new kernel with code in locore.s.
		 */
		kernel_reload(kernel_image,
		    kernel_load_ofs + kernel_image_magic_size(),
		    kernel_exec.a_entry, boot_fphystart, boot_fphysize,
		    boot_cphysize, kernel_symbol_esym, eclockfreq,
		    boot_flags, scsi_nosync, boot_partition);
		/*
		 * kernel_reload() now checks to see if the reload_code
		 * is at the same location in the new kernel.
		 * If it isn't, it will return and we will return
		 * an error.
		 */
		free(kernel_image, M_TEMP);
		kernel_image = NULL;
		return (ENODEV);	/* Say operation not supported */
	case 3:		/* done loading kernel symbol table */
		/* first word of string table is its size; cap at our guess */
		c = *((u_long *)(kernel_image + kernel_load_ofs - 4));
		if (c > 16 * (kernel_exec.a_syms / 12))
			c = 16 * (kernel_exec.a_syms / 12);
		kernel_load_endseg += c - 4;
		kernel_symbol_esym = kernel_load_endseg;
#ifdef notyet
		kernel_image_copy = kernel_image;
		kernel_image = malloc(kernel_load_ofs + c
		    + kernel_image_magic_size(), M_TEMP, M_WAITOK);
		if (kernel_image == NULL)
			panic("kernel_reload failed second malloc");
		for (c = 0; c < kernel_load_ofs; c += MAXPHYS)
			memcpy(kernel_image + c, kernel_image_copy + c,
			    (kernel_load_ofs - c) > MAXPHYS ? MAXPHYS :
			    kernel_load_ofs - c);
#endif
		kernel_load_phase = 2;
	}
	return(0);
}
1127 #endif
1128