xref: /netbsd/sys/arch/amiga/amiga/amiga_init.c (revision c4a72b64)
1 /*	$NetBSD: amiga_init.c,v 1.80 2002/09/02 12:54:40 aymeric Exp $	*/
2 
3 /*
4  * Copyright (c) 1994 Michael L. Hitch
5  * Copyright (c) 1993 Markus Wild
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Markus Wild.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "opt_amigaccgrf.h"
35 #include "opt_p5ppc68kboard.h"
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: amiga_init.c,v 1.80 2002/09/02 12:54:40 aymeric Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/proc.h>
43 #include <uvm/uvm_extern.h>
44 #include <sys/user.h>
45 #include <sys/ioctl.h>
46 #include <sys/select.h>
47 #include <sys/tty.h>
48 #include <sys/proc.h>
49 #include <sys/buf.h>
50 #include <sys/msgbuf.h>
51 #include <sys/mbuf.h>
52 #include <sys/protosw.h>
53 #include <sys/domain.h>
54 #include <sys/dkbad.h>
55 #include <sys/reboot.h>
56 #include <sys/exec.h>
57 #include <machine/pte.h>
58 #include <machine/cpu.h>
59 #include <amiga/amiga/cc.h>
60 #include <amiga/amiga/cia.h>
61 #include <amiga/amiga/custom.h>
62 #include <amiga/amiga/cfdev.h>
63 #include <amiga/amiga/drcustom.h>
64 #include <amiga/amiga/gayle.h>
65 #include <amiga/amiga/memlist.h>
66 #include <amiga/dev/zbusvar.h>
67 
/*
 * RELOC(v, t): access kernel variable `v' (as type `t') through its
 * physical load address.  Before the MMU is turned on the kernel may be
 * running at a physical address different from its linked virtual
 * address; `loadbase' (a local of start_c()) holds that offset and must
 * be in scope wherever RELOC is used.
 */
#define RELOC(v, t)	*((t*)((u_int)&(v) + loadbase))

extern u_int	lowram;
extern u_int	Sysptmap, Sysptsize, Sysseg, Umap, proc0paddr;
extern u_int	Sysseg_pa;
extern u_int	virtual_avail;
#if defined(M68040) || defined(M68060)
extern int	protostfree;
#endif
extern u_long boot_partition;
vaddr_t		amiga_uptbase;	/* base VA for user page tables (set in start_c) */
#ifdef P5PPC68KBOARD
extern int	p5ppc;
#endif

extern char *esym;		/* end of kernel symbol data, if loaded */

#ifdef GRF_AGA
extern u_long aga_enable;
#endif

extern u_long noncontig_enable;

/*
 * some addresses used in locore
 */
vaddr_t INTREQRaddr;
vaddr_t INTREQWaddr;

/*
 * these are used by the extended spl?() macros.
 */
volatile unsigned short *amiga_intena_read, *amiga_intena_write;

/*
 * the number of pages in our hw mapping and the start address
 */
vaddr_t amigahwaddr;
u_int namigahwpg;

/* temporary "shadow" kernel mapping; torn down by start_c_cleanup() */
vaddr_t amigashdwaddr;
u_int namigashdwpg;

vaddr_t CHIPMEMADDR;		/* KVA where chip memory is mapped */
vaddr_t chipmem_start;
vaddr_t chipmem_end;

vaddr_t z2mem_start;		/* XXX */
static vaddr_t z2mem_end;		/* XXX */
int use_z2_mem = 1;			/* XXX */

/* physical memory layout as passed in from the boot loader */
u_long boot_fphystart, boot_fphysize, boot_cphysize;

static u_long boot_flags;

/* ConfigDev and memory segment lists placed after the kernel by the loader */
struct boot_memlist *memlist;

struct cfdev *cfdev;
int ncfdev;

u_long scsi_nosync;
int shift_nosync;

void  start_c(int, u_int, u_int, u_int, char *, u_int, u_long, u_long);
void rollcolor(int);
static int kernel_image_magic_size(void);
static void kernel_image_magic_copy(u_char *);
int kernel_reload_write(struct uio *);
extern void kernel_reload(char *, u_long, u_long, u_long, u_long,
	u_long, u_long, u_long, u_long, u_long, u_long);
extern void etext(void);
void start_c_cleanup(void);
140 
141 void *
142 chipmem_steal(long amount)
143 {
144 	/*
145 	 * steal from top of chipmem, so we don't collide with
146 	 * the kernel loaded into chipmem in the not-yet-mapped state.
147 	 */
148 	vaddr_t p = chipmem_end - amount;
149 	if (p & 1)
150 		p = p - 1;
151 	chipmem_end = p;
152 	if(chipmem_start > chipmem_end)
153 		panic("not enough chip memory");
154 	return((void *)p);
155 }
156 
157 /*
158  * XXX
159  * used by certain drivers currently to allocate zorro II memory
160  * for bounce buffers, if use_z2_mem is NULL, chipmem will be
161  * returned instead.
162  * XXX
163  */
164 void *
165 alloc_z2mem(amount)
166 	long amount;
167 {
168 	if (use_z2_mem && z2mem_end && (z2mem_end - amount) >= z2mem_start) {
169 		z2mem_end -= amount;
170 		return ((void *)z2mem_end);
171 	}
172 	return (alloc_chipmem(amount));
173 }
174 
175 
176 /*
177  * this is the C-level entry function, it's called from locore.s.
178  * Preconditions:
179  *	Interrupts are disabled
180  *	PA may not be == VA, so we may have to relocate addresses
181  *		before enabling the MMU
182  * 	Exec is no longer available (because we're loaded all over
183  *		low memory, no ExecBase is available anymore)
184  *
185  * Its purpose is:
186  *	Do the things that are done in locore.s in the hp300 version,
187  *		this includes allocation of kernel maps and enabling the MMU.
188  *
189  * Some of the code in here is `stolen' from Amiga MACH, and was
190  * written by Bryan Ford and Niklas Hallqvist.
191  *
192  * Very crude 68040 support by Michael L. Hitch.
193  *
194  */
195 
/* nonzero: map kernel data copyback-cacheable on 68040/060 (see below) */
int kernel_copyback = 1;

/*
 * start_c: C-level kernel bootstrap, called from locore.s with the MMU
 * still off and interrupts disabled.
 *
 *	id		machine id word (top byte 0x7D == DraCo)
 *	fphystart	physical start of fastmem the kernel will run in
 *	fphysize	size of that fastmem segment
 *	cphysize	size of chip memory
 *	esym_addr	end of loaded symbol data, or NULL
 *	flags		boot flags (bit 0: AGA enable; bits 1-2: noncontig)
 *	inh_sync	SCSI sync-negotiation inhibit mask
 *	boot_part	boot partition passed from the loader
 */
void
start_c(id, fphystart, fphysize, cphysize, esym_addr, flags, inh_sync,
								boot_part)
	int id;
	u_int fphystart, fphysize, cphysize;
	char *esym_addr;
	u_int flags;
	u_long inh_sync;
	u_long boot_part;
{
	extern char end[];
	extern u_int protorp[2];
	struct cfdev *cd;
	u_int pstart, pend, vstart, vend, avail;
	u_int pt, ptpa, ptsize, ptextra, kstsize;
	u_int Sysptmap_pa;
	register st_entry_t sg_proto, *sg, *esg;
	register pt_entry_t pg_proto, *pg;
	u_int tc, end_loaded, ncd, i;
	struct boot_memlist *ml;
	u_int loadbase = 0;	/* XXXXXXXXXXXXXXXXXXXXXXXXXXXX */
	u_int *shadow_pt = 0;	/* XXXXXXXXXXXXXXXXXXXXXXXXXXXX */
#ifdef	P5PPC68KBOARD
        struct cfdev *cdp, *ecdp;
#endif

#ifdef DEBUG_KERNEL_START
	/* XXX this only is valid if Altais is in slot 0 */
	volatile u_int8_t *altaiscolpt = (u_int8_t *)0x200003c8;
	volatile u_int8_t *altaiscol = (u_int8_t *)0x200003c9;
#endif

	/*
	 * If this stack local lives above chip memory, we were loaded
	 * into fastmem and all absolute addresses must be relocated by
	 * fphystart until the MMU is on (see the RELOC() macro).
	 */
	if ((u_int)&loadbase > cphysize)
		loadbase = fphystart;

#ifdef DEBUG_KERNEL_START
	if ((id>>24)==0x7D) {
		*altaiscolpt = 0;
		*altaiscol = 40;
		*altaiscol = 0;
		*altaiscol = 0;
	} else
((volatile struct Custom *)0xdff000)->color[0] = 0xa00;		/* RED */
#endif

#ifdef LIMITMEM
	if (fphysize > LIMITMEM*1024*1024)
		fphysize = LIMITMEM*1024*1024;
#endif

	/* record the boot-time memory layout for later consumers */
	RELOC(boot_fphystart, u_long) = fphystart;
	RELOC(boot_fphysize, u_long) = fphysize;
	RELOC(boot_cphysize, u_long) = cphysize;

	RELOC(machineid, int) = id;
	RELOC(chipmem_end, vaddr_t) = cphysize;
	RELOC(esym, char *) = esym_addr;
	RELOC(boot_flags, u_long) = flags;
	RELOC(boot_partition, u_long) = boot_part;
#ifdef GRF_AGA
	if (flags & 1)
		RELOC(aga_enable, u_long) |= 1;
#endif
	if (flags & (3 << 1))
		RELOC(noncontig_enable, u_long) = (flags >> 1) & 3;

	RELOC(scsi_nosync, u_long) = inh_sync;

	/*
	 * the kernel ends at end(), plus the cfdev and memlist structures
	 * we placed there in the loader.  Correct for this now.  Also,
	 * account for kernel symbols if they are present.
	 */
	if (esym_addr == NULL)
		end_loaded = (u_int) &end;
	else
		end_loaded = (u_int) esym_addr;
	/* layout after the kernel: int count, then ncfdev cfdev structs */
	RELOC(ncfdev, int) = *(int *)(&RELOC(*(u_int *)end_loaded, u_int));
	RELOC(cfdev, struct cfdev *) = (struct cfdev *) ((int)end_loaded + 4);
	end_loaded += 4 + RELOC(ncfdev, int) * sizeof(struct cfdev);

	RELOC(memlist, struct boot_memlist *) =
	    (struct boot_memlist *)end_loaded;
	ml = &RELOC(*(struct boot_memlist *)end_loaded, struct boot_memlist);
	end_loaded = (u_int) &((RELOC(memlist, struct boot_memlist *))->
	    m_seg[ml->m_nseg]);

	/*
	 * Get ZorroII (16-bit) memory if there is any and it's not where the
	 * kernel is loaded.
	 */
	if (ml->m_nseg > 0 && ml->m_nseg < 16 && RELOC(use_z2_mem, int)) {
		struct boot_memseg *sp, *esp;

		sp = ml->m_seg;
		esp = sp + ml->m_nseg;
		for (; sp < esp; sp++) {
			if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA))
			    != (MEMF_FAST|MEMF_24BITDMA))
				continue;
			if (sp->ms_start == fphystart)
				continue;
			RELOC(z2mem_end, paddr_t) =
			    sp->ms_start + sp->ms_size;
			/* take up to 7*MAXPHYS*use_z2_mem from segment top */
			RELOC(z2mem_start, paddr_t) =
			    RELOC(z2mem_end, paddr_t) - MAXPHYS *
			    RELOC(use_z2_mem, int) * 7;
			RELOC(NZTWOMEMPG, u_int) =
			    (RELOC(z2mem_end, paddr_t) -
			    RELOC(z2mem_start, paddr_t)) / NBPG;
			if ((RELOC(z2mem_end, paddr_t) -
			    RELOC(z2mem_start, paddr_t)) > sp->ms_size) {
				RELOC(NZTWOMEMPG, u_int) = sp->ms_size / NBPG;
				RELOC(z2mem_start, paddr_t) =
				    RELOC(z2mem_end, paddr_t) - sp->ms_size;
			}
			break;
		}
	}

	/*
	 * Scan ConfigDev list and get size of Zorro I/O boards that are
	 * outside the Zorro II I/O area.
	 */
	for (RELOC(ZBUSAVAIL, u_int) = 0, cd =
	    &RELOC(*RELOC(cfdev, struct cfdev *),struct cfdev),
	    ncd = RELOC(ncfdev, int); ncd > 0; ncd--, cd++) {
		int bd_type = cd->rom.type & (ERT_TYPEMASK | ERTF_MEMLIST);

		if (bd_type != ERT_ZORROIII &&
		    (bd_type != ERT_ZORROII || isztwopa(cd->addr)))
			continue;	/* It's not Z2 or Z3 I/O board */
		/*
		 *  Hack to adjust board size for Zorro III boards that
		 *  do not specify an extended size or subsize.  This is
		 *  specifically for the GVP Spectrum and hopefully won't
		 *  break with other boards that configure like this.
		 */
		if (bd_type == ERT_ZORROIII &&
		    !(cd->rom.flags & ERFF_EXTENDED) &&
		    (cd->rom.flags & ERT_Z3_SSMASK) == 0)
			cd->size = 0x10000 <<
			    ((cd->rom.type - 1) & ERT_MEMMASK);
		RELOC(ZBUSAVAIL, u_int) += m68k_round_page(cd->size);
	}

	/*
	 * assume KVA_MIN == 0.  We subtract the kernel code (and
	 * the configdev's and memlists) from the virtual and
	 * physical starts and ends.
	 */
	vend   = fphysize;
	avail  = vend;
	vstart = (u_int) end_loaded;
	vstart = m68k_round_page (vstart);
	pstart = vstart + fphystart;
	pend   = vend   + fphystart;
	avail -= vstart;

	/* 68040/060 need a larger (multi-page) level-1/2 segment table */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;

	/*
	 * allocate the kernel segment table
	 */
	RELOC(Sysseg_pa, u_int) = pstart;
	RELOC(Sysseg, u_int) = vstart;
	vstart += NBPG * kstsize;
	pstart += NBPG * kstsize;
	avail -= NBPG * kstsize;

	/*
	 * allocate initial page table pages
	 */
	pt = vstart;
	ptpa = pstart;
	/* ptextra: pages of PTEs needed for the hardware I/O mappings */
#ifdef DRACO
	if ((id>>24)==0x7D) {
		ptextra = NDRCCPG
		    + RELOC(NZTWOMEMPG, u_int)
		    + btoc(RELOC(ZBUSAVAIL, u_int));
	} else
#endif
	ptextra = NCHIPMEMPG + NCIAPG + NZTWOROMPG + RELOC(NZTWOMEMPG, u_int) +
	    btoc(RELOC(ZBUSAVAIL, u_int)) + NPCMCIAPG;
	/*
	 * if kernel shadow mapping will overlap any initial mapping
	 * of Zorro I/O space or the page table map, we need to
	 * adjust things to remove the overlap.
	 */
	if (loadbase != 0) {
		/* What to do, what to do? */
	}

	ptsize = (RELOC(Sysptsize, u_int) +
	    howmany(ptextra, NPTEPG)) << PGSHIFT;

	vstart += ptsize;
	pstart += ptsize;
	avail -= ptsize;

	/*
	 * allocate kernel page table map
	 */
	RELOC(Sysptmap, u_int) = vstart;
	Sysptmap_pa = pstart;
	vstart += NBPG;
	pstart += NBPG;
	avail -= NBPG;

	/*
	 * pt maps the first N megs of ram Sysptmap comes directly
	 * after pt (ptpa) and so it must map >= N meg + Its one
	 * page and so it must map 8M of space.  Specifically
	 * Sysptmap holds the pte's that map the kernel page tables.
	 *
	 * We want Sysmap to be the first address mapped by Sysptmap.
	 * this will be the address just above what pt,pt+ptsize maps.
	 * pt[0] maps address 0 so:
	 *
	 *		ptsize
	 * Sysmap  =	------ * NBPG
	 *		  4
	 */
	RELOC(Sysmap, u_int *) = (u_int *)(ptsize * (NBPG / 4));

	/*
	 * initialize segment table and page table map
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" values).
		 */
		sg = (u_int *)RELOC(Sysseg_pa, u_int);
		esg = &sg[kstsize * NPTEPG];
		while (sg < esg)
			*sg++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages + 1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		i = ((ptsize >> PGSHIFT) + 1) * (NPTEPG / SG4_LEV3SIZE);
		sg = &((u_int *)(RELOC(Sysseg_pa, u_int)))[SG4_LEV1SIZE];
		if (loadbase != 0)
			/* start of next L2 table */
			shadow_pt = &sg[roundup(i, SG4_LEV2SIZE)];
		esg = &sg[i];
		sg_proto = ptpa | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEVEL2SIZE
		 * level 1 descriptors to map the 'num' level 2's.
		 */
		i = roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE;
		RELOC(protostfree, u_int) =
		    (-1 << (i + 1)) /* & ~(-1 << MAXKL2SIZE) */;
		sg = (u_int *) RELOC(Sysseg_pa, u_int);
		esg = &sg[i];
		sg_proto = (u_int)&sg[SG4_LEV1SIZE] | SG_U | SG_RW |SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Set up a "shadow" mapping so the kernel keeps running
		 * at its current (load) address once translation is on.
		 */
		if (loadbase != 0) {
			sg = (u_int *)RELOC(Sysseg_pa, u_int);
			if (sg[loadbase >> SG4_SHIFT1] == 0) {
				/* allocate another level 2 table */
				sg[loadbase >> SG4_SHIFT1] =
				    (u_int)shadow_pt | SG_U | SG_RW | SG_V;
				shadow_pt = NULL;
				RELOC(protostfree, u_int) =
				    RELOC(protostfree, u_int) << 1;
			}
			sg = (u_int *)(sg[loadbase >> SG4_SHIFT1] & SG4_ADDR1);
			if (sg[(loadbase & SG4_MASK2) >> SG4_SHIFT2] == 0) {
				/* no page table exists, need to allocate it */
				sg_proto = pstart | SG_U | SG_RW | SG_V;
				sg = &sg[(loadbase & SG4_MASK2) >> SG4_SHIFT2];
				sg = (u_int *)((int)sg &
					~(NBPG / SG4_LEV3SIZE - 1));
				esg = &sg[NPTEPG / SG4_LEV3SIZE];
				while (sg < esg) {
					*sg++ = sg_proto;
					sg_proto += SG4_LEV3SIZE *
						sizeof (st_entry_t);
				}
				pg = (u_int *) pstart;
				esg = (u_int *)&pg[NPTEPG];
				while (pg < esg)
					*pg++ = PG_NV;
				pstart += NBPG;
				vstart += NBPG;
				avail -= NBPG;
				/* ptmap??? */
			}
			sg = (u_int *)RELOC(Sysseg_pa, u_int);
			sg = (u_int *)(sg[loadbase >> SG4_SHIFT1] & SG4_ADDR1);
			shadow_pt =
			    ((u_int *)(sg[(loadbase & SG4_MASK2) >> SG4_SHIFT2]
				& SG4_ADDR1)) +
			    ((loadbase & SG4_MASK3) >> SG4_SHIFT3); /* XXX is */

		}
		/*
		 * Initialize Sysptmap
		 */
		sg = (u_int *) Sysptmap_pa;
		esg = &sg[(ptsize >> PGSHIFT) + 1];
		pg_proto = ptpa | PG_RW | PG_CI | PG_V;
		while (sg < esg) {
			*sg++ = pg_proto;
			pg_proto += NBPG;
		}
		/*
		 * Invalidate rest of Sysptmap page
		 */
		esg = (u_int *)(Sysptmap_pa + NBPG);
		while (sg < esg)
			*sg++ = SG_NV;
	} else
#endif /* M68040 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page, hence the +1.
		 */
		sg = (u_int *)RELOC(Sysseg_pa, u_int);
		pg = (u_int *)Sysptmap_pa;
		esg = &pg[(ptsize >> PGSHIFT) + 1];
		sg_proto = ptpa | SG_RW | SG_V;
		pg_proto = ptpa | PG_RW | PG_CI | PG_V;
		while (pg < esg) {
			*sg++ = sg_proto;
			*pg++ = pg_proto;
			sg_proto += NBPG;
			pg_proto += NBPG;
		}
		/*
		 * invalidate the remainder of each table
		 */
		esg = (u_int *)(Sysptmap_pa + NBPG);
		while (pg < esg) {
			*sg++ = SG_NV;
			*pg++ = PG_NV;
		}

		/* shadow mapping of the kernel at its load address (2-level MMU) */
		if (loadbase != 0) {
			sg = (u_int *)RELOC(Sysseg_pa, u_int);
			if (sg[loadbase >> SG_ISHIFT] == 0) {
				/* no page table exists, need to allocate it */
				sg[loadbase >> SG_ISHIFT] =
				    pstart | SG_RW | SG_V;
				pg = (u_int *)Sysptmap_pa;
				pg[loadbase >> SG_ISHIFT] =
				    pstart | PG_RW | PG_CI | PG_V;
				pg = (u_int *) pstart;
				esg = (u_int *)&pg[NPTEPG];
				while (pg < esg)
					*pg++ = PG_NV;
				pstart += NBPG;
				vstart += NBPG;
				avail -= NBPG;
			}
			shadow_pt =
			    ((u_int *)(sg[loadbase >> SG_ISHIFT] & 0xffffff00))
			    + ((loadbase & SG_PMASK) >> SG_PSHIFT);
		}
	}

	/*
	 * initialize kernel page table page(s) (assume load at VA 0)
	 */
	pg_proto = fphystart | PG_RO | PG_V;	/* text pages are RO */
	pg       = (u_int *) ptpa;
	*pg++ = PG_NV;				/* Make page 0 invalid */
	pg_proto += NBPG;
	for (i = NBPG; i < (u_int) etext; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * map the kernel segment table cache invalidated for
	 * these machines (for the 68040 not strictly necessary, but
	 * recommended by Motorola; for the 68060 mandatory)
	 */
	if (RELOC(mmutype, int) == MMU_68040) {

		if (RELOC(kernel_copyback, int))
			pg_proto |= PG_CCB;

		/*
		 * ASSUME: segment table and statically allocated page tables
		 * of the kernel are contiguously allocated, start at
		 * Sysseg and end at the current value of vstart.
		 */
		for (; i<RELOC(Sysseg, u_int); i+= NBPG, pg_proto += NBPG)
			*pg++ = pg_proto;

		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; i < vstart; i += NBPG, pg_proto += NBPG)
			*pg++ = pg_proto;

		pg_proto = (pg_proto & ~PG_CI);
		if (RELOC(kernel_copyback, int))
			pg_proto |= PG_CCB;
	}
#endif
	/*
	 * go till end of data allocated so far
	 * plus proc0 u-area (to be allocated)
	 */
	for (; i < vstart + USPACE; i += NBPG, pg_proto += NBPG)
		*pg++ = pg_proto;
	/*
	 * invalidate remainder of kernel PT
	 */
	while (pg < (u_int *) (ptpa + ptsize))
		*pg++ = PG_NV;

	/*
	 * go back and validate internal IO PTEs
	 * at end of allocated PT space
	 */
	pg      -= ptextra;
#ifdef DRACO
	if ((id >> 24) == 0x7D) {
		pg_proto = DRCCBASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < DRZ2BASE) {
			*pg++ = pg_proto;
			pg_proto += DRCCSTRIDE;
		}

		/* NCR 53C710 chip */
		*pg++ = DRSCSIBASE | PG_RW | PG_CI | PG_V;

#ifdef DEBUG_KERNEL_START
		/*
		 * early rollcolor Altais mapping
		 * XXX (only works if in slot 0)
		 */
		*pg++ = 0x20000000 | PG_RW | PG_CI | PG_V;
#endif
	} else
#endif
	{
		pg_proto = CHIPMEMBASE | PG_RW | PG_CI | PG_V;
						/* CI needed here?? */
		while (pg_proto < CHIPMEMTOP) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}
	}
	if (RELOC(z2mem_end, paddr_t)) {			/* XXX */
		pg_proto = RELOC(z2mem_start, paddr_t) |	/* XXX */
		    PG_RW | PG_V;				/* XXX */
		while (pg_proto < RELOC(z2mem_end, paddr_t)) { /* XXX */
			*pg++ = pg_proto;			/* XXX */
			pg_proto += NBPG;			/* XXX */
		}						/* XXX */
	}							/* XXX */
#ifdef DRACO
	if ((id >> 24) != 0x7D)
#endif
	{
		pg_proto = CIABASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < CIATOP) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}
		pg_proto  = ZTWOROMBASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < ZTWOROMTOP) {
			*pg++     = pg_proto;
			pg_proto += NBPG;
		}
	}

	/*
	 * Initialize any "shadow" mapping of the kernel
	 */
	if (loadbase != 0 && shadow_pt != 0) {
		RELOC(amigashdwaddr, vaddr_t) = (u_int)shadow_pt - loadbase;
		RELOC(namigashdwpg, u_int) = (vstart + USPACE) >> PGSHIFT;
		pg_proto = fphystart | PG_RO | PG_V;
		pg = shadow_pt;
		*pg++ = PG_NV;			/* Make page 0 invalid */
		pg_proto += NBPG;
		for (i = NBPG; i < (u_int)etext; i += NBPG, pg_proto += NBPG)
			*pg++ = pg_proto;
		pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;
		for (; i < vstart + USPACE; i += NBPG, pg_proto += NBPG)
			*pg++ = pg_proto;
	}

	/*
	 *[ following page tables MAY be allocated to ZORRO3 space,
	 * but they're then later mapped in autoconf.c ]
	 */

	/* zero out proc0 user area */
/*	bzero ((u_char *)pstart, USPACE);*/	/* XXXXXXXXXXXXXXXXXXXXX */

	/*
	 * save KVA of proc0 u-area and allocate it.
	 */
	RELOC(proc0paddr, u_int) = vstart;
	pstart += USPACE;
	vstart += USPACE;
	avail -= USPACE;

	/*
	 * init mem sizes
	 */
	RELOC(maxmem, u_int)  = pend >> PGSHIFT;
	RELOC(lowram, u_int)  = fphystart;
	RELOC(physmem, u_int) = fphysize >> PGSHIFT;

	/*
	 * Put user page tables starting at next 16MB boundary, to make kernel
	 * dumps more readable, with guaranteed 16MB of headroom.
	 * XXX depends on Sysmap being last.
	 * XXX 16 MB instead of 256 MB should be enough, but...
	 * we need to fix the fastmem loading first. (see comment at line 375)
	 */
	RELOC(amiga_uptbase, vaddr_t) =
	    roundup(RELOC(Sysmap, u_int) + 0x10000000, 0x10000000);

	/*
	 * get the pmap module in sync with reality.
	 */
/*	pmap_bootstrap(pstart, fphystart);*/	/* XXXXXXXXXXXXXXXXXXXXXXx*/

	/*
	 * record base KVA of IO spaces which are just before Sysmap
	 */
#ifdef DRACO
	if ((id >> 24) == 0x7D) {
		RELOC(DRCCADDR, u_int) =
		    (u_int)RELOC(Sysmap, u_int) - ptextra * NBPG;

		RELOC(CIAADDR, vaddr_t) =
		    RELOC(DRCCADDR, u_int) + DRCIAPG * NBPG;

		if (RELOC(z2mem_end, vaddr_t)) {		/* XXX */
			RELOC(ZTWOMEMADDR, vaddr_t) =
			    RELOC(DRCCADDR, u_int) + NDRCCPG * NBPG;

			RELOC(ZBUSADDR, vaddr_t) =
			    RELOC(ZTWOMEMADDR, vaddr_t) +
			    RELOC(NZTWOMEMPG, u_int)*NBPG;
		} else {
			RELOC(ZBUSADDR, vaddr_t) =
			    RELOC(DRCCADDR, u_int) + NDRCCPG * NBPG;
		}

		/*
		 * some nice variables for pmap to use
		 */
		RELOC(amigahwaddr, vaddr_t) = RELOC(DRCCADDR, u_int);
	} else
#endif
	{
		RELOC(CHIPMEMADDR, vaddr_t) =
		    (u_int)RELOC(Sysmap, u_int) - ptextra * NBPG;
		if (RELOC(z2mem_end, u_int) == 0)
			RELOC(CIAADDR, vaddr_t) =
			    RELOC(CHIPMEMADDR, vaddr_t) + NCHIPMEMPG * NBPG;
		else {
			RELOC(ZTWOMEMADDR, vaddr_t) =
			    RELOC(CHIPMEMADDR, vaddr_t) + NCHIPMEMPG * NBPG;
			RELOC(CIAADDR, vaddr_t) = RELOC(ZTWOMEMADDR, vaddr_t) +
				RELOC(NZTWOMEMPG, u_int) * NBPG;
		}
		RELOC(ZTWOROMADDR, vaddr_t)  =
		    RELOC(CIAADDR, vaddr_t) + NCIAPG * NBPG;
		RELOC(ZBUSADDR, vaddr_t) =
		    RELOC(ZTWOROMADDR, vaddr_t) + NZTWOROMPG * NBPG;
		/* not on 8k boundary :-( */
		RELOC(CIAADDR, vaddr_t) += NBPG/2;
		RELOC(CUSTOMADDR, vaddr_t)  =
		    RELOC(ZTWOROMADDR, vaddr_t) - ZTWOROMBASE + CUSTOMBASE;
		/*
		 * some nice variables for pmap to use
		 */
		RELOC(amigahwaddr, vaddr_t) = RELOC(CHIPMEMADDR, vaddr_t);
	}

	/* Set number of pages to reserve for mapping Amiga hardware pages */
	RELOC(namigahwpg, u_int) = ptextra;

	/*
	 * set this before copying the kernel, so the variable is updated in
	 * the `real' place too. protorp[0] is already preset to the
	 * CRP setting.
	 */
	RELOC(protorp[1], u_int) = RELOC(Sysseg_pa, u_int);

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram.  DONT use bcopy(), this beast is much larger
	 * than 128k !
	 */
	if (loadbase == 0) {
		register u_int *lp, *le, *fp;

		lp = 0;
		le = (u_int *)end_loaded;
		fp = (u_int *)fphystart;
		while (lp < le)
			*fp++ = *lp++;
	}

#ifdef DEBUG_KERNEL_START
	if ((id>>24)==0x7D) {
		*altaiscolpt = 0;
		*altaiscol = 40;
		*altaiscol = 40;
		*altaiscol = 0;
	} else
((volatile struct Custom *)0xdff000)->color[0] = 0xAA0;		/* YELLOW */
#endif
	/*
	 * prepare to enable the MMU
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		/*
		 * movel Sysseg_pa,a0;
		 * movec a0,SRP;
		 * pflusha;
		 * movel #$0xc000,d0;
		 * movec d0,TC
		 */

		if (id & AMIGA_68060) {
			/* do i need to clear the branch cache? */
			asm volatile (	".word 0x4e7a,0x0002;"
					"orl #0x400000,%%d0;"
					".word 0x4e7b,0x0002" : : : "d0");
		}

		asm volatile ("movel %0,%%a0; .word 0x4e7b,0x8807"
		    : : "a" (RELOC(Sysseg_pa, u_int)) : "a0");
		asm volatile (".word 0xf518" : : );

#ifdef DEBUG_KERNEL_START
		if ((id>>24)==0x7D) {
			*altaiscolpt = 0;
			*altaiscol = 40;
			*altaiscol = 33;
			*altaiscol = 0;
		} else
((volatile struct Custom *)0xdff000)->color[0] = 0xA70;		/* ORANGE */
#endif

		asm volatile ("movel #0xc000,%%d0; .word 0x4e7b,0x0003"
		    : : :"d0" );
	} else
#endif
	{

		/*
		 * setup and load SRP
		 * nolimit, share global, 4 byte PTE's
		 */
		(RELOC(protorp[0], u_int)) = 0x80000202;
		asm volatile ("pmove %0@,%%srp":: "a" (&RELOC(protorp, u_int)));
		/*
		 * setup and load TC register.
		 * enable_cpr, enable_srp, pagesize=8k,
		 * A = 8 bits, B = 11 bits
		 */
		tc = 0x82d08b00;
		asm volatile ("pmove %0@,%%tc" : : "a" (&tc));
	}
#ifdef DEBUG_KERNEL_START
#ifdef DRACO
	if ((id >> 24) == 0x7D) { /* mapping on, is_draco() is valid */
		int i;
		/* XXX experimental Altais register mapping only */
		altaiscolpt = (volatile u_int8_t *)(DRCCADDR+NBPG*9+0x3c8);
		altaiscol = altaiscolpt + 1;
		for (i=0; i<140000; i++) {
			*altaiscolpt = 0;
			*altaiscol = 0;
			*altaiscol = 40;
			*altaiscol = 0;
		}
	} else
#endif
((volatile struct Custom *)CUSTOMADDR)->color[0] = 0x0a0;	/* GREEN */
#endif

	/* MMU is now on: normal variable access works from here down */
	bzero ((u_char *)proc0paddr, USPACE);	/* XXXXXXXXXXXXXXXXXXXXX */
	pmap_bootstrap(pstart, fphystart);	/* XXXXXXXXXXXXXXXXXXXXXXx*/

	/*
	 * to make life easier in locore.s, set these addresses explicitly
	 */
	CIAAbase = CIAADDR + 0x1001;	/* CIA-A at odd addresses ! */
	CIABbase = CIAADDR;
	CUSTOMbase = CUSTOMADDR;
#ifdef DRACO
	if (is_draco()) {
		draco_intena = (volatile u_int8_t *)DRCCADDR+1;
		draco_intpen = draco_intena + NBPG;
		draco_intfrc = draco_intpen + NBPG;
		draco_misc = draco_intfrc + NBPG;
		draco_ioct = (struct drioct *)(DRCCADDR + DRIOCTLPG*NBPG);
	} else
#endif
	{
		INTREQRaddr = (vaddr_t)&custom.intreqr;
		INTREQWaddr = (vaddr_t)&custom.intreq;
	}
	/*
	 * Get our chip memory allocation system working
	 */
	chipmem_start += CHIPMEMADDR;
	chipmem_end   += CHIPMEMADDR;

	/* XXX is: this MUST NOT BE DONE before the pmap_bootstrap() call */
	if (z2mem_end) {
		z2mem_end = ZTWOMEMADDR + NZTWOMEMPG * NBPG;
		z2mem_start = ZTWOMEMADDR;
	}

	/*
	 * Read and write back one word of the proc0 u-area.
	 * NOTE(review): presumably this touch forces the mapping/cache
	 * into a known state before locore uses it -- confirm.
	 */
	i = *(int *)proc0paddr;
	*(volatile int *)proc0paddr = i;

	/*
	 * disable all interrupts but allow them to be enabled
	 * by specific driver code (global int enable bit)
	 */
#ifdef DRACO
	if (is_draco()) {
		/* XXX to be done. For now, just: */
		*draco_intena = 0;
		*draco_intpen = 0;
		*draco_intfrc = 0;
		ciaa.icr = 0x7f;			/* and keyboard */
		ciab.icr = 0x7f;			/* and again */

		draco_ioct->io_control &=
		    ~(DRCNTRL_KBDINTENA|DRCNTRL_FDCINTENA); /* and another */

		draco_ioct->io_status2 &=
		    ~(DRSTAT2_PARIRQENA|DRSTAT2_TMRINTENA); /* some more */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*NBPG + 4*(0x3F8 + 1)) = 0; /* and com0 */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*NBPG + 4*(0x2F8 + 1)) = 0; /* and com1 */

		draco_ioct->io_control |= DRCNTRL_WDOGDIS; /* stop Fido */
		*draco_misc &= ~1/*DRMISC_FASTZ2*/;

	} else
#endif
	{
		custom.intena = 0x7fff;			/* disable ints */
		custom.intena = INTF_SETCLR | INTF_INTEN;
							/* but allow them */
		custom.intreq = 0x7fff;			/* clear any current */
		ciaa.icr = 0x7f;			/* and keyboard */
		ciab.icr = 0x7f;			/* and again */

		/*
		 * remember address of read and write intena register for use
		 * by extended spl?() macros.
		 */
		amiga_intena_read  = &custom.intenar;
		amiga_intena_write = &custom.intena;
	}

	/*
	 * This is needed for 3000's with superkick ROM's. Bit 7 of
	 * 0xde0002 enables the ROM if set. If this isn't set the machine
	 * has to be powercycled in order for it to boot again. ICKA! RFH
	 */
	if (is_a3000()) {
		volatile unsigned char *a3000_magic_reset;

		a3000_magic_reset = (unsigned char *)ztwomap(0xde0002);

		/* Turn SuperKick ROM (V36) back on */
		*a3000_magic_reset |= 0x80;
	}

#ifdef	P5PPC68KBOARD
	/*
	 * Are we an P5 PPC/68K board? install different reset
	 * routine.
	 */

        for (cdp = cfdev, ecdp = &cfdev[ncfdev]; cdp < ecdp; cdp++) {
		if (cdp->rom.manid == 8512 &&
		    (cdp->rom.prodid == 100 || cdp->rom.prodid == 110)) {
		    		p5ppc = 1;
				break;
			}
        }
#endif
}
1022 
1023 void
1024 start_c_cleanup()
1025 {
1026 	u_int *sg, *esg;
1027 	extern u_int32_t delaydivisor;
1028 
1029 	/*
1030 	 * remove shadow mapping of kernel?
1031 	 */
1032 	if (amigashdwaddr == 0)
1033 		return;
1034 	sg = (u_int *) amigashdwaddr;
1035 	esg = (u_int *)&sg[namigashdwpg];
1036 	while (sg < esg)
1037 		*sg++ = PG_NV;
1038 
1039 	/*
1040 	 * preliminary delay divisor value
1041 	 */
1042 
1043 	if (machineid & AMIGA_68060)
1044 		delaydivisor = (1024 * 1) / 80;	/* 80 MHz 68060 w. BTC */
1045 
1046 	else if (machineid & AMIGA_68040)
1047 		delaydivisor = (1024 * 3) / 40;	/* 40 MHz 68040 */
1048 
1049 	else if (machineid & AMIGA_68030)
1050 		delaydivisor = (1024 * 8) / 50;	/* 50 MHz 68030 */
1051 
1052 	else
1053 		delaydivisor = (1024 * 8) / 33; /* 33 MHz 68020 */
1054 }
1055 
1056 void
1057 rollcolor(color)
1058 	int color;
1059 {
1060 	int s, i;
1061 
1062 	s = splhigh();
1063 	/*
1064 	 * need to adjust count -
1065 	 * too slow when cache off, too fast when cache on
1066 	 */
1067 	for (i = 0; i < 400000; i++)
1068 		((volatile struct Custom *)CUSTOMbase)->color[0] = color;
1069 	splx(s);
1070 }
1071 
1072 /*
1073  * Kernel reloading code
1074  */
1075 
/* State shared across the sequential writes that load a new kernel
   via /dev/reload; see kernel_reload_write(). */
static struct exec kernel_exec;		/* exec header of incoming kernel */
static u_char *kernel_image;		/* staging buffer; NULL until header seen */
static u_long kernel_text_size, kernel_load_ofs; /* rounded text size; current load offset */
static u_long kernel_load_phase;	/* 0 text, 1 data, 3 symbols, 2 done */
static u_long kernel_load_endseg;	/* offset at which current segment ends */
static u_long kernel_symbol_size, kernel_symbol_esym; /* estimated symbol space; end of symbols */
1082 
1083 /* This supports the /dev/reload device, major 2, minor 20,
1084    hooked into mem.c.  Author: Bryan Ford.  */
1085 
1086 /*
1087  * This is called below to find out how much magic storage
1088  * will be needed after a kernel image to be reloaded.
1089  */
1090 static int
1091 kernel_image_magic_size()
1092 {
1093 	int sz;
1094 
1095 	/* 4 + cfdev's + Mem_Seg's + 4 */
1096 	sz = 8 + ncfdev * sizeof(struct cfdev)
1097 	    + memlist->m_nseg * sizeof(struct boot_memseg);
1098 	return(sz);
1099 }
1100 
1101 /* This actually copies the magic information.  */
/*
 * Copy the boot "magic" data to dest: the cfdev count word, the cfdev
 * array, the boot memory segment list, and a trailing word.
 * NOTE(review): the single bcopy starting at cfdev assumes the memory
 * segment list is laid out contiguously after the cfdev array in
 * memory — confirm against the boot loader's data layout.
 */
static void
kernel_image_magic_copy(dest)
	u_char *dest;
{
	*((int*)dest) = ncfdev;		/* leading count word */
	dest += 4;
	bcopy(cfdev, dest, ncfdev * sizeof(struct cfdev)
	    + memlist->m_nseg * sizeof(struct boot_memseg) + 4);
}
1111 
/* Load page size used to round the text segment below; overrides any
   system definition — presumably matches the boot loader's alignment.
   TODO confirm. */
#undef __LDPGSZ
#define __LDPGSZ 8192 /* XXX ??? */
1114 
/*
 * Write handler for the /dev/reload device: accepts an a.out kernel
 * image via sequential writes, stages it in a malloc'd buffer, and
 * finally hands it to kernel_reload() (in locore.s) to boot it.
 *
 * The first write must contain at least the exec header; later
 * writes feed a state machine keyed on kernel_load_phase:
 *   0 = loading text, 1 = loading data, 3 = loading symbol table,
 *   2 = image complete (boot attempted).
 *
 * Returns 0 while more data is expected, an errno on failure, or
 * ENODEV if kernel_reload() declined to start the new image.
 */
int
kernel_reload_write(uio)
	struct uio *uio;
{
	extern int eclockfreq;
	struct iovec *iov;
	int error, c;

	iov = uio->uio_iov;

	if (kernel_image == 0) {
		/*
		 * We have to get at least the whole exec header
		 * in the first write.
		 */
		if (iov->iov_len < sizeof(kernel_exec))
			return ENOEXEC;		/* XXX */

		/*
		 * Pull in the exec header and check it.
		 */
		if ((error = uiomove((caddr_t)&kernel_exec, sizeof(kernel_exec),
		     uio)) != 0)
			return(error);
		printf("loading kernel %ld+%ld+%ld+%ld\n", kernel_exec.a_text,
		    kernel_exec.a_data, kernel_exec.a_bss,
		    esym == NULL ? 0 : kernel_exec.a_syms);
		/*
		 * Looks good - allocate memory for a kernel image.
		 * Round the text segment up to a load page boundary.
		 */
		kernel_text_size = (kernel_exec.a_text
			+ __LDPGSZ - 1) & (-__LDPGSZ);
		/*
		 * Estimate space needed for symbol names, since we don't
		 * know how big it really is: allow 16 bytes of string
		 * space per (presumably 12-byte) symbol table entry.
		 * If the guess is too small the strings are truncated
		 * (see case 3 below).
		 */
		if (esym != NULL) {
			kernel_symbol_size = kernel_exec.a_syms;
			kernel_symbol_size += 16 * (kernel_symbol_size / 12);
		}
		/*
		 * XXX - should check that image will fit in CHIP memory
		 * XXX return an error if it doesn't
		 */
		if ((kernel_text_size + kernel_exec.a_data +
		    kernel_exec.a_bss + kernel_symbol_size +
		    kernel_image_magic_size()) > boot_cphysize)
			return (EFBIG);
		kernel_image = malloc(kernel_text_size + kernel_exec.a_data
			+ kernel_exec.a_bss
			+ kernel_symbol_size
			+ kernel_image_magic_size(),
			M_TEMP, M_WAITOK);
		kernel_load_ofs = 0;
		kernel_load_phase = 0;
		kernel_load_endseg = kernel_exec.a_text;
		return(0);
	}
	/*
	 * Continue loading in the kernel image; move at most MAXPHYS
	 * bytes and never past the end of the current segment.
	 */
	c = min(iov->iov_len, kernel_load_endseg - kernel_load_ofs);
	c = min(c, MAXPHYS);
	if ((error = uiomove(kernel_image + kernel_load_ofs, (int)c, uio)) != 0)
		return(error);
	kernel_load_ofs += c;

	/*
	 * Fun and games to handle loading symbols - the length of the
	 * string table isn't known until after the symbol table has
	 * been loaded.  We have to load the kernel text, data, and
	 * the symbol table, then get the size of the strings.  A
	 * new kernel image is then allocated and the data currently
	 * loaded moved to the new image.  Then continue reading the
	 * string table.  This has problems if there isn't enough
	 * room to allocate space for the two copies of the kernel
	 * image.  So the approach I took is to guess at the size
	 * of the symbol strings.  If the guess is wrong, the symbol
	 * table is ignored.
	 */

	if (kernel_load_ofs != kernel_load_endseg)
		return(0);

	/* Current segment complete - advance the load state machine. */
	switch (kernel_load_phase) {
	case 0:		/* done loading kernel text */
		kernel_load_ofs = kernel_text_size;
		kernel_load_endseg = kernel_load_ofs + kernel_exec.a_data;
		kernel_load_phase = 1;
		break;
	case 1:		/* done loading kernel data */
		/* Zero-fill the bss region following the data segment. */
		for(c = 0; c < kernel_exec.a_bss; c++)
			kernel_image[kernel_load_ofs + c] = 0;
		kernel_load_ofs += kernel_exec.a_bss;
		if (esym) {
			/*
			 * Prepend the symbol table size word; the +8
			 * presumably covers this word plus the string
			 * table length word that follows the symbols
			 * (read back in case 3 below).
			 */
			kernel_load_endseg = kernel_load_ofs
			    + kernel_exec.a_syms + 8;
			*((u_long *)(kernel_image + kernel_load_ofs)) =
			    kernel_exec.a_syms;
			kernel_load_ofs += 4;
			kernel_load_phase = 3;
			break;
		}
		/*FALLTHROUGH*/
	case 2:		/* done loading kernel */

		/*
		 * Put the finishing touches on the kernel image.
		 */
		kernel_image_magic_copy(kernel_image + kernel_load_ofs);
		/*
		 * Start the new kernel with code in locore.s.
		 */
		kernel_reload(kernel_image,
		    kernel_load_ofs + kernel_image_magic_size(),
		    kernel_exec.a_entry, boot_fphystart, boot_fphysize,
		    boot_cphysize, kernel_symbol_esym, eclockfreq,
		    boot_flags, scsi_nosync, boot_partition);
		/*
		 * kernel_reload() now checks to see if the reload_code
		 * is at the same location in the new kernel.
		 * If it isn't, it will return and we will return
		 * an error.
		 */
		free(kernel_image, M_TEMP);
		kernel_image = NULL;
		return (ENODEV);	/* Say operation not supported */
	case 3:		/* done loading kernel symbol table */
		/*
		 * The word just loaded is the string table length;
		 * clamp it to the estimate made above, silently
		 * dropping any excess string data.
		 */
		c = *((u_long *)(kernel_image + kernel_load_ofs - 4));
		if (c > 16 * (kernel_exec.a_syms / 12))
			c = 16 * (kernel_exec.a_syms / 12);
		/* -4: the length word itself was already loaded. */
		kernel_load_endseg += c - 4;
		kernel_symbol_esym = kernel_load_endseg;
#ifdef notyet
		kernel_image_copy = kernel_image;
		kernel_image = malloc(kernel_load_ofs + c
		    + kernel_image_magic_size(), M_TEMP, M_WAITOK);
		if (kernel_image == NULL)
			panic("kernel_reload failed second malloc");
		for (c = 0; c < kernel_load_ofs; c += MAXPHYS)
			bcopy(kernel_image_copy + c, kernel_image + c,
			    (kernel_load_ofs - c) > MAXPHYS ? MAXPHYS :
			    kernel_load_ofs - c);
#endif
		kernel_load_phase = 2;
	}
	return(0);
}
1263