xref: /netbsd/sys/arch/amigappc/amigappc/machdep.c (revision c4a72b64)
1 /* $NetBSD: machdep.c,v 1.22 2002/09/25 22:21:03 thorpej Exp $ */
2 
3 /*
4  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5  * Copyright (C) 1995, 1996 TooLs GmbH.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by TooLs GmbH.
19  * 4. The name of TooLs GmbH may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "opt_ddb.h"
35 #include "opt_ipkdb.h"
36 
37 #include <sys/param.h>
38 #include <sys/buf.h>
39 #include <sys/exec.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/mount.h>
43 #include <sys/msgbuf.h>
44 #include <sys/proc.h>
45 #include <sys/syslog.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/user.h>
49 
50 #include <uvm/uvm_extern.h>
51 
52 #include <machine/powerpc.h>
53 #include <machine/bat.h>
54 #include <machine/mtpr.h>
55 #include <machine/trap.h>
56 #include <machine/hid.h>
57 #include <machine/cpu.h>
58 
59 #include <amiga/amiga/cc.h>
60 #include <amiga/amiga/cia.h>
61 
62 /*
63  * Global variables used here and there
64  * from macppc/machdep.c
65  */
66 struct pcb *curpcb;
67 struct pmap *curpm;
68 struct proc *fpuproc;
69 
70 extern struct user *proc0paddr;
71 
72 /* from amiga/machdep.c */
73 char cpu_model[80];
74 char machine[] = MACHINE;
75 char machine_arch[] = MACHINE_ARCH;
76 
77 /* XXX: should be in extintr.c */
78 volatile int cpl, ipending, astpending, tickspending;
79 int imask[NIPL];
80 
81 /* Our exported CPU info; we can have only one. */
82 struct cpu_info cpu_info_store;
83 
84 struct vm_map *exec_map = NULL;
85 struct vm_map *mb_map = NULL;
86 struct vm_map *phys_map = NULL;
87 
88 struct bat battable[16];
89 extern int aga_enable, eclockfreq;
90 
91 #define PPCMEMREGIONS 32
92 static struct mem_region PPCmem[PPCMEMREGIONS + 1], PPCavail[PPCMEMREGIONS + 3];
93 
94 void show_me_regs(void);
95 
96 void
97 initppc(startkernel, endkernel)
98         u_int startkernel, endkernel;
99 {
100 	extern void cpu_fail(void);
101 	extern adamint, adamintsize;
102 	extern extint, extsize;
103 	extern trapcode, trapsize;
104 	extern alitrap, alisize;
105 	extern dsitrap, dsisize;
106 	extern isitrap, isisize;
107 	extern decrint, decrsize;
108 	extern tlbimiss, tlbimsize;
109 	extern tlbdlmiss, tlbdlmsize;
110 	extern tlbdsmiss, tlbdsmsize;
111 #ifdef DDB
112 	extern ddblow, ddbsize;
113 #endif
114 #ifdef IPKDB
115 	extern ipkdblow, ipkdbsize;
116 #endif
117 	int exc, scratch;
118 
119 	/* hardwired memory layout for now (nothing is probed yet) */
120 	PPCmem[0].start = 0x8000000;
121 	PPCmem[0].size  = 0x5f80000;
122 	PPCmem[1].start = 0x7c00000;
123 	PPCmem[1].size  = 0x0400000;
124 	PPCmem[2].start = 0x0;
125 	PPCmem[2].size  = 0x0;
126 
127 	PPCavail[0].start = 0x8000000;
128 	PPCavail[0].size  = 0x5f80000;
129 	PPCavail[1].start = (0x7c00000 + endkernel + PGOFSET) & ~PGOFSET;
130 	PPCavail[1].size  = 0x8000000 - PPCavail[1].start;
131 /*
132 	PPCavail[1].start = 0x7c00000;
133 	PPCavail[1].size  = 0x0400000;
134 */
135 	PPCavail[2].start = 0x0;
136 	PPCavail[2].size  = 0x0;
137 
138 	CHIPMEMADDR = 0x0;
139 	chipmem_start = 0x0;
140 	chipmem_end  = 0x200000;
141 
142 	CIAADDR = 0xbfd000;
143 	CIAAbase = CIAADDR + 0x1001;
144 	CIABbase = CIAADDR;
145 
146 	CUSTOMADDR = 0xdff000;
147 	CUSTOMbase = CUSTOMADDR;
148 
149 	eclockfreq = 709379;
150 
151 	aga_enable = 1;
152 	machineid = 4000 << 16;
153 
154 	/* Initialize BAT tables */
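	/*
	 * From the values below: BAT0 maps the low 16MB (chip memory and
	 * the custom chip registers, cache-inhibited and guarded); BAT1
	 * and BAT2 map the fast-RAM ranges listed in PPCmem[] above; BAT3
	 * covers the 512K at 0xfff00000, presumably the firmware and
	 * reset-vector area (assumption).
	 */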
155 	battable[0].batl = BATL(0x00000000, BAT_I|BAT_G, BAT_PP_RW);
156 	battable[0].batu = BATU(0x00000000, BAT_BL_16M, BAT_Vs);
157 	battable[1].batl = BATL(0x08000000, 0, BAT_PP_RW);
158 	battable[1].batu = BATU(0x08000000, BAT_BL_128M, BAT_Vs);
159 	battable[2].batl = BATL(0x07000000, 0, BAT_PP_RW);
160 	battable[2].batu = BATU(0x07000000, BAT_BL_16M, BAT_Vs);
161 	battable[3].batl = BATL(0xfff00000, 0, BAT_PP_RW);
162 	battable[3].batu = BATU(0xfff00000, BAT_BL_512K, BAT_Vs);
163 
164 	/* Load BAT registers */
165 	asm volatile ("mtibatl 0,%0; mtibatu 0,%1;"
166 		"mtdbatl 0,%0; mtdbatu 0,%1" ::
167 		"r"(battable[0].batl), "r"(battable[0].batu));
168 	asm volatile ("mtibatl 1,%0; mtibatu 1,%1;"
169 		"mtdbatl 1,%0; mtdbatu 1,%1" ::
170 		"r"(battable[1].batl), "r"(battable[1].batu));
171 	asm volatile ("mtibatl 2,%0; mtibatu 2,%1;"
172 		"mtdbatl 2,%0; mtdbatu 2,%1" ::
173 		"r"(battable[2].batl), "r"(battable[2].batu));
174 	asm volatile ("mtibatl 3,%0; mtibatu 3,%1;"
175 		"mtdbatl 3,%0; mtdbatu 3,%1" ::
176 		"r"(battable[3].batl), "r"(battable[3].batu));
177 
178 	proc0.p_addr = proc0paddr;
179 	bzero(proc0.p_addr, sizeof *proc0.p_addr);
180 	curpcb = &proc0paddr->u_pcb;
181 	curpm = curpcb->pcb_pmreal = curpcb->pcb_pm = pmap_kernel();
182 
183 	/*
184 	 * Set up trap vectors
185 	 */
186 	for (exc = EXC_RSVD + EXC_UPPER; exc <= EXC_LAST + EXC_UPPER; exc += 0x100) {
187 		switch (exc - EXC_UPPER) {
188 		default:
189 			memcpy((void *)exc, &trapcode, (size_t)&trapsize);
190 			break;
191 		case EXC_MCHK:
192 			memcpy((void *)exc, &adamint, (size_t)&adamintsize);
193 			break;
194 		case EXC_EXI:
195 			memcpy((void *)exc, &extint, (size_t)&extsize);
196 			/*
197 			 * This one is (potentially) installed during autoconf
198 			 */
199 			break;
200 		case EXC_ALI:
201 			memcpy((void *)exc, &alitrap, (size_t)&alisize);
202 			break;
203 		case EXC_DSI:
204 			memcpy((void *)exc, &dsitrap, (size_t)&dsisize);
205 			break;
206 		case EXC_ISI:
207 			memcpy((void *)exc, &isitrap, (size_t)&isisize);
208 			break;
209 		case EXC_DECR:
210 			memcpy((void *)exc, &decrint, (size_t)&decrsize);
211 			break;
212 		case EXC_IMISS:
213 			memcpy((void *)exc, &tlbimiss, (size_t)&tlbimsize);
214 			break;
215 		case EXC_DLMISS:
216 			memcpy((void *)exc, &tlbdlmiss, (size_t)&tlbdlmsize);
217 			break;
218 		case EXC_DSMISS:
219 			memcpy((void *)exc, &tlbdsmiss, (size_t)&tlbdsmsize);
220 			break;
221 
222 #if defined(DDB) || defined(IPKDB)
223 		case EXC_PGM:
224 		case EXC_TRC:
225 		case EXC_BPT:
226 #if defined(DDB)
227 			memcpy((void *)exc, &ddblow, (size_t)&ddbsize);
228 #else
229 			memcpy((void *)exc, &ipkdblow, (size_t)&ipkdbsize);
230 #endif
231 			break;
232 #endif /* DDB || IPKDB */
233 		}
234 	}
235 
236 	/* Flush the freshly copied exception vectors out of the data cache
237 	 * and invalidate the instruction cache over that range. */
238 	__syncicache((void *)(EXC_RST + EXC_UPPER), EXC_LAST - EXC_RST + 0x100);
239 
240 	/*
241 	 * Enable translation and interrupts
242 	 */
243 	asm volatile ("mfmsr %0; ori %0,%0,%1; mtmsr %0; isync" :
244 		"=r"(scratch) : "K"(PSL_EE|PSL_IR|PSL_DR|PSL_ME|PSL_RI));
245 
246 	/*
247 	 * Set the page size
248 	 */
249 	uvm_setpagesize();
250 
251 	/*
252 	 * Initialize pmap module
253 	 */
254 	pmap_bootstrap(startkernel, endkernel, NULL);
255 }
256 
257 
258 /* XXX: temporarily here, moved from intr.h */
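/*
 * These spl*() primitives poke what appears to be the phase5 interrupt
 * priority register at 0xf60030 (an assumption based on the p5_ipl name):
 * the low three bits hold the current IPL, inverted, and the 0x80 bit
 * seems to latch a newly written priority.
 */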
259 volatile int
260 splraise(ncpl)
261 	int ncpl;
262 {
263 	int ocpl;
264 	volatile unsigned char *p5_ipl = (void *)0xf60030;
265 
266 	ocpl = ~(*p5_ipl) & 7;
267 
268 	__asm__ volatile("sync; eieio\n");	/* don't reorder.... */
269 
270 	/*custom.intreq = 0x7fff;*/
271 	if (ncpl > ocpl) {
272 		/* disable int */
273 		/*p5_ipl = 0xc0;*/
274 		/* clear bits */
275 		*p5_ipl = 7;
276 		/* set new priority */
277 		*p5_ipl = 0x80 | (~ncpl & 7);
278 		/* enable int */
279 		/*p5_ipl = 0x40;*/
280 	}
281 
282 	__asm__ volatile("sync; eieio\n");	/* reorder protect */
283 	return (ocpl);
284 }
285 
286 volatile void
287 splx(ncpl)
288 	int ncpl;
289 {
290 	volatile unsigned char *p5_ipl = (void *)0xf60030;
291 
292 	__asm__ volatile("sync; eieio\n");	/* reorder protect */
293 
294 /*	if (ipending & ~ncpl)
295 		do_pending_int();*/
296 
297 	custom.intreq = custom.intreqr;
298 	/* disable int */
299 	/*p5_ipl = 0xc0;*/
300 	/* clear bits */
301 	*p5_ipl = 0x07;
302 	/* set new priority */
303 	*p5_ipl = 0x80 | (~ncpl & 7);
304 	/* enable int */
305 	/*p5_ipl = 0x40;*/
306 
307 	__asm__ volatile("sync; eieio\n");	/* reorder protect */
308 }
309 
310 volatile int
311 spllower(ncpl)
312 	int ncpl;
313 {
314 	int ocpl;
315 	volatile unsigned char *p5_ipl = (void *)0xf60030;
316 
317 	ocpl = ~(*p5_ipl) & 7;
318 
319 	__asm__ volatile("sync; eieio\n");	/* reorder protect */
320 
321 /*	if (ipending & ~ncpl)
322 		do_pending_int();*/
323 
324 	/*custom.intreq = 0x7fff;*/
325 	if (ncpl < ocpl) {
326 		/* disable int */
327 		/*p5_ipl = 0xc0;*/
328 		/* clear bits */
329 		*p5_ipl = 7;
330 		/* set new priority */
331 		*p5_ipl = 0x80 | (~ncpl & 7);
332 		/* enable int */
333 		/*p5_ipl = 0x40;*/
334 	}
335 
336 	__asm__ volatile("sync; eieio\n");	/* reorder protect */
337 	return (ocpl);
338 }
339 
340 /* Should use lwarx/stwcx. here to avoid the disable/enable; needs another
341  * read of the manual.  A hedged sketch follows softintr() below. */
342 volatile void
343 softintr(ipl)
344 	int ipl;
345 {
346 	int msrsave;
347 
348 	__asm__ volatile("mfmsr %0" : "=r"(msrsave));
349 	__asm__ volatile("mtmsr %0" :: "r"(msrsave & ~PSL_EE));
350 	ipending |= 1 << ipl;
351 	__asm__ volatile("mtmsr %0" :: "r"(msrsave));
352 }
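/*
 * A hedged sketch of the lwarx/stwcx. variant mentioned above: set the bit
 * in ipending with a load-reserve/store-conditional loop instead of
 * clearing PSL_EE.  Untested; assumes ipending stays a plain, word-aligned
 * word that may also be updated from interrupt context.
 */
#ifdef notyet
static void
softintr_atomic(ipl)
	int ipl;
{
	int old, new;

	__asm__ volatile(
		"1:	lwarx	%0,0,%2\n"	/* load ipending, take reservation */
		"	or	%1,%0,%3\n"	/* set the bit for this ipl */
		"	stwcx.	%1,0,%2\n"	/* store only if still reserved */
		"	bne-	1b"		/* lost the reservation: retry */
		: "=&r"(old), "=&r"(new)
		: "r"(&ipending), "r"(1 << ipl)
		: "cc", "memory");
}
#endif	/* notyet */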
353 /* XXX: end of intr.h */
354 
355 
356 /* show PPC registers */
357 void show_me_regs()
358 {
359 	register u_long	scr0, scr1, scr2, scr3;
360 
361 	asm volatile ("mfspr %0,1; mfspr %1,8; mfspr %2,9; mfspr %3,18"
362 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
363 	printf("XER   %08lx\tLR    %08lx\tCTR   %08lx\tDSISR %08lx\n",
364 		scr0, scr1, scr2, scr3);
365 
366 	asm volatile ("mfspr %0,19; mfspr %1,22; mfspr %2,25; mfspr %3,26"
367 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
368 	printf("DAR   %08lx\tDEC   %08lx\tSDR1  %08lx\tSRR0  %08lx\n",
369 		scr0, scr1, scr2, scr3);
370 
371 	asm volatile ("mfspr %0,27; mfspr %1,268; mfspr %2,269; mfspr %3,272"
372 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
373 	printf("SRR1  %08lx\tTBL   %08lx\tTBU   %08lx\tSPRG0 %08lx\n",
374 		scr0, scr1, scr2, scr3);
375 
376 	asm volatile ("mfspr %0,273; mfspr %1,274; mfspr %2,275; mfspr %3,282"
377 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
378 	printf("SPRG1 %08lx\tSPRG2 %08lx\tSPRG3 %08lx\tEAR   %08lx\n",
379 		scr0, scr1, scr2, scr3);
380 
381 	asm volatile ("mfspr %0,528; mfspr %1,529; mfspr %2,530; mfspr %3,531"
382 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
383 	printf("IBAT0U%08lx\tIBAT0L%08lx\tIBAT1U%08lx\tIBAT1L%08lx\n",
384 		scr0, scr1, scr2, scr3);
385 
386 	asm volatile ("mfspr %0,532; mfspr %1,533; mfspr %2,534; mfspr %3,535"
387 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
388 	printf("IBAT2U%08lx\tIBAT2L%08lx\tIBAT3U%08lx\tIBAT3L%08lx\n",
389 		scr0, scr1, scr2, scr3);
390 
391 	asm volatile ("mfspr %0,536; mfspr %1,537; mfspr %2,538; mfspr %3,539"
392 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
393 	printf("DBAT0U%08lx\tDBAT0L%08lx\tDBAT1U%08lx\tDBAT1L%08lx\n",
394 		scr0, scr1, scr2, scr3);
395 
396 	asm volatile ("mfspr %0,540; mfspr %1,541; mfspr %2,542; mfspr %3,543"
397 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
398 	printf("DBAT2U%08lx\tDBAT2L%08lx\tDBAT3U%08lx\tDBAT3L%08lx\n",
399 		scr0, scr1, scr2, scr3);
400 
401 	asm volatile ("mfspr %0,1008; mfspr %1,1009; mfspr %2,1010; mfspr %3,1013"
402 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
403 	printf("HID0  %08lx\tHID1  %08lx\tIABR  %08lx\tDABR  %08lx\n",
404 		scr0, scr1, scr2, scr3);
405 
406 	asm volatile ("mfspr %0,953; mfspr %1,954; mfspr %2,957; mfspr %3,958"
407 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
408 	printf("PCM1  %08lx\tPCM2  %08lx\tPCM3  %08lx\tPCM4  %08lx\n",
409 		scr0, scr1, scr2, scr3);
410 
411 	asm volatile ("mfspr %0,952; mfspr %1,956; mfspr %2,959; mfspr %3,955"
412 		: "=r"(scr0),"=r"(scr1),"=r"(scr2),"=r"(scr3) :);
413 	printf("MMCR0 %08lx\tMMCR1 %08lx\tSDA   %08lx\tSIA   %08lx\n",
414 		scr0, scr1, scr2, scr3);
415 }
416 
417 
418 /*
419  * This is called during initppc, before the system is really initialized.
420  * It shall provide the total and the available regions of RAM.
421  * Both lists must have a zero-size entry as terminator.
422  * The available regions need not take the kernel into account, but must
423  * provide space for two additional entries beyond the terminating one.
424  */
425 void
426 mem_regions(memp, availp)
427 	struct mem_region **memp, **availp;
428 {
429 	*memp = PPCmem;
430 	*availp = PPCavail;
431 }
432 
433 
434 /* XXX: presumably meant to run the pending interrupts recorded in ipending; only issues a sync for now */
435 void
436 do_pending_int(void)
437 {
438 	asm volatile ("sync");
439 }
440 
441 /*
442  * Interrupt handler: dispatch on the custom chip INTREQR bits and acknowledge them
443  */
444 void
445 intrhand()
446 {
447 	register unsigned short ireq;
448 
449 	ireq = custom.intreqr;
450 
451 	/* transmit buffer empty */
452 	if (ireq & INTF_TBE) {
453 #if NSER > 0
454 		ser_outintr();
455 #else
456 		custom.intreq = INTF_TBE;
457 #endif
458 	}
459 
460 	/* disk block */
461 	if (ireq & INTF_DSKBLK) {
462 #if NFD > 0
463 		fdintr(0);
464 #endif
465 		custom.intreq = INTF_DSKBLK;
466 	}
467 
468 	/* software */
469 	if (ireq & INTF_SOFTINT) {
470 		custom.intreq = INTF_SOFTINT;
471 	}
472 
473 	/* ports */
474 	if (ireq & INTF_PORTS) {
475 		ciaa_intr();
476 		custom.intreq = INTF_PORTS;
477 	}
478 
479 	/* vertical blank */
480 	if (ireq & INTF_VERTB) {
481 		vbl_handler();
482 	}
483 
484 	/* blitter */
485 	if (ireq & INTF_BLIT) {
486 		blitter_handler();
487 	}
488 
489 	/* copper */
490 	if (ireq & INTF_COPER) {
491 		copper_handler();
492 	}
493 }
494 
495 
496 struct isr *isr_ports;
497 struct isr *isr_exter;
498 
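/*
 * Append an interrupt service routine to the PORTS (ipl 2) or EXTER
 * chain and enable the corresponding interrupt in INTENA.
 */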
499 void
500 add_isr(isr)
501 	struct isr *isr;
502 {
503 	struct isr **p, *q;
504 
505 	p = isr->isr_ipl == 2 ? &isr_ports : &isr_exter;
506 
507 	while ((q = *p) != NULL) {
508 		p = &q->isr_forw;
509 	}
510 	isr->isr_forw = NULL;
511 	*p = isr;
512 	/* enable interrupt */
513 	custom.intena = isr->isr_ipl == 2 ? INTF_SETCLR | INTF_PORTS :
514 						INTF_SETCLR | INTF_EXTER;
515 }
516 
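/*
 * Unlink an interrupt service routine from its chain (EXTER for ipl 6,
 * PORTS otherwise) and disable that interrupt once the chain is empty.
 */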
517 void
518 remove_isr(isr)
519 	struct isr *isr;
520 {
521 	struct isr **p, *q;
522 
523 	p = isr->isr_ipl == 6 ? &isr_exter : &isr_ports;
524 
525 	while ((q = *p) != NULL && q != isr) {
526 		p = &q->isr_forw;
527 	}
528 	if (q) {
529 		*p = q->isr_forw;
530 	}
531 	else {
532 		panic("remove_isr: handler not registered");
533 	}
534 
535 	/* disable interrupt if no more handlers */
536 	p = isr->isr_ipl == 6 ? &isr_exter : &isr_ports;
537 	if (*p == NULL) {
538 		custom.intena = isr->isr_ipl == 6 ? INTF_EXTER : INTF_PORTS;
539 	}
540 }
541 
542 
543 /*
544  * This is a handy package for having function calls executed
545  * asynchronously at very low interrupt priority.  An example use is
546  * keyboard repeat, where the repeat handler running at splclock()
547  * triggers such a (hardware-aided) software interrupt.
548  * Note: the installed functions are currently called in LIFO order;
549  * this might be changed to FIFO later.
550  * (A hedged sketch of a queueing routine follows the declarations
551  * below.)
552  */
553 struct si_callback {
554 	struct si_callback *next;
555 	void (*function) __P((void *rock1, void *rock2));
556 	void *rock1, *rock2;
557 };
558 static struct si_callback *si_callbacks;
559 static struct si_callback *si_free;
560 #ifdef DIAGNOSTIC
561 static int ncb;		/* number of callback blocks allocated */
562 static int ncbd;	/* number of callback blocks dynamically allocated */
563 #endif
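
/*
 * A hedged sketch (not part of this file) of how a callback might be
 * queued from interrupt context, assuming a hypothetical add_sicallback()
 * and that ipl 1 is the soft-interrupt level; the dispatching side is not
 * shown here.
 */
#ifdef notyet
static void
add_sicallback(function, rock1, rock2)
	void (*function) __P((void *, void *));
	void *rock1, *rock2;
{
	struct si_callback *si;
	int s;

	s = splhigh();
	si = si_free;				/* grab a preallocated block */
	if (si != NULL)
		si_free = si->next;
	splx(s);
	if (si == NULL) {
		si = (struct si_callback *)malloc(sizeof(*si), M_TEMP, M_NOWAIT);
		if (si == NULL)
			return;			/* give up on the request */
#ifdef DIAGNOSTIC
		++ncbd;				/* dynamically allocated */
#endif
	}
	si->function = function;
	si->rock1 = rock1;
	si->rock2 = rock2;
	s = splhigh();
	si->next = si_callbacks;		/* LIFO, as noted above */
	si_callbacks = si;
	splx(s);
	softintr(1);				/* request the soft interrupt (assumed ipl) */
}
#endif	/* notyet */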
564 
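/*
 * Preallocate one si_callback block and put it on the free list so that a
 * callback can later be queued without calling malloc() at interrupt time.
 */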
565 void
566 alloc_sicallback()
567 {
568 	struct si_callback *si;
569 	int s;
570 
571 	si = (struct si_callback *)malloc(sizeof(*si), M_TEMP, M_NOWAIT);
572 	if (si == NULL)	{
573 		return;
574 	}
575 	s = splhigh();
576 	si->next = si_free;
577 	si_free = si;
578 	splx(s);
579 #ifdef DIAGNOSTIC
580 	++ncb;
581 #endif
582 }
583 
584 
585 /*
586 int
587 sys_sysarch()
588 {
589 return 0;
590 }*/
591 
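/*
 * Figure out the Amiga model, the PowerUP/CyberStorm/Blizzard board type,
 * and the CPU type and clock, and build the cpu_model string printed at
 * boot.
 */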
592 void
593 identifycpu()
594 {
595 	register int pvr, hid1;
596 	char *mach, *pup, *cpu;
597 	static const char pll[] = {10, 10, 70, 0, 20, 65, 25, 45,
598 			30, 55, 40, 50, 15, 60, 35, 0};
599 	const char *p5type_p = (const char *)0xf00010;
600 	int cpuclock, busclock;
601 
602 	/* Amiga type */
603 	if (is_a4000()) {
604 		mach = "Amiga 4000";
605 	}
606 	else if (is_a3000()) {
607 		mach = "Amiga 3000";
608 	}
609 	else {
610 		mach = "Amiga 1200";
611 	}
612 
613 	asm ("mfpvr %0; mfspr %1,1009" : "=r"(pvr), "=r"(hid1));
614 
615 	/* XXX remove this */ printf("p5serial = %.8s\n", p5type_p);
616 	switch (p5type_p[0]) {
617 	case 'D':
618 		pup = "[PowerUP]";
619 		break;
620 	case 'E':
621 		pup = "[CSPPC]";
622 		break;
623 	case 'F':
624 		pup = "[CS Mk.III]";
625 		break;
626 	case 'I':
627 		pup = "[BlizzardPPC]";
628 		break;
629 	default:
630 		pup = "";
631 		break;
632 	}
633 
634 	switch (p5type_p[1]) {
635 	case 'A':
636 		busclock = 60000000/4;
637 		cpuclock = 600;
638 		break;
639 	/* case B, C, D */
640 	default:
641 		busclock = 66000000/4;
642 		cpuclock = 666;
643 		break;
644 	}
645 	/*
646 	 * compute cpuclock based on PLL configuration in HID1
647 	 * XXX: based on 604e, should work for 603e
648 	 */
649 	hid1 = hid1>>28 & 0xf;
650 	cpuclock = cpuclock*pll[hid1]/100;
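	/*
	 * e.g. a 66MHz board (cpuclock preset to 666) with PLL_CFG 0xA
	 * (pll[10] == 40, i.e. 4.0x) gives 666 * 40 / 100 == 266 MHz.
	 */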
651 
652 	/* find CPU type */
653 	switch (pvr >> 16) {
654 	case 1:
655 		cpu = "601";
656 		break;
657 	case 3:
658 		cpu = "603";
659 		break;
660 	case 4:
661 		cpu = "604";
662 		break;
663 	case 5:
664 		cpu = "602";
665 		break;
666 	case 6:
667 		cpu = "603e";
668 		break;
669 	case 7:
670 		cpu = "603e+";
671 		break;
672 	case 8:
673 		cpu = "750";
674 		break;
675 	case 9:
676 	case 10:
677 		cpu = "604e";
678 		break;
679 	case 12:
680 		cpu = "7400";
681 		break;
682 	case 20:
683 		cpu = "620";
684 		break;
685 	default:
686 		cpu = "unknown";
687 		break;
688 	}
689 
690 	snprintf(cpu_model, sizeof(cpu_model),
691 		"%s %s (%s v%d.%d %d MHz, busclk %d kHz)", mach, pup, cpu,
692 		pvr>>8 & 0xff, pvr & 0xff, cpuclock, busclock / 1000);
693 	printf("%s\n", cpu_model);
694 }
695 
696 /*
697  * Machine dependent startup code
698  */
699 void
700 cpu_startup()
701 {
702 	u_int i, base, residual;
703 	caddr_t	v;
704 	vaddr_t minaddr, maxaddr;
705 	vsize_t size;
706 	char pbuf[9];
707 
708 	initmsgbuf((caddr_t)msgbuf_paddr, round_page(MSGBUFSIZE));
709 
710 	proc0.p_addr = proc0paddr;
711 	v = (caddr_t)proc0paddr + USPACE;
712 
713 	printf(version);
714 	identifycpu();
715 
716 	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
717 	printf("total memory = %s\n", pbuf);
718 
719 	/*
720 	 * Find out how much space we need, allocate it,
721 	 * and then give everything true virtual addresses
722 	 */
723 	size = (int)allocsys(NULL, NULL);
724 	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0) {
725 		panic("startup: no room for tables");
726 	}
727 	if (allocsys(v, NULL) - v != size) {
728 		panic("startup: table size inconsistency");
729 	}
730 
731 	/*
732 	 * Now allocate the buffers proper; they are different from the above
733 	 * in that they usually occupy more virtual memory than physical
734 	 */
735 	size = MAXBSIZE * nbuf;
736 	minaddr = 0;
737 	if (uvm_map(kernel_map, (vaddr_t *)&minaddr, round_page(size), NULL,
738 		UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
739 		UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0) {
740 		panic("startup: cannot allocate VM for buffers");
741 	}
742 	buffers = (char *)minaddr;
743 	base = bufpages / nbuf;
744 	residual = bufpages % nbuf;
745 	if (base >= MAXBSIZE) {
746 		/* Don't want to alloc more physical mem than ever needed */
747 		base = MAXBSIZE;
748 		residual = 0;
749 	}
750 	for (i = 0; i < nbuf; i++) {
751 		vsize_t curbufsize;
752 		vaddr_t curbuf;
753 		struct vm_page *pg;
754 
755 		/*
756 		 * Each buffer has MAXBSIZE bytes of VM space allocated.
757 		 * Of that MAXBSIZE space, we allocate and map (base+1) pages
758 		 * for the first "residual" buffers, and then we allocate
759 		 * "base" pages for the rest.
760 		 */
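		/*
		 * e.g. with bufpages = 100 and nbuf = 16: base = 6,
		 * residual = 4, so the first 4 buffers get 7 pages each
		 * and the remaining 12 get 6.
		 */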
761 		curbuf = (vaddr_t)buffers + i * MAXBSIZE;
762 		curbufsize = NBPG * (i < residual ? base + 1 : base);
763 
764 		while (curbufsize) {
765 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
766 			if (pg == NULL) {
767 				panic("cpu_startup: not enough memory for "
768 					"buffer cache");
769 			}
770 			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
771 			    VM_PROT_READ | VM_PROT_WRITE);
772 			curbuf += PAGE_SIZE;
773 			curbufsize -= PAGE_SIZE;
774 		}
775 	}
776 	pmap_update(kernel_map->pmap);
777 
778 	/*
779 	 * Allocate a submap for exec arguments.  This map effectively
780 	 * limits the number of processes exec'ing at any time
781 	 */
782 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
783 				16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
784 
785 	/*
786 	 * Allocate a submap for physio
787 	 */
788 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
789 				VM_PHYS_SIZE, 0, FALSE, NULL);
790 
791 	/*
792 	 * No need to allocate an mbuf cluster submap.  Mbuf clusters
793 	 * are allocated via the pool allocator, and we use direct-mapped
794 	 * pool pages
795 	 */
796 
797 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
798 	printf("avail memory = %s\n", pbuf);
799 	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
800 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
801 
802 	/*
803 	 * Set up the buffers, so they can be used to read disk labels
804 	 */
805 	bufinit();
806 }
807 
808 /*
809  * consinit
810  * Initialize system console.
811  */
812 void
813 consinit()
814 {
815 	custom_chips_init();
816 	/*
817 	 * Initialize the console before we print anything out.
818 	*/
819 	cninit();
820 }
821 
822 /*
823  * Halt or reboot the machine after syncing/dumping according to howto
824  */
825 void
826 cpu_reboot(howto, what)
827 	int howto;
828 	char *what;
829 {
830 	static int syncing;
831 	static char str[256];
832 
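	/* XXX: still a stub -- no syncing, dumping, halting or resetting yet */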
833 	howto = 0;
834 }
835 
836 int
837 lcsplx(ipl)
838 	int ipl;
839 {
840 	return spllower(ipl);   /* XXX */
841 }
842 
843 /*
844  * Convert kernel VA to physical address
845  */
846 int
847 kvtop(addr)
848 	caddr_t addr;
849 {
850 	vaddr_t va;
851 	paddr_t pa;
852 	int off;
853 	extern char end[];
854 
855 	if (addr < end)
856 		return (int)addr;
857 
858 	va = trunc_page((vaddr_t)addr);
859 	off = (int)addr - va;
860 
861 	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE) {
862 		/*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
863 		return (int)addr;
864 	}
865 
866 	return((int)pa + off);
867 }
868