1 /* $NetBSD: oea_machdep.c,v 1.84 2022/08/07 09:37:46 andvar Exp $ */
2
3 /*
4 * Copyright (C) 2002 Matt Thomas
5 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6 * Copyright (C) 1995, 1996 TooLs GmbH.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by TooLs GmbH.
20 * 4. The name of TooLs GmbH may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: oea_machdep.c,v 1.84 2022/08/07 09:37:46 andvar Exp $");
37
38 #ifdef _KERNEL_OPT
39 #include "opt_altivec.h"
40 #include "opt_ddb.h"
41 #include "opt_kgdb.h"
42 #include "opt_multiprocessor.h"
43 #include "opt_ppcarch.h"
44 #endif
45
46 #include <sys/param.h>
47 #include <sys/buf.h>
48 #include <sys/boot_flag.h>
49 #include <sys/exec.h>
50 #include <sys/kernel.h>
51 #include <sys/mbuf.h>
52 #include <sys/mount.h>
53 #include <sys/msgbuf.h>
54 #include <sys/proc.h>
55 #include <sys/reboot.h>
56 #include <sys/syscallargs.h>
57 #include <sys/syslog.h>
58 #include <sys/systm.h>
59 #include <sys/cpu.h>
60
61 #include <uvm/uvm_extern.h>
62
63 #ifdef DDB
64 #include <powerpc/db_machdep.h>
65 #include <ddb/db_extern.h>
66 #endif
67
68 #ifdef KGDB
69 #include <sys/kgdb.h>
70 #endif
71
72 #include <machine/powerpc.h>
73
74 #include <powerpc/trap.h>
75 #include <powerpc/spr.h>
76 #include <powerpc/pte.h>
77 #include <powerpc/altivec.h>
78 #include <powerpc/pcb.h>
79
80 #include <powerpc/oea/bat.h>
81 #include <powerpc/oea/cpufeat.h>
82 #include <powerpc/oea/spr.h>
83 #include <powerpc/oea/sr_601.h>
84
85 char machine[] = MACHINE; /* from <machine/param.h> */
86 char machine_arch[] = MACHINE_ARCH; /* from <machine/param.h> */
87
88 struct vm_map *phys_map = NULL;
89
90 /*
91 * Global variables used here and there
92 */
93 static void trap0(void *);
94
95 /* XXXSL: The battable is not initialized to non-zero for PPC_OEA64 and PPC_OEA64_BRIDGE */
96 struct bat battable[BAT_VA2IDX(0xffffffff)+1];
97
98 register_t iosrtable[16]; /* I/O segments, for kernel_pmap setup */
99 #ifndef MSGBUFADDR
100 paddr_t msgbuf_paddr;
101 #endif
102
103 extern int dsitrap_fix_dbat4[];
104 extern int dsitrap_fix_dbat5[];
105 extern int dsitrap_fix_dbat6[];
106 extern int dsitrap_fix_dbat7[];
107
108 /*
109  * Load a pointer with 0 behind GCC's back; otherwise it will
110  * emit a "trap" instruction instead.
111 */
112 static __inline__ uintptr_t
113 zero_value(void)
114 {
115 uintptr_t dont_tell_gcc;
116
117 __asm volatile ("li %0, 0" : "=r"(dont_tell_gcc) :);
118 return dont_tell_gcc;
119 }
120
121 void
122 oea_init(void (*handler)(void))
123 {
124 extern int trapcode[], trapsize[];
125 extern int sctrap[], scsize[];
126 extern int alitrap[], alisize[];
127 extern int dsitrap[], dsisize[];
128 extern int trapstart[], trapend[];
129 #ifdef PPC_OEA601
130 extern int dsi601trap[], dsi601size[];
131 #endif
132 extern int decrint[], decrsize[];
133 extern int tlbimiss[], tlbimsize[];
134 extern int tlbdlmiss[], tlbdlmsize[];
135 extern int tlbdsmiss[], tlbdsmsize[];
136 #if defined(DDB) || defined(KGDB)
137 extern int ddblow[], ddbsize[];
138 #endif
139 #ifdef ALTIVEC
140 register_t msr;
141 #endif
142 uintptr_t exc, exc_base;
143 #if defined(ALTIVEC) || defined(PPC_OEA)
144 register_t scratch;
145 #endif
146 unsigned int cpuvers;
147 size_t size;
148 struct cpu_info * const ci = &cpu_info[0];
149
150 #ifdef PPC_HIGH_VEC
151 exc_base = EXC_HIGHVEC;
152 #else
153 exc_base = zero_value();
154 #endif
155 KASSERT(mfspr(SPR_SPRG0) == (uintptr_t)ci);
156
157 #if defined (PPC_OEA64_BRIDGE) && defined (PPC_OEA)
158 if (oeacpufeat & OEACPU_64_BRIDGE)
159 pmap_setup64bridge();
160 else
161 pmap_setup32();
162 #endif
163
164
165 cpuvers = mfpvr() >> 16;
166
167 /*
168 * Initialize proc0 and current pcb and pmap pointers.
169 */
170 (void) ci;
171 KASSERT(ci != NULL);
172 KASSERT(curcpu() == ci);
173 KASSERT(lwp0.l_cpu == ci);
174
175 curpcb = lwp_getpcb(&lwp0);
176 memset(curpcb, 0, sizeof(struct pcb));
177
178 #ifdef ALTIVEC
179 /*
180 * Initialize the vectors with NaNs
181 */
182 for (scratch = 0; scratch < 32; scratch++) {
183 curpcb->pcb_vr.vreg[scratch][0] = 0x7FFFDEAD;
184 curpcb->pcb_vr.vreg[scratch][1] = 0x7FFFDEAD;
185 curpcb->pcb_vr.vreg[scratch][2] = 0x7FFFDEAD;
186 curpcb->pcb_vr.vreg[scratch][3] = 0x7FFFDEAD;
187 }
188 #endif
189 curpm = curpcb->pcb_pm = pmap_kernel();
190
191 /*
192 * Cause a PGM trap if we branch to 0.
193 *
194 * XXX GCC4.1 complains about memset on address zero, so
195 * don't use the builtin.
196 */
197 #undef memset
198 memset(0, 0, 0x100);
199
200 /*
201 * Set up trap vectors. Don't assume vectors are on 0x100.
202 */
203 for (exc = exc_base; exc <= exc_base + EXC_LAST; exc += 0x100) {
204 switch (exc - exc_base) {
205 default:
206 size = (size_t)trapsize;
207 memcpy((void *)exc, trapcode, size);
208 break;
209 #if 0
210 case EXC_EXI:
211 /*
212 * This one is (potentially) installed during autoconf
213 */
214 break;
215 #endif
216 case EXC_SC:
217 size = (size_t)scsize;
218 memcpy((void *)exc, sctrap, size);
219 break;
220 case EXC_ALI:
221 size = (size_t)alisize;
222 memcpy((void *)exc, alitrap, size);
223 break;
224 case EXC_DSI:
225 #ifdef PPC_OEA601
226 if (cpuvers == MPC601) {
227 size = (size_t)dsi601size;
228 memcpy((void *)exc, dsi601trap, size);
229 break;
230 } else
231 #endif /* PPC_OEA601 */
232 if (oeacpufeat & OEACPU_NOBAT) {
233 size = (size_t)alisize;
234 memcpy((void *)exc, alitrap, size);
235 } else {
236 size = (size_t)dsisize;
237 memcpy((void *)exc, dsitrap, size);
238 }
239 break;
240 case EXC_DECR:
241 size = (size_t)decrsize;
242 memcpy((void *)exc, decrint, size);
243 break;
244 case EXC_IMISS:
245 size = (size_t)tlbimsize;
246 memcpy((void *)exc, tlbimiss, size);
247 break;
248 case EXC_DLMISS:
249 size = (size_t)tlbdlmsize;
250 memcpy((void *)exc, tlbdlmiss, size);
251 break;
252 case EXC_DSMISS:
253 size = (size_t)tlbdsmsize;
254 memcpy((void *)exc, tlbdsmiss, size);
255 break;
256 case EXC_PERF:
257 size = (size_t)trapsize;
258 memcpy((void *)exc, trapcode, size);
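			/*
			 * EXC_VEC is not aligned to the 0x100 stride of this
			 * loop, so copy the default handler there explicitly.
			 */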
259 memcpy((void *)(exc_base + EXC_VEC), trapcode, size);
260 break;
261 #if defined(DDB) || defined(KGDB)
262 case EXC_RUNMODETRC:
263 #ifdef PPC_OEA601
264 if (cpuvers != MPC601)
265 #endif
266 {
267 size = (size_t)trapsize;
268 memcpy((void *)exc, trapcode, size);
269 break;
270 }
271 /* FALLTHROUGH */
272 case EXC_PGM:
273 case EXC_TRC:
274 case EXC_BPT:
275 size = (size_t)ddbsize;
276 memcpy((void *)exc, ddblow, size);
277 break;
278 #endif /* DDB || KGDB */
279 }
280 #if 0
281 exc += roundup(size, 32);
282 #endif
283 }
284
285 /*
286 * Install a branch absolute to trap0 to force a panic.
287 */
288 if ((uintptr_t)trap0 < 0x2000000) {
289 uint32_t *p = (uint32_t *)zero_value();
290
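		/*
		 * 0x7c6802a6 is "mflr r3" and 0x48000002 is "ba" (branch
		 * absolute); ORed with the address of trap0, this jumps to
		 * trap0 with the offending return address in r3, which
		 * trap0 reports in its panic message.
		 */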
291 p[0] = 0x7c6802a6;
292 p[1] = 0x48000002 | (uintptr_t) trap0;
293 }
294
295 /*
296 * Get the cache sizes because install_extint calls __syncicache.
297 */
298 cpu_probe_cache();
299
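/*
 * Raw PowerPC instruction encodings and masks used below when patching
 * the installed vectors and kernel text in place.
 */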
300 #define MxSPR_MASK 0x7c1fffff
301 #define MFSPR_MQ 0x7c0002a6
302 #define MTSPR_MQ 0x7c0003a6
303 #define MTSPR_IBAT0L 0x7c1183a6
304 #define MTSPR_IBAT1L 0x7c1383a6
305 #define NOP 0x60000000
306 #define B 0x48000000
307 #define TLBSYNC 0x7c00046c
308 #define SYNC 0x7c0004ac
309 #ifdef PPC_OEA64_BRIDGE
310 #define MFMSR_MASK 0xfc1fffff
311 #define MFMSR 0x7c0000a6
312 #define MTMSRD_MASK 0xfc1effff
313 #define MTMSRD 0x7c000164
314 #define RLDICL_MASK 0xfc00001c
315 #define RLDICL 0x78000000
316 #define RFID 0x4c000024
317 #define RFI 0x4c000064
318 #endif
319
320 #ifdef ALTIVEC
321 #define MFSPR_VRSAVE 0x7c0042a6
322 #define MTSPR_VRSAVE 0x7c0043a6
323
324 /*
325 * Try to set the VEC bit in the MSR. If it doesn't get set, we are
326  * not on an AltiVec-capable processor.
327 */
328 __asm volatile (
329 "mfmsr %0; oris %1,%0,%2@h; mtmsr %1; isync; "
330 "mfmsr %1; mtmsr %0; isync"
331 : "=r"(msr), "=r"(scratch)
332 : "J"(PSL_VEC));
333
334 /*
335  * If we aren't on an AltiVec-capable processor, we need to turn the
336  * sequences that save/restore the VRSAVE SPR into NOPs.
337 */
338 if (scratch & PSL_VEC) {
339 cpu_altivec = 1;
340 } else {
341 for (int *ip = trapstart; ip < trapend; ip++) {
342 if ((ip[0] & MxSPR_MASK) == MFSPR_VRSAVE) {
343 ip[0] = NOP; /* mfspr */
344 ip[1] = NOP; /* stw */
345 } else if ((ip[0] & MxSPR_MASK) == MTSPR_VRSAVE) {
346 ip[-1] = NOP; /* lwz */
347 ip[0] = NOP; /* mtspr */
348 }
349 }
350 }
351 #endif
352
353 /* XXX It would seem like this code could be elided ifndef 601, but
354 * doing so breaks my power3 machine.
355 */
356 /*
357  * If we aren't on an MPC601 processor, we need to turn the
358  * sequences that save/restore the MQ SPR into NOPs, and skip over
359  * the sequences where we zap/restore BAT registers on kernel exit/entry.
360 */
361 if (cpuvers != MPC601) {
362 for (int *ip = trapstart; ip < trapend; ip++) {
363 if ((ip[0] & MxSPR_MASK) == MFSPR_MQ) {
364 ip[0] = NOP; /* mfspr */
365 ip[1] = NOP; /* stw */
366 } else if ((ip[0] & MxSPR_MASK) == MTSPR_MQ) {
367 ip[-1] = NOP; /* lwz */
368 ip[0] = NOP; /* mtspr */
369 } else if ((ip[0] & MxSPR_MASK) == MTSPR_IBAT0L) {
370 if ((ip[1] & MxSPR_MASK) == MTSPR_IBAT1L)
371 ip[-1] = B | 0x14; /* li */
372 else
373 ip[-4] = B | 0x24; /* lis */
374 }
375 }
376 }
377
378 #ifdef PPC_OEA64_BRIDGE
379 if ((oeacpufeat & OEACPU_64_BRIDGE) == 0) {
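		/*
		 * Running on a plain 32-bit CPU: NOP out the 64-bit
		 * mfmsr/rldicl/mtmsrd sequences and turn each rfid into an
		 * rfi, first in the exception vectors themselves.
		 */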
380 for (int *ip = (int *)exc_base;
381 (uintptr_t)ip <= exc_base + EXC_LAST;
382 ip++) {
383 if ((ip[0] & MFMSR_MASK) == MFMSR
384 && (ip[1] & RLDICL_MASK) == RLDICL
385 && (ip[2] & MTMSRD_MASK) == MTMSRD) {
386 *ip++ = NOP;
387 *ip++ = NOP;
388 ip[0] = NOP;
389 } else if (*ip == RFID) {
390 *ip = RFI;
391 }
392 }
393
394 /*
395 	 * Now apply the same patch to the trap code proper, replacing each rfid with an rfi.
396 */
397 for (int *ip = trapstart; ip < trapend; ip++) {
398 if ((ip[0] & MFMSR_MASK) == MFMSR
399 && (ip[1] & RLDICL_MASK) == RLDICL
400 && (ip[2] & MTMSRD_MASK) == MTMSRD) {
401 *ip++ = NOP;
402 *ip++ = NOP;
403 ip[0] = NOP;
404 } else if (*ip == RFID) {
405 *ip = RFI;
406 }
407 }
408 }
409 #endif /* PPC_OEA64_BRIDGE */
410
411 /*
412 * Sync the changed instructions.
413 */
414 __syncicache((void *) trapstart,
415 (uintptr_t) trapend - (uintptr_t) trapstart);
416 __syncicache(dsitrap_fix_dbat4, 16);
417 __syncicache(dsitrap_fix_dbat7, 8);
418 #ifdef PPC_OEA601
419
420 /*
421  * If we are on an MPC601 processor, we need to turn any tlbsync
422  * instructions into sync. This differs from the above in
423  * examining all kernel text, as opposed to just the exception handlers.
424 * We sync the icache on every instruction found since there are
425 * only very few of them.
426 */
427 if (cpuvers == MPC601) {
428 extern int kernel_text[], etext[];
429 int *ip;
430
431 for (ip = kernel_text; ip < etext; ip++) {
432 if (*ip == TLBSYNC) {
433 *ip = SYNC;
434 __syncicache(ip, sizeof(*ip));
435 }
436 }
437 }
438 #endif /* PPC_OEA601 */
439
440 /*
441 * Configure a PSL user mask matching this processor.
442  * Don't allow PSL_FP/PSL_VEC to be set, since that would affect the PCU.
443 */
444 cpu_psluserset = PSL_EE | PSL_PR | PSL_ME | PSL_IR | PSL_DR | PSL_RI;
445 cpu_pslusermod = PSL_FE0 | PSL_FE1 | PSL_LE | PSL_SE | PSL_BE;
446 #ifdef PPC_OEA601
447 if (cpuvers == MPC601) {
448 cpu_psluserset &= PSL_601_MASK;
449 cpu_pslusermod &= PSL_601_MASK;
450 }
451 #endif
452 #ifdef PPC_HIGH_VEC
453 cpu_psluserset |= PSL_IP; /* XXX ok? */
454 #endif
455
456 /*
457 	 * Install the external interrupt handler, if one was supplied.
458 */
459 if (handler)
460 oea_install_extint(handler);
461
462 __syncicache((void *)exc_base, EXC_LAST + 0x100);
463
464 /*
465 * Now enable translation (and machine checks/recoverable interrupts).
466 */
467 #ifdef PPC_OEA
468 __asm volatile ("sync; mfmsr %0; ori %0,%0,%1; mtmsr %0; isync"
469 : "=r"(scratch)
470 : "K"(PSL_IR|PSL_DR|PSL_ME|PSL_RI));
471 #endif
472
473 /*
474 * Let's take all the indirect calls via our stubs and patch
475 * them to be direct calls.
476 */
477 cpu_fixup_stubs();
478
479 KASSERT(curcpu() == ci);
480 }
481
482 #ifdef PPC_OEA601
483 static void
484 mpc601_ioseg_add(paddr_t pa, register_t len)
485 {
486 const u_int i = pa >> ADDR_SR_SHFT;
487
488 if (len != BAT_BL_256M)
489 panic("mpc601_ioseg_add: len != 256M");
490
491 /*
492 * Translate into an I/O segment, load it, and stash away for use
493 * in pmap_bootstrap().
494 */
495 iosrtable[i] = SR601(SR601_Ks, SR601_BUID_MEMFORCED, 0, i);
496
497 /*
498 * XXX Setting segment register 0xf on my powermac 7200
499 	 * wedges the machine, so it is set later in pmap.c.
500 */
501 /*
502 __asm volatile ("mtsrin %0,%1"
503 :: "r"(iosrtable[i]),
504 "r"(pa));
505 */
506 }
507 #endif /* PPC_OEA601 */
508
509 #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
510 #define DBAT_SET(n, batl, batu) \
511 do { \
512 mtspr(SPR_DBAT##n##L, (batl)); \
513 mtspr(SPR_DBAT##n##U, (batu)); \
514 } while (/*CONSTCOND*/ 0)
515 #define DBAT_RESET(n) DBAT_SET(n, 0, 0)
516 #define DBATU_GET(n) mfspr(SPR_DBAT##n##U)
517 #define IBAT_SET(n, batl, batu) \
518 do { \
519 mtspr(SPR_IBAT##n##L, (batl)); \
520 mtspr(SPR_IBAT##n##U, (batu)); \
521 } while (/*CONSTCOND*/ 0)
522 #define IBAT_RESET(n) IBAT_SET(n, 0, 0)
523
524 void
525 oea_iobat_add(paddr_t pa, register_t len)
526 {
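	/*
	 * z indexes the next DBAT register to load; DBAT0 is left for the
	 * low-RAM mapping set up in oea_batinit().
	 */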
527 static int z = 1;
528 const u_int n = BAT_BL_TO_SIZE(len) / BAT_BL_TO_SIZE(BAT_BL_8M);
529 const u_int i = BAT_VA2IDX(pa) & -n; /* in case pa was in the middle */
530 const int after_bat3 = (oeacpufeat & OEACPU_HIGHBAT) ? 4 : 8;
531
532 KASSERT(len >= BAT_BL_8M);
533
534 #ifdef PPC_OEA601
535 if (mfpvr() >> 16 == MPC601) {
536 /* Use I/O segments on the BAT-starved 601. */
537 mpc601_ioseg_add(pa, len);
538 return;
539 }
540 #endif /* PPC_OEA601 */
541
542 /*
543 * If the caller wanted a bigger BAT than the hardware supports,
544 * split it into smaller BATs.
545 */
546 if (len > BAT_BL_256M && (oeacpufeat & OEACPU_XBSEN) == 0) {
547 u_int xn = BAT_BL_TO_SIZE(len) >> 28;
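		/* xn is the number of 256 MB pieces needed to cover the request */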
548 while (xn-- > 0) {
549 oea_iobat_add(pa, BAT_BL_256M);
550 pa += 0x10000000;
551 }
552 return;
553 }
554
555 const register_t batl = BATL(pa, BAT_I|BAT_G, BAT_PP_RW);
556 const register_t batu = BATU(pa, len, BAT_Vs);
557
558 for (u_int j = 0; j < n; j++) {
559 battable[i + j].batl = batl;
560 battable[i + j].batu = batu;
561 }
562
563 /*
564 * Let's start loading the BAT registers.
565 */
566 switch (z) {
567 case 1:
568 DBAT_SET(1, batl, batu);
569 z = 2;
570 break;
571 case 2:
572 DBAT_SET(2, batl, batu);
573 z = 3;
574 break;
575 case 3:
576 DBAT_SET(3, batl, batu);
577 		z = after_bat3;	/* go on to DBAT4, or stop if there is no HIGHBAT */
578 break;
579 case 4:
580 DBAT_SET(4, batl, batu);
581 z = 5;
582 break;
583 case 5:
584 DBAT_SET(5, batl, batu);
585 z = 6;
586 break;
587 case 6:
588 DBAT_SET(6, batl, batu);
589 z = 7;
590 break;
591 case 7:
592 DBAT_SET(7, batl, batu);
593 z = 8;
594 break;
595 default:
596 break;
597 }
598 }
599
600 void
601 oea_iobat_remove(paddr_t pa)
602 {
603 const u_int i = BAT_VA2IDX(pa);
604
605 if (!BAT_VA_MATCH_P(battable[i].batu, pa) ||
606 !BAT_VALID_P(battable[i].batu, PSL_PR))
607 return;
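	/* n is the number of 8 MB battable entries covered by this BAT */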
608 const int n =
609 __SHIFTOUT(battable[i].batu, (BAT_XBL|BAT_BL) & ~BAT_BL_8M) + 1;
610 KASSERT((n & (n-1)) == 0); /* power of 2 */
611 KASSERT((i & (n-1)) == 0); /* multiple of n */
612
613 memset(&battable[i], 0, n*sizeof(battable[0]));
614
615 const int maxbat = oeacpufeat & OEACPU_HIGHBAT ? 8 : 4;
616 for (u_int k = 1 ; k < maxbat; k++) {
617 register_t batu;
618 switch (k) {
619 case 1:
620 batu = DBATU_GET(1);
621 if (BAT_VA_MATCH_P(batu, pa) &&
622 BAT_VALID_P(batu, PSL_PR))
623 DBAT_RESET(1);
624 break;
625 case 2:
626 batu = DBATU_GET(2);
627 if (BAT_VA_MATCH_P(batu, pa) &&
628 BAT_VALID_P(batu, PSL_PR))
629 DBAT_RESET(2);
630 break;
631 case 3:
632 batu = DBATU_GET(3);
633 if (BAT_VA_MATCH_P(batu, pa) &&
634 BAT_VALID_P(batu, PSL_PR))
635 DBAT_RESET(3);
636 break;
637 case 4:
638 batu = DBATU_GET(4);
639 if (BAT_VA_MATCH_P(batu, pa) &&
640 BAT_VALID_P(batu, PSL_PR))
641 DBAT_RESET(4);
642 break;
643 case 5:
644 batu = DBATU_GET(5);
645 if (BAT_VA_MATCH_P(batu, pa) &&
646 BAT_VALID_P(batu, PSL_PR))
647 DBAT_RESET(5);
648 break;
649 case 6:
650 batu = DBATU_GET(6);
651 if (BAT_VA_MATCH_P(batu, pa) &&
652 BAT_VALID_P(batu, PSL_PR))
653 DBAT_RESET(6);
654 break;
655 case 7:
656 batu = DBATU_GET(7);
657 if (BAT_VA_MATCH_P(batu, pa) &&
658 BAT_VALID_P(batu, PSL_PR))
659 DBAT_RESET(7);
660 break;
661 default:
662 break;
663 }
664 }
665 }
666
667 void
668 oea_batinit(paddr_t pa, ...)
669 {
670 struct mem_region *allmem, *availmem, *mp;
671 register_t msr = mfmsr();
672 va_list ap;
673 #ifdef PPC_OEA601
674 unsigned int cpuvers;
675
676 cpuvers = mfpvr() >> 16;
677 #endif /* PPC_OEA601 */
678
679 /*
680 	 * We need to call this before zapping the BATs so that OF calls still work.
681 */
682 mem_regions(&allmem, &availmem);
683
684 /*
685 	 * Initialize the BAT registers to unmapped so that we do not
686 	 * generate overlapping mappings below.
687 	 *
688 	 * The 601's implementation differs in that the Valid bit is situated
689 	 * in the lower BAT register, and in that it has only a unified BAT,
690 	 * whose four entries are accessed through the IBAT[0-3] SPRs.
691 *
692 * Also, while the 601 does distinguish between supervisor/user
693 * protection keys, it does _not_ distinguish between validity in
694 * supervisor/user mode.
695 */
696 if ((msr & (PSL_IR|PSL_DR)) == 0) {
697 #ifdef PPC_OEA601
698 if (cpuvers == MPC601) {
699 __asm volatile ("mtibatl 0,%0" :: "r"(0));
700 __asm volatile ("mtibatl 1,%0" :: "r"(0));
701 __asm volatile ("mtibatl 2,%0" :: "r"(0));
702 __asm volatile ("mtibatl 3,%0" :: "r"(0));
703 } else
704 #endif /* PPC_OEA601 */
705 {
706 DBAT_RESET(0); IBAT_RESET(0);
707 DBAT_RESET(1); IBAT_RESET(1);
708 DBAT_RESET(2); IBAT_RESET(2);
709 DBAT_RESET(3); IBAT_RESET(3);
710 if (oeacpufeat & OEACPU_HIGHBAT) {
711 DBAT_RESET(4); IBAT_RESET(4);
712 DBAT_RESET(5); IBAT_RESET(5);
713 DBAT_RESET(6); IBAT_RESET(6);
714 DBAT_RESET(7); IBAT_RESET(7);
715
716 /*
717 * Change the first instruction to branch to
718 * dsitrap_fix_dbat6
719 */
720 dsitrap_fix_dbat4[0] &= ~0xfffc;
721 dsitrap_fix_dbat4[0]
722 += (uintptr_t)dsitrap_fix_dbat6
723 - (uintptr_t)&dsitrap_fix_dbat4[0];
724
725 /*
726 * Change the second instruction to branch to
727 * dsitrap_fix_dbat5 if bit 30 (aka bit 1) is
728 * true.
729 */
730 dsitrap_fix_dbat4[1] = 0x419e0000
731 + (uintptr_t)dsitrap_fix_dbat5
732 - (uintptr_t)&dsitrap_fix_dbat4[1];
733
734 /*
735 * Change it to load dbat4 instead of dbat2
736 */
737 dsitrap_fix_dbat4[2] = 0x7fd88ba6;
738 dsitrap_fix_dbat4[3] = 0x7ff98ba6;
739
740 /*
741 * Change it to load dbat7 instead of dbat3
742 */
743 dsitrap_fix_dbat7[0] = 0x7fde8ba6;
744 dsitrap_fix_dbat7[1] = 0x7fff8ba6;
745 }
746 }
747 }
748
749 /*
750 * Set up BAT to map physical memory
751 */
752 #ifdef PPC_OEA601
753 if (cpuvers == MPC601) {
754 int i;
755
756 /*
757 * Set up battable to map the lowest 256 MB area.
758 * Map the lowest 32 MB area via BAT[0-3];
759 * BAT[01] are fixed, BAT[23] are floating.
760 */
761 for (i = 0; i < 32; i++) {
762 battable[i].batl = BATL601(i << 23,
763 BAT601_BSM_8M, BAT601_V);
764 battable[i].batu = BATU601(i << 23,
765 BAT601_M, BAT601_Ku, BAT601_PP_NONE);
766 }
767 __asm volatile ("mtibatu 0,%1; mtibatl 0,%0"
768 :: "r"(battable[0x00000000 >> 23].batl),
769 "r"(battable[0x00000000 >> 23].batu));
770 __asm volatile ("mtibatu 1,%1; mtibatl 1,%0"
771 :: "r"(battable[0x00800000 >> 23].batl),
772 "r"(battable[0x00800000 >> 23].batu));
773 __asm volatile ("mtibatu 2,%1; mtibatl 2,%0"
774 :: "r"(battable[0x01000000 >> 23].batl),
775 "r"(battable[0x01000000 >> 23].batu));
776 __asm volatile ("mtibatu 3,%1; mtibatl 3,%0"
777 :: "r"(battable[0x01800000 >> 23].batl),
778 "r"(battable[0x01800000 >> 23].batu));
779 }
780 #endif /* PPC_OEA601 */
781
782 /*
783 	 * Now set up the other fixed BAT registers.
784 *
785 * Note that we still run in real mode, and the BAT
786 * registers were cleared above.
787 */
788
789 /*
790 	 * Add any I/O BATs specified.
791 */
792 va_start(ap, pa);
793 while (pa != 0) {
794 register_t len = va_arg(ap, register_t);
795 oea_iobat_add(pa, len);
796 pa = va_arg(ap, paddr_t);
797 }
798 va_end(ap);
799
800 /*
801 * Set up battable to map all RAM regions.
802 */
803 #ifdef PPC_OEA601
804 if (cpuvers == MPC601) {
805 for (mp = allmem; mp->size; mp++) {
806 paddr_t paddr = mp->start & 0xff800000;
807 paddr_t end = mp->start + mp->size;
808
809 do {
810 u_int ix = paddr >> 23;
811
812 battable[ix].batl =
813 BATL601(paddr, BAT601_BSM_8M, BAT601_V);
814 battable[ix].batu =
815 BATU601(paddr, BAT601_M, BAT601_Ku, BAT601_PP_NONE);
816 paddr += (1 << 23);
817 } while (paddr < end);
818 }
819 } else
820 #endif
821 {
822 const register_t bat_inc = BAT_IDX2VA(1);
823 for (mp = allmem; mp->size; mp++) {
824 paddr_t paddr = mp->start & -bat_inc;
825 paddr_t end = roundup2(mp->start + mp->size, bat_inc);
826
827 /*
828 * If the next entries are adjacent, merge them
829 * into this one
830 */
831 while (mp[1].size && end == (mp[1].start & -bat_inc)) {
832 mp++;
833 end = roundup2(mp->start + mp->size, bat_inc);
834 }
835
836 while (paddr < end) {
837 register_t bl = (oeacpufeat & OEACPU_XBSEN
838 ? BAT_BL_2G
839 : BAT_BL_256M);
840 psize_t size = BAT_BL_TO_SIZE(bl);
841 u_int n = BAT_VA2IDX(size);
842 u_int i = BAT_VA2IDX(paddr);
843
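				/*
				 * Shrink the block until it is naturally
				 * aligned at paddr and does not run past the
				 * end of this region.
				 */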
844 while ((paddr & (size - 1))
845 || paddr + size > end) {
846 size >>= 1;
847 bl = (bl >> 1) & (BAT_XBL|BAT_BL);
848 n >>= 1;
849 }
850
851 KASSERT(size >= bat_inc);
852 KASSERT(n >= 1);
853 KASSERT(bl >= BAT_BL_8M);
854
855 register_t batl = BATL(paddr, BAT_M, BAT_PP_RW);
856 register_t batu = BATU(paddr, bl, BAT_Vs);
857
858 for (; n-- > 0; i++) {
859 battable[i].batl = batl;
860 battable[i].batu = batu;
861 }
862 paddr += size;
863 }
864 }
865 /*
866 * Set up BAT0 to only map the lowest area.
867 */
868 __asm volatile ("mtibatl 0,%0; mtibatu 0,%1;"
869 "mtdbatl 0,%0; mtdbatu 0,%1;"
870 :: "r"(battable[0].batl), "r"(battable[0].batu));
871 }
872 }
873 #endif /* PPC_OEA || PPC_OEA64_BRIDGE */
874
875 void
876 oea_install_extint(void (*handler)(void))
877 {
878 extern int extint[], extsize[];
879 extern int extint_call[];
880 uintptr_t offset = (uintptr_t)handler - (uintptr_t)extint_call;
881 #ifdef PPC_HIGH_VEC
882 const uintptr_t exc_exi_base = EXC_HIGHVEC + EXC_EXI;
883 #else
884 const uintptr_t exc_exi_base = EXC_EXI;
885 #endif
886 int omsr, msr;
887
888 #ifdef DIAGNOSTIC
889 if (offset > 0x1ffffff)
890 panic("install_extint: %p too far away (%#lx)", handler,
891 (unsigned long) offset);
892 #endif
893 __asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
894 : "=r" (omsr), "=r" (msr)
895 : "K" ((u_short)~PSL_EE));
896 extint_call[0] = (extint_call[0] & 0xfc000003) | offset;
897 __syncicache((void *)extint_call, sizeof extint_call[0]);
898 memcpy((void *)exc_exi_base, extint, (size_t)extsize);
899 #ifdef PPC_OEA64_BRIDGE
900 if ((oeacpufeat & OEACPU_64_BRIDGE) == 0) {
901 for (int *ip = (int *)exc_exi_base;
902 (uintptr_t)ip <= exc_exi_base + (size_t)extsize;
903 ip++) {
904 if ((ip[0] & MFMSR_MASK) == MFMSR
905 && (ip[1] & RLDICL_MASK) == RLDICL
906 && (ip[2] & MTMSRD_MASK) == MTMSRD) {
907 *ip++ = NOP;
908 *ip++ = NOP;
909 ip[0] = NOP;
910 } else if (*ip == RFID) {
911 *ip = RFI;
912 }
913 }
914 }
915 #endif
916 __syncicache((void *)exc_exi_base, (size_t)extsize);
917
918 __asm volatile ("mtmsr %0" :: "r"(omsr));
919 }
920
921 /*
922 * Machine dependent startup code.
923 */
924 void
925 oea_startup(const char *model)
926 {
927 uintptr_t sz;
928 void *v;
929 vaddr_t minaddr, maxaddr;
930 char pbuf[9], mstr[128];
931
932 KASSERT(curcpu() != NULL);
933 KASSERT(lwp0.l_cpu != NULL);
934 KASSERT(curcpu()->ci_idepth == -1);
935
936 sz = round_page(MSGBUFSIZE);
937 #ifdef MSGBUFADDR
938 v = (void *) MSGBUFADDR;
939 #else
940 /*
941 * If the msgbuf is not in segment 0, allocate KVA for it and access
942 * it via mapped pages. [This prevents unneeded BAT switches.]
943 */
944 v = (void *) msgbuf_paddr;
945 if (msgbuf_paddr + sz > SEGMENT_LENGTH) {
946 u_int i;
947
948 minaddr = 0;
949 if (uvm_map(kernel_map, &minaddr, sz,
950 NULL, UVM_UNKNOWN_OFFSET, 0,
951 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
952 UVM_INH_NONE, UVM_ADV_NORMAL, 0)) != 0)
953 panic("startup: cannot allocate VM for msgbuf");
954 v = (void *)minaddr;
955 for (i = 0; i < sz; i += PAGE_SIZE) {
956 pmap_kenter_pa(minaddr + i, msgbuf_paddr + i,
957 VM_PROT_READ|VM_PROT_WRITE, 0);
958 }
959 pmap_update(pmap_kernel());
960 }
961 #endif
962 initmsgbuf(v, sz);
963
964 printf("%s%s", copyright, version);
965 if (model != NULL)
966 printf("Model: %s\n", model);
967 cpu_identify(mstr, sizeof(mstr));
968 cpu_setmodel("%s", mstr);
969
970 format_bytes(pbuf, sizeof(pbuf), ctob((u_int)physmem));
971 printf("total memory = %s\n", pbuf);
972
973 /*
974 * Allocate away the pages that map to 0xDEA[CDE]xxxx. Do this after
975 	 * the bufpages are allocated in case they overlap, since it's not
976 * fatal if we can't allocate these.
977 */
978 if (KERNEL_SR == 13 || KERNEL2_SR == 14) {
979 int error;
980 minaddr = 0xDEAC0000;
981 error = uvm_map(kernel_map, &minaddr, 0x30000,
982 NULL, UVM_UNKNOWN_OFFSET, 0,
983 UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
984 UVM_ADV_NORMAL, UVM_FLAG_FIXED));
985 if (error != 0 || minaddr != 0xDEAC0000)
986 printf("oea_startup: failed to allocate DEAD "
987 "ZONE: error=%d\n", error);
988 }
989
990 minaddr = 0;
991
992 /*
993 * Allocate a submap for physio
994 */
995 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
996 VM_PHYS_SIZE, 0, false, NULL);
997
998 format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
999 printf("avail memory = %s\n", pbuf);
1000
1001 #ifdef MULTIPROCESSOR
1002 kcpuset_create(&cpuset_info.cpus_running, true);
1003 kcpuset_create(&cpuset_info.cpus_hatched, true);
1004 kcpuset_create(&cpuset_info.cpus_paused, true);
1005 kcpuset_create(&cpuset_info.cpus_resumed, true);
1006 kcpuset_create(&cpuset_info.cpus_halted, true);
1007
1008 kcpuset_set(cpuset_info.cpus_running, cpu_number());
1009 #endif
1010 }
1011
1012 /*
1013 * Crash dump handling.
1014 */
1015
1016 void
1017 oea_dumpsys(void)
1018 {
1019 printf("dumpsys: TBD\n");
1020 }
1021
1022 /*
1023 * Convert kernel VA to physical address
1024 */
1025 paddr_t
1026 kvtop(void *addr)
1027 {
1028 vaddr_t va;
1029 paddr_t pa;
1030 uintptr_t off;
1031 extern char end[];
1032
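	/*
	 * The kernel image itself is mapped virtual == physical, so
	 * anything below "end" can be returned unchanged.
	 */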
1033 if (addr < (void *)end)
1034 return (paddr_t)addr;
1035
1036 va = trunc_page((vaddr_t)addr);
1037 off = (uintptr_t)addr - va;
1038
1039 if (pmap_extract(pmap_kernel(), va, &pa) == false) {
1040 /*printf("kvtop: zero page frame (va=0x%x)\n", addr);*/
1041 return (paddr_t)addr;
1042 }
1043
1044 return(pa + off);
1045 }
1046
1047 /*
1048  * Allocate VM space and map in the I/O address range.
1049 */
1050 void *
1051 mapiodev(paddr_t pa, psize_t len, bool prefetchable)
1052 {
1053 paddr_t faddr;
1054 vaddr_t taddr, va;
1055 int off;
1056
1057 faddr = trunc_page(pa);
1058 off = pa - faddr;
1059 len = round_page(off + len);
1060 va = taddr = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY);
1061
1062 if (va == 0)
1063 return NULL;
1064
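	/*
	 * Enter each page; device space is mapped uncached unless the
	 * caller asked for a prefetchable mapping.
	 */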
1065 for (; len > 0; len -= PAGE_SIZE) {
1066 pmap_kenter_pa(taddr, faddr, VM_PROT_READ | VM_PROT_WRITE,
1067 (prefetchable ? PMAP_MD_PREFETCHABLE : PMAP_NOCACHE));
1068 faddr += PAGE_SIZE;
1069 taddr += PAGE_SIZE;
1070 }
1071 pmap_update(pmap_kernel());
1072 return (void *)(va + off);
1073 }
1074
1075 void
1076 unmapiodev(vaddr_t va, vsize_t len)
1077 {
1078 paddr_t faddr;
1079
1080 if (! va)
1081 return;
1082
1083 faddr = trunc_page(va);
1084 len = round_page(va - faddr + len);
1085
1086 pmap_kremove(faddr, len);
1087 pmap_update(pmap_kernel());
1088 uvm_km_free(kernel_map, faddr, len, UVM_KMF_VAONLY);
1089 }
1090
1091 void
1092 trap0(void *lr)
1093 {
1094 panic("call to null-ptr from %p", lr);
1095 }
1096