/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *	$NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/endian.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
#include "mmu_oea64.h"
#endif

#ifndef __powerpc64__
struct bat	battable[16];
#endif

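/*
 * Non-zero when the radix-tree page tables should be used; set from the
 * "radix_mmu" tunable in pmap_early_io_map_init() below.
 */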
int radix_mmu = 0;

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void	*testppc64, *testppc64size;
extern void	*restorebridge, *restorebridgesize;
extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
extern void	*trapcode64;

extern Elf_Addr	_GLOBAL_OFFSET_TABLE_[];
#endif

extern void	*rstcode, *rstcodeend;
extern void	*trapcode, *trapcodeend;
extern void	*hypertrapcode, *hypertrapcodeend;
extern void	*generictrap, *generictrap64;
extern void	*alitrap, *aliend;
extern void	*dsitrap, *dsiend;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbend;
extern void	*imisstrap, *imisssize;
extern void	*dlmisstrap, *dlmisssize;
extern void	*dsmisstrap, *dsmisssize;

extern void *ap_pcpu;
extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr);
extern void __restartkernel_virtual(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr);

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);

void
aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	register_t	scratch;

	/*
	 * If running from an FDT, make sure we are in real mode to avoid
	 * tromping on firmware page tables. Everything in the kernel assumes
	 * 1:1 mappings out of firmware, so this won't break anything not
	 * already broken. This doesn't work if there is live OF, since OF
	 * may internally use non-1:1 mappings.
	 */
	if (ofentry == 0)
		mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));

#ifdef __powerpc64__
	/*
	 * Relocate to high memory so that the kernel
	 * can execute from the direct map.
	 *
	 * If we are in virtual mode already, use a special entry point
	 * that sets up a temporary DMAP to execute from until we can
	 * properly set up the MMU.
	 */
	if ((vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS) {
		if (mfmsr() & PSL_DR) {
			__restartkernel_virtual(fdt, 0, ofentry, mdp,
			    mdp_cookie, DMAP_BASE_ADDRESS, mfmsr());
		} else {
			__restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
			    DMAP_BASE_ADDRESS, mfmsr());
		}
	}
#endif

	/* Various very early CPU fix ups */
	switch (mfpvr() >> 16) {
		/*
		 * PowerPC 970 CPUs have a misfeature requested by Apple that
		 * makes them pretend they have a 32-byte cacheline. Turn this
		 * off before we measure the cacheline size.
		 */
		case IBM970:
		case IBM970FX:
		case IBM970MP:
		case IBM970GX:
			scratch = mfspr(SPR_HID5);
			scratch &= ~HID5_970_DCBZ_SIZE_HI;
			mtspr(SPR_HID5, scratch);
			break;
	#ifdef __powerpc64__
		case IBMPOWER7:
		case IBMPOWER7PLUS:
		case IBMPOWER8:
		case IBMPOWER8E:
		case IBMPOWER8NVL:
		case IBMPOWER9:
			/* XXX: get from ibm,slb-size in device tree */
			n_slbs = 32;
			break;
	#endif
	}
}

void
aim_cpu_init(vm_offset_t toc)
{
	size_t		trap_offset, trapsize;
	vm_offset_t	trap;
	register_t	msr;
	uint8_t		*cache_check;
	int		cacheline_warn;
#ifndef __powerpc64__
	register_t	scratch;
	int		ppc64;
#endif

	trap_offset = 0;
	cacheline_warn = 0;

	/* General setup for AIM CPUs */
	psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI;
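	/*
	 * That is: external interrupts (PSL_EE), machine checks (PSL_ME),
	 * instruction and data address translation (PSL_IR/PSL_DR), and
	 * recoverable exceptions (PSL_RI) are enabled while in the kernel.
	 */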

#ifdef __powerpc64__
	psl_kernset |= PSL_SF;
	if (mfmsr() & PSL_HV)
		psl_kernset |= PSL_HV;

#if BYTE_ORDER == LITTLE_ENDIAN
	psl_kernset |= PSL_LE;
#endif

#endif
	psl_userset = psl_kernset | PSL_PR;
#ifdef __powerpc64__
	psl_userset32 = psl_userset & ~PSL_SF;
#endif

	/*
	 * Zeroed bits in this variable signify that the value of the bit
	 * in its position is allowed to vary between userspace contexts.
	 *
	 * All other bits are required to be identical for every userspace
	 * context. The actual *value* of the bit is determined by
	 * psl_userset and/or psl_userset32, and is not allowed to change.
	 *
	 * Remember to update this set when implementing support for
	 * *conditionally* enabling a processor facility. Failing to do
	 * this will cause swapcontext() in userspace to break when a
	 * process uses a conditionally-enabled facility.
	 *
	 * When *unconditionally* implementing support for a processor
	 * facility, update psl_userset / psl_userset32 instead.
	 *
	 * See the access control check in set_mcontext().
	 */
	psl_userstatic = ~(PSL_VSX | PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1);
	/*
	 * Mask bits from the SRR1 that aren't really the MSR:
	 * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64)
	 */
	psl_userstatic &= ~0x783f0000UL;
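	/*
	 * In big-endian bit numbering, bits 1-4 are 0x78000000 and bits
	 * 10-15 are 0x003f0000, which together give the 0x783f0000 mask
	 * (the ppc64 positions 33-36/42-47 land on the same low 32 bits).
	 */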

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}

	#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64 bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM,  (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

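	/*
	 * The probe leans on the testppc64 snippet just installed at
	 * EXC_PGM: SPRG2 is seeded with the non-zero ppc64 value, and if
	 * mtmsrd is illegal on this CPU, the resulting program exception
	 * handler is expected to zero SPRG2 and skip the instruction, so
	 * reading SPRG2 back yields the answer.
	 */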
	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch,&rfi_patch1,4);
	#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch,&rfi_patch2,4);
	#endif
	}
	#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
	#endif

	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

	/*
	 * Copy the generic handler into every possible trap. Special cases
	 * will get different ones in a minute.
	 */
	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
		bcopy(&trapcode, (void *)trap, trapsize);

	#ifndef __powerpc64__
	if (cpu_features & PPC_FEATURE_64) {
		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
	} else {
		/*
		 * Use an IBAT and a DBAT to map the bottom 256M segment.
		 *
		 * It is very important to do it *now* to avoid taking a
		 * fault in .text / .data before the MMU is bootstrapped,
		 * because until then, the translation data has not been
		 * copied over from OpenFirmware, so our DSI/ISI will fail
		 * to find a match.
		 */

		battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
		battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

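		/*
		 * The .balign below keeps the mtbat/isync sequence within a
		 * single 32-byte cache line, presumably so it cannot take an
		 * instruction fetch fault while the BATs are half-updated.
		 */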
		__asm (".balign 32; \n"
		    "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
		    "mtdbatu 0,%0; mtdbatl 0,%1; isync"
		    :: "r"(battable[0].batu), "r"(battable[0].batl));
	}
	#else
	trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode;
	bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize);
	#endif

	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
	    (size_t)&rstcode);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
#endif
	bcopy(&alitrap,  (void *)(EXC_ALI + trap_offset),  (size_t)&aliend -
	    (size_t)&alitrap);
	bcopy(&dsitrap,  (void *)(EXC_DSI + trap_offset),  (size_t)&dsiend -
	    (size_t)&dsitrap);

	/* Set address of generictrap for self-reloc calculations */
	*((void **)TRAP_GENTRAP) = &generictrap;
	#ifdef __powerpc64__
	/* Set TOC base so that the interrupt code can get at it */
	*((void **)TRAP_ENTRY) = &generictrap;
	*((register_t *)TRAP_TOCBASE) = toc;
	#else
	/* Set branch address for trap code */
	if (cpu_features & PPC_FEATURE_64)
		*((void **)TRAP_ENTRY) = &generictrap64;
	else
		*((void **)TRAP_ENTRY) = &generictrap;
	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS,  (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS,  (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS,  (size_t)&dsmisssize);
	#endif
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if the cacheline size could not be determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (radix_mmu)
		pmap_mmu_install(MMU_TYPE_RADIX, BUS_PROBE_GENERIC);
	else if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb));
#endif
}

/* Return 0 on handled success, otherwise signal number. */
int
cpu_machine_check(struct thread *td, struct trapframe *frame, int *ucode)
{
#ifdef __powerpc64__
	/*
	 * This block is currently specific to 64-bit CPUs; punt when
	 * running in 32-bit mode on a 64-bit CPU.
	 */
	/* Check if the important information is in DSISR */
	if ((frame->srr1 & SRR1_MCHK_DATA) != 0) {
		printf("Machine check, DSISR: %016lx\n", frame->cpu.aim.dsisr);
		/* SLB multi-hit is recoverable. */
		if ((frame->cpu.aim.dsisr & DSISR_MC_SLB_MULTIHIT) != 0)
			return (0);
		if ((frame->cpu.aim.dsisr &
		    (DSISR_MC_DERAT_MULTIHIT | DSISR_MC_TLB_MULTIHIT)) != 0) {
			pmap_tlbie_all();
			return (0);
		}
		/* TODO: Add other machine check recovery procedures. */
	} else {
		if ((frame->srr1 & SRR1_MCHK_IFETCH_M) == SRR1_MCHK_IFETCH_SLBMH)
			return (0);
	}
#endif
	*ucode = BUS_OBJERR;
	return (SIGBUS);
}

#ifndef __powerpc64__
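/*
 * On 32-bit OEA the VSID lives in one of the sixteen segment registers,
 * selected by the top four bits of the virtual address.
 */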
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#endif

void
pmap_early_io_map_init(void)
{
	if ((cpu_features2 & PPC_FEATURE2_ARCH_3_00) == 0)
		radix_mmu = 0;
	else {
		radix_mmu = 1;
		TUNABLE_INT_FETCH("radix_mmu", &radix_mmu);
	}

	/*
	 * When using Radix, set the start and end of kva early, to be able to
	 * use KVAs in pmap_early_io_map and avoid issues when remapping them
	 * later.
	 */
	if (radix_mmu) {
		virtual_avail = VM_MIN_KERNEL_ADDRESS;
		virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
	}
}

/*
 * These functions need to provide addresses that both (a) work in real mode
 * (or whatever mode/circumstances the kernel is in during early boot (now))
 * and (b) can still, in principle, work once the kernel is up and running.
 * Because these rely on existing mappings/real mode, unmap is a no-op.
 */
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

	/*
	 * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
	 * try to get the address in a memory region compatible with the
	 * direct map for efficiency later.
	 * The exception is the Radix MMU, whose current implementation
	 * doesn't support mapping arbitrary virtual addresses, such as those
	 * generated by "direct mapping" I/O addresses. In that case, use
	 * addresses from the KVA area instead.
	 */
	if (mfmsr() & PSL_DR)
		return (pa);
	else if (radix_mmu) {
		vm_offset_t va;

		va = virtual_avail;
		virtual_avail += round_page(size + pa - trunc_page(pa));
		return (va);
	} else
		return (DMAP_BASE_ADDRESS + pa);
}

void
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
{

	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	/* 7e00066c: dssall */
	__asm__ __volatile__(".long 0x7e00066c; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then do it
	 * from ROM.
	 */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		(void)*memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);
	}

	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

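	/*
	 * Walk x through 0xfe, 0xfd, 0xfb, ... 0x7f: each value leaves
	 * exactly one of the eight L1 ways unlocked, and the 128 load+dcbf
	 * passes from ROM then sweep that way, flushing the cache one way
	 * at a time (following the 7450 manual procedure cited above).
	 */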
	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			(void)*memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
	mtspr(SPR_LDSTCR, 0);

	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

#ifndef __powerpc64__
void
mpc745x_sleep(void)
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

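		/*
		 * Setting PSL_POW with HID0_SLEEP selected puts the core to
		 * sleep; on wakeup, execution restarts at the reset vector,
		 * which is expected to longjmp back through the "restore"
		 * pcpu field set above, returning non-zero from setjmp.
		 */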
		while (1)
			mtmsr(msr);
	}
	/* XXX: The mttb() means this *only* works on single-CPU systems. */
	mttb(timebase);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}
#endif