/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 2001 Benno Rice
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *	$NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <net/netisr.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/altivec.h>
#ifndef __powerpc64__
#include <machine/bat.h>
#endif
#include <machine/cpu.h>
#include <machine/elf.h>
#include <machine/fpu.h>
#include <machine/hid.h>
#include <machine/kdb.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mmuvar.h>
#include <machine/pcb.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/spr.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/ofw_machdep.h>

#include <ddb/ddb.h>

#include <dev/ofw/openfirm.h>

#ifdef __powerpc64__
#include "mmu_oea64.h"
#endif

#ifndef __powerpc64__
struct bat	battable[16];
#endif

int radix_mmu = 0;

#ifndef __powerpc64__
/* Bits for running on 64-bit systems in 32-bit mode. */
extern void	*testppc64, *testppc64size;
extern void	*restorebridge, *restorebridgesize;
extern void	*rfid_patch, *rfi_patch1, *rfi_patch2;
extern void	*trapcode64;

extern Elf_Addr	_GLOBAL_OFFSET_TABLE_[];
#endif

extern void	*rstcode, *rstcodeend;
extern void	*trapcode, *trapcodeend;
extern void	*hypertrapcode, *hypertrapcodeend;
extern void	*generictrap, *generictrap64;
extern void	*alitrap, *aliend;
extern void	*dsitrap, *dsiend;
extern void	*decrint, *decrsize;
extern void	*extint, *extsize;
extern void	*dblow, *dbend;
extern void	*imisstrap, *imisssize;
extern void	*dlmisstrap, *dlmisssize;
extern void	*dsmisstrap, *dsmisssize;

extern void *ap_pcpu;
extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr);
extern void __restartkernel_virtual(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr);

void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);

void
aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	register_t	scratch;

	/*
	 * If running from an FDT, make sure we are in real mode to avoid
	 * tromping on firmware page tables. Everything in the kernel assumes
	 * 1:1 mappings out of firmware, so this won't break anything not
	 * already broken. This doesn't work if there is live OF, since OF
	 * may internally use non-1:1 mappings.
	 */
	if (ofentry == 0)
		mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));

#ifdef __powerpc64__
	/*
	 * Relocate to high memory so that the kernel
	 * can execute from the direct map.
	 *
	 * If we are in virtual mode already, use a special entry point
	 * that sets up a temporary DMAP to execute from until we can
	 * properly set up the MMU.
	 */
	if ((vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS) {
		if (mfmsr() & PSL_DR) {
			__restartkernel_virtual(fdt, 0, ofentry, mdp,
			    mdp_cookie, DMAP_BASE_ADDRESS, mfmsr());
		} else {
			__restartkernel(fdt, 0, ofentry, mdp, mdp_cookie,
			    DMAP_BASE_ADDRESS, mfmsr());
		}
	}
#endif

	/* Various very early CPU fix ups */
	switch (mfpvr() >> 16) {
		/*
		 * PowerPC 970 CPUs have a misfeature requested by Apple that
		 * makes them pretend they have a 32-byte cacheline. Turn this
		 * off before we measure the cacheline size.
		 */
		case IBM970:
		case IBM970FX:
		case IBM970MP:
		case IBM970GX:
			scratch = mfspr(SPR_HID5);
			scratch &= ~HID5_970_DCBZ_SIZE_HI;
			mtspr(SPR_HID5, scratch);
			break;
	#ifdef __powerpc64__
		case IBMPOWER7:
		case IBMPOWER7PLUS:
		case IBMPOWER8:
		case IBMPOWER8E:
		case IBMPOWER8NVL:
		case IBMPOWER9:
			/* XXX: get from ibm,slb-size in device tree */
			n_slbs = 32;
			break;
	#endif
	}
}

void
aim_cpu_init(vm_offset_t toc)
{
	size_t		trap_offset, trapsize;
	vm_offset_t	trap;
	register_t	msr;
	uint8_t		*cache_check;
	int		cacheline_warn;
#ifndef __powerpc64__
	register_t	scratch;
	int		ppc64;
#endif

	trap_offset = 0;
	cacheline_warn = 0;

	/* General setup for AIM CPUs */
	psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI;

#ifdef __powerpc64__
	psl_kernset |= PSL_SF;
	if (mfmsr() & PSL_HV)
		psl_kernset |= PSL_HV;
#endif
	psl_userset = psl_kernset | PSL_PR;
#ifdef __powerpc64__
	psl_userset32 = psl_userset & ~PSL_SF;
#endif

	/*
	 * Zeroed bits in this variable signify that the value of the bit
	 * in its position is allowed to vary between userspace contexts.
	 *
	 * All other bits are required to be identical for every userspace
	 * context. The actual *value* of the bit is determined by
	 * psl_userset and/or psl_userset32, and is not allowed to change.
	 *
	 * Remember to update this set when implementing support for
	 * *conditionally* enabling a processor facility. Failing to do
	 * this will cause swapcontext() in userspace to break when a
	 * process uses a conditionally-enabled facility.
	 *
	 * When *unconditionally* implementing support for a processor
	 * facility, update psl_userset / psl_userset32 instead.
	 *
	 * See the access control check in set_mcontext().
	 */
	psl_userstatic = ~(PSL_VSX | PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1);
	/*
	 * Mask bits from the SRR1 that aren't really the MSR:
	 * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64)
	 */
	psl_userstatic &= ~0x783f0000UL;
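	/*
	 * Worked example of the constant above: with the MSB counted as
	 * bit 0 (PowerPC convention), bits 1-4 of the low word are
	 * 0x78000000 and bits 10-15 are 0x003f0000, which together give
	 * 0x783f0000. On ppc64, bits 33-36 and 42-47 name the same
	 * positions within the low 32 bits of the 64-bit register, so a
	 * single constant covers both layouts.
	 */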

	/*
	 * Initialize the interrupt tables and figure out our cache line
	 * size and whether or not we need the 64-bit bridge code.
	 */

	/*
	 * Disable translation in case the vector area hasn't been
	 * mapped (G5). Note that no OFW calls can be made until
	 * translation is re-enabled.
	 */

	msr = mfmsr();
	mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI);

	/*
	 * Measure the cacheline size using dcbz
	 *
	 * Use EXC_PGM as a playground. We are about to overwrite it
	 * anyway, we know it exists, and we know it is cache-aligned.
	 */

	cache_check = (void *)EXC_PGM;

	for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++)
		cache_check[cacheline_size] = 0xff;

	__asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory");

	/* Find the first byte dcbz did not zero to get the cache line size */
	for (cacheline_size = 0; cacheline_size < 0x100 &&
	    cache_check[cacheline_size] == 0; cacheline_size++);

	/* Work around psim bug */
	if (cacheline_size == 0) {
		cacheline_warn = 1;
		cacheline_size = 32;
	}
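	/*
	 * Worked example of the scan above: on a 970 (with the 32-byte
	 * dcbz misfeature already disabled in aim_early_init), dcbz
	 * zeroes one 128-byte line, so cache_check[0..127] read back as
	 * zero, the scan stops at index 128, and cacheline_size becomes
	 * 128. A CPU with 32-byte lines stops at index 32 instead.
	 */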

	#ifndef __powerpc64__
	/*
	 * Figure out whether we need to use the 64 bit PMAP. This works by
	 * executing an instruction that is only legal on 64-bit PPC (mtmsrd),
	 * and setting ppc64 = 0 if that causes a trap.
	 */

	ppc64 = 1;

	bcopy(&testppc64, (void *)EXC_PGM,  (size_t)&testppc64size);
	__syncicache((void *)EXC_PGM, (size_t)&testppc64size);

	__asm __volatile("\
		mfmsr %0;	\
		mtsprg2 %1;	\
				\
		mtmsrd %0;	\
		mfsprg2 %1;"
	    : "=r"(scratch), "=r"(ppc64));
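	/*
	 * How the probe above is expected to work: mtmsrd exists only on
	 * 64-bit implementations. There it executes harmlessly
	 * (rewriting the MSR with its current value) and SPRG2 keeps the
	 * value just stored, so ppc64 reads back nonzero. On a 32-bit
	 * CPU it raises a program exception instead, and the testppc64
	 * stub installed at EXC_PGM above clears SPRG2 and resumes past
	 * the faulting instruction, so ppc64 reads back zero.
	 */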

	if (ppc64)
		cpu_features |= PPC_FEATURE_64;

	/*
	 * Now copy restorebridge into all the handlers, if necessary,
	 * and set up the trap tables.
	 */

	if (cpu_features & PPC_FEATURE_64) {
		/* Patch the two instances of rfi -> rfid */
		bcopy(&rfid_patch,&rfi_patch1,4);
	#ifdef KDB
		/* rfi_patch2 is at the end of dbleave */
		bcopy(&rfid_patch,&rfi_patch2,4);
	#endif
	}
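	/*
	 * The reason for the patch: 64-bit implementations do not
	 * provide the 32-bit rfi instruction; the return from interrupt
	 * on those CPUs is rfid, which restores the full 64-bit MSR.
	 * Running the 32-bit kernel in bridge mode therefore requires
	 * rewriting both rfi sites with rfid at runtime.
	 */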
	#else /* powerpc64 */
	cpu_features |= PPC_FEATURE_64;
	#endif

	trapsize = (size_t)&trapcodeend - (size_t)&trapcode;

	/*
	 * Copy generic handler into every possible trap. Special cases will get
	 * different ones in a minute.
	 */
	for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20)
		bcopy(&trapcode, (void *)trap, trapsize);
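	/*
	 * Note on the 0x20 stride: vectors are not uniformly spaced
	 * (0x100 apart for the classic ones, 0x80 for some 64-bit
	 * ones), so the loop tiles the stub at every 0x20 boundary.
	 * This assumes the generic stub fits within 0x20 bytes, so that
	 * a complete, untruncated copy starts at every possible vector
	 * address.
	 */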

	#ifndef __powerpc64__
	if (cpu_features & PPC_FEATURE_64) {
		/*
		 * Copy a code snippet to restore 32-bit bridge mode
		 * to the top of every non-generic trap handler
		 */

		trap_offset += (size_t)&restorebridgesize;
		bcopy(&restorebridge, (void *)EXC_RST, trap_offset);
		bcopy(&restorebridge, (void *)EXC_DSI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_ALI, trap_offset);
		bcopy(&restorebridge, (void *)EXC_PGM, trap_offset);
		bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset);
		bcopy(&restorebridge, (void *)EXC_TRC, trap_offset);
		bcopy(&restorebridge, (void *)EXC_BPT, trap_offset);
	} else {
		/*
		 * Use an IBAT and a DBAT to map the bottom 256M segment.
		 *
		 * It is very important to do it *now* to avoid taking a
		 * fault in .text / .data before the MMU is bootstrapped,
		 * because until then, the translation data has not been
		 * copied over from OpenFirmware, so our DSI/ISI will fail
		 * to find a match.
		 */

		battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
		battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
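		/*
		 * For reference, assuming the usual OEA BAT field layout
		 * from <machine/bat.h>, this encodes batu 0x00001ffe
		 * (BEPI 0, BL 0x7ff = 256MB, Vs set) and batl 0x00000012
		 * (BRPN 0, memory-coherent, PP = RW): a supervisor-valid
		 * identity mapping of effective addresses
		 * 0x00000000-0x0fffffff.
		 */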

		__asm (".balign 32; \n"
		    "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
		    "mtdbatu 0,%0; mtdbatl 0,%1; isync"
		    :: "r"(battable[0].batu), "r"(battable[0].batl));
	}
	#else
	trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode;
	bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize);
	bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize);
	#endif

	bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend -
	    (size_t)&rstcode);

#ifdef KDB
	bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
	bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend -
	    (size_t)&dblow);
#endif
	bcopy(&alitrap,  (void *)(EXC_ALI + trap_offset),  (size_t)&aliend -
	    (size_t)&alitrap);
	bcopy(&dsitrap,  (void *)(EXC_DSI + trap_offset),  (size_t)&dsiend -
	    (size_t)&dsitrap);

	/* Set address of generictrap for self-reloc calculations */
	*((void **)TRAP_GENTRAP) = &generictrap;
	#ifdef __powerpc64__
	/* Set TOC base so that the interrupt code can get at it */
	*((void **)TRAP_ENTRY) = &generictrap;
	*((register_t *)TRAP_TOCBASE) = toc;
	#else
	/* Set branch address for trap code */
	if (cpu_features & PPC_FEATURE_64)
		*((void **)TRAP_ENTRY) = &generictrap64;
	else
		*((void **)TRAP_ENTRY) = &generictrap;
	*((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_;

	/* G2-specific TLB miss helper handlers */
	bcopy(&imisstrap, (void *)EXC_IMISS,  (size_t)&imisssize);
	bcopy(&dlmisstrap, (void *)EXC_DLMISS,  (size_t)&dlmisssize);
	bcopy(&dsmisstrap, (void *)EXC_DSMISS,  (size_t)&dsmisssize);
	#endif
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Restore MSR
	 */
	mtmsr(msr);

	/* Warn if cacheline size was not determined */
	if (cacheline_warn == 1) {
		printf("WARNING: cacheline size undetermined, setting to 32\n");
	}

	/*
	 * Initialise virtual memory. Use BUS_PROBE_GENERIC priority
	 * in case the platform module had a better idea of what we
	 * should do.
	 */
	if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) {
		radix_mmu = 0;
		TUNABLE_INT_FETCH("radix_mmu", &radix_mmu);
		if (radix_mmu)
			pmap_mmu_install(MMU_TYPE_RADIX, BUS_PROBE_GENERIC);
		else
			pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	} else if (cpu_features & PPC_FEATURE_64)
		pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC);
	else
		pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC);
}
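
/*
 * A note on the radix_mmu knob above: TUNABLE_INT_FETCH() reads it from
 * the kernel environment, so on an ISA 3.00 machine it can be set from
 * loader.conf(5), e.g. radix_mmu="1" to select the radix pmap instead of
 * the default hashed page table pmap.
 */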

/*
 * Shutdown the CPU as much as possible.
 */
void
cpu_halt(void)
{

	OF_exit();
}

int
ptrace_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 |= PSL_SE;

	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	struct trapframe *tf;

	tf = td->td_frame;
	tf->srr1 &= ~PSL_SE;

	return (0);
}

void
kdb_cpu_clear_singlestep(void)
{

	kdb_frame->srr1 &= ~PSL_SE;
}

void
kdb_cpu_set_singlestep(void)
{

	kdb_frame->srr1 |= PSL_SE;
}

/*
 * Initialise a struct pcpu.
 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{
#ifdef __powerpc64__
	/* Copy the SLB contents from the current CPU */
	memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb));
#endif
}

/* Return 0 on handled success, otherwise signal number. */
int
cpu_machine_check(struct thread *td, struct trapframe *frame, int *ucode)
{
#ifdef __powerpc64__
	/*
	 * This block is 64-bit CPU specific currently.  Punt running in 32-bit
	 * mode on 64-bit CPUs.
	 */
	/* Check if the important information is in DSISR */
	if ((frame->srr1 & SRR1_MCHK_DATA) != 0) {
		printf("Machine check, DSISR: %016lx\n", frame->cpu.aim.dsisr);
		/* SLB multi-hit is recoverable. */
		if ((frame->cpu.aim.dsisr & DSISR_MC_SLB_MULTIHIT) != 0)
			return (0);
		if ((frame->cpu.aim.dsisr & DSISR_MC_DERAT_MULTIHIT) != 0) {
			pmap_tlbie_all();
			return (0);
		}
		/* TODO: Add other machine check recovery procedures. */
	} else {
		if ((frame->srr1 & SRR1_MCHK_IFETCH_M) == SRR1_MCHK_IFETCH_SLBMH)
			return (0);
	}
#endif
	*ucode = BUS_OBJERR;
	return (SIGBUS);
}

#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}

#endif

/*
 * These functions need to provide addresses that both (a) work in real mode
 * (or whatever mode/circumstances the kernel is in during early boot, i.e.
 * now) and (b) can still, in principle, work once the kernel is up and
 * running. Because they rely on existing mappings/real mode, unmap is a
 * no-op.
 */
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

	/*
	 * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
	 * try to get the address in a memory region compatible with the
	 * direct map for efficiency later.
	 */
	if (mfmsr() & PSL_DR)
		return (pa);
	else
		return (DMAP_BASE_ADDRESS + pa);
}
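
/*
 * Typical use, as a sketch: an early console driver can map its UART
 * registers with pmap_early_io_map(uart_pa, PAGE_SIZE) during bootstrap
 * and never unmap them; since the returned address is either 1:1 or
 * direct-map based, the same pointer keeps working after the MMU comes up.
 */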

void
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
{

	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	__asm__ __volatile__("dssall; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then do it
	 * from ROM.
	 */
	memp = (volatile uint32_t *)0x00000000;
	for (i = 0; i < 128 * 1024; i++) {
		temp = *memp;
		__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
		memp += 32/sizeof(*memp);
	}

	memp = (volatile uint32_t *)0xfff00000;
	x = 0xfe;

	for (; x != 0xff;) {
		mtspr(SPR_LDSTCR, x);
		for (i = 0; i < 128; i++) {
			temp = *memp;
			__asm__ __volatile__("dcbf 0,%0" :: "r"(memp));
			memp += 32/sizeof(*memp);
		}
		x = ((x << 1) | 1) & 0xff;
	}
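	/*
	 * The mask walk above goes 0xfe, 0xfd, 0xfb, ..., 0x7f: each
	 * LDSTCR value locks seven of the eight L1 data cache ways and
	 * leaves exactly one unlocked, so the 128 ROM loads (one per set
	 * of the 32KB, 8-way cache) land in, and the dcbf flushes, just
	 * that way. After eight passes every way has been flushed.
	 */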
	mtspr(SPR_LDSTCR, 0);

	cache_reg = mfspr(SPR_L2CR);
	if (cache_reg & L2CR_L2E) {
		cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450);
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF);
		while (mfspr(SPR_L2CR) & L2CR_L2HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L2CR_L2E;
		mtspr(SPR_L2CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L2CR, cache_reg | L2CR_L2I);
		powerpc_sync();
		while (mfspr(SPR_L2CR) & L2CR_L2I)
			; /* Busy wait for L2 cache invalidate */
		powerpc_sync();
	}

	cache_reg = mfspr(SPR_L3CR);
	if (cache_reg & L3CR_L3E) {
		cache_reg &= ~(L3CR_L3IO | L3CR_L3DO);
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF);
		while (mfspr(SPR_L3CR) & L3CR_L3HWF)
			; /* Busy wait for cache to flush */
		powerpc_sync();
		cache_reg &= ~L3CR_L3E;
		mtspr(SPR_L3CR, cache_reg);
		powerpc_sync();
		mtspr(SPR_L3CR, cache_reg | L3CR_L3I);
		powerpc_sync();
		while (mfspr(SPR_L3CR) & L3CR_L3I)
			; /* Busy wait for L3 cache invalidate */
		powerpc_sync();
	}

	mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE);
	powerpc_sync();
	isync();

	mtmsr(msr);
}

void
cpu_sleep(void)
{
	static u_quad_t timebase = 0;
	static register_t sprgs[4];
	static register_t srrs[2];

	jmp_buf resetjb;
	struct thread *fputd;
	struct thread *vectd;
	register_t hid0;
	register_t msr;
	register_t saved_msr;

	ap_pcpu = pcpup;

	PCPU_SET(restore, &resetjb);

	saved_msr = mfmsr();
	fputd = PCPU_GET(fputhread);
	vectd = PCPU_GET(vecthread);
	if (fputd != NULL)
		save_fpu(fputd);
	if (vectd != NULL)
		save_vec(vectd);
	if (setjmp(resetjb) == 0) {
		sprgs[0] = mfspr(SPR_SPRG0);
		sprgs[1] = mfspr(SPR_SPRG1);
		sprgs[2] = mfspr(SPR_SPRG2);
		sprgs[3] = mfspr(SPR_SPRG3);
		srrs[0] = mfspr(SPR_SRR0);
		srrs[1] = mfspr(SPR_SRR1);
		timebase = mftb();
		powerpc_sync();
		flush_disable_caches();
		hid0 = mfspr(SPR_HID0);
		hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP;
		powerpc_sync();
		isync();
		msr = mfmsr() | PSL_POW;
		mtspr(SPR_HID0, hid0);
		powerpc_sync();

		while (1)
			mtmsr(msr);
	}
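	/*
	 * Arriving here via longjmp means the CPU woke from sleep
	 * through its reset vector and the wakeup path bounced through
	 * the PCPU restore pointer saved above (the expected flow);
	 * everything below rebuilds the state the sleep lost.
	 */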
	platform_smp_timebase_sync(timebase, 0);
	PCPU_SET(curthread, curthread);
	PCPU_SET(curpcb, curthread->td_pcb);
	pmap_activate(curthread);
	powerpc_sync();
	mtspr(SPR_SPRG0, sprgs[0]);
	mtspr(SPR_SPRG1, sprgs[1]);
	mtspr(SPR_SPRG2, sprgs[2]);
	mtspr(SPR_SPRG3, sprgs[3]);
	mtspr(SPR_SRR0, srrs[0]);
	mtspr(SPR_SRR1, srrs[1]);
	mtmsr(saved_msr);
	if (fputd == curthread)
		enable_fpu(curthread);
	if (vectd == curthread)
		enable_vec(curthread);
	powerpc_sync();
}