/*	$NetBSD: powerpc_machdep.c,v 1.71 2015/01/23 07:27:05 nonaka Exp $	*/

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: powerpc_machdep.c,v 1.71 2015/01/23 07:27:05 nonaka Exp $");

#include "opt_altivec.h"
#include "opt_ddb.h"
#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_ppcarch.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/exec.h>
#include <sys/kauth.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <sys/cpu.h>
#include <sys/module.h>
#include <sys/device.h>
#include <sys/pcu.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <dev/mm.h>

#include <powerpc/fpu.h>
#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/userret.h>
#if defined(ALTIVEC) || defined(PPC_HAVE_SPE)
#include <powerpc/altivec.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#include <machine/cpu_counter.h>
#endif

#ifdef DDB
#include <machine/db_machdep.h>
#include <ddb/db_output.h>
#endif

int cpu_timebase;
int cpu_printfataltraps = 1;
#if !defined(PPC_IBM4XX)
extern int powersave;
#endif

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;

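/*
 * pcu(9) operations for the lazily context-switched FPU and
 * AltiVec/SPE vector units (where configured).
 */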
const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
#if defined(PPC_HAVE_FPU)
	[PCU_FPU] = &fpu_ops,
#endif
#if defined(ALTIVEC) || defined(PPC_HAVE_SPE)
	[PCU_VEC] = &vec_ops,
#endif
};

#ifdef MULTIPROCESSOR
struct cpuset_info cpuset_info;
#endif

/*
 * Set up registers on exec.
 */
void
setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack)
{
	struct proc * const p = l->l_proc;
	struct trapframe * const tf = l->l_md.md_utf;
	struct pcb * const pcb = lwp_getpcb(l);
	struct ps_strings arginfo;
	vaddr_t func = epp->ep_entry;

	memset(tf, 0, sizeof *tf);
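	/*
	 * Initial stack pointer (r1): keep at least 8 bytes of headroom
	 * below the argument area and round down to the 16-byte stack
	 * alignment the PowerPC ELF ABI requires.
	 */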
	tf->tf_fixreg[1] = -roundup(-stack + 8, 16);

	/*
	 * XXX Machine-independent code has already copied arguments and
	 * XXX environment to userland.  Get them back here.
	 */
	(void)copyin_psstrings(p, &arginfo);

	/*
	 * Set up arguments for _start():
	 *	_start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 *	- obj and cleanup are the auxiliary and termination
	 *	  vectors.  They are fixed up by ld.elf_so.
	 *	- ps_strings is a NetBSD extension, and will be
	 *	  ignored by executables which are strictly
	 *	  compliant with the SVR4 ABI.
	 *
	 * XXX We have to set both regs and retval here due to different
	 * XXX calling convention in trap.c and init_main.c.
	 */
	tf->tf_fixreg[3] = arginfo.ps_nargvstr;
	tf->tf_fixreg[4] = (register_t)arginfo.ps_argvstr;
	tf->tf_fixreg[5] = (register_t)arginfo.ps_envstr;
	tf->tf_fixreg[6] = 0;			/* auxiliary vector */
	tf->tf_fixreg[7] = 0;			/* termination vector */
	tf->tf_fixreg[8] = p->p_psstrp;		/* NetBSD extension */

#ifdef _LP64
	/*
	 * For native ELF64, the entry point is a function descriptor
	 * containing the real function address and its TOC base address.
	 */
	uintptr_t fdesc[3] = { [0] = func, [1] = 0, [2] = 0 };
	copyin((void *)func, fdesc, sizeof(fdesc));
	tf->tf_fixreg[2] = fdesc[1] + epp->ep_entryoffset;
	func = fdesc[0] + epp->ep_entryoffset;
#endif
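	/* On return to user mode, srr0 becomes the PC and srr1 the MSR. */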
	tf->tf_srr0 = func;
	tf->tf_srr1 = PSL_MBO | PSL_USERSET;
#ifdef ALTIVEC
	tf->tf_vrsave = 0;
#endif
	pcb->pcb_flags = PSL_FE_DFLT;
}

/*
 * Machine dependent system variables.
 */
static int
sysctl_machdep_cacheinfo(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;

	node.sysctl_data = &curcpu()->ci_ci;
	node.sysctl_size = sizeof(curcpu()->ci_ci);
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

#if !defined (PPC_IBM4XX)
static int
sysctl_machdep_powersave(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;

	if (powersave < 0)
		node.sysctl_flags &= ~CTLFLAG_READWRITE;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
#endif

static int
sysctl_machdep_booted_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_device == NULL)
		return (EOPNOTSUPP);

	const char * const xname = device_xname(booted_device);

	node = *rnode;
	node.sysctl_data = __UNCONST(xname);
	node.sysctl_size = strlen(xname) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_kernel == NULL || booted_kernel[0] == '\0')
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = booted_kernel;
	node.sysctl_size = strlen(booted_kernel) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	/* Deprecated */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "cachelinesize", NULL,
		       NULL, curcpu()->ci_ci.dcache_line_size, NULL, 0,
		       CTL_MACHDEP, CPU_CACHELINE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "timebase", NULL,
		       NULL, 0, &cpu_timebase, 0,
		       CTL_MACHDEP, CPU_TIMEBASE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "printfataltraps", NULL,
		       NULL, 0, &cpu_printfataltraps, 0,
		       CTL_MACHDEP, CPU_PRINTFATALTRAPS, CTL_EOL);
	/* Use this instead of CPU_CACHELINE */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "cacheinfo", NULL,
		       sysctl_machdep_cacheinfo, 0, NULL, 0,
		       CTL_MACHDEP, CPU_CACHEINFO, CTL_EOL);
#if !defined (PPC_IBM4XX)
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "powersave", NULL,
		       sysctl_machdep_powersave, 0, &powersave, 0,
		       CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
#endif
#if defined(PPC_IBM4XX) || defined(PPC_BOOKE)
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "altivec", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL);
#else
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "altivec", NULL,
		       NULL, cpu_altivec, NULL, 0,
		       CTL_MACHDEP, CPU_ALTIVEC, CTL_EOL);
#endif
#ifdef PPC_BOOKE
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "execprot", NULL,
		       NULL, 1, NULL, 0,
		       CTL_MACHDEP, CPU_EXECPROT, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_device", NULL,
		       sysctl_machdep_booted_device, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_booted_kernel, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
}

/*
 * Crash dump handling.
 */
u_int32_t dumpmag = 0x8fca0101;		/* magic number */
int dumpsize = 0;			/* size of dump in pages */
long dumplo = -1;			/* blocks */

/*
 * This is called by main to set dumplo and dumpsize.
 */
void
cpu_dumpconf(void)
{
	int nblks;			/* size of dump device */
	int skip;

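	/*
	 * Sizes below are in DEV_BSIZE disk blocks; ctod()/dtoc()
	 * convert between pages and disk blocks.
	 */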
	if (dumpdev == NODEV)
		return;
	nblks = bdev_size(dumpdev);
	if (nblks <= ctod(1))
		return;

	dumpsize = physmem;

	/*
	 * Skip enough blocks at the start of the disk to preserve a
	 * possible disklabel, rounding the skip up to a page boundary.
	 */
	skip = LABELSECTOR + 1;
	skip += ctod(1) - 1;
	skip = ctod(dtoc(skip));
	if (dumplo < skip)
		dumplo = skip;

	/* Put dump at end of partition */
	if (dumpsize > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo);
	if (dumplo < nblks - ctod(dumpsize))
		dumplo = nblks - ctod(dumpsize);
}

/*
 * Start a new LWP
 */
void
startlwp(void *arg)
{
	ucontext_t * const uc = arg;
	lwp_t * const l = curlwp;
	struct trapframe * const tf = l->l_md.md_utf;
	int error __diagused;

	error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
	KASSERT(error == 0);

	kmem_free(uc, sizeof(ucontext_t));
	userret(l, tf);
}

/*
 * Process the tail end of a posix_spawn() for the child.
 */
void
cpu_spawn_return(struct lwp *l)
{
	struct trapframe * const tf = l->l_md.md_utf;

	userret(l, tf);
}

bool
cpu_intr_p(void)
{

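	/* ci_idepth is -1 outside interrupt context, >= 0 within it. */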
	return curcpu()->ci_idepth >= 0;
}

void
cpu_idle(void)
{
	KASSERT(mfmsr() & PSL_EE);
	KASSERT(curcpu()->ci_cpl == IPL_NONE);
	(*curcpu()->ci_idlespin)();
}

void
cpu_ast(struct lwp *l, struct cpu_info *ci)
{
	l->l_md.md_astpending = 0;	/* we are about to do it */

	if (l->l_pflag & LP_OWEUPC) {
		l->l_pflag &= ~LP_OWEUPC;
		ADDUPROF(l);
	}

	/* Check whether we are being preempted. */
	if (ci->ci_want_resched) {
		preempt();
	}
}

void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	struct lwp * const l = ci->ci_data.cpu_onproc;
#if defined(MULTIPROCESSOR)
	struct cpu_info * const cur_ci = curcpu();
#endif

	KASSERT(kpreempt_disabled());

#ifdef MULTIPROCESSOR
	atomic_or_uint(&ci->ci_want_resched, flags);
#else
	ci->ci_want_resched |= flags;
#endif

	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
		/*
		 * No point doing anything, it will switch soon.
		 * Also here to prevent an assertion failure in
		 * kpreempt() due to preemption being set on a
		 * soft interrupt LWP.
		 */
		return;
	}

	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
#if defined(MULTIPROCESSOR)
		/*
		 * If the other CPU is idling, it must be waiting for an
		 * interrupt.  So give it one.
		 */
		if (__predict_false(ci != cur_ci))
			cpu_send_ipi(cpu_index(ci), IPI_NOMESG);
#endif
		return;
	}

#ifdef __HAVE_PREEMPTION
	if (flags & RESCHED_KPREEMPT) {
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
		if (ci == cur_ci) {
			softint_trigger(SOFTINT_KPREEMPT);
		} else {
			cpu_send_ipi(cpu_index(ci), IPI_KPREEMPT);
		}
		return;
	}
#endif
	l->l_md.md_astpending = 1;	/* force call to ast() */
#if defined(MULTIPROCESSOR)
	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
		cpu_send_ipi(cpu_index(ci), IPI_NOMESG);
	}
#endif
}

void
cpu_need_proftick(lwp_t *l)
{
	l->l_pflag |= LP_OWEUPC;
	l->l_md.md_astpending = 1;
}

void
cpu_signotify(lwp_t *l)
{
	l->l_md.md_astpending = 1;
}

vaddr_t
cpu_lwp_pc(lwp_t *l)
{
	return l->l_md.md_utf->tf_srr0;
}

bool
cpu_clkf_usermode(const struct clockframe *cf)
{
	return (cf->cf_srr1 & PSL_PR) != 0;
}

vaddr_t
cpu_clkf_pc(const struct clockframe *cf)
{
	return cf->cf_srr0;
}

bool
cpu_clkf_intr(const struct clockframe *cf)
{
	return cf->cf_idepth > 0;
}

#ifdef MULTIPROCESSOR
/*
 * MD support for xcall(9) interface.
 */

void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	cpuid_t target = (ci != NULL ? cpu_index(ci) : IPI_DST_NOTME);

	/* Unicast: remote CPU. */
	/* Broadcast: all, but local CPU (caller will handle it). */
	cpu_send_ipi(target, IPI_XCALL);
}

void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	cpuid_t target = (ci != NULL ? cpu_index(ci) : IPI_DST_NOTME);

	/* Unicast: remote CPU. */
	/* Broadcast: all, but local CPU (caller will handle it). */
	cpu_send_ipi(target, IPI_GENERIC);
}

/*
 * XXX kcpuset_create(9) and kcpuset_clone(9) cannot be used from
 * interrupt context, so shadow the kcpuset_t sets with a simple
 * 32-bit set manipulated by atomic ops.
 */
typedef uint32_t __cpuset_t;
CTASSERT(MAXCPUS <= 32);

#define	CPUSET_SINGLE(cpu)	((__cpuset_t)1 << (cpu))

#define	CPUSET_ADD(set, cpu)	atomic_or_32(&(set), CPUSET_SINGLE(cpu))
#define	CPUSET_DEL(set, cpu)	atomic_and_32(&(set), ~CPUSET_SINGLE(cpu))
#define	CPUSET_SUB(set1, set2)	atomic_and_32(&(set1), ~(set2))

#define	CPUSET_EXCEPT(set, cpu)	((set) & ~CPUSET_SINGLE(cpu))

#define	CPUSET_HAS_P(set, cpu)	((set) & CPUSET_SINGLE(cpu))
#define	CPUSET_NEXT(set)	(ffs(set) - 1)

#define	CPUSET_EMPTY_P(set)	((set) == (__cpuset_t)0)
#define	CPUSET_EQUAL_P(set1, set2)	((set1) == (set2))
#define	CPUSET_CLEAR(set)	((set) = (__cpuset_t)0)
#define	CPUSET_ASSIGN(set1, set2)	((set1) = (set2))

#define	CPUSET_EXPORT(kset, set)	kcpuset_export_u32((kset), &(set), sizeof(set))
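/*
 * Typical usage (cf. cpu_pause_others() below): snapshot a kcpuset_t
 * into a __cpuset_t, drop the current CPU, then IPI the remainder:
 *
 *	__cpuset_t set;
 *	CPUSET_EXPORT(cpuset_info.cpus_running, set);
 *	CPUSET_DEL(set, cpu_index(curcpu()));
 *	cpu_multicast_ipi(set, IPI_SUSPEND);
 */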

/*
 * Send an inter-processor interrupt to CPUs in cpuset (excludes curcpu())
 */
static void
cpu_multicast_ipi(__cpuset_t cpuset, uint32_t msg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPUSET_DEL(cpuset, cpu_index(curcpu()));
	if (CPUSET_EMPTY_P(cpuset))
		return;

	for (CPU_INFO_FOREACH(cii, ci)) {
		const int index = cpu_index(ci);
		if (CPUSET_HAS_P(cpuset, index)) {
			CPUSET_DEL(cpuset, index);
			cpu_send_ipi(index, msg);
		}
	}
}

static void
cpu_ipi_error(const char *s, kcpuset_t *succeeded, __cpuset_t expected)
{
	__cpuset_t cpuset;

	CPUSET_EXPORT(succeeded, cpuset);
	CPUSET_SUB(expected, cpuset);
	if (!CPUSET_EMPTY_P(expected)) {
		printf("Failed to %s:", s);
		do {
			const int index = CPUSET_NEXT(expected);
			CPUSET_DEL(expected, index);
			printf(" cpu%d", index);
		} while (!CPUSET_EMPTY_P(expected));
		printf("\n");
	}
}

static int
cpu_ipi_wait(kcpuset_t *watchset, __cpuset_t mask)
{
	uint64_t tmout = curcpu()->ci_data.cpu_cc_freq; /* some finite amount of time */
	__cpuset_t cpuset;

	while (tmout--) {
		CPUSET_EXPORT(watchset, cpuset);
		if (cpuset == mask)
			return 0;	/* success */
	}
	return 1;			/* timed out */
}

/*
 * Halt this cpu.
 */
void
cpu_halt(void)
{
	struct cpuset_info * const csi = &cpuset_info;
	const cpuid_t index = cpu_index(curcpu());

	printf("cpu%ld: shutting down\n", index);
	kcpuset_set(csi->cpus_halted, index);
	spl0();			/* allow interrupts, e.g. further IPIs */

	/* spin */
	for (;;)
		continue;
	/*NOTREACHED*/
}

/*
 * Halt all running cpus, excluding current cpu.
 */
void
cpu_halt_others(void)
{
	struct cpuset_info * const csi = &cpuset_info;
	const cpuid_t index = cpu_index(curcpu());
	__cpuset_t cpumask, cpuset, halted;

	KASSERT(kpreempt_disabled());

	CPUSET_EXPORT(csi->cpus_running, cpuset);
	CPUSET_DEL(cpuset, index);
	CPUSET_ASSIGN(cpumask, cpuset);
	CPUSET_EXPORT(csi->cpus_halted, halted);
	CPUSET_SUB(cpuset, halted);

	if (CPUSET_EMPTY_P(cpuset))
		return;

	cpu_multicast_ipi(cpuset, IPI_HALT);
	if (cpu_ipi_wait(csi->cpus_halted, cpumask))
		cpu_ipi_error("halt", csi->cpus_halted, cpumask);

	/*
	 * TBD
	 * Depending on available firmware methods, other cpus will
	 * either shut down by themselves, or spin and wait for us to
	 * stop them.
	 */
}

/*
 * Pause this cpu.
 */
void
cpu_pause(struct trapframe *tf)
{
	volatile struct cpuset_info * const csi = &cpuset_info;
	int s = splhigh();
	const cpuid_t index = cpu_index(curcpu());

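	/*
	 * Mark this CPU paused and spin (polling crit hooks) until some
	 * other CPU clears our bit in cpus_paused; then acknowledge by
	 * setting our bit in cpus_resumed.
	 */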
	for (;;) {
		kcpuset_set(csi->cpus_paused, index);
		while (kcpuset_isset(csi->cpus_paused, index))
			docritpollhooks();
		kcpuset_set(csi->cpus_resumed, index);
#ifdef DDB
		if (ddb_running_on_this_cpu_p())
			cpu_Debugger();
		if (ddb_running_on_any_cpu_p())
			continue;
#endif	/* DDB */
		break;
	}

	splx(s);
}

/*
 * Pause all running cpus, excluding current cpu.
 */
void
cpu_pause_others(void)
{
	struct cpuset_info * const csi = &cpuset_info;
	const cpuid_t index = cpu_index(curcpu());
	__cpuset_t cpuset;

	KASSERT(kpreempt_disabled());

	CPUSET_EXPORT(csi->cpus_running, cpuset);
	CPUSET_DEL(cpuset, index);

	if (CPUSET_EMPTY_P(cpuset))
		return;

	cpu_multicast_ipi(cpuset, IPI_SUSPEND);
	if (cpu_ipi_wait(csi->cpus_paused, cpuset))
		cpu_ipi_error("pause", csi->cpus_paused, cpuset);
}

/*
 * Resume a single cpu.
 */
void
cpu_resume(cpuid_t index)
{
	struct cpuset_info * const csi = &cpuset_info;
	__cpuset_t cpuset = CPUSET_SINGLE(index);

	kcpuset_zero(csi->cpus_resumed);
	kcpuset_clear(csi->cpus_paused, index);

	if (cpu_ipi_wait(csi->cpus_resumed, cpuset))
		cpu_ipi_error("resume", csi->cpus_resumed, cpuset);
}

/*
 * Resume all paused cpus.
 */
void
cpu_resume_others(void)
{
	struct cpuset_info * const csi = &cpuset_info;
	__cpuset_t cpuset;

	kcpuset_zero(csi->cpus_resumed);
	CPUSET_EXPORT(csi->cpus_paused, cpuset);
	kcpuset_zero(csi->cpus_paused);

	if (cpu_ipi_wait(csi->cpus_resumed, cpuset))
		cpu_ipi_error("resume", csi->cpus_resumed, cpuset);
}

int
cpu_is_paused(int index)
{
	struct cpuset_info * const csi = &cpuset_info;

	return kcpuset_isset(csi->cpus_paused, index);
}

#ifdef DDB
void
cpu_debug_dump(void)
{
	struct cpuset_info * const csi = &cpuset_info;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	char running, hatched, paused, resumed, halted;

#ifdef _LP64
	db_printf("CPU CPUID STATE CPUINFO          CPL INT MTX IPIS\n");
#else
	db_printf("CPU CPUID STATE CPUINFO  CPL INT MTX IPIS\n");
#endif
	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t index = cpu_index(ci);
		hatched = (kcpuset_isset(csi->cpus_hatched, index) ? 'H' : '-');
		running = (kcpuset_isset(csi->cpus_running, index) ? 'R' : '-');
		paused = (kcpuset_isset(csi->cpus_paused, index) ? 'P' : '-');
		resumed = (kcpuset_isset(csi->cpus_resumed, index) ? 'r' : '-');
		halted = (kcpuset_isset(csi->cpus_halted, index) ? 'h' : '-');
		db_printf("%3ld 0x%03x %c%c%c%c%c %p %3d %3d %3d 0x%08x\n",
		    index, ci->ci_cpuid,
		    running, hatched, paused, resumed, halted,
		    ci, ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
		    ci->ci_pending_ipis);
	}
}
#endif	/* DDB */
#endif	/* MULTIPROCESSOR */

#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
}
#endif	/* MODULAR */

bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	if (atop(pa) < physmem) {
		*vap = pa;
		return true;
	}

	return false;
}

int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{

	return (atop(pa) < physmem) ? 0 : EFAULT;
}

int
mm_md_kernacc(void *va, vm_prot_t prot, bool *handled)
{
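	/*
	 * Kernel VAs below the end of physical memory are assumed to be
	 * identity-mapped here, and thus always accessible.
	 */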
	if (atop((paddr_t)va) < physmem) {
		*handled = true;
		return 0;
	}

	if ((vaddr_t)va < VM_MIN_KERNEL_ADDRESS
	    || (vaddr_t)va >= VM_MAX_KERNEL_ADDRESS)
		return EFAULT;

	*handled = false;
	return 0;
}