1 /* $NetBSD: cpu.c,v 1.107 2022/05/22 11:27:33 andvar Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999, 2000, 2001, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
35 * All rights reserved.
36 *
37 * Author: Chris G. Demetriou
38 *
39 * Permission to use, copy, modify and distribute this software and
40 * its documentation is hereby granted, provided that both the copyright
41 * notice and this permission notice appear in all copies of the
42 * software, derivative works or modified versions, and any portions
43 * thereof, and that both notices appear in supporting documentation.
44 *
45 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
46 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
47 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48 *
49 * Carnegie Mellon requests users of this software to return to
50 *
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science
53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890
55 *
56 * any improvements or extensions that they make and grant Carnegie the
57 * rights to redistribute these changes.
58 */
59
60 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
61
62 __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.107 2022/05/22 11:27:33 andvar Exp $");
63
64 #include "opt_ddb.h"
65 #include "opt_multiprocessor.h"
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/device.h>
70 #include <sys/kmem.h>
71 #include <sys/proc.h>
72 #include <sys/atomic.h>
73 #include <sys/cpu.h>
74 #include <sys/sysctl.h>
75
76 #include <uvm/uvm_extern.h>
77
78 #include <machine/autoconf.h>
79 #include <machine/cpuvar.h>
80 #include <machine/rpb.h>
81 #include <machine/prom.h>
82 #include <machine/alpha.h>
83
/*
 * Statically-allocated cpu_info for the primary CPU; it must exist
 * before autoconfiguration because curlwp (via ci_curlwp) is used
 * very early in boot.
 */
struct cpu_info cpu_info_primary __cacheline_aligned = {
	.ci_curlwp = &lwp0
};

/* List of all attached CPUs, headed by the primary. */
struct cpu_info *cpu_info_list __read_mostly = &cpu_info_primary;

#if defined(MULTIPROCESSOR)
/*
 * Array of CPU info structures.  Must be statically-allocated because
 * curproc, etc. are used early.
 */
struct cpu_info *cpu_info[ALPHA_MAXPROCS];

/* Bitmask of CPUs booted, currently running, and paused. */
volatile u_long cpus_booted __read_mostly;
volatile u_long cpus_running __read_mostly;
volatile u_long cpus_paused __read_mostly;

void	cpu_boot_secondary(struct cpu_info *);
#endif /* MULTIPROCESSOR */
103
/*
 * cpu_idle_default --
 *	Default idle routine installed in cpu_idle_fn.
 */
static void
cpu_idle_default(void)
{
	/*
	 * Default is to do nothing.  Platform code can overwrite
	 * as needed.
	 */
}
112
/*
 * cpu_idle_wtint --
 *	Idle routine that uses the WTINT PALcode call; available for
 *	platform code to install in cpu_idle_fn.
 */
void
cpu_idle_wtint(void)
{
	/*
	 * Some PALcode versions implement the WTINT call to idle
	 * in a low power mode.
	 */
	alpha_pal_wtint(0);
}

/* Current idle routine; platform code may override the default. */
void	(*cpu_idle_fn)(void) __read_mostly = cpu_idle_default;
124
/*
 * The Implementation Version and the Architecture Mask must be
 * consistent across all CPUs in the system, so we set it for the
 * primary and announce the AMASK extensions if they exist.
 *
 * Note, we invert the AMASK so that if a bit is set, it means "has
 * extension".
 */
u_long	cpu_implver __read_mostly;	/* IMPLVER of the primary CPU */
u_long	cpu_amask __read_mostly;	/* extensions common to all CPUs */
135
/* Definition of the driver for autoconfig. */
static int	cpumatch(device_t, cfdata_t, void *);
static void	cpuattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cpu, sizeof(struct cpu_softc),
    cpumatch, cpuattach, NULL, NULL);

static void	cpu_announce_extensions(struct cpu_info *);

extern struct cfdriver cpu_cd;

/*
 * Minor-type names for the LCA family, indexed by PCS minor type.
 * An empty string means "no specific name for this minor type".
 */
static const char * const lcaminor[] = {
	"",
	"21066", "21066",
	"21068", "21068",
	"21066A", "21068A",
	NULL
};
154
/*
 * Table mapping a PCS major CPU type to its architectural ("EV")
 * name, its chip name, and an optional table of minor-type names.
 * Consulted by cpu_description().
 */
const struct cputable_struct {
	const char *cpu_evname;		/* architectural name, e.g. "EV56" */
	const char *cpu_major_name;	/* chip name, e.g. "21164A" */
	const char * const *cpu_minor_names; /* per-minor-type names */
} cpunametable[] = {
	[PCS_PROC_EV3]		= { "EV3",	NULL,		NULL },
	[PCS_PROC_EV4]		= { "EV4",	"21064",	NULL },
	[PCS_PROC_SIMULATION]	= { "Sim",	NULL,		NULL },
	[PCS_PROC_LCA4]		= { "LCA4",	NULL,		lcaminor },
	[PCS_PROC_EV5]		= { "EV5",	"21164",	NULL },
	[PCS_PROC_EV45]		= { "EV45",	"21064A",	NULL },
	[PCS_PROC_EV56]		= { "EV56",	"21164A",	NULL },
	[PCS_PROC_EV6]		= { "EV6",	"21264",	NULL },
	[PCS_PROC_PCA56]	= { "PCA56",	"21164PC",	NULL },
	[PCS_PROC_PCA57]	= { "PCA57",	"21164PC"/*XXX*/, NULL },
	[PCS_PROC_EV67]		= { "EV67",	"21264A",	NULL },
	[PCS_PROC_EV68CB]	= { "EV68CB",	"21264C",	NULL },
	[PCS_PROC_EV68AL]	= { "EV68AL",	"21264B",	NULL },
	[PCS_PROC_EV68CX]	= { "EV68CX",	"21264D",	NULL },
	[PCS_PROC_EV7]		= { "EV7",	"21364",	NULL },
	[PCS_PROC_EV79]		= { "EV79",	NULL,		NULL },
	[PCS_PROC_EV69]		= { "EV69",	NULL,		NULL },
};
178
179 static bool
cpu_description(const struct cpu_softc * const sc,char * const buf,size_t const buflen)180 cpu_description(const struct cpu_softc * const sc,
181 char * const buf, size_t const buflen)
182 {
183 const char * const *s;
184 const char *ev;
185 int i;
186
187 const uint32_t major = sc->sc_major_type;
188 const uint32_t minor = sc->sc_minor_type;
189
190 if (major < __arraycount(cpunametable) &&
191 (ev = cpunametable[major].cpu_evname) != NULL) {
192 s = cpunametable[major].cpu_minor_names;
193 for (i = 0; s != NULL && s[i] != NULL; i++) {
194 if (i == minor && strlen(s[i]) != 0) {
195 break;
196 }
197 }
198 if (s == NULL || s[i] == NULL) {
199 s = &cpunametable[major].cpu_major_name;
200 i = 0;
201 if (s[i] == NULL) {
202 s = NULL;
203 }
204 }
205
206 /*
207 * Example strings:
208 *
209 * Sim-0
210 * 21068-3 (LCA4) [uses minor table]
211 * 21264C-5 (EV68CB)
212 * 21164PC-1 (PCA56)
213 */
214 if (s != NULL) {
215 snprintf(buf, buflen, "%s-%d (%s)", s[i], minor, ev);
216 } else {
217 snprintf(buf, buflen, "%s-%d", ev, minor);
218 }
219 return true;
220 }
221
222 snprintf(buf, buflen, "UNKNOWN CPU TYPE (%u:%u)", major, minor);
223 return false;
224 }
225
226 static int
cpu_sysctl_model(SYSCTLFN_ARGS)227 cpu_sysctl_model(SYSCTLFN_ARGS)
228 {
229 struct sysctlnode node = *rnode;
230 const struct cpu_softc * const sc = node.sysctl_data;
231 char model[32];
232
233 cpu_description(sc, model, sizeof(model));
234 node.sysctl_data = model;
235 return sysctl_lookup(SYSCTLFN_CALL(&node));
236 }
237
238 static int
cpu_sysctl_amask_bit(SYSCTLFN_ARGS,unsigned long const bit)239 cpu_sysctl_amask_bit(SYSCTLFN_ARGS, unsigned long const bit)
240 {
241 struct sysctlnode node = *rnode;
242 const struct cpu_softc * const sc = node.sysctl_data;
243
244 bool result = (sc->sc_amask & bit) ? true : false;
245 node.sysctl_data = &result;
246 return sysctl_lookup(SYSCTLFN_CALL(&node));
247 }
248
/*
 * sysctl helpers: one per AMASK architecture extension; each simply
 * tests the corresponding bit via cpu_sysctl_amask_bit().
 */
static int
cpu_sysctl_bwx(SYSCTLFN_ARGS)
{
	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_BWX);
}

static int
cpu_sysctl_fix(SYSCTLFN_ARGS)
{
	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_FIX);
}

static int
cpu_sysctl_cix(SYSCTLFN_ARGS)
{
	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_CIX);
}

static int
cpu_sysctl_mvi(SYSCTLFN_ARGS)
{
	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_MVI);
}

static int
cpu_sysctl_pat(SYSCTLFN_ARGS)
{
	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_PAT);
}

static int
cpu_sysctl_pmi(SYSCTLFN_ARGS)
{
	return cpu_sysctl_amask_bit(SYSCTLFN_CALL(rnode), ALPHA_AMASK_PMI);
}
284
285 static int
cpu_sysctl_primary(SYSCTLFN_ARGS)286 cpu_sysctl_primary(SYSCTLFN_ARGS)
287 {
288 struct sysctlnode node = *rnode;
289 const struct cpu_softc * const sc = node.sysctl_data;
290
291 bool result = CPU_IS_PRIMARY(sc->sc_ci);
292 node.sysctl_data = &result;
293 return sysctl_lookup(SYSCTLFN_CALL(&node));
294 }
295
296 /*
297 * The following is an attempt to map out how booting secondary CPUs
298 * works.
299 *
300 * As we find processors during the autoconfiguration sequence, all
301 * processors have idle stacks and PCBs created for them, including
302 * the primary (although the primary idles on lwp0's PCB until its
303 * idle PCB is created).
304 *
305 * Right before calling uvm_scheduler(), main() calls, on lwp0's
306 * context, cpu_boot_secondary_processors(). This is our key to
 * actually spin up the additional processors we've found.  We
308 * run through our cpu_info[] array looking for secondary processors
309 * with idle PCBs, and spin them up.
310 *
311 * The spinup involves switching the secondary processor to the
312 * OSF/1 PALcode, setting the entry point to cpu_spinup_trampoline(),
313 * and sending a "START" message to the secondary's console.
314 *
315 * Upon successful processor bootup, the cpu_spinup_trampoline will call
316 * cpu_hatch(), which will print a message indicating that the processor
317 * is running, and will set the "hatched" flag in its softc. At the end
318 * of cpu_hatch() is a spin-forever loop; we do not yet attempt to schedule
319 * anything on secondary CPUs.
320 */
321
322 static int
cpumatch(device_t parent,cfdata_t cfdata,void * aux)323 cpumatch(device_t parent, cfdata_t cfdata, void *aux)
324 {
325 struct mainbus_attach_args *ma = aux;
326
327 /* make sure that we're looking for a CPU. */
328 if (strcmp(ma->ma_name, cpu_cd.cd_name) != 0)
329 return (0);
330
331 /* XXX CHECK SLOT? */
332 /* XXX CHECK PRIMARY? */
333
334 return (1);
335 }
336
/*
 * cpuattach --
 *	Autoconfiguration attach routine: record the CPU's type and
 *	variation info, set up its cpu_info, boot it if it is a
 *	secondary, and attach event counters and a sysctl subtree.
 */
static void
cpuattach(device_t parent, device_t self, void *aux)
{
	struct cpu_softc * const sc = device_private(self);
	const struct mainbus_attach_args * const ma = aux;
	struct cpu_info *ci;
	char model[32];

	/* The primary CPU's ID is recorded in the HWRPB. */
	const bool primary = ma->ma_slot == hwrpb->rpb_primary_cpu_id;

	sc->sc_dev = self;

	/* Fetch the major/minor CPU type from this CPU's PCS. */
	const struct pcs * const p = LOCATE_PCS(hwrpb, ma->ma_slot);
	sc->sc_major_type = PCS_CPU_MAJORTYPE(p);
	sc->sc_minor_type = PCS_CPU_MINORTYPE(p);

	const bool recognized = cpu_description(sc, model, sizeof(model));

	aprint_normal(": ID %d%s, ", ma->ma_slot, primary ? " (primary)" : "");
	if (recognized) {
		aprint_normal("%s", model);
	} else {
		/* Unknown type: make it visible as an error. */
		aprint_error("%s", model);
	}

	aprint_naive("\n");
	aprint_normal("\n");

	/* Decode and (debug-)report the processor variation flags. */
	if (p->pcs_proc_var != 0) {
		bool needcomma = false;
		const char *vaxfp = "";
		const char *ieeefp = "";
		const char *pe = "";

		if (p->pcs_proc_var & PCS_VAR_VAXFP) {
			sc->sc_vax_fp = true;
			vaxfp = "VAX FP support";
			needcomma = true;
		}
		if (p->pcs_proc_var & PCS_VAR_IEEEFP) {
			sc->sc_ieee_fp = true;
			ieeefp = ", IEEE FP support";
			/* Skip the leading ", " if nothing preceded us. */
			if (!needcomma)
				ieeefp += 2;
			needcomma = true;
		}
		if (p->pcs_proc_var & PCS_VAR_PE) {
			sc->sc_primary_eligible = true;
			pe = ", Primary Eligible";
			if (!needcomma)
				pe += 2;
			needcomma = true;
		}
		aprint_debug_dev(sc->sc_dev, "%s%s%s", vaxfp, ieeefp, pe);
		if (p->pcs_proc_var & PCS_VAR_RESERVED)
			aprint_debug("%sreserved bits: %#lx",
			    needcomma ? ", " : "",
			    p->pcs_proc_var & PCS_VAR_RESERVED);
		aprint_debug("\n");
	}

	/* We cannot address CPUs whose WHAMI exceeds the IPI mask width. */
	if (ma->ma_slot > ALPHA_WHAMI_MAXID) {
		if (primary)
			panic("cpu_attach: primary CPU ID too large");
		aprint_error_dev(sc->sc_dev,
		    "processor ID too large, ignoring\n");
		return;
	}

	if (primary) {
		ci = &cpu_info_primary;
	} else {
		/*
		 * kmem_zalloc() will guarantee cache line alignment for
		 * all allocations >= CACHE_LINE_SIZE.
		 */
		ci = kmem_zalloc(sizeof(*ci), KM_SLEEP);
		KASSERT(((uintptr_t)ci & (CACHE_LINE_SIZE - 1)) == 0);
	}
#if defined(MULTIPROCESSOR)
	cpu_info[ma->ma_slot] = ci;
#endif
	ci->ci_cpuid = ma->ma_slot;
	ci->ci_softc = sc;
	ci->ci_pcc_freq = hwrpb->rpb_cc_freq;

	sc->sc_ci = ci;

#if defined(MULTIPROCESSOR)
	/*
	 * Make sure the processor is available for use.
	 */
	if ((p->pcs_flags & PCS_PA) == 0) {
		if (primary)
			panic("cpu_attach: primary not available?!");
		aprint_normal_dev(sc->sc_dev,
		    "processor not available for use\n");
		return;
	}

	/* Make sure the processor has valid PALcode. */
	if ((p->pcs_flags & PCS_PV) == 0) {
		if (primary)
			panic("cpu_attach: primary has invalid PALcode?!");
		aprint_error_dev(sc->sc_dev, "PALcode not valid\n");
		return;
	}
#endif /* MULTIPROCESSOR */

	/*
	 * If we're the primary CPU, no more work to do; we're already
	 * running!
	 */
	if (primary) {
		cpu_announce_extensions(ci);
#if defined(MULTIPROCESSOR)
		ci->ci_flags |= CPUF_PRIMARY|CPUF_RUNNING;
		atomic_or_ulong(&cpus_booted, (1UL << ma->ma_slot));
		atomic_or_ulong(&cpus_running, (1UL << ma->ma_slot));
#endif /* MULTIPROCESSOR */
	} else {
#if defined(MULTIPROCESSOR)
		int error;

		error = mi_cpu_attach(ci);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "mi_cpu_attach failed with %d\n", error);
			return;
		}

		/*
		 * Boot the secondary processor.  It will announce its
		 * extensions, and then spin until we tell it to go
		 * on its merry way.
		 */
		cpu_boot_secondary(ci);

		/*
		 * Link the processor into the list.
		 */
		ci->ci_next = cpu_info_list->ci_next;
		cpu_info_list->ci_next = ci;
#else /* ! MULTIPROCESSOR */
		aprint_normal_dev(sc->sc_dev, "processor off-line; "
		    "multiprocessor support not present in kernel\n");
#endif /* MULTIPROCESSOR */
	}

	/* Per-CPU interrupt event counters. */
	evcnt_attach_dynamic(&sc->sc_evcnt_clock, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "clock");
	evcnt_attach_dynamic(&sc->sc_evcnt_device, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "device");
#if defined(MULTIPROCESSOR)
	alpha_ipi_init(ci);
#endif

	/*
	 * Create the per-CPU sysctl subtree under hw.<cpuN>.  A failure
	 * at any step simply abandons the rest of the tree.
	 */
	struct sysctllog **log = &sc->sc_sysctllog;
	const struct sysctlnode *rnode, *cnode;
	int error;

	error = sysctl_createv(log, 0, NULL, &rnode, CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("cpu properties"),
	    NULL, 0,
	    NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_STRING, "model",
	    SYSCTL_DESCR("cpu model"),
	    cpu_sysctl_model, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_INT, "major",
	    SYSCTL_DESCR("cpu major type"),
	    NULL, 0,
	    &sc->sc_major_type, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_INT, "minor",
	    SYSCTL_DESCR("cpu minor type"),
	    NULL, 0,
	    &sc->sc_minor_type, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_LONG, "implver",
	    SYSCTL_DESCR("cpu implementation version"),
	    NULL, 0,
	    &sc->sc_implver, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_HEX, CTLTYPE_LONG, "amask",
	    SYSCTL_DESCR("architecture extensions mask"),
	    NULL, 0,
	    &sc->sc_amask, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "bwx",
	    SYSCTL_DESCR("cpu supports BWX extension"),
	    cpu_sysctl_bwx, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "fix",
	    SYSCTL_DESCR("cpu supports FIX extension"),
	    cpu_sysctl_fix, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "cix",
	    SYSCTL_DESCR("cpu supports CIX extension"),
	    cpu_sysctl_cix, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "mvi",
	    SYSCTL_DESCR("cpu supports MVI extension"),
	    cpu_sysctl_mvi, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "pat",
	    SYSCTL_DESCR("cpu supports PAT extension"),
	    cpu_sysctl_pat, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "pmi",
	    SYSCTL_DESCR("cpu supports PMI extension"),
	    cpu_sysctl_pmi, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "vax_fp",
	    SYSCTL_DESCR("cpu supports VAX FP"),
	    NULL, 0,
	    &sc->sc_vax_fp, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "ieee_fp",
	    SYSCTL_DESCR("cpu supports IEEE FP"),
	    NULL, 0,
	    &sc->sc_ieee_fp, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "primary_eligible",
	    SYSCTL_DESCR("cpu is primary-eligible"),
	    NULL, 0,
	    &sc->sc_primary_eligible, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_BOOL, "primary",
	    SYSCTL_DESCR("cpu is the primary cpu"),
	    cpu_sysctl_primary, 0,
	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_LONG, "cpu_id",
	    SYSCTL_DESCR("hardware cpu ID"),
	    NULL, 0,
	    &sc->sc_ci->ci_cpuid, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;

	error = sysctl_createv(log, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT, CTLTYPE_LONG, "pcc_freq",
	    SYSCTL_DESCR("PCC frequency"),
	    NULL, 0,
	    &sc->sc_ci->ci_pcc_freq, 0, CTL_CREATE, CTL_EOL);
	if (error)
		return;
}
642
643 static void
cpu_announce_extensions(struct cpu_info * ci)644 cpu_announce_extensions(struct cpu_info *ci)
645 {
646 u_long implver, amask = 0;
647 char bits[64];
648
649 implver = alpha_implver();
650 if (implver >= ALPHA_IMPLVER_EV5)
651 amask = (~alpha_amask(ALPHA_AMASK_ALL)) & ALPHA_AMASK_ALL;
652
653 ci->ci_softc->sc_implver = implver;
654 ci->ci_softc->sc_amask = amask;
655
656 if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id) {
657 cpu_implver = implver;
658 cpu_amask = amask;
659 } else {
660 if (implver < cpu_implver)
661 aprint_error_dev(ci->ci_softc->sc_dev,
662 "WARNING: IMPLVER %lu < %lu\n",
663 implver, cpu_implver);
664
665 /*
666 * Cap the system architecture mask to the intersection
667 * of features supported by all processors in the system.
668 */
669 cpu_amask &= amask;
670 }
671
672 if (amask) {
673 snprintb(bits, sizeof(bits),
674 ALPHA_AMASK_BITS, amask);
675 aprint_normal_dev(ci->ci_softc->sc_dev,
676 "Architecture extensions: %s\n", bits);
677 }
678 }
679
680 #if defined(MULTIPROCESSOR)
681 void
cpu_boot_secondary_processors(void)682 cpu_boot_secondary_processors(void)
683 {
684 struct cpu_info *ci;
685 u_long i;
686 bool did_patch = false;
687
688 for (i = 0; i < ALPHA_MAXPROCS; i++) {
689 ci = cpu_info[i];
690 if (ci == NULL || ci->ci_data.cpu_idlelwp == NULL)
691 continue;
692 if (CPU_IS_PRIMARY(ci))
693 continue;
694 if ((cpus_booted & (1UL << i)) == 0)
695 continue;
696
697 /* Patch MP-criticial kernel routines. */
698 if (did_patch == false) {
699 alpha_patch(true);
700 did_patch = true;
701 }
702
703 /*
704 * Launch the processor.
705 */
706 atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
707 atomic_or_ulong(&cpus_running, (1U << i));
708 }
709 }
710
/*
 * cpu_boot_secondary --
 *	Spin up one secondary processor: point its PCS and the HWRPB
 *	restart vector at our trampoline, switch it to OSF/1 PALcode,
 *	send "START" to its console, then wait for it to boot and hatch.
 */
void
cpu_boot_secondary(struct cpu_info *ci)
{
	long timeout;
	struct pcs *pcsp, *primary_pcsp;
	struct pcb *pcb;
	u_long cpumask;

	pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	primary_pcsp = LOCATE_PCS(hwrpb, hwrpb->rpb_primary_cpu_id);
	pcsp = LOCATE_PCS(hwrpb, ci->ci_cpuid);
	cpumask = (1UL << ci->ci_cpuid);

	/*
	 * Set up the PCS's HWPCB to match ours.
	 */
	memcpy(pcsp->pcs_hwpcb, &pcb->pcb_hw, sizeof(pcb->pcb_hw));

	/*
	 * Set up the HWRPB to restart the secondary processor
	 * with our spin-up trampoline.
	 */
	hwrpb->rpb_restart = (uint64_t) cpu_spinup_trampoline;
	hwrpb->rpb_restart_val = (uint64_t) ci;
	hwrpb->rpb_checksum = hwrpb_checksum();

	/*
	 * Configure the CPU to start in OSF/1 PALcode by copying
	 * the primary CPU's PALcode revision info to the secondary
	 * CPUs PCS.
	 */
	memcpy(&pcsp->pcs_pal_rev, &primary_pcsp->pcs_pal_rev,
	    sizeof(pcsp->pcs_pal_rev));
	pcsp->pcs_flags |= (PCS_CV|PCS_RC);
	pcsp->pcs_flags &= ~PCS_BIP;

	/* Make sure the secondary console sees all this. */
	alpha_mb();

	/* Send a "START" command to the secondary CPU's console. */
	if (cpu_iccb_send(ci->ci_cpuid, "START\r\n")) {
		aprint_error_dev(ci->ci_softc->sc_dev,
		    "unable to issue `START' command\n");
		return;
	}

	/* Wait for the processor to boot (its console sets PCS_BIP). */
	for (timeout = 10000; timeout != 0; timeout--) {
		alpha_mb();
		if (pcsp->pcs_flags & PCS_BIP)
			break;
		delay(1000);
	}
	if (timeout == 0)
		aprint_error_dev(ci->ci_softc->sc_dev,
		    "processor failed to boot\n");

	/*
	 * ...and now wait for verification that it's running kernel
	 * code (cpu_hatch() sets our bit in cpus_booted).
	 */
	for (timeout = 10000; timeout != 0; timeout--) {
		alpha_mb();
		if (cpus_booted & cpumask)
			break;
		delay(1000);
	}
	if (timeout == 0)
		aprint_error_dev(ci->ci_softc->sc_dev,
		    "processor failed to hatch\n");
}
782
783 void
cpu_pause_resume(u_long cpu_id,int pause)784 cpu_pause_resume(u_long cpu_id, int pause)
785 {
786 u_long cpu_mask = (1UL << cpu_id);
787
788 if (pause) {
789 atomic_or_ulong(&cpus_paused, cpu_mask);
790 alpha_send_ipi(cpu_id, ALPHA_IPI_PAUSE);
791 } else
792 atomic_and_ulong(&cpus_paused, ~cpu_mask);
793 }
794
795 void
cpu_pause_resume_all(int pause)796 cpu_pause_resume_all(int pause)
797 {
798 struct cpu_info *ci, *self = curcpu();
799 CPU_INFO_ITERATOR cii;
800
801 for (CPU_INFO_FOREACH(cii, ci)) {
802 if (ci == self)
803 continue;
804 cpu_pause_resume(ci->ci_cpuid, pause);
805 }
806 }
807
808 void
cpu_halt(void)809 cpu_halt(void)
810 {
811 struct cpu_info *ci = curcpu();
812 u_long cpu_id = cpu_number();
813 struct pcs *pcsp = LOCATE_PCS(hwrpb, cpu_id);
814
815 aprint_normal_dev(ci->ci_softc->sc_dev, "shutting down...\n");
816
817 pcsp->pcs_flags &= ~(PCS_RC | PCS_HALT_REQ);
818 pcsp->pcs_flags |= PCS_HALT_STAY_HALTED;
819
820 atomic_and_ulong(&cpus_running, ~(1UL << cpu_id));
821 atomic_and_ulong(&cpus_booted, ~(1U << cpu_id));
822
823 alpha_pal_halt();
824 /* NOTREACHED */
825 }
826
/*
 * cpu_hatch --
 *	First kernel code run by a secondary CPU, called from the
 *	spin-up trampoline.  Initializes per-CPU state, reports the
 *	CPU as booted, then spins until cpu_boot_secondary_processors()
 *	sets our bit in cpus_running.
 */
void
cpu_hatch(struct cpu_info *ci)
{
	u_long cpu_id = cpu_number();
	u_long cpumask = (1UL << cpu_id);

	/* pmap initialization for this processor. */
	pmap_init_cpu(ci);

	/* Initialize trap vectors for this processor. */
	trap_init();

	/* Yahoo!  We're running kernel code!  Announce it! */
	cpu_announce_extensions(ci);

	/* Tell cpu_boot_secondary() that we are up. */
	atomic_or_ulong(&cpus_booted, cpumask);

	/*
	 * Spin here until we're told we can start.
	 */
	while ((cpus_running & cpumask) == 0)
		/* spin */ ;

	/*
	 * Invalidate the TLB and sync the I-stream before we
	 * jump into the kernel proper.  We have to do this
	 * because we haven't been getting IPIs while we've
	 * been spinning.
	 */
	ALPHA_TBIA();
	alpha_pal_imb();

	/* Per-CPU cycle-counter setup, when the CC timecounter is used. */
	if (alpha_use_cctr) {
		cc_init_secondary(ci);
	}

	cpu_initclocks_secondary();
}
865
/*
 * cpu_iccb_send --
 *	Send a message to the specified CPU's console via the
 *	inter-console communications buffer (ICCB).
 *
 *	Returns 0 on success, or EIO if the ICCB never became
 *	available or the console never consumed the message.
 */
int
cpu_iccb_send(long cpu_id, const char *msg)
{
	struct pcs *pcsp = LOCATE_PCS(hwrpb, cpu_id);
	int timeout;
	u_long cpumask = (1UL << cpu_id);

	/* Wait for the ICCB to become available. */
	for (timeout = 10000; timeout != 0; timeout--) {
		alpha_mb();
		if ((hwrpb->rpb_rxrdy & cpumask) == 0)
			break;
		delay(1000);
	}
	if (timeout == 0)
		return (EIO);

	/*
	 * Copy the message into the ICCB, and tell the secondary console
	 * that it's there.  Ensure the buffer is initialized before we
	 * set the rxrdy bits, as a store-release.
	 */
	strcpy(pcsp->pcs_iccb.iccb_rxbuf, msg);
	pcsp->pcs_iccb.iccb_rxlen = strlen(msg);
	membar_release();
	atomic_or_ulong(&hwrpb->rpb_rxrdy, cpumask);

	/* Wait for the message to be received (console clears our bit). */
	for (timeout = 10000; timeout != 0; timeout--) {
		alpha_mb();
		if ((hwrpb->rpb_rxrdy & cpumask) == 0)
			break;
		delay(1000);
	}
	if (timeout == 0)
		return (EIO);

	return (0);
}
905
/*
 * cpu_iccb_receive --
 *	Handle an incoming inter-console message.  We currently just
 *	acknowledge it; the decoding code below is disabled.
 */
void
cpu_iccb_receive(void)
{
#if 0	/* Don't bother... we don't get any important messages anyhow. */
	uint64_t txrdy;
	char *cp1, *cp2, buf[80];
	struct pcs *pcsp;
	u_int cnt;
	long cpu_id;

	txrdy = hwrpb->rpb_txrdy;

	for (cpu_id = 0; cpu_id < hwrpb->rpb_pcs_cnt; cpu_id++) {
		if (txrdy & (1UL << cpu_id)) {
			pcsp = LOCATE_PCS(hwrpb, cpu_id);
			printf("Inter-console message from CPU %lu "
			    "HALT REASON = 0x%lx, FLAGS = 0x%lx\n",
			    cpu_id, pcsp->pcs_halt_reason, pcsp->pcs_flags);

			cnt = pcsp->pcs_iccb.iccb_txlen;
			if (cnt >= 80) {
				printf("Malformed inter-console message\n");
				continue;
			}
			cp1 = pcsp->pcs_iccb.iccb_txbuf;
			cp2 = buf;
			while (cnt--) {
				if (*cp1 != '\r' && *cp1 != '\n')
					*cp2++ = *cp1;
				cp1++;
			}
			*cp2 = '\0';
			printf("Message from CPU %lu: %s\n", cpu_id, buf);
		}
	}
#endif /* 0 */
	/* Acknowledge receipt so the console can send again. */
	hwrpb->rpb_txrdy = 0;
	alpha_mb();
}
945
946 #if defined(DDB)
947
948 #include <ddb/db_output.h>
949 #include <machine/db_machdep.h>
950
/*
 * Dump CPU information from DDB: one row per attached CPU with its
 * cpu_info address, device name, hardware ID, flags, pending IPIs,
 * and current LWP.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	db_printf("addr dev id flags ipis curproc\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		db_printf("%p %s %lu %lx %lx %p\n",
		    ci,
		    device_xname(ci->ci_softc->sc_dev),
		    ci->ci_cpuid,
		    ci->ci_flags,
		    ci->ci_ipis,
		    ci->ci_curlwp);
	}
}
971
972 #endif /* DDB */
973
974 #endif /* MULTIPROCESSOR */
975