/*	$OpenBSD: cpu.c,v 1.90 2024/10/24 17:37:06 gkoehler Exp $ */

/*
 * Copyright (c) 1997 Per Fogelstrom
 * Copyright (c) 1997 RTMX Inc
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed under OpenBSD for RTMX Inc
 *	North Carolina, USA, by Per Fogelstrom, Opsycon AB, Sweden.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/task.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <dev/ofw/openfirm.h>

#include <machine/autoconf.h>
#include <powerpc/bat.h>
#include <machine/cpu.h>
#include <machine/trap.h>
#include <machine/elf.h>
#include <powerpc/hid.h>

/* SCOM addresses (24-bit) */
#define SCOM_PCR	0x0aa001 /* Power Control Register */
#define SCOM_PSR	0x408001 /* Power Tuning Status Register */

/* SCOMC format */
#define SCOMC_ADDR_SHIFT	8
#define SCOMC_ADDR_MASK		0xffff0000
#define SCOMC_READ		0x00008000

/* Power (Tuning) Status Register */
#define PSR_CMD_RECEIVED	0x2000000000000000LL
#define PSR_CMD_COMPLETED	0x1000000000000000LL
#define PSR_FREQ_MASK		0x0300000000000000LL
#define PSR_FREQ_HALF		0x0100000000000000LL

struct cpu_info cpu_info[PPC_MAXPROCS];

char cpu_model[80];
char machine[] = MACHINE;	/* cpu architecture */

/* Definition of the driver for autoconfig. */
int	cpumatch(struct device *, void *, void *);
void	cpuattach(struct device *, struct device *, void *);

const struct cfattach cpu_ca = {
	sizeof(struct device), cpumatch, cpuattach
};

struct cfdriver cpu_cd = {
	NULL, "cpu", DV_DULL
};

void ppc64_scale_frequency(u_int);
void (*ppc64_slew_voltage)(u_int);
void ppc64_setperf(int);

void config_l2cr(int);

int
cpumatch(struct device *parent, void *cfdata, void *aux)
{
	struct confargs *ca = aux;
	int *reg = ca->ca_reg;

	/* make sure that we're looking for a CPU. */
	if (strcmp(ca->ca_name, cpu_cd.cd_name) != 0)
		return (0);

	if (reg[0] >= PPC_MAXPROCS)
		return (0);

	return (1);
}

u_int32_t ppc_curfreq;
u_int32_t ppc_maxfreq;

int
ppc_cpuspeed(int *freq)
{
	*freq = ppc_curfreq;

	return (0);
}

static u_int32_t ppc_power_mode_data[2];

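/*
 * Switch a 970FX between full and half core frequency: rewrite the
 * Power Control Register over the SCOM interface with a value taken
 * from the firmware's "power-mode-data" property (stored in
 * ppc_power_mode_data by cpuattach()), then poll the Power Tuning
 * Status Register until the change completes.
 */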
void
ppc64_scale_frequency(u_int freq_scale)
{
	u_int64_t psr;
	int s;

	s = ppc_intr_disable();

	/* Clear PCRH and PCR. */
	ppc_mtscomd(0x00000000);
	ppc_mtscomc(SCOM_PCR << SCOMC_ADDR_SHIFT);
	ppc_mtscomd(0x80000000);
	ppc_mtscomc(SCOM_PCR << SCOMC_ADDR_SHIFT);

	/* Set PCR. */
	ppc_mtscomd(ppc_power_mode_data[freq_scale] | 0x80000000);
	ppc_mtscomc(SCOM_PCR << SCOMC_ADDR_SHIFT);

	/* Wait until frequency change is completed. */
	do {
		ppc64_mtscomc((SCOM_PSR << SCOMC_ADDR_SHIFT) | SCOMC_READ);
		psr = ppc64_mfscomd();
		ppc64_mfscomc();
		if (psr & PSR_CMD_COMPLETED)
			break;
		DELAY(100);
	} while (psr & PSR_CMD_RECEIVED);

	if ((psr & PSR_FREQ_MASK) == PSR_FREQ_HALF)
		ppc_curfreq = ppc_maxfreq / 2;
	else
		ppc_curfreq = ppc_maxfreq;

	ppc_intr_enable(s);
}

extern int perflevel;

struct task ppc64_setperf_task;
int ppc64_perflevel;

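/*
 * cpu_setperf glue: ppc64_setperf() records the requested level and
 * defers the work to a task on the systq task queue, so the SCOM
 * sequence and any voltage slew run from a kernel thread.  Levels of
 * 50 or below select half frequency, anything higher full frequency;
 * the voltage is raised before speeding up and lowered after slowing
 * down.
 */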
void
ppc64_do_setperf(void *arg)
{
	if (ppc64_perflevel <= 50) {
		if (ppc_curfreq == ppc_maxfreq / 2)
			return;

		ppc64_scale_frequency(FREQ_HALF);
		if (ppc64_slew_voltage)
			ppc64_slew_voltage(FREQ_HALF);
	} else {
		if (ppc_curfreq == ppc_maxfreq)
			return;

		if (ppc64_slew_voltage)
			ppc64_slew_voltage(FREQ_FULL);
		ppc64_scale_frequency(FREQ_FULL);
	}
}

void
ppc64_setperf(int level)
{
	ppc64_perflevel = level;
	task_add(systq, &ppc64_setperf_task);
}

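/*
 * Attach a CPU device: identify the processor from the PVR, fill in
 * cpu_model and the hwcap bits, read the clock and timebase
 * frequencies from OpenFirmware, hook up 970FX frequency scaling,
 * pick a power-saving (doze/nap) mode in HID0 and, on G3/G4 class
 * processors, configure the L2 cache.
 */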
void
cpuattach(struct device *parent, struct device *dev, void *aux)
{
	struct confargs *ca = aux;
	int *reg = ca->ca_reg;
	u_int32_t cpu, pvr, hid0;
	char name[32];
	int qhandle, phandle, len;
	u_int32_t clock_freq = 0, timebase = 0;
	struct cpu_info *ci;

	ci = &cpu_info[reg[0]];
	ci->ci_cpuid = reg[0];
	ci->ci_dev = dev;

	hwcap = PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_HAS_MMU;

	pvr = ppc_mfpvr();
	cpu = pvr >> 16;
	switch (cpu) {
	case PPC_CPU_MPC601:
		snprintf(cpu_model, sizeof(cpu_model), "601");
		break;
	case PPC_CPU_MPC603:
		snprintf(cpu_model, sizeof(cpu_model), "603");
		break;
	case PPC_CPU_MPC604:
		snprintf(cpu_model, sizeof(cpu_model), "604");
		break;
	case PPC_CPU_MPC603e:
		snprintf(cpu_model, sizeof(cpu_model), "603e");
		break;
	case PPC_CPU_MPC603ev:
		snprintf(cpu_model, sizeof(cpu_model), "603ev");
		break;
	case PPC_CPU_MPC750:
		snprintf(cpu_model, sizeof(cpu_model), "750");
		break;
	case PPC_CPU_MPC604ev:
		snprintf(cpu_model, sizeof(cpu_model), "604ev");
		break;
	case PPC_CPU_MPC7400:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "7400");
		break;
	case PPC_CPU_MPC7447A:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "7447A");
		break;
	case PPC_CPU_MPC7448:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "7448");
		break;
	case PPC_CPU_IBM970:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "970");
		break;
	case PPC_CPU_IBM970FX:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "970FX");
		break;
	case PPC_CPU_IBM970MP:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "970MP");
		break;
	case PPC_CPU_IBM750FX:
		snprintf(cpu_model, sizeof(cpu_model), "750FX");
		break;
	case PPC_CPU_MPC7410:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "7410");
		break;
	case PPC_CPU_MPC7450:
		ppc_altivec = 1;
		if ((pvr & 0xf) < 3)
			snprintf(cpu_model, sizeof(cpu_model), "7450");
		else
			snprintf(cpu_model, sizeof(cpu_model), "7451");
		break;
	case PPC_CPU_MPC7455:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "7455");
		break;
	case PPC_CPU_MPC7457:
		ppc_altivec = 1;
		snprintf(cpu_model, sizeof(cpu_model), "7457");
		break;
	default:
		snprintf(cpu_model, sizeof(cpu_model), "Version %x", cpu);
		break;
	}
#ifndef ALTIVEC			/* altivec support absent from kernel */
	ppc_altivec = 0;
#endif
	if (ppc_altivec)
		hwcap |= PPC_FEATURE_HAS_ALTIVEC;

	snprintf(cpu_model + strlen(cpu_model),
	    sizeof(cpu_model) - strlen(cpu_model),
	    " (Revision 0x%x)", pvr & 0xffff);
	printf(": %s", cpu_model);

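	/*
	 * Depth-first walk of the OpenFirmware device tree looking for
	 * the first "cpu" node, to read its clock-frequency and
	 * timebase-frequency properties.
	 */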
	for (qhandle = OF_peer(0); qhandle; qhandle = phandle) {
		len = OF_getprop(qhandle, "device_type", name, sizeof(name));
		if (len >= 0 && strcmp(name, "cpu") == 0) {
			OF_getprop(qhandle, "clock-frequency", &clock_freq,
			    sizeof(clock_freq));
			OF_getprop(qhandle, "timebase-frequency", &timebase,
			    sizeof(timebase));
			break;
		}
		if ((phandle = OF_child(qhandle)))
			continue;
		while (qhandle) {
			if ((phandle = OF_peer(qhandle)))
				break;
			qhandle = OF_parent(qhandle);
		}
	}

	if (timebase != 0) {
		ticks_per_sec = timebase;
		ns_per_tick = 1000000000 / ticks_per_sec;
	}

	if (clock_freq != 0) {
		/* OpenFirmware stores the clock in Hz, not MHz. */
		clock_freq /= 1000000;
		printf(": %d MHz", clock_freq);
		ppc_curfreq = ppc_maxfreq = clock_freq;
		cpu_cpuspeed = ppc_cpuspeed;
	}

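	/*
	 * 970FX: read the Power Tuning Status Register to see whether
	 * the firmware left the core at half speed, and fetch the PCR
	 * values from "power-mode-data" so ppc64_scale_frequency() can
	 * switch speeds later.
	 */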
	if (cpu == PPC_CPU_IBM970FX) {
		u_int64_t psr;
		int s;

		s = ppc_intr_disable();
		ppc64_mtscomc((SCOM_PSR << SCOMC_ADDR_SHIFT) | SCOMC_READ);
		psr = ppc64_mfscomd();
		ppc64_mfscomc();
		ppc_intr_enable(s);

		if ((psr & PSR_FREQ_MASK) == PSR_FREQ_HALF) {
			ppc_curfreq = ppc_maxfreq / 2;
			perflevel = 50;
		}

		if (OF_getprop(qhandle, "power-mode-data",
		    &ppc_power_mode_data, sizeof ppc_power_mode_data) >= 8) {
			task_set(&ppc64_setperf_task, ppc64_do_setperf, NULL);
			cpu_setperf = ppc64_setperf;
		}
	}

	/* power savings mode */
	hid0 = ppc_mfhid0();

	switch (cpu) {
	case PPC_CPU_MPC603:
	case PPC_CPU_MPC603e:
	case PPC_CPU_MPC750:
	case PPC_CPU_MPC7400:
	case PPC_CPU_IBM750FX:
	case PPC_CPU_MPC7410:
		/* select DOZE mode */
		hid0 &= ~(HID0_NAP | HID0_SLEEP);
		hid0 |= HID0_DOZE | HID0_DPM;
		ppc_cpuidle = 1;
		break;
	case PPC_CPU_MPC7447A:
	case PPC_CPU_MPC7448:
	case PPC_CPU_MPC7450:
	case PPC_CPU_MPC7455:
	case PPC_CPU_MPC7457:
		/* select NAP mode */
		hid0 &= ~(HID0_DOZE | HID0_SLEEP);
		hid0 |= HID0_NAP | HID0_DPM;
		/* try some other flags */
		hid0 |= HID0_SGE | HID0_BTIC;
		hid0 |= HID0_LRSTK | HID0_FOLD | HID0_BHT;
		/* Disable BTIC on 7450 Rev 2.0 or earlier */
		if (cpu == PPC_CPU_MPC7450 && (pvr & 0xffff) < 0x0200)
			hid0 &= ~HID0_BTIC;
		ppc_cpuidle = 1;
		break;
	case PPC_CPU_IBM970:
	case PPC_CPU_IBM970FX:
		/* select NAP mode */
		hid0 &= ~(HID0_DOZE | HID0_DEEPNAP);
		hid0 |= HID0_NAP | HID0_DPM;
		ppc_cpuidle = 1;
		break;
	case PPC_CPU_IBM970MP:
		/* select DEEPNAP mode, which requires NAP */
		hid0 &= ~HID0_DOZE;
		hid0 |= HID0_DEEPNAP | HID0_NAP | HID0_DPM;
		ppc_cpuidle = 1;
		break;
	}
	ppc_mthid0(hid0);

	/* if processor is G3 or G4, configure L2 cache */
	switch (cpu) {
	case PPC_CPU_MPC750:
	case PPC_CPU_MPC7400:
	case PPC_CPU_IBM750FX:
	case PPC_CPU_MPC7410:
	case PPC_CPU_MPC7447A:
	case PPC_CPU_MPC7448:
	case PPC_CPU_MPC7450:
	case PPC_CPU_MPC7455:
	case PPC_CPU_MPC7457:
		config_l2cr(cpu);
		break;
	}
	printf("\n");
}

/* L2CR bit definitions */
#define L2CR_L2E        0x80000000 /* 0: L2 enable */
#define L2CR_L2PE       0x40000000 /* 1: L2 data parity enable */
#define L2CR_L2SIZ      0x30000000 /* 2-3: L2 size */
#define  L2SIZ_RESERVED         0x00000000
#define  L2SIZ_256K             0x10000000
#define  L2SIZ_512K             0x20000000
#define  L2SIZ_1M               0x30000000
#define L2CR_L2CLK      0x0e000000 /* 4-6: L2 clock ratio */
#define  L2CLK_DIS              0x00000000 /* disable L2 clock */
#define  L2CLK_10               0x02000000 /* core clock / 1   */
#define  L2CLK_15               0x04000000 /*            / 1.5 */
#define  L2CLK_20               0x08000000 /*            / 2   */
#define  L2CLK_25               0x0a000000 /*            / 2.5 */
#define  L2CLK_30               0x0c000000 /*            / 3   */
#define L2CR_L2RAM      0x01800000 /* 7-8: L2 RAM type */
#define  L2RAM_FLOWTHRU_BURST   0x00000000
#define  L2RAM_PIPELINE_BURST   0x01000000
#define  L2RAM_PIPELINE_LATE    0x01800000
#define L2CR_L2DO       0x00400000 /* 9: L2 data-only.
                                      Setting this bit disables instruction
                                      caching. */
#define L2CR_L2I        0x00200000 /* 10: L2 global invalidate. */
#define L2CR_L2CTL      0x00100000 /* 11: L2 RAM control (ZZ enable).
                                      Enables automatic operation of the
                                      L2ZZ (low-power mode) signal. */
#define L2CR_L2WT       0x00080000 /* 12: L2 write-through. */
#define L2CR_L2TS       0x00040000 /* 13: L2 test support. */
#define L2CR_L2OH       0x00030000 /* 14-15: L2 output hold. */
#define L2CR_L2SL       0x00008000 /* 16: L2 DLL slow. */
#define L2CR_L2DF       0x00004000 /* 17: L2 differential clock. */
#define L2CR_L2BYP      0x00002000 /* 18: L2 DLL bypass. */
#define L2CR_L2IP       0x00000001 /* 31: L2 global invalidate in progress
				       (read only). */
#ifdef L2CR_CONFIG
u_int l2cr_config = L2CR_CONFIG;
#else
u_int l2cr_config = 0;
#endif

/* L3CR bit definitions */
#define   L3CR_L3E                0x80000000 /*  0: L3 enable */
#define   L3CR_L3SIZ              0x10000000 /*  3: L3 size (0=1MB, 1=2MB) */

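/*
 * If firmware left the backside L2 disabled and the kernel was built
 * with an L2CR_CONFIG value, program and enable the L2 cache; in any
 * case report the L2 (and, on 7450/7455, L3) cache configuration.
 */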
void
config_l2cr(int cpu)
{
	u_int l2cr, x;

	l2cr = ppc_mfl2cr();

	/*
	 * Configure L2 cache if not enabled.
	 */
	if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
		l2cr = l2cr_config;
		ppc_mtl2cr(l2cr);

		/* Wait for L2 clock to be stable (640 L2 clocks). */
		delay(100);

		/* Invalidate all L2 contents. */
		l2cr |= L2CR_L2I;
		ppc_mtl2cr(l2cr);
		do {
			x = ppc_mfl2cr();
		} while (x & L2CR_L2IP);

		/* Enable L2 cache. */
		l2cr &= ~L2CR_L2I;
		l2cr |= L2CR_L2E;
		ppc_mtl2cr(l2cr);
	}

	if (l2cr & L2CR_L2E) {
		if (cpu == PPC_CPU_MPC7450 || cpu == PPC_CPU_MPC7455) {
			u_int l3cr;

			printf(": 256KB L2 cache");

			l3cr = ppc_mfl3cr();
			if (l3cr & L3CR_L3E)
				printf(", %cMB L3 cache",
				    l3cr & L3CR_L3SIZ ? '2' : '1');
		} else if (cpu == PPC_CPU_IBM750FX ||
			   cpu == PPC_CPU_MPC7447A || cpu == PPC_CPU_MPC7457)
			printf(": 512KB L2 cache");
		else if (cpu == PPC_CPU_MPC7448)
			printf(": 1MB L2 cache");
		else {
			switch (l2cr & L2CR_L2SIZ) {
			case L2SIZ_256K:
				printf(": 256KB");
				break;
			case L2SIZ_512K:
				printf(": 512KB");
				break;
			case L2SIZ_1M:
				printf(": 1MB");
				break;
			default:
				printf(": unknown size");
			}
			printf(" backside cache");
		}
#if 0
		switch (l2cr & L2CR_L2RAM) {
		case L2RAM_FLOWTHRU_BURST:
			printf(" Flow-through synchronous burst SRAM");
			break;
		case L2RAM_PIPELINE_BURST:
			printf(" Pipelined synchronous burst SRAM");
			break;
		case L2RAM_PIPELINE_LATE:
			printf(" Pipelined synchronous late-write SRAM");
			break;
		default:
			printf(" unknown type");
		}

		if (l2cr & L2CR_L2PE)
			printf(" with parity");
#endif
	} else
		printf(": L2 cache not enabled");
}

#ifdef MULTIPROCESSOR

#define	INTSTK	(8*1024)		/* 8K interrupt stack */

int cpu_spinup(struct device *, struct cpu_info *);
void cpu_hatch(void);
void cpu_spinup_trampoline(void);

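/*
 * Hand-off area between the boot CPU and a secondary being started:
 * the HID (or HID0/L2CR) values to apply, a timebase value for
 * synchronization and a "running" flag used as a simple handshake.
 */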
struct cpu_hatch_data {
	uint64_t tb;
	struct cpu_info *ci;
	uint32_t hid0;
	uint64_t hid1;
	uint64_t hid4;
	uint64_t hid5;
	int l2cr;
	int running;
};

volatile struct cpu_hatch_data *cpu_hatch_data;
volatile void *cpu_hatch_stack;

/*
 * XXX Due to a bug in our OpenFirmware interface/memory mapping,
 * machines with 64bit CPUs hang in the OF_finddevice() call below
 * if this array is stored on the stack.
 */
char cpuname[64];

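/*
 * Start one secondary CPU: allocate its interrupt stack from the low
 * 256MB (so bat0 maps it va == pa), publish the hatch data, patch the
 * system reset vector with a branch to cpu_spinup_trampoline(), kick
 * the CPU through its "soft-reset" register (or the default one at
 * offset 0x5c), then hand it a timebase value and wait for it to set
 * h->running.
 */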
int
cpu_spinup(struct device *self, struct cpu_info *ci)
{
	volatile struct cpu_hatch_data hatch_data, *h = &hatch_data;
	int i;
	struct pglist mlist;
	struct vm_page *m;
	int error;
	int size = 0;
	char *cp;
	u_char *reset_cpu;
	u_int node;

	/*
	 * Allocate some contiguous pages for the interrupt stack
	 * from the lowest 256MB (because bat0 always maps it va == pa).
	 */
	size += INTSTK;
	size += 8192;	/* SPILLSTK(1k) + DDBSTK(7k) */

	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, 0x0, 0x10000000 - 1, 0, 0,
	    &mlist, 1, UVM_PLA_WAITOK);
	if (error) {
		printf(": unable to allocate idle stack\n");
		return -1;
	}

	m = TAILQ_FIRST(&mlist);
	cp = (char *)VM_PAGE_TO_PHYS(m);
	bzero(cp, size);

	ci->ci_intstk = cp + INTSTK;
	cpu_hatch_stack = ci->ci_intstk - sizeof(struct trapframe);

	h->ci = ci;
	h->running = 0;
	h->hid0 = ppc_mfhid0();
	if (ppc_proc_is_64b) {
		h->hid1 = ppc64_mfhid1();
		h->hid4 = ppc64_mfhid4();
		h->hid5 = ppc64_mfhid5();
	} else {
		h->l2cr = ppc_mfl2cr();
	}
	cpu_hatch_data = h;

	__asm volatile ("sync; isync");

	/* XXX OpenPIC */
	{
		int off;

		*(u_int *)EXC_RST = 0x48000002 | (u_int)cpu_spinup_trampoline;
		syncicache((void *)EXC_RST, 0x100);

		h->running = -1;

		snprintf(cpuname, sizeof(cpuname), "/cpus/@%x", ci->ci_cpuid);
		node = OF_finddevice(cpuname);
		if (node == -1) {
			printf(": unable to locate OF node %s\n", cpuname);
			return -1;
		}
		if (OF_getprop(node, "soft-reset", &off, 4) == 4) {
			reset_cpu = mapiodev(0x80000000 + off, 1);
			*reset_cpu = 0x4;
			__asm volatile ("eieio" ::: "memory");
			*reset_cpu = 0x0;
			__asm volatile ("eieio" ::: "memory");
		} else {
			/* Start secondary CPU. */
			reset_cpu = mapiodev(0x80000000 + 0x5c, 1);
			*reset_cpu = 0x4;
			__asm volatile ("eieio" ::: "memory");
			*reset_cpu = 0x0;
			__asm volatile ("eieio" ::: "memory");
		}

		/* Sync timebase. */
		h->tb = ppc_mftb() + 100000;	/* 3ms @ 33MHz */

		while (h->tb > ppc_mftb())
			;
		__asm volatile ("sync; isync");
		h->running = 0;

		delay(500000);
	}

	for (i = 0; i < 0x3fffffff; i++)
		if (h->running) {
			break;
		}

	return 0;
}

volatile static int start_secondary_cpu;

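/*
 * Spin up every other CPU that attached, then set start_secondary_cpu
 * to release them all at once; cpu_hatch() busy-waits on that flag
 * before proceeding.
 */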
void
cpu_boot_secondary_processors(void)
{
	struct cpu_info *ci;
	int i;

	for (i = 0; i < PPC_MAXPROCS; i++) {
		ci = &cpu_info[i];
		if (ci->ci_cpuid == 0)
			continue;
		ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

		clockqueue_init(&ci->ci_queue);
		sched_init_cpu(ci);

		cpu_spinup(NULL, ci);
	}

	start_secondary_cpu = 1;
	__asm volatile ("sync");
}

void cpu_startclock(void);

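/*
 * First C code run by a secondary CPU after cpu_spinup_trampoline():
 * set up curcpu(), clear the BATs, apply the HID/L2CR state captured
 * by the boot CPU, enable the MMU, sync the timebase, report itself
 * running, then wait for cpu_boot_secondary_processors() to release
 * it before starting its clock and entering the idle loop.
 */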
void
cpu_hatch(void)
{
	volatile struct cpu_hatch_data *h = cpu_hatch_data;
	int intrstate;

	/* Initialize timebase. */
	ppc_mttb(0);

	/* Initialize curcpu(). */
	ppc_mtsprg0((u_int)h->ci);

	ppc_mtibat0u(0);
	ppc_mtibat1u(0);
	ppc_mtibat2u(0);
	ppc_mtibat3u(0);
	ppc_mtdbat0u(0);
	ppc_mtdbat1u(0);
	ppc_mtdbat2u(0);
	ppc_mtdbat3u(0);

	if (ppc_proc_is_64b) {
		/*
		 * The Hardware Interrupt Offset Register should be
		 * cleared after initialization.
		 */
		ppc_mthior(0);
		__asm volatile ("sync");

		ppc_mthid0(h->hid0);
		ppc64_mthid1(h->hid1);
		ppc64_mthid4(h->hid4);
		ppc64_mthid5(h->hid5);
	} else if (h->l2cr != 0) {
		u_int x;

		ppc_mthid0(h->hid0);
		ppc_mtl2cr(h->l2cr & ~L2CR_L2E);

		/* Wait for L2 clock to be stable (640 L2 clocks). */
		delay(100);

		/* Invalidate all L2 contents. */
		ppc_mtl2cr((h->l2cr & ~L2CR_L2E) | L2CR_L2I);
		do {
			x = ppc_mfl2cr();
		} while (x & L2CR_L2IP);

		ppc_mtl2cr(h->l2cr);
	}

	/*
	 * Now enable translation (and machine checks/recoverable interrupts).
	 */
	pmap_enable_mmu();

	/* XXX OpenPIC */
	{
		/* Sync timebase. */
		while (h->running == -1)
			;
		__asm volatile ("sync; isync");
		ppc_mttb(h->tb);
	}

	ncpus++;
	h->running = 1;
	__asm volatile ("eieio" ::: "memory");

	while (start_secondary_cpu == 0)
		;

	__asm volatile ("sync; isync");

	curcpu()->ci_ipending = 0;
	curcpu()->ci_cpl = 0;

	intrstate = ppc_intr_disable();
	cpu_startclock();
	ppc_intr_enable(intrstate);

	/* Enable inter-processor interrupts. */
	openpic_set_priority(curcpu()->ci_cpuid, 14);

	sched_toidle();
}
#endif
777