xref: /netbsd/sys/arch/macppc/macppc/cpu.c (revision c4a72b64)
1 /*	$NetBSD: cpu.c,v 1.30 2002/10/02 05:30:45 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 Tsubai Masanari.
5  * Copyright (c) 1998, 1999, 2001 Internet Research Institute, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by
19  *	Internet Research Institute, Inc.
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "opt_l2cr_config.h"
36 #include "opt_multiprocessor.h"
37 #include "opt_altivec.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/device.h>
42 
43 #include <uvm/uvm_extern.h>
44 #include <dev/ofw/openfirm.h>
45 #include <powerpc/mpc6xx/hid.h>
46 #include <powerpc/openpic.h>
47 #include <powerpc/atomic.h>
48 #include <powerpc/spr.h>
49 #ifdef ALTIVEC
50 #include <powerpc/altivec.h>
51 #endif
52 
53 #include <machine/autoconf.h>
54 #include <machine/bat.h>
55 #include <machine/fpu.h>
56 #include <machine/pcb.h>
57 #include <machine/pio.h>
58 #include <machine/trap.h>
59 
/* autoconf(9) match/attach glue. */
int cpumatch(struct device *, struct cfdata *, void *);
void cpuattach(struct device *, struct device *, void *);

void identifycpu(char *);
static void ohare_init(void);
int cpu_spinup(struct device *, struct cpu_info *);
void cpu_hatch(void);			/* entry point run on the secondary CPU */
void cpu_spinup_trampoline(void);	/* asm stub branched to from the reset vector */
int cpuintr(void *);			/* IPI interrupt handler */

CFATTACH_DECL(cpu, sizeof(struct device),
    cpumatch, cpuattach, NULL, NULL);

extern struct cfdriver cpu_cd;

/*
 * Hammerhead memory-controller register addresses (old-world MP machines)
 * and the Grand Central IPI interrupt line.
 */
#define HH_ARBCONF		0xf8000090	/* bit 0x02 set when a 2nd CPU is present (see cpumatch) */
#define HH_INTR_SECONDARY	0xf80000c0	/* written to raise/clear the secondary CPU's interrupt */
#define HH_INTR_PRIMARY		0xf3019000	/* read to raise an IPI on the primary (see macppc_send_ipi) */
#define GC_IPI_IRQ		30		/* Grand Central irq used for IPIs */
79 
80 int
81 cpumatch(parent, cf, aux)
82 	struct device *parent;
83 	struct cfdata *cf;
84 	void *aux;
85 {
86 	struct confargs *ca = aux;
87 	int *reg = ca->ca_reg;
88 	int node;
89 
90 	if (strcmp(ca->ca_name, cpu_cd.cd_name) != 0)
91 		return 0;
92 
93 	node = OF_finddevice("/cpus");
94 	if (node != -1) {
95 		for (node = OF_child(node); node != 0; node = OF_peer(node)) {
96 			uint32_t cpunum;
97 			int l;
98 			l = OF_getprop(node, "reg", &cpunum, sizeof(cpunum));
99 			if (l == 4 && reg[0] == cpunum)
100 				return 1;
101 		}
102 	}
103 	switch (reg[0]) {
104 	case 0:	/* primary CPU */
105 		return 1;
106 	case 1:	/* secondary CPU */
107 		if (OF_finddevice("/hammerhead") != -1)
108 			if (in32rb(HH_ARBCONF) & 0x02)
109 				return 1;
110 		break;
111 	}
112 
113 	return 0;
114 }
115 
116 void
117 cpuattach(parent, self, aux)
118 	struct device *parent, *self;
119 	void *aux;
120 {
121 	struct cpu_info *ci;
122 	struct confargs *ca = aux;
123 	int id = ca->ca_reg[0];
124 
125 	ci = cpu_attach_common(self, id);
126 	if (ci == NULL)
127 		return;
128 
129 	if (id > 0) {
130 #ifdef MULTIPROCESSOR
131 		cpu_spinup(self, ci);
132 #endif
133 		return;
134 	}
135 
136 	if (OF_finddevice("/bandit/ohare") != -1) {
137 		printf("%s", self->dv_xname);
138 		ohare_init();
139 	}
140 }
141 
142 #define CACHE_REG 0xf8000000
143 
144 void
145 ohare_init()
146 {
147 	u_int *cache_reg, x;
148 
149 	/* enable L2 cache */
150 	cache_reg = mapiodev(CACHE_REG, NBPG);
151 	if (((cache_reg[2] >> 24) & 0x0f) >= 3) {
152 		x = cache_reg[4];
153 		if ((x & 0x10) == 0)
154                 	x |= 0x04000000;
155 		else
156                 	x |= 0x04000020;
157 
158 		cache_reg[4] = x;
159 		printf(": ohare L2 cache enabled\n");
160 	}
161 }
162 
163 #ifdef MULTIPROCESSOR
164 
/*
 * Parameter block the primary CPU (cpu_spinup) hands to the secondary
 * (cpu_hatch).  The secondary copies the primary's MMU and HID state
 * from here and reports progress back through `running'.
 */
struct cpu_hatch_data {
	struct device *self;	/* the secondary's device */
	struct cpu_info *ci;	/* the secondary's cpu_info (loaded into SPRG0) */
	int running;		/* -1: hold secondary; 0: not up; 1: secondary is up */
	int pir;		/* value for the secondary's PIR (processor id) */
	int hid0;		/* primary's HID0 to replicate */
	int sdr1;		/* primary's SDR1 (page table base) */
	int sr[16];		/* primary's segment registers */
	int tbu, tbl;		/* timebase to load on openpic machines */
};

/* Shared between cpu_spinup() and the hatching secondary CPU. */
volatile struct cpu_hatch_data *cpu_hatch_data;
volatile int cpu_hatch_stack;	/* initial stack pointer for the secondary */
178 
/*
 * Spin up the secondary CPU (id 1): allocate its idle PCB and stacks,
 * publish boot parameters through cpu_hatch_data, kick the CPU using
 * the machine-specific mechanism (OpenPIC/KeyLargo GPIO vs. Hammerhead
 * mailbox), sync the timebase and wait for the secondary to report in.
 * Returns 0 on success, -1 on failure.
 */
int
cpu_spinup(self, ci)
	struct device *self;
	struct cpu_info *ci;
{
	volatile struct cpu_hatch_data hatch_data, *h = &hatch_data;
	int i;
	struct pcb *pcb;
	struct pglist mlist;
	int pvr, vers;
	int error;
	int size = 0;
	char *cp;

	/* NOTE(review): pvr/vers are computed but never used below. */
	pvr = mfpvr();
	vers = pvr >> 16;

	/*
	 * Allocate some contiguous pages for the idle PCB and stack
	 * from the lowest 256MB (because bat0 always maps it va == pa).
	 */
	size += USPACE;
	size += 8192;	/* INTSTK */
	size += 4096;	/* SPILLSTK */

	error = uvm_pglistalloc(size, 0x0, 0x10000000, 0, 0, &mlist, 1, 1);
	if (error) {
		printf(": unable to allocate idle stack\n");
		return -1;
	}

	/* Physical address is usable directly thanks to bat0's 1:1 map. */
	cp = (void *)VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist));
	memset(cp, 0, size);

	/* Carve the allocation into PCB, interrupt stack, spill stack. */
	pcb = (struct pcb *)cp;
	cp += USPACE;
	cpu_info[1].ci_idle_pcb = pcb;

	/* Stacks grow down: record the high end of each region. */
	cpu_info[1].ci_intstk = cp + 8192;
	cp += 8192;
	cpu_info[1].ci_spillstk = cp + 4096;
	cp += 4096;

	/*
	 * Initialize the idle stack pointer, reserving space for an
	 * (empty) trapframe (XXX is the trapframe really necessary?)
	 */
	pcb->pcb_sp = (paddr_t)pcb + USPACE - sizeof(struct trapframe);

	/* Publish the hatch parameters for cpu_hatch() on the new CPU. */
	cpu_hatch_data = h;
	h->running = 0;
	h->self = self;
	h->ci = ci;
	h->pir = 1;	/* processor id loaded into the secondary's PIR */
	cpu_hatch_stack = pcb->pcb_sp;
	cpu_info[1].ci_lasttb = cpu_info[0].ci_lasttb;

	/* copy special registers (HID0, SDR1, all 16 segment registers) */
	asm volatile ("mfspr %0,%1" : "=r"(h->hid0) : "n"(SPR_HID0));
	asm volatile ("mfsdr1 %0" : "=r"(h->sdr1));
	for (i = 0; i < 16; i++)
		asm ("mfsrin %0,%1" : "=r"(h->sr[i]) : "r"(i << ADDR_SR_SHFT));

	asm volatile ("sync; isync");

	if (openpic_base) {
		/* New-world (OpenPIC/KeyLargo) machines. */
		u_int kl_base = 0x80000000;	/* XXX */
		u_int gpio = kl_base + 0x5c;	/* XXX */
		uint64_t tb;

		/* Patch the reset vector with an absolute branch to the
		 * trampoline, then flush it to memory for the new CPU. */
		*(u_int *)EXC_RST =		/* ba cpu_spinup_trampoline */
		    0x48000002 | (u_int)cpu_spinup_trampoline;
		__syncicache((void *)EXC_RST, 0x100);

		h->running = -1;	/* hold the secondary in cpu_hatch() */

		/* Start secondary cpu. */
		out8(gpio, 4);
		out8(gpio, 5);

		/* Sync timebase. */
		tb = mftb();
		tb += 100000;  /* 3ms @ 33MHz */

		h->tbu = tb >> 32;
		h->tbl = tb & 0xffffffff;

		/* Busy-wait until the chosen timebase value is reached. */
		while (tb > mftb())
			;

		asm volatile ("sync; isync");
		h->running = 0;		/* release the secondary's spin loop */

		delay(500000);
	} else {
		/* Old-world (Hammerhead) machines. */
		/* Start secondary cpu and stop timebase. */
		out32(0xf2800000, (int)cpu_spinup_trampoline);
		out32(HH_INTR_SECONDARY, ~0);
		out32(HH_INTR_SECONDARY, 0);

		/* sync timebase (XXX shouldn't be zero'ed) */
		asm volatile ("mttbl %0; mttbu %0; mttbl %0" :: "r"(0));

		/*
		 * wait for secondary spin up (1.5ms @ 604/200MHz)
		 * XXX we cannot use delay() here because timebase is not
		 * running.
		 */
		for (i = 0; i < 100000; i++)
			if (h->running)
				break;

		/* Start timebase. */
		out32(0xf2800000, 0x100);
		out32(HH_INTR_SECONDARY, ~0);
		out32(HH_INTR_SECONDARY, 0);
	}

	delay(100000);		/* wait for secondary printf */

	if (h->running == 0) {
		printf(": secondary cpu didn't start\n");
		return -1;
	}

	if (!openpic_base) {
		/* Register IPI. */
		intr_establish(GC_IPI_IRQ, IST_LEVEL, IPL_HIGH, cpuintr, NULL);
	}

	return 0;
}
311 
/* Set by cpu_boot_secondary_processors(); releases hatched CPUs. */
volatile static int start_secondary_cpu;
extern long ticks_per_intr;	/* decrementer reload value (loaded via mtdec) */
314 
/*
 * Secondary CPU entry point, reached via cpu_spinup_trampoline.
 * Runs on the secondary: sets PIR/SPRG0, replicates the primary's
 * MMU state from cpu_hatch_data, enables translation, syncs the
 * timebase, then spins until cpu_boot_secondary_processors() lets
 * it enter normal operation.
 */
void
cpu_hatch()
{
	volatile struct cpu_hatch_data *h = cpu_hatch_data;
	u_int msr;
	int i;

	/* Initialize timebase. */
	asm ("mttbl %0; mttbu %0; mttbl %0" :: "r"(0));

	/* Set PIR (Processor Identification Register).  i.e. whoami */
	asm volatile ("mtspr 1023,%0" :: "r"(h->pir));
	/* SPRG0 <- our cpu_info pointer (presumably read by curcpu()). */
	asm volatile ("mtsprg 0,%0" :: "r"(h->ci));

	/* Initialize MMU: invalidate all BATs first. */
	asm ("mtibatu 0,%0" :: "r"(0));
	asm ("mtibatu 1,%0" :: "r"(0));
	asm ("mtibatu 2,%0" :: "r"(0));
	asm ("mtibatu 3,%0" :: "r"(0));
	asm ("mtdbatu 0,%0" :: "r"(0));
	asm ("mtdbatu 1,%0" :: "r"(0));
	asm ("mtdbatu 2,%0" :: "r"(0));
	asm ("mtdbatu 3,%0" :: "r"(0));

	/* Replicate the primary's HID0 configuration. */
	asm ("mtspr %1,%0" :: "r"(h->hid0), "n"(SPR_HID0));

	/* BAT 0: low-memory 1:1 mapping, same as the primary. */
	asm ("mtibatl 0,%0; mtibatu 0,%1;"
	     "mtdbatl 0,%0; mtdbatu 0,%1;"
		:: "r"(battable[0].batl), "r"(battable[0].batu));

	/* BAT 1: I/O space — differs between new- and old-world machines. */
	if (openpic_base) {
		asm ("mtibatl 1,%0; mtibatu 1,%1;"
		     "mtdbatl 1,%0; mtdbatu 1,%1;"
			:: "r"(battable[0x8].batl), "r"(battable[0x8].batu));
	} else {
		asm ("mtibatl 1,%0; mtibatu 1,%1;"
		     "mtdbatl 1,%0; mtdbatu 1,%1;"
			:: "r"(battable[0xf].batl), "r"(battable[0xf].batu));
	}

	/* Copy the primary's segment registers and page table base. */
	for (i = 0; i < 16; i++)
		asm ("mtsrin %0,%1" :: "r"(h->sr[i]), "r"(i << ADDR_SR_SHFT));
	asm ("mtsdr1 %0" :: "r"(h->sdr1));

	asm volatile ("isync");

	/* Enable I/D address translations. */
	asm volatile ("mfmsr %0" : "=r"(msr));
	msr |= PSL_IR|PSL_DR|PSL_ME|PSL_RI;
	asm volatile ("mtmsr %0" :: "r"(msr));

	asm volatile ("sync; isync");

	if (openpic_base) {
		/* Sync timebase. */
		/*
		 * NOTE(review): tbu/tbl are read before the running == -1
		 * wait completes; if this CPU gets here before the primary
		 * has stored them (cpu_spinup stores them after the GPIO
		 * kick), stale values are loaded — confirm the ordering is
		 * actually safe.
		 */
		u_int tbu = h->tbu;
		u_int tbl = h->tbl;
		while (h->running == -1)
			;
		asm volatile ("sync; isync");
		asm volatile ("mttbl %0" :: "r"(0));
		asm volatile ("mttbu %0" :: "r"(tbu));
		asm volatile ("mttbl %0" :: "r"(tbl));
	}

	cpu_setup(h->self, h->ci);

	/* Tell cpu_spinup() we are alive. */
	h->running = 1;
	asm volatile ("sync; isync");

	/* Wait for cpu_boot_secondary_processors(). */
	while (start_secondary_cpu == 0)
		;

	asm volatile ("sync; isync");

	printf("cpu%d: started\n", cpu_number());
	asm volatile ("mtdec %0" :: "r"(ticks_per_intr));

	if (openpic_base)
		openpic_set_priority(cpu_number(), 0);
	else
		out32(HH_INTR_SECONDARY, ~0);	/* Reset interrupt. */

	curcpu()->ci_ipending = 0;
	curcpu()->ci_cpl = 0;
}
401 
/*
 * Release all secondary CPUs spinning in cpu_hatch() so they enter
 * normal operation.
 */
void
cpu_boot_secondary_processors()
{

	start_secondary_cpu = 1;
	asm volatile ("sync");	/* make the store visible before returning */
}
409 
410 static volatile u_long IPI[CPU_MAXNUM];
411 
412 void
413 macppc_send_ipi(ci, mesg)
414 	volatile struct cpu_info *ci;
415 	u_long mesg;
416 {
417 	int cpu_id = ci->ci_cpuid;
418 
419 	/* printf("send_ipi(%d, 0x%lx)\n", cpu_id, mesg); */
420 	atomic_setbits_ulong(&IPI[cpu_id], mesg);
421 
422 	if (openpic_base) {
423 		openpic_write(OPENPIC_IPI(cpu_number(), 1), 1 << cpu_id);
424 	} else {
425 		switch (cpu_id) {
426 		case 0:
427 			in32(HH_INTR_PRIMARY);
428 			break;
429 		case 1:
430 			out32(HH_INTR_SECONDARY, ~0);
431 			out32(HH_INTR_SECONDARY, 0);
432 			break;
433 		}
434 	}
435 }
436 
/*
 * Process IPIs.  External interrupts are blocked.  Handles FPU/AltiVec
 * state flush requests and the halt request; always claims the
 * interrupt (returns 1).
 */
int
cpuintr(v)
	void *v;
{
	int cpu_id = cpu_number();
	int msr;
	u_long ipi;

	/* printf("cpuintr{%d}\n", cpu_id); */

	/* Fetch this CPU's message bits (presumably latching 0 back —
	 * i.e. an atomic fetch-and-clear; confirm in atomic.h). */
	ipi = atomic_loadlatch_ulong(&IPI[cpu_id], 0);
	if (ipi & MACPPC_IPI_FLUSH_FPU) {
		save_fpu_cpu();
	}
#ifdef ALTIVEC
	if (ipi & MACPPC_IPI_FLUSH_VEC) {
		save_vec_cpu();
	}
#endif
	if (ipi & MACPPC_IPI_HALT) {
		printf("halt{%d}\n", cpu_id);
		/* Disable external interrupts and loop entering
		 * power-save mode: this CPU never returns. */
		msr = (mfmsr() & ~PSL_EE) | PSL_POW;
		for (;;) {
			asm volatile ("sync; isync");
			mtmsr(msr);
		}
	}
	return 1;
}
469 #endif /* MULTIPROCESSOR */
470