xref: /openbsd/sys/arch/octeon/dev/octcit.c (revision 4cfece93)
1 /*	$OpenBSD: octcit.c,v 1.12 2019/09/01 12:16:01 visa Exp $	*/
2 
3 /*
4  * Copyright (c) 2017, 2019 Visa Hankala
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for OCTEON Central Interrupt Unit version 3 (CIU3).
21  *
22  * CIU3 is present on CN72xx, CN73xx, CN77xx, and CN78xx.
23  */
24 
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/atomic.h>
28 #include <sys/conf.h>
29 #include <sys/device.h>
30 #include <sys/evcount.h>
31 #include <sys/kernel.h>
32 #include <sys/malloc.h>
33 
34 #include <dev/ofw/fdt.h>
35 #include <dev/ofw/openfirm.h>
36 
37 #include <mips64/mips_cpu.h>
38 
39 #include <machine/autoconf.h>
40 #include <machine/fdt.h>
41 #include <machine/intr.h>
42 #include <machine/octeonreg.h>
43 
/*
 * Register layout.  IDT entries route groups of sources to cores and
 * CPU interrupt lines; ISC registers control individual interrupt
 * sources, addressed by source number (intsn).
 */
#define CIU3_IDT(core, ipl)		((core) * 4 + (ipl))
#define CIU3_IDT_CTL(idt)		((idt) *  8 + 0x110000u)
#define CIU3_IDT_PP(idt)		((idt) * 32 + 0x120000u)
#define CIU3_IDT_IO(idt)		((idt) *  8 + 0x130000u)
#define CIU3_DEST_PP_INT(core)		((core) * 8 + 0x200000u)
#define   CIU3_DEST_PP_INT_INTSN		0x000fffff00000000ull
#define   CIU3_DEST_PP_INT_INTSN_SHIFT		32
#define   CIU3_DEST_PP_INT_INTR			0x0000000000000001ull
#define CIU3_ISC_CTL(intsn)		((intsn) * 8 + 0x80000000u)
#define   CIU3_ISC_CTL_IDT			0x0000000000ff0000ull
#define   CIU3_ISC_CTL_IDT_SHIFT		16
#define   CIU3_ISC_CTL_IMP			0x0000000000008000ull
#define   CIU3_ISC_CTL_EN			0x0000000000000002ull
#define   CIU3_ISC_CTL_RAW			0x0000000000000001ull
/* Write-1-to-clear and write-1-to-set views of the ISC bits. */
#define CIU3_ISC_W1C(intsn)		((intsn) * 8 + 0x90000000u)
#define   CIU3_ISC_W1C_EN			0x0000000000000002ull
#define   CIU3_ISC_W1C_RAW			0x0000000000000001ull
#define CIU3_ISC_W1S(intsn)		((intsn) * 8 + 0xa0000000u)
#define   CIU3_ISC_W1S_EN			0x0000000000000002ull
#define   CIU3_ISC_W1S_RAW			0x0000000000000001ull
/* Number of interrupt source numbers (20-bit intsn space). */
#define CIU3_NINTSN			(1u << 20)

/* Source numbers 0x4000 + core are per-core mailbox (IPI) sources. */
#define IS_MBOX(intsn)			(((intsn) >> 12) == 4)
#define MBOX_INTSN(core)		((core) + 0x4000u)

#define CIU3_RD_8(sc, reg) \
	bus_space_read_8((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define CIU3_WR_8(sc, reg, val) \
	bus_space_write_8((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

#define INTPRI_CIU_0	(INTPRI_CLOCK + 1)

/* Buckets in the handler hash table; must be a power of two. */
#define HASH_SIZE			64
/* One established handler; chained into sc_handlers[] by hashed intsn. */
struct octcit_intrhand {
	SLIST_ENTRY(octcit_intrhand)
				 ih_list;	/* hash bucket linkage */
	int			(*ih_func)(void *);	/* handler function */
	void			*ih_arg;	/* argument passed to ih_func */
	int			 ih_intsn;	/* interrupt source number */
	int			 ih_flags;
#define CIH_MPSAFE			0x01	/* run without kernel lock */
#define CIH_EDGE			0x02	/* edge-triggered */
	int			 ih_level;	/* IPL of the handler */
	struct evcount		 ih_count;	/* interrupt statistics */
};
90 
struct octcit_softc {
	struct device		 sc_dev;
	bus_space_tag_t		 sc_iot;	/* CIU3 register space tag */
	bus_space_handle_t	 sc_ioh;	/* CIU3 register space handle */

	/* Established handlers, hashed by intsn (see intsn_hash()). */
	SLIST_HEAD(, octcit_intrhand)
				 sc_handlers[HASH_SIZE];
	/* Lowest IPL of any handler, per CPU; used for fast masking. */
	int			 sc_minipl[MAXCPUS];
	int			(*sc_ipi_handler)(void *);

	struct intr_controller	 sc_ic;
};
103 
104 int	 octcit_match(struct device *, void *, void *);
105 void	 octcit_attach(struct device *, struct device *, void *);
106 
107 void	 octcit_init(void);
108 uint32_t octcit_intr(uint32_t, struct trapframe *);
109 void	*octcit_intr_establish(int, int, int (*)(void *), void *,
110 	    const char *);
111 void	*octcit_intr_establish_intsn(int, int, int, int (*)(void *),
112 	    void *, const char *);
113 void	*octcit_intr_establish_fdt_idx(void *, int, int, int,
114 	    int (*)(void *), void *, const char *);
115 void	 octcit_intr_disestablish(void *);
116 void	 octcit_intr_barrier(void *);
117 void	 octcit_splx(int);
118 
119 uint32_t octcit_ipi_intr(uint32_t, struct trapframe *);
120 int	 octcit_ipi_establish(int (*)(void *), cpuid_t);
121 void	 octcit_ipi_set(cpuid_t);
122 void	 octcit_ipi_clear(cpuid_t);
123 
const struct cfattach octcit_ca = {
	sizeof(struct octcit_softc), octcit_match, octcit_attach
};

struct cfdriver octcit_cd = {
	NULL, "octcit", DV_DULL
};

/*
 * Global softc handle; the interrupt entry points and splx handler get
 * no softc argument and reach the device through this pointer.
 */
struct octcit_softc	*octcit_sc;
133 
134 int
135 octcit_match(struct device *parent, void *match, void *aux)
136 {
137 	struct fdt_attach_args *faa = aux;
138 
139 	return OF_is_compatible(faa->fa_node, "cavium,octeon-7890-ciu3");
140 }
141 
/*
 * Attach the CIU3: map the register space, quiesce every implemented
 * interrupt source, register the controller with the platform glue and
 * hook the CPU interrupt vectors.
 */
void
octcit_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;
	struct octcit_softc *sc = (struct octcit_softc *)self;
	uint64_t val;
	int hash, intsn;

	if (faa->fa_nreg != 1) {
		printf(": expected one IO space, got %d\n", faa->fa_nreg);
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, faa->fa_reg[0].size,
	    0, &sc->sc_ioh)) {
		printf(": could not map IO space\n");
		return;
	}

	for (hash = 0; hash < HASH_SIZE; hash++)
		SLIST_INIT(&sc->sc_handlers[hash]);

	/* Disable all interrupts and acknowledge any pending ones. */
	for (intsn = 0; intsn < CIU3_NINTSN; intsn++) {
		val = CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
		/* Only a subset of the intsn space is implemented. */
		if (ISSET(val, CIU3_ISC_CTL_IMP)) {
			CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_CTL_RAW);
			CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), 0);
			/* Read back to flush the write. */
			(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
		}
	}

	printf("\n");

	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_init = octcit_init;
	sc->sc_ic.ic_establish = octcit_intr_establish;
	sc->sc_ic.ic_establish_fdt_idx = octcit_intr_establish_fdt_idx;
	sc->sc_ic.ic_disestablish = octcit_intr_disestablish;
	sc->sc_ic.ic_intr_barrier = octcit_intr_barrier;
#ifdef MULTIPROCESSOR
	sc->sc_ic.ic_ipi_establish = octcit_ipi_establish;
	sc->sc_ic.ic_ipi_set = octcit_ipi_set;
	sc->sc_ic.ic_ipi_clear = octcit_ipi_clear;
#endif

	octcit_sc = sc;

	/* Device interrupts arrive on IP2, IPIs on IP3. */
	set_intr(INTPRI_CIU_0, CR_INT_0, octcit_intr);
#ifdef MULTIPROCESSOR
	set_intr(INTPRI_IPI, CR_INT_1, octcit_ipi_intr);
#endif

	octcit_init();

	register_splx_handler(octcit_splx);
	octeon_intr_register(&sc->sc_ic);
}
202 
203 static inline int
204 intsn_hash(int intsn)
205 {
206 	int tmp;
207 
208 	tmp = intsn * 0xffb;
209 	return ((tmp >> 14) ^ tmp) & (HASH_SIZE - 1);
210 }
211 
/*
 * Per-CPU initialization: program this core's four IDT entries so that
 * entry 0 raises IP2 and entry 1 raises IP3 on this core only, and
 * entries 2 and 3 (IP4/IP5) stay disabled.  Runs on each core, on the
 * boot CPU from octcit_attach().
 */
void
octcit_init(void)
{
	struct cpu_info *ci = curcpu();
	struct octcit_softc *sc = octcit_sc;
	int core = ci->ci_cpuid;

	/* No handlers yet: start with the floor at IPL_HIGH. */
	sc->sc_minipl[ci->ci_cpuid] = IPL_HIGH;

	/*
	 * Set up interrupt routing.
	 */

	/* Route IP2. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 0)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 1ul << core);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 0)), 0);

	/* Route IP3. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core , 1)), 1);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 1)), 1ul << core);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 1)), 0);

	/* Disable IP4. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 2)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 2)), 0);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 2)), 0);

	/* Disable IP5. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 3)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 3)), 0);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 3)), 0);
}
245 
246 void *
247 octcit_intr_establish(int irq, int level, int (*func)(void *), void *arg,
248     const char *name)
249 {
250 	return octcit_intr_establish_intsn(irq, level, CIH_EDGE, func, arg,
251 	    name);
252 }
253 
254 void *
255 octcit_intr_establish_intsn(int intsn, int level, int flags,
256     int (*func)(void *), void *arg, const char *name)
257 {
258 	struct cpu_info *ci = curcpu();
259 	struct octcit_intrhand *ih;
260 	struct octcit_softc *sc = octcit_sc;
261 	uint64_t val;
262 	int s;
263 
264 	if ((unsigned int)intsn > CIU3_NINTSN)
265 		panic("%s: illegal intsn 0x%x", __func__, intsn);
266 
267 	if (IS_MBOX(intsn))
268 		panic("%s: mbox intsn 0x%x not allowed", __func__, intsn);
269 
270 	if (ISSET(level, IPL_MPSAFE))
271 		flags |= CIH_MPSAFE;
272 	level &= ~IPL_MPSAFE;
273 
274 	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
275 	if (ih == NULL)
276 		return NULL;
277 
278 	ih->ih_func = func;
279 	ih->ih_arg = arg;
280 	ih->ih_level = level;
281 	ih->ih_flags = flags;
282 	ih->ih_intsn = intsn;
283 	evcount_attach(&ih->ih_count, name, &ih->ih_intsn);
284 
285 	s = splhigh();
286 
287 	SLIST_INSERT_HEAD(&sc->sc_handlers[intsn_hash(intsn)], ih, ih_list);
288 	if (sc->sc_minipl[ci->ci_cpuid] > level)
289 		sc->sc_minipl[ci->ci_cpuid] = level;
290 
291 	val = CIU3_ISC_CTL_EN | (CIU3_IDT(ci->ci_cpuid, 0) <<
292 	    CIU3_ISC_CTL_IDT_SHIFT);
293 	CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_W1C_EN);
294 	CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), val);
295 	(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
296 
297 	splx(s);
298 
299 	return ih;
300 }
301 
302 void *
303 octcit_intr_establish_fdt_idx(void *cookie, int node, int idx, int level,
304     int (*func)(void *), void *arg, const char *name)
305 {
306 	uint32_t *cells;
307 	int flags = 0;
308 	int intsn, len, type;
309 
310 	len = OF_getproplen(node, "interrupts");
311 	if (len / (sizeof(uint32_t) * 2) <= idx ||
312 	    len % (sizeof(uint32_t) * 2) != 0)
313 		return NULL;
314 
315 	cells = malloc(len, M_TEMP, M_NOWAIT);
316 	if (cells == NULL)
317 		return NULL;
318 
319 	OF_getpropintarray(node, "interrupts", cells, len);
320 	intsn = cells[idx * 2];
321 	type = cells[idx * 2 + 1];
322 
323 	free(cells, M_TEMP, len);
324 
325 	if (type != 4)
326 		flags |= CIH_EDGE;
327 
328 	return octcit_intr_establish_intsn(intsn, level, flags, func, arg,
329 	    name);
330 }
331 
332 void
333 octcit_intr_disestablish(void *_ih)
334 {
335 	struct cpu_info *ci = curcpu();
336 	struct octcit_intrhand *ih = _ih;
337 	struct octcit_intrhand *tmp;
338 	struct octcit_softc *sc = octcit_sc;
339 	unsigned int count;
340 	int found = 0;
341 	int hash = intsn_hash(ih->ih_intsn);
342 	int i, s;
343 
344 	count = 0;
345 	SLIST_FOREACH(tmp, &sc->sc_handlers[hash], ih_list) {
346 		if (tmp->ih_intsn == ih->ih_intsn)
347 			count++;
348 		if (tmp == ih)
349 			found = 1;
350 	}
351 	if (found == 0)
352 		panic("%s: intrhand %p not registered", __func__, ih);
353 
354 	s = splhigh();
355 
356 	if (count == 0) {
357 		CIU3_WR_8(sc, CIU3_ISC_W1C(ih->ih_intsn), CIU3_ISC_W1C_EN);
358 		CIU3_WR_8(sc, CIU3_ISC_CTL(ih->ih_intsn), 0);
359 		(void)CIU3_RD_8(sc, CIU3_ISC_CTL(ih->ih_intsn));
360 	}
361 
362 	SLIST_REMOVE(&sc->sc_handlers[hash], ih, octcit_intrhand, ih_list);
363 	evcount_detach(&ih->ih_count);
364 
365 	/* Recompute IPL floor if necessary. */
366 	if (sc->sc_minipl[ci->ci_cpuid] == ih->ih_level) {
367 		sc->sc_minipl[ci->ci_cpuid] = IPL_HIGH;
368 		for (i = 0; i < HASH_SIZE; i++) {
369 			SLIST_FOREACH(tmp, &sc->sc_handlers[i], ih_list) {
370 				if (sc->sc_minipl[ci->ci_cpuid] >
371 				    tmp->ih_level)
372 					sc->sc_minipl[ci->ci_cpuid] =
373 					    tmp->ih_level;
374 			}
375 		}
376 	}
377 
378 	splx(s);
379 
380 	free(ih, M_DEVBUF, sizeof(*ih));
381 }
382 
/*
 * Wait until the handler is no longer running on any CPU.  A scheduler
 * barrier suffices because handlers run on whatever CPU took the trap.
 */
void
octcit_intr_barrier(void *_ih)
{
	sched_barrier(NULL);
}
388 
/*
 * IP2 dispatch: read the highest-priority pending source for this core
 * from DEST_PP_INT and run the matching handlers.  Called from the
 * low-level interrupt vector with `frame->ipl' holding the interrupted
 * IPL.
 */
uint32_t
octcit_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct octcit_intrhand *ih;
	struct octcit_softc *sc = octcit_sc;
	uint64_t destpp;
	uint64_t intsn;
	unsigned int core = ci->ci_cpuid;
	int handled = 0;
	int ipl;
	int ret;
#ifdef MULTIPROCESSOR
	register_t sr;
	int need_lock;
#endif

	/*
	 * If the interrupted code already runs at or above the lowest
	 * handler IPL, no handler may run now.  Mask IP2 delivery for
	 * this core; octcit_splx() re-enables it when the IPL drops.
	 */
	if (frame->ipl >= sc->sc_minipl[ci->ci_cpuid]) {
		/* Disable IP2. */
		CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 0);
		(void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)));
		return hwpend;
	}

	destpp = CIU3_RD_8(sc, CIU3_DEST_PP_INT(core));
	if (!ISSET(destpp, CIU3_DEST_PP_INT_INTR))
		goto spurious;

	/* Remember the entry IPL; splraise() below will raise it. */
	ipl = ci->ci_ipl;

	intsn = (destpp & CIU3_DEST_PP_INT_INTSN) >>
	    CIU3_DEST_PP_INT_INTSN_SHIFT;
	SLIST_FOREACH(ih, &sc->sc_handlers[intsn_hash(intsn)], ih_list) {
		if (ih->ih_intsn != intsn)
			continue;

		splraise(ih->ih_level);

		/* Acknowledge the interrupt. */
		if (ISSET(ih->ih_flags, CIH_EDGE)) {
			CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_CTL_RAW);
			(void)CIU3_RD_8(sc, CIU3_ISC_W1C(intsn));
		}

#ifdef MULTIPROCESSOR
		/* Let IPIs in while running sub-IPL_IPI handlers. */
		if (ih->ih_level < IPL_IPI) {
			sr = getsr();
			ENABLEIPI();
		}
		if (ISSET(ih->ih_flags, CIH_MPSAFE))
			need_lock = 0;
		else
			need_lock = 1;
		if (need_lock)
			__mp_lock(&kernel_lock);
#endif
		ret = (*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
		if (need_lock)
			__mp_unlock(&kernel_lock);
		if (ih->ih_level < IPL_IPI)
			setsr(sr);
#endif

		if (ret != 0) {
			handled = 1;
			atomic_inc_long(
			    (unsigned long *)&ih->ih_count.ec_count);
		}

		/*
		 * Stop processing when one handler has claimed the interrupt.
		 * This saves cycles because interrupt sharing should not
		 * happen on this hardware.
		 */
		if (ret == 1)
			break;
	}

	/* Restore the entry IPL; the caller unwinds through splx(). */
	ci->ci_ipl = ipl;

spurious:
	if (handled == 0)
		printf("%s: spurious interrupt 0x%016llx on cpu %lu\n",
		    sc->sc_dev.dv_xname, destpp, ci->ci_cpuid);

	return hwpend;
}
477 
/*
 * splx handler: lower the CPU's IPL and, if it drops below the lowest
 * handler IPL, re-enable IP2 delivery that octcit_intr() may have
 * masked.
 */
void
octcit_splx(int newipl)
{
	struct octcit_softc *sc = octcit_sc;
	struct cpu_info *ci = curcpu();
	unsigned int core = ci->ci_cpuid;

	ci->ci_ipl = newipl;

	if (newipl < sc->sc_minipl[ci->ci_cpuid]) {
		/* Re-enable IP2; the read flushes the write. */
		CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 1ul << core);
		(void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)));
	}

	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}
496 
497 #ifdef MULTIPROCESSOR
498 uint32_t
499 octcit_ipi_intr(uint32_t hwpend, struct trapframe *frame)
500 {
501 	struct octcit_softc *sc = octcit_sc;
502 	u_long cpuid = cpu_number();
503 
504 	if (sc->sc_ipi_handler != NULL)
505 		sc->sc_ipi_handler((void *)cpuid);
506 
507 	return hwpend;
508 }
509 
/*
 * Enable the mailbox interrupt source for the given CPU, routed to
 * that CPU's IP3 IDT entry.  The handler function is shared by all
 * CPUs, so it is recorded only once (by the boot CPU).
 */
int
octcit_ipi_establish(int (*func)(void *), cpuid_t cpuid)
{
	struct octcit_softc *sc = octcit_sc;
	uint64_t val;
	int intsn;

	if (cpuid == 0)
		sc->sc_ipi_handler = func;

	intsn = MBOX_INTSN(cpuid);
	/* Route to IDT entry (cpuid, 1), i.e. IP3, and enable. */
	val = CIU3_ISC_CTL_EN | (CIU3_IDT(cpuid, 1) << CIU3_ISC_CTL_IDT_SHIFT);
	CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_W1C_EN);
	CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), val);
	/* Read back to flush the writes. */
	(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));

	return 0;
}
528 
529 void
530 octcit_ipi_set(cpuid_t cpuid)
531 {
532 	struct octcit_softc *sc = octcit_sc;
533 	uint64_t reg = CIU3_ISC_W1S(MBOX_INTSN(cpuid));
534 
535 	CIU3_WR_8(sc, reg, CIU3_ISC_W1S_RAW);
536 	(void)CIU3_RD_8(sc, reg);
537 }
538 
539 void
540 octcit_ipi_clear(cpuid_t cpuid)
541 {
542 	struct octcit_softc *sc = octcit_sc;
543 	uint64_t reg = CIU3_ISC_W1C(MBOX_INTSN(cpuid));
544 
545 	CIU3_WR_8(sc, reg, CIU3_ISC_W1C_RAW);
546 	(void)CIU3_RD_8(sc, reg);
547 }
548 #endif /* MULTIPROCESSOR */
549