1 /* $OpenBSD: octcit.c,v 1.14 2022/12/11 05:31:05 visa Exp $ */
2
3 /*
4 * Copyright (c) 2017, 2019 Visa Hankala
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 /*
20 * Driver for OCTEON Central Interrupt Unit version 3 (CIU3).
21 *
22 * CIU3 is present on CN72xx, CN73xx, CN77xx, and CN78xx.
23 */
24
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/conf.h>
28 #include <sys/device.h>
29 #include <sys/evcount.h>
30 #include <sys/kernel.h>
31 #include <sys/malloc.h>
32
33 #include <dev/ofw/fdt.h>
34 #include <dev/ofw/openfirm.h>
35
36 #include <mips64/mips_cpu.h>
37
38 #include <machine/autoconf.h>
39 #include <machine/fdt.h>
40 #include <machine/intr.h>
41 #include <machine/octeonreg.h>
42
/*
 * CIU3 register layout.
 *
 * Each core owns four interrupt delivery table (IDT) entries, used here
 * as one entry per CPU interrupt line (IP2-IP5); see octcit_init().
 * Every interrupt source is named by a 20-bit interrupt source number
 * (intsn) and controlled through an ISC_CTL register.  The W1C/W1S
 * aliases clear and set the EN (enable) and RAW (pending) bits without
 * read-modify-write.
 */
#define CIU3_IDT(core, ipl)		((core) * 4 + (ipl))
#define CIU3_IDT_CTL(idt)		((idt) * 8 + 0x110000u)
#define CIU3_IDT_PP(idt)		((idt) * 32 + 0x120000u)
#define CIU3_IDT_IO(idt)		((idt) * 8 + 0x130000u)
/* Per-core register reporting the current highest-priority pending source. */
#define CIU3_DEST_PP_INT(core)		((core) * 8 + 0x200000u)
#define CIU3_DEST_PP_INT_INTSN		0x000fffff00000000ull
#define CIU3_DEST_PP_INT_INTSN_SHIFT	32
#define CIU3_DEST_PP_INT_INTR		0x0000000000000001ull
#define CIU3_ISC_CTL(intsn)		((intsn) * 8 + 0x80000000u)
#define CIU3_ISC_CTL_IDT		0x0000000000ff0000ull
#define CIU3_ISC_CTL_IDT_SHIFT		16
#define CIU3_ISC_CTL_IMP		0x0000000000008000ull	/* source exists */
#define CIU3_ISC_CTL_EN			0x0000000000000002ull
#define CIU3_ISC_CTL_RAW		0x0000000000000001ull
#define CIU3_ISC_W1C(intsn)		((intsn) * 8 + 0x90000000u)
#define CIU3_ISC_W1C_EN			0x0000000000000002ull
#define CIU3_ISC_W1C_RAW		0x0000000000000001ull
#define CIU3_ISC_W1S(intsn)		((intsn) * 8 + 0xa0000000u)
#define CIU3_ISC_W1S_EN			0x0000000000000002ull
#define CIU3_ISC_W1S_RAW		0x0000000000000001ull
#define CIU3_NINTSN			(1u << 20)	/* intsn value space */

/* Mailbox (IPI) sources occupy intsn 0x4000 + core number. */
#define IS_MBOX(intsn)			(((intsn) >> 12) == 4)
#define MBOX_INTSN(core)		((core) + 0x4000u)

/* Register access helpers. */
#define CIU3_RD_8(sc, reg) \
	bus_space_read_8((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define CIU3_WR_8(sc, reg, val) \
	bus_space_write_8((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

/* CIU interrupts are dispatched just above the clock interrupt. */
#define INTPRI_CIU_0	(INTPRI_CLOCK + 1)

/* Number of buckets in the handler hash; must stay a power of two
 * because intsn_hash() masks with (HASH_SIZE - 1). */
#define HASH_SIZE	64
76
/* State of one registered interrupt handler. */
struct octcit_intrhand {
	SLIST_ENTRY(octcit_intrhand)
			 ih_list;	/* link in sc_handlers bucket */
	int		(*ih_func)(void *);	/* handler function */
	void		*ih_arg;	/* argument passed to ih_func */
	int		 ih_intsn;	/* interrupt source number */
	int		 ih_flags;
#define CIH_MPSAFE	0x01		/* run without the kernel lock */
#define CIH_EDGE	0x02		/* edge-triggered */
	int		 ih_level;	/* IPL of this handler */
	struct evcount	 ih_count;	/* per-CPU interrupt statistics */
};

/* Per-device state; a system has a single CIU3 instance (octcit_sc). */
struct octcit_softc {
	struct device	sc_dev;
	bus_space_tag_t	sc_iot;		/* CIU3 register space tag */
	bus_space_handle_t sc_ioh;	/* CIU3 register space handle */

	/* Handlers hashed by intsn_hash(intsn). */
	SLIST_HEAD(, octcit_intrhand)
			sc_handlers[HASH_SIZE];
	/* Lowest IPL among registered handlers, tracked per CPU;
	 * used by octcit_intr()/octcit_splx() for IP2 masking. */
	int		sc_minipl[MAXCPUS];
	/* IPI callback shared by all CPUs (MULTIPROCESSOR). */
	int		(*sc_ipi_handler)(void *);

	struct intr_controller sc_ic;	/* hooks for the MI interrupt layer */
};
102
/* Autoconf interface. */
int	 octcit_match(struct device *, void *, void *);
void	 octcit_attach(struct device *, struct device *, void *);

/* Interrupt controller operations; wired up in octcit_attach(). */
void	 octcit_init(void);
uint32_t octcit_intr(uint32_t, struct trapframe *);
void	*octcit_intr_establish(int, int, int (*)(void *), void *,
	    const char *);
void	*octcit_intr_establish_intsn(int, int, int, int (*)(void *),
	    void *, const char *);
void	*octcit_intr_establish_fdt_idx(void *, int, int, int,
	    int (*)(void *), void *, const char *);
void	 octcit_intr_disestablish(void *);
void	 octcit_intr_barrier(void *);
void	 octcit_splx(int);

/* Inter-processor interrupt support (MULTIPROCESSOR). */
uint32_t octcit_ipi_intr(uint32_t, struct trapframe *);
int	 octcit_ipi_establish(int (*)(void *), cpuid_t);
void	 octcit_ipi_set(cpuid_t);
void	 octcit_ipi_clear(cpuid_t);

const struct cfattach octcit_ca = {
	sizeof(struct octcit_softc), octcit_match, octcit_attach
};

struct cfdriver octcit_cd = {
	NULL, "octcit", DV_DULL
};

/* Softc of the single CIU3 instance, for use in the interrupt paths. */
struct octcit_softc *octcit_sc;
132
133 int
octcit_match(struct device * parent,void * match,void * aux)134 octcit_match(struct device *parent, void *match, void *aux)
135 {
136 struct fdt_attach_args *faa = aux;
137
138 return OF_is_compatible(faa->fa_node, "cavium,octeon-7890-ciu3");
139 }
140
/*
 * Attach the CIU3: map the register space, quiesce all interrupt
 * sources, and register the driver with the MD interrupt framework.
 */
void
octcit_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;
	struct octcit_softc *sc = (struct octcit_softc *)self;
	uint64_t val;
	int hash, intsn;

	if (faa->fa_nreg != 1) {
		printf(": expected one IO space, got %d\n", faa->fa_nreg);
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, faa->fa_reg[0].size,
	    0, &sc->sc_ioh)) {
		printf(": could not map IO space\n");
		return;
	}

	/* No handlers registered yet. */
	for (hash = 0; hash < HASH_SIZE; hash++)
		SLIST_INIT(&sc->sc_handlers[hash]);

	/* Disable all interrupts and acknowledge any pending ones. */
	for (intsn = 0; intsn < CIU3_NINTSN; intsn++) {
		val = CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
		/* Only touch sources that the hardware implements. */
		if (ISSET(val, CIU3_ISC_CTL_IMP)) {
			CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_CTL_RAW);
			CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), 0);
			/* Read back to flush the write. */
			(void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
		}
	}

	printf("\n");

	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_init = octcit_init;
	sc->sc_ic.ic_establish = octcit_intr_establish;
	sc->sc_ic.ic_establish_fdt_idx = octcit_intr_establish_fdt_idx;
	sc->sc_ic.ic_disestablish = octcit_intr_disestablish;
	sc->sc_ic.ic_intr_barrier = octcit_intr_barrier;
#ifdef MULTIPROCESSOR
	sc->sc_ic.ic_ipi_establish = octcit_ipi_establish;
	sc->sc_ic.ic_ipi_set = octcit_ipi_set;
	sc->sc_ic.ic_ipi_clear = octcit_ipi_clear;
#endif

	octcit_sc = sc;

	/* Hook CPU interrupt lines: IP2 for devices, IP3 for IPIs. */
	set_intr(INTPRI_CIU_0, CR_INT_0, octcit_intr);
#ifdef MULTIPROCESSOR
	set_intr(INTPRI_IPI, CR_INT_1, octcit_ipi_intr);
#endif

	/* Set up routing for the boot CPU. */
	octcit_init();

	register_splx_handler(octcit_splx);
	octeon_intr_register(&sc->sc_ic);
}
201
202 static inline int
intsn_hash(int intsn)203 intsn_hash(int intsn)
204 {
205 int tmp;
206
207 tmp = intsn * 0xffb;
208 return ((tmp >> 14) ^ tmp) & (HASH_SIZE - 1);
209 }
210
/*
 * Per-CPU initialization: set up CIU3 interrupt routing for the
 * calling core.  IDT entries (core, 0..3) correspond to the CPU
 * interrupt lines IP2..IP5.
 */
void
octcit_init(void)
{
	struct cpu_info *ci = curcpu();
	struct octcit_softc *sc = octcit_sc;
	int core = ci->ci_cpuid;

	/* No handlers yet on this CPU; start the IPL floor at the top. */
	sc->sc_minipl[ci->ci_cpuid] = IPL_HIGH;

	/*
	 * Set up interrupt routing.
	 */

	/* Route IP2 (device interrupts) to this core only. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 0)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 1ul << core);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 0)), 0);

	/* Route IP3 (used for IPIs) to this core only. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 1)), 1);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 1)), 1ul << core);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 1)), 0);

	/* Disable IP4. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 2)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 2)), 0);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 2)), 0);

	/* Disable IP5. */
	CIU3_WR_8(sc, CIU3_IDT_CTL(CIU3_IDT(core, 3)), 0);
	CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 3)), 0);
	CIU3_WR_8(sc, CIU3_IDT_IO(CIU3_IDT(core, 3)), 0);
}
244
245 void *
octcit_intr_establish(int irq,int level,int (* func)(void *),void * arg,const char * name)246 octcit_intr_establish(int irq, int level, int (*func)(void *), void *arg,
247 const char *name)
248 {
249 return octcit_intr_establish_intsn(irq, level, CIH_EDGE, func, arg,
250 name);
251 }
252
253 void *
octcit_intr_establish_intsn(int intsn,int level,int flags,int (* func)(void *),void * arg,const char * name)254 octcit_intr_establish_intsn(int intsn, int level, int flags,
255 int (*func)(void *), void *arg, const char *name)
256 {
257 struct cpu_info *ci = curcpu();
258 struct octcit_intrhand *ih;
259 struct octcit_softc *sc = octcit_sc;
260 uint64_t val;
261 int s;
262
263 if ((unsigned int)intsn > CIU3_NINTSN)
264 panic("%s: illegal intsn 0x%x", __func__, intsn);
265
266 if (IS_MBOX(intsn))
267 panic("%s: mbox intsn 0x%x not allowed", __func__, intsn);
268
269 if (ISSET(level, IPL_MPSAFE))
270 flags |= CIH_MPSAFE;
271 level &= ~IPL_MPSAFE;
272
273 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT);
274 if (ih == NULL)
275 return NULL;
276
277 ih->ih_func = func;
278 ih->ih_arg = arg;
279 ih->ih_level = level;
280 ih->ih_flags = flags;
281 ih->ih_intsn = intsn;
282 evcount_attach(&ih->ih_count, name, &ih->ih_intsn);
283 evcount_percpu(&ih->ih_count);
284
285 s = splhigh();
286
287 SLIST_INSERT_HEAD(&sc->sc_handlers[intsn_hash(intsn)], ih, ih_list);
288 if (sc->sc_minipl[ci->ci_cpuid] > level)
289 sc->sc_minipl[ci->ci_cpuid] = level;
290
291 val = CIU3_ISC_CTL_EN | (CIU3_IDT(ci->ci_cpuid, 0) <<
292 CIU3_ISC_CTL_IDT_SHIFT);
293 CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_W1C_EN);
294 CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), val);
295 (void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
296
297 splx(s);
298
299 return ih;
300 }
301
302 void *
octcit_intr_establish_fdt_idx(void * cookie,int node,int idx,int level,int (* func)(void *),void * arg,const char * name)303 octcit_intr_establish_fdt_idx(void *cookie, int node, int idx, int level,
304 int (*func)(void *), void *arg, const char *name)
305 {
306 uint32_t *cells;
307 int flags = 0;
308 int intsn, len, type;
309
310 len = OF_getproplen(node, "interrupts");
311 if (len / (sizeof(uint32_t) * 2) <= idx ||
312 len % (sizeof(uint32_t) * 2) != 0)
313 return NULL;
314
315 cells = malloc(len, M_TEMP, M_NOWAIT);
316 if (cells == NULL)
317 return NULL;
318
319 OF_getpropintarray(node, "interrupts", cells, len);
320 intsn = cells[idx * 2];
321 type = cells[idx * 2 + 1];
322
323 free(cells, M_TEMP, len);
324
325 if (type != 4)
326 flags |= CIH_EDGE;
327
328 return octcit_intr_establish_intsn(intsn, level, flags, func, arg,
329 name);
330 }
331
332 void
octcit_intr_disestablish(void * _ih)333 octcit_intr_disestablish(void *_ih)
334 {
335 struct cpu_info *ci = curcpu();
336 struct octcit_intrhand *ih = _ih;
337 struct octcit_intrhand *tmp;
338 struct octcit_softc *sc = octcit_sc;
339 unsigned int count;
340 int found = 0;
341 int hash = intsn_hash(ih->ih_intsn);
342 int i, s;
343
344 count = 0;
345 SLIST_FOREACH(tmp, &sc->sc_handlers[hash], ih_list) {
346 if (tmp->ih_intsn == ih->ih_intsn)
347 count++;
348 if (tmp == ih)
349 found = 1;
350 }
351 if (found == 0)
352 panic("%s: intrhand %p not registered", __func__, ih);
353
354 s = splhigh();
355
356 if (count == 0) {
357 CIU3_WR_8(sc, CIU3_ISC_W1C(ih->ih_intsn), CIU3_ISC_W1C_EN);
358 CIU3_WR_8(sc, CIU3_ISC_CTL(ih->ih_intsn), 0);
359 (void)CIU3_RD_8(sc, CIU3_ISC_CTL(ih->ih_intsn));
360 }
361
362 SLIST_REMOVE(&sc->sc_handlers[hash], ih, octcit_intrhand, ih_list);
363 evcount_detach(&ih->ih_count);
364
365 /* Recompute IPL floor if necessary. */
366 if (sc->sc_minipl[ci->ci_cpuid] == ih->ih_level) {
367 sc->sc_minipl[ci->ci_cpuid] = IPL_HIGH;
368 for (i = 0; i < HASH_SIZE; i++) {
369 SLIST_FOREACH(tmp, &sc->sc_handlers[i], ih_list) {
370 if (sc->sc_minipl[ci->ci_cpuid] >
371 tmp->ih_level)
372 sc->sc_minipl[ci->ci_cpuid] =
373 tmp->ih_level;
374 }
375 }
376 }
377
378 splx(s);
379
380 free(ih, M_DEVBUF, sizeof(*ih));
381 }
382
/*
 * Wait until interrupt handlers that were running at the time of the
 * call have finished, by forcing the caller through the scheduler
 * (sched_barrier) on the other CPUs.
 */
void
octcit_intr_barrier(void *_ih)
{
	sched_barrier(NULL);
}
388
/*
 * Dispatch a device interrupt delivered on CPU interrupt line IP2.
 * Returns hwpend so the caller acknowledges the cause bits.
 */
uint32_t
octcit_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct octcit_intrhand *ih;
	struct octcit_softc *sc = octcit_sc;
	uint64_t destpp;
	uint64_t intsn;
	unsigned int core = ci->ci_cpuid;
	int handled = 0;
	int ipl;
	int ret;
#ifdef MULTIPROCESSOR
	register_t sr;
	int need_lock;
#endif

	/*
	 * If the interrupted context already runs at or above the level
	 * of every registered handler, nothing can be serviced now.
	 * Mask IP2 delivery to this core; octcit_splx() re-enables it
	 * once the IPL drops low enough.
	 */
	if (frame->ipl >= sc->sc_minipl[ci->ci_cpuid]) {
		/* Disable IP2. */
		CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 0);
		(void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)));
		return hwpend;
	}

	/* Ask the CIU3 for the highest-priority pending source. */
	destpp = CIU3_RD_8(sc, CIU3_DEST_PP_INT(core));
	if (!ISSET(destpp, CIU3_DEST_PP_INT_INTR))
		goto spurious;

	/* Remember the entry IPL; handlers run with it raised. */
	ipl = ci->ci_ipl;

	intsn = (destpp & CIU3_DEST_PP_INT_INTSN) >>
	    CIU3_DEST_PP_INT_INTSN_SHIFT;
	SLIST_FOREACH(ih, &sc->sc_handlers[intsn_hash(intsn)], ih_list) {
		/* Skip hash-bucket neighbours with a different intsn. */
		if (ih->ih_intsn != intsn)
			continue;

		splraise(ih->ih_level);

		/* Acknowledge the interrupt. */
		if (ISSET(ih->ih_flags, CIH_EDGE)) {
			CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_CTL_RAW);
			(void)CIU3_RD_8(sc, CIU3_ISC_W1C(intsn));
		}

#ifdef MULTIPROCESSOR
		/* Keep IPIs deliverable while running below IPL_IPI. */
		if (ih->ih_level < IPL_IPI) {
			sr = getsr();
			ENABLEIPI();
		}
		/* Non-MPSAFE handlers need the kernel lock. */
		if (ISSET(ih->ih_flags, CIH_MPSAFE))
			need_lock = 0;
		else
			need_lock = 1;
		if (need_lock)
			__mp_lock(&kernel_lock);
#endif
		ret = (*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
		if (need_lock)
			__mp_unlock(&kernel_lock);
		if (ih->ih_level < IPL_IPI)
			setsr(sr);
#endif

		if (ret != 0) {
			handled = 1;
			evcount_inc(&ih->ih_count);
		}

		/*
		 * Stop processing when one handler has claimed the interrupt.
		 * This saves cycles because interrupt sharing should not
		 * happen on this hardware.
		 */
		if (ret == 1)
			break;
	}

	/* Restore the pre-dispatch IPL. */
	ci->ci_ipl = ipl;

spurious:
	if (handled == 0)
		printf("%s: spurious interrupt 0x%016llx on cpu %lu\n",
		    sc->sc_dev.dv_xname, destpp, ci->ci_cpuid);

	return hwpend;
}
476
/*
 * splx() back-end: lower the IPL and kick any delivery or deferred
 * work that the new level now permits.
 */
void
octcit_splx(int newipl)
{
	struct octcit_softc *sc = octcit_sc;
	struct cpu_info *ci = curcpu();
	unsigned int core = ci->ci_cpuid;

	ci->ci_ipl = newipl;

	/*
	 * Re-enable IP2 delivery to this core if the new level is below
	 * the lowest registered handler IPL.  octcit_intr() masks IP2
	 * when an interrupt arrives at too high an IPL.
	 */
	if (newipl < sc->sc_minipl[ci->ci_cpuid]) {
		CIU3_WR_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)), 1ul << core);
		(void)CIU3_RD_8(sc, CIU3_IDT_PP(CIU3_IDT(core, 0)));
	}

	/* Trigger deferred clock interrupt if it is now unmasked. */
	if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
		md_triggerclock();

	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}
499
500 #ifdef MULTIPROCESSOR
501 uint32_t
octcit_ipi_intr(uint32_t hwpend,struct trapframe * frame)502 octcit_ipi_intr(uint32_t hwpend, struct trapframe *frame)
503 {
504 struct octcit_softc *sc = octcit_sc;
505 u_long cpuid = cpu_number();
506
507 if (sc->sc_ipi_handler != NULL)
508 sc->sc_ipi_handler((void *)cpuid);
509
510 return hwpend;
511 }
512
513 int
octcit_ipi_establish(int (* func)(void *),cpuid_t cpuid)514 octcit_ipi_establish(int (*func)(void *), cpuid_t cpuid)
515 {
516 struct octcit_softc *sc = octcit_sc;
517 uint64_t val;
518 int intsn;
519
520 if (cpuid == 0)
521 sc->sc_ipi_handler = func;
522
523 intsn = MBOX_INTSN(cpuid);
524 val = CIU3_ISC_CTL_EN | (CIU3_IDT(cpuid, 1) << CIU3_ISC_CTL_IDT_SHIFT);
525 CIU3_WR_8(sc, CIU3_ISC_W1C(intsn), CIU3_ISC_W1C_EN);
526 CIU3_WR_8(sc, CIU3_ISC_CTL(intsn), val);
527 (void)CIU3_RD_8(sc, CIU3_ISC_CTL(intsn));
528
529 return 0;
530 }
531
532 void
octcit_ipi_set(cpuid_t cpuid)533 octcit_ipi_set(cpuid_t cpuid)
534 {
535 struct octcit_softc *sc = octcit_sc;
536 uint64_t reg = CIU3_ISC_W1S(MBOX_INTSN(cpuid));
537
538 CIU3_WR_8(sc, reg, CIU3_ISC_W1S_RAW);
539 (void)CIU3_RD_8(sc, reg);
540 }
541
542 void
octcit_ipi_clear(cpuid_t cpuid)543 octcit_ipi_clear(cpuid_t cpuid)
544 {
545 struct octcit_softc *sc = octcit_sc;
546 uint64_t reg = CIU3_ISC_W1C(MBOX_INTSN(cpuid));
547
548 CIU3_WR_8(sc, reg, CIU3_ISC_W1C_RAW);
549 (void)CIU3_RD_8(sc, reg);
550 }
551 #endif /* MULTIPROCESSOR */
552