xref: /openbsd/sys/arch/octeon/dev/octciu.c (revision 3cab2bb3)
1 /*	$OpenBSD: octciu.c,v 1.17 2019/09/01 12:16:01 visa Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2004 Opsycon AB  (www.opsycon.se)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
19  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28 
29 /*
30  * Driver for OCTEON Central Interrupt Unit (CIU).
31  *
32  * CIU is present at least on CN3xxx, CN5xxx, CN60xx, CN61xx,
33  * CN70xx, and CN71xx.
34  */
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/atomic.h>
39 #include <sys/conf.h>
40 #include <sys/device.h>
41 #include <sys/evcount.h>
42 #include <sys/kernel.h>
43 #include <sys/malloc.h>
44 
45 #include <dev/ofw/fdt.h>
46 #include <dev/ofw/openfirm.h>
47 
48 #include <mips64/mips_cpu.h>
49 
50 #include <machine/autoconf.h>
51 #include <machine/fdt.h>
52 #include <machine/intr.h>
53 #include <machine/octeonreg.h>
54 
#define OCTCIU_NINTS 192

/*
 * Interrupt priorities for the CIU hardware vectors, placed just
 * above the clock interrupt.
 */
#define INTPRI_CIU_0	(INTPRI_CLOCK + 1)
#define INTPRI_CIU_1	(INTPRI_CLOCK + 2)

/* Per-cpu register addresses of one 64-bit CIU interrupt bank. */
struct intrbank {
	uint64_t	en;		/* enable mask register */
	uint64_t	sum;		/* service request register */
	int		id;		/* bank number */
};

#define NBANKS		3
#define BANK_SIZE	64		/* interrupts per bank */
#define IRQ_TO_BANK(x)	((x) >> 6)	/* irq / BANK_SIZE */
#define IRQ_TO_BIT(x)	((x) & 0x3f)	/* irq % BANK_SIZE */

/* Work queue interrupts are the low 16 interrupts of bank 0. */
#define IS_WORKQ_IRQ(x)	((unsigned int)(x) < 16)
72 
/*
 * One established interrupt handler.  Handlers sharing an irq are
 * chained on sc_intrhand[irq] in registration order.
 */
struct octciu_intrhand {
	SLIST_ENTRY(octciu_intrhand)
				 ih_list;
	int			(*ih_fun)(void *);	/* handler function */
	void			*ih_arg;		/* argument for ih_fun */
	int			 ih_level;		/* IPL to run at */
	int			 ih_irq;		/* CIU interrupt number */
	struct evcount		 ih_count;		/* dispatch statistics */
	int			 ih_flags;
	cpuid_t			 ih_cpuid;		/* cpu servicing this irq */
};

/* ih_flags */
#define CIH_MPSAFE	0x01	/* handler runs without the kernel lock */

/* Per-cpu interrupt state. */
struct octciu_cpu {
	struct intrbank		 scpu_ibank[NBANKS];	/* bank registers */
	uint64_t		 scpu_intem[NBANKS];	/* enabled interrupts */
	uint64_t		 scpu_imask[NIPLS][NBANKS]; /* per-IPL spl masks */
};
93 
/* Driver instance state.  There is a single CIU per system. */
struct octciu_softc {
	struct device		 sc_dev;
	bus_space_tag_t		 sc_iot;	/* CIU register access */
	bus_space_handle_t	 sc_ioh;
	struct octciu_cpu	 sc_cpu[MAXCPUS];	/* per-cpu masks */
	SLIST_HEAD(, octciu_intrhand)
				 sc_intrhand[OCTCIU_NINTS]; /* handler chains */
	unsigned int		 sc_nbanks;	/* 2 or 3, by chip family */

	int			(*sc_ipi_handler)(void *);

	struct intr_controller	 sc_ic;
};
107 
/* Autoconf glue. */
int	 octciu_match(struct device *, void *, void *);
void	 octciu_attach(struct device *, struct device *, void *);

/* Interrupt controller operations. */
void	 octciu_init(void);
void	 octciu_intr_makemasks(struct octciu_softc *);
uint32_t octciu_intr0(uint32_t, struct trapframe *);
uint32_t octciu_intr2(uint32_t, struct trapframe *);
uint32_t octciu_intr_bank(struct octciu_softc *, struct intrbank *,
	    struct trapframe *);
void	*octciu_intr_establish(int, int, int (*)(void *), void *,
	    const char *);
void	*octciu_intr_establish_fdt_idx(void *, int, int, int,
	    int (*)(void *), void *, const char *);
void	 octciu_intr_disestablish(void *);
void	 octciu_intr_barrier(void *);
void	 octciu_splx(int);

/* Inter-processor interrupt (mailbox) support. */
uint32_t octciu_ipi_intr(uint32_t, struct trapframe *);
int	 octciu_ipi_establish(int (*)(void *), cpuid_t);
void	 octciu_ipi_set(cpuid_t);
void	 octciu_ipi_clear(cpuid_t);

const struct cfattach octciu_ca = {
	sizeof(struct octciu_softc), octciu_match, octciu_attach
};

struct cfdriver octciu_cd = {
	NULL, "octciu", DV_DULL
};

/* The single CIU instance, used by the low-level entry points. */
struct octciu_softc	*octciu_sc;
139 
140 int
141 octciu_match(struct device *parent, void *match, void *aux)
142 {
143 	struct fdt_attach_args *faa = aux;
144 
145 	return OF_is_compatible(faa->fa_node, "cavium,octeon-3860-ciu");
146 }
147 
/*
 * Attach the CIU: map its registers, hook the dispatchers to the CPU
 * interrupt vectors, and register this driver as the platform
 * interrupt controller.
 */
void
octciu_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;
	struct octciu_softc *sc = (struct octciu_softc *)self;
	int i;

	if (faa->fa_nreg != 1) {
		printf(": expected one IO space, got %d\n", faa->fa_nreg);
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, faa->fa_reg[0].size,
	    0, &sc->sc_ioh)) {
		printf(": could not map IO space\n");
		return;
	}

	/* OCTEON II/III chips have a third interrupt bank. */
	if (octeon_ver == OCTEON_2 || octeon_ver == OCTEON_3)
		sc->sc_nbanks = 3;
	else
		sc->sc_nbanks = 2;

	for (i = 0; i < OCTCIU_NINTS; i++)
		SLIST_INIT(&sc->sc_intrhand[i]);

	printf("\n");

	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_init = octciu_init;
	sc->sc_ic.ic_establish = octciu_intr_establish;
	sc->sc_ic.ic_establish_fdt_idx = octciu_intr_establish_fdt_idx;
	sc->sc_ic.ic_disestablish = octciu_intr_disestablish;
	sc->sc_ic.ic_intr_barrier = octciu_intr_barrier;
#ifdef MULTIPROCESSOR
	sc->sc_ic.ic_ipi_establish = octciu_ipi_establish;
	sc->sc_ic.ic_ipi_set = octciu_ipi_set;
	sc->sc_ic.ic_ipi_clear = octciu_ipi_clear;
#endif

	octciu_sc = sc;

	/* Hook the per-bank dispatchers to the CPU interrupt causes. */
	set_intr(INTPRI_CIU_0, CR_INT_0, octciu_intr0);
	if (sc->sc_nbanks == 3)
		set_intr(INTPRI_CIU_1, CR_INT_2, octciu_intr2);
#ifdef MULTIPROCESSOR
	set_intr(INTPRI_IPI, CR_INT_1, octciu_ipi_intr);
#endif

	octciu_init();

	register_splx_handler(octciu_splx);
	octeon_intr_register(&sc->sc_ic);
}
204 
/*
 * Initialize the CIU state of the current cpu: mask all interrupts
 * and record the per-cpu bank register addresses.  Runs during attach
 * and, via ic_init, on each cpu that comes up.
 */
void
octciu_init(void)
{
	struct octciu_softc *sc = octciu_sc;
	struct octciu_cpu *scpu;
	int cpuid = cpu_number();
	int s;

	scpu = &sc->sc_cpu[cpuid];

	/* Disable everything before installing the bank descriptors. */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP2_EN0(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP2_EN1(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN1(cpuid), 0);

	if (sc->sc_nbanks == 3)
		bus_space_write_8(sc->sc_iot, sc->sc_ioh,
		    CIU_IP4_EN2(cpuid), 0);

	/* Banks 0 and 1 are serviced via IP2, bank 2 via IP4. */
	scpu->scpu_ibank[0].en = CIU_IP2_EN0(cpuid);
	scpu->scpu_ibank[0].sum = CIU_IP2_SUM0(cpuid);
	scpu->scpu_ibank[0].id = 0;
	scpu->scpu_ibank[1].en = CIU_IP2_EN1(cpuid);
	scpu->scpu_ibank[1].sum = CIU_INT32_SUM1;
	scpu->scpu_ibank[1].id = 1;
	scpu->scpu_ibank[2].en = CIU_IP4_EN2(cpuid);
	scpu->scpu_ibank[2].sum = CIU_IP4_SUM2(cpuid);
	scpu->scpu_ibank[2].id = 2;

	s = splhigh();
	octciu_intr_makemasks(sc);
	splx(s);	/* causes hw mask update */
}
238 
/*
 * Register an interrupt handler for the given CIU interrupt.
 * "level" carries the IPL and optionally the IPL_MPSAFE flag.
 * Returns an opaque cookie for octciu_intr_disestablish(), or NULL
 * if the handler could not be allocated.
 */
void *
octciu_intr_establish(int irq, int level, int (*ih_fun)(void *),
    void *ih_arg, const char *ih_what)
{
	struct octciu_softc *sc = octciu_sc;
	struct octciu_intrhand *ih, *last, *tmp;
	int cpuid = cpu_number();
	int flags;
	int s;

#ifdef DIAGNOSTIC
	if (irq >= sc->sc_nbanks * BANK_SIZE || irq < 0)
		panic("%s: illegal irq %d", __func__, irq);
#endif

#ifdef MULTIPROCESSOR
	/* Span work queue interrupts across CPUs. */
	if (IS_WORKQ_IRQ(irq))
		cpuid = irq % ncpus;
#endif

	flags = (level & IPL_MPSAFE) ? CIH_MPSAFE : 0;
	level &= ~IPL_MPSAFE;

	ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_irq = irq;
	ih->ih_cpuid = cpuid;
	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);

	s = splhigh();

	/* Append at the tail to preserve registration order. */
	if (SLIST_EMPTY(&sc->sc_intrhand[irq])) {
		SLIST_INSERT_HEAD(&sc->sc_intrhand[irq], ih, ih_list);
	} else {
		last = NULL;
		SLIST_FOREACH(tmp, &sc->sc_intrhand[irq], ih_list)
			last = tmp;
		SLIST_INSERT_AFTER(last, ih, ih_list);
	}

	/* Enable the interrupt on the chosen cpu. */
	sc->sc_cpu[cpuid].scpu_intem[IRQ_TO_BANK(irq)] |=
	    1UL << IRQ_TO_BIT(irq);
	octciu_intr_makemasks(sc);

	splx(s);	/* causes hw mask update */

	return (ih);
}
294 
295 void *
296 octciu_intr_establish_fdt_idx(void *cookie, int node, int idx, int level,
297     int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
298 {
299 	uint32_t *cells;
300 	int irq, len;
301 
302 	len = OF_getproplen(node, "interrupts");
303 	if (len / (sizeof(uint32_t) * 2) <= idx ||
304 	    len % (sizeof(uint32_t) * 2) != 0)
305 		return NULL;
306 
307 	cells = malloc(len, M_TEMP, M_NOWAIT);
308 	if (cells == NULL)
309 		return NULL;
310 
311 	OF_getpropintarray(node, "interrupts", cells, len);
312 	irq = cells[idx * 2] * BANK_SIZE + cells[idx * 2 + 1];
313 
314 	free(cells, M_TEMP, len);
315 
316 	return octciu_intr_establish(irq, level, ih_fun, ih_arg, ih_what);
317 }
318 
/*
 * Remove a previously established interrupt handler.
 */
void
octciu_intr_disestablish(void *_ih)
{
	struct octciu_intrhand *ih = _ih;
	struct octciu_intrhand *tmp;
	struct octciu_softc *sc = octciu_sc;
	unsigned int irq = ih->ih_irq;
	int cpuid = cpu_number();
	int found = 0;
	int s;

	KASSERT(irq < sc->sc_nbanks * BANK_SIZE);
	/* Work queue interrupts are spanned across cpus; removing them
	 * is not supported. */
	KASSERT(!IS_WORKQ_IRQ(irq));

	s = splhigh();

	/* The handler must be on its irq's chain. */
	SLIST_FOREACH(tmp, &sc->sc_intrhand[irq], ih_list) {
		if (tmp == ih) {
			found = 1;
			break;
		}
	}
	if (found == 0)
		panic("%s: intrhand %p not registered", __func__, ih);

	SLIST_REMOVE(&sc->sc_intrhand[irq], ih, octciu_intrhand, ih_list);
	evcount_detach(&ih->ih_count);

	/*
	 * Disable the interrupt once the last handler is gone.
	 * NOTE(review): the bit is cleared in the current cpu's mask;
	 * this presumes disestablish runs on the cpu that established
	 * the handler -- confirm with callers.
	 */
	if (SLIST_EMPTY(&sc->sc_intrhand[irq])) {
		sc->sc_cpu[cpuid].scpu_intem[IRQ_TO_BANK(irq)] &=
		    ~(1UL << IRQ_TO_BIT(irq));
	}

	octciu_intr_makemasks(sc);
	splx(s);	/* causes hw mask update */

	free(ih, M_DEVBUF, sizeof(*ih));
}
357 
358 void
359 octciu_intr_barrier(void *_ih)
360 {
361 	struct cpu_info *ci = NULL;
362 #ifdef MULTIPROCESSOR
363 	struct octciu_intrhand *ih = _ih;
364 
365 	if (IS_WORKQ_IRQ(ih->ih_irq))
366 		ci = get_cpu_info(ih->ih_irq % ncpus);
367 #endif
368 
369 	sched_barrier(ci);
370 }
371 
/*
 * Recompute interrupt masks.
 *
 * Rebuilds, for the current cpu, the per-IPL masks consumed by
 * octciu_splx() and octciu_intr_bank().  Must be called at splhigh().
 */
void
octciu_intr_makemasks(struct octciu_softc *sc)
{
	cpuid_t cpuid = cpu_number();
	struct octciu_cpu *scpu = &sc->sc_cpu[cpuid];
	struct octciu_intrhand *q;
	uint intrlevel[OCTCIU_NINTS];
	int irq, level;

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0; irq < OCTCIU_NINTS; irq++) {
		uint levels = 0;
		/* Only handlers serviced by this cpu count. */
		SLIST_FOREACH(q, &sc->sc_intrhand[irq], ih_list) {
			if (q->ih_cpuid == cpuid)
				levels |= 1 << q->ih_level;
		}
		intrlevel[irq] = levels;
	}

	/*
	 * Then figure out which IRQs use each level.
	 * Note that we make sure never to overwrite imask[IPL_HIGH], in
	 * case an interrupt occurs during intr_disestablish() and causes
	 * an unfortunate splx() while we are here recomputing the masks.
	 */
	for (level = IPL_NONE; level < NIPLS; level++) {
		uint64_t mask[NBANKS] = {};
		for (irq = 0; irq < OCTCIU_NINTS; irq++)
			if (intrlevel[irq] & (1 << level))
				mask[IRQ_TO_BANK(irq)] |=
				    1UL << IRQ_TO_BIT(irq);
		scpu->scpu_imask[level][0] = mask[0];
		scpu->scpu_imask[level][1] = mask[1];
		scpu->scpu_imask[level][2] = mask[2];
	}
	/*
	 * There are tty, network and disk drivers that use free() at interrupt
	 * time, so vm > (tty | net | bio).
	 *
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
#define ADD_MASK(dst, src) do {	\
	dst[0] |= src[0];	\
	dst[1] |= src[1];	\
	dst[2] |= src[2];	\
} while (0)
	ADD_MASK(scpu->scpu_imask[IPL_NET], scpu->scpu_imask[IPL_BIO]);
	ADD_MASK(scpu->scpu_imask[IPL_TTY], scpu->scpu_imask[IPL_NET]);
	ADD_MASK(scpu->scpu_imask[IPL_VM], scpu->scpu_imask[IPL_TTY]);
	ADD_MASK(scpu->scpu_imask[IPL_CLOCK], scpu->scpu_imask[IPL_VM]);
	ADD_MASK(scpu->scpu_imask[IPL_HIGH], scpu->scpu_imask[IPL_CLOCK]);
	ADD_MASK(scpu->scpu_imask[IPL_IPI], scpu->scpu_imask[IPL_HIGH]);

	/*
	 * These are pseudo-levels.
	 */
	scpu->scpu_imask[IPL_NONE][0] = 0;
	scpu->scpu_imask[IPL_NONE][1] = 0;
	scpu->scpu_imask[IPL_NONE][2] = 0;
}
436 
437 static inline int
438 octciu_next_irq(uint64_t *isr)
439 {
440 	uint64_t irq, tmp = *isr;
441 
442 	if (tmp == 0)
443 		return -1;
444 
445 	asm volatile (
446 	"	.set push\n"
447 	"	.set mips64\n"
448 	"	dclz	%0, %0\n"
449 	"	.set pop\n"
450 	: "=r" (tmp) : "0" (tmp));
451 
452 	irq = 63u - tmp;
453 	*isr &= ~(1u << irq);
454 	return irq;
455 }
456 
/*
 * Dispatch interrupts in given bank.
 *
 * Returns non-zero if any enabled interrupt of the bank was pending,
 * zero otherwise (the vector was not for us).
 */
uint32_t
octciu_intr_bank(struct octciu_softc *sc, struct intrbank *bank,
    struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct octciu_intrhand *ih;
	struct octciu_cpu *scpu = &sc->sc_cpu[ci->ci_cpuid];
	uint64_t imr, isr, mask;
	int handled, ipl, irq;
#ifdef MULTIPROCESSOR
	register_t sr;
	int need_lock;
#endif

	isr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, bank->sum);
	imr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, bank->en);

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, bank->en, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & scpu->scpu_imask[frame->ipl][bank->id])
	    != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}
	if (isr == 0)
		return 1;

	/*
	 * Now process allowed interrupts.
	 */

	/* Remember the entry IPL; splraise() below bumps it per handler. */
	ipl = ci->ci_ipl;

	while ((irq = octciu_next_irq(&isr)) >= 0) {
		irq += bank->id * BANK_SIZE;
		handled = 0;
		SLIST_FOREACH(ih, &sc->sc_intrhand[irq], ih_list) {
			splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
			/* Keep IPIs deliverable while running handlers
			 * below IPL_IPI. */
			if (ih->ih_level < IPL_IPI) {
				sr = getsr();
				ENABLEIPI();
			}
			/* Non-MPSAFE handlers need the kernel lock. */
			if (ih->ih_flags & CIH_MPSAFE)
				need_lock = 0;
			else
				need_lock = 1;
			if (need_lock)
				__mp_lock(&kernel_lock);
#endif
			if ((*ih->ih_fun)(ih->ih_arg) != 0) {
				handled = 1;
				atomic_inc_long(
				    (unsigned long *)&ih->ih_count.ec_count);
			}
#ifdef MULTIPROCESSOR
			if (need_lock)
				__mp_unlock(&kernel_lock);
			if (ih->ih_level < IPL_IPI)
				setsr(sr);
#endif
		}
		if (!handled)
			printf("%s: spurious interrupt %d on cpu %lu\n",
			    sc->sc_dev.dv_xname, irq, ci->ci_cpuid);
	}

	/* Restore the IPL that was in effect on entry. */
	ci->ci_ipl = ipl;

	/*
	 * Reenable interrupts which have been serviced.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, bank->en, imr);

	return 1;
}
547 
548 uint32_t
549 octciu_intr0(uint32_t hwpend, struct trapframe *frame)
550 {
551 	struct octciu_softc *sc = octciu_sc;
552 	struct octciu_cpu *scpu = &sc->sc_cpu[cpu_number()];
553 	int handled;
554 
555 	handled = octciu_intr_bank(sc, &scpu->scpu_ibank[0], frame);
556 	handled |= octciu_intr_bank(sc, &scpu->scpu_ibank[1], frame);
557 	return handled ? hwpend : 0;
558 }
559 
560 uint32_t
561 octciu_intr2(uint32_t hwpend, struct trapframe *frame)
562 {
563 	struct octciu_softc *sc = octciu_sc;
564 	struct octciu_cpu *scpu = &sc->sc_cpu[cpu_number()];
565 	int handled;
566 
567 	handled = octciu_intr_bank(sc, &scpu->scpu_ibank[2], frame);
568 	return handled ? hwpend : 0;
569 }
570 
/*
 * splx handler.  Records the new IPL on the current cpu and reprograms
 * the hardware enable registers so that only interrupts both enabled
 * (scpu_intem) and not spl-masked at this level remain unmasked.
 */
void
octciu_splx(int newipl)
{
	struct cpu_info *ci = curcpu();
	struct octciu_softc *sc = octciu_sc;
	struct octciu_cpu *scpu = &sc->sc_cpu[ci->ci_cpuid];

	ci->ci_ipl = newipl;

	/* Set hardware masks. */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, scpu->scpu_ibank[0].en,
	    scpu->scpu_intem[0] & ~scpu->scpu_imask[newipl][0]);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, scpu->scpu_ibank[1].en,
	    scpu->scpu_intem[1] & ~scpu->scpu_imask[newipl][1]);

	if (sc->sc_nbanks == 3)
		bus_space_write_8(sc->sc_iot, sc->sc_ioh,
		    scpu->scpu_ibank[2].en,
		    scpu->scpu_intem[2] & ~scpu->scpu_imask[newipl][2]);

	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}
595 
#ifdef MULTIPROCESSOR
/*
 * Low-level dispatcher for CR_INT_1: mailbox (IPI) interrupts.
 */
uint32_t
octciu_ipi_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct octciu_softc *sc = octciu_sc;
	u_long cpuid = cpu_number();

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 0);

	if (sc->sc_ipi_handler == NULL)
		return hwpend;

	sc->sc_ipi_handler((void *)cpuid);

	/*
	 * Reenable interrupts which have been serviced.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid),
		(1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1));
	return hwpend;
}
620 
/*
 * Enable mailbox interrupts for the given cpu.  The IPI handler
 * function is shared by all cpus and is recorded when cpu0 registers
 * it.  Pending mailbox bits are acknowledged before enabling.
 */
int
octciu_ipi_establish(int (*func)(void *), cpuid_t cpuid)
{
	struct octciu_softc *sc = octciu_sc;

	if (cpuid == 0)
		sc->sc_ipi_handler = func;

	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid),
		0xffffffff);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid),
		(1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1));

	return 0;
}
636 
637 void
638 octciu_ipi_set(cpuid_t cpuid)
639 {
640 	struct octciu_softc *sc = octciu_sc;
641 
642 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_SET(cpuid), 1);
643 }
644 
645 void
646 octciu_ipi_clear(cpuid_t cpuid)
647 {
648 	struct octciu_softc *sc = octciu_sc;
649 	uint64_t clr;
650 
651 	clr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid));
652 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid), clr);
653 }
654 #endif /* MULTIPROCESSOR */
655