/*	$OpenBSD: octciu.c,v 1.19 2022/12/11 05:31:05 visa Exp $	*/

/*
 * Copyright (c) 2000-2004 Opsycon AB  (www.opsycon.se)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for OCTEON Central Interrupt Unit (CIU).
 *
 * CIU is present at least on CN3xxx, CN5xxx, CN60xx, CN61xx,
 * CN70xx, and CN71xx.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>

#include <mips64/mips_cpu.h>

#include <machine/autoconf.h>
#include <machine/fdt.h>
#include <machine/intr.h>
#include <machine/octeonreg.h>

#define OCTCIU_NINTS 192

#define INTPRI_CIU_0	(INTPRI_CLOCK + 1)
#define INTPRI_CIU_1	(INTPRI_CLOCK + 2)

struct intrbank {
	uint64_t	en;		/* enable mask register */
	uint64_t	sum;		/* service request register */
	int		id;		/* bank number */
};

#define NBANKS		3
#define BANK_SIZE	64
#define IRQ_TO_BANK(x)	((x) >> 6)
#define IRQ_TO_BIT(x)	((x) & 0x3f)

#define IS_WORKQ_IRQ(x)	((unsigned int)(x) < 16)

struct octciu_intrhand {
	SLIST_ENTRY(octciu_intrhand)
				 ih_list;
	int			(*ih_fun)(void *);
	void			*ih_arg;
	int			 ih_level;
	int			 ih_irq;
	struct evcount		 ih_count;
	int			 ih_flags;
	cpuid_t			 ih_cpuid;
};

/* ih_flags */
#define CIH_MPSAFE	0x01

struct octciu_cpu {
	struct intrbank		 scpu_ibank[NBANKS];
	uint64_t		 scpu_intem[NBANKS];
	uint64_t		 scpu_imask[NIPLS][NBANKS];
};

struct octciu_softc {
	struct device		 sc_dev;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	struct octciu_cpu	 sc_cpu[MAXCPUS];
	SLIST_HEAD(, octciu_intrhand)
				 sc_intrhand[OCTCIU_NINTS];
	unsigned int		 sc_nbanks;

	int			(*sc_ipi_handler)(void *);

	struct intr_controller	 sc_ic;
};

int	 octciu_match(struct device *, void *, void *);
void	 octciu_attach(struct device *, struct device *, void *);

void	 octciu_init(void);
void	 octciu_intr_makemasks(struct octciu_softc *);
uint32_t octciu_intr0(uint32_t, struct trapframe *);
uint32_t octciu_intr2(uint32_t, struct trapframe *);
uint32_t octciu_intr_bank(struct octciu_softc *, struct intrbank *,
	    struct trapframe *);
void	*octciu_intr_establish(int, int, int (*)(void *), void *,
	    const char *);
void	*octciu_intr_establish_fdt_idx(void *, int, int, int,
	    int (*)(void *), void *, const char *);
void	 octciu_intr_disestablish(void *);
void	 octciu_intr_barrier(void *);
void	 octciu_splx(int);

uint32_t octciu_ipi_intr(uint32_t, struct trapframe *);
int	 octciu_ipi_establish(int (*)(void *), cpuid_t);
void	 octciu_ipi_set(cpuid_t);
void	 octciu_ipi_clear(cpuid_t);

const struct cfattach octciu_ca = {
	sizeof(struct octciu_softc), octciu_match, octciu_attach
};

struct cfdriver octciu_cd = {
	NULL, "octciu", DV_DULL
};

struct octciu_softc	*octciu_sc;

int
octciu_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "cavium,octeon-3860-ciu");
}

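/*
 * Attach the CIU: map the register space, hook up the interrupt
 * controller operations and install the CPU interrupt handlers.
 */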
void
octciu_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;
	struct octciu_softc *sc = (struct octciu_softc *)self;
	int i;

	if (faa->fa_nreg != 1) {
		printf(": expected one IO space, got %d\n", faa->fa_nreg);
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, faa->fa_reg[0].size,
	    0, &sc->sc_ioh)) {
		printf(": could not map IO space\n");
		return;
	}

	if (octeon_ver == OCTEON_2 || octeon_ver == OCTEON_3)
		sc->sc_nbanks = 3;
	else
		sc->sc_nbanks = 2;

	for (i = 0; i < OCTCIU_NINTS; i++)
		SLIST_INIT(&sc->sc_intrhand[i]);

	printf("\n");

	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_init = octciu_init;
	sc->sc_ic.ic_establish = octciu_intr_establish;
	sc->sc_ic.ic_establish_fdt_idx = octciu_intr_establish_fdt_idx;
	sc->sc_ic.ic_disestablish = octciu_intr_disestablish;
	sc->sc_ic.ic_intr_barrier = octciu_intr_barrier;
#ifdef MULTIPROCESSOR
	sc->sc_ic.ic_ipi_establish = octciu_ipi_establish;
	sc->sc_ic.ic_ipi_set = octciu_ipi_set;
	sc->sc_ic.ic_ipi_clear = octciu_ipi_clear;
#endif

	octciu_sc = sc;

	set_intr(INTPRI_CIU_0, CR_INT_0, octciu_intr0);
	if (sc->sc_nbanks == 3)
		set_intr(INTPRI_CIU_1, CR_INT_2, octciu_intr2);
#ifdef MULTIPROCESSOR
	set_intr(INTPRI_IPI, CR_INT_1, octciu_ipi_intr);
#endif

	octciu_init();

	register_splx_handler(octciu_splx);
	octeon_intr_register(&sc->sc_ic);
}

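/*
 * Set up CIU state for the calling CPU: disable all sources in the
 * per-CPU enable registers and record the enable/summary register
 * offsets of each bank.  Runs at attach time and via the ic_init hook.
 */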
void
octciu_init(void)
{
	struct octciu_softc *sc = octciu_sc;
	struct octciu_cpu *scpu;
	int cpuid = cpu_number();
	int s;

	scpu = &sc->sc_cpu[cpuid];

	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP2_EN0(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP2_EN1(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN1(cpuid), 0);

	if (sc->sc_nbanks == 3)
		bus_space_write_8(sc->sc_iot, sc->sc_ioh,
		    CIU_IP4_EN2(cpuid), 0);

	scpu->scpu_ibank[0].en = CIU_IP2_EN0(cpuid);
	scpu->scpu_ibank[0].sum = CIU_IP2_SUM0(cpuid);
	scpu->scpu_ibank[0].id = 0;
	scpu->scpu_ibank[1].en = CIU_IP2_EN1(cpuid);
	scpu->scpu_ibank[1].sum = CIU_INT32_SUM1;
	scpu->scpu_ibank[1].id = 1;
	scpu->scpu_ibank[2].en = CIU_IP4_EN2(cpuid);
	scpu->scpu_ibank[2].sum = CIU_IP4_SUM2(cpuid);
	scpu->scpu_ibank[2].id = 2;

	s = splhigh();
	octciu_intr_makemasks(sc);
	splx(s);	/* causes hw mask update */
}

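/*
 * Register a handler for the given CIU interrupt source.  On MP
 * kernels, work queue interrupts are spread across the CPUs; all
 * other sources are bound to the CPU that establishes them.
 */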
void *
octciu_intr_establish(int irq, int level, int (*ih_fun)(void *),
    void *ih_arg, const char *ih_what)
{
	struct octciu_softc *sc = octciu_sc;
	struct octciu_intrhand *ih, *last, *tmp;
	int cpuid = cpu_number();
	int flags;
	int s;

#ifdef DIAGNOSTIC
	if (irq >= sc->sc_nbanks * BANK_SIZE || irq < 0)
		panic("%s: illegal irq %d", __func__, irq);
#endif

#ifdef MULTIPROCESSOR
	/* Span work queue interrupts across CPUs. */
	if (IS_WORKQ_IRQ(irq))
		cpuid = irq % ncpus;
#endif

	flags = (level & IPL_MPSAFE) ? CIH_MPSAFE : 0;
	level &= ~IPL_MPSAFE;

	ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_irq = irq;
	ih->ih_cpuid = cpuid;
	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);
	evcount_percpu(&ih->ih_count);

	s = splhigh();

	if (SLIST_EMPTY(&sc->sc_intrhand[irq])) {
		SLIST_INSERT_HEAD(&sc->sc_intrhand[irq], ih, ih_list);
	} else {
		last = NULL;
		SLIST_FOREACH(tmp, &sc->sc_intrhand[irq], ih_list)
			last = tmp;
		SLIST_INSERT_AFTER(last, ih, ih_list);
	}

	sc->sc_cpu[cpuid].scpu_intem[IRQ_TO_BANK(irq)] |=
	    1UL << IRQ_TO_BIT(irq);
	octciu_intr_makemasks(sc);

	splx(s);	/* causes hw mask update */

	return (ih);
}

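/*
 * Establish a handler for the idx'th interrupt specifier of an FDT
 * node.  Each specifier is a (bank, bit) pair of cells that maps to
 * irq = bank * BANK_SIZE + bit.
 */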
void *
octciu_intr_establish_fdt_idx(void *cookie, int node, int idx, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	uint32_t *cells;
	int irq, len;

	len = OF_getproplen(node, "interrupts");
	if (len / (sizeof(uint32_t) * 2) <= idx ||
	    len % (sizeof(uint32_t) * 2) != 0)
		return NULL;

	cells = malloc(len, M_TEMP, M_NOWAIT);
	if (cells == NULL)
		return NULL;

	OF_getpropintarray(node, "interrupts", cells, len);
	irq = cells[idx * 2] * BANK_SIZE + cells[idx * 2 + 1];

	free(cells, M_TEMP, len);

	return octciu_intr_establish(irq, level, ih_fun, ih_arg, ih_what);
}

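/*
 * Remove a previously established handler.  Work queue interrupts
 * are rejected here; they may be bound to a CPU other than the
 * caller's.
 */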
void
octciu_intr_disestablish(void *_ih)
{
	struct octciu_intrhand *ih = _ih;
	struct octciu_intrhand *tmp;
	struct octciu_softc *sc = octciu_sc;
	unsigned int irq = ih->ih_irq;
	int cpuid = cpu_number();
	int found = 0;
	int s;

	KASSERT(irq < sc->sc_nbanks * BANK_SIZE);
	KASSERT(!IS_WORKQ_IRQ(irq));

	s = splhigh();

	SLIST_FOREACH(tmp, &sc->sc_intrhand[irq], ih_list) {
		if (tmp == ih) {
			found = 1;
			break;
		}
	}
	if (found == 0)
		panic("%s: intrhand %p not registered", __func__, ih);

	SLIST_REMOVE(&sc->sc_intrhand[irq], ih, octciu_intrhand, ih_list);
	evcount_detach(&ih->ih_count);

	if (SLIST_EMPTY(&sc->sc_intrhand[irq])) {
		sc->sc_cpu[cpuid].scpu_intem[IRQ_TO_BANK(irq)] &=
		    ~(1UL << IRQ_TO_BIT(irq));
	}

	octciu_intr_makemasks(sc);
	splx(s);	/* causes hw mask update */

	free(ih, M_DEVBUF, sizeof(*ih));
}

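/*
 * Ensure that the handler of _ih is no longer running on any CPU.
 * Work queue interrupts may be handled by a CPU other than the
 * primary one, so the barrier has to target the CPU the handler
 * was bound to.
 */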
void
octciu_intr_barrier(void *_ih)
{
	struct cpu_info *ci = NULL;
#ifdef MULTIPROCESSOR
	struct octciu_intrhand *ih = _ih;

	if (IS_WORKQ_IRQ(ih->ih_irq))
		ci = get_cpu_info(ih->ih_irq % ncpus);
#endif

	sched_barrier(ci);
}

/*
 * Recompute interrupt masks.
 */
void
octciu_intr_makemasks(struct octciu_softc *sc)
{
	cpuid_t cpuid = cpu_number();
	struct octciu_cpu *scpu = &sc->sc_cpu[cpuid];
	struct octciu_intrhand *q;
	uint intrlevel[OCTCIU_NINTS];
	int irq, level;

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0; irq < OCTCIU_NINTS; irq++) {
		uint levels = 0;
		SLIST_FOREACH(q, &sc->sc_intrhand[irq], ih_list) {
			if (q->ih_cpuid == cpuid)
				levels |= 1 << q->ih_level;
		}
		intrlevel[irq] = levels;
	}

	/*
	 * Then figure out which IRQs use each level.
	 * Note that we make sure never to overwrite imask[IPL_HIGH], in
	 * case an interrupt occurs during intr_disestablish() and causes
	 * an unfortunate splx() while we are here recomputing the masks.
	 */
	for (level = IPL_NONE; level < NIPLS; level++) {
		uint64_t mask[NBANKS] = {};
		for (irq = 0; irq < OCTCIU_NINTS; irq++)
			if (intrlevel[irq] & (1 << level))
				mask[IRQ_TO_BANK(irq)] |=
				    1UL << IRQ_TO_BIT(irq);
		scpu->scpu_imask[level][0] = mask[0];
		scpu->scpu_imask[level][1] = mask[1];
		scpu->scpu_imask[level][2] = mask[2];
	}
	/*
	 * There are tty, network and disk drivers that use free() at interrupt
	 * time, so vm > (tty | net | bio).
	 *
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
#define ADD_MASK(dst, src) do {	\
	dst[0] |= src[0];	\
	dst[1] |= src[1];	\
	dst[2] |= src[2];	\
} while (0)
	ADD_MASK(scpu->scpu_imask[IPL_NET], scpu->scpu_imask[IPL_BIO]);
	ADD_MASK(scpu->scpu_imask[IPL_TTY], scpu->scpu_imask[IPL_NET]);
	ADD_MASK(scpu->scpu_imask[IPL_VM], scpu->scpu_imask[IPL_TTY]);
	ADD_MASK(scpu->scpu_imask[IPL_CLOCK], scpu->scpu_imask[IPL_VM]);
	ADD_MASK(scpu->scpu_imask[IPL_HIGH], scpu->scpu_imask[IPL_CLOCK]);
	ADD_MASK(scpu->scpu_imask[IPL_IPI], scpu->scpu_imask[IPL_HIGH]);

	/*
	 * These are pseudo-levels.
	 */
	scpu->scpu_imask[IPL_NONE][0] = 0;
	scpu->scpu_imask[IPL_NONE][1] = 0;
	scpu->scpu_imask[IPL_NONE][2] = 0;
}

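/*
 * Pick the next pending interrupt from *isr, highest bit first.
 * dclz counts the leading zero bits of the 64-bit summary word;
 * the returned bit is cleared in *isr.  Returns -1 when no bits
 * remain.
 */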
static inline int
octciu_next_irq(uint64_t *isr)
{
	uint64_t irq, tmp = *isr;

	if (tmp == 0)
		return -1;

	asm volatile (
	"	.set push\n"
	"	.set mips64\n"
	"	dclz	%0, %0\n"
	"	.set pop\n"
	: "=r" (tmp) : "0" (tmp));

	irq = 63u - tmp;
	*isr &= ~(1UL << irq);
	return irq;
}

/*
 * Dispatch interrupts in given bank.
 */
uint32_t
octciu_intr_bank(struct octciu_softc *sc, struct intrbank *bank,
    struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct octciu_intrhand *ih;
	struct octciu_cpu *scpu = &sc->sc_cpu[ci->ci_cpuid];
	uint64_t imr, isr, mask;
	int handled, ipl, irq;
#ifdef MULTIPROCESSOR
	register_t sr;
	int need_lock;
#endif

	isr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, bank->sum);
	imr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, bank->en);

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, bank->en, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & scpu->scpu_imask[frame->ipl][bank->id])
	    != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}
	if (isr == 0)
		return 1;

	/*
	 * Now process allowed interrupts.
	 */

	ipl = ci->ci_ipl;

	while ((irq = octciu_next_irq(&isr)) >= 0) {
		irq += bank->id * BANK_SIZE;
		handled = 0;
		SLIST_FOREACH(ih, &sc->sc_intrhand[irq], ih_list) {
			splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
			if (ih->ih_level < IPL_IPI) {
				sr = getsr();
				ENABLEIPI();
			}
			if (ih->ih_flags & CIH_MPSAFE)
				need_lock = 0;
			else
				need_lock = 1;
			if (need_lock)
				__mp_lock(&kernel_lock);
#endif
			if ((*ih->ih_fun)(ih->ih_arg) != 0) {
				handled = 1;
				evcount_inc(&ih->ih_count);
			}
#ifdef MULTIPROCESSOR
			if (need_lock)
				__mp_unlock(&kernel_lock);
			if (ih->ih_level < IPL_IPI)
				setsr(sr);
#endif
		}
		if (!handled)
			printf("%s: spurious interrupt %d on cpu %lu\n",
			    sc->sc_dev.dv_xname, irq, ci->ci_cpuid);
	}

	ci->ci_ipl = ipl;

	/*
	 * Reenable interrupts which have been serviced.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, bank->en, imr);

	return 1;
}

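/*
 * Handler for CPU interrupt line CR_INT_0: dispatches banks 0 and 1.
 */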
uint32_t
octciu_intr0(uint32_t hwpend, struct trapframe *frame)
{
	struct octciu_softc *sc = octciu_sc;
	struct octciu_cpu *scpu = &sc->sc_cpu[cpu_number()];
	int handled;

	handled = octciu_intr_bank(sc, &scpu->scpu_ibank[0], frame);
	handled |= octciu_intr_bank(sc, &scpu->scpu_ibank[1], frame);
	return handled ? hwpend : 0;
}

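/*
 * Handler for CPU interrupt line CR_INT_2: dispatches bank 2, which
 * exists on OCTEON II and III only.
 */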
uint32_t
octciu_intr2(uint32_t hwpend, struct trapframe *frame)
{
	struct octciu_softc *sc = octciu_sc;
	struct octciu_cpu *scpu = &sc->sc_cpu[cpu_number()];
	int handled;

	handled = octciu_intr_bank(sc, &scpu->scpu_ibank[2], frame);
	return handled ? hwpend : 0;
}

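/*
 * splx() handler: record the new IPL, update the hardware enable
 * masks and trigger any deferred clock or soft interrupts that the
 * new level no longer masks.
 */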
void
octciu_splx(int newipl)
{
	struct cpu_info *ci = curcpu();
	struct octciu_softc *sc = octciu_sc;
	struct octciu_cpu *scpu = &sc->sc_cpu[ci->ci_cpuid];

	ci->ci_ipl = newipl;

	/* Set hardware masks. */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, scpu->scpu_ibank[0].en,
	    scpu->scpu_intem[0] & ~scpu->scpu_imask[newipl][0]);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, scpu->scpu_ibank[1].en,
	    scpu->scpu_intem[1] & ~scpu->scpu_imask[newipl][1]);

	if (sc->sc_nbanks == 3)
		bus_space_write_8(sc->sc_iot, sc->sc_ioh,
		    scpu->scpu_ibank[2].en,
		    scpu->scpu_intem[2] & ~scpu->scpu_imask[newipl][2]);

	/* Trigger deferred clock interrupt if it is now unmasked. */
	if (ci->ci_clock_deferred && newipl < IPL_CLOCK)
		md_triggerclock();

	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}

#ifdef MULTIPROCESSOR
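/*
 * Handler for CPU interrupt line CR_INT_1: mailbox (IPI) interrupts.
 */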
uint32_t
octciu_ipi_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct octciu_softc *sc = octciu_sc;
	u_long cpuid = cpu_number();

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 0);

	if (sc->sc_ipi_handler == NULL)
		return hwpend;

	sc->sc_ipi_handler((void *)cpuid);

	/*
	 * Reenable interrupts which have been serviced.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid),
		(1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1));
	return hwpend;
}

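/*
 * Register the IPI handler (stored once, when CPU 0 establishes it)
 * and unmask the mailbox interrupts on the given CPU.
 */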
int
octciu_ipi_establish(int (*func)(void *), cpuid_t cpuid)
{
	struct octciu_softc *sc = octciu_sc;

	if (cpuid == 0)
		sc->sc_ipi_handler = func;

	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid),
		0xffffffff);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid),
		(1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1));

	return 0;
}

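/* Send an IPI by setting mailbox bit 0 of the target CPU. */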
void
octciu_ipi_set(cpuid_t cpuid)
{
	struct octciu_softc *sc = octciu_sc;

	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_SET(cpuid), 1);
}

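/* Acknowledge all pending mailbox bits on the given CPU. */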
void
octciu_ipi_clear(cpuid_t cpuid)
{
	struct octciu_softc *sc = octciu_sc;
	uint64_t clr;

	clr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid));
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid), clr);
}
#endif /* MULTIPROCESSOR */
658