xref: /openbsd/sys/arch/riscv64/dev/plic.c (revision 771fbea0)
1 /*	$OpenBSD: plic.c,v 1.7 2021/05/19 17:39:49 kettenis Exp $	*/
2 
3 /*
4  * Copyright (c) 2020, Mars Li <mengshi.li.mars@gmail.com>
5  * Copyright (c) 2020, Brian Bamsch <bbamsch@google.com>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/queue.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/evcount.h>
26 
27 #include <machine/bus.h>
28 #include <machine/fdt.h>
29 #include <machine/cpu.h>
30 #include "riscv64/dev/riscv_cpu_intc.h"
31 
32 #include <dev/ofw/openfirm.h>
33 #include <dev/ofw/fdt.h>
34 
35 /*
36  * This driver implements a version of the RISC-V PLIC with the actual layout
37  * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
38  *
39  *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
40  *
 * The largest number of interrupt sources supported by devices marked as
 * 'sifive,plic-1.0.0' is 1024, of which source 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
44  */
45 
46 #define	PLIC_MAX_IRQS		1024
47 
48 #define	PLIC_PRIORITY_BASE	0x000000U
49 
50 #define	PLIC_ENABLE_BASE	0x002000U
51 #define	PLIC_ENABLE_STRIDE	0x80U
52 #define	IRQ_ENABLE		1
53 #define	IRQ_DISABLE		0
54 
55 #define	PLIC_CONTEXT_BASE	0x200000U
56 #define	PLIC_CONTEXT_STRIDE	0x1000U
57 #define	PLIC_CONTEXT_THRESHOLD	0x0U
58 #define	PLIC_CONTEXT_CLAIM	0x4U
59 
60 #define	PLIC_PRIORITY(n)	(PLIC_PRIORITY_BASE + (n) * sizeof(uint32_t))
61 #define	PLIC_ENABLE(sc, n, h)						\
62     (sc->sc_contexts[h].enable_offset + ((n) / 32) * sizeof(uint32_t))
63 #define	PLIC_THRESHOLD(sc, h)						\
64     (sc->sc_contexts[h].context_offset + PLIC_CONTEXT_THRESHOLD)
65 #define	PLIC_CLAIM(sc, h)						\
66     (sc->sc_contexts[h].context_offset + PLIC_CONTEXT_CLAIM)
67 
68 
/* One established interrupt handler, linked on its source's handler list. */
struct plic_intrhand {
	TAILQ_ENTRY(plic_intrhand) ih_list; /* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler; trap frame if NULL */
	int ih_ipl;			/* IPL_* level requested at establish */
	int ih_flags;			/* IPL_MPSAFE etc. */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;	/* per-handler interrupt statistics */
	char *ih_name;			/* name for evcount; may be NULL */
	struct cpu_info *ih_ci;		/* CPU this handler is routed to */
};
80 
/*
 * One interrupt source may have multiple handlers attached, each
 * possibly at a different priority level, so we track the maximum
 * and minimum priority levels.
 */
struct plic_irqsrc {
	TAILQ_HEAD(, plic_intrhand) is_list; /* handler list */
	int	is_irq_max;	/* highest IPL among attached handlers */
	int	is_irq_min;	/* lowest IPL among attached handlers */
};
91 
/* Per-CPU register offsets derived from the PLIC context number. */
struct plic_context {
	bus_size_t enable_offset;	/* base of this context's enable bits */
	bus_size_t context_offset;	/* base of threshold/claim registers */
};
96 
/* Driver softc; only a single PLIC instance is supported (see plic_match). */
struct plic_softc {
	struct device		sc_dev;
	int			sc_node;	/* PLIC device-tree node */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;		/* mapped PLIC register window */
	struct plic_irqsrc	*sc_isrcs;	/* state per source, PLIC_MAX_IRQS entries */
	struct plic_context	sc_contexts[MAXCPUS]; /* per-cpu register offsets */
	int			sc_ndev;	/* number of interrupt sources (riscv,ndev) */
	struct interrupt_controller	sc_intc; /* fdt interrupt-controller glue */
};
107 struct plic_softc *plic = NULL;
108 
109 int	plic_match(struct device *, void *, void *);
110 void	plic_attach(struct device *, struct device *, void *);
111 int	plic_irq_handler(void *);
112 int	plic_irq_dispatch(uint32_t, void *);
113 void	*plic_intr_establish(int, int, struct cpu_info *,
114 	    int (*)(void *), void *, char *);
115 void	*plic_intr_establish_fdt(void *, int *, int, struct cpu_info *,
116 	    int (*)(void *), void *, char *);
117 void	plic_intr_disestablish(void *);
118 void	plic_intr_route(void *, int, struct cpu_info *);
119 void	plic_intr_barrier(void *);
120 
121 void	plic_splx(int);
122 int	plic_spllower(int);
123 int	plic_splraise(int);
124 void	plic_setipl(int);
125 void	plic_calc_mask(void);
126 
127 /* helper function */
128 int	plic_get_cpuid(int);
129 void	plic_set_priority(int, uint32_t);
130 void	plic_set_threshold(int, uint32_t);
131 void	plic_intr_route_grid(int, int, int);
132 void	plic_intr_enable_with_pri(int, uint32_t, int);
133 void	plic_intr_disable(int, int);
134 
135 
/* Autoconf attachment glue. */
struct cfattach plic_ca = {
	sizeof(struct plic_softc), plic_match, plic_attach,
};
139 
/* Autoconf driver definition. */
struct cfdriver plic_cd = {
	NULL, "plic", DV_DULL
};
143 
144 int plic_attached = 0;
145 
146 int
147 plic_match(struct device *parent, void *cfdata, void *aux)
148 {
149 	struct fdt_attach_args *faa = aux;
150 
151 	if (plic_attached)
152 		return 0; // Only expect one instance of PLIC
153 
154 	return (OF_is_compatible(faa->fa_node, "riscv,plic0") ||
155 		OF_is_compatible(faa->fa_node, "sifive,plic-1.0.0"));
156 }
157 
158 void
159 plic_attach(struct device *parent, struct device *dev, void *aux)
160 {
161 	struct plic_softc *sc;
162 	struct fdt_attach_args *faa;
163 	uint32_t *cells;
164 	uint32_t irq;
165 	uint32_t cpu;
166 	int node;
167 	int len;
168 	int ncell;
169 	int context;
170 	int i;
171 	struct cpu_info *ci;
172 	CPU_INFO_ITERATOR cii;
173 
174 	if (plic_attached)
175 		return;
176 
177 	plic = sc = (struct plic_softc *)dev;
178 	faa = (struct fdt_attach_args *)aux;
179 
180 	if (faa->fa_nreg < 1)
181 		return;
182 
183 	sc->sc_node = node = faa->fa_node;
184 	sc->sc_iot = faa->fa_iot;
185 
186 	/* determine number of devices sending intr to this ic */
187 	sc->sc_ndev = OF_getpropint(faa->fa_node, "riscv,ndev", -1);
188 	if (sc->sc_ndev < 0) {
189 		printf(": unable to resolve number of devices\n");
190 		return;
191 	}
192 
193 	if (sc->sc_ndev >= PLIC_MAX_IRQS) {
194 		printf(": invalid ndev (%d)\n", sc->sc_ndev);
195 		return;
196 	}
197 
198 	/* map interrupt controller to va space */
199 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
200 	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
201 		panic("%s: bus_space_map failed!", __func__);
202 
203 	sc->sc_isrcs = mallocarray(PLIC_MAX_IRQS, sizeof(struct plic_irqsrc),
204 			M_DEVBUF, M_ZERO | M_NOWAIT);
205 
206 	for (irq = 1; irq <= sc->sc_ndev; irq++) {
207 		TAILQ_INIT(&sc->sc_isrcs[irq].is_list);
208 		plic_set_priority(irq, 0);// Mask interrupt
209 	}
210 
211 	/*
212 	 * Calculate the per-cpu enable and context register offsets.
213 	 *
214 	 * This is tricky for a few reasons. The PLIC divides the interrupt
215 	 * enable, threshold, and claim bits by "context"
216 	 *
217 	 * The tricky part is that the PLIC spec imposes no restrictions on how
218 	 * these contexts are laid out. So for example, there is no guarantee
219 	 * that each CPU will have both a machine mode and supervisor context,
220 	 * or that different PLIC implementations will organize the context
221 	 * registers in the same way. On top of this, we must handle the fact
222 	 * that cpuid != hartid, as they may have been renumbered during boot.
223 	 * We perform the following steps:
224 	 *
225 	 * 1. Examine the PLIC's "interrupts-extended" property and skip any
226 	 *    entries that are not for supervisor external interrupts.
227 	 *
228 	 * 2. Walk up the device tree to find the corresponding CPU, using node
229 	 *    property to identify the cpuid.
230 	 *
231 	 * 3. Calculate the register offsets based on the context number.
232 	 */
233 	len = OF_getproplen(node, "interrupts-extended");
234 	if (len <= 0) {
235 		printf(": could not find interrupts-extended\n");
236 		return;
237 	}
238 
239 	cells = malloc(len, M_TEMP, M_WAITOK);
240 	ncell = len / sizeof(*cells);
241 	if (OF_getpropintarray(node, "interrupts-extended", cells, len) < 0) {
242 		printf(": failed to read interrupts-extended\n");
243 		free(cells, M_TEMP, len);
244 		return;
245 	}
246 
247 	for (i = 0, context = 0; i < ncell; i += 2, context++) {
248 		/* Skip M-mode external interrupts */
249 		if (cells[i + 1] != IRQ_EXTERNAL_SUPERVISOR)
250 			continue;
251 
252 		/* Get the corresponding cpuid. */
253 		cpu = plic_get_cpuid(OF_getnodebyphandle(cells[i]));
254 		if (cpu < 0) {
255 			printf(": invalid hart!\n");
256 			free(cells, M_TEMP, len);
257 			return;
258 		}
259 
260 		/*
261 		 * Set the enable and context register offsets for the CPU.
262 		 *
263 		 * We assume S-mode handler always comes later than M-mode
264 		 * handler, but this might be a little fragile.
265 		 *
266 		 * XXX
267 		 * sifive spec doesn't list hart0 S-mode enable/contexts
268 		 * in its memory map, but QEMU emulates hart0 S-mode
269 		 * enable/contexts? Otherwise the following offset calculation
270 		 * would point to hart1 M-mode enable/contexts.
271 		 */
272 		sc->sc_contexts[cpu].enable_offset = PLIC_ENABLE_BASE +
273 		    context * PLIC_ENABLE_STRIDE;
274 		sc->sc_contexts[cpu].context_offset = PLIC_CONTEXT_BASE +
275 		    context * PLIC_CONTEXT_STRIDE;
276 	}
277 
278 	free(cells, M_TEMP, len);
279 
280 	/* Set CPU interrupt priority thresholds to minimum */
281 	CPU_INFO_FOREACH(cii, ci) {
282 		plic_set_threshold(ci->ci_cpuid, 0);
283 	}
284 
285 	plic_setipl(IPL_HIGH);  /* XXX ??? */
286 	plic_calc_mask();
287 
288 	/*
289 	 * insert self into the external interrupt handler entry in
290 	 * global interrupt handler vector
291 	 */
292 	riscv_intc_intr_establish(IRQ_EXTERNAL_SUPERVISOR, 0,
293 			plic_irq_handler, NULL, "plic0");
294 
295 	/*
296 	 * From now on, spl update must be enforeced to plic, so
297 	 * spl* routine should be updated.
298 	 */
299 	riscv_set_intr_func(plic_splraise, plic_spllower,
300 			plic_splx, plic_setipl);
301 
302 	plic_attached = 1;
303 
304 	/* enable external interrupt */
305 	csr_set(sie, SIE_SEIE);
306 
307 	sc->sc_intc.ic_node = faa->fa_node;
308 	sc->sc_intc.ic_cookie = sc;
309 	sc->sc_intc.ic_establish = plic_intr_establish_fdt;
310 	sc->sc_intc.ic_disestablish = plic_intr_disestablish;
311 	sc->sc_intc.ic_route = plic_intr_route;
312 	// sc->sc_intc.ic_cpu_enable = XXX Per-CPU Initialization?
313 	sc->sc_intc.ic_barrier = plic_intr_barrier;
314 
315 	riscv_intr_register_fdt(&sc->sc_intc);
316 
317 	printf("\n");
318 }
319 
320 int
321 plic_irq_handler(void *frame)
322 {
323 	struct plic_softc* sc;
324 	uint32_t pending;
325 	uint32_t cpu;
326 	int handled = 0;
327 
328 	sc = plic;
329 	cpu = cpu_number();
330 
331 	pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
332 			PLIC_CLAIM(sc, cpu));
333 
334 	if (pending >= sc->sc_ndev) {
335 		printf("plic0: pending %x\n", pending);
336 		return 0;
337 	}
338 
339 	if (pending) {
340 		handled = plic_irq_dispatch(pending, frame);
341 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
342 				PLIC_CLAIM(sc, cpu), pending);
343 
344 //#define DEBUG_INTC
345 #ifdef DEBUG_INTC
346 		if (handled == 0) {
347 			printf("plic handled == 0 on pending %d\n", pending);
348 		}
349 #endif /* DEBUG_INTC */
350 	}
351 
352 	return handled;
353 }
354 
/*
 * Run every handler attached to source 'irq'.  Raises spl to the
 * source's maximum handler IPL for the duration, and re-enables
 * interrupts around each handler call so higher-priority interrupts
 * can preempt.  Returns the last handler's return value.
 */
int
plic_irq_dispatch(uint32_t irq, void *frame)
{
	int pri, s;
	int handled = 0;
	struct plic_softc* sc;
	struct plic_intrhand *ih;
	void *arg;

#ifdef DEBUG_INTC
	printf("plic irq %d fired\n", irq);
#endif

	sc = plic;
	/* Mask at the highest IPL among all handlers sharing this source. */
	pri = sc->sc_isrcs[irq].is_irq_max;
	s = plic_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		/* Non-MPSAFE handlers below IPL_SCHED need the kernel lock. */
		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		/* A handler established with a NULL arg gets the trap frame. */
		if (ih->ih_arg != 0)
			arg = ih->ih_arg;
		else
			arg = frame;

		intr_enable();
		handled = ih->ih_func(arg);
		intr_disable();
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}

	plic_splx(s);
	return handled;
}
404 
405 void *
406 plic_intr_establish(int irqno, int level, struct cpu_info *ci,
407     int (*func)(void *), void *arg, char *name)
408 {
409 	struct plic_softc *sc = plic;
410 	struct plic_intrhand *ih;
411 	u_long sie;
412 
413 	if (irqno < 0 || irqno >= PLIC_MAX_IRQS)
414 		panic("plic_intr_establish: bogus irqnumber %d: %s",
415 		    irqno, name);
416 
417 	if (ci == NULL)
418 		ci = &cpu_info_primary;
419 
420 	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
421 	ih->ih_func = func;
422 	ih->ih_arg = arg;
423 	ih->ih_ipl = level & IPL_IRQMASK;
424 	ih->ih_flags = level & IPL_FLAGMASK;
425 	ih->ih_irq = irqno;
426 	ih->ih_name = name;
427 	ih->ih_ci = ci;
428 
429 	sie = intr_disable();
430 
431 	TAILQ_INSERT_TAIL(&sc->sc_isrcs[irqno].is_list, ih, ih_list);
432 
433 	if (name != NULL)
434 		evcount_attach(&ih->ih_count, name, &ih->ih_irq);
435 
436 #ifdef DEBUG_INTC
437 	printf("%s irq %d level %d [%s]\n", __func__, irqno, level,
438 	    name);
439 #endif
440 
441 	plic_calc_mask();
442 
443 	intr_restore(sie);
444 	return (ih);
445 }
446 
/* fdt glue: cell[0] carries the PLIC source number. */
void *
plic_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	return plic_intr_establish(cell[0], level, ci, func, arg, name);
}
453 
454 void
455 plic_intr_disestablish(void *cookie)
456 {
457 	struct plic_softc *sc = plic;
458 	struct plic_intrhand *ih = cookie;
459 	int irqno = ih->ih_irq;
460 	u_long sie;
461 
462 	sie = intr_disable();
463 
464 	TAILQ_REMOVE(&sc->sc_isrcs[irqno].is_list, ih, ih_list);
465 	if (ih->ih_name != NULL)
466 		evcount_detach(&ih->ih_count);
467 
468 	intr_restore(sie);
469 
470 	free(ih, M_DEVBUF, 0);
471 }
472 
473 void
474 plic_intr_route(void *cookie, int enable, struct cpu_info *ci)
475 {
476 	struct plic_softc	*sc = plic;
477 	struct plic_intrhand	*ih = cookie;
478 
479 	int		irq = ih->ih_irq;
480 	int		cpu = ci->ci_cpuid;
481 	uint32_t	min_pri = sc->sc_isrcs[irq].is_irq_min;
482 
483 	if (enable == IRQ_ENABLE) {
484 		plic_intr_enable_with_pri(irq, min_pri, cpu);
485 	} else {
486 		plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
487 	}
488 }
489 
/*
 * Wait until the CPU the handler is routed to has finished any
 * in-flight invocation of it.
 */
void
plic_intr_barrier(void *cookie)
{
	struct plic_intrhand *ih = cookie;

	sched_barrier(ih->ih_ci);
}
497 
498 void
499 plic_splx(int new)
500 {
501 	/* XXX
502 	 * how to do pending external interrupt ?
503 	 * After set the new threshold, if there is any pending
504 	 * external interrupts whose priority is now greater than the
505 	 * threshold, they will get passed through plic to cpu,
506 	 * trigger a new claim/complete cycle.
507 	 * So there is no need to handle pending external intr here.
508 	 *
509 	 */
510 	struct cpu_info *ci = curcpu();
511 
512 	/* Pending software intr is handled here */
513 	if (ci->ci_ipending & riscv_smask[new])
514 		riscv_do_pending_intr(new);
515 
516 	plic_setipl(new);
517 }
518 
519 int
520 plic_spllower(int new)
521 {
522 	struct cpu_info *ci = curcpu();
523 	int old = ci->ci_cpl;
524 	plic_splx(new);
525 	return (old);
526 }
527 
528 int
529 plic_splraise(int new)
530 {
531 	struct cpu_info *ci = curcpu();
532 	int old;
533 	old = ci->ci_cpl;
534 
535 	/*
536 	 * setipl must always be called because there is a race window
537 	 * where the variable is updated before the mask is set
538 	 * an interrupt occurs in that window without the mask always
539 	 * being set, the hardware might not get updated on the next
540 	 * splraise completely messing up spl protection.
541 	 */
542 	if (old > new)
543 		new = old;
544 
545 	plic_setipl(new);
546 
547 	return (old);
548 }
549 
550 void
551 plic_setipl(int new)
552 {
553 	struct cpu_info		*ci = curcpu();
554 	u_long sie;
555 
556 	/* disable here is only to keep hardware in sync with ci->ci_cpl */
557 	sie = intr_disable();
558 	ci->ci_cpl = new;
559 
560 	/* higher values are higher priority */
561 	plic_set_threshold(ci->ci_cpuid, new);
562 
563 	intr_restore(sie);
564 }
565 
 /*
  * update the max/min priority for an interrupt src,
  * and enforce the updated priority to plic.
  * this should be called whenever a new handler is attached.
  */
void
plic_calc_mask(void)
{
	struct cpu_info		*ci = curcpu();
	struct plic_softc	*sc = plic;
	struct plic_intrhand	*ih;
	int			irq;

	/* PLIC irq 0 is reserved, thus we start from 1 */
	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		/* Find the highest and lowest IPL among this source's handlers. */
		TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		/* No handlers attached: mark the source unused. */
		if (max == IPL_NONE)
			min = IPL_NONE;

		/* Nothing changed for this source; skip the hardware update. */
		if (sc->sc_isrcs[irq].is_irq_max == max &&
		    sc->sc_isrcs[irq].is_irq_min == min)
			continue;

		sc->sc_isrcs[irq].is_irq_max = max;
		sc->sc_isrcs[irq].is_irq_min = min;

		/* Enable interrupts at lower levels, clear -> enable */
		/* Set interrupt priority/enable */
		if (min != IPL_NONE) {
			plic_intr_enable_with_pri(irq, min, ci->ci_cpuid);
		} else {
			plic_intr_disable(irq, ci->ci_cpuid);
		}
	}

	/* Re-sync this CPU's threshold with its current IPL. */
	plic_setipl(ci->ci_cpl);
}
612 
613 /***************** helper functions *****************/
614 
615 /*
616  * OpenBSD saves cpu node info in ci struct, so we can search
617  * cpuid by node matching
618  */
619 int
620 plic_get_cpuid(int intc)
621 {
622 	uint32_t hart;
623 	int parent_node;
624 	struct cpu_info *ci;
625 	CPU_INFO_ITERATOR cii;
626 
627 	/* Check the interrupt controller layout. */
628 	if (OF_getpropintarray(intc, "#interrupt-cells", &hart,
629 	    sizeof(hart)) < 0) {
630 		printf(": could not find #interrupt-cells for phandle %u\n", intc);
631 		return (-1);
632 	}
633 
634 	/*
635 	 * The parent of the interrupt-controller is the CPU we are
636 	 * interested in, so search for its OF node index.
637 	 */
638 	parent_node = OF_parent(intc);
639 	CPU_INFO_FOREACH(cii, ci) {
640 		if (ci->ci_node == parent_node)
641 			return ci->ci_cpuid;
642 	}
643 	return -1;
644 }
645 
/* update priority for intr src 'irq' */
void
plic_set_priority(int irq, uint32_t pri)
{
	struct plic_softc	*sc = plic;
	uint32_t		prival;

	/*
	 * sifive plic only has 0 - 7 priority levels, yet OpenBSD defines
	 * 0 - 12 priority levels(level 1 - 4 are for SOFT*, level 12
	 * is for IPI. They should NEVER be passed to plic.
	 * So we calculate plic priority in the following way:
	 */
	if (pri <= 4 || pri >= 12)	/* invalid input */
		prival = 0;		/* effectively disable this intr source */
	else
		prival = pri - 4;	/* map IPL 5..11 to PLIC 1..7 */

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			PLIC_PRIORITY(irq), prival);
}
667 
668 /* update threshold for 'cpu' */
669 void
670 plic_set_threshold(int cpu, uint32_t threshold)
671 {
672 	struct plic_softc	*sc = plic;
673 	uint32_t		prival;
674 
675 	if (threshold < 4) // enable everything (as far as plic is concerned)
676 		prival = 0;
677 	else if (threshold >= 12) // invalid priority level ?
678 		prival = IPL_HIGH - 4; // XXX Device-specific high threshold
679 	else // everything else
680 		prival = threshold - 4; // XXX Device-specific threshold offset
681 
682 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
683 			PLIC_THRESHOLD(sc, cpu), prival);
684 }
685 
686 /*
687  * turns on/off the route from intr source 'irq'
688  * to context 'ci' based on 'enable'
689  */
690 void
691 plic_intr_route_grid(int irq, int enable, int cpu)
692 {
693 	struct plic_softc	*sc = plic;
694 	uint32_t		val, mask;
695 
696 	if (irq == 0)
697 		return;
698 
699 	KASSERT(cpu < MAXCPUS);
700 
701 	mask = (1 << (irq % 32));
702 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
703 			PLIC_ENABLE(sc, irq, cpu));
704 	if (enable == IRQ_ENABLE)
705 		val |= mask;
706 	else
707 		val &= ~mask;
708 
709 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
710 			PLIC_ENABLE(sc, irq, cpu), val);
711 }
712 
/*
 * Enable intr src 'irq' to cpu 'cpu' by setting:
 * - priority
 * - threshold
 * - enable bit
 */
void
plic_intr_enable_with_pri(int irq, uint32_t min_pri, int cpu)
{
	/* Source priority = the lowest IPL among its handlers. */
	plic_set_priority(irq, min_pri);
	/* Threshold just below it, so this source can be delivered. */
	plic_set_threshold(cpu, min_pri-1);
	plic_intr_route_grid(irq, IRQ_ENABLE, cpu);
}
726 
/* Mask source 'irq' and stop routing it to 'cpu'. */
void
plic_intr_disable(int irq, int cpu)
{
	/* Priority 0 effectively disables the source. */
	plic_set_priority(irq, 0);
	plic_set_threshold(cpu, IPL_HIGH);
	plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
}
734 /***************** end of helper functions *****************/
735