xref: /openbsd/sys/arch/riscv64/dev/plic.c (revision e4bb5015)
/*
 * Copyright (c) 2020, Mars Li <mengshi.li.mars@gmail.com>
 * Copyright (c) 2020, Brian Bamsch <bbamsch@google.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/cpu.h>
#include "riscv64/dev/riscv_cpu_intc.h"

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * Devices marked compatible with 'sifive,plic-1.0.0' support at most 1024
 * interrupt sources, of which source 0 is defined as non-existent by the
 * RISC-V Privileged Spec.
 */

#define	PLIC_MAX_IRQS		1024

#define	PLIC_PRIORITY_BASE	0x000000U

#define	PLIC_ENABLE_BASE	0x002000U
#define	PLIC_ENABLE_STRIDE	0x80U
#define	IRQ_ENABLE		1
#define	IRQ_DISABLE		0

#define	PLIC_CONTEXT_BASE	0x200000U
#define	PLIC_CONTEXT_STRIDE	0x1000U
#define	PLIC_CONTEXT_THRESHOLD	0x0U
#define	PLIC_CONTEXT_CLAIM	0x4U

#define	PLIC_PRIORITY(n)	(PLIC_PRIORITY_BASE + (n) * sizeof(uint32_t))
#define	PLIC_ENABLE(sc, n, h)						\
    ((sc)->sc_contexts[h].enable_offset + ((n) / 32) * sizeof(uint32_t))
#define	PLIC_THRESHOLD(sc, h)						\
    ((sc)->sc_contexts[h].context_offset + PLIC_CONTEXT_THRESHOLD)
#define	PLIC_CLAIM(sc, h)						\
    ((sc)->sc_contexts[h].context_offset + PLIC_CONTEXT_CLAIM)
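
/*
 * Worked example of the layout encoded above (illustrative arithmetic
 * only): the priority register of source 5 lives at PLIC_PRIORITY(5) =
 * 5 * 4 = 0x14; for a context whose enable_offset is PLIC_ENABLE_BASE
 * (0x2000), the enable bit of source 33 sits in the word at
 * 0x2000 + (33 / 32) * 4 = 0x2004, at bit position 33 % 32 = 1.
 */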

struct plic_intrhand {
	TAILQ_ENTRY(plic_intrhand) ih_list; /* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;
	char *ih_name;
};

/*
 * One interrupt source can have multiple handlers attached, and each
 * handler can have a different priority level. We track the highest
 * and lowest IPL among them.
 */
struct plic_irqsrc {
	TAILQ_HEAD(, plic_intrhand) is_list; /* handler list */
	int	is_irq_max;	/* highest IPL of attached handlers */
	int	is_irq_min;	/* lowest IPL of attached handlers */
};

struct plic_context {
	bus_size_t enable_offset;
	bus_size_t context_offset;
};

struct plic_softc {
	struct device		sc_dev;
	int			sc_node;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	struct plic_irqsrc	*sc_isrcs;
	struct plic_context	sc_contexts[MAXCPUS];
	int			sc_ndev;
	struct interrupt_controller	sc_intc;
};

struct plic_softc *plic = NULL;

int	plic_match(struct device *, void *, void *);
void	plic_attach(struct device *, struct device *, void *);
int	plic_irq_handler(void *);
int	plic_irq_dispatch(uint32_t, void *);
void	*plic_intr_establish(int, int, int (*)(void *),
		void *, char *);
void	*plic_intr_establish_fdt(void *, int *, int, int (*)(void *),
		void *, char *);
void	plic_intr_disestablish(void *);
void	plic_intr_route(void *, int, struct cpu_info *);

void	plic_splx(int);
int	plic_spllower(int);
int	plic_splraise(int);
void	plic_setipl(int);
void	plic_calc_mask(void);

/* helper functions */
int	plic_get_cpuid(int);
void	plic_set_priority(int, uint32_t);
void	plic_set_threshold(int, uint32_t);
void	plic_intr_route_grid(int, int, int);
void	plic_intr_enable_with_pri(int, uint32_t, int);
void	plic_intr_disable(int, int);

struct cfattach plic_ca = {
	sizeof(struct plic_softc), plic_match, plic_attach,
};

struct cfdriver plic_cd = {
	NULL, "plic", DV_DULL
};

int plic_attached = 0;

int
plic_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	if (plic_attached)
		return 0; /* only expect one instance of PLIC */

	return (OF_is_compatible(faa->fa_node, "riscv,plic0") ||
		OF_is_compatible(faa->fa_node, "sifive,plic-1.0.0"));
}

void
plic_attach(struct device *parent, struct device *dev, void *aux)
{
	struct plic_softc *sc;
	struct fdt_attach_args *faa;
	uint32_t *cells;
	uint32_t irq;
	int cpu;	/* signed: plic_get_cpuid() returns -1 on error */
	int node;
	int len;
	int ncell;
	int context;
	int i;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	if (plic_attached)
		return;

	plic = sc = (struct plic_softc *)dev;
	faa = (struct fdt_attach_args *)aux;

	if (faa->fa_nreg < 1)
		return;

	sc->sc_node = node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;

	/* determine the number of devices sending intrs to this ic */
	sc->sc_ndev = OF_getpropint(faa->fa_node, "riscv,ndev", -1);
	if (sc->sc_ndev < 0) {
		printf(": unable to resolve number of devices\n");
		return;
	}

	if (sc->sc_ndev >= PLIC_MAX_IRQS) {
		printf(": invalid ndev (%d)\n", sc->sc_ndev);
		return;
	}

	/* map interrupt controller to va space */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	sc->sc_isrcs = mallocarray(PLIC_MAX_IRQS, sizeof(struct plic_irqsrc),
			M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_isrcs == NULL)
		panic("%s: could not allocate interrupt sources", __func__);

	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		TAILQ_INIT(&sc->sc_isrcs[irq].is_list);
		plic_set_priority(irq, 0);	/* mask interrupt */
	}

	/*
	 * Calculate the per-cpu enable and context register offsets.
	 *
	 * This is tricky for a few reasons. The PLIC divides the interrupt
	 * enable, threshold, and claim bits by "context".
	 *
	 * The tricky part is that the PLIC spec imposes no restrictions on
	 * how these contexts are laid out. So, for example, there is no
	 * guarantee that each CPU will have both a machine mode and a
	 * supervisor context, or that different PLIC implementations will
	 * organize the context registers in the same way. On top of this, we
	 * must handle the fact that cpuid != hartid, as they may have been
	 * renumbered during boot. We perform the following steps:
	 *
	 * 1. Examine the PLIC's "interrupts-extended" property and skip any
	 *    entries that are not for supervisor external interrupts.
	 *
	 * 2. Walk up the device tree to find the corresponding CPU, using the
	 *    node property to identify the cpuid.
	 *
	 * 3. Calculate the register offsets based on the context number.
	 */
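	/*
	 * For example (an illustrative layout, not a guarantee): on a
	 * two-hart machine such as QEMU's virt, "interrupts-extended" may
	 * read <&intc0 11 &intc0 9 &intc1 11 &intc1 9>, where 11 is the
	 * machine and 9 the supervisor external interrupt. Hart 0's S-mode
	 * context is then context 1, giving enable_offset 0x2000 + 1 * 0x80
	 * = 0x2080 and context_offset 0x200000 + 1 * 0x1000 = 0x201000.
	 */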
	len = OF_getproplen(node, "interrupts-extended");
	if (len <= 0) {
		printf(": could not find interrupts-extended\n");
		return;
	}

	cells = malloc(len, M_TEMP, M_WAITOK);
	ncell = len / sizeof(*cells);
	if (OF_getpropintarray(node, "interrupts-extended", cells, len) < 0) {
		printf(": failed to read interrupts-extended\n");
		free(cells, M_TEMP, len);
		return;
	}

	for (i = 0, context = 0; i < ncell; i += 2, context++) {
		/* Skip anything that is not a supervisor external intr */
		if (cells[i + 1] != IRQ_EXTERNAL_SUPERVISOR)
			continue;

		/* Get the corresponding cpuid. */
		cpu = plic_get_cpuid(OF_getnodebyphandle(cells[i]));
		if (cpu < 0) {
			printf(": invalid hart!\n");
			free(cells, M_TEMP, len);
			return;
		}

		/*
		 * Set the enable and context register offsets for the CPU.
		 *
		 * We assume the S-mode handler always comes later than the
		 * M-mode handler, but this might be a little fragile.
		 *
		 * XXX
		 * The SiFive spec doesn't list hart0 S-mode enable/contexts
		 * in its memory map, but QEMU emulates hart0 S-mode
		 * enable/contexts. Otherwise the following offset calculation
		 * would point to hart1 M-mode enable/contexts.
		 */
		sc->sc_contexts[cpu].enable_offset = PLIC_ENABLE_BASE +
		    context * PLIC_ENABLE_STRIDE;
		sc->sc_contexts[cpu].context_offset = PLIC_CONTEXT_BASE +
		    context * PLIC_CONTEXT_STRIDE;
	}

	free(cells, M_TEMP, len);

	/* Set CPU interrupt priority thresholds to minimum */
	CPU_INFO_FOREACH(cii, ci) {
		plic_set_threshold(ci->ci_cpuid, 0);
	}

	plic_setipl(IPL_HIGH);  /* XXX ??? */
	plic_calc_mask();

	/*
	 * Insert self into the external interrupt handler entry in
	 * the global interrupt handler vector.
	 */
	riscv_intc_intr_establish(IRQ_EXTERNAL_SUPERVISOR, 0,
			plic_irq_handler, NULL, "plic0");

	/*
	 * From now on, spl updates must be enforced through the plic, so
	 * the spl* routines must be updated.
	 */
	riscv_set_intr_func(plic_splraise, plic_spllower,
			plic_splx, plic_setipl);

	plic_attached = 1;

	/* enable external interrupts */
	csr_set(sie, SIE_SEIE);

	sc->sc_intc.ic_node = faa->fa_node;
	sc->sc_intc.ic_cookie = sc;
	sc->sc_intc.ic_establish = plic_intr_establish_fdt;
	sc->sc_intc.ic_disestablish = plic_intr_disestablish;
	sc->sc_intc.ic_route = plic_intr_route;
	// sc->sc_intc.ic_cpu_enable = XXX Per-CPU Initialization?

	riscv_intr_register_fdt(&sc->sc_intc);

	printf("\n");
}

int
plic_irq_handler(void *frame)
{
	struct plic_softc *sc;
	uint32_t pending;
	uint32_t cpu;
	int handled = 0;

	sc = plic;
	cpu = cpu_number();

	/* reading the claim register claims the highest-priority source */
	pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			PLIC_CLAIM(sc, cpu));

	/* valid source IDs are 1 .. sc_ndev */
	if (pending > sc->sc_ndev) {
		printf("plic0: pending %x\n", pending);
		return 0;
	}

	if (pending) {
		handled = plic_irq_dispatch(pending, frame);
		/* writing the claimed ID back completes the interrupt */
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
				PLIC_CLAIM(sc, cpu), pending);

//#define DEBUG_INTC
#ifdef DEBUG_INTC
		if (handled == 0) {
			printf("plic handled == 0 on pending %d\n", pending);
		}
#endif /* DEBUG_INTC */
	}

	return handled;
}

int
plic_irq_dispatch(uint32_t irq, void *frame)
{
	int pri, s;
	int handled = 0;
	struct plic_softc *sc;
	struct plic_intrhand *ih;
	void *arg;

#ifdef DEBUG_INTC
	printf("plic irq %d fired\n", irq);
#endif

	sc = plic;
	pri = sc->sc_isrcs[irq].is_irq_max;
	s = plic_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		if (ih->ih_arg != 0)
			arg = ih->ih_arg;
		else
			arg = frame;

		/*
		 * XXX re-enabling interrupts here would allow handler
		 * preemption; they are kept disabled for now.
		 */
//		enable_interrupts();
		handled = ih->ih_func(arg);
//		disable_interrupts();
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}

	plic_splx(s);
	return handled;
}

void *
plic_intr_establish(int irqno, int level, int (*func)(void *),
    void *arg, char *name)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih;
	int sie;

	if (irqno < 0 || irqno >= PLIC_MAX_IRQS)
		panic("plic_intr_establish: bogus irq number %d: %s",
		    irqno, name);
	sie = disable_interrupts();

	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;

	TAILQ_INSERT_TAIL(&sc->sc_isrcs[irqno].is_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("%s irq %d level %d [%s]\n", __func__, irqno, level,
	    name);
#endif

	plic_calc_mask();

	restore_interrupts(sie);
	return (ih);
}

void *
plic_intr_establish_fdt(void *cookie, int *cell, int level,
    int (*func)(void *), void *arg, char *name)
{
	return plic_intr_establish(cell[0], level, func, arg, name);
}
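
/*
 * A minimal usage sketch (assumed consumer code, not part of this
 * driver): a device hanging off the FDT would typically reach
 * plic_intr_establish_fdt() through the generic helper, e.g.
 *
 *	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET,
 *	    mydrv_intr, sc, sc->sc_dev.dv_xname);
 *
 * where cell[0] carries the PLIC source number from the device's
 * "interrupts" property.
 */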

void
plic_intr_disestablish(void *cookie)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih = cookie;
	int irqno = ih->ih_irq;
	int sie;

	sie = disable_interrupts();
	TAILQ_REMOVE(&sc->sc_isrcs[irqno].is_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));
	restore_interrupts(sie);
}

void
plic_intr_route(void *cookie, int enable, struct cpu_info *ci)
{
	struct plic_softc	*sc = plic;
	struct plic_intrhand	*ih = cookie;

	int		irq = ih->ih_irq;
	int		cpu = ci->ci_cpuid;
	uint32_t	min_pri = sc->sc_isrcs[irq].is_irq_min;

	if (enable == IRQ_ENABLE) {
		plic_intr_enable_with_pri(irq, min_pri, cpu);
	} else {
		plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
	}
}

void
plic_splx(int new)
{
	/*
	 * XXX Pending external interrupts need no special handling here:
	 * after the new threshold is set, any pending external interrupt
	 * whose priority is now greater than the threshold will be passed
	 * through the plic to the cpu and trigger a new claim/complete
	 * cycle.
	 */
	struct cpu_info *ci = curcpu();

	/* Pending software interrupts are handled here */
	if (ci->ci_ipending & riscv_smask[new])
		riscv_do_pending_intr(new);

	plic_setipl(new);
}

int
plic_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	plic_splx(new);
	return (old);
}

int
plic_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set.
	 * If an interrupt occurs in that window without the mask always
	 * being set, the hardware might not get updated on the next
	 * splraise, completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	plic_setipl(new);

	return (old);
}

void
plic_setipl(int new)
{
	struct cpu_info		*ci = curcpu();
	uint64_t sie;

	/* disabling here is only to keep hardware in sync with ci->ci_cpl */
	sie = disable_interrupts();
	ci->ci_cpl = new;

	/* higher values are higher priority */
	plic_set_threshold(ci->ci_cpuid, new);

	restore_interrupts(sie);
}

/*
 * Update the max/min priority levels for an interrupt source and
 * enforce the updated priority on the plic. This should be called
 * whenever a new handler is attached.
 */
void
plic_calc_mask(void)
{
	struct cpu_info		*ci = curcpu();
	struct plic_softc	*sc = plic;
	struct plic_intrhand	*ih;
	int			irq;

	/* PLIC irq 0 is reserved, thus we start from 1 */
	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		if (max == IPL_NONE)
			min = IPL_NONE;

		if (sc->sc_isrcs[irq].is_irq_max == max &&
		    sc->sc_isrcs[irq].is_irq_min == min)
			continue;

		sc->sc_isrcs[irq].is_irq_max = max;
		sc->sc_isrcs[irq].is_irq_min = min;

		/* Enable interrupts at lower levels, clear -> enable */
		/* Set interrupt priority/enable */
		if (min != IPL_NONE) {
			plic_intr_enable_with_pri(irq, min, ci->ci_cpuid);
		} else {
			plic_intr_disable(irq, ci->ci_cpuid);
		}
	}

	plic_setipl(ci->ci_cpl);
}

/***************** helper functions *****************/

/*
 * OpenBSD saves the cpu node info in the ci struct, so we can look up
 * the cpuid by matching nodes.
 */
int
plic_get_cpuid(int intc)
{
	uint32_t icells;
	int parent_node;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	/* Check the interrupt controller layout. */
	if (OF_getpropintarray(intc, "#interrupt-cells", &icells,
	    sizeof(icells)) < 0) {
		printf(": could not find #interrupt-cells for node %d\n",
		    intc);
		return (-1);
	}

	/*
	 * The parent of the interrupt-controller is the CPU we are
	 * interested in, so search for its OF node index.
	 */
	parent_node = OF_parent(intc);
	CPU_INFO_FOREACH(cii, ci) {
		if (ci->ci_node == parent_node)
			return ci->ci_cpuid;
	}
	return -1;
}

/* update the priority of intr source 'irq' */
void
plic_set_priority(int irq, uint32_t pri)
{
	struct plic_softc	*sc = plic;
	uint32_t		prival;

	/*
	 * The SiFive plic only has priority levels 0 - 7, yet OpenBSD
	 * defines priority levels 0 - 12 (levels 1 - 4 are for the SOFT*
	 * IPLs and level 12 is for IPI; they should NEVER be passed to
	 * the plic). So we calculate the plic priority as follows:
	 */
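	/*
	 * Worked mapping (derived from the formula below): IPLs 5..11
	 * become plic priorities 1..7; IPL_NONE and the four SOFT*
	 * levels (0..4), as well as anything >= 12, become 0, which
	 * masks the source entirely.
	 */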
	if (pri <= 4 || pri >= 12)	/* invalid input */
		prival = 0;		/* effectively disable this intr source */
	else
		prival = pri - 4;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			PLIC_PRIORITY(irq), prival);
}

/* update threshold for 'cpu' */
void
plic_set_threshold(int cpu, uint32_t threshold)
{
	struct plic_softc	*sc = plic;
	uint32_t		prival;

	if (threshold < 4)	/* enable everything (as far as plic is concerned) */
		prival = 0;
	else if (threshold >= 12)	/* invalid priority level ? */
		prival = IPL_HIGH - 4;	/* XXX Device-specific high threshold */
	else	/* everything else */
		prival = threshold - 4;	/* XXX Device-specific threshold offset */

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			PLIC_THRESHOLD(sc, cpu), prival);
}
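
/*
 * Worked example (assuming IPL_HIGH is 12, the top of OpenBSD's
 * range): thresholds 0..4 map to plic threshold 0 (nothing masked),
 * thresholds 5..11 map to 1..7, and anything >= 12 yields
 * IPL_HIGH - 4 = 8, above the highest plic priority (7), masking
 * all sources on that context.
 */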

/*
 * turn on/off the route from intr source 'irq'
 * to context 'cpu' based on 'enable'
 */
void
plic_intr_route_grid(int irq, int enable, int cpu)
{
	struct plic_softc	*sc = plic;
	uint32_t		val, mask;

	if (irq == 0)
		return;

	KASSERT(cpu < MAXCPUS);

	mask = (1 << (irq % 32));
	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			PLIC_ENABLE(sc, irq, cpu));
	if (enable == IRQ_ENABLE)
		val |= mask;
	else
		val &= ~mask;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
			PLIC_ENABLE(sc, irq, cpu), val);
}
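
/*
 * Illustrative note: the enable bits form a grid of context x source.
 * Enabling source 5 for two different harts touches the same word
 * index (5 / 32 == 0) in two different banks, whose offsets differ by
 * PLIC_ENABLE_STRIDE (0x80) when the contexts are consecutive.
 */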

/*
 * Enable intr source 'irq' on cpu 'cpu' by setting:
 * - priority
 * - threshold
 * - enable bit
 * The threshold is set one below 'min_pri' because the plic only
 * delivers interrupts whose priority is strictly greater than the
 * context's threshold.
 */
void
plic_intr_enable_with_pri(int irq, uint32_t min_pri, int cpu)
{
	plic_set_priority(irq, min_pri);
	plic_set_threshold(cpu, min_pri - 1);
	plic_intr_route_grid(irq, IRQ_ENABLE, cpu);
}

void
plic_intr_disable(int irq, int cpu)
{
	plic_set_priority(irq, 0);
	plic_set_threshold(cpu, IPL_HIGH);
	plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
}

/***************** end of helper functions *****************/