/*	$OpenBSD: plic.c,v 1.13 2024/09/04 07:54:51 mglocker Exp $	*/

/*
 * Copyright (c) 2020, Mars Li <mengshi.li.mars@gmail.com>
 * Copyright (c) 2020, Brian Bamsch <bbamsch@google.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
19 
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/queue.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/evcount.h>
26 
27 #include <machine/bus.h>
28 #include <machine/fdt.h>
29 #include <machine/cpu.h>
30 #include <machine/sbi.h>
31 #include "riscv64/dev/riscv_cpu_intc.h"
32 
33 #include <dev/ofw/openfirm.h>
34 #include <dev/ofw/fdt.h>
35 
/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of interrupt sources supported by devices marked as
 * 'sifive,plic-1.0.0' is 1024, of which source 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
 */

#define	PLIC_MAX_IRQS		1024

#define	PLIC_PRIORITY_BASE	0x000000U

#define	PLIC_ENABLE_BASE	0x002000U
#define	PLIC_ENABLE_STRIDE	0x80U
#define	IRQ_ENABLE		1
#define	IRQ_DISABLE		0

#define	PLIC_CONTEXT_BASE	0x200000U
#define	PLIC_CONTEXT_STRIDE	0x1000U
#define	PLIC_CONTEXT_THRESHOLD	0x0U
#define	PLIC_CONTEXT_CLAIM	0x4U

#define	PLIC_PRIORITY(n)	(PLIC_PRIORITY_BASE + (n) * sizeof(uint32_t))
#define	PLIC_ENABLE(sc, n, h)						\
    (sc->sc_contexts[h].enable_offset + ((n) / 32) * sizeof(uint32_t))
#define	PLIC_THRESHOLD(sc, h)						\
    (sc->sc_contexts[h].context_offset + PLIC_CONTEXT_THRESHOLD)
#define	PLIC_CLAIM(sc, h)						\
    (sc->sc_contexts[h].context_offset + PLIC_CONTEXT_CLAIM)
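
/*
 * Worked example (illustrative only): assuming hart 0's S-mode maps to
 * context 1, its enable_offset is 0x002000 + 1 * 0x80 = 0x002080 and
 * its context_offset is 0x200000 + 1 * 0x1000 = 0x201000.  The enable
 * bit for interrupt source 10 is then bit (10 % 32) of the word at
 * 0x002080 + (10 / 32) * 4 = 0x002080, and the claim/complete register
 * sits at 0x201000 + 0x4 = 0x201004.
 */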
struct plic_intrhand {
	TAILQ_ENTRY(plic_intrhand) ih_list; /* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;
	char *ih_name;
	struct cpu_info *ih_ci;
};

/*
 * One interrupt source can have multiple handlers attached, and each
 * handler can have a different priority level, so we track the max and
 * min priority levels.
 */
struct plic_irqsrc {
	TAILQ_HEAD(, plic_intrhand) is_list; /* handler list */
	int	is_irq_max;	/* IPL to mask while handling */
	int	is_irq_min;	/* lowest IPL when shared */
};

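/*
 * A PLIC "context" is one (hart, privilege mode) pair's view of the
 * controller: each context has its own bank of enable bits and its own
 * threshold and claim/complete registers.  We record the offsets of
 * both register groups per cpu once they have been resolved from the
 * device tree.
 */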
struct plic_context {
	bus_size_t enable_offset;
	bus_size_t context_offset;
};

struct plic_softc {
	struct device		sc_dev;
	int			sc_node;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	struct plic_irqsrc	*sc_isrcs;
	struct plic_context	sc_contexts[MAXCPUS];
	int			sc_ndev;
	struct interrupt_controller	sc_intc;
};

struct plic_softc *plic = NULL;

int	plic_match(struct device *, void *, void *);
void	plic_attach(struct device *, struct device *, void *);
int	plic_irq_handler(void *);
int	plic_irq_dispatch(uint32_t, void *);
void	*plic_intr_establish(int, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	*plic_intr_establish_fdt(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	plic_intr_disestablish(void *);
void	plic_intr_route(void *, int, struct cpu_info *);
void	plic_intr_barrier(void *);

void	plic_splx(int);
int	plic_spllower(int);
int	plic_splraise(int);
void	plic_setipl(int);
void	plic_calc_mask(void);

/* helper functions */
int	plic_get_cpuid(int);
void	plic_set_priority(int, uint32_t);
void	plic_set_threshold(int, uint32_t);
void	plic_intr_route_grid(int, int, int);
void	plic_intr_enable_with_pri(int, uint32_t, int);
void	plic_intr_disable(int, int);

const struct cfattach plic_ca = {
	sizeof(struct plic_softc), plic_match, plic_attach,
};

struct cfdriver plic_cd = {
	NULL, "plic", DV_DULL
};

int plic_attached = 0;

int
plic_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	if (plic_attached)
		return 0;	/* only expect one instance of PLIC */

	return (OF_is_compatible(faa->fa_node, "riscv,plic0") ||
	    OF_is_compatible(faa->fa_node, "sifive,plic-1.0.0") ||
	    OF_is_compatible(faa->fa_node, "thead,c900-plic"));
}

void
plic_attach(struct device *parent, struct device *dev, void *aux)
{
	struct plic_softc *sc;
	struct fdt_attach_args *faa;
	uint32_t *cells;
	uint32_t irq;
	int cpu;
	int node;
	int len;
	int ncell;
	int context;
	int i;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	if (plic_attached)
		return;

	plic = sc = (struct plic_softc *)dev;
	faa = (struct fdt_attach_args *)aux;

	if (faa->fa_nreg < 1)
		return;

	sc->sc_node = node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;

	/* determine number of devices sending intr to this ic */
	sc->sc_ndev = OF_getpropint(faa->fa_node, "riscv,ndev", -1);
	if (sc->sc_ndev < 0) {
		printf(": unable to resolve number of devices\n");
		return;
	}

	if (sc->sc_ndev >= PLIC_MAX_IRQS) {
		printf(": invalid ndev (%d)\n", sc->sc_ndev);
		return;
	}

	/* map interrupt controller to va space */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	sc->sc_isrcs = mallocarray(PLIC_MAX_IRQS, sizeof(struct plic_irqsrc),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_isrcs == NULL)
		panic("%s: could not allocate interrupt sources", __func__);

	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		TAILQ_INIT(&sc->sc_isrcs[irq].is_list);
		plic_set_priority(irq, 0);	/* mask interrupt */
	}

	/*
	 * Calculate the per-cpu enable and context register offsets.
	 *
	 * This is tricky for a few reasons.  The PLIC divides the interrupt
	 * enable, threshold, and claim bits by "context".
	 *
	 * The tricky part is that the PLIC spec imposes no restrictions on
	 * how these contexts are laid out.  So for example, there is no
	 * guarantee that each CPU will have both a machine mode and
	 * supervisor context, or that different PLIC implementations will
	 * organize the context registers in the same way.  On top of this,
	 * we must handle the fact that cpuid != hartid, as they may have
	 * been renumbered during boot.  We perform the following steps:
	 *
	 * 1. Examine the PLIC's "interrupts-extended" property and skip any
	 *    entries that are not for supervisor external interrupts.
	 *
	 * 2. Walk up the device tree to find the corresponding CPU, using
	 *    its node property to identify the cpuid.
	 *
	 * 3. Calculate the register offsets based on the context number.
	 */
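	/*
	 * For illustration only (not taken from this driver): on a
	 * two-hart machine laid out like QEMU's virt board, the property
	 * might read
	 *
	 *   interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
	 *                         <&cpu1_intc 11>, <&cpu1_intc 9>;
	 *
	 * where 11 is the machine external interrupt and 9 the supervisor
	 * external interrupt.  Contexts are numbered in order of
	 * appearance, so hart 0 S-mode is context 1 and hart 1 S-mode is
	 * context 3.
	 */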
	len = OF_getproplen(node, "interrupts-extended");
	if (len <= 0) {
		printf(": could not find interrupts-extended\n");
		return;
	}

	cells = malloc(len, M_TEMP, M_WAITOK);
	ncell = len / sizeof(*cells);
	if (OF_getpropintarray(node, "interrupts-extended", cells, len) < 0) {
		printf(": failed to read interrupts-extended\n");
		free(cells, M_TEMP, len);
		return;
	}

	for (i = 0, context = 0; i < ncell; i += 2, context++) {
		/* Skip anything that is not a supervisor external interrupt */
		if (cells[i + 1] != IRQ_EXTERNAL_SUPERVISOR)
			continue;

		/* Get the corresponding cpuid. */
		cpu = plic_get_cpuid(OF_getnodebyphandle(cells[i]));
		if (cpu < 0)
			continue;

		/*
		 * Set the enable and context register offsets for the CPU.
		 *
		 * We assume the S-mode handler always comes later than the
		 * M-mode handler, but this might be a little fragile.
		 *
		 * XXX
		 * The SiFive spec doesn't list hart0 S-mode enable/contexts
		 * in its memory map, but QEMU emulates hart0 S-mode
		 * enable/contexts?  Otherwise the following offset
		 * calculation would point to hart1 M-mode enable/contexts.
		 */
		sc->sc_contexts[cpu].enable_offset = PLIC_ENABLE_BASE +
		    context * PLIC_ENABLE_STRIDE;
		sc->sc_contexts[cpu].context_offset = PLIC_CONTEXT_BASE +
		    context * PLIC_CONTEXT_STRIDE;
	}

	free(cells, M_TEMP, len);

	/* Set CPU interrupt priority thresholds to minimum */
	CPU_INFO_FOREACH(cii, ci) {
		plic_set_threshold(ci->ci_cpuid, 0);
	}

	plic_setipl(IPL_HIGH);	/* XXX ??? */
	plic_calc_mask();

	/*
	 * Insert ourselves into the external interrupt handler entry in
	 * the global interrupt handler vector.
	 */
	riscv_intc_intr_establish(IRQ_EXTERNAL_SUPERVISOR, 0,
	    plic_irq_handler, NULL, "plic0");

	/*
	 * From now on, spl changes must be propagated to the PLIC, so
	 * switch the spl* routines over to our implementations.
	 */
	riscv_set_intr_func(plic_splraise, plic_spllower,
	    plic_splx, plic_setipl);

	plic_attached = 1;

	/* enable external interrupt */
	csr_set(sie, SIE_SEIE);

	sc->sc_intc.ic_node = faa->fa_node;
	sc->sc_intc.ic_cookie = sc;
	sc->sc_intc.ic_establish = plic_intr_establish_fdt;
	sc->sc_intc.ic_disestablish = plic_intr_disestablish;
	sc->sc_intc.ic_route = plic_intr_route;
	/* sc->sc_intc.ic_cpu_enable = XXX per-CPU initialization? */
	sc->sc_intc.ic_barrier = plic_intr_barrier;

	riscv_intr_register_fdt(&sc->sc_intc);

	printf("\n");
}

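/*
 * Top-level handler for supervisor external interrupts: claim the
 * highest-priority pending source from the PLIC, dispatch it to the
 * attached handlers, then write the claim back to signal completion.
 */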
int
plic_irq_handler(void *frame)
{
	struct plic_softc *sc;
	uint32_t pending;
	uint32_t cpu;
	int handled = 0;

	sc = plic;
	cpu = cpu_number();

	pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_CLAIM(sc, cpu));

	if (pending > sc->sc_ndev) {	/* valid sources are 1..sc_ndev */
		printf("plic0: pending %x\n", pending);
		return 0;
	}

	if (pending) {
		handled = plic_irq_dispatch(pending, frame);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    PLIC_CLAIM(sc, cpu), pending);

//#define DEBUG_INTC
#ifdef DEBUG_INTC
		if (handled == 0) {
			printf("plic handled == 0 on pending %d\n", pending);
		}
#endif /* DEBUG_INTC */
	}

	return handled;
}

int
plic_irq_dispatch(uint32_t irq, void *frame)
{
	int pri, s;
	int handled = 0;
	struct plic_softc *sc;
	struct plic_intrhand *ih;
	void *arg;

#ifdef DEBUG_INTC
	printf("plic irq %d fired\n", irq);
#endif

	sc = plic;
	pri = sc->sc_isrcs[irq].is_irq_max;
	s = plic_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		if (ih->ih_arg)
			arg = ih->ih_arg;
		else
			arg = frame;

		intr_enable();
		handled = ih->ih_func(arg);
		intr_disable();
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}

	plic_splx(s);
	return handled;
}

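/*
 * Attach an interrupt handler to PLIC source 'irqno' and recompute the
 * per-source priority mask.
 */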
void *
plic_intr_establish(int irqno, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih;
	u_long sie;

	if (irqno < 0 || irqno >= PLIC_MAX_IRQS)
		panic("plic_intr_establish: bogus irqnumber %d: %s",
		    irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;

	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;
	ih->ih_ci = ci;

	sie = intr_disable();

	TAILQ_INSERT_TAIL(&sc->sc_isrcs[irqno].is_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("%s irq %d level %d [%s]\n", __func__, irqno, level,
	    name);
#endif

	plic_calc_mask();

	intr_restore(sie);
	return (ih);
}

void *
plic_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	return plic_intr_establish(cell[0], level, ci, func, arg, name);
}

void
plic_intr_disestablish(void *cookie)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih = cookie;
	int irqno = ih->ih_irq;
	u_long sie;

	sie = intr_disable();

	TAILQ_REMOVE(&sc->sc_isrcs[irqno].is_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);

	intr_restore(sie);

	free(ih, M_DEVBUF, 0);
}

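/*
 * Route (or unroute) an established interrupt source to the given CPU,
 * reprogramming its priority from the source's minimum IPL when
 * enabling.
 */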
void
plic_intr_route(void *cookie, int enable, struct cpu_info *ci)
{
	struct plic_softc	*sc = plic;
	struct plic_intrhand	*ih = cookie;

	int		irq = ih->ih_irq;
	int		cpu = ci->ci_cpuid;
	uint32_t	min_pri = sc->sc_isrcs[irq].is_irq_min;

	if (enable == IRQ_ENABLE) {
		plic_intr_enable_with_pri(irq, min_pri, cpu);
	} else {
		plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
	}
}

void
plic_intr_barrier(void *cookie)
{
	struct plic_intrhand *ih = cookie;

	sched_barrier(ih->ih_ci);
}

void
plic_splx(int new)
{
	/*
	 * XXX How do we handle pending external interrupts?
	 * After the new threshold is set, any pending external
	 * interrupt whose priority is now greater than the threshold
	 * will be passed through the PLIC to the CPU and trigger a
	 * new claim/complete cycle, so there is no need to handle
	 * pending external interrupts here.
	 */
	struct cpu_info *ci = curcpu();

	/* Pending software interrupts are handled here. */
	if (ci->ci_ipending & riscv_smask[new])
		riscv_do_pending_intr(new);

	plic_setipl(new);
}

int
plic_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;

	plic_splx(new);
	return (old);
}

int
plic_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set.  If an
	 * interrupt occurs in that window without the mask being set,
	 * the hardware might not get updated on the next splraise,
	 * completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	plic_setipl(new);

	return (old);
}

void
plic_setipl(int new)
{
	struct cpu_info		*ci = curcpu();
	u_long sie;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	sie = intr_disable();
	ci->ci_cpl = new;

	/* higher values are higher priority */
	plic_set_threshold(ci->ci_cpuid, new);

	/* trigger deferred timer interrupt if cpl is now low enough */
	if (ci->ci_timer_deferred && new < IPL_CLOCK)
		sbi_set_timer(0);

	intr_restore(sie);
}

/*
 * Update the max/min priority levels for each interrupt source and
 * propagate the updated priorities to the PLIC.  This should be called
 * whenever a new handler is attached.
 */
void
plic_calc_mask(void)
{
	struct cpu_info		*ci = curcpu();
	struct plic_softc	*sc = plic;
	struct plic_intrhand	*ih;
	int			irq;

	/* PLIC irq 0 is reserved, thus we start from 1 */
	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		if (max == IPL_NONE)
			min = IPL_NONE;

		if (sc->sc_isrcs[irq].is_irq_max == max &&
		    sc->sc_isrcs[irq].is_irq_min == min)
			continue;

		sc->sc_isrcs[irq].is_irq_max = max;
		sc->sc_isrcs[irq].is_irq_min = min;

		/* Enable interrupts at lower levels, clear -> enable */
		/* Set interrupt priority/enable */
		if (min != IPL_NONE) {
			plic_intr_enable_with_pri(irq, min, ci->ci_cpuid);
		} else {
			plic_intr_disable(irq, ci->ci_cpuid);
		}
	}

	plic_setipl(ci->ci_cpl);
}

/***************** helper functions *****************/

/*
 * OpenBSD saves the CPU node in the ci struct, so we can look up the
 * cpuid by matching nodes.
 */
int
plic_get_cpuid(int intc)
{
	uint32_t hart;
	int parent_node;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	/* Check the interrupt controller layout. */
	if (OF_getpropintarray(intc, "#interrupt-cells", &hart,
	    sizeof(hart)) < 0) {
		printf(": could not find #interrupt-cells for phandle %u\n",
		    intc);
		return (-1);
	}

	/*
	 * The parent of the interrupt-controller is the CPU we are
	 * interested in, so search for its OF node index.
	 */
	parent_node = OF_parent(intc);
	CPU_INFO_FOREACH(cii, ci) {
		if (ci->ci_node == parent_node)
			return ci->ci_cpuid;
	}
	return -1;
}

/* update priority for intr src 'irq' */
void
plic_set_priority(int irq, uint32_t pri)
{
	struct plic_softc	*sc = plic;
	uint32_t		prival;

	/*
	 * The SiFive PLIC only has priority levels 0 - 7, yet OpenBSD
	 * defines priority levels 0 - 12 (levels 1 - 4 are for SOFT*,
	 * level 12 is for IPI).  Those should NEVER be passed to the
	 * PLIC, so we calculate the PLIC priority in the following way:
	 */
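	/*
	 * Worked example (illustrative, following the IPL scheme described
	 * above): an IPL of 5 maps to PLIC priority 1 and an IPL of 11
	 * maps to 7, while anything at or below 4, or at or above 12,
	 * yields 0 and effectively disables the source.
	 */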
	if (pri <= 4 || pri >= 12)	/* invalid input */
		prival = 0;	/* effectively disable this intr source */
	else
		prival = pri - 4;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_PRIORITY(irq), prival);
}

/* update threshold for 'cpu' */
void
plic_set_threshold(int cpu, uint32_t threshold)
{
	struct plic_softc	*sc = plic;
	uint32_t		prival;

	if (threshold < 4)	/* enable everything (as far as plic is concerned) */
		prival = 0;
	else if (threshold >= 12)	/* invalid priority level ? */
		prival = IPL_HIGH - 4;	/* XXX device-specific high threshold */
	else	/* everything else */
		prival = threshold - 4;	/* XXX device-specific threshold offset */

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_THRESHOLD(sc, cpu), prival);
}

/*
 * Turn on/off the route from interrupt source 'irq' to the context of
 * cpu 'cpu' based on 'enable'.
 */
void
plic_intr_route_grid(int irq, int enable, int cpu)
{
	struct plic_softc	*sc = plic;
	uint32_t		val, mask;

	if (irq == 0)
		return;

	KASSERT(cpu < MAXCPUS);

	mask = (1 << (irq % 32));
	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_ENABLE(sc, irq, cpu));
	if (enable == IRQ_ENABLE)
		val |= mask;
	else
		val &= ~mask;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_ENABLE(sc, irq, cpu), val);
}

/*
 * Enable intr src 'irq' to cpu 'cpu' by setting:
 * - priority
 * - threshold
 * - enable bit
 */
void
plic_intr_enable_with_pri(int irq, uint32_t min_pri, int cpu)
{
	plic_set_priority(irq, min_pri);
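	/* Keep the threshold one below min_pri so this source passes it. */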
	plic_set_threshold(cpu, min_pri - 1);
	plic_intr_route_grid(irq, IRQ_ENABLE, cpu);
}

void
plic_intr_disable(int irq, int cpu)
{
	plic_set_priority(irq, 0);
	plic_set_threshold(cpu, IPL_HIGH);
	plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
}

/***************** end of helper functions *****************/