1 /* $OpenBSD: ampintc.c,v 1.32 2023/09/22 01:10:43 jsg Exp $ */
2 /*
3 * Copyright (c) 2007,2009,2011 Dale Rahn <drahn@openbsd.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 /*
19 * This driver implements the interrupt controller as specified in
20 * DDI0407E_cortex_a9_mpcore_r2p0_trm with the
21 * IHI0048A_gic_architecture_spec_v1_0 underlying specification
22 */
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/queue.h>
26 #include <sys/malloc.h>
27 #include <sys/device.h>
28 #include <sys/evcount.h>
29
30 #include <uvm/uvm_extern.h>
31
32 #include <machine/bus.h>
33 #include <machine/fdt.h>
34
35 #include <arm/cpufunc.h>
36
37 #include <dev/ofw/fdt.h>
38 #include <dev/ofw/openfirm.h>
39
40 #include <machine/simplebusvar.h>
41
42 /* registers */
43 #define ICD_DCR 0x000
44 #define ICD_DCR_ES 0x00000001
45 #define ICD_DCR_ENS 0x00000002
46
47 #define ICD_ICTR 0x004
48 #define ICD_ICTR_LSPI_SH 11
49 #define ICD_ICTR_LSPI_M 0x1f
50 #define ICD_ICTR_CPU_SH 5
51 #define ICD_ICTR_CPU_M 0x07
52 #define ICD_ICTR_ITL_SH 0
53 #define ICD_ICTR_ITL_M 0x1f
54 #define ICD_IDIR 0x008
55 #define ICD_DIR_PROD_SH 24
56 #define ICD_DIR_PROD_M 0xff
57 #define ICD_DIR_REV_SH 12
58 #define ICD_DIR_REV_M 0xfff
59 #define ICD_DIR_IMP_SH 0
60 #define ICD_DIR_IMP_M 0xfff
61
62 #define IRQ_TO_REG32(i) (((i) >> 5) & 0x1f)
63 #define IRQ_TO_REG32BIT(i) ((i) & 0x1f)
64 #define IRQ_TO_REG4(i) (((i) >> 2) & 0xff)
65 #define IRQ_TO_REG4BIT(i) ((i) & 0x3)
66 #define IRQ_TO_REG16(i) (((i) >> 4) & 0x3f)
67 #define IRQ_TO_REG16BIT(i) ((i) & 0xf)
68 #define IRQ_TO_REGBIT_S(i) 8
69 #define IRQ_TO_REG4BIT_M(i) 8
70
71 #define ICD_ISRn(i) (0x080 + (IRQ_TO_REG32(i) * 4))
72 #define ICD_ISERn(i) (0x100 + (IRQ_TO_REG32(i) * 4))
73 #define ICD_ICERn(i) (0x180 + (IRQ_TO_REG32(i) * 4))
74 #define ICD_ISPRn(i) (0x200 + (IRQ_TO_REG32(i) * 4))
75 #define ICD_ICPRn(i) (0x280 + (IRQ_TO_REG32(i) * 4))
76 #define ICD_ABRn(i) (0x300 + (IRQ_TO_REG32(i) * 4))
77 #define ICD_IPRn(i) (0x400 + (i))
78 #define ICD_IPTRn(i) (0x800 + (i))
79 #define ICD_ICRn(i) (0xC00 + (IRQ_TO_REG16(i) * 4))
80 #define ICD_ICR_TRIG_LEVEL(i) (0x0 << (IRQ_TO_REG16BIT(i) * 2))
81 #define ICD_ICR_TRIG_EDGE(i) (0x2 << (IRQ_TO_REG16BIT(i) * 2))
82 #define ICD_ICR_TRIG_MASK(i) (0x2 << (IRQ_TO_REG16BIT(i) * 2))
83
84 /*
85 * what about (ppi|spi)_status
86 */
87 #define ICD_PPI 0xD00
88 #define ICD_PPI_GTIMER (1 << 11)
89 #define ICD_PPI_FIQ (1 << 12)
90 #define ICD_PPI_PTIMER (1 << 13)
91 #define ICD_PPI_PWDOG (1 << 14)
92 #define ICD_PPI_IRQ (1 << 15)
93 #define ICD_SPI_BASE 0xD04
94 #define ICD_SPIn(i) (ICD_SPI_BASE + ((i) * 4))
95
96
97 #define ICD_SGIR 0xF00
98
99 #define ICD_PERIPH_ID_0 0xFD0
100 #define ICD_PERIPH_ID_1 0xFD4
101 #define ICD_PERIPH_ID_2 0xFD8
102 #define ICD_PERIPH_ID_3 0xFDC
103 #define ICD_PERIPH_ID_4 0xFE0
104 #define ICD_PERIPH_ID_5 0xFE4
105 #define ICD_PERIPH_ID_6 0xFE8
106 #define ICD_PERIPH_ID_7 0xFEC
107
/*
 * PrimeCell component ID registers.  These previously all aliased
 * 0xFEC (a copy-paste of ICD_PERIPH_ID_7); the component ID block
 * sits at 0xFF0-0xFFC.  Unused by the driver, kept for reference.
 */
#define ICD_COMP_ID_0		0xFF0
#define ICD_COMP_ID_1		0xFF4
#define ICD_COMP_ID_2		0xFF8
#define ICD_COMP_ID_3		0xFFC
112
113
114 #define ICPICR 0x00
115 #define ICPIPMR 0x04
116 /* XXX - must left justify bits to 0 - 7 */
117 #define ICMIPMR_SH 4
118 #define ICPBPR 0x08
119 #define ICPIAR 0x0C
120 #define ICPIAR_IRQ_SH 0
121 #define ICPIAR_IRQ_M 0x3ff
122 #define ICPIAR_CPUID_SH 10
123 #define ICPIAR_CPUID_M 0x7
124 #define ICPIAR_NO_PENDING_IRQ ICPIAR_IRQ_M
125 #define ICPEOIR 0x10
126 #define ICPPRP 0x14
127 #define ICPHPIR 0x18
128 #define ICPIIR 0xFC
129
130 /*
131 * what about periph_id and component_id
132 */
133
134 #define IRQ_ENABLE 1
135 #define IRQ_DISABLE 0
136
/*
 * Driver state for the distributor (ICD) + per-cpu interface (ICP)
 * register pair.  A single controller instance is assumed; the
 * global "ampintc" pointer is used by the spl and dispatch glue.
 */
struct ampintc_softc {
	struct simplebus_softc	 sc_sbus;
	struct intrq		*sc_handler;	/* per-irq handler queues */
	int			 sc_nintr;	/* number of irq lines */
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_d_ioh, sc_p_ioh;	/* ICD resp. ICP */
	uint8_t			 sc_cpu_mask[ICD_ICTR_CPU_M + 1]; /* per-cpu target bit */
	struct evcount		 sc_spur;	/* spurious (irq 1023) count */
	struct interrupt_controller sc_ic;
	int			 sc_ipi_reason[ICD_ICTR_CPU_M + 1]; /* ARM_IPI_* per cpu */
	int			 sc_ipi_num[2];	/* SGI used for each ARM_IPI_* */
};
struct ampintc_softc *ampintc;
150
151
/* One established interrupt handler on an irq's queue. */
struct intrhand {
	TAILQ_ENTRY(intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;			/* IPL_MPSAFE etc. */
	int ih_irq;			/* IRQ number */
	struct evcount ih_count;	/* per-handler interrupt statistics */
	char *ih_name;			/* name used for evcount */
};
162
/* Per-irq handler list plus the cached IPL summary for that line. */
struct intrq {
	TAILQ_HEAD(, intrhand) iq_list;	/* handler list */
	int iq_irq_max;			/* IRQ to mask while handling */
	int iq_irq_min;			/* lowest IRQ when shared */
	int iq_ist;			/* share type */
};
169
170
171 int ampintc_match(struct device *, void *, void *);
172 void ampintc_attach(struct device *, struct device *, void *);
173 void ampintc_cpuinit(void);
174 int ampintc_spllower(int);
175 void ampintc_splx(int);
176 int ampintc_splraise(int);
177 void ampintc_setipl(int);
178 void ampintc_calc_mask(void);
179 void *ampintc_intr_establish(int, int, int, struct cpu_info *,
180 int (*)(void *), void *, char *);
181 void *ampintc_intr_establish_ext(int, int, struct cpu_info *,
182 int (*)(void *), void *, char *);
183 void *ampintc_intr_establish_fdt(void *, int *, int,
184 struct cpu_info *, int (*)(void *), void *, char *);
185 void ampintc_intr_disestablish(void *);
186 void ampintc_irq_handler(void *);
187 const char *ampintc_intr_string(void *);
188 uint32_t ampintc_iack(void);
189 void ampintc_eoi(uint32_t);
190 void ampintc_set_priority(int, int);
191 void ampintc_intr_enable(int);
192 void ampintc_intr_disable(int);
193 void ampintc_intr_config(int, int);
194 void ampintc_route(int, int, struct cpu_info *);
195 void ampintc_route_irq(void *, int, struct cpu_info *);
196
197 int ampintc_ipi_combined(void *);
198 int ampintc_ipi_nop(void *);
199 int ampintc_ipi_ddb(void *);
200 void ampintc_send_ipi(struct cpu_info *, int);
201
202 const struct cfattach ampintc_ca = {
203 sizeof (struct ampintc_softc), ampintc_match, ampintc_attach
204 };
205
206 struct cfdriver ampintc_cd = {
207 NULL, "ampintc", DV_DULL
208 };
209
/* FDT "compatible" strings accepted by ampintc_match(). */
static char *ampintc_compatibles[] = {
	"arm,cortex-a7-gic",
	"arm,cortex-a9-gic",
	"arm,cortex-a15-gic",
	"arm,gic-400",
	NULL
};
217
218 int
ampintc_match(struct device * parent,void * cfdata,void * aux)219 ampintc_match(struct device *parent, void *cfdata, void *aux)
220 {
221 struct fdt_attach_args *faa = aux;
222 int i;
223
224 for (i = 0; ampintc_compatibles[i]; i++)
225 if (OF_is_compatible(faa->fa_node, ampintc_compatibles[i]))
226 return (1);
227
228 return (0);
229 }
230
/*
 * Attach the controller: map the ICD and ICP register banks, size the
 * irq space, quiesce all sources, install the spl/dispatch glue,
 * probe and establish IPI SGIs (MP kernels) and finally turn the
 * controller on and register with the FDT interrupt framework.
 */
void
ampintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_softc *sc = (struct ampintc_softc *)self;
	struct fdt_attach_args *faa = aux;
	int i, nintr, ncpu;
	uint32_t ictr;
#ifdef MULTIPROCESSOR
	int nipi, ipiirq[2];
#endif

	/* single instance; published for the spl/dispatch functions */
	ampintc = sc;

	arm_init_smask();

	sc->sc_iot = faa->fa_iot;

	/* First "reg" row: ICD, the distributor */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_d_ioh))
		panic("%s: ICD bus_space_map failed!", __func__);

	/* Second "reg" row: ICP, the per-cpu interface */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_p_ioh))
		panic("%s: ICP bus_space_map failed!", __func__);

	evcount_attach(&sc->sc_spur, "irq1023/spur", NULL);

	/* The ITL field counts supported irq lines in units of 32. */
	ictr = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICTR);
	nintr = 32 * ((ictr >> ICD_ICTR_ITL_SH) & ICD_ICTR_ITL_M);
	nintr += 32; /* ICD_ICTR + 1, irq 0-31 is SGI, 32+ is PPI */
	sc->sc_nintr = nintr;
	ncpu = ((ictr >> ICD_ICTR_CPU_SH) & ICD_ICTR_CPU_M) + 1;
	printf(" nirq %d, ncpu %d", nintr, ncpu);

	/*
	 * The target register of a banked interrupt reads back as the
	 * bit identifying this CPU; remember it for ampintc_route().
	 */
	KASSERT(curcpu()->ci_cpuid <= ICD_ICTR_CPU_M);
	sc->sc_cpu_mask[curcpu()->ci_cpuid] =
	    bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(0));

	/* Disable all interrupts, clear all pending */
	for (i = 0; i < nintr/32; i++) {
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICERn(i*32), ~0);
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh,
		    ICD_ICPRn(i*32), ~0);
	}
	for (i = 0; i < nintr; i++) {
		/* lowest priority ?? */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i), 0xff);
		/* target no cpus */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(i), 0);
	}
	for (i = 2; i < nintr/16; i++) {
		/* irq 32 - N; 0 selects level trigger */
		bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(i*16), 0);
	}

	/* software reset of the part? */
	/* set protection bit (kernel only)? */

	/* XXX - check power saving bit */

	/* one handler queue per irq line */
	sc->sc_handler = mallocarray(nintr, sizeof(*sc->sc_handler), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	for (i = 0; i < nintr; i++) {
		TAILQ_INIT(&sc->sc_handler[i].iq_list);
	}

	ampintc_setipl(IPL_HIGH); /* XXX ??? */
	ampintc_calc_mask();

	/* insert self as interrupt handler */
	arm_set_intr_handler(ampintc_splraise, ampintc_spllower, ampintc_splx,
	    ampintc_setipl, ampintc_intr_establish_ext,
	    ampintc_intr_disestablish, ampintc_intr_string, ampintc_irq_handler);

#ifdef MULTIPROCESSOR
	/* setup IPI interrupts */

	/*
	 * Ideally we want two IPI interrupts, one for NOP and one for
	 * DDB, however we can survive if only one is available it is
	 * possible that most are not available to the non-secure OS.
	 */
	nipi = 0;
	for (i = 0; i < 16; i++) {
		int reg, oldreg;

		/*
		 * Probe for usable SGIs by flipping a priority bit and
		 * checking whether the write sticks; if the field is
		 * read-only this SGI is not available to us.
		 */
		oldreg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
		    ICD_IPRn(i));
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg ^ 0x20);

		/* if this interrupt is not usable, the write won't stick */
		reg = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i));
		if (reg == oldreg)
			continue;

		/* return to original value, will be set when used */
		bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(i),
		    oldreg);

		if (nipi == 0)
			printf(" ipi: %d", i);
		else
			printf(", %d", i);
		ipiirq[nipi++] = i;
		if (nipi == 2)
			break;
	}

	if (nipi == 0)
		panic ("no irq available for IPI");

	/* with a single SGI, NOP and DDB share the combined handler */
	switch (nipi) {
	case 1:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_combined, sc, "ipi");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[0];
		break;
	case 2:
		ampintc_intr_establish(ipiirq[0], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_nop, sc, "ipinop");
		sc->sc_ipi_num[ARM_IPI_NOP] = ipiirq[0];
		ampintc_intr_establish(ipiirq[1], IST_EDGE_RISING,
		    IPL_IPI|IPL_MPSAFE, ampintc_ipi_ddb, sc, "ipiddb");
		sc->sc_ipi_num[ARM_IPI_DDB] = ipiirq[1];
		break;
	default:
		panic("nipi unexpected number %d", nipi);
	}

	intr_send_ipi_func = ampintc_send_ipi;
#endif

	/* enable interrupts: distributor then this CPU's interface */
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_DCR, 3);
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	enable_interrupts(PSR_I);

	/* register with the FDT interrupt framework */
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = self;
	sc->sc_ic.ic_establish = ampintc_intr_establish_fdt;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish;
	sc->sc_ic.ic_route = ampintc_route_irq;
	sc->sc_ic.ic_cpu_enable = ampintc_cpuinit;
	arm_intr_register_fdt(&sc->sc_ic);

	/* attach GICv2M frame controller */
	simplebus_attach(parent, &sc->sc_sbus.sc_dev, faa);
}
384
385 void
ampintc_set_priority(int irq,int pri)386 ampintc_set_priority(int irq, int pri)
387 {
388 struct ampintc_softc *sc = ampintc;
389 uint32_t prival;
390
391 /*
392 * We only use 16 (13 really) interrupt priorities,
393 * and a CPU is only required to implement bit 4-7 of each field
394 * so shift into the top bits.
395 * also low values are higher priority thus IPL_HIGH - pri
396 */
397 prival = (IPL_HIGH - pri) << ICMIPMR_SH;
398 bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPRn(irq), prival);
399 }
400
/*
 * Set the current interrupt priority level: record it in ci_cpl and
 * reprogram the CPU interface priority mask to match.
 */
void
ampintc_setipl(int new)
{
	struct cpu_info *ci = curcpu();
	struct ampintc_softc *sc = ampintc;
	int psw;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	psw = disable_interrupts(PSR_I);
	ci->ci_cpl = new;

	/* low values are higher priority thus IPL_HIGH - pri */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPIPMR,
	    (IPL_HIGH - new) << ICMIPMR_SH);
	restore_interrupts(psw);
}
417
418 void
ampintc_intr_enable(int irq)419 ampintc_intr_enable(int irq)
420 {
421 struct ampintc_softc *sc = ampintc;
422
423 #ifdef DEBUG
424 printf("enable irq %d register %x bitmask %08x\n",
425 irq, ICD_ISERn(irq), 1 << IRQ_TO_REG32BIT(irq));
426 #endif
427
428 bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ISERn(irq),
429 1 << IRQ_TO_REG32BIT(irq));
430 }
431
432 void
ampintc_intr_disable(int irq)433 ampintc_intr_disable(int irq)
434 {
435 struct ampintc_softc *sc = ampintc;
436
437 bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICERn(irq),
438 1 << IRQ_TO_REG32BIT(irq));
439 }
440
441 void
ampintc_intr_config(int irqno,int type)442 ampintc_intr_config(int irqno, int type)
443 {
444 struct ampintc_softc *sc = ampintc;
445 uint32_t ctrl;
446
447 ctrl = bus_space_read_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno));
448
449 ctrl &= ~ICD_ICR_TRIG_MASK(irqno);
450 if (type == IST_EDGE_RISING)
451 ctrl |= ICD_ICR_TRIG_EDGE(irqno);
452 else
453 ctrl |= ICD_ICR_TRIG_LEVEL(irqno);
454
455 bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(irqno), ctrl);
456 }
457
/*
 * Recompute each irq's min/max IPL from its attached handlers and
 * reprogram priority, enable state and routing to match.  An irq is
 * masked at the IPL of its lowest handler; the dispatcher raises to
 * iq_irq_max before running the handler chain.
 */
void
ampintc_calc_mask(void)
{
	struct cpu_info *ci = curcpu();
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih;
	int irq;

	for (irq = 0; irq < sc->sc_nintr; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		/* empty handler list: leave both at IPL_NONE */
		if (max == IPL_NONE)
			min = IPL_NONE;

		/* nothing changed for this irq, skip the hw writes */
		if (sc->sc_handler[irq].iq_irq_max == max &&
		    sc->sc_handler[irq].iq_irq_min == min)
			continue;

		sc->sc_handler[irq].iq_irq_max = max;
		sc->sc_handler[irq].iq_irq_min = min;

		/* Enable interrupts at lower levels, clear -> enable */
		/* Set interrupt priority/enable */
		if (min != IPL_NONE) {
			ampintc_set_priority(irq, min);
			ampintc_intr_enable(irq);
			ampintc_route(irq, IRQ_ENABLE, ci);
		} else {
			ampintc_intr_disable(irq);
			ampintc_route(irq, IRQ_DISABLE, ci);
		}
	}
	/* resync the hardware priority mask with the current spl */
	ampintc_setipl(ci->ci_cpl);
}
500
501 void
ampintc_splx(int new)502 ampintc_splx(int new)
503 {
504 struct cpu_info *ci = curcpu();
505
506 if (ci->ci_ipending & arm_smask[new])
507 arm_do_pending_intr(new);
508
509 ampintc_setipl(new);
510 }
511
512 int
ampintc_spllower(int new)513 ampintc_spllower(int new)
514 {
515 struct cpu_info *ci = curcpu();
516 int old = ci->ci_cpl;
517 ampintc_splx(new);
518 return (old);
519 }
520
521 int
ampintc_splraise(int new)522 ampintc_splraise(int new)
523 {
524 struct cpu_info *ci = curcpu();
525 int old;
526 old = ci->ci_cpl;
527
528 /*
529 * setipl must always be called because there is a race window
530 * where the variable is updated before the mask is set
531 * an interrupt occurs in that window without the mask always
532 * being set, the hardware might not get updated on the next
533 * splraise completely messing up spl protection.
534 */
535 if (old > new)
536 new = old;
537
538 ampintc_setipl(new);
539
540 return (old);
541 }
542
543
544 uint32_t
ampintc_iack(void)545 ampintc_iack(void)
546 {
547 uint32_t intid;
548 struct ampintc_softc *sc = ampintc;
549
550 intid = bus_space_read_4(sc->sc_iot, sc->sc_p_ioh, ICPIAR);
551
552 return (intid);
553 }
554
555 void
ampintc_eoi(uint32_t eoi)556 ampintc_eoi(uint32_t eoi)
557 {
558 struct ampintc_softc *sc = ampintc;
559
560 bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPEOIR, eoi);
561 }
562
563 void
ampintc_route(int irq,int enable,struct cpu_info * ci)564 ampintc_route(int irq, int enable, struct cpu_info *ci)
565 {
566 struct ampintc_softc *sc = ampintc;
567 uint8_t mask, val;
568
569 KASSERT(ci->ci_cpuid <= ICD_ICTR_CPU_M);
570 mask = sc->sc_cpu_mask[ci->ci_cpuid];
571
572 val = bus_space_read_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq));
573 if (enable == IRQ_ENABLE)
574 val |= mask;
575 else
576 val &= ~mask;
577 bus_space_write_1(sc->sc_iot, sc->sc_d_ioh, ICD_IPTRn(irq), val);
578 }
579
580 void
ampintc_cpuinit(void)581 ampintc_cpuinit(void)
582 {
583 struct ampintc_softc *sc = ampintc;
584 int i;
585
586 /* XXX - this is the only cpu specific call to set this */
587 if (sc->sc_cpu_mask[cpu_number()] == 0) {
588 for (i = 0; i < 32; i++) {
589 int cpumask =
590 bus_space_read_1(sc->sc_iot, sc->sc_d_ioh,
591 ICD_IPTRn(i));
592
593 if (cpumask != 0) {
594 sc->sc_cpu_mask[cpu_number()] = cpumask;
595 break;
596 }
597 }
598 }
599
600 if (sc->sc_cpu_mask[cpu_number()] == 0)
601 panic("could not determine cpu target mask");
602 }
603
/*
 * Route an established irq to (or away from) the given CPU, making
 * sure the CPU interface is enabled and the irq is programmed with
 * its priority and enable state first.
 */
void
ampintc_route_irq(void *v, int enable, struct cpu_info *ci)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih = v;

	/* enable the CPU interface (banked register, affects caller's CPU) */
	bus_space_write_4(sc->sc_iot, sc->sc_p_ioh, ICPICR, 1);
	/*
	 * 0 selects level trigger.  NOTE(review): this overwrites any
	 * edge configuration set via ampintc_intr_config() for the
	 * other irqs sharing this config word -- confirm intended.
	 */
	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_ICRn(ih->ih_irq), 0);
	if (enable) {
		ampintc_set_priority(ih->ih_irq,
		    sc->sc_handler[ih->ih_irq].iq_irq_min);
		ampintc_intr_enable(ih->ih_irq);
	}

	ampintc_route(ih->ih_irq, enable, ci);
}
620
/*
 * Main interrupt dispatcher: acknowledge the pending interrupt, raise
 * the spl to the line's maximum handler IPL, run the handler chain
 * with interrupts re-enabled, then issue the EOI and drop back.
 */
void
ampintc_irq_handler(void *frame)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih;
	void *arg;
	uint32_t iack_val;
	int irq, pri, s, handled;

	iack_val = ampintc_iack();
#ifdef DEBUG_INTC
	if (iack_val != 27)
		printf("irq %d fired\n", iack_val);
	else {
		static int cnt = 0;
		if ((cnt++ % 100) == 0) {
			printf("irq %d fired * _100\n", iack_val);
#ifdef DDB
			db_enter();
#endif
		}

	}
#endif

	irq = iack_val & ICPIAR_IRQ_M;

	/* 1023 (ICPIAR_NO_PENDING_IRQ) means spurious */
	if (irq == 1023) {
		sc->sc_spur.ec_count++;
		return;
	}

	if (irq >= sc->sc_nintr)
		return;

	/* mask out this line for the duration of the handler chain */
	pri = sc->sc_handler[irq].iq_irq_max;
	s = ampintc_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_handler[irq].iq_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		/* non-MPSAFE handlers below IPL_SCHED need the big lock */
		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		/* handlers without a private argument get the trap frame */
		if (ih->ih_arg)
			arg = ih->ih_arg;
		else
			arg = frame;

		/* run with CPU interrupts on; the spl still masks this irq */
		enable_interrupts(PSR_I);
		handled = ih->ih_func(arg);
		disable_interrupts(PSR_I);
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}
	ampintc_eoi(iack_val);

	ampintc_splx(s);
}
691
692 void *
ampintc_intr_establish_ext(int irqno,int level,struct cpu_info * ci,int (* func)(void *),void * arg,char * name)693 ampintc_intr_establish_ext(int irqno, int level, struct cpu_info *ci,
694 int (*func)(void *), void *arg, char *name)
695 {
696 return ampintc_intr_establish(irqno+32, IST_LEVEL_HIGH, level,
697 ci, func, arg, name);
698 }
699
700 void *
ampintc_intr_establish_fdt(void * cookie,int * cell,int level,struct cpu_info * ci,int (* func)(void *),void * arg,char * name)701 ampintc_intr_establish_fdt(void *cookie, int *cell, int level,
702 struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
703 {
704 struct ampintc_softc *sc = (struct ampintc_softc *)cookie;
705 int irq;
706 int type;
707
708 /* 2nd cell contains the interrupt number */
709 irq = cell[1];
710
711 /* 1st cell contains type: 0 SPI (32-X), 1 PPI (16-31) */
712 if (cell[0] == 0)
713 irq += 32;
714 else if (cell[0] == 1)
715 irq += 16;
716 else
717 panic("%s: bogus interrupt type", sc->sc_sbus.sc_dev.dv_xname);
718
719 /* SPIs are only active-high level or low-to-high edge */
720 if (cell[2] & 0x3)
721 type = IST_EDGE_RISING;
722 else
723 type = IST_LEVEL_HIGH;
724
725 return ampintc_intr_establish(irq, type, level, ci, func, arg, name);
726 }
727
/*
 * Attach func/arg as a handler for irqno at the given level (which
 * also carries IPL_* flag bits such as IPL_MPSAFE).  SGIs are forced
 * to edge trigger and PPIs to level; handlers may only be established
 * on the primary CPU.  Returns an opaque cookie for
 * ampintc_intr_disestablish().
 */
void *
ampintc_intr_establish(int irqno, int type, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct ampintc_softc *sc = ampintc;
	struct intrhand *ih;
	int psw;

	if (irqno < 0 || irqno >= sc->sc_nintr)
		panic("ampintc_intr_establish: bogus irqnumber %d: %s",
		    irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;
	else if (!CPU_IS_PRIMARY(ci))
		return NULL;

	if (irqno < 16) {
		/* SGI are only EDGE */
		type = IST_EDGE_RISING;
	} else if (irqno < 32) {
		/* PPI are only LEVEL */
		type = IST_LEVEL_HIGH;
	}

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;

	/* interrupts off while the handler list is inconsistent */
	psw = disable_interrupts(PSR_I);

	TAILQ_INSERT_TAIL(&sc->sc_handler[irqno].iq_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("ampintc_intr_establish irq %d level %d [%s]\n", irqno, level,
	    name);
#endif

	/* program trigger mode, then recompute priority/enable/routing */
	ampintc_intr_config(irqno, type);
	ampintc_calc_mask();

	restore_interrupts(psw);
	return (ih);
}
779
780 void
ampintc_intr_disestablish(void * cookie)781 ampintc_intr_disestablish(void *cookie)
782 {
783 struct ampintc_softc *sc = ampintc;
784 struct intrhand *ih = cookie;
785 int psw;
786
787 #ifdef DEBUG_INTC
788 printf("ampintc_intr_disestablish irq %d level %d [%s]\n",
789 ih->ih_irq, ih->ih_ipl, ih->ih_name);
790 #endif
791
792 psw = disable_interrupts(PSR_I);
793
794 TAILQ_REMOVE(&sc->sc_handler[ih->ih_irq].iq_list, ih, ih_list);
795 if (ih->ih_name != NULL)
796 evcount_detach(&ih->ih_count);
797 free(ih, M_DEVBUF, sizeof(*ih));
798
799 ampintc_calc_mask();
800
801 restore_interrupts(psw);
802 }
803
804 const char *
ampintc_intr_string(void * cookie)805 ampintc_intr_string(void *cookie)
806 {
807 struct intrhand *ih = (struct intrhand *)cookie;
808 static char irqstr[1 + sizeof("ampintc irq ") + 4];
809
810 snprintf(irqstr, sizeof irqstr, "ampintc irq %d", ih->ih_irq);
811 return irqstr;
812 }
813
814 /*
815 * GICv2m frame controller for MSI interrupts.
816 */
817 #define GICV2M_TYPER 0x008
818 #define GICV2M_TYPER_SPI_BASE(x) (((x) >> 16) & 0x3ff)
819 #define GICV2M_TYPER_SPI_COUNT(x) (((x) >> 0) & 0x3ff)
820 #define GICV2M_SETSPI_NS 0x040
821
822 int ampintc_msi_match(struct device *, void *, void *);
823 void ampintc_msi_attach(struct device *, struct device *, void *);
824 void *ampintc_intr_establish_msi(void *, uint64_t *, uint64_t *,
825 int , struct cpu_info *, int (*)(void *), void *, char *);
826 void ampintc_intr_disestablish_msi(void *);
827
828 struct ampintc_msi_softc {
829 struct device sc_dev;
830 bus_space_tag_t sc_iot;
831 bus_space_handle_t sc_ioh;
832 paddr_t sc_addr;
833 int sc_bspi;
834 int sc_nspi;
835 void **sc_spi;
836 struct interrupt_controller sc_ic;
837 };
838
839 const struct cfattach ampintcmsi_ca = {
840 sizeof (struct ampintc_msi_softc), ampintc_msi_match, ampintc_msi_attach
841 };
842
843 struct cfdriver ampintcmsi_cd = {
844 NULL, "ampintcmsi", DV_DULL
845 };
846
847 int
ampintc_msi_match(struct device * parent,void * cfdata,void * aux)848 ampintc_msi_match(struct device *parent, void *cfdata, void *aux)
849 {
850 struct fdt_attach_args *faa = aux;
851
852 return OF_is_compatible(faa->fa_node, "arm,gic-v2m-frame");
853 }
854
/*
 * Attach a GICv2M MSI frame: map its registers, determine the SPI
 * window it controls and register as an MSI-capable interrupt
 * controller with the FDT framework.
 */
void
ampintc_msi_attach(struct device *parent, struct device *self, void *aux)
{
	struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t typer;

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	/* XXX: Hack to retrieve the physical address (from a CPU PoV). */
	if (!pmap_extract(pmap_kernel(), sc->sc_ioh, &sc->sc_addr)) {
		printf(": cannot retrieve msi addr\n");
		return;
	}

	/* TYPER reports the SPI window this frame may use */
	typer = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GICV2M_TYPER);
	sc->sc_bspi = GICV2M_TYPER_SPI_BASE(typer);
	sc->sc_nspi = GICV2M_TYPER_SPI_COUNT(typer);

	/* the device tree may override the hardware-reported window */
	sc->sc_bspi = OF_getpropint(faa->fa_node,
	    "arm,msi-base-spi", sc->sc_bspi);
	sc->sc_nspi = OF_getpropint(faa->fa_node,
	    "arm,msi-num-spis", sc->sc_nspi);

	printf(": nspi %d\n", sc->sc_nspi);

	/* one cookie slot per SPI; NULL marks a free slot */
	sc->sc_spi = mallocarray(sc->sc_nspi, sizeof(void *), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_establish_msi = ampintc_intr_establish_msi;
	sc->sc_ic.ic_disestablish = ampintc_intr_disestablish_msi;
	arm_intr_register_fdt(&sc->sc_ic);
}
893
894 void *
ampintc_intr_establish_msi(void * self,uint64_t * addr,uint64_t * data,int level,struct cpu_info * ci,int (* func)(void *),void * arg,char * name)895 ampintc_intr_establish_msi(void *self, uint64_t *addr, uint64_t *data,
896 int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
897 {
898 struct ampintc_msi_softc *sc = (struct ampintc_msi_softc *)self;
899 void *cookie;
900 int i;
901
902 for (i = 0; i < sc->sc_nspi; i++) {
903 if (sc->sc_spi[i] != NULL)
904 continue;
905
906 cookie = ampintc_intr_establish(sc->sc_bspi + i,
907 IST_EDGE_RISING, level, ci, func, arg, name);
908 if (cookie == NULL)
909 return NULL;
910
911 *addr = sc->sc_addr + GICV2M_SETSPI_NS;
912 *data = sc->sc_bspi + i;
913 sc->sc_spi[i] = cookie;
914 return &sc->sc_spi[i];
915 }
916
917 return NULL;
918 }
919
920 void
ampintc_intr_disestablish_msi(void * cookie)921 ampintc_intr_disestablish_msi(void *cookie)
922 {
923 ampintc_intr_disestablish(*(void **)cookie);
924 *(void **)cookie = NULL;
925 }
926
927 #ifdef MULTIPROCESSOR
/* IPI handler: drop the target CPU into the kernel debugger. */
int
ampintc_ipi_ddb(void *v)
{
	/* XXX */
	db_enter();
	return (1);
}
935
/* IPI handler: waking the CPU out of WFI is all that is needed. */
int
ampintc_ipi_nop(void *v)
{
	return (1);
}
942
943 int
ampintc_ipi_combined(void * v)944 ampintc_ipi_combined(void *v)
945 {
946 struct ampintc_softc *sc = (struct ampintc_softc *)v;
947
948 if (sc->sc_ipi_reason[cpu_number()] == ARM_IPI_DDB) {
949 sc->sc_ipi_reason[cpu_number()] = ARM_IPI_NOP;
950 return ampintc_ipi_ddb(v);
951 } else {
952 return ampintc_ipi_nop(v);
953 }
954 }
955
/*
 * Send an IPI (ARM_IPI_NOP or ARM_IPI_DDB) to a single CPU by writing
 * the software generated interrupt register: target CPU bit in the
 * upper field, the SGI number picked at attach time in the lower.
 */
void
ampintc_send_ipi(struct cpu_info *ci, int id)
{
	struct ampintc_softc *sc = ampintc;
	int sendmask;

	/* a NOP to ourselves would accomplish nothing */
	if (ci == curcpu() && id == ARM_IPI_NOP)
		return;

	/* never overwrite IPI_DDB with IPI_NOP */
	if (id == ARM_IPI_DDB)
		sc->sc_ipi_reason[ci->ci_cpuid] = id;

	/* currently will only send to one cpu */
	sendmask = 1 << (16 + ci->ci_cpuid);
	sendmask |= sc->sc_ipi_num[id];

	bus_space_write_4(sc->sc_iot, sc->sc_d_ioh, ICD_SGIR, sendmask);
}
975 #endif
976