1 /* $NetBSD: intr.c,v 1.118 2013/11/16 23:54:01 mrg Exp $ */
2
3 /*
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley.
10 *
11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement:
13 * This product includes software developed by the University of
14 * California, Lawrence Berkeley Laboratory.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)intr.c 8.3 (Berkeley) 11/11/93
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.118 2013/11/16 23:54:01 mrg Exp $");
45
46 #include "opt_multiprocessor.h"
47 #include "opt_sparc_arch.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/malloc.h>
53 #include <sys/cpu.h>
54 #include <sys/intr.h>
55 #include <sys/atomic.h>
56
57 #include <uvm/uvm_extern.h>
58
59 #include <dev/cons.h>
60
61 #include <machine/ctlreg.h>
62 #include <machine/instr.h>
63 #include <machine/trap.h>
64 #include <machine/promlib.h>
65
66 #include <sparc/sparc/asm.h>
67 #include <sparc/sparc/cpuvar.h>
68
69 #if defined(MULTIPROCESSOR) && defined(DDB)
70 #include <machine/db_machdep.h>
71 #endif
72
#if defined(MULTIPROCESSOR)
/* Wrapper that takes the big kernel lock around a non-MPSAFE handler. */
static int intr_biglock_wrapper(void *);

/* Soft interrupt cookie for cross-CPU calls; set in sparc_softintr_init(). */
void *xcall_cookie;
#endif

/* Handlers for interrupts that arrive with no registered owner (below). */
void strayintr(struct clockframe *);
#ifdef DIAGNOSTIC
void bogusintr(struct clockframe *);
#endif
83
84 /*
85 * Stray interrupt handler. Clear it if possible.
86 * If not, and if we get 10 interrupts in 10 seconds, panic.
87 * XXXSMP: We are holding the kernel lock at entry & exit.
88 */
89 void
strayintr(struct clockframe * fp)90 strayintr(struct clockframe *fp)
91 {
92 static int straytime, nstray;
93 char bits[64];
94 int timesince;
95
96 #if defined(MULTIPROCESSOR)
97 /*
98 * XXX
99 *
100 * Don't whine about zs interrupts on MP. We sometimes get
101 * stray interrupts when polled kernel output on cpu>0 eats
102 * the interrupt and cpu0 sees it.
103 */
104 #define ZS_INTR_IPL 12
105 if (fp->ipl == ZS_INTR_IPL)
106 return;
107 #endif
108
109 snprintb(bits, sizeof(bits), PSR_BITS, fp->psr);
110 printf("stray interrupt cpu%d ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
111 cpu_number(), fp->ipl, fp->pc, fp->npc, bits);
112
113 timesince = time_uptime - straytime;
114 if (timesince <= 10) {
115 if (++nstray > 10)
116 panic("crazy interrupts");
117 } else {
118 straytime = time_uptime;
119 nstray = 1;
120 }
121 }
122
123
#ifdef DIAGNOSTIC
/*
 * Bogus interrupt: one for which neither a hard nor a soft interrupt
 * bit was set in the interrupt pending register.  Diagnostic only.
 */
void
bogusintr(struct clockframe *fp)
{
	char psrbuf[64];

#if defined(MULTIPROCESSOR)
	/*
	 * XXX ignore zs interrupts on MP, for the same reason
	 * as in strayintr() above.
	 */
	if (fp->ipl == ZS_INTR_IPL)
		return;
#endif

	snprintb(psrbuf, sizeof(psrbuf), PSR_BITS, fp->psr);
	printf("cpu%d: bogus interrupt ipl 0x%x pc=0x%x npc=0x%x psr=%s\n",
	    cpu_number(), fp->ipl, fp->pc, fp->npc, psrbuf);
}
#endif /* DIAGNOSTIC */
147
148 /*
149 * Get module ID of interrupt target.
150 */
151 u_int
getitr(void)152 getitr(void)
153 {
154 #if defined(MULTIPROCESSOR)
155 u_int v;
156
157 if (!CPU_ISSUN4M || sparc_ncpus <= 1)
158 return (0);
159
160 v = *((u_int *)ICR_ITR);
161 return (v + 8);
162 #else
163 return (0);
164 #endif
165 }
166
167 /*
168 * Set interrupt target.
169 * Return previous value.
170 */
171 u_int
setitr(u_int mid)172 setitr(u_int mid)
173 {
174 #if defined(MULTIPROCESSOR)
175 u_int v;
176
177 if (!CPU_ISSUN4M || sparc_ncpus <= 1)
178 return (0);
179
180 v = *((u_int *)ICR_ITR);
181 *((u_int *)ICR_ITR) = CPU_MID2CPUNO(mid);
182 return (v + 8);
183 #else
184 return (0);
185 #endif
186 }
187
#if (defined(SUN4M) && !defined(MSIIEP)) || defined(SUN4D)
void nmi_hard(void);
void nmi_soft(struct trapframe *);

/*
 * Optional per-bus async error handlers, filled in elsewhere by bus
 * drivers.  Each returns non-zero if the error is fatal (see nmi_hard()).
 */
int (*memerr_handler)(void);
int (*sbuserr_handler)(void);
int (*vmeerr_handler)(void);
int (*moduleerr_handler)(void);

#if defined(MULTIPROCESSOR)
/* Count of CPUs that have entered nmi_hard(); rendezvous counter. */
static volatile u_int nmi_hard_wait = 0;
/* When set, enter the PROM instead of panicking on a fatal NMI. */
int drop_into_rom_on_fatal = 1;
#endif
201
void
nmi_hard(void)
{
	/*
	 * A level 15 hard interrupt.
	 */
	int fatal = 0;
	uint32_t si;
	char bits[64];
	u_int afsr, afva;

	/* Tally */
	cpuinfo.ci_intrcnt[15].ev_count++;
	cpuinfo.ci_data.cpu_nintr++;

	/*
	 * Dump the asynchronous fault status/address registers, if this
	 * CPU module provides a way to read them (get_asyncflt() == 0).
	 */
	afsr = afva = 0;
	if ((*cpuinfo.get_asyncflt)(&afsr, &afva) == 0) {
		snprintb(bits, sizeof(bits), AFSR_BITS, afsr);
		printf("Async registers (mid %d): afsr=%s; afva=0x%x%x\n",
			cpuinfo.mid, bits,
			(afsr & AFSR_AFA) >> AFSR_AFA_RSHIFT, afva);
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Increase nmi_hard_wait. If we aren't the master, loop while this
	 * variable is non-zero. If we are the master, loop while this
	 * variable is less than the number of cpus.
	 */
	atomic_inc_uint(&nmi_hard_wait);

	if (cpuinfo.master == 0) {
		/* Non-master: spin until the master clears the counter. */
		while (nmi_hard_wait)
			;
		return;
	} else {
		/* Master: wait (bounded) for every CPU to check in. */
		int n = 100000;

		while (nmi_hard_wait < sparc_ncpus) {
			DELAY(1);
			if (n-- > 0)
				continue;
			printf("nmi_hard: SMP botch.");
			break;
		}
	}
#endif

	/*
	 * Examine pending system interrupts.
	 */
	si = *((uint32_t *)ICR_SI_PEND);
	snprintb(bits, sizeof(bits), SINTR_BITS, si);
	printf("cpu%d: NMI: system interrupts: %s\n", cpu_number(), bits);

	/*
	 * Dispatch each pending error class to its registered handler;
	 * a handler's non-zero return marks the condition fatal.
	 */
	if ((si & SINTR_M) != 0) {
		/* ECC memory error */
		if (memerr_handler != NULL)
			fatal |= (*memerr_handler)();
	}
	if ((si & SINTR_I) != 0) {
		/* MBus/SBus async error */
		if (sbuserr_handler != NULL)
			fatal |= (*sbuserr_handler)();
	}
	if ((si & SINTR_V) != 0) {
		/* VME async error */
		if (vmeerr_handler != NULL)
			fatal |= (*vmeerr_handler)();
	}
	if ((si & SINTR_ME) != 0) {
		/* Module async error */
		if (moduleerr_handler != NULL)
			fatal |= (*moduleerr_handler)();
	}

#if defined(MULTIPROCESSOR)
	/*
	 * Tell everyone else we've finished dealing with the hard NMI.
	 */
	nmi_hard_wait = 0;
	if (fatal && drop_into_rom_on_fatal) {
		prom_abort();
		return;
	}
#endif

	if (fatal)
		panic("nmi");
}
293
/*
 * Non-maskable soft interrupt level 15 handler.
 * Services PROM mailbox requests (stop/idle this CPU) and, on MP,
 * level-15 cross-CPU messages (pause request from DDB).
 */
void
nmi_soft(struct trapframe *tf)
{

	/* Tally */
	cpuinfo.ci_sintrcnt[15].ev_count++;
	cpuinfo.ci_data.cpu_nintr++;

	if (cpuinfo.mailbox) {
		/* Check PROM messages */
		uint8_t msg = *(uint8_t *)cpuinfo.mailbox;
		switch (msg) {
		case OPENPROM_MBX_STOP:
		case OPENPROM_MBX_WD:
			/* In case there's an xcall in progress (unlikely) */
			spl0();
#ifdef MULTIPROCESSOR
			/* Stop being a target for cross-CPU calls. */
			cpu_ready_mask &= ~(1 << cpu_number());
#endif
			prom_cpustop(0);
			break;
		case OPENPROM_MBX_ABORT:
		case OPENPROM_MBX_BPT:
			prom_cpuidle(0);
			/*
			 * We emerge here after someone does a
			 * prom_resumecpu(ournode).
			 */
			return;
		default:
			break;
		}
	}

#if defined(MULTIPROCESSOR)
	switch (cpuinfo.msg_lev15.tag) {
	case XPMSG15_PAUSECPU:
		/* XXX - assumes DDB is the only user of mp_pause_cpu() */
		cpuinfo.flags |= CPUFLG_PAUSED;
#if defined(DDB)
		/* trap(T_DBPAUSE) */
		__asm("ta 0x8b");
#else
		/* No debugger: spin until someone clears CPUFLG_PAUSED. */
		while (cpuinfo.flags & CPUFLG_PAUSED)
			/* spin */;
#endif /* DDB */
	}
	cpuinfo.msg_lev15.tag = 0;
#endif /* MULTIPROCESSOR */
}
347
#if defined(MULTIPROCESSOR)
/*
 * Respond to an xcall() request from another CPU.
 *
 * This is also called directly from xcall() if we notice an
 * incoming message while we're waiting to grab the xpmsg_lock.
 * We pass the address of xcallintr() itself to indicate that
 * this is not a real interrupt.
 */
void
xcallintr(void *v)
{

	kpreempt_disable();

	/* Tally (only when entered as a genuine soft interrupt) */
	if (v != xcallintr)
		cpuinfo.ci_sintrcnt[13].ev_count++;

	/* notyet - cpuinfo.msg.received = 1; */
	switch (cpuinfo.msg.tag) {
	case XPMSG_FUNC:
	{
		/* Run the function the requesting CPU asked us to call. */
		volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;

		if (p->func)
			(*p->func)(p->arg0, p->arg1, p->arg2);
		break;
	}
	}
	/* Acknowledge completion so the sender can stop waiting. */
	cpuinfo.msg.tag = 0;
	cpuinfo.msg.complete = 1;

	kpreempt_enable();
}
#endif /* MULTIPROCESSOR */
384 #endif /* SUN4M || SUN4D */
385
386
#ifdef MSIIEP
/*
 * It's easier to make this separate so that not to further obscure
 * SUN4M case with more ifdefs. There's no common functionality
 * anyway.
 */

#include <sparc/sparc/msiiepreg.h>

/* microSPARC-IIep flavors of the level 15 hard/soft NMI handlers. */
void nmi_hard_msiiep(void);
void nmi_soft_msiiep(void);
399
/*
 * Level 15 hard NMI on microSPARC-IIep: decode and report the PCIC
 * system interrupt causes, then clear the PCIC-related NMI sources.
 */
void
nmi_hard_msiiep(void)
{
	uint32_t si;
	char bits[128];
	int fatal = 0;

	si = mspcic_read_4(pcic_sys_ipr);
	snprintb(bits, sizeof(bits), MSIIEP_SYS_IPR_BITS, si);
	printf("NMI: system interrupts: %s\n", bits);


	if (si & MSIIEP_SYS_IPR_MEM_FAULT) {
		uint32_t afsr, afar, mfsr, mfar;

		/* Read fault address before fault status in each pair. */
		afar = *(volatile uint32_t *)MSIIEP_AFAR;
		afsr = *(volatile uint32_t *)MSIIEP_AFSR;

		mfar = *(volatile uint32_t *)MSIIEP_MFAR;
		mfsr = *(volatile uint32_t *)MSIIEP_MFSR;

		if (afsr & MSIIEP_AFSR_ERR) {
			snprintb(bits, sizeof(bits), MSIIEP_AFSR_BITS, afsr);
			printf("async fault: afsr=%s; afar=%08x\n", bits, afar);
		}

		if (mfsr & MSIIEP_MFSR_ERR) {
			snprintb(bits, sizeof(bits), MSIIEP_MFSR_BITS, mfsr);
			printf("mem fault: mfsr=%s; mfar=%08x\n", bits, mfar);
		}

		/* NOTE(review): every cause is reported but left non-fatal
		 * (fatal stays 0), so the panic below is unreachable --
		 * presumably deliberate; confirm before changing. */
		fatal = 0;
	}

	if (si & MSIIEP_SYS_IPR_SERR) { /* XXX */
		printf("serr#\n");
		fatal = 0;
	}

	if (si & MSIIEP_SYS_IPR_DMA_ERR) {
		printf("dma: %08x\n",
		       mspcic_read_stream_4(pcic_iotlb_err_addr));
		fatal = 0;
	}

	if (si & MSIIEP_SYS_IPR_PIO_ERR) {
		printf("pio: addr=%08x, cmd=%x stat=%04x\n",
		       mspcic_read_stream_4(pcic_pio_err_addr),
		       mspcic_read_stream_1(pcic_pio_err_cmd),
		       mspcic_read_stream_2(pcic_stat));
		fatal = 0;
	}

	if (fatal)
		panic("nmi");

	/* Clear the NMI if it was PCIC related */
	mspcic_write_1(pcic_sys_ipr_clr, MSIIEP_SYS_IPR_CLR_ALL);
}
459
460
/* Level 15 soft NMI is never expected on ms-IIep; treat it as fatal. */
void
nmi_soft_msiiep(void)
{

	panic("soft nmi");
}
467
468 #endif /* MSIIEP */
469
470
/*
 * Level 15 interrupts are special, and not vectored here.
 * Only `prewired' interrupts appear here; boot-time configured devices
 * are attached via intr_establish() below.
 *
 * Indexed by processor interrupt level (PIL) 0..14.
 */
struct intrhand *intrhand[15] = {
	NULL,			/*  0 = error */
	NULL,			/*  1 = software level 1 + Sbus */
	NULL,			/*  2 = Sbus level 2 (4m: Sbus L1) */
	NULL,			/*  3 = SCSI + DMA + Sbus level 3 (4m: L2,lpt)*/
	NULL,			/*  4 = software level 4 (tty softint) (scsi) */
	NULL,			/*  5 = Ethernet + Sbus level 4 (4m: Sbus L3) */
	NULL,			/*  6 = software level 6 (not used) (4m: enet)*/
	NULL,			/*  7 = video + Sbus level 5 */
	NULL,			/*  8 = Sbus level 6 */
	NULL,			/*  9 = Sbus level 7 */
	NULL,			/* 10 = counter 0 = clock */
	NULL,			/* 11 = floppy */
	NULL,			/* 12 = zs hardware interrupt */
	NULL,			/* 13 = audio chip */
	NULL,			/* 14 = counter 1 = profiling timer */
};
493
/*
 * Soft interrupts use a separate set of handler chains.
 * This is necessary since soft interrupt handlers do not return a value
 * and therefore cannot be mixed with hardware interrupt handlers on a
 * shared handler chain.
 *
 * Indexed by PIL, like intrhand[] above.
 */
struct intrhand *sintrhand[15] = { NULL };
501
502 static void
ih_insert(struct intrhand ** head,struct intrhand * ih)503 ih_insert(struct intrhand **head, struct intrhand *ih)
504 {
505 struct intrhand **p, *q;
506 /*
507 * This is O(N^2) for long chains, but chains are never long
508 * and we do want to preserve order.
509 */
510 for (p = head; (q = *p) != NULL; p = &q->ih_next)
511 continue;
512 *p = ih;
513 ih->ih_next = NULL;
514 }
515
516 static void
ih_remove(struct intrhand ** head,struct intrhand * ih)517 ih_remove(struct intrhand **head, struct intrhand *ih)
518 {
519 struct intrhand **p, *q;
520
521 for (p = head; (q = *p) != ih; p = &q->ih_next)
522 continue;
523 if (q == NULL)
524 panic("intr_remove: intrhand %p fun %p arg %p",
525 ih, ih->ih_fun, ih->ih_arg);
526
527 *p = q->ih_next;
528 q->ih_next = NULL;
529 }
530
static int fastvec;		/* marks fast vectors (see below) */
/* Common interrupt trap entry points (branch targets; see check_tv()). */
extern int sparc_interrupt4m[];
extern int sparc_interrupt44c[];
534
#ifdef DIAGNOSTIC
/*
 * Sanity-check that the trap vector for `level' still contains the
 * standard three-instruction sequence before we overwrite it.
 */
static void
check_tv(int level)
{
	struct trapvec *tv;
	int displ;

	/* double check for legal hardware interrupt */
	tv = &trapbase[T_L1INT - 1 + level];
	/* Branch displacement (in words) to the common interrupt entry. */
	displ = (CPU_ISSUN4M || CPU_ISSUN4D)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* has to be `mov level,%l3; ba _sparc_interrupt; rdpsr %l0' */
	if (tv->tv_instr[0] != I_MOVi(I_L3, level) ||
	    tv->tv_instr[1] != I_BA(0, displ) ||
	    tv->tv_instr[2] != I_RDPSR(I_L0))
		panic("intr_establish(%d)\n0x%x 0x%x 0x%x != 0x%x 0x%x 0x%x",
		      level,
		      tv->tv_instr[0], tv->tv_instr[1], tv->tv_instr[2],
		      I_MOVi(I_L3, level), I_BA(0, displ), I_RDPSR(I_L0));
}
#endif
558
/*
 * Wire a fast trap vector. Only one such fast trap is legal for any
 * interrupt, and it must be a hardware interrupt.
 * Patches the trap table in place to jump directly to `vec'.
 */
static void
inst_fasttrap(int level, void (*vec)(void))
{
	struct trapvec *tv;
	u_long hi22, lo10;
	int s;

	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Can't wire to softintr slots */
		if (level == 1 || level == 4 || level == 6)
			return;
	}

#ifdef DIAGNOSTIC
	check_tv(level);
#endif

	/* Split the handler address into sethi/jmpl immediates. */
	tv = &trapbase[T_L1INT - 1 + level];
	hi22 = ((u_long)vec) >> 10;
	lo10 = ((u_long)vec) & 0x3ff;
	s = splhigh();

	/* kernel text is write protected -- let us in for a moment */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);
	cpuinfo.cache_flush_all();
	tv->tv_instr[0] = I_SETHI(I_L3, hi22);	/* sethi %hi(vec),%l3 */
	tv->tv_instr[1] = I_JMPLri(I_G0, I_L3, lo10);/* jmpl %l3+%lo(vec),%g0 */
	tv->tv_instr[2] = I_RDPSR(I_L0);	/* mov %psr, %l0 */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
	cpuinfo.cache_flush_all();
	/* Remember that this level is now taken by a fast vector. */
	fastvec |= 1 << level;
	splx(s);
}
597
/*
 * Uninstall a fast trap handler: restore the standard trap vector
 * sequence that dispatches through the common interrupt entry.
 */
static void
uninst_fasttrap(int level)
{
	struct trapvec *tv;
	int displ;	/* suspenders, belt, and buttons too */
	int s;

	tv = &trapbase[T_L1INT - 1 + level];
	s = splhigh();
	/* Branch displacement (in words) to the common interrupt entry. */
	displ = (CPU_ISSUN4M || CPU_ISSUN4D)
		? &sparc_interrupt4m[0] - &tv->tv_instr[1]
		: &sparc_interrupt44c[0] - &tv->tv_instr[1];

	/* kernel text is write protected -- let us in for a moment */
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);
	cpuinfo.cache_flush_all();
	/* Restore `mov level,%l3; ba _sparc_interrupt; rdpsr %l0'. */
	tv->tv_instr[0] = I_MOVi(I_L3, level);
	tv->tv_instr[1] = I_BA(0, displ);
	tv->tv_instr[2] = I_RDPSR(I_L0);
	pmap_kprotect((vaddr_t)tv & -PAGE_SIZE, PAGE_SIZE, VM_PROT_READ);
	cpuinfo.cache_flush_all();
	fastvec &= ~(1 << level);
	splx(s);
}
626
/*
 * Attach an interrupt handler to the vector chain for the given level.
 * This is not possible if it has been taken away as a fast vector.
 */
void
intr_establish(int level, int classipl,
	       struct intrhand *ih, void (*vec)(void),
	       bool maybe_mpsafe)
{
	int s = splhigh();
#ifdef MULTIPROCESSOR
	bool mpsafe;
#endif /* MULTIPROCESSOR */
	/* classipl == 0 means "run at the hardware interrupt level". */
	if (classipl == 0)
		classipl = level;

#ifdef MULTIPROCESSOR
	/* IPL_VM handlers need the kernel lock unless explicitly marked. */
	mpsafe = (classipl != IPL_VM) || maybe_mpsafe;
#endif

#ifdef DIAGNOSTIC
	if (CPU_ISSUN4C) {
		/*
		 * Check reserved softintr slots on SUN4C only.
		 * No check for SUN4, as 4/300's have
		 * esp0 at level 4 and le0 at level 6.
		 */
		if (level == 1 || level == 4 || level == 6)
			panic("intr_establish: reserved softintr level");
	}
#endif

	/*
	 * If a `fast vector' is currently tied to this level, we must
	 * first undo that.
	 */
	if (fastvec & (1 << level)) {
		printf("intr_establish: untie fast vector at level %d\n",
			level);
		uninst_fasttrap(level);
	} else if (vec != NULL &&
		   intrhand[level] == NULL && sintrhand[level] == NULL) {
		/* First handler on an unclaimed level: wire a fast vector. */
		inst_fasttrap(level, vec);
	}

	/* A requested IPL cannot exceed its device class level */
	if (classipl < level)
		panic("intr_establish: class lvl (%d) < pil (%d)\n",
			classipl, level);

	/* pre-shift to PIL field in %psr */
	ih->ih_classipl = (classipl << 8) & PSR_PIL;

#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		/* Interpose the big-lock wrapper; stash the real handler. */
		ih->ih_realfun = ih->ih_fun;
		ih->ih_realarg = ih->ih_arg;
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	ih_insert(&intrhand[level], ih);
	splx(s);
}
692
/*
 * Detach a handler from the hardware interrupt chain for `level'.
 * Note: does not re-install any fast vector that was displaced.
 */
void
intr_disestablish(int level, struct intrhand *ih)
{

	ih_remove(&intrhand[level], ih);
}
699
/*
 * This is a softintr cookie. NB that sic_pilreq MUST be the
 * first element in the struct, because the softintr_schedule()
 * macro in intr.h casts cookies to int * to get it. On a
 * sun4m, sic_pilreq is an actual processor interrupt level that
 * is passed to raise(), and on a sun4 or sun4c sic_pilreq is a
 * bit to set in the interrupt enable register with ienab_bis().
 */
struct softintr_cookie {
	int sic_pilreq;		/* CPU-specific bits; MUST be first! */
	int sic_pil;		/* Actual machine PIL that is used */
	struct intrhand sic_hand;	/* chained onto sintrhand[sic_pil] */
};
713
/*
 * softintr_init(): initialise the MI softintr system.
 */
void
sparc_softintr_init(void)
{

#if defined(MULTIPROCESSOR) && (defined(SUN4M) || defined(SUN4D))
	/* Establish a standard soft interrupt handler for cross calls */
	/* (level 13 matches the tally slot used in xcallintr()). */
	xcall_cookie = sparc_softintr_establish(13, xcallintr, NULL);
#endif
}
726
727 /*
728 * softintr_establish(): MI interface. establish a func(arg) as a
729 * software interrupt.
730 */
731 void *
sparc_softintr_establish(int level,void (* fun)(void *),void * arg)732 sparc_softintr_establish(int level, void (*fun)(void *), void *arg)
733 {
734 struct softintr_cookie *sic;
735 struct intrhand *ih;
736 int pilreq;
737 int pil;
738 #ifdef MULTIPROCESSOR
739 bool mpsafe = (level != IPL_VM);
740 #endif /* MULTIPROCESSOR */
741
742 /*
743 * On a sun4m, the processor interrupt level is stored
744 * in the softintr cookie to be passed to raise().
745 *
746 * On a sun4 or sun4c the appropriate bit to set
747 * in the interrupt enable register is stored in
748 * the softintr cookie to be passed to ienab_bis().
749 */
750 pil = pilreq = level;
751 if (CPU_ISSUN4 || CPU_ISSUN4C) {
752 /* Select the most suitable of three available softint levels */
753 if (level >= 1 && level < 4) {
754 pil = 1;
755 pilreq = IE_L1;
756 } else if (level >= 4 && level < 6) {
757 pil = 4;
758 pilreq = IE_L4;
759 } else {
760 pil = 6;
761 pilreq = IE_L6;
762 }
763 }
764
765 sic = malloc(sizeof(*sic), M_DEVBUF, 0);
766 sic->sic_pil = pil;
767 sic->sic_pilreq = pilreq;
768 ih = &sic->sic_hand;
769 #ifdef MULTIPROCESSOR
770 if (!mpsafe) {
771 ih->ih_realfun = (int (*)(void *))fun;
772 ih->ih_realarg = arg;
773 ih->ih_fun = intr_biglock_wrapper;
774 ih->ih_arg = ih;
775 } else
776 #endif /* MULTIPROCESSOR */
777 {
778 ih->ih_fun = (int (*)(void *))fun;
779 ih->ih_arg = arg;
780 }
781
782 /*
783 * Always run the handler at the requested level, which might
784 * be higher than the hardware can provide.
785 *
786 * pre-shift to PIL field in %psr
787 */
788 ih->ih_classipl = (level << 8) & PSR_PIL;
789
790 if (fastvec & (1 << pil)) {
791 printf("softintr_establish: untie fast vector at level %d\n",
792 pil);
793 uninst_fasttrap(level);
794 }
795
796 ih_insert(&sintrhand[pil], ih);
797 return (void *)sic;
798 }
799
/*
 * softintr_disestablish(): MI interface. disestablish the specified
 * software interrupt.
 */
void
sparc_softintr_disestablish(void *cookie)
{
	struct softintr_cookie *sic = cookie;

	/* Unhook from the chain it was inserted on (sic_pil), then free. */
	ih_remove(&sintrhand[sic->sic_pil], &sic->sic_hand);
	free(cookie, M_DEVBUF);
}
812
#if 0
/*
 * Compiled-out reference implementation; the live version is the
 * softintr_schedule() macro in intr.h (see the cookie comment above).
 */
void
sparc_softintr_schedule(void *cookie)
{
	struct softintr_cookie *sic = cookie;
	if (CPU_ISSUN4M || CPU_ISSUN4D) {
#if defined(SUN4M) || defined(SUN4D)
		extern void raise(int,int);
		raise(0, sic->sic_pilreq);
#endif
	} else {
#if defined(SUN4) || defined(SUN4C)
		ienab_bis(sic->sic_pilreq);
#endif
	}
}
#endif
830
#ifdef MULTIPROCESSOR

/*
 * intr_biglock_wrapper: take the big kernel lock around a handler that
 * was not established MP-safe, then call the real function/argument
 * pair stashed in the intrhand by intr_establish().
 */
static int
intr_biglock_wrapper(void *vp)
{
	struct intrhand *handler = vp;
	int result;

	KERNEL_LOCK(1, NULL);
	result = (*handler->ih_realfun)(handler->ih_realarg);
	KERNEL_UNLOCK_ONE(NULL);

	return result;
}
#endif /* MULTIPROCESSOR */
852
853 bool
cpu_intr_p(void)854 cpu_intr_p(void)
855 {
856 int idepth;
857
858 kpreempt_disable();
859 idepth = curcpu()->ci_idepth;
860 kpreempt_enable();
861
862 return idepth != 0;
863 }
864