/*	$OpenBSD: interrupt.c,v 1.20 2024/09/04 07:54:52 mglocker Exp $	*/
/*	$NetBSD: interrupt.c,v 1.18 2006/01/25 00:02:57 uwe Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>	/* uvmexp.intrs */

#include <sh/clock.h>
#include <sh/trap.h>
#include <sh/intcreg.h>
#include <sh/tmureg.h>
#include <machine/atomic.h>
#include <machine/intr.h>

void intc_intr_priority(int, int);
struct intc_intrhand *intc_alloc_ih(void);
void intc_free_ih(struct intc_intrhand *);
int intc_unknown_intr(void *);

#ifdef SH4
void intpri_intr_enable(int);
void intpri_intr_disable(int);
#endif

void tmu1_oneshot(void);
int tmu1_intr(void *);
void setsoft(int);

/*
 * EVTCODE to intc_intrhand mapper.
 * max #76 is SH4_INTEVT_TMU4 (0xb80)
 */
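/*
 * Each entry holds the index of a slot in __intc_intrhand[] below; zero
 * means "no handler registered", which makes lookups fall back to the
 * intc_unknown_intr() placeholder kept in slot 0.  The table is read and
 * written through the EVTCODE_TO_IH_INDEX() macro.
 */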
int8_t __intc_evtcode_to_ih[128];

struct intc_intrhand __intc_intrhand[_INTR_N + 1] = {
	/* Placeholder interrupt handler for unregistered interrupts. */
	[0] = { .ih_func = intc_unknown_intr, .ih_level = 0xf0 }
};

struct sh_soft_intr sh_soft_intrs[_IPL_NSOFT];

/*
 * SH INTC support.
 */
void
intc_init(void)
{

	switch (cpu_product) {
#ifdef SH3
	case CPU_PRODUCT_7709:
	case CPU_PRODUCT_7709A:
		_reg_write_2(SH7709_IPRC, 0);
		_reg_write_2(SH7709_IPRD, 0);
		_reg_write_2(SH7709_IPRE, 0);
		/* FALLTHROUGH */
	case CPU_PRODUCT_7708:
	case CPU_PRODUCT_7708S:
	case CPU_PRODUCT_7708R:
		_reg_write_2(SH3_IPRA, 0);
		_reg_write_2(SH3_IPRB, 0);
		break;
#endif /* SH3 */

#ifdef SH4
	case CPU_PRODUCT_7751:
	case CPU_PRODUCT_7751R:
		_reg_write_4(SH4_INTPRI00, 0);
		_reg_write_4(SH4_INTMSK00, INTMSK00_MASK_ALL);
		/* FALLTHROUGH */
	case CPU_PRODUCT_7750S:
	case CPU_PRODUCT_7750R:
		_reg_write_2(SH4_IPRD, 0);
		/* FALLTHROUGH */
	case CPU_PRODUCT_7750:
		_reg_write_2(SH4_IPRA, 0);
		_reg_write_2(SH4_IPRB, 0);
		_reg_write_2(SH4_IPRC, 0);
		break;
#endif /* SH4 */
	}
}

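/*
 * Register ih_func/ih_arg for the given INTEVT code, program its priority
 * into the IPR/INTPRI registers, and attach an event counter when a name
 * is supplied.  The "trigger" argument is not used yet (see the sense
 * select XXX below).  The returned cookie is what intc_intr_disestablish()
 * takes back.
 *
 * A hypothetical call from a driver attach routine could look like:
 *
 *	sc->sc_ih = intc_intr_establish(SH7709_INTEVT2_SCIF_RXI, IST_LEVEL,
 *	    IPL_TTY, foo_intr, sc, self->dv_xname);
 */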
void *
intc_intr_establish(int evtcode, int trigger, int level,
    int (*ih_func)(void *), void *ih_arg, const char *name)
{
	struct intc_intrhand *ih;

	KDASSERT(evtcode >= 0x200 && level > 0);

	ih = intc_alloc_ih();
	ih->ih_func = ih_func;
	ih->ih_arg = ih_arg;
	ih->ih_level = level << 4;	/* convert to SR.IMASK format. */
	ih->ih_evtcode = evtcode;
	ih->ih_irq = evtcode >> 5;
	ih->ih_name = name;
	if (name)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	/* Map interrupt handler */
	EVTCODE_TO_IH_INDEX(evtcode) = ih->ih_idx;

	/* Priority */
	intc_intr_priority(evtcode, level);

	/* Sense select (SH7709, SH7709A only) XXX notyet */

	return (ih);
}

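/*
 * Undo intc_intr_establish(): drop the source's priority to 0 (masking it
 * where the IPR controls it), clear the evtcode mapping and release the
 * handler slot.
 */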
void
intc_intr_disestablish(void *arg)
{
	struct intc_intrhand *ih = arg;
	int evtcode = ih->ih_evtcode;

	/* Mask interrupt if IPR can manage it; if not, cascaded ICU will. */
	intc_intr_priority(evtcode, 0);

	/* Unmap interrupt handler */
	EVTCODE_TO_IH_INDEX(evtcode) = 0;

	if (ih->ih_name)
		evcount_detach(&ih->ih_count);
	intc_free_ih(ih);
}

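/*
 * Mask the source behind the given evtcode with interrupts suspended:
 * IPR-controlled sources simply get priority 0, while the SH7751 PCI
 * sources are masked through INTMSK00 instead.
 */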
void
intc_intr_disable(int evtcode)
{
	int s;

	s = _cpu_intr_suspend();
	KASSERT(EVTCODE_TO_IH_INDEX(evtcode) != 0); /* there is a handler */
	switch (evtcode) {
	default:
		intc_intr_priority(evtcode, 0);
		break;

#ifdef SH4
	case SH4_INTEVT_PCISERR:
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIERR:
		intpri_intr_disable(evtcode);
		break;
#endif
	}
	_cpu_intr_resume(s);
}

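/*
 * Re-enable the source behind the given evtcode: restore the priority
 * recorded in ih_level, or clear the SH7751 PCI mask bit via INTMSKCLR00.
 */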
void
intc_intr_enable(int evtcode)
{
	struct intc_intrhand *ih;
	int s;

	s = _cpu_intr_suspend();
	KASSERT(EVTCODE_TO_IH_INDEX(evtcode) != 0); /* there is a handler */
	switch (evtcode) {
	default:
		ih = EVTCODE_IH(evtcode);
		/* ih_level is in the SR.IMASK format */
		intc_intr_priority(evtcode, (ih->ih_level >> 4));
		break;

#ifdef SH4
	case SH4_INTEVT_PCISERR:
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIERR:
		intpri_intr_enable(evtcode);
		break;
#endif
	}
	_cpu_intr_resume(s);
}


/*
 * void intc_intr_priority(int evtcode, int level)
 *	Set up the interrupt priority register.
 *	SH7708, SH7708S, SH7708R, SH7750, SH7750S ... evtcode is INTEVT
 *	SH7709, SH7709A                           ... evtcode is INTEVT2
 */
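/*
 * Each 16-bit IPR register packs four 4-bit priority fields; "pos" below
 * selects the nibble used by the source and level 0 effectively masks it.
 */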
void
intc_intr_priority(int evtcode, int level)
{
	volatile uint16_t *iprreg;
	int pos;
	uint16_t r;

#define	__SH_IPR(_sh, _ipr, _pos)					\
	do {								\
		iprreg = (volatile uint16_t *)(SH ## _sh ## _IPR ## _ipr); \
		pos = (_pos);						\
	} while (/*CONSTCOND*/0)

#define	SH3_IPR(_ipr, _pos)	__SH_IPR(3, _ipr, _pos)
#define	SH4_IPR(_ipr, _pos)	__SH_IPR(4, _ipr, _pos)
#define	SH7709_IPR(_ipr, _pos)	__SH_IPR(7709, _ipr, _pos)

#define	SH_IPR(_ipr, _pos)						\
	do {								\
		if (CPU_IS_SH3)						\
			SH3_IPR(_ipr, _pos);				\
		else							\
			SH4_IPR(_ipr, _pos);				\
	} while (/*CONSTCOND*/0)

	iprreg = 0;
	pos = -1;

	switch (evtcode) {
	case SH_INTEVT_TMU0_TUNI0:
		SH_IPR(A, 12);
		break;
	case SH_INTEVT_TMU1_TUNI1:
		SH_IPR(A, 8);
		break;
	case SH_INTEVT_TMU2_TUNI2:
		SH_IPR(A, 4);
		break;
	case SH_INTEVT_WDT_ITI:
		SH_IPR(B, 12);
		break;
	case SH_INTEVT_SCI_ERI:
	case SH_INTEVT_SCI_RXI:
	case SH_INTEVT_SCI_TXI:
	case SH_INTEVT_SCI_TEI:
		SH_IPR(B, 4);
		break;
	}

#ifdef SH3
	if (CPU_IS_SH3) {
		switch (evtcode) {
		case SH7709_INTEVT2_IRQ3:
			SH7709_IPR(C, 12);
			break;
		case SH7709_INTEVT2_IRQ2:
			SH7709_IPR(C, 8);
			break;
		case SH7709_INTEVT2_IRQ1:
			SH7709_IPR(C, 4);
			break;
		case SH7709_INTEVT2_IRQ0:
			SH7709_IPR(C, 0);
			break;
		case SH7709_INTEVT2_PINT07:
			SH7709_IPR(D, 12);
			break;
		case SH7709_INTEVT2_PINT8F:
			SH7709_IPR(D, 8);
			break;
		case SH7709_INTEVT2_IRQ5:
			SH7709_IPR(D, 4);
			break;
		case SH7709_INTEVT2_IRQ4:
			SH7709_IPR(D, 0);
			break;
		case SH7709_INTEVT2_DEI0:
		case SH7709_INTEVT2_DEI1:
		case SH7709_INTEVT2_DEI2:
		case SH7709_INTEVT2_DEI3:
			SH7709_IPR(E, 12);
			break;
		case SH7709_INTEVT2_IRDA_ERI:
		case SH7709_INTEVT2_IRDA_RXI:
		case SH7709_INTEVT2_IRDA_BRI:
		case SH7709_INTEVT2_IRDA_TXI:
			SH7709_IPR(E, 8);
			break;
		case SH7709_INTEVT2_SCIF_ERI:
		case SH7709_INTEVT2_SCIF_RXI:
		case SH7709_INTEVT2_SCIF_BRI:
		case SH7709_INTEVT2_SCIF_TXI:
			SH7709_IPR(E, 4);
			break;
		case SH7709_INTEVT2_ADC:
			SH7709_IPR(E, 0);
			break;
		}
	}
#endif /* SH3 */

#ifdef SH4
	if (CPU_IS_SH4) {
		switch (evtcode) {
		case SH4_INTEVT_SCIF_ERI:
		case SH4_INTEVT_SCIF_RXI:
		case SH4_INTEVT_SCIF_BRI:
		case SH4_INTEVT_SCIF_TXI:
			SH4_IPR(C, 4);
			break;

#if 0
		case SH4_INTEVT_PCISERR:
		case SH4_INTEVT_PCIDMA3:
		case SH4_INTEVT_PCIDMA2:
		case SH4_INTEVT_PCIDMA1:
		case SH4_INTEVT_PCIDMA0:
		case SH4_INTEVT_PCIPWON:
		case SH4_INTEVT_PCIPWDWN:
		case SH4_INTEVT_PCIERR:
#endif
		case SH4_INTEVT_TMU3:
		case SH4_INTEVT_TMU4:
			intpri_intr_priority(evtcode, level);
			break;
		}
	}
#endif /* SH4 */

	/*
	 * XXX: This function gets called even for interrupts that
	 * don't have their priority defined by IPR registers.
	 */
	if (pos < 0)
		return;

	r = _reg_read_2(iprreg);
	r = (r & ~(0xf << (pos))) | (level << (pos));
	_reg_write_2(iprreg, r);
}

/*
 * Interrupt handler holder allocator.
 */
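/*
 * A slot's ih_idx doubles as its "in use" mark: intc_alloc_ih() scans for
 * ih_idx == 0 and intc_free_ih() clears the slot again.
 */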
struct intc_intrhand *
intc_alloc_ih(void)
{
	/* #0 is reserved for unregistered interrupts. */
	struct intc_intrhand *ih = &__intc_intrhand[1];
	int i;

	for (i = 1; i <= _INTR_N; i++, ih++)
		if (ih->ih_idx == 0) {	/* no driver uses this. */
			ih->ih_idx = i;	/* register myself */
			return (ih);
		}

	panic("increase _INTR_N greater than %d", _INTR_N);
	return (NULL);
}

void
intc_free_ih(struct intc_intrhand *ih)
{
	ih->ih_idx = 0;
	memset(ih, 0, sizeof(*ih));
}

/* Place-holder for debugging */
int
intc_unknown_intr(void *arg)
{
	printf("INTEVT=0x%x", _reg_read_4(SH_(INTEVT)));
	if (cpu_product == CPU_PRODUCT_7709 || cpu_product == CPU_PRODUCT_7709A)
		printf(" INTEVT2=0x%x", _reg_read_4(SH7709_INTEVT2));
	printf("\n");

	panic("unknown interrupt");
	/* NOTREACHED */
	return (0);
}

#ifdef SH4 /* SH7751 support */

/*
 * INTPRIxx
 */
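/*
 * On the SH7751/SH7751R the PCI and TMU3/TMU4 sources are not covered by
 * the IPR registers: their priorities live in INTPRI00 and they are
 * masked and unmasked through INTMSK00/INTMSKCLR00.
 */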
void
intpri_intr_priority(int evtcode, int level)
{
	volatile uint32_t *iprreg;
	uint32_t r;
	int pos;

	if (!CPU_IS_SH4)
		return;

	switch (cpu_product) {
	default:
		return;

	case CPU_PRODUCT_7751:
	case CPU_PRODUCT_7751R:
		break;
	}

	iprreg = (volatile uint32_t *)SH4_INTPRI00;
	pos = -1;

	switch (evtcode) {
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIERR:
		pos = 0;
		break;

	case SH4_INTEVT_PCISERR:
		pos = 4;
		break;

	case SH4_INTEVT_TMU3:
		pos = 8;
		break;

	case SH4_INTEVT_TMU4:
		pos = 12;
		break;
	}

	if (pos < 0) {
		return;
	}

	r = _reg_read_4(iprreg);
	r = (r & ~(0xf << pos)) | (level << pos);
	_reg_write_4(iprreg, r);
}

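/*
 * Unmask an INTPRI00-controlled source by writing its request bit to
 * INTMSKCLR00, which clears the corresponding bit in the INTMSK00 mask.
 */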
void
intpri_intr_enable(int evtcode)
{
	volatile uint32_t *iprreg;
	uint32_t bit;

	if (!CPU_IS_SH4)
		return;

	switch (cpu_product) {
	default:
		return;

	case CPU_PRODUCT_7751:
	case CPU_PRODUCT_7751R:
		break;
	}

	iprreg = (volatile uint32_t *)SH4_INTMSKCLR00;
	bit = 0;

	switch (evtcode) {
	case SH4_INTEVT_PCISERR:
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIERR:
		bit = (1 << ((evtcode - SH4_INTEVT_PCISERR) >> 5));
		break;

	case SH4_INTEVT_TMU3:
		bit = INTREQ00_TUNI3;
		break;

	case SH4_INTEVT_TMU4:
		bit = INTREQ00_TUNI4;
		break;
	}

	if ((bit == 0) || (iprreg == NULL)) {
		return;
	}

	_reg_write_4(iprreg, bit);
}

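/*
 * Mask an INTPRI00-controlled source by writing its request bit to
 * INTMSK00; the inverse of intpri_intr_enable() above.
 */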
void
intpri_intr_disable(int evtcode)
{
	volatile uint32_t *iprreg;
	uint32_t bit;

	if (!CPU_IS_SH4)
		return;

	switch (cpu_product) {
	default:
		return;

	case CPU_PRODUCT_7751:
	case CPU_PRODUCT_7751R:
		break;
	}

	iprreg = (volatile uint32_t *)SH4_INTMSK00;
	bit = 0;

	switch (evtcode) {
	case SH4_INTEVT_PCISERR:
	case SH4_INTEVT_PCIDMA3:
	case SH4_INTEVT_PCIDMA2:
	case SH4_INTEVT_PCIDMA1:
	case SH4_INTEVT_PCIDMA0:
	case SH4_INTEVT_PCIPWON:
	case SH4_INTEVT_PCIPWDWN:
	case SH4_INTEVT_PCIERR:
		bit = (1 << ((evtcode - SH4_INTEVT_PCISERR) >> 5));
		break;

	case SH4_INTEVT_TMU3:
		bit = INTREQ00_TUNI3;
		break;

	case SH4_INTEVT_TMU4:
		bit = INTREQ00_TUNI4;
		break;
	}

	if ((bit == 0) || (iprreg == NULL)) {
		return;
	}

	_reg_write_4(iprreg, bit);
}
#endif /* SH4 */

/*
 * Software interrupt support
 */
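/*
 * Soft interrupts are simulated with TMU channel 1: softintr_schedule()
 * queues the handler and arms a one-shot timer via setsoft(), and
 * tmu1_intr() then walks every soft IPL queue (see tmu1_oneshot() and
 * tmu1_intr() at the end of this file).
 */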
void
softintr_init(void)
{
	struct sh_soft_intr *asi;
	int i;

	for (i = 0; i < _IPL_NSOFT; i++) {
		asi = &sh_soft_intrs[i];
		TAILQ_INIT(&asi->softintr_q);
		mtx_init(&asi->softintr_lock, IPL_HIGH);
		asi->softintr_ipl = IPL_SOFT + i;
	}

	intc_intr_establish(SH_INTEVT_TMU1_TUNI1, IST_LEVEL, IPL_SOFTNET,
	    tmu1_intr, NULL, "tmu1");
}

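/*
 * Run every handler queued at the given soft IPL.  A handler is removed
 * from the queue under softintr_lock but called with the lock dropped, so
 * it may reschedule itself.
 */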
void
softintr_dispatch(int ipl)
{
	struct sh_soft_intr *asi;
	struct sh_soft_intrhand *sih;

	asi = &sh_soft_intrs[ipl - IPL_SOFT];

	for (;;) {
		mtx_enter(&asi->softintr_lock);
		sih = TAILQ_FIRST(&asi->softintr_q);
		if (sih == NULL) {
			mtx_leave(&asi->softintr_lock);
			break;
		}
		TAILQ_REMOVE(&asi->softintr_q, sih, sih_q);
		sih->sih_pending = 0;

		uvmexp.softs++;
		mtx_leave(&asi->softintr_lock);

		(*sih->sih_fn)(sih->sih_arg);
	}
}

void
setsoft(int ipl)
{
	tmu1_oneshot();
}

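/*
 * A hypothetical usage example: a driver registers its handler once and
 * later schedules it from its hardware interrupt handler:
 *
 *	sc->sc_si = softintr_establish(IPL_SOFTNET, foo_soft, sc);
 *	...
 *	softintr_schedule(sc->sc_si);
 */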
/* Register a software interrupt handler. */
void *
softintr_establish(int ipl, void (*func)(void *), void *arg)
{
	struct sh_soft_intr *asi;
	struct sh_soft_intrhand *sih;

	if (__predict_false(ipl >= (IPL_SOFT + _IPL_NSOFT) ||
	    ipl < IPL_SOFT))
		panic("softintr_establish");

	sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);

	asi = &sh_soft_intrs[ipl - IPL_SOFT];
	if (__predict_true(sih != NULL)) {
		sih->sih_intrhead = asi;
		sih->sih_fn = func;
		sih->sih_arg = arg;
		sih->sih_pending = 0;
	}

	return (sih);
}

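/*
 * intr_barrier() is a no-op here; presumably there is nothing to drain,
 * since interrupt handlers cannot run concurrently with the caller on
 * this uniprocessor port.
 */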
void
intr_barrier(void *cookie)
{
}

/* Unregister a software interrupt handler. */
void
softintr_disestablish(void *arg)
{
	struct sh_soft_intrhand *sih = arg;
	struct sh_soft_intr *asi = sih->sih_intrhead;

	mtx_enter(&asi->softintr_lock);
	if (sih->sih_pending) {
		TAILQ_REMOVE(&asi->softintr_q, sih, sih_q);
		sih->sih_pending = 0;
	}
	mtx_leave(&asi->softintr_lock);

	free(sih, M_DEVBUF, sizeof *sih);
}

/* Schedule a software interrupt. */
void
softintr_schedule(void *arg)
{
	struct sh_soft_intrhand *sih = arg;
	struct sh_soft_intr *si = sih->sih_intrhead;

	mtx_enter(&si->softintr_lock);
	if (sih->sih_pending == 0) {
		TAILQ_INSERT_TAIL(&si->softintr_q, sih, sih_q);
		sih->sih_pending = 1;
		setsoft(si->softintr_ipl);
	}
	mtx_leave(&si->softintr_lock);
}

/*
 * Software interrupts are simulated with a TMU one-shot timer.
 */
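/*
 * Stopping channel 1, zeroing TCNT1 and restarting it makes the counter
 * underflow almost immediately, which raises SH_INTEVT_TMU1_TUNI1 and
 * enters tmu1_intr() (hooked up in softintr_init() above).
 */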
void
tmu1_oneshot(void)
{
	_reg_bclr_1(SH_(TSTR), TSTR_STR1);
	_reg_write_4(SH_(TCNT1), 0);
	_reg_bset_1(SH_(TSTR), TSTR_STR1);
}

int
tmu1_intr(void *arg)
{
	_reg_bclr_1(SH_(TSTR), TSTR_STR1);
	_reg_bclr_2(SH_(TCR1), TCR_UNF);

	softintr_dispatch(IPL_SOFTSERIAL);
	softintr_dispatch(IPL_SOFTNET);
	softintr_dispatch(IPL_SOFTCLOCK);
	softintr_dispatch(IPL_SOFT);

	return (0);
}