1 /* $NetBSD: evtchn.c,v 1.100 2022/09/07 00:40:19 knakahara Exp $ */
2
3 /*
4 * Copyright (c) 2006 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 */
27
28 /*
29 *
30 * Copyright (c) 2004 Christian Limpach.
31 * Copyright (c) 2004, K A Fraser.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
44 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
46 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
48 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
52 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */
54
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.100 2022/09/07 00:40:19 knakahara Exp $");
58
59 #include "opt_xen.h"
60 #include "isa.h"
61 #include "pci.h"
62
63 #include <sys/param.h>
64 #include <sys/cpu.h>
65 #include <sys/kernel.h>
66 #include <sys/systm.h>
67 #include <sys/device.h>
68 #include <sys/proc.h>
69 #include <sys/kmem.h>
70 #include <sys/reboot.h>
71 #include <sys/mutex.h>
72 #include <sys/interrupt.h>
73 #include <sys/xcall.h>
74
75 #include <uvm/uvm.h>
76
77 #include <xen/intr.h>
78
79 #include <xen/xen.h>
80 #include <xen/hypervisor.h>
81 #include <xen/evtchn.h>
82 #include <xen/xenfunc.h>
83
84 /* maximum number of (v)CPUs supported */
85 #ifdef XENPV
86 #define NBSD_XEN_MAX_VCPUS XEN_LEGACY_MAX_VCPUS
87 #else
88 #include <xen/include/public/hvm/hvm_info_table.h>
89 #define NBSD_XEN_MAX_VCPUS HVM_MAX_VCPUS
90 #endif
91
92 #define NR_PIRQS NR_EVENT_CHANNELS
93
94 /*
95 * This lock protects updates to the following mapping and reference-count
96 * arrays. The lock does not need to be acquired to read the mapping tables.
97 */
98 static kmutex_t evtchn_lock;
99
100 /* event handlers */
101 struct evtsource *evtsource[NR_EVENT_CHANNELS];
102
103 /* Reference counts for bindings to event channels. XXX: redo for SMP */
104 static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];
105
106 /* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */
107 static evtchn_port_t vcpu_ipi_to_evtch[NBSD_XEN_MAX_VCPUS];
108
109 /* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */
110 static int virq_timer_to_evtch[NBSD_XEN_MAX_VCPUS];
111
112 /* event-channel <-> VIRQ mapping. */
113 static int virq_to_evtch[NR_VIRQS];
114
115
116 #if defined(XENPV) && (NPCI > 0 || NISA > 0)
117 /* event-channel <-> PIRQ mapping */
118 static int pirq_to_evtch[NR_PIRQS];
119 /* PIRQ needing notify */
120 static int evtch_to_pirq_eoi[NR_EVENT_CHANNELS];
121 int pirq_interrupt(void *);
122 #endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */
123
124 static void xen_evtchn_mask(struct pic *, int);
125 static void xen_evtchn_unmask(struct pic *, int);
126 static void xen_evtchn_addroute(struct pic *, struct cpu_info *, int, int, int);
127 static void xen_evtchn_delroute(struct pic *, struct cpu_info *, int, int, int);
128 static bool xen_evtchn_trymask(struct pic *, int);
129 static void xen_intr_get_devname(const char *, char *, size_t);
130 static void xen_intr_get_assigned(const char *, kcpuset_t *);
131 static uint64_t xen_intr_get_count(const char *, u_int);
132
133 struct pic xen_pic = {
134 .pic_name = "xenev0",
135 .pic_type = PIC_XEN,
136 .pic_vecbase = 0,
137 .pic_apicid = 0,
138 .pic_lock = __SIMPLELOCK_UNLOCKED,
139 .pic_hwmask = xen_evtchn_mask,
140 .pic_hwunmask = xen_evtchn_unmask,
141 .pic_addroute = xen_evtchn_addroute,
142 .pic_delroute = xen_evtchn_delroute,
143 .pic_trymask = xen_evtchn_trymask,
144 .pic_level_stubs = xenev_stubs,
145 .pic_edge_stubs = xenev_stubs,
146 .pic_intr_get_devname = xen_intr_get_devname,
147 .pic_intr_get_assigned = xen_intr_get_assigned,
148 .pic_intr_get_count = xen_intr_get_count,
149 };
150
151 /*
152 * We try to stick to the traditional x86 PIC semantics wrt Xen
153 * events.
154 *
155 * PIC pins exist in a global namespace which may be hierarchical, and
156 * are mapped to a cpu bus concept called 'IRQ' numbers, which are
157 * also global, but linear. Thus a (PIC, pin) tuple will always map to
158 * an IRQ number. These tuples can alias to the same IRQ number, thus
159 * causing IRQ "sharing". IRQ numbers can be bound to specific CPUs,
160 * and to specific callback vector indices on the CPU called idt_vec,
161 * which are aliases to handlers meant to run on destination
162 * CPUs. This binding can also happen at interrupt time and be resolved
163 * 'round-robin' between all CPUs, depending on the lapic setup. In
164 * this case, all CPUs need to have identical idt_vec->handler
165 * mappings.
166 *
167 * The job of pic_addroute() is to setup the 'wiring' between the
168 * source pin, and the destination CPU handler, ideally on a specific
169 * CPU in MP systems (or 'round-robin').
170 *
171 * On Xen, a global namespace of 'events' exists, which are initially
172 * bound to nothing. This is similar to the relationship between
173 * real-world IRQ numbers wrt PIC pins, since before routing,
174 * IRQ numbers by themselves have no causal connection set up with the
175 * real world. (Except for the hardwired cases on the PC Architecture,
176 * which we ignore for the purpose of this description). However the
177 * really important routing is from pin to idt_vec. On PIC_XEN, all
178 * three (pin, irq, idt_vec) belong to the same namespace and are
179 * identical. Further, the mapping between idt_vec and the actual
180 * callback handler is set up via calls to the evtchn.h api - this
181 * last bit is analogous to x86/idt.c:idt_vec_set() on real h/w
182 *
183 * For now we handle two cases:
184 * - IPC style events - eg: timer, PV devices, etc.
185 * - dom0 physical irq bound events.
186 *
187 * In the case of IPC style events, we currently externalise the
188 * event binding by using evtchn.h functions. From the POV of
189 * PIC_XEN, 'pin', 'irq' and 'idt_vec' are all identical to the
190 * port number of the event.
191 *
192 * In the case of dom0 physical irq bound events, we currently set up the
193 * event binding by exporting evtchn.h functions. From the POV of
194 * PIC_LAPIC/PIC_IOAPIC, the 'pin' is the hardware pin, the 'irq' is
195 * the x86 global irq number - the port number is extracted out of a
196 * global array (this is currently kludgy and breaks API abstraction)
197 * and the binding happens during pic_addroute() of the ioapic.
198 *
199 * Later when we integrate more tightly with x86/intr.c, we will be
200 * able to conform better to (PIC_LAPIC/PIC_IOAPIC)->PIC_XEN
201 * cascading model.
202 */
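/*
 * Illustrative sketch (not compiled in): how an IPC-style event is
 * typically wired up with the evtchn.h API described above.  The VIRQ
 * number, handler, softc and device name (VIRQ_FOO, foo_intr, sc,
 * "foo0") are hypothetical; the calls are the ones defined later in
 * this file.  For PIC_XEN, pin == irq == idt_vec == evtch, and a NULL
 * ci binds the event to the current cpu.
 *
 *	int evtch = bind_virq_to_evtch(VIRQ_FOO);
 *	if (evtch == -1)
 *		return;
 *	struct intrhand *ih = event_set_handler(evtch, foo_intr, sc,
 *	    IPL_VM, NULL, "foo0", true, NULL);
 *	hypervisor_unmask_event(evtch);
 *	...
 *	event_remove_handler(evtch, foo_intr, sc);
 *	unbind_virq_from_evtch(VIRQ_FOO);
 */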
203
204 int debug_port = -1;
205
206 /* #define IRQ_DEBUG 4 */
207
208 /* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */
209 #ifdef MULTIPROCESSOR
210
211 /*
212 * xen_intr_biglock_wrapper: grab biglock and call a real interrupt handler.
213 */
214
215 int
216 xen_intr_biglock_wrapper(void *vp)
217 {
218 struct intrhand *ih = vp;
219 int ret;
220
221 KERNEL_LOCK(1, NULL);
222
223 ret = (*ih->ih_realfun)(ih->ih_realarg);
224
225 KERNEL_UNLOCK_ONE(NULL);
226
227 return ret;
228 }
229 #endif /* MULTIPROCESSOR */
230
231 void
232 events_default_setup(void)
233 {
234 int i;
235
236 /* No VCPU -> event mappings. */
237 for (i = 0; i < NBSD_XEN_MAX_VCPUS; i++)
238 vcpu_ipi_to_evtch[i] = -1;
239
240 /* No VIRQ_TIMER -> event mappings. */
241 for (i = 0; i < NBSD_XEN_MAX_VCPUS; i++)
242 virq_timer_to_evtch[i] = -1;
243
244 /* No VIRQ -> event mappings. */
245 for (i = 0; i < NR_VIRQS; i++)
246 virq_to_evtch[i] = -1;
247
248 #if defined(XENPV) && (NPCI > 0 || NISA > 0)
249 /* No PIRQ -> event mappings. */
250 for (i = 0; i < NR_PIRQS; i++)
251 pirq_to_evtch[i] = -1;
252 for (i = 0; i < NR_EVENT_CHANNELS; i++)
253 evtch_to_pirq_eoi[i] = -1;
254 #endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */
255
256 /* No event channels are 'live' right now. */
257 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
258 evtsource[i] = NULL;
259 evtch_bindcount[i] = 0;
260 hypervisor_mask_event(i);
261 }
262
263 }
264
265 void
266 events_init(void)
267 {
268 mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE);
269
270 (void)events_resume();
271 }
272
273 bool
274 events_resume(void)
275 {
276 debug_port = bind_virq_to_evtch(VIRQ_DEBUG);
277
278 KASSERT(debug_port != -1);
279
280 aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n",
281 debug_port);
282 /*
283 * Don't call event_set_handler(), we'll use a shortcut. Just set
284 * evtsource[] to a non-NULL value so that evtchn_do_event will
285 * be called.
286 */
287 evtsource[debug_port] = (void *)-1;
288 xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port);
289 hypervisor_unmask_event(debug_port);
290 x86_enable_intr(); /* at long last... */
291
292 return true;
293 }
294
295 bool
296 events_suspend(void)
297 {
298 int evtch;
299
300 x86_disable_intr();
301
302 /* VIRQ_DEBUG is the last interrupt to remove */
303 evtch = unbind_virq_from_evtch(VIRQ_DEBUG);
304
305 KASSERT(evtch != -1);
306
307 hypervisor_mask_event(evtch);
308 /* Remove the non-NULL value set in events_resume() */
309 evtsource[evtch] = NULL;
310 aprint_verbose("VIRQ_DEBUG interrupt disabled, "
311 "event channel %d removed\n", evtch);
312
313 return true;
314 }
315
316 unsigned int
317 evtchn_do_event(int evtch, struct intrframe *regs)
318 {
319 struct cpu_info *ci;
320 int ilevel;
321 struct intrhand *ih;
322 int (*ih_fun)(void *, void *);
323 uint64_t iplmask;
324
325 KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
326 KASSERTMSG(evtch < NR_EVENT_CHANNELS,
327 "evtch number %d >= NR_EVENT_CHANNELS", evtch);
328
329 #ifdef IRQ_DEBUG
330 if (evtch == IRQ_DEBUG)
331 printf("evtchn_do_event: evtch %d\n", evtch);
332 #endif
333 ci = curcpu();
334
335 /*
336 * Shortcut for the debug handler, we want it to always run,
337 * regardless of the IPL level.
338 */
339 if (__predict_false(evtch == debug_port)) {
340 xen_debug_handler(NULL);
341 hypervisor_unmask_event(debug_port);
342 return 0;
343 }
344
345 KASSERTMSG(evtsource[evtch] != NULL, "unknown event %d", evtch);
346
347 if (evtsource[evtch]->ev_cpu != ci)
348 return 0;
349
350 ci->ci_data.cpu_nintr++;
351 evtsource[evtch]->ev_evcnt.ev_count++;
352 ilevel = ci->ci_ilevel;
353
354 if (evtsource[evtch]->ev_maxlevel <= ilevel) {
355 #ifdef IRQ_DEBUG
356 if (evtch == IRQ_DEBUG)
357 printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
358 evtch, evtsource[evtch]->ev_maxlevel, ilevel);
359 #endif
360 hypervisor_set_ipending(evtsource[evtch]->ev_imask,
361 evtch >> LONG_SHIFT,
362 evtch & LONG_MASK);
363 ih = evtsource[evtch]->ev_handlers;
364 while (ih != NULL) {
365 ih->ih_pending++;
366 ih = ih->ih_evt_next;
367 }
368
369 /* leave masked */
370
371 return 0;
372 }
373 ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
374 iplmask = evtsource[evtch]->ev_imask;
375 KASSERT(ci->ci_ilevel >= IPL_VM);
376 KASSERT(cpu_intr_p());
377 x86_enable_intr();
378 ih = evtsource[evtch]->ev_handlers;
379 while (ih != NULL) {
380 KASSERT(ih->ih_cpu == ci);
381 #if 0
382 if (ih->ih_cpu != ci) {
383 hypervisor_send_event(ih->ih_cpu, evtch);
384 iplmask &= ~(1ULL << XEN_IPL2SIR(ih->ih_level));
385 ih = ih->ih_evt_next;
386 continue;
387 }
388 #endif
389 if (ih->ih_level <= ilevel) {
390 #ifdef IRQ_DEBUG
391 if (evtch == IRQ_DEBUG)
392 printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
393 #endif
394 x86_disable_intr();
395 hypervisor_set_ipending(iplmask,
396 evtch >> LONG_SHIFT, evtch & LONG_MASK);
397 /* leave masked */
398 while (ih != NULL) {
399 ih->ih_pending++;
400 ih = ih->ih_evt_next;
401 }
402 goto splx;
403 }
404 iplmask &= ~(1ULL << XEN_IPL2SIR(ih->ih_level));
405 ci->ci_ilevel = ih->ih_level;
406 ih->ih_pending = 0;
407 ih_fun = (void *)ih->ih_fun;
408 ih_fun(ih->ih_arg, regs);
409 ih = ih->ih_evt_next;
410 }
411 x86_disable_intr();
412 hypervisor_unmask_event(evtch);
413 #if defined(XENPV) && (NPCI > 0 || NISA > 0)
414 hypervisor_ack_pirq_event(evtch);
415 #endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */
416
417 splx:
418 ci->ci_ilevel = ilevel;
419 return 0;
420 }
421
422 #define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */
423
424 /* PIC callbacks */
425 /* pic "pin"s are conceptually mapped to event port numbers */
426 static void
427 xen_evtchn_mask(struct pic *pic, int pin)
428 {
429 evtchn_port_t evtchn = pin;
430
431 KASSERT(pic->pic_type == PIC_XEN);
432 KASSERT(evtchn < NR_EVENT_CHANNELS);
433
434 hypervisor_mask_event(evtchn);
435 }
436
437 static void
438 xen_evtchn_unmask(struct pic *pic, int pin)
439 {
440 evtchn_port_t evtchn = pin;
441
442 KASSERT(pic->pic_type == PIC_XEN);
443 KASSERT(evtchn < NR_EVENT_CHANNELS);
444
445 hypervisor_unmask_event(evtchn);
446
447 }
448
449
450 static void
451 xen_evtchn_addroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type)
452 {
453
454 evtchn_port_t evtchn = pin;
455
456 /* Events are simulated as level triggered interrupts */
457 KASSERT(type == IST_LEVEL);
458
459 KASSERT(evtchn < NR_EVENT_CHANNELS);
460 #if notyet
461 evtchn_port_t boundport = idt_vec;
462 #endif
463
464 KASSERT(pic->pic_type == PIC_XEN);
465
466 xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
467
468 }
469
470 static void
471 xen_evtchn_delroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type)
472 {
473 /*
474 * XXX: In the future, this is a great place to
475 * 'unbind' the event from its underlying event channel and cpu.
476 * For now, just disable interrupt servicing on this cpu for
477 * this pin aka event channel.
478 */
479 evtchn_port_t evtchn = pin;
480
481 /* Events are simulated as level triggered interrupts */
482 KASSERT(type == IST_LEVEL);
483
484 KASSERT(evtchn < NR_EVENT_CHANNELS);
485 #if notyet
486 evtchn_port_t boundport = idt_vec;
487 #endif
488
489 KASSERT(pic->pic_type == PIC_XEN);
490
491 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtchn);
492 }
493
494 /*
495 * xen_evtchn_trymask(pic, pin)
496 *
497 * If there are interrupts pending on the bus-shared pic, return
498 * false. Otherwise, mask interrupts on the bus-shared pic and
499 * return true.
500 */
501 static bool
502 xen_evtchn_trymask(struct pic *pic, int pin)
503 {
504 volatile struct shared_info *s = HYPERVISOR_shared_info;
505 unsigned long masked __diagused;
506
507 /* Mask it. */
508 masked = xen_atomic_test_and_set_bit(&s->evtchn_mask[0], pin);
509
510 /*
511 * Caller is responsible for calling trymask only when the
512 * interrupt pin is not masked, and for serializing calls to
513 * trymask.
514 */
515 KASSERT(!masked);
516
517 /*
518 * Check whether there were any interrupts pending when we
519 * masked it. If there were, unmask and abort.
520 */
521 if (xen_atomic_test_bit(&s->evtchn_pending[0], pin)) {
522 xen_atomic_clear_bit(&s->evtchn_mask[0], pin);
523 return false;
524 }
525
526 /* Success: masked, not pending. */
527 return true;
528 }
529
530 evtchn_port_t
531 bind_vcpu_to_evtch(cpuid_t vcpu)
532 {
533 evtchn_op_t op;
534 evtchn_port_t evtchn;
535
536 mutex_spin_enter(&evtchn_lock);
537
538 evtchn = vcpu_ipi_to_evtch[vcpu];
539 if (evtchn == -1) {
540 op.cmd = EVTCHNOP_bind_ipi;
541 op.u.bind_ipi.vcpu = (uint32_t) vcpu;
542 if (HYPERVISOR_event_channel_op(&op) != 0)
543 panic("Failed to bind ipi to VCPU %"PRIuCPUID"\n", vcpu);
544 evtchn = op.u.bind_ipi.port;
545
546 vcpu_ipi_to_evtch[vcpu] = evtchn;
547 }
548
549 evtch_bindcount[evtchn]++;
550
551 mutex_spin_exit(&evtchn_lock);
552
553 return evtchn;
554 }
555
556 int
557 bind_virq_to_evtch(int virq)
558 {
559 evtchn_op_t op;
560 int evtchn;
561
562 mutex_spin_enter(&evtchn_lock);
563
564 /*
565 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER.
566 * Please re-visit this implementation when others are used.
567 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs.
568 * XXX: event->virq/ipi can be unified in a linked-list
569 * implementation.
570 */
571 struct cpu_info *ci = curcpu();
572
573 if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) {
574 mutex_spin_exit(&evtchn_lock);
575 return -1;
576 }
577
578 if (virq == VIRQ_TIMER) {
579 evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
580 } else {
581 evtchn = virq_to_evtch[virq];
582 }
583
584 /* Allocate a channel if there is none already allocated */
585 if (evtchn == -1) {
586 op.cmd = EVTCHNOP_bind_virq;
587 op.u.bind_virq.virq = virq;
588 op.u.bind_virq.vcpu = ci->ci_vcpuid;
589 if (HYPERVISOR_event_channel_op(&op) != 0)
590 panic("Failed to bind virtual IRQ %d\n", virq);
591 evtchn = op.u.bind_virq.port;
592 }
593
594 /* Set event channel */
595 if (virq == VIRQ_TIMER) {
596 virq_timer_to_evtch[ci->ci_vcpuid] = evtchn;
597 } else {
598 virq_to_evtch[virq] = evtchn;
599 }
600
601 /* Increase ref counter */
602 evtch_bindcount[evtchn]++;
603
604 mutex_spin_exit(&evtchn_lock);
605
606 return evtchn;
607 }
608
609 int
610 unbind_virq_from_evtch(int virq)
611 {
612 evtchn_op_t op;
613 int evtchn;
614
615 struct cpu_info *ci = curcpu();
616
617 if (virq == VIRQ_TIMER) {
618 evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
619 }
620 else {
621 evtchn = virq_to_evtch[virq];
622 }
623
624 if (evtchn == -1) {
625 return -1;
626 }
627
628 mutex_spin_enter(&evtchn_lock);
629
630 evtch_bindcount[evtchn]--;
631 if (evtch_bindcount[evtchn] == 0) {
632 op.cmd = EVTCHNOP_close;
633 op.u.close.port = evtchn;
634 if (HYPERVISOR_event_channel_op(&op) != 0)
635 panic("Failed to unbind virtual IRQ %d\n", virq);
636
637 if (virq == VIRQ_TIMER) {
638 virq_timer_to_evtch[ci->ci_vcpuid] = -1;
639 } else {
640 virq_to_evtch[virq] = -1;
641 }
642 }
643
644 mutex_spin_exit(&evtchn_lock);
645
646 return evtchn;
647 }
648
649 #if defined(XENPV) && (NPCI > 0 || NISA > 0)
650 int
651 get_pirq_to_evtch(int pirq)
652 {
653 int evtchn;
654
655 if (pirq == -1) /* Match previous behaviour */
656 return -1;
657
658 if (pirq >= NR_PIRQS) {
659 panic("pirq %d out of bounds, increase NR_PIRQS", pirq);
660 }
661 mutex_spin_enter(&evtchn_lock);
662
663 evtchn = pirq_to_evtch[pirq];
664
665 mutex_spin_exit(&evtchn_lock);
666
667 return evtchn;
668 }
669
670 int
671 bind_pirq_to_evtch(int pirq)
672 {
673 evtchn_op_t op;
674 int evtchn;
675
676 if (pirq >= NR_PIRQS) {
677 panic("pirq %d out of bounds, increase NR_PIRQS", pirq);
678 }
679
680 mutex_spin_enter(&evtchn_lock);
681
682 evtchn = pirq_to_evtch[pirq];
683 if (evtchn == -1) {
684 op.cmd = EVTCHNOP_bind_pirq;
685 op.u.bind_pirq.pirq = pirq;
686 op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE;
687 if (HYPERVISOR_event_channel_op(&op) != 0)
688 panic("Failed to bind physical IRQ %d\n", pirq);
689 evtchn = op.u.bind_pirq.port;
690
691 #ifdef IRQ_DEBUG
692 printf("pirq %d evtchn %d\n", pirq, evtchn);
693 #endif
694 pirq_to_evtch[pirq] = evtchn;
695 }
696
697 evtch_bindcount[evtchn]++;
698
699 mutex_spin_exit(&evtchn_lock);
700
701 return evtchn;
702 }
703
704 int
705 unbind_pirq_from_evtch(int pirq)
706 {
707 evtchn_op_t op;
708 int evtchn = pirq_to_evtch[pirq];
709
710 mutex_spin_enter(&evtchn_lock);
711
712 evtch_bindcount[evtchn]--;
713 if (evtch_bindcount[evtchn] == 0) {
714 op.cmd = EVTCHNOP_close;
715 op.u.close.port = evtchn;
716 if (HYPERVISOR_event_channel_op(&op) != 0)
717 panic("Failed to unbind physical IRQ %d\n", pirq);
718
719 pirq_to_evtch[pirq] = -1;
720 }
721
722 mutex_spin_exit(&evtchn_lock);
723
724 return evtchn;
725 }
726
727 struct pintrhand *
728 pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level,
729 const char *intrname, const char *xname, bool known_mpsafe)
730 {
731 struct pintrhand *ih;
732
733 ih = kmem_zalloc(sizeof(struct pintrhand),
734 cold ? KM_NOSLEEP : KM_SLEEP);
735 if (ih == NULL) {
736 printf("pirq_establish: can't allocate handler info\n");
737 return NULL;
738 }
739
740 KASSERT(evtch > 0);
741
742 ih->pirq = pirq;
743 ih->evtch = evtch;
744 ih->func = func;
745 ih->arg = arg;
746
747 if (event_set_handler(evtch, pirq_interrupt, ih, level, intrname,
748 xname, known_mpsafe, NULL) == NULL) {
749 kmem_free(ih, sizeof(struct pintrhand));
750 return NULL;
751 }
752
753 hypervisor_prime_pirq_event(pirq, evtch);
754 hypervisor_unmask_event(evtch);
755 hypervisor_ack_pirq_event(evtch);
756 return ih;
757 }
758
759 void
760 pirq_disestablish(struct pintrhand *ih)
761 {
762 int error = event_remove_handler(ih->evtch, pirq_interrupt, ih);
763 if (error) {
764 printf("pirq_disestablish(%p): %d\n", ih, error);
765 return;
766 }
767 kmem_free(ih, sizeof(struct pintrhand));
768 }
769
770 int
771 pirq_interrupt(void *arg)
772 {
773 struct pintrhand *ih = arg;
774 int ret;
775
776 ret = ih->func(ih->arg);
777 #ifdef IRQ_DEBUG
778 if (ih->evtch == IRQ_DEBUG)
779 printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret);
780 #endif
781 return ret;
782 }
783
784 #endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */
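/*
 * Illustrative sketch (not compiled in): how a dom0 driver path might
 * use the PIRQ helpers above.  The pirq value, handler, softc and
 * names (foo_intr, sc, "foo0") are hypothetical; in the kernel this is
 * normally driven from the ioapic/intr code during pic_addroute().
 *
 *	int evtch = bind_pirq_to_evtch(pirq);
 *	if (evtch == -1)
 *		return;
 *	struct pintrhand *pih = pirq_establish(pirq, evtch, foo_intr, sc,
 *	    IPL_BIO, "ioapic0 pin 5", "foo0", false);
 *	...
 *	pirq_disestablish(pih);
 *	unbind_pirq_from_evtch(pirq);
 */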
785
786
787 /*
788 * Recalculate the interrupt masks from scratch for an event source.
789 */
790 static void
791 intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci)
792 {
793 struct intrhand *ih;
794 int cpu_receive = 0;
795
796 evts->ev_maxlevel = IPL_NONE;
797 evts->ev_imask = 0;
798 for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
799 KASSERT(ih->ih_cpu == curcpu());
800 if (ih->ih_level > evts->ev_maxlevel)
801 evts->ev_maxlevel = ih->ih_level;
802 evts->ev_imask |= (1 << XEN_IPL2SIR(ih->ih_level));
803 if (ih->ih_cpu == ci)
804 cpu_receive = 1;
805 }
806 if (cpu_receive)
807 xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch);
808 else
809 xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch);
810 }
811
812
813 struct event_set_handler_args {
814 struct intrhand *ih;
815 struct intrsource *ipls;
816 struct evtsource *evts;
817 int evtch;
818 };
819
820 /*
821 * Called on bound CPU to handle event_set_handler()
822 * caller (on initiating CPU) holds cpu_lock on our behalf
823 * arg1: struct event_set_handler_args *
824 * arg2: NULL
825 */
826
827 static void
828 event_set_handler_xcall(void *arg1, void *arg2)
829 {
830 struct event_set_handler_args *esh_args = arg1;
831 struct intrhand **ihp, *ih = esh_args->ih;
832 struct evtsource *evts = esh_args->evts;
833
834 const u_long psl = x86_read_psl();
835 x86_disable_intr();
836 /* sort by IPL order, higher first */
837 for (ihp = &evts->ev_handlers; *ihp != NULL;
838 ihp = &((*ihp)->ih_evt_next)) {
839 if ((*ihp)->ih_level < ih->ih_level)
840 break;
841 }
842 /* insert before *ihp */
843 ih->ih_evt_next = *ihp;
844 *ihp = ih;
845 #ifndef XENPV
846 evts->ev_isl->is_handlers = evts->ev_handlers;
847 #endif
848 /* register per-cpu handler for spllower() */
849 struct cpu_info *ci = ih->ih_cpu;
850 int sir = XEN_IPL2SIR(ih->ih_level);
851 KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);
852
853 KASSERT(ci == curcpu());
854 if (ci->ci_isources[sir] == NULL) {
855 KASSERT(esh_args->ipls != NULL);
856 ci->ci_isources[sir] = esh_args->ipls;
857 }
858 struct intrsource *ipls = ci->ci_isources[sir];
859 ih->ih_next = ipls->is_handlers;
860 ipls->is_handlers = ih;
861 x86_intr_calculatemasks(ci);
862
863 intr_calculatemasks(evts, esh_args->evtch, ci);
864 x86_write_psl(psl);
865 }
866
867 struct intrhand *
868 event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
869 const char *intrname, const char *xname, bool mpsafe, struct cpu_info *ci)
870 {
871 struct event_set_handler_args esh_args;
872 char intrstr_buf[INTRIDBUF];
873 bool bind = false;
874
875 memset(&esh_args, 0, sizeof(esh_args));
876
877 /*
878 * if ci is not specified, we bind to the current cpu.
879 * if ci has been provided by the caller, we assume
880 * the caller will do the EVTCHNOP_bind_vcpu if needed.
881 */
882 if (ci == NULL) {
883 ci = curcpu();
884 bind = true;
885 }
886
887
888 #ifdef IRQ_DEBUG
889 printf("event_set_handler IRQ %d handler %p\n", evtch, func);
890 #endif
891
892 KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
893 KASSERTMSG(evtch < NR_EVENT_CHANNELS,
894 "evtch number %d >= NR_EVENT_CHANNELS", evtch);
895 KASSERT(xname != NULL);
896
897 #if 0
898 printf("event_set_handler evtch %d handler %p level %d\n", evtch,
899 handler, level);
900 #endif
901 esh_args.ih = kmem_zalloc(sizeof (struct intrhand), KM_NOSLEEP);
902 if (esh_args.ih == NULL)
903 panic("can't allocate fixed interrupt source");
904
905
906 esh_args.ih->ih_pic = &xen_pic;
907 esh_args.ih->ih_level = level;
908 esh_args.ih->ih_fun = esh_args.ih->ih_realfun = func;
909 esh_args.ih->ih_arg = esh_args.ih->ih_realarg = arg;
910 esh_args.ih->ih_evt_next = NULL;
911 esh_args.ih->ih_next = NULL;
912 esh_args.ih->ih_pending = 0;
913 esh_args.ih->ih_cpu = ci;
914 esh_args.ih->ih_pin = evtch;
915 #ifdef MULTIPROCESSOR
916 if (!mpsafe) {
917 esh_args.ih->ih_fun = xen_intr_biglock_wrapper;
918 esh_args.ih->ih_arg = esh_args.ih;
919 }
920 #endif /* MULTIPROCESSOR */
921 KASSERT(mpsafe || level < IPL_HIGH);
922
923 mutex_enter(&cpu_lock);
924 /* allocate IPL source if needed */
925 int sir = XEN_IPL2SIR(level);
926 if (ci->ci_isources[sir] == NULL) {
927 struct intrsource *ipls;
928 ipls = kmem_zalloc(sizeof (struct intrsource), KM_NOSLEEP);
929 if (ipls == NULL)
930 panic("can't allocate fixed interrupt source");
931 ipls->is_recurse = xenev_stubs[level - IPL_VM].ist_recurse;
932 ipls->is_resume = xenev_stubs[level - IPL_VM].ist_resume;
933 ipls->is_pic = &xen_pic;
934 esh_args.ipls = ipls;
935 /*
936 * note that we can't set ci_isources here, as
937 * the assembly can't handle is_handlers being NULL
938 */
939 }
940 /* register handler for event channel */
941 if (evtsource[evtch] == NULL) {
942 struct evtsource *evts;
943 evtchn_op_t op;
944 if (intrname == NULL)
945 intrname = intr_create_intrid(-1, &xen_pic, evtch,
946 intrstr_buf, sizeof(intrstr_buf));
947 evts = kmem_zalloc(sizeof (struct evtsource),
948 KM_NOSLEEP);
949 if (evts == NULL)
950 panic("can't allocate fixed interrupt source");
951
952 evts->ev_cpu = ci;
953 strlcpy(evts->ev_intrname, intrname, sizeof(evts->ev_intrname));
954
955 evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL,
956 device_xname(ci->ci_dev), evts->ev_intrname);
957 if (bind) {
958 op.cmd = EVTCHNOP_bind_vcpu;
959 op.u.bind_vcpu.port = evtch;
960 op.u.bind_vcpu.vcpu = ci->ci_vcpuid;
961 if (HYPERVISOR_event_channel_op(&op) != 0) {
962 panic("Failed to bind event %d to VCPU %s %d",
963 evtch, device_xname(ci->ci_dev),
964 ci->ci_vcpuid);
965 }
966 }
967 #ifndef XENPV
968 evts->ev_isl = intr_allocate_io_intrsource(intrname);
969 evts->ev_isl->is_pic = &xen_pic;
970 #endif
971 evtsource[evtch] = evts;
972 }
973 esh_args.evts = evtsource[evtch];
974
975 // append device name
976 if (esh_args.evts->ev_xname[0] != '\0') {
977 strlcat(esh_args.evts->ev_xname, ", ",
978 sizeof(esh_args.evts->ev_xname));
979 }
980 strlcat(esh_args.evts->ev_xname, xname,
981 sizeof(esh_args.evts->ev_xname));
982
983 esh_args.evtch = evtch;
984
985 if (ci == curcpu() || !mp_online) {
986 event_set_handler_xcall(&esh_args, NULL);
987 } else {
988 uint64_t where = xc_unicast(0, event_set_handler_xcall,
989 &esh_args, NULL, ci);
990 xc_wait(where);
991 }
992
993 mutex_exit(&cpu_lock);
994 return esh_args.ih;
995 }
996
997 /*
998 * Called on bound CPU to handle event_remove_handler()
999 * caller (on initiating CPU) holds cpu_lock on our behalf
1000 * arg1: evtch
1001 * arg2: struct intrhand *ih
1002 */
1003
1004 static void
1005 event_remove_handler_xcall(void *arg1, void *arg2)
1006 {
1007 struct intrsource *ipls;
1008 struct evtsource *evts;
1009 struct intrhand **ihp;
1010 struct cpu_info *ci;
1011 struct intrhand *ih = arg2;
1012 int evtch = (intptr_t)(arg1);
1013
1014 evts = evtsource[evtch];
1015 KASSERT(evts != NULL);
1016 KASSERT(ih != NULL);
1017 ci = ih->ih_cpu;
1018 KASSERT(ci == curcpu());
1019
1020 const u_long psl = x86_read_psl();
1021 x86_disable_intr();
1022
1023 for (ihp = &evts->ev_handlers; *ihp != NULL;
1024 ihp = &(*ihp)->ih_evt_next) {
1025 if ((*ihp) == ih)
1026 break;
1027 }
1028 if (*(ihp) == NULL) {
1029 panic("event_remove_handler_xcall: not in ev_handlers");
1030 }
1031
1032 *ihp = ih->ih_evt_next;
1033
1034 int sir = XEN_IPL2SIR(ih->ih_level);
1035 KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);
1036 ipls = ci->ci_isources[sir];
1037 for (ihp = &ipls->is_handlers; *ihp != NULL; ihp = &(*ihp)->ih_next) {
1038 if (*ihp == ih)
1039 break;
1040 }
1041 if (*ihp == NULL)
1042 panic("event_remove_handler_xcall: not in is_handlers");
1043 *ihp = ih->ih_next;
1044 intr_calculatemasks(evts, evtch, ci);
1045 #ifndef XENPV
1046 evts->ev_isl->is_handlers = evts->ev_handlers;
1047 #endif
1048 if (evts->ev_handlers == NULL)
1049 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);
1050
1051 x86_write_psl(psl);
1052 }
1053
1054 int
1055 event_remove_handler(int evtch, int (*func)(void *), void *arg)
1056 {
1057 struct intrhand *ih;
1058 struct cpu_info *ci;
1059 struct evtsource *evts;
1060
1061 mutex_enter(&cpu_lock);
1062 evts = evtsource[evtch];
1063 if (evts == NULL) {
1063 mutex_exit(&cpu_lock);
1064 return ENOENT;
1064 }
1065
1066 for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
1067 if (ih->ih_realfun == func && ih->ih_realarg == arg)
1068 break;
1069 }
1070 if (ih == NULL) {
1071 mutex_exit(&cpu_lock);
1072 return ENOENT;
1073 }
1074 ci = ih->ih_cpu;
1075
1076 if (ci == curcpu() || !mp_online) {
1077 event_remove_handler_xcall((void *)(intptr_t)evtch, ih);
1078 } else {
1079 uint64_t where = xc_unicast(0, event_remove_handler_xcall,
1080 (void *)(intptr_t)evtch, ih, ci);
1081 xc_wait(where);
1082 }
1083
1084 kmem_free(ih, sizeof (struct intrhand));
1085 if (evts->ev_handlers == NULL) {
1086 #ifndef XENPV
1087 KASSERT(evts->ev_isl->is_handlers == NULL);
1088 intr_free_io_intrsource(evts->ev_intrname);
1089 #endif
1090 evcnt_detach(&evts->ev_evcnt);
1091 kmem_free(evts, sizeof (struct evtsource));
1092 evtsource[evtch] = NULL;
1093 }
1094 mutex_exit(&cpu_lock);
1095 return 0;
1096 }
1097
1098 #if defined(XENPV) && (NPCI > 0 || NISA > 0)
1099 void
1100 hypervisor_prime_pirq_event(int pirq, unsigned int evtch)
1101 {
1102 struct physdev_irq_status_query irq_status;
1103 irq_status.irq = pirq;
1104 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status) < 0)
1105 panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)");
1106 if (irq_status.flags & XENIRQSTAT_needs_eoi) {
1107 evtch_to_pirq_eoi[evtch] = pirq;
1108 #ifdef IRQ_DEBUG
1109 printf("pirq %d needs notify\n", pirq);
1110 #endif
1111 }
1112 }
1113
1114 void
1115 hypervisor_ack_pirq_event(unsigned int evtch)
1116 {
1117 #ifdef IRQ_DEBUG
1118 if (evtch == IRQ_DEBUG)
1119 printf("%s: evtch %d\n", __func__, evtch);
1120 #endif
1121
1122 if (evtch_to_pirq_eoi[evtch] > 0) {
1123 struct physdev_eoi eoi;
1124 eoi.irq = evtch_to_pirq_eoi[evtch];
1125 #ifdef IRQ_DEBUG
1126 if (evtch == IRQ_DEBUG)
1127 printf("pirq_notify(%d)\n", evtch);
1128 #endif
1129 (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
1130 }
1131 }
1132 #endif /* defined(XENPV) && (NPCI > 0 || NISA > 0) */
1133
1134 int
1135 xen_debug_handler(void *arg)
1136 {
1137 struct cpu_info *ci = curcpu();
1138 int i;
1139 int xci_ilevel = ci->ci_ilevel;
1140 int xci_ipending = ci->ci_ipending;
1141 int xci_idepth = ci->ci_idepth;
1142 u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending;
1143 u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask;
1144 u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel;
1145 unsigned long evtchn_mask[sizeof(unsigned long) * 8];
1146 unsigned long evtchn_pending[sizeof(unsigned long) * 8];
1147
1148 u_long p;
1149
1150 p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0];
1151 memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask));
1152 p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0];
1153 memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending));
1154
1155 __insn_barrier();
1156 printf("debug event\n");
1157 printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n",
1158 xci_ilevel, xci_ipending, xci_idepth);
1159 printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld"
1160 " evtchn_pending_sel 0x%lx\n",
1161 upcall_pending, upcall_mask, pending_sel);
1162 printf("evtchn_mask");
1163 for (i = 0 ; i <= LONG_MASK; i++)
1164 printf(" %lx", (u_long)evtchn_mask[i]);
1165 printf("\n");
1166 printf("evtchn_pending");
1167 for (i = 0 ; i <= LONG_MASK; i++)
1168 printf(" %lx", (u_long)evtchn_pending[i]);
1169 printf("\n");
1170 return 0;
1171 }
1172
1173 static struct evtsource *
1174 event_get_handler(const char *intrid)
1175 {
1176 for (int i = 0; i < NR_EVENT_CHANNELS; i++) {
1177 if (evtsource[i] == NULL || i == debug_port)
1178 continue;
1179
1180 struct evtsource *evp = evtsource[i];
1181
1182 if (strcmp(evp->ev_intrname, intrid) == 0)
1183 return evp;
1184 }
1185
1186 return NULL;
1187 }
1188
1189 static uint64_t
1190 xen_intr_get_count(const char *intrid, u_int cpu_idx)
1191 {
1192 int count = 0;
1193 struct evtsource *evp;
1194
1195 mutex_spin_enter(&evtchn_lock);
1196
1197 evp = event_get_handler(intrid);
1198 if (evp != NULL && cpu_idx == cpu_index(evp->ev_cpu))
1199 count = evp->ev_evcnt.ev_count;
1200
1201 mutex_spin_exit(&evtchn_lock);
1202
1203 return count;
1204 }
1205
1206 static void
1207 xen_intr_get_assigned(const char *intrid, kcpuset_t *cpuset)
1208 {
1209 struct evtsource *evp;
1210
1211 kcpuset_zero(cpuset);
1212
1213 mutex_spin_enter(&evtchn_lock);
1214
1215 evp = event_get_handler(intrid);
1216 if (evp != NULL)
1217 kcpuset_set(cpuset, cpu_index(evp->ev_cpu));
1218
1219 mutex_spin_exit(&evtchn_lock);
1220 }
1221
1222 static void
1223 xen_intr_get_devname(const char *intrid, char *buf, size_t len)
1224 {
1225 struct evtsource *evp;
1226
1227 mutex_spin_enter(&evtchn_lock);
1228
1229 evp = event_get_handler(intrid);
1230 strlcpy(buf, evp ? evp->ev_xname : "unknown", len);
1231
1232 mutex_spin_exit(&evtchn_lock);
1233 }
1234
1235 #ifdef XENPV
1236 /*
1237 * MI interface for subr_interrupt.
1238 */
1239 struct intrids_handler *
1240 interrupt_construct_intrids(const kcpuset_t *cpuset)
1241 {
1242 struct intrids_handler *ii_handler;
1243 intrid_t *ids;
1244 int i, count, off;
1245 struct evtsource *evp;
1246
1247 if (kcpuset_iszero(cpuset))
1248 return 0;
1249
1250 /*
1251 * Count the number of interrupts which have affinity to any cpu of "cpuset".
1252 */
1253 count = 0;
1254 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
1255 evp = evtsource[i];
1256
1257 if (evp == NULL || i == debug_port)
1258 continue;
1259
1260 if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu)))
1261 continue;
1262
1263 count++;
1264 }
1265
1266 ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
1267 KM_SLEEP);
1268 if (ii_handler == NULL)
1269 return NULL;
1270 ii_handler->iih_nids = count;
1271 if (count == 0)
1272 return ii_handler;
1273
1274 ids = ii_handler->iih_intrids;
1275 mutex_spin_enter(&evtchn_lock);
1276 for (i = 0, off = 0; i < NR_EVENT_CHANNELS && off < count; i++) {
1277 evp = evtsource[i];
1278
1279 if (evp == NULL || i == debug_port)
1280 continue;
1281
1282 if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu)))
1283 continue;
1284
1285 snprintf(ids[off], sizeof(intrid_t), "%s", evp->ev_intrname);
1286 off++;
1287 }
1288 mutex_spin_exit(&evtchn_lock);
1289 return ii_handler;
1290 }
1291 __strong_alias(interrupt_get_count, xen_intr_get_count);
1292 __strong_alias(interrupt_get_assigned, xen_intr_get_assigned);
1293 __strong_alias(interrupt_get_devname, xen_intr_get_devname);
1294 __strong_alias(x86_intr_get_count, xen_intr_get_count);
1295 __strong_alias(x86_intr_get_assigned, xen_intr_get_assigned);
1296 __strong_alias(x86_intr_get_devname, xen_intr_get_devname);
1297 #endif /* XENPV */
1298