xref: /netbsd/sys/arch/xen/xen/evtchn.c (revision 6550d01e)
/*	$NetBSD: evtchn.c,v 1.47 2010/12/20 00:25:46 matt Exp $	*/

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * Copyright (c) 2004, K A Fraser.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.47 2010/12/20 00:25:46 matt Exp $");

#include "opt_xen.h"
#include "isa.h"
#include "pci.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/reboot.h>
#include <sys/simplelock.h>

#include <uvm/uvm.h>

#include <machine/intrdefs.h>

#include <xen/xen.h>
#include <xen/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/xenfunc.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static struct simplelock irq_mapping_update_lock = SIMPLELOCK_INITIALIZER;

/* event handlers */
struct evtsource *evtsource[NR_EVENT_CHANNELS];

/* Reference counts for bindings to event channels */
static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];

/* event-channel <-> VIRQ mapping. */
static int virq_to_evtch[NR_VIRQS];


#if NPCI > 0 || NISA > 0
/* event-channel <-> PIRQ mapping */
static int pirq_to_evtch[NR_PIRQS];
/* PIRQ needing notify */
static uint32_t pirq_needs_unmask_notify[NR_EVENT_CHANNELS / 32];
int pirq_interrupt(void *);
physdev_op_t physdev_op_notify = {
	.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY,
};
#endif

int debug_port = -1;

// #define IRQ_DEBUG 4

/* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */
#ifdef MULTIPROCESSOR

/*
 * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
 */

int
intr_biglock_wrapper(void *vp)
{
	struct intrhand *ih = vp;
	int ret;

	KERNEL_LOCK(1, NULL);

	ret = (*ih->ih_realfun)(ih->ih_realarg);

	KERNEL_UNLOCK_ONE(NULL);

	return ret;
}
#endif /* MULTIPROCESSOR */

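/*
 * events_default_setup: reset all event-channel state to a known
 * baseline: no VIRQ or PIRQ mappings, no handlers bound, and every
 * event channel masked.
 */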
void
events_default_setup(void)
{
	int i;

	/* No VIRQ -> event mappings. */
	for (i = 0; i < NR_VIRQS; i++)
		virq_to_evtch[i] = -1;

#if NPCI > 0 || NISA > 0
	/* No PIRQ -> event mappings. */
	for (i = 0; i < NR_PIRQS; i++)
		pirq_to_evtch[i] = -1;
	for (i = 0; i < NR_EVENT_CHANNELS / 32; i++)
		pirq_needs_unmask_notify[i] = 0;
#endif

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		evtsource[i] = NULL;
		evtch_bindcount[i] = 0;
		hypervisor_mask_event(i);
	}
}

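/*
 * events_init: bind VIRQ_DEBUG to an event channel and enable it,
 * then enable interrupts.  The debug port is dispatched through a
 * shortcut in evtchn_do_event() rather than a regular handler.
 */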
void
events_init(void)
{
	debug_port = bind_virq_to_evtch(VIRQ_DEBUG);
	aprint_verbose("debug virtual interrupt using event channel %d\n",
	    debug_port);
	/*
	 * Don't call event_set_handler(); we use a shortcut instead.
	 * Just set evtsource[] to a non-NULL value so that
	 * evtchn_do_event() will be called.
	 */
	evtsource[debug_port] = (void *)-1;
	hypervisor_enable_event(debug_port);

	x86_enable_intr();		/* at long last... */
}

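/*
 * evtchn_do_event: dispatch the handlers registered on event channel
 * `evtch'.  If the current IPL is too high to run them now, mark the
 * event pending (and leave it masked) for later delivery; otherwise
 * run each handler at its own IPL, then drain any interrupts that
 * became pending while we were raised (the open-coded spllower()
 * at the end).
 */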
unsigned int
evtchn_do_event(int evtch, struct intrframe *regs)
{
	struct cpu_info *ci;
	int ilevel;
	struct intrhand *ih;
	int	(*ih_fun)(void *, void *);
	uint32_t iplmask;
	int i;
	uint32_t iplbit;

#ifdef DIAGNOSTIC
	if (evtch >= NR_EVENT_CHANNELS) {
		printf("event number %d >= NR_EVENT_CHANNELS\n", evtch);
		panic("evtchn_do_event");
	}
#endif

#ifdef IRQ_DEBUG
	if (evtch == IRQ_DEBUG)
		printf("evtchn_do_event: evtch %d\n", evtch);
#endif
	ci = &cpu_info_primary;

	/*
	 * Shortcut for the debug handler: we want it to always run,
	 * regardless of the IPL level.
	 */
	if (__predict_false(evtch == debug_port)) {
		xen_debug_handler(NULL);
		hypervisor_enable_event(evtch);
		return 0;
	}

#ifdef DIAGNOSTIC
	if (evtsource[evtch] == NULL) {
		panic("evtchn_do_event: unknown event");
	}
#endif
	ci->ci_data.cpu_nintr++;
	evtsource[evtch]->ev_evcnt.ev_count++;
	ilevel = ci->ci_ilevel;
	if (evtsource[evtch]->ev_maxlevel <= ilevel) {
#ifdef IRQ_DEBUG
		if (evtch == IRQ_DEBUG)
			printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
			    evtch, evtsource[evtch]->ev_maxlevel, ilevel);
#endif
		hypervisor_set_ipending(evtsource[evtch]->ev_imask,
		    evtch >> LONG_SHIFT, evtch & LONG_MASK);
		/* leave masked */
		return 0;
	}
	ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
	iplmask = evtsource[evtch]->ev_imask;
	sti();
	ih = evtsource[evtch]->ev_handlers;
	while (ih != NULL) {
		if (ih->ih_level <= ilevel) {
#ifdef IRQ_DEBUG
			if (evtch == IRQ_DEBUG)
				printf("ih->ih_level %d <= ilevel %d\n",
				    ih->ih_level, ilevel);
#endif
			cli();
			hypervisor_set_ipending(iplmask,
			    evtch >> LONG_SHIFT, evtch & LONG_MASK);
			/* leave masked */
			goto splx;
		}
		iplmask &= ~IUNMASK(ci, ih->ih_level);
		ci->ci_ilevel = ih->ih_level;
		ih_fun = (void *)ih->ih_fun;
		ih_fun(ih->ih_arg, regs);
		ih = ih->ih_evt_next;
	}
	cli();
	hypervisor_enable_event(evtch);
splx:
	/*
	 * C version of spllower(). ASTs will be checked when
	 * hypervisor_callback() exits, so no need to check here.
	 */
	iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending);
	while (iplmask != 0) {
		iplbit = 1 << (NIPL - 1);
		i = (NIPL - 1);
		while (iplmask != 0 && i > ilevel) {
			while (iplmask & iplbit) {
				ci->ci_ipending &= ~iplbit;
				ci->ci_ilevel = i;
				for (ih = ci->ci_isources[i]->ipl_handlers;
				    ih != NULL; ih = ih->ih_ipl_next) {
					sti();
					ih_fun = (void *)ih->ih_fun;
					ih_fun(ih->ih_arg, regs);
					cli();
					if (ci->ci_ilevel != i) {
						printf("evtchn_do_event: "
						    "handler %p didn't lower "
						    "ipl %d %d\n",
						    ih_fun, ci->ci_ilevel, i);
						ci->ci_ilevel = i;
					}
				}
				hypervisor_enable_ipl(i);
				/* more pending IPLs may have been registered */
				iplmask =
				    (IUNMASK(ci, ilevel) & ci->ci_ipending);
			}
			i--;
			iplbit >>= 1;
		}
	}
	ci->ci_ilevel = ilevel;
	return 0;
}

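/*
 * bind_virq_to_evtch: return the event channel bound to virtual IRQ
 * `virq', asking the hypervisor to allocate one on first use, and
 * take a reference on it.
 */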
int
bind_virq_to_evtch(int virq)
{
	evtchn_op_t op;
	int evtchn, s;

	s = splhigh();
	simple_lock(&irq_mapping_update_lock);

	evtchn = virq_to_evtch[virq];
	if (evtchn == -1) {
		op.cmd = EVTCHNOP_bind_virq;
		op.u.bind_virq.virq = virq;
		op.u.bind_virq.vcpu = 0;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind virtual IRQ %d\n", virq);
		evtchn = op.u.bind_virq.port;

		virq_to_evtch[virq] = evtchn;
	}

	evtch_bindcount[evtchn]++;

	simple_unlock(&irq_mapping_update_lock);
	splx(s);

	return evtchn;
}

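/*
 * unbind_virq_from_evtch: drop a reference on the event channel bound
 * to `virq' and close the channel when the last reference goes away.
 */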
int
unbind_virq_from_evtch(int virq)
{
	evtchn_op_t op;
	int evtchn = virq_to_evtch[virq];
	int s = splhigh();

	simple_lock(&irq_mapping_update_lock);

	evtch_bindcount[evtchn]--;
	if (evtch_bindcount[evtchn] == 0) {
		op.cmd = EVTCHNOP_close;
		op.u.close.port = evtchn;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to unbind virtual IRQ %d\n", virq);

		virq_to_evtch[virq] = -1;
	}

	simple_unlock(&irq_mapping_update_lock);
	splx(s);

	return evtchn;
}

#if NPCI > 0 || NISA > 0
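/*
 * bind_pirq_to_evtch: as bind_virq_to_evtch(), but for a physical IRQ.
 */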
int
bind_pirq_to_evtch(int pirq)
{
	evtchn_op_t op;
	int evtchn, s;

	if (pirq >= NR_PIRQS) {
		panic("pirq %d out of bounds, increase NR_PIRQS", pirq);
	}

	s = splhigh();
	simple_lock(&irq_mapping_update_lock);

	evtchn = pirq_to_evtch[pirq];
	if (evtchn == -1) {
		op.cmd = EVTCHNOP_bind_pirq;
		op.u.bind_pirq.pirq = pirq;
		op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind physical IRQ %d\n", pirq);
		evtchn = op.u.bind_pirq.port;

#ifdef IRQ_DEBUG
		printf("pirq %d evtchn %d\n", pirq, evtchn);
#endif
		pirq_to_evtch[pirq] = evtchn;
	}

	evtch_bindcount[evtchn]++;

	simple_unlock(&irq_mapping_update_lock);
	splx(s);

	return evtchn;
}

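/*
 * unbind_pirq_from_evtch: as unbind_virq_from_evtch(), but for a
 * physical IRQ.
 */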
int
unbind_pirq_from_evtch(int pirq)
{
	evtchn_op_t op;
	int evtchn = pirq_to_evtch[pirq];
	int s = splhigh();

	simple_lock(&irq_mapping_update_lock);

	evtch_bindcount[evtchn]--;
	if (evtch_bindcount[evtchn] == 0) {
		op.cmd = EVTCHNOP_close;
		op.u.close.port = evtchn;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to unbind physical IRQ %d\n", pirq);

		pirq_to_evtch[pirq] = -1;
	}

	simple_unlock(&irq_mapping_update_lock);
	splx(s);

	return evtchn;
}

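/*
 * pirq_establish: register `func' as the handler for physical IRQ
 * `pirq' on event channel `evtch', querying the hypervisor for
 * whether the PIRQ needs an explicit unmask notify after each event.
 * Returns the handler cookie, or NULL on failure.
 */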
struct pintrhand *
pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level,
    const char *evname)
{
	struct pintrhand *ih;
	physdev_op_t physdev_op;

	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL) {
		printf("pirq_establish: can't malloc handler info\n");
		return NULL;
	}
	if (event_set_handler(evtch, pirq_interrupt, ih, level, evname) != 0) {
		free(ih, M_DEVBUF);
		return NULL;
	}
	ih->pirq = pirq;
	ih->evtch = evtch;
	ih->func = func;
	ih->arg = arg;

	physdev_op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
	physdev_op.u.irq_status_query.irq = pirq;
	if (HYPERVISOR_physdev_op(&physdev_op) < 0)
		panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)");
	if (physdev_op.u.irq_status_query.flags &
	    PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY) {
		pirq_needs_unmask_notify[evtch >> 5] |= (1 << (evtch & 0x1f));
#ifdef IRQ_DEBUG
		printf("pirq %d needs notify\n", pirq);
#endif
	}
	hypervisor_enable_event(evtch);
	return ih;
}

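/*
 * pirq_interrupt: glue between the event-channel dispatch and the
 * driver's real PIRQ handler.
 */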
int
pirq_interrupt(void *arg)
{
	struct pintrhand *ih = arg;
	int ret;

	ret = ih->func(ih->arg);
#ifdef IRQ_DEBUG
	if (ih->evtch == IRQ_DEBUG)
		printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret);
#endif
	return ret;
}

#endif /* NPCI > 0 || NISA > 0 */

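/*
 * event_set_handler: register `func' to run at IPL `level' when event
 * channel `evtch' fires.  The handler is linked both on the event
 * channel (sorted by IPL, highest first) and on the per-IPL list used
 * by spllower().  Note that the caller must still unmask the channel,
 * e.g. with hypervisor_enable_event().  A hypothetical caller might
 * look like:
 *
 *	evtch = bind_virq_to_evtch(VIRQ_TIMER);
 *	event_set_handler(evtch, xen_timer_handler, NULL, IPL_CLOCK,
 *	    "clock");
 *	hypervisor_enable_event(evtch);
 *
 * (xen_timer_handler is an illustrative name, not a function defined
 * in this file.)
 */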
int
event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
    const char *evname)
{
	struct cpu_info *ci = &cpu_info_primary;
	struct evtsource *evts;
	struct intrhand *ih, **ihp;
	int s;
#ifdef MULTIPROCESSOR
	bool mpsafe = (level != IPL_VM);
#endif /* MULTIPROCESSOR */

#ifdef IRQ_DEBUG
	printf("event_set_handler IRQ %d handler %p\n", evtch, func);
#endif

#ifdef DIAGNOSTIC
	if (evtch >= NR_EVENT_CHANNELS) {
		printf("evtch number %d >= NR_EVENT_CHANNELS\n", evtch);
		panic("event_set_handler");
	}
#endif

#if 0
	printf("event_set_handler evtch %d handler %p level %d\n", evtch,
	       func, level);
#endif
	ih = malloc(sizeof (struct intrhand), M_DEVBUF,
	    M_WAITOK|M_ZERO);
	if (ih == NULL)
		panic("can't allocate fixed interrupt source");

	ih->ih_level = level;
	ih->ih_fun = ih->ih_realfun = func;
	ih->ih_arg = ih->ih_realarg = arg;
	ih->ih_evt_next = NULL;
	ih->ih_ipl_next = NULL;
#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	s = splhigh();

	/* register handler for spllower() */
	event_set_iplhandler(ih, level);

	/* register handler for event channel */
	if (evtsource[evtch] == NULL) {
		evts = malloc(sizeof (struct evtsource),
		    M_DEVBUF, M_WAITOK|M_ZERO);
		if (evts == NULL)
			panic("can't allocate fixed interrupt source");
		evts->ev_handlers = ih;
		evtsource[evtch] = evts;
		if (evname)
			strncpy(evts->ev_evname, evname,
			    sizeof(evts->ev_evname));
		else
			snprintf(evts->ev_evname, sizeof(evts->ev_evname),
			    "evt%d", evtch);
		evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL,
		    device_xname(ci->ci_dev), evts->ev_evname);
	} else {
		evts = evtsource[evtch];
		/* sort by IPL order, higher first */
		for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) {
			if ((*ihp)->ih_level < ih->ih_level) {
				/* insert before *ihp */
				ih->ih_evt_next = *ihp;
				*ihp = ih;
				break;
			}
			if ((*ihp)->ih_evt_next == NULL) {
				(*ihp)->ih_evt_next = ih;
				break;
			}
		}
	}

	intr_calculatemasks(evts);
	splx(s);

	return 0;
}

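/*
 * event_set_iplhandler: link `ih' on the per-CPU list of handlers for
 * IPL `level', creating the iplsource on first use.
 */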
void
event_set_iplhandler(struct intrhand *ih, int level)
{
	struct cpu_info *ci = &cpu_info_primary;
	struct iplsource *ipls;

	if (ci->ci_isources[level] == NULL) {
		ipls = malloc(sizeof (struct iplsource),
		    M_DEVBUF, M_WAITOK|M_ZERO);
		if (ipls == NULL)
			panic("can't allocate fixed interrupt source");
		ipls->ipl_recurse = xenev_stubs[level].ist_recurse;
		ipls->ipl_resume = xenev_stubs[level].ist_resume;
		ipls->ipl_handlers = ih;
		ci->ci_isources[level] = ipls;
	} else {
		ipls = ci->ci_isources[level];
		ih->ih_ipl_next = ipls->ipl_handlers;
		ipls->ipl_handlers = ih;
	}
}

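/*
 * event_remove_handler: undo event_set_handler(): unlink the handler
 * matching (func, arg) from both the event-channel and per-IPL lists,
 * freeing the event source when its last handler is removed.
 */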
int
event_remove_handler(int evtch, int (*func)(void *), void *arg)
{
	struct iplsource *ipls;
	struct evtsource *evts;
	struct intrhand *ih;
	struct intrhand **ihp;
	struct cpu_info *ci = &cpu_info_primary;

	evts = evtsource[evtch];
	if (evts == NULL)
		return ENOENT;

	/*
	 * Compare against the real function and argument, as ih_fun/ih_arg
	 * may point at the biglock wrapper (see event_set_handler()).
	 */
	for (ihp = &evts->ev_handlers, ih = evts->ev_handlers;
	    ih != NULL;
	    ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) {
		if (ih->ih_realfun == func && ih->ih_realarg == arg)
			break;
	}
	if (ih == NULL)
		return ENOENT;
	*ihp = ih->ih_evt_next;

	ipls = ci->ci_isources[ih->ih_level];
	for (ihp = &ipls->ipl_handlers, ih = ipls->ipl_handlers;
	    ih != NULL;
	    ihp = &ih->ih_ipl_next, ih = ih->ih_ipl_next) {
		if (ih->ih_realfun == func && ih->ih_realarg == arg)
			break;
	}
	if (ih == NULL)
		panic("event_remove_handler");
	*ihp = ih->ih_ipl_next;
	free(ih, M_DEVBUF);
	if (evts->ev_handlers == NULL) {
		evcnt_detach(&evts->ev_evcnt);
		free(evts, M_DEVBUF);
		evtsource[evtch] = NULL;
	} else {
		intr_calculatemasks(evts);
	}
	return 0;
}

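/*
 * hypervisor_enable_event: unmask `evtch', and send the hypervisor an
 * unmask notify if the underlying PIRQ asked for one.
 */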
void
hypervisor_enable_event(unsigned int evtch)
{
#ifdef IRQ_DEBUG
	if (evtch == IRQ_DEBUG)
		printf("hypervisor_enable_evtch: evtch %d\n", evtch);
#endif

	hypervisor_unmask_event(evtch);
#if NPCI > 0 || NISA > 0
	if (pirq_needs_unmask_notify[evtch >> 5] & (1 << (evtch & 0x1f))) {
#ifdef IRQ_DEBUG
		if (evtch == IRQ_DEBUG)
			printf("pirq_notify(%d)\n", evtch);
#endif
		(void)HYPERVISOR_physdev_op(&physdev_op_notify);
	}
#endif /* NPCI > 0 || NISA > 0 */
}

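/*
 * xen_debug_handler: dump the interrupt state of the current CPU and
 * the shared-info event-channel masks, for VIRQ_DEBUG.
 */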
int
xen_debug_handler(void *arg)
{
	struct cpu_info *ci = curcpu();
	int i;
	int xci_ilevel = ci->ci_ilevel;
	int xci_ipending = ci->ci_ipending;
	int xci_idepth = ci->ci_idepth;
	u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending;
	u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask;
	u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel;
	unsigned long evtchn_mask[sizeof(unsigned long) * 8];
	unsigned long evtchn_pending[sizeof(unsigned long) * 8];
	u_long p;

	p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0];
	memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask));
	p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0];
	memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending));

	__insn_barrier();
	printf("debug event\n");
	printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n",
	    xci_ilevel, xci_ipending, xci_idepth);
	printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld"
	    " evtchn_pending_sel 0x%lx\n",
	    upcall_pending, upcall_mask, pending_sel);
	printf("evtchn_mask");
	for (i = 0; i <= LONG_MASK; i++)
		printf(" %lx", (u_long)evtchn_mask[i]);
	printf("\n");
	printf("evtchn_pending");
	for (i = 0; i <= LONG_MASK; i++)
		printf(" %lx", (u_long)evtchn_pending[i]);
	printf("\n");
	return 0;
}