xref: /freebsd/sys/x86/xen/xen_arch_intr.c (revision 1d386b48)
/*-
 * SPDX-License-Identifier: MIT OR GPL-2.0-only
 *
 * Copyright © 2015 Julien Grall
 * Copyright © 2013 Spectra Logic Corporation
 * Copyright © 2018 John Baldwin/The FreeBSD Foundation
 * Copyright © 2019 Roger Pau Monné/Citrix Systems R&D
 * Copyright © 2021 Elliott Mitchell
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/stddef.h>

#include <xen/xen-os.h>
#include <xen/xen_intr.h>
#include <machine/xen/arch-intr.h>

#include <x86/apicvar.h>

/************************ Xen x86 interrupt interface ************************/

/*
 * Pointers to the interrupt counters
 */
DPCPU_DEFINE_STATIC(u_long *, pintrcnt);

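/*
 * Register a per-CPU event channel upcall counter for every CPU so that
 * upcall activity shows up in the system interrupt statistics.  Only done
 * when running as a Xen guest.
 */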
static void
xen_intrcnt_init(void *dummy __unused)
{
	unsigned int i;

	if (!xen_domain())
		return;

	CPU_FOREACH(i) {
		char buf[MAXCOMLEN + 1];

		snprintf(buf, sizeof(buf), "cpu%d:xen", i);
		intrcnt_add(buf, DPCPU_ID_PTR(i, pintrcnt));
	}
}
SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);

/*
 * Transition from assembly language, called from
 * sys/{amd64/amd64|i386/i386}/apic_vector.S
 */
extern void xen_arch_intr_handle_upcall(struct trapframe *);
void
xen_arch_intr_handle_upcall(struct trapframe *trap_frame)
{
	struct trapframe *old;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU.
	 */
	critical_enter();

	++*DPCPU_GET(pintrcnt);

	++curthread->td_intr_nesting_level;
	old = curthread->td_intr_frame;
	curthread->td_intr_frame = trap_frame;

	xen_intr_handle_upcall(NULL);

	curthread->td_intr_frame = old;
	--curthread->td_intr_nesting_level;

	if (xen_evtchn_needs_ack)
		lapic_eoi();

	critical_exit();
}

/******************************** EVTCHN PIC *********************************/

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

/*
 * Lock for x86-related structures.  Notably, modifying
 * xen_intr_auto_vector_count and allocating interrupts require this lock to
 * be held.
 */
static struct mtx	xen_intr_x86_lock;

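/*
 * First IRQ number reserved for event channels; set by
 * xen_intr_alloc_irqs().
 */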
static u_int		first_evtchn_irq;

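/*
 * Number of event channel interrupt sources allocated so far; protected by
 * xen_intr_x86_lock.
 */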
static u_int		xen_intr_auto_vector_count;

/*
 * List of released isrcs.  This is meant to overlay struct xenisrc, with
 * only the xen_arch_isrc_t portion being preserved; everything else can be
 * wiped.
 */
struct avail_list {
	xen_arch_isrc_t preserve;
	SLIST_ENTRY(avail_list) free;
};
static SLIST_HEAD(free, avail_list) avail_list =
    SLIST_HEAD_INITIALIZER(avail_list);

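/*
 * Reserve a contiguous block of NR_EVENT_CHANNELS IRQ numbers for event
 * channel use, starting at the current value of num_io_irqs.
 */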
void
xen_intr_alloc_irqs(void)
{

	if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS)
		panic("IRQ allocation overflow (num_msi_irqs too high?)");
	first_evtchn_irq = num_io_irqs;
	num_io_irqs += NR_EVENT_CHANNELS;
}

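/*
 * PIC interface wrapper: re-enable delivery from an event channel
 * interrupt source.
 */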
static void
xen_intr_pic_enable_source(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_enable_source((struct xenisrc *)isrc);
}

/*
 * Disable delivery from an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 * \param eoi   Whether an end-of-interrupt acknowledgement was also
 *              requested (event channels require no EOI handling).
 */
static void
xen_intr_pic_disable_source(struct intsrc *isrc, int eoi)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_disable_source((struct xenisrc *)isrc);
}

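/*
 * End-of-interrupt hook; event channels require no EOI handling here.
 */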
static void
xen_intr_pic_eoi_source(struct intsrc *isrc)
{

	/* Nothing to do on end-of-interrupt */
}

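/*
 * PIC interface wrapper: enable an event channel interrupt source.
 */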
static void
xen_intr_pic_enable_intr(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_enable_intr((struct xenisrc *)isrc);
}

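/*
 * PIC interface wrapper: disable an event channel interrupt source.
 */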
static void
xen_intr_pic_disable_intr(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_disable_intr((struct xenisrc *)isrc);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_pic_vector(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");

	return (((struct xenisrc *)isrc)->xi_arch.vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_pic_source_pending(struct intsrc *isrc)
{
	/*
	 * EventChannels are edge triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}

/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_pic_suspend(struct pic *pic)
{

	/* Nothing to do on suspend */
}

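/*
 * Restore event channel interrupt state after a resume, unless the
 * suspend was cancelled.
 */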
static void
xen_intr_pic_resume(struct pic *pic, bool suspend_cancelled)
{

	if (!suspend_cancelled)
		xen_intr_resume();
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno value.
 */
static int
xen_intr_pic_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn apis. */
	return (ENODEV);
}

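/*
 * Bind an event channel interrupt source to the CPU with the given
 * APIC ID.
 */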
static int
xen_intr_pic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	return (xen_intr_assign_cpu((struct xenisrc *)isrc,
	    apic_cpuid(apic_id)));
}

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
static struct pic xen_intr_pic = {
	.pic_enable_source  = xen_intr_pic_enable_source,
	.pic_disable_source = xen_intr_pic_disable_source,
	.pic_eoi_source     = xen_intr_pic_eoi_source,
	.pic_enable_intr    = xen_intr_pic_enable_intr,
	.pic_disable_intr   = xen_intr_pic_disable_intr,
	.pic_vector         = xen_intr_pic_vector,
	.pic_source_pending = xen_intr_pic_source_pending,
	.pic_suspend        = xen_intr_pic_suspend,
	.pic_resume         = xen_intr_pic_resume,
	.pic_config_intr    = xen_intr_pic_config_intr,
	.pic_assign_cpu     = xen_intr_pic_assign_cpu,
};

/******************************* ARCH wrappers *******************************/

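/*
 * Initialize the lock protecting the x86-specific structures and register
 * the event channel PIC with the generic x86 interrupt code.
 */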
void
xen_arch_intr_init(void)
{
	int error;

	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);

	error = intr_register_pic(&xen_intr_pic);
	if (error != 0)
		panic("%s(): failed registering Xen/x86 PIC, error=%d\n",
		    __func__, error);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object, or NULL if no interrupt sources remain available.
 */
struct xenisrc *
xen_arch_intr_alloc(void)
{
	static int warned;
	struct xenisrc *isrc;
	unsigned int vector;
	int error;

	mtx_lock(&xen_intr_x86_lock);
	isrc = (struct xenisrc *)SLIST_FIRST(&avail_list);
	if (isrc != NULL) {
		SLIST_REMOVE_HEAD(&avail_list, free);
		mtx_unlock(&xen_intr_x86_lock);

		KASSERT(isrc->xi_arch.intsrc.is_pic == &xen_intr_pic,
		    ("interrupt not owned by Xen code?"));

		KASSERT(isrc->xi_arch.intsrc.is_handlers == 0,
		    ("Free evtchn still has handlers"));

		return (isrc);
	}

	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("%s: Xen interrupts exhausted.\n", __func__);
		}
		mtx_unlock(&xen_intr_x86_lock);
		return (NULL);
	}

	vector = first_evtchn_irq + xen_intr_auto_vector_count;
	xen_intr_auto_vector_count++;

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

	mtx_unlock(&xen_intr_x86_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_arch.intsrc.is_pic = &xen_intr_pic;
	isrc->xi_arch.vector = vector;
	error = intr_register_source(&isrc->xi_arch.intsrc);
	if (error != 0)
		panic("%s(): failed registering interrupt %u, error=%d\n",
		    __func__, vector, error);

	return (isrc);
}

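/*
 * Return a no-longer-used interrupt source to avail_list so that a later
 * xen_arch_intr_alloc() can reuse it.  Only the xen_arch_isrc_t portion of
 * the structure is preserved.
 */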
void
xen_arch_intr_release(struct xenisrc *isrc)
{

	KASSERT(isrc->xi_arch.intsrc.is_handlers == 0,
	    ("Release called, but xenisrc still in use"));

	_Static_assert(sizeof(struct xenisrc) >= sizeof(struct avail_list),
	    "unused structure MUST be no larger than in-use structure");
	_Static_assert(offsetof(struct xenisrc, xi_arch) ==
	    offsetof(struct avail_list, preserve),
	    "unused structure does not properly overlay in-use structure");

	mtx_lock(&xen_intr_x86_lock);
	SLIST_INSERT_HEAD(&avail_list, (struct avail_list *)isrc, free);
	mtx_unlock(&xen_intr_x86_lock);
}