/* $NetBSD: shared_intr.c,v 1.29 2021/07/04 22:36:43 thorpej Exp $ */

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Common shared-interrupt-line functionality.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: shared_intr.c,v 1.29 2021/07/04 22:36:43 thorpej Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/atomic.h>
#include <sys/intr.h>
#include <sys/xcall.h>

static const char *
intr_typename(int type)
{

	switch (type) {
	case IST_UNUSABLE:
		return ("disabled");
	case IST_NONE:
		return ("none");
	case IST_PULSE:
		return ("pulsed");
	case IST_EDGE:
		return ("edge-triggered");
	case IST_LEVEL:
		return ("level-triggered");
	}
	panic("intr_typename: unknown type %d", type);
}

struct alpha_shared_intr *
alpha_shared_intr_alloc(unsigned int n)
{
	struct alpha_shared_intr *intr;
	unsigned int i;

	KASSERT(n != 0);

	intr = kmem_alloc(n * sizeof(*intr), KM_SLEEP);
	for (i = 0; i < n; i++) {
		TAILQ_INIT(&intr[i].intr_q);
		intr[i].intr_sharetype = IST_NONE;
		intr[i].intr_dfltsharetype = IST_NONE;
		intr[i].intr_nstrays = 0;
		intr[i].intr_maxstrays = 0;
		intr[i].intr_private = NULL;
		intr[i].intr_cpu = NULL;
		intr[i].intr_string = kmem_asprintf("irq %u", i);
	}

	return (intr);
}

int
alpha_shared_intr_dispatch(struct alpha_shared_intr *intr, unsigned int num)
{
	struct alpha_shared_intrhand *ih;
	int rv, handled;

	atomic_add_long(&intr[num].intr_evcnt.ev_count, 1);

	ih = intr[num].intr_q.tqh_first;
	handled = 0;
	while (ih != NULL) {

		/*
		 * The handler returns one of three values:
		 *   0: This interrupt wasn't for me.
		 *   1: This interrupt was for me.
		 *  -1: This interrupt might have been for me, but I can't say
		 *      for sure.
		 */

		rv = (*ih->ih_fn)(ih->ih_arg);

		handled = handled || (rv != 0);
		ih = ih->ih_q.tqe_next;
	}

	return (handled);
}

static int
alpha_shared_intr_wrapper(void * const arg)
{
	struct alpha_shared_intrhand * const ih = arg;
	int rv;

	KERNEL_LOCK(1, NULL);
	rv = (*ih->ih_real_fn)(ih->ih_real_arg);
	KERNEL_UNLOCK_ONE(NULL);

	return rv;
}

struct alpha_shared_intrhand *
alpha_shared_intr_alloc_intrhand(struct alpha_shared_intr *intr,
    unsigned int num, int type, int level, int flags,
    int (*fn)(void *), void *arg, const char *basename)
{
	struct alpha_shared_intrhand *ih;

	if (intr[num].intr_sharetype == IST_UNUSABLE) {
		printf("%s: %s irq %u: unusable\n", __func__,
		    basename, num);
		return NULL;
	}

	KASSERT(type != IST_NONE);

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);

	ih->ih_intrhead = intr;
	ih->ih_fn = ih->ih_real_fn = fn;
	ih->ih_arg = ih->ih_real_arg = arg;
	ih->ih_level = level;
	ih->ih_type = type;
	ih->ih_num = num;

	/*
	 * Non-MPSAFE interrupts get a wrapper that takes the
	 * KERNEL_LOCK.
	 */
	if ((flags & ALPHA_INTR_MPSAFE) == 0) {
		ih->ih_fn = alpha_shared_intr_wrapper;
		ih->ih_arg = ih;
	}

	return (ih);
}

void
alpha_shared_intr_free_intrhand(struct alpha_shared_intrhand *ih)
{

	kmem_free(ih, sizeof(*ih));
}

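/*
 * Link (arg2 != NULL) or unlink (arg2 == NULL) a handler on the CPU
 * that services this interrupt line.  Interrupts are blocked at
 * ALPHA_PSL_IPL_HIGH so the dispatch loop never sees a partially
 * updated handler list.
 */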
static void
alpha_shared_intr_link_unlink_xcall(void *arg1, void *arg2)
{
	struct alpha_shared_intrhand *ih = arg1;
	struct alpha_shared_intr *intr = ih->ih_intrhead;
	unsigned int num = ih->ih_num;

	struct cpu_info *ci = intr[num].intr_cpu;

	KASSERT(ci != NULL);
	KASSERT(ci == curcpu() || !mp_online);
	KASSERT(!cpu_intr_p());

	const unsigned long psl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);

	if (arg2 != NULL) {
		TAILQ_INSERT_TAIL(&intr[num].intr_q, ih, ih_q);
		ci->ci_nintrhand++;
	} else {
		TAILQ_REMOVE(&intr[num].intr_q, ih, ih_q);
		ci->ci_nintrhand--;
	}

	alpha_pal_swpipl(psl);
}

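/*
 * Attach a previously allocated handler to an interrupt line.  Called
 * with the cpu_lock held; the list insertion itself is performed on
 * the CPU that owns the line, via xcall when necessary.
 */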
bool
alpha_shared_intr_link(struct alpha_shared_intr *intr,
    struct alpha_shared_intrhand *ih, const char *basename)
{
	int type = ih->ih_type;
	unsigned int num = ih->ih_num;

	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(ih->ih_intrhead == intr);

	switch (intr[num].intr_sharetype) {
	case IST_EDGE:
	case IST_LEVEL:
		if (type == intr[num].intr_sharetype)
			break;
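		/* FALLTHROUGH */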
	case IST_PULSE:
		if (type != IST_NONE) {
			if (intr[num].intr_q.tqh_first == NULL) {
				printf("alpha_shared_intr_establish: %s irq %u: warning: using %s on %s\n",
				    basename, num, intr_typename(type),
				    intr_typename(intr[num].intr_sharetype));
				type = intr[num].intr_sharetype;
			} else {
				printf("alpha_shared_intr_establish: %s irq %u: can't share %s with %s\n",
				    basename, num, intr_typename(type),
				    intr_typename(intr[num].intr_sharetype));
				return (false);
			}
		}
		break;

	case IST_NONE:
		/* not currently used; safe */
		break;
	}

	intr[num].intr_sharetype = type;

	/*
	 * If a CPU hasn't been assigned yet, just give it to the
	 * primary.
	 */
	if (intr[num].intr_cpu == NULL) {
		intr[num].intr_cpu = &cpu_info_primary;
	}

	kpreempt_disable();
	if (intr[num].intr_cpu == curcpu() || !mp_online) {
		alpha_shared_intr_link_unlink_xcall(ih, ih);
	} else {
		uint64_t where = xc_unicast(XC_HIGHPRI,
		    alpha_shared_intr_link_unlink_xcall, ih, ih,
		    intr[num].intr_cpu);
		xc_wait(where);
	}
	kpreempt_enable();

	return (true);
}

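/*
 * Detach a handler from an interrupt line.  Called with the cpu_lock
 * held; the removal is performed on the CPU that owns the line, via
 * xcall when necessary.
 */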
void
alpha_shared_intr_unlink(struct alpha_shared_intr *intr,
    struct alpha_shared_intrhand *ih, const char *basename)
{
	unsigned int num = ih->ih_num;

	KASSERT(mutex_owned(&cpu_lock));

	kpreempt_disable();
	if (intr[num].intr_cpu == curcpu() || !mp_online) {
		alpha_shared_intr_link_unlink_xcall(ih, NULL);
	} else {
		uint64_t where = xc_unicast(XC_HIGHPRI,
		    alpha_shared_intr_link_unlink_xcall, ih, NULL,
		    intr[num].intr_cpu);
		xc_wait(where);
	}
	kpreempt_enable();
}

int
alpha_shared_intr_get_sharetype(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (intr[num].intr_sharetype);
}

int
alpha_shared_intr_isactive(struct alpha_shared_intr *intr, unsigned int num)
{

	return (intr[num].intr_q.tqh_first != NULL);
}

int
alpha_shared_intr_firstactive(struct alpha_shared_intr *intr, unsigned int num)
{

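	/* True iff exactly one handler is attached to this line. */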
	return (intr[num].intr_q.tqh_first != NULL &&
		intr[num].intr_q.tqh_first->ih_q.tqe_next == NULL);
}

void
alpha_shared_intr_set_dfltsharetype(struct alpha_shared_intr *intr,
    unsigned int num, int newdfltsharetype)
{

#ifdef DIAGNOSTIC
	if (alpha_shared_intr_isactive(intr, num))
		panic("alpha_shared_intr_set_dfltsharetype on active intr");
#endif

	intr[num].intr_dfltsharetype = newdfltsharetype;
	intr[num].intr_sharetype = intr[num].intr_dfltsharetype;
}

void
alpha_shared_intr_set_maxstrays(struct alpha_shared_intr *intr,
    unsigned int num, int newmaxstrays)
{
	int s = splhigh();
	intr[num].intr_maxstrays = newmaxstrays;
	intr[num].intr_nstrays = 0;
	splx(s);
}

void
alpha_shared_intr_reset_strays(struct alpha_shared_intr *intr,
    unsigned int num)
{

	/*
	 * Don't bother blocking interrupts; this doesn't have to be
	 * precise, but it does need to be fast.
	 */
	intr[num].intr_nstrays = 0;
}

void
alpha_shared_intr_stray(struct alpha_shared_intr *intr, unsigned int num,
    const char *basename)
{

	intr[num].intr_nstrays++;

	if (intr[num].intr_maxstrays == 0)
		return;

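	/*
	 * Log each stray until the limit is reached; the final message
	 * notes that further strays will go unreported.
	 */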
	if (intr[num].intr_nstrays <= intr[num].intr_maxstrays)
		log(LOG_ERR, "stray %s irq %u%s\n", basename, num,
		    intr[num].intr_nstrays >= intr[num].intr_maxstrays ?
		      "; stopped logging" : "");
}

void
alpha_shared_intr_set_private(struct alpha_shared_intr *intr,
    unsigned int num, void *v)
{

	intr[num].intr_private = v;
}

void *
alpha_shared_intr_get_private(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (intr[num].intr_private);
}

static unsigned int
alpha_shared_intr_q_count_handlers(struct alpha_shared_intr *intr_q)
{
	unsigned int cnt = 0;
	struct alpha_shared_intrhand *ih;

	TAILQ_FOREACH(ih, &intr_q->intr_q, ih_q) {
		cnt++;
	}

	return cnt;
}

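/*
 * Add this line's handler count to the target CPU's total.  Runs on
 * the target CPU itself, via xcall when necessary.
 */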
static void
alpha_shared_intr_set_cpu_xcall(void *arg1, void *arg2)
{
	struct alpha_shared_intr *intr_q = arg1;
	struct cpu_info *ci = arg2;
	unsigned int cnt = alpha_shared_intr_q_count_handlers(intr_q);

	KASSERT(ci == curcpu() || !mp_online);

	ci->ci_nintrhand += cnt;
	KASSERT(cnt <= ci->ci_nintrhand);
}

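/*
 * Subtract this line's handler count from the old CPU's total.  Runs
 * on that CPU itself, via xcall when necessary.
 */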
static void
alpha_shared_intr_unset_cpu_xcall(void *arg1, void *arg2)
{
	struct alpha_shared_intr *intr_q = arg1;
	struct cpu_info *ci = arg2;
	unsigned int cnt = alpha_shared_intr_q_count_handlers(intr_q);

	KASSERT(ci == curcpu() || !mp_online);

	KASSERT(cnt <= ci->ci_nintrhand);
	ci->ci_nintrhand -= cnt;
}

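/*
 * Re-route an interrupt line to a new CPU and move its handler
 * accounting from the old CPU to the new one.  Called with the
 * cpu_lock held.
 */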
void
alpha_shared_intr_set_cpu(struct alpha_shared_intr *intr, unsigned int num,
    struct cpu_info *ci)
{
	struct cpu_info *old_ci;

	KASSERT(mutex_owned(&cpu_lock));

	old_ci = intr[num].intr_cpu;
	intr[num].intr_cpu = ci;

	if (old_ci != NULL && old_ci != ci) {
		kpreempt_disable();

		if (ci == curcpu() || !mp_online) {
			alpha_shared_intr_set_cpu_xcall(&intr[num], ci);
		} else {
			uint64_t where = xc_unicast(XC_HIGHPRI,
			    alpha_shared_intr_set_cpu_xcall, &intr[num],
			    ci, ci);
			xc_wait(where);
		}

		if (old_ci == curcpu() || !mp_online) {
			alpha_shared_intr_unset_cpu_xcall(&intr[num], old_ci);
		} else {
			uint64_t where = xc_unicast(XC_HIGHPRI,
			    alpha_shared_intr_unset_cpu_xcall, &intr[num],
			    old_ci, old_ci);
			xc_wait(where);
		}

		kpreempt_enable();
	}
}

struct cpu_info *
alpha_shared_intr_get_cpu(struct alpha_shared_intr *intr, unsigned int num)
{

	return (intr[num].intr_cpu);
}

struct evcnt *
alpha_shared_intr_evcnt(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (&intr[num].intr_evcnt);
}

void
alpha_shared_intr_set_string(struct alpha_shared_intr *intr,
    unsigned int num, char *str)
{
	char *ostr = intr[num].intr_string;
	intr[num].intr_string = str;
	kmem_strfree(ostr);
}

const char *
alpha_shared_intr_string(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (intr[num].intr_string);
}
509