xref: /openbsd/sys/arch/sparc64/sparc64/intr.c (revision 7f757eb4)
1 /*	$OpenBSD: intr.c,v 1.68 2024/11/08 08:44:07 miod Exp $	*/
2 /*	$NetBSD: intr.c,v 1.39 2001/07/19 23:38:11 eeh Exp $ */
3 
4 /*
5  * Copyright (c) 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This software was developed by the Computer Systems Engineering group
9  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
10  * contributed to Berkeley.
11  *
12  * All advertising materials mentioning features or use of this software
13  * must display the following acknowledgement:
14  *	This product includes software developed by the University of
15  *	California, Lawrence Berkeley Laboratory.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  *	@(#)intr.c	8.3 (Berkeley) 11/11/93
42  */
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 
49 #include <dev/cons.h>
50 
51 #include <machine/atomic.h>
52 #include <machine/cpu.h>
53 #include <machine/ctlreg.h>
54 #include <machine/instr.h>
55 #include <machine/trap.h>
56 
57 /* Grab interrupt map stuff (what is it doing there???) */
58 #include <sparc64/dev/iommureg.h>
59 
60 /*
61  * The following array is to used by locore.s to map interrupt packets
62  * to the proper IPL to send ourselves a softint.  It should be filled
63  * in as the devices are probed.  We should eventually change this to a
64  * vector table and call these things directly.
65  */
66 struct intrhand *intrlev[MAXINTNUM];
67 
68 #define INTR_DEVINO	0x8000
69 
70 int	intr_handler(struct trapframe *, struct intrhand *);
71 int	intr_list_handler(void *);
72 void	intr_ack(struct intrhand *);
73 
74 int
intr_handler(struct trapframe * tf,struct intrhand * ih)75 intr_handler(struct trapframe *tf, struct intrhand *ih)
76 {
77 	struct cpu_info *ci = curcpu();
78 	int rc;
79 #ifdef MULTIPROCESSOR
80 	int need_lock;
81 
82 	if (ih->ih_mpsafe)
83 		need_lock = 0;
84 	else
85 		need_lock = tf->tf_pil < PIL_SCHED && tf->tf_pil != PIL_CLOCK;
86 
87 	if (need_lock)
88 		KERNEL_LOCK();
89 #endif
90 	ci->ci_idepth++;
91 	rc = (*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : tf);
92 	ci->ci_idepth--;
93 #ifdef MULTIPROCESSOR
94 	if (need_lock)
95 		KERNEL_UNLOCK();
96 #endif
97 	return rc;
98 }
99 
100 /*
101  * PCI devices can share interrupts so we need to have
102  * a handler to hand out interrupts.
103  */
104 int
intr_list_handler(void * arg)105 intr_list_handler(void *arg)
106 {
107 	struct cpu_info *ci = curcpu();
108 	struct intrhand *ih = arg;
109 	int claimed = 0, rv, ipl = ci->ci_handled_intr_level;
110 
111 	while (ih) {
112 		sparc_wrpr(pil, ih->ih_pil, 0);
113 		ci->ci_handled_intr_level = ih->ih_pil;
114 
115 		rv = ih->ih_fun(ih->ih_arg);
116 		if (rv) {
117 			ih->ih_count.ec_count++;
118 			claimed = 1;
119 			if (rv == 1)
120 				break;
121 		}
122 
123 		ih = ih->ih_next;
124 	}
125 	sparc_wrpr(pil, ipl, 0);
126 	ci->ci_handled_intr_level = ipl;
127 
128 	return (claimed);
129 }
130 
131 void
intr_ack(struct intrhand * ih)132 intr_ack(struct intrhand *ih)
133 {
134 	*ih->ih_clr = INTCLR_IDLE;
135 }
136 
137 /*
138  * Attach an interrupt handler to the vector chain.
139  */
140 void
intr_establish(struct intrhand * ih)141 intr_establish(struct intrhand *ih)
142 {
143 	struct intrhand *q;
144 	u_int64_t m, id;
145 	int s;
146 
147 	s = splhigh();
148 
149 	ih->ih_pending = NULL;
150 	ih->ih_next = NULL;
151 	if (ih->ih_cpu == NULL)
152 		ih->ih_cpu = curcpu();
153 	else if (!ih->ih_mpsafe) {
154 		panic("non-mpsafe interrupt \"%s\" "
155 		    "established on a specific cpu", ih->ih_name);
156 	}
157 	if (ih->ih_clr)
158 		ih->ih_ack = intr_ack;
159 	else
160 		ih->ih_ack = NULL;
161 
162 	if (strlen(ih->ih_name) == 0)
163 		evcount_attach(&ih->ih_count, "unknown", NULL);
164 	else
165 		evcount_attach(&ih->ih_count, ih->ih_name, NULL);
166 
167 	if (ih->ih_number & INTR_DEVINO) {
168 		splx(s);
169 		return;
170 	}
171 
172 	/*
173 	 * Store in fast lookup table
174 	 */
175 	if (ih->ih_number <= 0 || ih->ih_number >= MAXINTNUM)
176 		panic("intr_establish: bad intr number %x", ih->ih_number);
177 
178 	q = intrlev[ih->ih_number];
179 	if (q == NULL) {
180 		/* No interrupt already there, just put handler in place. */
181 		intrlev[ih->ih_number] = ih;
182 	} else {
183 		struct intrhand *nih, *pih;
184 		int ipl;
185 
186 		/*
187 		 * Interrupt is already there.  We need to create a
188 		 * new interrupt handler and interpose it.
189 		 */
190 #ifdef DEBUG
191 		printf("intr_establish: intr reused %x\n", ih->ih_number);
192 #endif
193 		if (q->ih_fun != intr_list_handler) {
194 			nih = malloc(sizeof(struct intrhand),
195 			    M_DEVBUF, M_NOWAIT | M_ZERO);
196 			if (nih == NULL)
197 				panic("intr_establish");
198 
199 			nih->ih_fun = intr_list_handler;
200 			nih->ih_arg = q;
201 			nih->ih_number = q->ih_number;
202 			nih->ih_pil = min(q->ih_pil, ih->ih_pil);
203 			nih->ih_map = q->ih_map;
204 			nih->ih_clr = q->ih_clr;
205 			nih->ih_ack = q->ih_ack;
206 			q->ih_ack = NULL;
207 			nih->ih_bus = q->ih_bus;
208 			nih->ih_cpu = q->ih_cpu;
209 
210 			intrlev[ih->ih_number] = q = nih;
211 		} else
212 			q->ih_pil = min(q->ih_pil, ih->ih_pil);
213 
214 		ih->ih_ack = NULL;
215 
216 		/* Add ih to list in priority order. */
217 		pih = q;
218 		nih = pih->ih_arg;
219 		ipl = nih->ih_pil;
220 		while (nih && ih->ih_pil <= nih->ih_pil) {
221 			ipl = nih->ih_pil;
222 			pih = nih;
223 			nih = nih->ih_next;
224 		}
225 #if DEBUG
226 		printf("intr_establish: inserting pri %i after %i\n",
227 		    ih->ih_pil, ipl);
228 #endif
229 		if (pih == q) {
230 			ih->ih_next = pih->ih_arg;
231 			pih->ih_arg = ih;
232 		} else {
233 			ih->ih_next = pih->ih_next;
234 			pih->ih_next = ih;
235 		}
236 	}
237 
238 	if (ih->ih_clr != NULL)			/* Set interrupt to idle */
239 		*ih->ih_clr = INTCLR_IDLE;
240 
241 	if (ih->ih_map) {
242 		id = ih->ih_cpu->ci_upaid;
243 		m = *ih->ih_map;
244 		if (INTTID(m) != id) {
245 #ifdef DEBUG
246 			printf("\nintr_establish: changing map 0x%llx -> ", m);
247 #endif
248 			m = (m & ~INTMAP_TID) | (id << INTTID_SHIFT);
249 #ifdef DEBUG
250 			printf("0x%llx (id=%llx) ", m, id);
251 #endif
252 		}
253 		m |= INTMAP_V;
254 		*ih->ih_map = m;
255 	}
256 
257 #ifdef DEBUG
258 	printf("\nintr_establish: vector %x pil %x mapintr %p "
259 	    "clrintr %p fun %p arg %p target %d",
260 	    ih->ih_number, ih->ih_pil, (void *)ih->ih_map,
261 	    (void *)ih->ih_clr, (void *)ih->ih_fun,
262 	    (void *)ih->ih_arg, (int)(ih->ih_map ? INTTID(*ih->ih_map) : -1));
263 #endif
264 
265 	splx(s);
266 }
267 
268 int
splraise(int ipl)269 splraise(int ipl)
270 {
271 	KASSERT(ipl >= IPL_NONE);
272 	return (_splraise(ipl));
273 }
274 
275 void
intr_barrier(void * cookie)276 intr_barrier(void *cookie)
277 {
278 	struct intrhand *ih = cookie;
279 
280 	sched_barrier(ih->ih_cpu);
281 }
282 
283 void *
softintr_establish(int level,void (* fun)(void *),void * arg)284 softintr_establish(int level, void (*fun)(void *), void *arg)
285 {
286 	struct intrhand *ih;
287 
288 	if (level == IPL_TTY)
289 		level = IPL_SOFTTTY;
290 
291 	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK | M_ZERO);
292 	ih->ih_fun = (int (*)(void *))fun;	/* XXX */
293 	ih->ih_arg = arg;
294 	ih->ih_pil = level;
295 	ih->ih_pending = NULL;
296 	ih->ih_ack = NULL;
297 	ih->ih_clr = NULL;
298 	return (ih);
299 }
300 
301 void
softintr_disestablish(void * cookie)302 softintr_disestablish(void *cookie)
303 {
304 	struct intrhand *ih = cookie;
305 
306 	free(ih, M_DEVBUF, sizeof(*ih));
307 }
308 
309 void
softintr_schedule(void * cookie)310 softintr_schedule(void *cookie)
311 {
312 	struct intrhand *ih = cookie;
313 
314 	send_softint(ih->ih_pil, ih);
315 }
316 
317 #ifdef DIAGNOSTIC
318 void
splassert_check(int wantipl,const char * func)319 splassert_check(int wantipl, const char *func)
320 {
321 	struct cpu_info *ci = curcpu();
322 	int oldipl;
323 
324 	__asm volatile("rdpr %%pil,%0" : "=r" (oldipl));
325 
326 	if (oldipl < wantipl) {
327 		splassert_fail(wantipl, oldipl, func);
328 	}
329 
330 	if (ci->ci_handled_intr_level > wantipl) {
331 		/*
332 		 * XXX - need to show difference between what's blocked and
333 		 * what's running.
334 		 */
335 		splassert_fail(wantipl, ci->ci_handled_intr_level, func);
336 	}
337 }
338 #endif
339 
340 #ifdef SUN4V
341 
342 #include <machine/hypervisor.h>
343 
344 uint64_t sun4v_group_interrupt_major;
345 
346 int64_t
sun4v_intr_devino_to_sysino(uint64_t devhandle,uint64_t devino,uint64_t * ino)347 sun4v_intr_devino_to_sysino(uint64_t devhandle, uint64_t devino, uint64_t *ino)
348 {
349 	if (sun4v_group_interrupt_major < 3)
350 		return hv_intr_devino_to_sysino(devhandle, devino, ino);
351 
352 	KASSERT(INTVEC(devino) == devino);
353 	*ino = devino | INTR_DEVINO;
354 	return H_EOK;
355 }
356 
357 int64_t
sun4v_intr_setcookie(uint64_t devhandle,uint64_t ino,uint64_t cookie_value)358 sun4v_intr_setcookie(uint64_t devhandle, uint64_t ino, uint64_t cookie_value)
359 {
360 	if (sun4v_group_interrupt_major < 3)
361 		return H_EOK;
362 
363 	return hv_vintr_setcookie(devhandle, ino, cookie_value);
364 }
365 
366 int64_t
sun4v_intr_setenabled(uint64_t devhandle,uint64_t ino,uint64_t intr_enabled)367 sun4v_intr_setenabled(uint64_t devhandle, uint64_t ino, uint64_t intr_enabled)
368 {
369 	if (sun4v_group_interrupt_major < 3)
370 		return hv_intr_setenabled(ino, intr_enabled);
371 
372 	return hv_vintr_setenabled(devhandle, ino, intr_enabled);
373 }
374 
375 int64_t
sun4v_intr_setstate(uint64_t devhandle,uint64_t ino,uint64_t intr_state)376 sun4v_intr_setstate(uint64_t devhandle, uint64_t ino, uint64_t intr_state)
377 {
378 	if (sun4v_group_interrupt_major < 3)
379 		return hv_intr_setstate(ino, intr_state);
380 
381 	return hv_vintr_setstate(devhandle, ino, intr_state);
382 }
383 
384 int64_t
sun4v_intr_settarget(uint64_t devhandle,uint64_t ino,uint64_t cpuid)385 sun4v_intr_settarget(uint64_t devhandle, uint64_t ino, uint64_t cpuid)
386 {
387 	if (sun4v_group_interrupt_major < 3)
388 		return hv_intr_settarget(ino, cpuid);
389 
390 	return hv_vintr_settarget(devhandle, ino, cpuid);
391 }
392 
393 #endif
394