/*	$OpenBSD: intr.h,v 1.49 2021/12/14 18:16:14 deraadt Exp $	*/
/*	$NetBSD: intr.h,v 1.5 1996/05/13 06:11:28 mycroft Exp $	*/

/*
 * Copyright (c) 1996 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MACHINE_INTR_H_
#define _MACHINE_INTR_H_

#include <machine/intrdefs.h>

#ifndef _LOCORE
#include <sys/mutex.h>
#include <machine/cpu.h>

extern volatile u_int32_t lapic_tpr;	/* Current interrupt priority level. */

extern int imask[];	/* Bitmasks telling what interrupts are blocked. */
extern int iunmask[];	/* Bitmasks telling what interrupts are accepted. */

#define IMASK(level) imask[IPL(level)]
#define IUNMASK(level) iunmask[IPL(level)]

extern void Xspllower(void);

extern int splraise(int);
extern int spllower(int);
extern void splx(int);
extern void softintr(int);

/*
 * Compiler barrier: prevents the compiler from reordering memory
 * accesses and other code across this point, acting as a sequence
 * point for code generation.  It emits no machine instructions.
 */

#define	__splbarrier() __asm volatile("":::"memory")
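
/*
 * Illustrative sketch (not part of the original header): the barrier
 * can be dropped between a priority-level change and the accesses that
 * depend on it, so the compiler cannot move those accesses across the
 * transition, e.g.
 *
 *	s = splhigh();
 *	__splbarrier();
 *	... accesses that must not be moved above the splhigh() ...
 *	splx(s);
 */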

/* SPL asserts */
#ifdef DIAGNOSTIC
/*
 * Although splassert_fail() is implemented in MI code, it is prototyped
 * in this MD header because we don't want this header to pull in MI
 * headers.
 */
void splassert_fail(int, int, const char *);
extern int splassert_ctl;
void splassert_check(int, const char *);
#define splassert(__wantipl) do {			\
	if (splassert_ctl > 0) {			\
		splassert_check(__wantipl, __func__);	\
	}						\
} while (0)
#define splsoftassert(wantipl) splassert(wantipl)
#else
#define splassert(wantipl)	do { /* nada */ } while (0)
#define splsoftassert(wantipl)	do { /* nada */ } while (0)
#endif
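
/*
 * Example (sketch, not part of the original header): a routine that
 * requires its callers to have already raised the IPL can state and,
 * in DIAGNOSTIC kernels with splassert_ctl > 0, verify that assumption.
 * foo_intr_locked() is a hypothetical name used only for illustration:
 *
 *	void
 *	foo_intr_locked(void)
 *	{
 *		splassert(IPL_NET);
 *		...
 *	}
 */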

/*
 * Define the splraise and splx code in macros, so that the code can be
 * reused in a profiling build in a way that does not cause recursion.
 */
#define _SPLRAISE(ocpl, ncpl) 		\
	ocpl = lapic_tpr;		\
	if (ncpl > ocpl)		\
		lapic_tpr = ncpl


#define _SPLX(ncpl) 			\
	lapic_tpr = ncpl;		\
	if (curcpu()->ci_ipending & IUNMASK(ncpl))	\
		Xspllower()

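/*
 * Illustrative sketch (an assumption, not the kernel's actual
 * definitions): a non-profiling build could implement splraise() and
 * splx() directly on top of these macros, roughly as follows:
 *
 *	int
 *	splraise(int ncpl)
 *	{
 *		int ocpl;
 *
 *		_SPLRAISE(ocpl, ncpl);
 *		return (ocpl);
 *	}
 *
 *	void
 *	splx(int ncpl)
 *	{
 *		_SPLX(ncpl);
 *	}
 */
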
/*
 * Hardware interrupt masks
 */
#define	splbio()	splraise(IPL_BIO)
#define	splnet()	splraise(IPL_NET)
#define	spltty()	splraise(IPL_TTY)
#define	splaudio()	splraise(IPL_AUDIO)
#define	splclock()	splraise(IPL_CLOCK)
#define	splstatclock()	splclock()
#define splipi()	splraise(IPL_IPI)

/*
 * Software interrupt masks
 */
#define	splsoftclock()		splraise(IPL_SOFTCLOCK)
#define	splsoftnet()		splraise(IPL_SOFTNET)
#define	splsofttty()		splraise(IPL_SOFTTTY)

/*
 * Miscellaneous
 */
#define	splvm()		splraise(IPL_VM)
#define	splhigh()	splraise(IPL_HIGH)
#define	splsched()	splraise(IPL_SCHED)
#define	spl0()		spllower(IPL_NONE)
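
/*
 * Example (sketch, not part of the original header): the usual pattern
 * is to save the value returned by the spl*() call that raised the
 * level and restore it with splx() when the critical section ends:
 *
 *	int s;
 *
 *	s = splbio();
 *	... touch data shared with block-I/O interrupt handlers ...
 *	splx(s);
 */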

#include <machine/pic.h>

struct cpu_info;

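/*
 * Wait until any in-progress invocation of the interrupt handler
 * identified by the establish cookie has finished; see intr_barrier(9).
 */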
void intr_barrier(void *);

#ifdef MULTIPROCESSOR
void i386_send_ipi(struct cpu_info *, int);
int i386_fast_ipi(struct cpu_info *, int);
void i386_broadcast_ipi(int);
void i386_ipi_handler(void);
void i386_setperf_ipi(struct cpu_info *);

extern void (*ipifunc[I386_NIPI])(struct cpu_info *);
#endif
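
/*
 * Illustrative sketch (MULTIPROCESSOR kernels only; the IPI bit name
 * below is assumed to come from <machine/intrdefs.h> and is shown only
 * for illustration): asking every other CPU to process a given IPI
 * type is a single call, e.g.
 *
 *	i386_broadcast_ipi(I386_IPI_NOP);
 */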

#endif /* !_LOCORE */

/*
 * Generic software interrupt support.
 */

#define	I386_SOFTINTR_SOFTCLOCK		0
#define	I386_SOFTINTR_SOFTNET		1
#define	I386_SOFTINTR_SOFTTTY		2
#define	I386_NSOFTINTR			3

#ifndef _LOCORE
#include <sys/queue.h>

struct i386_soft_intrhand {
	TAILQ_ENTRY(i386_soft_intrhand)
		sih_q;
	struct i386_soft_intr *sih_intrhead;
	void	(*sih_fn)(void *);
	void	*sih_arg;
	int	sih_pending;
};

struct i386_soft_intr {
	TAILQ_HEAD(, i386_soft_intrhand)
			softintr_q;
	int		softintr_ssir;
	struct mutex	softintr_lock;
};

void	*softintr_establish(int, void (*)(void *), void *);
void	softintr_disestablish(void *);
void	softintr_init(void);
void	softintr_dispatch(int);

#define	softintr_schedule(arg)						\
do {									\
	struct i386_soft_intrhand *__sih = (arg);			\
	struct i386_soft_intr *__si = __sih->sih_intrhead;		\
									\
	mtx_enter(&__si->softintr_lock);				\
	if (__sih->sih_pending == 0) {					\
		TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q);	\
		__sih->sih_pending = 1;					\
		softintr(__si->softintr_ssir);				\
	}								\
	mtx_leave(&__si->softintr_lock);				\
} while (/*CONSTCOND*/ 0)
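
/*
 * Example (sketch, not part of the original header; foo_softintr and
 * sc_sih are hypothetical names): a driver typically registers a soft
 * interrupt handler once at attach time and schedules it from its
 * hard-interrupt path; the handler is later run at the registered soft
 * IPL by softintr_dispatch():
 *
 *	sc->sc_sih = softintr_establish(IPL_SOFTNET, foo_softintr, sc);
 *	...
 *	softintr_schedule(sc->sc_sih);
 */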
#endif /* _LOCORE */

#endif /* !_MACHINE_INTR_H_ */