/* xref: /openbsd/sys/sys/mutex.h (revision 52ac02e4) */
1 /*	$OpenBSD: mutex.h,v 1.22 2024/05/16 09:30:03 kettenis Exp $	*/
2 
3 /*
4  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _SYS_MUTEX_H_
20 #define _SYS_MUTEX_H_
21 
22 /*
23  * A mutex is:
24  *  - owned by a cpu.
25  *  - non-recursive.
26  *  - spinning.
27  *  - not providing mutual exclusion between processes, only cpus.
28  *  - providing interrupt blocking when necessary.
29  *
30  * Different mutexes can be nested, but not interleaved. This is ok:
31  * "mtx_enter(foo); mtx_enter(bar); mtx_leave(bar); mtx_leave(foo);"
32  * This is _not_ ok:
33  * "mtx_enter(foo); mtx_enter(bar); mtx_leave(foo); mtx_leave(bar);"
34  */
35 
36 /*
37  * To prevent lock ordering problems with the kernel lock, we need to
38  * make sure we block all interrupts that can grab the kernel lock.
39  * The simplest way to achieve this is to make sure mutexes always
40  * raise the interrupt priority level to the highest level that has
41  * interrupts that grab the kernel lock.
42  */
#ifdef MULTIPROCESSOR
/*
 * Clamp the requested IPL up to IPL_MPFLOOR so a mutex always blocks
 * every interrupt that can grab the kernel lock (see the lock-ordering
 * comment above).  Note: `ipl' may be evaluated twice, so callers pass
 * constants or side-effect-free expressions.
 */
#define __MUTEX_IPL(ipl) \
	(((ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
#else
/* Uniprocessor: no cross-CPU kernel-lock contention, use the IPL as given. */
#define __MUTEX_IPL(ipl) (ipl)
#endif
49 
50 #include <machine/mutex.h>
51 
52 #ifdef __USE_MI_MUTEX
53 
54 #include <sys/_lock.h>
55 
struct mutex {
	void *volatile mtx_owner;	/* owning CPU (curcpu()), or NULL when unlocked */
	int mtx_wantipl;		/* IPL to run at while held; pre-clamped by __MUTEX_IPL() */
	int mtx_oldipl;			/* previous IPL saved across the lock — presumably
					 * restored on mtx_leave(); see MUTEX_OLDIPL() */
#ifdef WITNESS
	struct lock_object mtx_lock_obj;	/* WITNESS lock-order bookkeeping */
#endif
};
64 
#ifdef WITNESS
/*
 * Static initializer: unowned (NULL), wanted IPL clamped through
 * __MUTEX_IPL(), saved IPL cleared to IPL_NONE.  WITNESS kernels also
 * embed the lock_object metadata; the two variants must keep the same
 * leading field order as struct mutex above.
 */
#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
#else
#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
#endif
72 
void __mtx_init(struct mutex *, int);
/* Run-time initializer; the IPL clamp happens here at the call site. */
#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
75 
#ifdef DIAGNOSTIC
/*
 * Ownership assertions: a mutex is "held" when mtx_owner is this CPU.
 * Checks are suppressed while panicking or inside ddb (panicstr,
 * db_active) so the assertions cannot recurse into panic().
 */
#define MUTEX_ASSERT_LOCKED(mtx) do {					\
	if (((mtx)->mtx_owner != curcpu()) && !(panicstr || db_active))	\
		panic("mutex %p not held in %s", (mtx), __func__);	\
} while (0)

#define MUTEX_ASSERT_UNLOCKED(mtx) do {					\
	if (((mtx)->mtx_owner == curcpu()) && !(panicstr || db_active))	\
		panic("mutex %p held in %s", (mtx), __func__);		\
} while (0)
#else
/* Non-DIAGNOSTIC: still evaluate the argument once, but check nothing. */
#define MUTEX_ASSERT_LOCKED(mtx) do { (void)(mtx); } while (0)
#define MUTEX_ASSERT_UNLOCKED(mtx) do { (void)(mtx); } while (0)
#endif
90 
/* Accessors used by the MI mutex and WITNESS code. */
#define MUTEX_LOCK_OBJECT(mtx)	(&(mtx)->mtx_lock_obj)
#define MUTEX_OLDIPL(mtx)	(mtx)->mtx_oldipl
93 
94 #endif	/* __USE_MI_MUTEX */
95 
96 
/*
 * Translate MTX_* flags into lock_object lo_flags: WITNESS checking is
 * enabled unless MTX_NOWITNESS is given, MTX_DUPOK maps to LO_DUPOK,
 * and the object is marked initialized with lock class "mutex".
 */
#define MTX_LO_FLAGS(flags) \
	((!((flags) & MTX_NOWITNESS) ? LO_WITNESS : 0) | \
	 ((flags) & MTX_DUPOK ? LO_DUPOK : 0) | \
	 LO_INITIALIZED | (LO_CLASS_MUTEX << LO_CLASSSHIFT))
101 
/*
 * __MTX_NAME expands to a "file:line" string literal identifying the
 * site where the initializer macro is used; the two-step stringify
 * (__MTX_S -> __MTX_STRING) is needed so __LINE__ expands first.
 */
#define __MTX_STRING(x) #x
#define __MTX_S(x) __MTX_STRING(x)
#define __MTX_NAME __FILE__ ":" __MTX_S(__LINE__)
105 
/*
 * Static lock_object initializer: an anonymous per-site lock_type named
 * after the file:line of the definition, plus the caller-supplied name
 * and translated flags.
 */
#define MTX_LO_INITIALIZER(name, flags) \
	{ .lo_type = &(const struct lock_type){ .lt_name = __MTX_NAME }, \
	  .lo_name = (name), \
	  .lo_flags = MTX_LO_FLAGS(flags) }
110 
#define MTX_NOWITNESS	0x01	/* exclude this mutex from WITNESS checking */
#define MTX_DUPOK	0x02	/* allow holding two mutexes of the same type */

/* Common static initializer: named after its file:line, no flags. */
#define MUTEX_INITIALIZER(ipl) \
	MUTEX_INITIALIZER_FLAGS(ipl, __MTX_NAME, 0)
116 
117 /*
118  * Some architectures need to do magic for the ipl, so they need a macro.
119  */
#ifndef _mtx_init
void _mtx_init(struct mutex *, int);
#endif

void	mtx_enter(struct mutex *);	/* acquire, spinning until available */
int	mtx_enter_try(struct mutex *);	/* single attempt; int result —
					 * presumably nonzero on success,
					 * confirm against mutex(9) */
void	mtx_leave(struct mutex *);	/* release */

#define mtx_init(m, ipl)	mtx_init_flags(m, ipl, NULL, 0)

/*
 * True when this CPU owns the mutex; deliberately also true while
 * panicking or in ddb so ownership assertions don't fire mid-debug.
 */
#define mtx_owned(mtx) \
	(((mtx)->mtx_owner == curcpu()) || panicstr || db_active)
132 
#ifdef WITNESS

void	_mtx_init_flags(struct mutex *, int, const char *, int,
	    const struct lock_type *);

/*
 * Every mtx_init_flags() site gets its own static lock_type, named by
 * stringifying the mutex expression (#m), so WITNESS can track lock
 * order per initialization site.
 */
#define mtx_init_flags(m, ipl, name, flags) do {			\
	static const struct lock_type __lock_type = { .lt_name = #m };	\
	_mtx_init_flags(m, ipl, name, flags, &__lock_type);		\
} while (0)

#else /* WITNESS */

/* Without WITNESS, name/flags are evaluated (for side effects and
 * -Wunused hygiene) but otherwise discarded. */
#define mtx_init_flags(m, ipl, name, flags) do {			\
	(void)(name); (void)(flags);					\
	_mtx_init(m, ipl);						\
} while (0)

#define _mtx_init_flags(m,i,n,f,t)	_mtx_init(m,i)

#endif /* WITNESS */
153 
#if defined(_KERNEL) && defined(DDB)

/*
 * Minimal spin mutex for the kernel debugger, independent of the
 * regular mutex machinery above.
 */
struct db_mutex {
	struct cpu_info	*mtx_owner;	/* owning CPU, or NULL when unlocked */
	unsigned long	 mtx_intr_state;	/* saved interrupt state —
						 * presumably restored on
						 * db_mtx_leave(); confirm in
						 * the MD implementation */
};

#define DB_MUTEX_INITIALIZER	{ NULL, 0 }

void	db_mtx_enter(struct db_mutex *);
void	db_mtx_leave(struct db_mutex *);

#endif /* _KERNEL && DDB */
167 
168 #endif
169