/*	$NetBSD: mutex.h,v 1.27 2023/04/12 06:35:40 riastradh Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

/*
 * There are 2 types of mutexes:
 *
 *	* Adaptive -- If the lock is already held, the thread attempting
 *	  to acquire the lock determines if the thread that holds it is
 *	  currently running.  If so, it spins, else it sleeps.
 *
 *	* Spin -- If the lock is already held, the thread attempting to
 *	  acquire the lock spins.  The IPL will be raised on entry.
 *
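 * For example, using the high-level API declared at the end of this
 * file (a sketch; the variable and IPL choices are illustrative):
 *
 *	kmutex_t lock;
 *
 *	mutex_init(&lock, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&lock);
 *	...		critical section; may sleep while waiting
 *	mutex_exit(&lock);
 *	mutex_destroy(&lock);
 *
 * Initializing with MUTEX_DEFAULT and a hardware IPL (e.g. IPL_VM)
 * instead of IPL_NONE yields a spin mutex, entered and exited the same
 * way but with the IPL raised while the lock is held.
 *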
 * Machine dependent code must provide the following:
 *
 *	struct kmutex
 *		The actual mutex structure.  This structure is mostly
 *		opaque to machine-independent code; most accesses are done
 *		through macros.  However, machine-independent code must
 *		be able to access the following members:
 *
 *		uintptr_t		mtx_owner
 *		ipl_cookie_t		mtx_ipl
 *		__cpu_simple_lock_t	mtx_lock
 *
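 * As an illustrative sketch only (the union layout and internal member
 * names below are hypothetical, not taken from any particular port),
 * this can look like:
 *
 *	struct kmutex {
 *		union {
 *			volatile uintptr_t	mtxa_owner;
 *			struct {
 *				ipl_cookie_t		mtxs_ipl;
 *				__cpu_simple_lock_t	mtxs_lock;
 *			} s;
 *		} u;
 *	};
 *
 *	#define	mtx_owner	u.mtxa_owner
 *	#define	mtx_ipl		u.s.mtxs_ipl
 *	#define	mtx_lock	u.s.mtxs_lock
 *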
 * If an architecture can be considered 'simple' (no interlock required in
 * the MP case, or no MP) it need only define __HAVE_SIMPLE_MUTEXES and
 * provide the following:
 *
 *	struct kmutex
 *
 *		[additionally:]
 *		volatile integer	mtx_id
 *
 *	MUTEX_CAS(ptr, old, new)
 *		Perform an atomic "compare and swap" operation and
 *		evaluate to true if the swap succeeded, false otherwise.
 *
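 *		For example (a sketch following a common pattern; the
 *		exact atomic primitive varies by architecture):
 *
 *		#define	MUTEX_CAS(p, o, n)				\
 *		    (atomic_cas_ulong((volatile unsigned long *)(p),	\
 *		    (unsigned long)(o), (unsigned long)(n)) ==		\
 *		    (unsigned long)(o))
 *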
 * Otherwise, the following must be defined (an illustrative sketch of
 * an acquire/release pair follows this list):
 *
 *	MUTEX_INITIALIZE_SPIN(mtx, dodebug, minipl)
 *		Initialize a spin mutex.
 *
 *	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)
 *		Initialize an adaptive mutex.
 *
 *	MUTEX_DESTROY(mtx)
 *		Tear down a mutex.
 *
 *	MUTEX_ADAPTIVE_P(mtx)
 *		Evaluates to true if the mutex is an adaptive mutex.
 *
 *	MUTEX_SPIN_P(mtx)
 *		Evaluates to true if the mutex is a spin mutex.
 *
 *	MUTEX_OWNER(owner)
 *		Returns the owner of the adaptive mutex (LWP address).
 *
 *	MUTEX_OWNED(owner)
 *		Returns non-zero if an adaptive mutex is currently
 *		held by an LWP.
 *
 *	MUTEX_HAS_WAITERS(mtx)
 *		Returns true if the mutex has waiters.
 *
 *	MUTEX_SET_WAITERS(mtx)
 *		Mark the mutex as having waiters.
 *
 *	MUTEX_ACQUIRE(mtx, owner)
 *		Try to acquire an adaptive mutex such that:
 *			if (lock held OR waiters)
 *				return 0;
 *			else
 *				return 1;
 *		Must be MP/interrupt atomic.
 *
 *	MUTEX_RELEASE(mtx)
 *		Release the lock and clear the "has waiters" indication.
 *		Must be interrupt atomic, need not be MP safe.
 *
 *	MUTEX_DEBUG_P(mtx)
 *		Evaluates to true if the mutex is initialized with
 *		dodebug==true.  Only used in the LOCKDEBUG case.
 *
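 * As an illustrative sketch only (not taken from any real port, and
 * with interrupt atomicity, e.g. raising the IPL, elided), an
 * acquire/release pair could be built on the per-mutex interlock, in
 * the style of the pseudocode above:
 *
 *	MUTEX_ACQUIRE(mtx, owner):
 *		__cpu_simple_lock(&(mtx)->mtx_lock);
 *		if (MUTEX_OWNED((mtx)->mtx_owner))
 *			rv = 0;
 *		else {
 *			(mtx)->mtx_owner = (owner);
 *			rv = 1;
 *		}
 *		__cpu_simple_unlock(&(mtx)->mtx_lock);
 *
 *	MUTEX_RELEASE(mtx):
 *		__cpu_simple_lock(&(mtx)->mtx_lock);
 *		(mtx)->mtx_owner = 0;
 *		__cpu_simple_unlock(&(mtx)->mtx_lock);
 *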
 * Machine dependent code may optionally provide stubs for the following
 * functions to implement the easy (unlocked / no waiters) cases.  If
 * these stubs are provided, __HAVE_MUTEX_STUBS should be defined.
 *
 *	mutex_enter()
 *	mutex_exit()
 *
 * Two additional stubs may be implemented that handle only the spinlock
 * case, primarily for the scheduler.  __HAVE_SPIN_MUTEX_STUBS should be
 * defined if these are provided:
 *
 *	mutex_spin_enter()
 *	mutex_spin_exit()
 */

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#if !defined(_KERNEL)
#include <sys/types.h>
#include <sys/inttypes.h>
#endif

typedef enum kmutex_type_t {
	MUTEX_SPIN = 0,		/* To get a spin mutex at IPL_NONE */
	MUTEX_ADAPTIVE = 1,	/* For porting code written for Solaris */
	MUTEX_DEFAULT = 2,	/* The only native, endorsed type */
	MUTEX_DRIVER = 3,	/* For porting code written for Solaris */
	MUTEX_NODEBUG = 4	/* Disables LOCKDEBUG; use with care */
} kmutex_type_t;

typedef struct kmutex kmutex_t;

#if defined(__MUTEX_PRIVATE)

#define	MUTEX_THREAD			((uintptr_t)-16L)

#define	MUTEX_BIT_SPIN			0x01
#define	MUTEX_BIT_WAITERS		0x02

#if defined(LOCKDEBUG)
#define	MUTEX_BIT_NODEBUG		0x04	/* LOCKDEBUG disabled */
#else
#define	MUTEX_BIT_NODEBUG		0x00	/* do nothing */
#endif	/* LOCKDEBUG */

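/*
 * The owner word of an adaptive mutex packs the address of the owning
 * LWP (aligned such that its low bits are zero) together with the flag
 * bits above; masking with MUTEX_THREAD recovers the LWP pointer.  As
 * an illustrative sketch (not a definition this header requires):
 *
 *	#define	MUTEX_OWNER(owner)	((owner) & MUTEX_THREAD)
 *	#define	MUTEX_OWNED(owner)	((owner) != 0)
 */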
#define	MUTEX_SPIN_IPL(mtx)		((mtx)->mtx_ipl)
#define	MUTEX_SPIN_OLDSPL(ci)		((ci)->ci_mtx_oldspl)

void	mutex_vector_enter(kmutex_t *);
void	mutex_vector_exit(kmutex_t *);
void	mutex_spin_retry(kmutex_t *);
void	mutex_wakeup(kmutex_t *);

#endif	/* __MUTEX_PRIVATE */

#ifdef _KERNEL
#include <sys/intr.h>
#endif

#include <machine/mutex.h>

/*
 * Return true if no spin mutexes are held by the current CPU.
 */
#ifndef MUTEX_NO_SPIN_ACTIVE_P
#define	MUTEX_NO_SPIN_ACTIVE_P(ci)	((ci)->ci_mtx_count == 0)
#endif

#ifdef _KERNEL

void	_mutex_init(kmutex_t *, kmutex_type_t, int, uintptr_t);
void	mutex_init(kmutex_t *, kmutex_type_t, int);
void	mutex_destroy(kmutex_t *);

void	mutex_enter(kmutex_t *);
void	mutex_exit(kmutex_t *);

void	mutex_spin_enter(kmutex_t *);
void	mutex_spin_exit(kmutex_t *);

int	mutex_tryenter(kmutex_t *);

int	mutex_owned(const kmutex_t *);
int	mutex_ownable(const kmutex_t *);
bool	mutex_owner_running(const kmutex_t *);

void	mutex_obj_init(void);
kmutex_t *mutex_obj_alloc(kmutex_type_t, int);
void	mutex_obj_hold(kmutex_t *);
bool	mutex_obj_free(kmutex_t *);
u_int	mutex_obj_refcnt(kmutex_t *);
kmutex_t *mutex_obj_tryalloc(kmutex_type_t, int);
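
/*
 * Example use of the reference-counted mutex objects above (a sketch;
 * the calling context is hypothetical):
 *
 *	kmutex_t *mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *
 *	mutex_obj_hold(mtx);	take an additional reference
 *	...
 *	mutex_obj_free(mtx);	drop a reference; returns true once the
 *				final reference is gone and the mutex
 *				has been freed
 */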

#endif /* _KERNEL */

#endif /* _SYS_MUTEX_H_ */