/*	$OpenBSD: smr.h,v 1.9 2022/07/25 08:06:44 visa Exp $	*/

/*
 * Copyright (c) 2019 Visa Hankala
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _SYS_SMR_H_
#define _SYS_SMR_H_

#include <sys/queue.h>

struct smr_entry {
	SIMPLEQ_ENTRY(smr_entry)	smr_list;
	void				(*smr_func)(void *);
	void				*smr_arg;
};

SIMPLEQ_HEAD(smr_entry_list, smr_entry);

#ifdef _KERNEL

#include <sys/atomic.h>

void	smr_startup(void);
void	smr_startup_thread(void);
void	smr_idle(void);
void	smr_read_enter(void);
void	smr_read_leave(void);

void	smr_call_impl(struct smr_entry *, void (*)(void *), void *, int);
void	smr_barrier_impl(int);

#define smr_call(entry, func, arg)	smr_call_impl(entry, func, arg, 0)
#define smr_barrier()			smr_barrier_impl(0)
#define smr_flush()			smr_barrier_impl(1)

static inline void
smr_init(struct smr_entry *smr)
{
	smr->smr_func = NULL;
	smr->smr_arg = NULL;
}
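
/*
 * Illustrative lifecycle sketch (not part of this API; "struct foo",
 * foo_free() and the writer's lock are hypothetical).  A writer unlinks
 * an object and hands it to smr_call(), which invokes the callback only
 * after every reader that might still see the object has left its
 * critical section.  Readers bracket all accesses with
 * smr_read_enter()/smr_read_leave() and must not sleep in between.
 *
 *	struct foo {
 *		int			f_value;
 *		struct smr_entry	f_smr;
 *	};
 *
 *	void
 *	foo_free(void *arg)
 *	{
 *		struct foo *f = arg;
 *
 *		free(f, M_DEVBUF, sizeof(*f));
 *	}
 *
 *	// Writer, with its lock held: after unlinking f from every
 *	// SMR-visible structure, defer the actual free.
 *	smr_init(&f->f_smr);
 *	smr_call(&f->f_smr, foo_free, f);
 *
 *	// Reader: f stays valid until smr_read_leave().
 *	smr_read_enter();
 *	...
 *	smr_read_leave();
 */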

#ifdef DIAGNOSTIC
#define SMR_ASSERT_CRITICAL() do {					\
	if (panicstr == NULL && !db_active)				\
		KASSERT(curcpu()->ci_schedstate.spc_smrdepth > 0);	\
} while (0)
#define SMR_ASSERT_NONCRITICAL() do {					\
	if (panicstr == NULL && !db_active)				\
		KASSERT(curcpu()->ci_schedstate.spc_smrdepth == 0);	\
} while (0)
#else
#define SMR_ASSERT_CRITICAL()		do {} while (0)
#define SMR_ASSERT_NONCRITICAL()	do {} while (0)
#endif
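
/*
 * Sketch: a routine that may only be called from within a read-side
 * critical section can enforce that contract in DIAGNOSTIC kernels.
 * foo_ptr is a hypothetical SMR-protected global; SMR_PTR_GET() is
 * defined below.
 *
 *	struct foo *
 *	foo_current(void)
 *	{
 *		SMR_ASSERT_CRITICAL();
 *		return SMR_PTR_GET(&foo_ptr);
 *	}
 */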

#endif /* _KERNEL */

#define SMR_PTR_GET(pptr)		READ_ONCE(*(pptr))

#define SMR_PTR_GET_LOCKED(pptr)	(*(pptr))

#define SMR_PTR_SET_LOCKED(pptr, val) do {				\
	membar_producer();						\
	WRITE_ONCE(*(pptr), val);					\
} while (0)
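
/*
 * Publish/consume sketch (foo_ptr is a hypothetical global): the writer
 * fully initializes the object before publishing the pointer.  The
 * membar_producer() in SMR_PTR_SET_LOCKED() orders the initializing
 * stores before the pointer store, so a reader that observes the new
 * pointer through SMR_PTR_GET() also observes the initialized contents
 * through the address dependency.
 *
 *	// Writer, serialized by a lock.
 *	f->f_value = 42;
 *	SMR_PTR_SET_LOCKED(&foo_ptr, f);
 *
 *	// Lock-free reader.
 *	smr_read_enter();
 *	f = SMR_PTR_GET(&foo_ptr);
 *	if (f != NULL)
 *		v = f->f_value;
 *	smr_read_leave();
 */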

/*
 * List implementations for use with safe memory reclamation.
 */

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */

#include <sys/_null.h>

/*
 * This file defines three types of data structures: singly-linked lists,
 * lists, and tail queues.
 *
 * A singly-linked list is headed by a single forward pointer. The elements
 * are singly linked for minimum space and pointer manipulation overhead,
 * at the expense of O(n) removal of arbitrary elements. New elements can
 * be added to the list after an existing element or at the head of the
 * list. Elements being removed from the head of the list should use the
 * explicit macro for this purpose for optimum efficiency. A singly-linked
 * list may only be traversed in the forward direction.  Singly-linked
 * lists are ideal for applications with large datasets and few or no
 * removals, or for implementing a LIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header). The elements are doubly linked
 * so that an arbitrary element can be removed without traversing the
 * list. New elements can be added to the list before or after an
 * existing element or at the head of the list. A list may only be
 * traversed in the forward direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list. The elements are doubly
 * linked so that an arbitrary element can be removed without traversing
 * the list. New elements can be added to the list before or after an
 * existing element, at the head of the list, or at the end of the list.
 * A tail queue may only be traversed in the forward direction by
 * lock-free readers.
 */

/*
 * Singly-linked List definitions.
 */
#define SMR_SLIST_HEAD(name, type)					\
struct name {								\
	struct type *smr_slh_first;	/* first element, SMR-protected */\
}

#define	SMR_SLIST_HEAD_INITIALIZER(head)				\
	{ .smr_slh_first = NULL }

#define SMR_SLIST_ENTRY(type)						\
struct {								\
	struct type *smr_sle_next;	/* next element, SMR-protected */\
}

/*
 * Singly-linked List access methods.
 */
#define	SMR_SLIST_END(head)	NULL

#define	SMR_SLIST_FIRST(head) \
	SMR_PTR_GET(&(head)->smr_slh_first)
#define	SMR_SLIST_NEXT(elm, field) \
	SMR_PTR_GET(&(elm)->field.smr_sle_next)

#define	SMR_SLIST_FIRST_LOCKED(head) \
	SMR_PTR_GET_LOCKED(&(head)->smr_slh_first)
#define	SMR_SLIST_EMPTY_LOCKED(head) \
	(SMR_SLIST_FIRST_LOCKED(head) == SMR_SLIST_END(head))
#define	SMR_SLIST_NEXT_LOCKED(elm, field) \
	SMR_PTR_GET_LOCKED(&(elm)->field.smr_sle_next)

#define	SMR_SLIST_FOREACH(var, head, field)				\
	for ((var) = SMR_SLIST_FIRST(head);				\
	    (var) != SMR_SLIST_END(head);				\
	    (var) = SMR_SLIST_NEXT(var, field))

#define	SMR_SLIST_FOREACH_LOCKED(var, head, field)			\
	for ((var) = SMR_SLIST_FIRST_LOCKED(head);			\
	    (var) != SMR_SLIST_END(head);				\
	    (var) = SMR_SLIST_NEXT_LOCKED(var, field))

#define	SMR_SLIST_FOREACH_SAFE_LOCKED(var, head, field, tvar)		\
	for ((var) = SMR_SLIST_FIRST_LOCKED(head);			\
	    (var) && ((tvar) = SMR_SLIST_NEXT_LOCKED(var, field), 1);	\
	    (var) = (tvar))
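
/*
 * Sketch: a lock-free reader walks the list inside a critical section.
 * The unlocked FOREACH reads each link with SMR_PTR_GET(), which pairs
 * with the writer's membar_producer() so that a reader who sees a newly
 * inserted element also sees its initialized contents.  foo_list, f_key
 * and f_list are hypothetical.
 *
 *	struct foo *f;
 *
 *	smr_read_enter();
 *	SMR_SLIST_FOREACH(f, &foo_list, f_list) {
 *		if (f->f_key == key)
 *			break;
 *	}
 *	smr_read_leave();
 */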

/*
 * Singly-linked List functions.
 */
#define	SMR_SLIST_INIT(head) do {					\
	(head)->smr_slh_first = SMR_SLIST_END(head);			\
} while (0)

#define	SMR_SLIST_INSERT_AFTER_LOCKED(slistelm, elm, field) do {	\
	(elm)->field.smr_sle_next = (slistelm)->field.smr_sle_next;	\
	membar_producer();						\
	(slistelm)->field.smr_sle_next = (elm);				\
} while (0)

#define	SMR_SLIST_INSERT_HEAD_LOCKED(head, elm, field) do {		\
	(elm)->field.smr_sle_next = (head)->smr_slh_first;		\
	membar_producer();						\
	(head)->smr_slh_first = (elm);					\
} while (0)

#define	SMR_SLIST_REMOVE_AFTER_LOCKED(elm, field) do {			\
	(elm)->field.smr_sle_next =					\
	    (elm)->field.smr_sle_next->field.smr_sle_next;		\
} while (0)

#define	SMR_SLIST_REMOVE_HEAD_LOCKED(head, field) do {			\
	(head)->smr_slh_first = (head)->smr_slh_first->field.smr_sle_next;\
} while (0)

#define	SMR_SLIST_REMOVE_LOCKED(head, elm, type, field) do {		\
	if ((head)->smr_slh_first == (elm)) {				\
		SMR_SLIST_REMOVE_HEAD_LOCKED((head), field);		\
	} else {							\
		struct type *curelm = (head)->smr_slh_first;		\
									\
		while (curelm->field.smr_sle_next != (elm))		\
			curelm = curelm->field.smr_sle_next;		\
		curelm->field.smr_sle_next =				\
		    curelm->field.smr_sle_next->field.smr_sle_next;	\
	}								\
	/* (elm)->field.smr_sle_next must be left intact so that	\
	 * concurrent readers can continue iterating. */		\
} while (0)
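
/*
 * Sketch: a writer prunes elements while iterating.  The _SAFE variant
 * is required because removal invalidates the iteration cursor, and the
 * free is deferred with smr_call() so concurrent readers can finish.
 * foo_is_stale(), foo_free() and the other names are hypothetical; the
 * list lock is held throughout.
 *
 *	struct foo *f, *tf;
 *
 *	SMR_SLIST_FOREACH_SAFE_LOCKED(f, &foo_list, f_list, tf) {
 *		if (foo_is_stale(f)) {
 *			SMR_SLIST_REMOVE_LOCKED(&foo_list, f, foo, f_list);
 *			smr_call(&f->f_smr, foo_free, f);
 *		}
 *	}
 */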

/*
 * List definitions.
 */
#define	SMR_LIST_HEAD(name, type)					\
struct name {								\
	struct type *smr_lh_first;	/* first element, SMR-protected */\
}

#define	SMR_LIST_HEAD_INITIALIZER(head)					\
	{ .smr_lh_first = NULL }

#define	SMR_LIST_ENTRY(type)						\
struct {								\
	struct type *smr_le_next;	/* next element, SMR-protected */\
	struct type **smr_le_prev;	/* address of previous next element */\
}

/*
 * List access methods.
 */
#define	SMR_LIST_END(head)	NULL

#define	SMR_LIST_FIRST(head) \
	SMR_PTR_GET(&(head)->smr_lh_first)
#define	SMR_LIST_NEXT(elm, field) \
	SMR_PTR_GET(&(elm)->field.smr_le_next)

#define	SMR_LIST_FIRST_LOCKED(head)		((head)->smr_lh_first)
#define	SMR_LIST_NEXT_LOCKED(elm, field)	((elm)->field.smr_le_next)
#define	SMR_LIST_EMPTY_LOCKED(head) \
	(SMR_LIST_FIRST_LOCKED(head) == SMR_LIST_END(head))

#define	SMR_LIST_FOREACH(var, head, field)				\
	for ((var) = SMR_LIST_FIRST(head);				\
	    (var) != SMR_LIST_END(head);				\
	    (var) = SMR_LIST_NEXT(var, field))

#define	SMR_LIST_FOREACH_LOCKED(var, head, field)			\
	for ((var) = SMR_LIST_FIRST_LOCKED(head);			\
	    (var) != SMR_LIST_END(head);				\
	    (var) = SMR_LIST_NEXT_LOCKED(var, field))

#define	SMR_LIST_FOREACH_SAFE_LOCKED(var, head, field, tvar)		\
	for ((var) = SMR_LIST_FIRST_LOCKED(head);			\
	    (var) && ((tvar) = SMR_LIST_NEXT_LOCKED(var, field), 1);	\
	    (var) = (tvar))

/*
 * List functions.
 */
#define	SMR_LIST_INIT(head) do {					\
	(head)->smr_lh_first = SMR_LIST_END(head);			\
} while (0)

#define	SMR_LIST_INSERT_AFTER_LOCKED(listelm, elm, field) do {		\
	(elm)->field.smr_le_next = (listelm)->field.smr_le_next;	\
	if ((listelm)->field.smr_le_next != NULL)			\
		(listelm)->field.smr_le_next->field.smr_le_prev =	\
		    &(elm)->field.smr_le_next;				\
	(elm)->field.smr_le_prev = &(listelm)->field.smr_le_next;	\
	membar_producer();						\
	(listelm)->field.smr_le_next = (elm);				\
} while (0)

#define	SMR_LIST_INSERT_BEFORE_LOCKED(listelm, elm, field) do {		\
	(elm)->field.smr_le_prev = (listelm)->field.smr_le_prev;	\
	(elm)->field.smr_le_next = (listelm);				\
	membar_producer();						\
	*(listelm)->field.smr_le_prev = (elm);				\
	(listelm)->field.smr_le_prev = &(elm)->field.smr_le_next;	\
} while (0)

#define	SMR_LIST_INSERT_HEAD_LOCKED(head, elm, field) do {		\
	(elm)->field.smr_le_next = (head)->smr_lh_first;		\
	(elm)->field.smr_le_prev = &(head)->smr_lh_first;		\
	if ((head)->smr_lh_first != NULL)				\
		(head)->smr_lh_first->field.smr_le_prev =		\
		    &(elm)->field.smr_le_next;				\
	membar_producer();						\
	(head)->smr_lh_first = (elm);					\
} while (0)

#define	SMR_LIST_REMOVE_LOCKED(elm, field) do {				\
	if ((elm)->field.smr_le_next != NULL)				\
		(elm)->field.smr_le_next->field.smr_le_prev =		\
		    (elm)->field.smr_le_prev;				\
	*(elm)->field.smr_le_prev = (elm)->field.smr_le_next;		\
	/* (elm)->field.smr_le_next must be left intact so that		\
	 * concurrent readers can continue iterating. */		\
} while (0)
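
/*
 * Sketch: instead of deferring with smr_call(), a writer may wait for
 * readers synchronously using smr_barrier(), which sleeps until every
 * read-side critical section that might still observe the element has
 * ended.  foo_lock is a hypothetical rwlock serializing writers.
 *
 *	rw_enter_write(&foo_lock);
 *	SMR_LIST_REMOVE_LOCKED(f, f_list);
 *	rw_exit_write(&foo_lock);
 *	smr_barrier();
 *	free(f, M_DEVBUF, sizeof(*f));
 */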

/*
 * Tail queue definitions.
 */
#define	SMR_TAILQ_HEAD(name, type)					\
struct name {								\
	struct type *smr_tqh_first;	/* first element, SMR-protected */\
	struct type **smr_tqh_last;	/* last element */		\
}

#define	SMR_TAILQ_HEAD_INITIALIZER(head)				\
	{ .smr_tqh_first = NULL, .smr_tqh_last = &(head).smr_tqh_first }

#define	SMR_TAILQ_ENTRY(type)						\
struct {								\
	struct type *smr_tqe_next;	/* next element, SMR-protected */\
	struct type **smr_tqe_prev;	/* address of previous next element */\
}

/*
 * Tail queue access methods.
 */
#define	SMR_TAILQ_END(head)	NULL

#define	SMR_TAILQ_FIRST(head) \
	SMR_PTR_GET(&(head)->smr_tqh_first)
#define	SMR_TAILQ_NEXT(elm, field) \
	SMR_PTR_GET(&(elm)->field.smr_tqe_next)

#define	SMR_TAILQ_FIRST_LOCKED(head)		((head)->smr_tqh_first)
#define	SMR_TAILQ_NEXT_LOCKED(elm, field)	((elm)->field.smr_tqe_next)
#define	SMR_TAILQ_LAST_LOCKED(head, headname) \
	(*(((struct headname *)((head)->smr_tqh_last))->smr_tqh_last))
#define	SMR_TAILQ_EMPTY_LOCKED(head) \
	(SMR_TAILQ_FIRST_LOCKED(head) == SMR_TAILQ_END(head))

#define	SMR_TAILQ_FOREACH(var, head, field)				\
	for ((var) = SMR_TAILQ_FIRST(head);				\
	    (var) != SMR_TAILQ_END(head);				\
	    (var) = SMR_TAILQ_NEXT(var, field))

#define	SMR_TAILQ_FOREACH_LOCKED(var, head, field)			\
	for ((var) = SMR_TAILQ_FIRST_LOCKED(head);			\
	    (var) != SMR_TAILQ_END(head);				\
	    (var) = SMR_TAILQ_NEXT_LOCKED(var, field))

#define	SMR_TAILQ_FOREACH_SAFE_LOCKED(var, head, field, tvar)		\
	for ((var) = SMR_TAILQ_FIRST_LOCKED(head);			\
	    (var) && ((tvar) = SMR_TAILQ_NEXT_LOCKED(var, field), 1);	\
	    (var) = (tvar))

/*
 * Tail queue functions.
 */
#define	SMR_TAILQ_INIT(head) do {					\
	(head)->smr_tqh_first = SMR_TAILQ_END(head);			\
	(head)->smr_tqh_last = &(head)->smr_tqh_first;			\
} while (0)

#define	SMR_TAILQ_INSERT_AFTER_LOCKED(head, listelm, elm, field) do {	\
	(elm)->field.smr_tqe_next = (listelm)->field.smr_tqe_next;	\
	if ((listelm)->field.smr_tqe_next != NULL)			\
		(listelm)->field.smr_tqe_next->field.smr_tqe_prev =	\
		    &(elm)->field.smr_tqe_next;				\
	else								\
		(head)->smr_tqh_last = &(elm)->field.smr_tqe_next;	\
	(elm)->field.smr_tqe_prev = &(listelm)->field.smr_tqe_next;	\
	membar_producer();						\
	(listelm)->field.smr_tqe_next = (elm);				\
} while (0)

#define	SMR_TAILQ_INSERT_BEFORE_LOCKED(listelm, elm, field) do {	\
	(elm)->field.smr_tqe_prev = (listelm)->field.smr_tqe_prev;	\
	(elm)->field.smr_tqe_next = (listelm);				\
	membar_producer();						\
	*(listelm)->field.smr_tqe_prev = (elm);				\
	(listelm)->field.smr_tqe_prev = &(elm)->field.smr_tqe_next;	\
} while (0)

#define	SMR_TAILQ_INSERT_HEAD_LOCKED(head, elm, field) do {		\
	(elm)->field.smr_tqe_next = (head)->smr_tqh_first;		\
	(elm)->field.smr_tqe_prev = &(head)->smr_tqh_first;		\
	if ((head)->smr_tqh_first != NULL)				\
		(head)->smr_tqh_first->field.smr_tqe_prev =		\
		    &(elm)->field.smr_tqe_next;				\
	else								\
		(head)->smr_tqh_last = &(elm)->field.smr_tqe_next;	\
	membar_producer();						\
	(head)->smr_tqh_first = (elm);					\
} while (0)

#define	SMR_TAILQ_INSERT_TAIL_LOCKED(head, elm, field) do {		\
	(elm)->field.smr_tqe_next = NULL;				\
	(elm)->field.smr_tqe_prev = (head)->smr_tqh_last;		\
	membar_producer();						\
	*(head)->smr_tqh_last = (elm);					\
	(head)->smr_tqh_last = &(elm)->field.smr_tqe_next;		\
} while (0)

#define	SMR_TAILQ_REMOVE_LOCKED(head, elm, field) do {			\
	if ((elm)->field.smr_tqe_next != NULL)				\
		(elm)->field.smr_tqe_next->field.smr_tqe_prev =		\
		    (elm)->field.smr_tqe_prev;				\
	else								\
		(head)->smr_tqh_last = (elm)->field.smr_tqe_prev;	\
	*(elm)->field.smr_tqe_prev = (elm)->field.smr_tqe_next;		\
	/* (elm)->field.smr_tqe_next must be left intact so that	\
	 * concurrent readers can continue iterating. */		\
} while (0)
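
/*
 * Sketch: an SMR-protected FIFO where a locked producer appends at the
 * tail and lock-free consumers scan forward.  "fooq", foo_queue and
 * f_entry are hypothetical names.
 *
 *	SMR_TAILQ_HEAD(fooq, foo) foo_queue =
 *	    SMR_TAILQ_HEAD_INITIALIZER(foo_queue);
 *
 *	// Producer, serialized by a lock.
 *	SMR_TAILQ_INSERT_TAIL_LOCKED(&foo_queue, f, f_entry);
 *
 *	// Lock-free reader.
 *	smr_read_enter();
 *	SMR_TAILQ_FOREACH(f, &foo_queue, f_entry) {
 *		...
 *	}
 *	smr_read_leave();
 */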

#endif /* !_SYS_SMR_H_ */