xref: /minix/sys/sys/queue.h (revision 84d9c625)
1 /*	$NetBSD: queue.h,v 1.64 2013/11/27 12:24:56 joerg Exp $	*/
2 
3 /*
4  * Copyright (c) 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)queue.h	8.5 (Berkeley) 8/20/94
32  */
33 
34 #ifndef	_SYS_QUEUE_H_
35 #define	_SYS_QUEUE_H_
36 
37 /*
38  * This file defines six types of data structures: singly-linked lists,
39  * lists, simple queues, tail queues, singly-linked tail queues, and
39  * circular queues.
40  *
41  * A singly-linked list is headed by a single forward pointer. The
42  * elements are singly linked for minimum space and pointer manipulation
43  * overhead at the expense of O(n) removal for arbitrary elements. New
44  * elements can be added to the list after an existing element or at the
45  * head of the list.  Elements being removed from the head of the list
46  * should use the explicit macro for this purpose for optimum
47  * efficiency. A singly-linked list may only be traversed in the forward
48  * direction.  Singly-linked lists are ideal for applications with large
49  * datasets and few or no removals or for implementing a LIFO queue.
50  *
51  * A list is headed by a single forward pointer (or an array of forward
52  * pointers for a hash table header). The elements are doubly linked
53  * so that an arbitrary element can be removed without a need to
54  * traverse the list. New elements can be added to the list before
55  * or after an existing element or at the head of the list. A list
56  * may only be traversed in the forward direction.
57  *
58  * A simple queue is headed by a pair of pointers, one the head of the
59  * list and the other to the tail of the list. The elements are singly
60  * linked to save space, so elements can only be removed from the
61  * head of the list. New elements can be added to the list after
62  * an existing element, at the head of the list, or at the end of the
63  * list. A simple queue may only be traversed in the forward direction.
64  *
65  * A tail queue is headed by a pair of pointers, one to the head of the
66  * list and the other to the tail of the list. The elements are doubly
67  * linked so that an arbitrary element can be removed without a need to
68  * traverse the list. New elements can be added to the list before or
69  * after an existing element, at the head of the list, or at the end of
70  * the list. A tail queue may be traversed in either direction.
71  *
72  * A circle queue is headed by a pair of pointers, one to the head of the
73  * list and the other to the tail of the list. The elements are doubly
74  * linked so that an arbitrary element can be removed without a need to
75  * traverse the list. New elements can be added to the list before or after
76  * an existing element, at the head of the list, or at the end of the list.
77  * A circle queue may be traversed in either direction, but has a more
78  * complex end of list detection.
79  *
80  * For details on the use of these macros, see the queue(3) manual page.
81  */
82 
83 /*
84  * Include the definition of NULL only on NetBSD because sys/null.h
85  * is not available elsewhere.  This conditional makes the header
86  * portable and it can simply be dropped verbatim into any system.
87  * The caveat is that on other systems some other header
88  * must provide NULL before the macros can be used.
89  */
90 #if defined(__NetBSD__) || defined(__minix)
91 #include <sys/null.h>
92 #endif
93 
94 /*
95  * Singly-linked List definitions.
96  */
97 #define	SLIST_HEAD(name, type)						\
98 struct name {								\
99 	struct type *slh_first;	/* first element */			\
100 }
101 
102 #define	SLIST_HEAD_INITIALIZER(head)					\
103 	{ NULL }
104 
105 #define	SLIST_ENTRY(type)						\
106 struct {								\
107 	struct type *sle_next;	/* next element */			\
108 }
109 
110 /*
111  * Singly-linked List access methods.
112  */
113 #define	SLIST_FIRST(head)	((head)->slh_first)
114 #define	SLIST_END(head)		NULL
115 #define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
116 #define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
117 
118 #define	SLIST_FOREACH(var, head, field)					\
119 	for((var) = (head)->slh_first;					\
120 	    (var) != SLIST_END(head);					\
121 	    (var) = (var)->field.sle_next)
122 
/* _SAFE variant: caches the successor in tvar first, so the body may remove var. */
123 #define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
124 	for ((var) = SLIST_FIRST((head));				\
125 	    (var) != SLIST_END(head) &&					\
126 	    ((tvar) = SLIST_NEXT((var), field), 1);			\
127 	    (var) = (tvar))
128 
129 /*
130  * Singly-linked List functions.
131  */
132 #define	SLIST_INIT(head) do {						\
133 	(head)->slh_first = SLIST_END(head);				\
134 } while (/*CONSTCOND*/0)
135 
136 #define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
137 	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
138 	(slistelm)->field.sle_next = (elm);				\
139 } while (/*CONSTCOND*/0)
140 
141 #define	SLIST_INSERT_HEAD(head, elm, field) do {			\
142 	(elm)->field.sle_next = (head)->slh_first;			\
143 	(head)->slh_first = (elm);					\
144 } while (/*CONSTCOND*/0)
145 
/* Unlink the element following slistelm; slistelm must have a successor. */
146 #define	SLIST_REMOVE_AFTER(slistelm, field) do {			\
147 	(slistelm)->field.sle_next =					\
148 	    SLIST_NEXT(SLIST_NEXT((slistelm), field), field);		\
149 } while (/*CONSTCOND*/0)
150 
/* NOTE(review): no empty-list check -- dereferences slh_first unconditionally. */
151 #define	SLIST_REMOVE_HEAD(head, field) do {				\
152 	(head)->slh_first = (head)->slh_first->field.sle_next;		\
153 } while (/*CONSTCOND*/0)
154 
/* O(n): linear scan for elm's predecessor; elm must be present on the list. */
155 #define	SLIST_REMOVE(head, elm, type, field) do {			\
156 	if ((head)->slh_first == (elm)) {				\
157 		SLIST_REMOVE_HEAD((head), field);			\
158 	}								\
159 	else {								\
160 		struct type *curelm = (head)->slh_first;		\
161 		while(curelm->field.sle_next != (elm))			\
162 			curelm = curelm->field.sle_next;		\
163 		curelm->field.sle_next =				\
164 		    curelm->field.sle_next->field.sle_next;		\
165 	}								\
166 } while (/*CONSTCOND*/0)
167 
168 
169 /*
170  * List definitions.
171  */
172 #define	LIST_HEAD(name, type)						\
173 struct name {								\
174 	struct type *lh_first;	/* first element */			\
175 }
176 
177 #define	LIST_HEAD_INITIALIZER(head)					\
178 	{ NULL }
179 
180 #define	LIST_ENTRY(type)						\
181 struct {								\
182 	struct type *le_next;	/* next element */			\
183 	struct type **le_prev;	/* address of previous next element */	\
184 }
185 
186 /*
187  * List access methods.
188  */
189 #define	LIST_FIRST(head)		((head)->lh_first)
190 #define	LIST_END(head)			NULL
191 #define	LIST_EMPTY(head)		((head)->lh_first == LIST_END(head))
192 #define	LIST_NEXT(elm, field)		((elm)->field.le_next)
193 
194 #define	LIST_FOREACH(var, head, field)					\
195 	for ((var) = ((head)->lh_first);				\
196 	    (var) != LIST_END(head);					\
197 	    (var) = ((var)->field.le_next))
198 
/* _SAFE variant: caches the successor in tvar first, so the body may remove var. */
199 #define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
200 	for ((var) = LIST_FIRST((head));				\
201 	    (var) != LIST_END(head) &&					\
202 	    ((tvar) = LIST_NEXT((var), field), 1);			\
203 	    (var) = (tvar))
204 
205 /*
206  * List functions.
207  */
/*
 * Debug checks (kernel builds with QUEUEDEBUG only): verify forward/back link
 * consistency before each operation and poison a removed entry's pointers
 * with (void *)1L so use-after-remove faults quickly.
 */
208 #if defined(_KERNEL) && defined(QUEUEDEBUG)
209 #define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
210 	if ((head)->lh_first &&						\
211 	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
212 		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
213 #define	QUEUEDEBUG_LIST_OP(elm, field)					\
214 	if ((elm)->field.le_next &&					\
215 	    (elm)->field.le_next->field.le_prev !=			\
216 	    &(elm)->field.le_next)					\
217 		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
218 	if (*(elm)->field.le_prev != (elm))				\
219 		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
220 #define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
221 	(elm)->field.le_next = (void *)1L;				\
222 	(elm)->field.le_prev = (void *)1L;
223 #else
224 #define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
225 #define	QUEUEDEBUG_LIST_OP(elm, field)
226 #define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
227 #endif
228 
229 #define	LIST_INIT(head) do {						\
230 	(head)->lh_first = LIST_END(head);				\
231 } while (/*CONSTCOND*/0)
232 
/*
 * NOTE(review): LIST_END(head) below names no 'head' parameter of this
 * macro; it is harmless only because LIST_END() ignores its argument and
 * expands to NULL.
 */
233 #define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
234 	QUEUEDEBUG_LIST_OP((listelm), field)				\
235 	if (((elm)->field.le_next = (listelm)->field.le_next) != 	\
236 	    LIST_END(head))						\
237 		(listelm)->field.le_next->field.le_prev =		\
238 		    &(elm)->field.le_next;				\
239 	(listelm)->field.le_next = (elm);				\
240 	(elm)->field.le_prev = &(listelm)->field.le_next;		\
241 } while (/*CONSTCOND*/0)
242 
243 #define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
244 	QUEUEDEBUG_LIST_OP((listelm), field)				\
245 	(elm)->field.le_prev = (listelm)->field.le_prev;		\
246 	(elm)->field.le_next = (listelm);				\
247 	*(listelm)->field.le_prev = (elm);				\
248 	(listelm)->field.le_prev = &(elm)->field.le_next;		\
249 } while (/*CONSTCOND*/0)
250 
251 #define	LIST_INSERT_HEAD(head, elm, field) do {				\
252 	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
253 	if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
254 		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
255 	(head)->lh_first = (elm);					\
256 	(elm)->field.le_prev = &(head)->lh_first;			\
257 } while (/*CONSTCOND*/0)
258 
259 #define	LIST_REMOVE(elm, field) do {					\
260 	QUEUEDEBUG_LIST_OP((elm), field)				\
261 	if ((elm)->field.le_next != NULL)				\
262 		(elm)->field.le_next->field.le_prev = 			\
263 		    (elm)->field.le_prev;				\
264 	*(elm)->field.le_prev = (elm)->field.le_next;			\
265 	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
266 } while (/*CONSTCOND*/0)
267 
/*
 * Replace elm with elm2 in place.  elm's own links are left as-is unless
 * QUEUEDEBUG poisons them; do not keep using elm afterwards.
 */
268 #define LIST_REPLACE(elm, elm2, field) do {				\
269 	if (((elm2)->field.le_next = (elm)->field.le_next) != NULL)	\
270 		(elm2)->field.le_next->field.le_prev =			\
271 		    &(elm2)->field.le_next;				\
272 	(elm2)->field.le_prev = (elm)->field.le_prev;			\
273 	*(elm2)->field.le_prev = (elm2);				\
274 	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
275 } while (/*CONSTCOND*/0)
276 
277 /*
278  * Simple queue definitions.
279  */
280 #define	SIMPLEQ_HEAD(name, type)					\
281 struct name {								\
282 	struct type *sqh_first;	/* first element */			\
283 	struct type **sqh_last;	/* addr of last next element */		\
284 }
285 
286 #define	SIMPLEQ_HEAD_INITIALIZER(head)					\
287 	{ NULL, &(head).sqh_first }
288 
289 #define	SIMPLEQ_ENTRY(type)						\
290 struct {								\
291 	struct type *sqe_next;	/* next element */			\
292 }
293 
294 /*
295  * Simple queue access methods.
296  */
297 #define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
298 #define	SIMPLEQ_END(head)		NULL
299 #define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == SIMPLEQ_END(head))
300 #define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
301 
302 #define	SIMPLEQ_FOREACH(var, head, field)				\
303 	for ((var) = ((head)->sqh_first);				\
304 	    (var) != SIMPLEQ_END(head);					\
305 	    (var) = ((var)->field.sqe_next))
306 
/* _SAFE variant: caches the successor in next, so the body may remove var. */
307 #define	SIMPLEQ_FOREACH_SAFE(var, head, field, next)			\
308 	for ((var) = ((head)->sqh_first);				\
309 	    (var) != SIMPLEQ_END(head) &&				\
310 	    ((next = ((var)->field.sqe_next)), 1);			\
311 	    (var) = (next))
312 
313 /*
314  * Simple queue functions.
315  */
316 #define	SIMPLEQ_INIT(head) do {						\
317 	(head)->sqh_first = NULL;					\
318 	(head)->sqh_last = &(head)->sqh_first;				\
319 } while (/*CONSTCOND*/0)
320 
321 #define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
322 	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
323 		(head)->sqh_last = &(elm)->field.sqe_next;		\
324 	(head)->sqh_first = (elm);					\
325 } while (/*CONSTCOND*/0)
326 
327 #define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
328 	(elm)->field.sqe_next = NULL;					\
329 	*(head)->sqh_last = (elm);					\
330 	(head)->sqh_last = &(elm)->field.sqe_next;			\
331 } while (/*CONSTCOND*/0)
332 
333 #define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
334 	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
335 		(head)->sqh_last = &(elm)->field.sqe_next;		\
336 	(listelm)->field.sqe_next = (elm);				\
337 } while (/*CONSTCOND*/0)
338 
/* NOTE(review): no empty-queue check -- dereferences sqh_first unconditionally. */
339 #define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
340 	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
341 		(head)->sqh_last = &(head)->sqh_first;			\
342 } while (/*CONSTCOND*/0)
343 
/* Unlink the element following elm; elm must have a successor. */
344 #define SIMPLEQ_REMOVE_AFTER(head, elm, field) do {			\
345 	if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
346 	    == NULL)							\
347 		(head)->sqh_last = &(elm)->field.sqe_next;		\
348 } while (/*CONSTCOND*/0)
349 
/* O(n): linear scan for elm's predecessor; elm must be present on the queue. */
350 #define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
351 	if ((head)->sqh_first == (elm)) {				\
352 		SIMPLEQ_REMOVE_HEAD((head), field);			\
353 	} else {							\
354 		struct type *curelm = (head)->sqh_first;		\
355 		while (curelm->field.sqe_next != (elm))			\
356 			curelm = curelm->field.sqe_next;		\
357 		if ((curelm->field.sqe_next =				\
358 			curelm->field.sqe_next->field.sqe_next) == NULL) \
359 			    (head)->sqh_last = &(curelm)->field.sqe_next; \
360 	}								\
361 } while (/*CONSTCOND*/0)
362 
/* Append head2 onto head1 and reset head2 to empty; O(1). */
363 #define	SIMPLEQ_CONCAT(head1, head2) do {				\
364 	if (!SIMPLEQ_EMPTY((head2))) {					\
365 		*(head1)->sqh_last = (head2)->sqh_first;		\
366 		(head1)->sqh_last = (head2)->sqh_last;		\
367 		SIMPLEQ_INIT((head2));					\
368 	}								\
369 } while (/*CONSTCOND*/0)
370 
/*
 * Recover the last element from sqh_last via offsetof().
 * NOTE(review): relies on offsetof(); this header does not include
 * <stddef.h> itself -- confirm callers have it in scope.
 */
371 #define	SIMPLEQ_LAST(head, type, field)					\
372 	(SIMPLEQ_EMPTY((head)) ?						\
373 		NULL :							\
374 	        ((struct type *)(void *)				\
375 		((char *)((head)->sqh_last) - offsetof(struct type, field))))
376 
377 /*
378  * Tail queue definitions.
379  */
380 #define	_TAILQ_HEAD(name, type, qual)					\
381 struct name {								\
382 	qual type *tqh_first;		/* first element */		\
383 	qual type *qual *tqh_last;	/* addr of last next element */	\
384 }
385 #define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)
386 
387 #define	TAILQ_HEAD_INITIALIZER(head)					\
388 	{ TAILQ_END(head), &(head).tqh_first }
389 
390 #define	_TAILQ_ENTRY(type, qual)					\
391 struct {								\
392 	qual type *tqe_next;		/* next element */		\
393 	qual type *qual *tqe_prev;	/* address of previous next element */\
394 }
395 #define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)
396 
397 /*
398  * Tail queue access methods.
399  */
400 #define	TAILQ_FIRST(head)		((head)->tqh_first)
401 #define	TAILQ_END(head)			(NULL)
402 #define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
/*
 * TAILQ_LAST/TAILQ_PREV reinterpret the stored tqh_last/tqe_prev pointer as
 * a pointer to a struct headname in order to walk backwards; headname must
 * be the struct tag used in the TAILQ_HEAD() declaration.
 */
403 #define	TAILQ_LAST(head, headname) \
404 	(*(((struct headname *)((head)->tqh_last))->tqh_last))
405 #define	TAILQ_PREV(elm, headname, field) \
406 	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
407 #define	TAILQ_EMPTY(head)		(TAILQ_FIRST(head) == TAILQ_END(head))
408 
409 
410 #define	TAILQ_FOREACH(var, head, field)					\
411 	for ((var) = ((head)->tqh_first);				\
412 	    (var) != TAILQ_END(head);					\
413 	    (var) = ((var)->field.tqe_next))
414 
/* _SAFE variants cache the next/prev element first, so the body may remove var. */
415 #define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
416 	for ((var) = ((head)->tqh_first);				\
417 	    (var) != TAILQ_END(head) &&					\
418 	    ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
419 
420 #define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
421 	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
422 	    (var) != TAILQ_END(head);					\
423 	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
424 
425 #define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev)	\
426 	for ((var) = TAILQ_LAST((head), headname);			\
427 	    (var) != TAILQ_END(head) && 				\
428 	    ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
429 
430 /*
431  * Tail queue functions.
432  */
/*
 * Debug checks (kernel builds with QUEUEDEBUG only): verify link consistency
 * before each operation and poison a removed entry's pointers with
 * (void *)1L so use-after-remove faults quickly.
 */
433 #if defined(_KERNEL) && defined(QUEUEDEBUG)
434 #define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
435 	if ((head)->tqh_first &&					\
436 	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
437 		panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
438 #define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
439 	if (*(head)->tqh_last != NULL)					\
440 		panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
441 #define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
442 	if ((elm)->field.tqe_next &&					\
443 	    (elm)->field.tqe_next->field.tqe_prev !=			\
444 	    &(elm)->field.tqe_next)					\
445 		panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
446 	if (*(elm)->field.tqe_prev != (elm))				\
447 		panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
448 #define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
449 	if ((elm)->field.tqe_next == NULL &&				\
450 	    (head)->tqh_last != &(elm)->field.tqe_next)			\
451 		panic("TAILQ_PREREMOVE head %p elm %p %s:%d",		\
452 		      (head), (elm), __FILE__, __LINE__);
453 #define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
454 	(elm)->field.tqe_next = (void *)1L;				\
455 	(elm)->field.tqe_prev = (void *)1L;
456 #else
457 #define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
458 #define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
459 #define	QUEUEDEBUG_TAILQ_OP(elm, field)
460 #define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
461 #define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
462 #endif
463 
464 #define	TAILQ_INIT(head) do {						\
465 	(head)->tqh_first = TAILQ_END(head);				\
466 	(head)->tqh_last = &(head)->tqh_first;				\
467 } while (/*CONSTCOND*/0)
468 
469 #define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
470 	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
471 	if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
472 		(head)->tqh_first->field.tqe_prev =			\
473 		    &(elm)->field.tqe_next;				\
474 	else								\
475 		(head)->tqh_last = &(elm)->field.tqe_next;		\
476 	(head)->tqh_first = (elm);					\
477 	(elm)->field.tqe_prev = &(head)->tqh_first;			\
478 } while (/*CONSTCOND*/0)
479 
480 #define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
481 	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
482 	(elm)->field.tqe_next = TAILQ_END(head);			\
483 	(elm)->field.tqe_prev = (head)->tqh_last;			\
484 	*(head)->tqh_last = (elm);					\
485 	(head)->tqh_last = &(elm)->field.tqe_next;			\
486 } while (/*CONSTCOND*/0)
487 
488 #define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
489 	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
490 	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != 	\
491 	    TAILQ_END(head))						\
492 		(elm)->field.tqe_next->field.tqe_prev = 		\
493 		    &(elm)->field.tqe_next;				\
494 	else								\
495 		(head)->tqh_last = &(elm)->field.tqe_next;		\
496 	(listelm)->field.tqe_next = (elm);				\
497 	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
498 } while (/*CONSTCOND*/0)
499 
500 #define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
501 	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
502 	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
503 	(elm)->field.tqe_next = (listelm);				\
504 	*(listelm)->field.tqe_prev = (elm);				\
505 	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
506 } while (/*CONSTCOND*/0)
507 
508 #define	TAILQ_REMOVE(head, elm, field) do {				\
509 	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
510 	QUEUEDEBUG_TAILQ_OP((elm), field)				\
511 	if (((elm)->field.tqe_next) != TAILQ_END(head))			\
512 		(elm)->field.tqe_next->field.tqe_prev = 		\
513 		    (elm)->field.tqe_prev;				\
514 	else								\
515 		(head)->tqh_last = (elm)->field.tqe_prev;		\
516 	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
517 	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
518 } while (/*CONSTCOND*/0)
519 
/*
 * Replace elm with elm2 in place.  elm's own links are left as-is unless
 * QUEUEDEBUG poisons them; do not keep using elm afterwards.
 */
520 #define TAILQ_REPLACE(head, elm, elm2, field) do {                      \
521         if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != 	\
522 	    TAILQ_END(head))   						\
523                 (elm2)->field.tqe_next->field.tqe_prev =                \
524                     &(elm2)->field.tqe_next;                            \
525         else                                                            \
526                 (head)->tqh_last = &(elm2)->field.tqe_next;             \
527         (elm2)->field.tqe_prev = (elm)->field.tqe_prev;                 \
528         *(elm2)->field.tqe_prev = (elm2);                               \
529 	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
530 } while (/*CONSTCOND*/0)
531 
/* Append head2 onto head1 and reset head2 to empty; O(1). */
532 #define	TAILQ_CONCAT(head1, head2, field) do {				\
533 	if (!TAILQ_EMPTY(head2)) {					\
534 		*(head1)->tqh_last = (head2)->tqh_first;		\
535 		(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last;	\
536 		(head1)->tqh_last = (head2)->tqh_last;			\
537 		TAILQ_INIT((head2));					\
538 	}								\
539 } while (/*CONSTCOND*/0)
540 
541 /*
542  * Singly-linked Tail queue declarations.
543  */
544 #define	STAILQ_HEAD(name, type)						\
545 struct name {								\
546 	struct type *stqh_first;	/* first element */		\
547 	struct type **stqh_last;	/* addr of last next element */	\
548 }
549 
550 #define	STAILQ_HEAD_INITIALIZER(head)					\
551 	{ NULL, &(head).stqh_first }
552 
553 #define	STAILQ_ENTRY(type)						\
554 struct {								\
555 	struct type *stqe_next;	/* next element */			\
556 }
557 
558 /*
559  * Singly-linked Tail queue access methods.
560  */
561 #define	STAILQ_FIRST(head)	((head)->stqh_first)
562 #define	STAILQ_END(head)	NULL
563 #define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
564 #define	STAILQ_EMPTY(head)	(STAILQ_FIRST(head) == STAILQ_END(head))
565 
566 /*
567  * Singly-linked Tail queue functions.
568  */
569 #define	STAILQ_INIT(head) do {						\
570 	(head)->stqh_first = NULL;					\
571 	(head)->stqh_last = &(head)->stqh_first;				\
572 } while (/*CONSTCOND*/0)
573 
574 #define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
575 	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
576 		(head)->stqh_last = &(elm)->field.stqe_next;		\
577 	(head)->stqh_first = (elm);					\
578 } while (/*CONSTCOND*/0)
579 
580 #define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
581 	(elm)->field.stqe_next = NULL;					\
582 	*(head)->stqh_last = (elm);					\
583 	(head)->stqh_last = &(elm)->field.stqe_next;			\
584 } while (/*CONSTCOND*/0)
585 
586 #define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
587 	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
588 		(head)->stqh_last = &(elm)->field.stqe_next;		\
589 	(listelm)->field.stqe_next = (elm);				\
590 } while (/*CONSTCOND*/0)
591 
/* NOTE(review): no empty-queue check -- dereferences stqh_first unconditionally. */
592 #define	STAILQ_REMOVE_HEAD(head, field) do {				\
593 	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
594 		(head)->stqh_last = &(head)->stqh_first;			\
595 } while (/*CONSTCOND*/0)
596 
/* O(n): linear scan for elm's predecessor; elm must be present on the queue. */
597 #define	STAILQ_REMOVE(head, elm, type, field) do {			\
598 	if ((head)->stqh_first == (elm)) {				\
599 		STAILQ_REMOVE_HEAD((head), field);			\
600 	} else {							\
601 		struct type *curelm = (head)->stqh_first;		\
602 		while (curelm->field.stqe_next != (elm))			\
603 			curelm = curelm->field.stqe_next;		\
604 		if ((curelm->field.stqe_next =				\
605 			curelm->field.stqe_next->field.stqe_next) == NULL) \
606 			    (head)->stqh_last = &(curelm)->field.stqe_next; \
607 	}								\
608 } while (/*CONSTCOND*/0)
609 
610 #define	STAILQ_FOREACH(var, head, field)				\
611 	for ((var) = ((head)->stqh_first);				\
612 		(var);							\
613 		(var) = ((var)->field.stqe_next))
614 
/* _SAFE variant: caches the successor in tvar first, so the body may remove var. */
615 #define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
616 	for ((var) = STAILQ_FIRST((head));				\
617 	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
618 	    (var) = (tvar))
619 
/* Append head2 onto head1 and reset head2 to empty; O(1). */
620 #define	STAILQ_CONCAT(head1, head2) do {				\
621 	if (!STAILQ_EMPTY((head2))) {					\
622 		*(head1)->stqh_last = (head2)->stqh_first;		\
623 		(head1)->stqh_last = (head2)->stqh_last;		\
624 		STAILQ_INIT((head2));					\
625 	}								\
626 } while (/*CONSTCOND*/0)
627 
/*
 * Recover the last element from stqh_last via offsetof().
 * NOTE(review): relies on offsetof(); this header does not include
 * <stddef.h> itself -- confirm callers have it in scope.
 */
628 #define	STAILQ_LAST(head, type, field)					\
629 	(STAILQ_EMPTY((head)) ?						\
630 		NULL :							\
631 	        ((struct type *)(void *)				\
632 		((char *)((head)->stqh_last) - offsetof(struct type, field))))
633 
634 
635 #ifndef _KERNEL
636 /*
637  * Circular queue definitions. Do not use. We still keep the macros
638  * for compatibility but because of pointer aliasing issues their use
639  * is discouraged!
640  */
641 
642 /*
643  * __launder_type():  We use this ugly hack to work around the compiler
644  * noticing that two types may not alias each other and elide tests in code.
645  * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
646  * 'struct type *' (see CIRCLEQ_HEAD()).  Modern compilers (such as GCC
647  * 4.8) declare these comparisons as always false, causing the code to
648  * not run as designed.
649  *
650  * This hack is only to be used for comparisons and thus can be fully const.
651  * Do not use for assignment.
652  *
653  * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
654  * this by changing the head/tail sentinel values, but see the note above
655  * this one.
656  */
/*
 * Pass a pointer through an empty asm barrier so the optimizer loses track
 * of its origin and cannot prove it aliases (or doesn't alias) anything.
 * Identity function; for comparison use only (see the rationale above).
 */
657 static __inline const void * __launder_type(const void *);
658 static __inline const void *
659 __launder_type(const void *__x)
660 {
661 	__asm __volatile("" : "+r" (__x));
662 	return __x;
663 }
664 
/*
 * NOTE(review): this whole CIRCLEQ section sits inside #ifndef _KERNEL, so
 * the defined(_KERNEL) test below can never be true here -- these debug
 * macros always expand to nothing in this header as shipped.
 */
665 #if defined(_KERNEL) && defined(QUEUEDEBUG)
666 #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
667 	if ((head)->cqh_first != CIRCLEQ_ENDC(head) &&			\
668 	    (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head))	\
669 		panic("CIRCLEQ head forw %p %s:%d", (head),		\
670 		      __FILE__, __LINE__);				\
671 	if ((head)->cqh_last != CIRCLEQ_ENDC(head) &&			\
672 	    (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head))	\
673 		panic("CIRCLEQ head back %p %s:%d", (head),		\
674 		      __FILE__, __LINE__);
675 #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
676 	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) {		\
677 		if ((head)->cqh_last != (elm))				\
678 			panic("CIRCLEQ elm last %p %s:%d", (elm),	\
679 			      __FILE__, __LINE__);			\
680 	} else {							\
681 		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
682 			panic("CIRCLEQ elm forw %p %s:%d", (elm),	\
683 			      __FILE__, __LINE__);			\
684 	}								\
685 	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) {		\
686 		if ((head)->cqh_first != (elm))				\
687 			panic("CIRCLEQ elm first %p %s:%d", (elm),	\
688 			      __FILE__, __LINE__);			\
689 	} else {							\
690 		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
691 			panic("CIRCLEQ elm prev %p %s:%d", (elm),	\
692 			      __FILE__, __LINE__);			\
693 	}
694 #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
695 	(elm)->field.cqe_next = (void *)1L;				\
696 	(elm)->field.cqe_prev = (void *)1L;
697 #else
698 #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
699 #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
700 #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
701 #endif
702 
703 #define	CIRCLEQ_HEAD(name, type)					\
704 struct name {								\
705 	struct type *cqh_first;		/* first element */		\
706 	struct type *cqh_last;		/* last element */		\
707 }
708 
709 #define	CIRCLEQ_HEAD_INITIALIZER(head)					\
710 	{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
711 
712 #define	CIRCLEQ_ENTRY(type)						\
713 struct {								\
714 	struct type *cqe_next;		/* next element */		\
715 	struct type *cqe_prev;		/* previous element */		\
716 }
717 
718 /*
719  * Circular queue functions.
720  */
/* The head itself is the end-of-list sentinel: links store a cast head pointer. */
721 #define	CIRCLEQ_INIT(head) do {						\
722 	(head)->cqh_first = CIRCLEQ_END(head);				\
723 	(head)->cqh_last = CIRCLEQ_END(head);				\
724 } while (/*CONSTCOND*/0)
725 
726 #define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
727 	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
728 	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
729 	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
730 	(elm)->field.cqe_prev = (listelm);				\
731 	if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head))		\
732 		(head)->cqh_last = (elm);				\
733 	else								\
734 		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
735 	(listelm)->field.cqe_next = (elm);				\
736 } while (/*CONSTCOND*/0)
737 
738 #define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
739 	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
740 	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
741 	(elm)->field.cqe_next = (listelm);				\
742 	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
743 	if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head))		\
744 		(head)->cqh_first = (elm);				\
745 	else								\
746 		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
747 	(listelm)->field.cqe_prev = (elm);				\
748 } while (/*CONSTCOND*/0)
749 
750 #define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
751 	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
752 	(elm)->field.cqe_next = (head)->cqh_first;			\
753 	(elm)->field.cqe_prev = CIRCLEQ_END(head);			\
754 	if ((head)->cqh_last == CIRCLEQ_ENDC(head))			\
755 		(head)->cqh_last = (elm);				\
756 	else								\
757 		(head)->cqh_first->field.cqe_prev = (elm);		\
758 	(head)->cqh_first = (elm);					\
759 } while (/*CONSTCOND*/0)
760 
761 #define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
762 	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
763 	(elm)->field.cqe_next = CIRCLEQ_END(head);			\
764 	(elm)->field.cqe_prev = (head)->cqh_last;			\
765 	if ((head)->cqh_first == CIRCLEQ_ENDC(head))			\
766 		(head)->cqh_first = (elm);				\
767 	else								\
768 		(head)->cqh_last->field.cqe_next = (elm);		\
769 	(head)->cqh_last = (elm);					\
770 } while (/*CONSTCOND*/0)
771 
772 #define	CIRCLEQ_REMOVE(head, elm, field) do {				\
773 	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
774 	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
775 	if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head))		\
776 		(head)->cqh_last = (elm)->field.cqe_prev;		\
777 	else								\
778 		(elm)->field.cqe_next->field.cqe_prev =			\
779 		    (elm)->field.cqe_prev;				\
780 	if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head))		\
781 		(head)->cqh_first = (elm)->field.cqe_next;		\
782 	else								\
783 		(elm)->field.cqe_prev->field.cqe_next =			\
784 		    (elm)->field.cqe_next;				\
785 	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
786 } while (/*CONSTCOND*/0)
787 
788 #define	CIRCLEQ_FOREACH(var, head, field)				\
789 	for ((var) = ((head)->cqh_first);				\
790 		(var) != CIRCLEQ_ENDC(head);				\
791 		(var) = ((var)->field.cqe_next))
792 
793 #define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
794 	for ((var) = ((head)->cqh_last);				\
795 		(var) != CIRCLEQ_ENDC(head);				\
796 		(var) = ((var)->field.cqe_prev))
797 
798 /*
799  * Circular queue access methods.
800  */
801 #define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
802 #define	CIRCLEQ_LAST(head)		((head)->cqh_last)
803 /* For comparisons */
804 #define	CIRCLEQ_ENDC(head)		(__launder_type(head))
805 /* For assignments */
806 #define	CIRCLEQ_END(head)		((void *)(head))
807 #define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
808 #define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
809 #define	CIRCLEQ_EMPTY(head)						\
810     (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
811 
/*
 * CIRCLEQ_LOOP_NEXT/_PREV: like CIRCLEQ_NEXT/_PREV, but wrap around at the
 * ends of the queue instead of yielding the head sentinel.
 * Fix: parenthesize 'elm' in the expansion -- the original used a bare
 * 'elm->field...' in the false arm, which mis-expands for arguments such
 * as '*p' or 'p + 1' (macro-hygiene bug; the other queue macros all
 * parenthesize their arguments).
 */
812 #define CIRCLEQ_LOOP_NEXT(head, elm, field)				\
813 	(((elm)->field.cqe_next == CIRCLEQ_ENDC(head))			\
814 	    ? ((head)->cqh_first)					\
815 	    : ((elm)->field.cqe_next))
816 #define CIRCLEQ_LOOP_PREV(head, elm, field)				\
817 	(((elm)->field.cqe_prev == CIRCLEQ_ENDC(head))			\
818 	    ? ((head)->cqh_last)					\
819 	    : ((elm)->field.cqe_prev))
820 #endif /* !_KERNEL */
821 
822 #endif	/* !_SYS_QUEUE_H_ */
823