/*	$NetBSD: rbtree.h,v 1.18 2022/02/27 14:18:34 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_RBTREE_H_
#define _LINUX_RBTREE_H_

#include <sys/rbtree.h>

#include <lib/libkern/libkern.h>

struct rb_root {
	struct rb_tree	rbr_tree;
};

struct rb_root_cached {
	struct rb_root	rb_root; /* Linux API name */
};
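
/*
 * struct rb_root wraps the native rbtree(3) tree, so callers must
 * initialize rbr_tree with rb_tree_ops whose rbto_node_offset locates
 * the embedded struct rb_node; the offset arithmetic in the helpers
 * below relies on that.  A minimal sketch, with hypothetical names:
 *
 *	struct foo {
 *		uint64_t	key;
 *		struct rb_node	rbnode;
 *	};
 *
 *	static const rb_tree_ops_t foo_rb_ops = {
 *		.rbto_compare_nodes = foo_compare_nodes,
 *		.rbto_compare_key = foo_compare_key,
 *		.rbto_node_offset = offsetof(struct foo, rbnode),
 *	};
 *
 *	rb_tree_init(&root.rbr_tree, &foo_rb_ops);
 */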

#define	rb_entry(P, T, F) container_of(P, T, F)
#define	rb_entry_safe(P, T, F)						      \
({									      \
	__typeof__(P) __p = (P);					      \
	__p ? container_of(__p, T, F) : NULL;				      \
})
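
/*
 * rb_entry maps a struct rb_node pointer back to its containing
 * object, exactly like container_of; rb_entry_safe additionally
 * tolerates a NULL pointer.  With the hypothetical struct foo above:
 *
 *	struct foo *f = rb_entry(rbn, struct foo, rbnode);
 *	struct foo *g = rb_entry_safe(maybe_null, struct foo, rbnode);
 */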

/*
 * Several of these functions take const inputs and return non-const
 * outputs.  That is a deliberate choice.  It would be better if these
 * functions could be const-polymorphic -- return const if given const,
 * return non-const if given non-const -- but C doesn't let us express
 * that.  We are using them to adapt Linux code that is defined in
 * terms of token-substitution macros, without types of their own,
 * which happen to work out with both const and non-const variants.
 * Presumably the Linux code compiles upstream and has some level of
 * const type-checking in Linux, so this abuse of __UNCONST does not
 * carry substantial risk over to this code here.
 */

static inline bool
RB_EMPTY_ROOT(const struct rb_root *root)
{

	return RB_TREE_MIN(__UNCONST(&root->rbr_tree)) == NULL;
}

static inline struct rb_node *
rb_first(const struct rb_root *root)
{
	char *vnode = RB_TREE_MIN(__UNCONST(&root->rbr_tree));

	if (vnode)
		vnode += root->rbr_tree.rbt_ops->rbto_node_offset;
	return (struct rb_node *)vnode;
}

static inline struct rb_node *
rb_next2(const struct rb_root *root, const struct rb_node *rbnode)
{
	char *vnode = (char *)__UNCONST(rbnode);

	vnode -= root->rbr_tree.rbt_ops->rbto_node_offset;
	vnode = RB_TREE_NEXT(__UNCONST(&root->rbr_tree), vnode);
	if (vnode)
		vnode += root->rbr_tree.rbt_ops->rbto_node_offset;
	return (struct rb_node *)vnode;
}

static inline struct rb_node *
rb_last(const struct rb_root *root)
{
	char *vnode = RB_TREE_MAX(__UNCONST(&root->rbr_tree));

	if (vnode)
		vnode += root->rbr_tree.rbt_ops->rbto_node_offset;
	return (struct rb_node *)vnode;
}

static inline struct rb_node *
rb_first_cached(const struct rb_root_cached *root)
{
	return rb_first(&root->rb_root);
}
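
/*
 * Unlike Linux's rb_next(), rb_next2() takes the root too, because
 * the node offset lives in the tree's rb_tree_ops.  A hypothetical
 * in-order walk over a tree of struct foo:
 *
 *	struct rb_node *rbn;
 *
 *	for (rbn = rb_first(&root); rbn != NULL;
 *	     rbn = rb_next2(&root, rbn)) {
 *		struct foo *f = rb_entry(rbn, struct foo, rbnode);
 *		...
 *	}
 */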

static inline void
rb_erase(struct rb_node *rbnode, struct rb_root *root)
{
	struct rb_tree *tree = &root->rbr_tree;
	void *node = (char *)rbnode - tree->rbt_ops->rbto_node_offset;

	rb_tree_remove_node(tree, node);
}

static inline void
rb_erase_cached(struct rb_node *rbnode, struct rb_root_cached *root)
{
	rb_erase(rbnode, &root->rb_root);
}

static inline void
rb_replace_node(struct rb_node *old, struct rb_node *new, struct rb_root *root)
{
	void *vold = (char *)old - root->rbr_tree.rbt_ops->rbto_node_offset;
	void *vnew = (char *)new - root->rbr_tree.rbt_ops->rbto_node_offset;
	void *collision __diagused;

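	/*
	 * Linux's rb_replace_node splices the new node in place; here
	 * we remove the old node and reinsert the new one, so both
	 * must sort to the same position.  rb_tree_insert_node()
	 * returns an already-present node on a key collision, hence
	 * the assertion that the insertion actually took.
	 */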
	rb_tree_remove_node(&root->rbr_tree, vold);
	collision = rb_tree_insert_node(&root->rbr_tree, vnew);
	KASSERT(collision == vnew);
}

static inline void
rb_replace_node_cached(struct rb_node *old, struct rb_node *new,
    struct rb_root_cached *root)
{
	rb_replace_node(old, new, &root->rb_root);
}

/*
 * This violates the rbtree(3) abstraction in order to provide
 * postorder traversal -- children first, then parents -- which makes
 * it safe for cleanup code that just frees all the nodes without
 * removing them from the tree.
 */
static inline struct rb_node *
rb_first_postorder(const struct rb_root *root)
{
	struct rb_node *node, *child;

	if ((node = root->rbr_tree.rbt_root) == NULL)
		return NULL;
	for (;; node = child) {
		if ((child = node->rb_left) != NULL)
			continue;
		if ((child = node->rb_right) != NULL)
			continue;
		return node;
	}
}

static inline struct rb_node *
rb_next2_postorder(const struct rb_root *root, struct rb_node *node)
{
	struct rb_node *parent, *child;

	if (node == NULL)
		return NULL;

	/*
	 * If we're at the root, there are no more siblings and no
	 * parent, so post-order iteration is done.
	 */
	if (RB_ROOT_P(&root->rbr_tree, node))
		return NULL;
	parent = RB_FATHER(node);	/* kinda sexist, innit */
	KASSERT(parent != NULL);

	/*
	 * If we're the right child, we've already processed the left
	 * child (which may be gone by now), so just return the parent.
	 */
	if (RB_RIGHT_P(node))
		return parent;

	/*
	 * Otherwise, descend to the first node in postorder under our
	 * right sibling -- or return the parent if there is none.
	 */
	if ((node = parent->rb_right) == NULL)
		return parent;
	for (;; node = child) {
		if ((child = node->rb_left) != NULL)
			continue;
		if ((child = node->rb_right) != NULL)
			continue;
		return node;
	}
}

/*
 * Extension to the Linux API.  In Linux, a struct rb_root can be
 * copied with `=' or `memcpy' and needs no additional relocation;
 * here the root node points back into the struct rb_root, so moving
 * a tree requires this fixup.
 */
static inline void
rb_move(struct rb_root *to, struct rb_root *from)
{
	struct rb_node *root;

	*to = *from;
	memset(from, 0, sizeof(*from)); /* paranoia */
	if ((root = to->rbr_tree.rbt_root) == NULL)
		return;

	/*
	 * The root node's `parent' is a strict-aliasing-unsafe hack
	 * pointing at the root of the tree.
	 */
	RB_SET_FATHER(root, (struct rb_node *)(void *)&to->rbr_tree.rbt_root);
}
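
/*
 * Hypothetical use, after copying an object that embeds a struct
 * rb_root to a new location:
 *
 *	*newobj = *oldobj;
 *	rb_move(&newobj->foo_tree, &oldobj->foo_tree);
 */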

#define	rbtree_postorder_for_each_entry_safe(ENTRY, TMP, ROOT, FIELD)	      \
	for ((ENTRY) = rb_entry_safe(rb_first_postorder(ROOT),		      \
		    __typeof__(*(ENTRY)), FIELD);			      \
		((ENTRY) != NULL &&					      \
		    ((TMP) = rb_entry_safe(rb_next2_postorder((ROOT),	      \
			    &(ENTRY)->FIELD), __typeof__(*(ENTRY)), FIELD),   \
			1));						      \
		(ENTRY) = (TMP))
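
/*
 * Typical cleanup with the postorder iterator, freeing every entry of
 * a hypothetical tree of struct foo without removing each node first:
 *
 *	struct foo *f, *tmp;
 *
 *	rbtree_postorder_for_each_entry_safe(f, tmp, &root, rbnode) {
 *		kmem_free(f, sizeof(*f));
 *	}
 */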

#endif  /* _LINUX_RBTREE_H_ */