1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
13 * disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29 #ifndef _LINUXKPI_LINUX_RBTREE_H_
30 #define _LINUXKPI_LINUX_RBTREE_H_
31
32 #ifndef _STANDALONE
33 #include <sys/stddef.h>
34 #endif
35
36 #include <sys/types.h>
37 #include <sys/tree.h>
38
/*
 * Linux-compatible red-black tree node: a thin wrapper around the
 * sys/tree.h RB_ENTRY so Linux consumers can embed it in their own
 * structures and use the familiar field names.
 */
struct rb_node {
	RB_ENTRY(rb_node)	__entry;
};
/* Map the Linux child-pointer names onto tree.h's link array. */
#define	rb_left		__entry.rbe_link[_RB_L]
#define	rb_right	__entry.rbe_link[_RB_R]
44
45 /*
46 * We provide a false structure that has the same bit pattern as tree.h
47 * presents so it matches the member names expected by linux.
48 */
49 struct rb_root {
50 struct rb_node *rb_node;
51 };
52
53 struct rb_root_cached {
54 struct rb_root rb_root;
55 struct rb_node *rb_leftmost;
56 };
57
58 /*
59 * In linux all of the comparisons are done by the caller.
60 */
61 int panic_cmp(struct rb_node *one, struct rb_node *two);
62
63 RB_HEAD(linux_root, rb_node);
64 RB_PROTOTYPE(linux_root, rb_node, __entry, panic_cmp);
65
/* Accessors mapping the Linux rbtree API onto the tree.h primitives. */
#define	rb_parent(r)	RB_PARENT(r, __entry)
#define	rb_entry(ptr, type, member)	container_of(ptr, type, member)
/* Like rb_entry() but tolerates a NULL ptr (returns NULL). */
#define	rb_entry_safe(ptr, type, member) \
	((ptr) != NULL ? rb_entry(ptr, type, member) : NULL)

#define	RB_EMPTY_ROOT(root)	((root)->rb_node == NULL)
/* A "cleared" node points its parent link at itself (see RB_CLEAR_NODE). */
#define	RB_EMPTY_NODE(node)	(RB_PARENT(node, __entry) == node)
#define	RB_CLEAR_NODE(node)	RB_SET_PARENT(node, node, __entry)

/*
 * Rebalance after rb_link_node().  A node linked at the root has no
 * parent and needs no fixup, hence the rb_parent() guard.
 */
#define	rb_insert_color(node, root) do {				\
	if (rb_parent(node))						\
		linux_root_RB_INSERT_COLOR((struct linux_root *)(root),	\
		    rb_parent(node), (node));				\
} while (0)
#define	rb_erase(node, root)						\
	linux_root_RB_REMOVE((struct linux_root *)(root), (node))
#define	rb_next(node)	RB_NEXT(linux_root, NULL, (node))
#define	rb_prev(node)	RB_PREV(linux_root, NULL, (node))
#define	rb_first(root)	RB_MIN(linux_root, (struct linux_root *)(root))
#define	rb_last(root)	RB_MAX(linux_root, (struct linux_root *)(root))
/* O(1) minimum lookup via the cached leftmost pointer. */
#define	rb_first_cached(root)	(root)->rb_leftmost
87
88 static inline struct rb_node *
__rb_deepest_left(struct rb_node * node)89 __rb_deepest_left(struct rb_node *node)
90 {
91 struct rb_node *parent = NULL;
92 while (node != NULL) {
93 parent = node;
94 if (RB_LEFT(node, __entry))
95 node = RB_LEFT(node, __entry);
96 else
97 node = RB_RIGHT(node, __entry);
98 }
99 return (parent);
100 }
101
102 static inline struct rb_node *
rb_next_postorder(const struct rb_node * node)103 rb_next_postorder(const struct rb_node *node)
104 {
105 struct rb_node *parent =
106 RB_PARENT(__DECONST(struct rb_node *, node), __entry);
107 /* left -> right, right -> root */
108 if (parent != NULL &&
109 (node == RB_LEFT(parent, __entry)) &&
110 (RB_RIGHT(parent, __entry)))
111 return (__rb_deepest_left(RB_RIGHT(parent, __entry)));
112 else
113 return (parent);
114 }
115
/*
 * Iterate over every entry of the tree in post-order.  'y' always
 * holds the next entry, so the body may safely free or unlink 'x'
 * (e.g. when tearing down a whole tree without rebalancing).
 */
#define	rbtree_postorder_for_each_entry_safe(x, y, head, member)	\
	for ((x) = rb_entry_safe(__rb_deepest_left((head)->rb_node),	\
	    __typeof(*x), member);					\
	    ((x) != NULL) && ((y) =					\
	    rb_entry_safe(rb_next_postorder(&x->member), typeof(*x), member), 1); \
	    (x) = (y))
122
123 static inline void
rb_link_node(struct rb_node * node,struct rb_node * parent,struct rb_node ** rb_link)124 rb_link_node(struct rb_node *node, struct rb_node *parent,
125 struct rb_node **rb_link)
126 {
127 RB_SET(node, parent, __entry);
128 *rb_link = node;
129 }
130
/*
 * Replace 'victim' with 'new' at the same position in the tree,
 * without rebalancing.  'new' inherits victim's parent link, children
 * and color; 'victim' is left untouched and is no longer referenced
 * by the tree.
 */
static inline void
rb_replace_node(struct rb_node *victim, struct rb_node *new,
    struct rb_root *root)
{

	/* Redirect the parent's (or root's) child pointer to 'new'. */
	RB_SWAP_CHILD((struct linux_root *)root, rb_parent(victim),
	    victim, new, __entry);
	/* Re-parent victim's children onto the replacement. */
	if (RB_LEFT(victim, __entry))
		RB_SET_PARENT(RB_LEFT(victim, __entry), new, __entry);
	if (RB_RIGHT(victim, __entry))
		RB_SET_PARENT(RB_RIGHT(victim, __entry), new, __entry);
	/* Copy the whole entry (children, parent and color) last. */
	*new = *victim;
}
144
145 static inline void
rb_insert_color_cached(struct rb_node * node,struct rb_root_cached * root,bool leftmost)146 rb_insert_color_cached(struct rb_node *node, struct rb_root_cached *root,
147 bool leftmost)
148 {
149 if (rb_parent(node))
150 linux_root_RB_INSERT_COLOR((struct linux_root *)&root->rb_root,
151 rb_parent(node), node);
152 if (leftmost)
153 root->rb_leftmost = node;
154 }
155
156 static inline struct rb_node *
rb_erase_cached(struct rb_node * node,struct rb_root_cached * root)157 rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
158 {
159 struct rb_node *retval;
160
161 if (node == root->rb_leftmost)
162 retval = root->rb_leftmost = linux_root_RB_NEXT(node);
163 else
164 retval = NULL;
165 linux_root_RB_REMOVE((struct linux_root *)&root->rb_root, node);
166 return (retval);
167 }
168
169 static inline void
rb_replace_node_cached(struct rb_node * old,struct rb_node * new,struct rb_root_cached * root)170 rb_replace_node_cached(struct rb_node *old, struct rb_node *new,
171 struct rb_root_cached *root)
172 {
173 rb_replace_node(old, new, &root->rb_root);
174 if (root->rb_leftmost == old)
175 root->rb_leftmost = new;
176 }
177
/*
 * Shadow tree.h's RB_ROOT with Linux-style empty-root initializers
 * (compound literals usable as rvalues).
 */
#undef RB_ROOT
#define	RB_ROOT		(struct rb_root) { NULL }
#define	RB_ROOT_CACHED	(struct rb_root_cached) { RB_ROOT, NULL }
181
182 #endif /* _LINUXKPI_LINUX_RBTREE_H_ */
183