/* radare2 - LGPL - Copyright 2019 - thestr4ng3r */

#include <r_util/r_intervaltree.h>
#include <r_util/r_assert.h>

#define unwrap(rbnode) container_of (rbnode, RIntervalNode, node)

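// Augmentation callback for the underlying red-black tree:
// recompute a node's max_end from its own end and the max_end of its children.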
static void node_max(RBNode *node) {
	RIntervalNode *intervalnode = unwrap (node);
	intervalnode->max_end = intervalnode->end;
	int i;
	for (i = 0; i < 2; i++) {
		if (node->child[i]) {
			ut64 end = unwrap (node->child[i])->max_end;
			if (end > intervalnode->max_end) {
				intervalnode->max_end = end;
			}
		}
	}
}

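// Comparator used for insertion: orders nodes by their start value only,
// so multiple nodes with the same start can coexist in the tree.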
static int cmp(const void *incoming, const RBNode *in_tree, void *user) {
	ut64 incoming_start = *(ut64 *)incoming;
	ut64 other_start = container_of (in_tree, const RIntervalNode, node)->start;
	if (incoming_start < other_start) {
		return -1;
	}
	if (incoming_start > other_start) {
		return 1;
	}
	return 0;
}

// like cmp, but handles searches for an exact RIntervalNode * in the tree instead of only comparing the start values
static int cmp_exact_node(const void *incoming, const RBNode *in_tree, void *user) {
	RIntervalNode *incoming_node = (RIntervalNode *)incoming;
	const RIntervalNode *node = container_of (in_tree, const RIntervalNode, node);
	if (node == incoming_node) {
		return 0;
	}
	if (incoming_node->start < node->start) {
		return -1;
	}
	if (incoming_node->start > node->start) {
		return 1;
	}
	// Here we have the same start value, but a different pointer.
	// This means we need to guide the caller into the direction where the actual node is.
	// Since we have nothing to compare anymore, we have to iterate through all the same-start children to find the correct path.
	RBIter *path_cache = user;
	if (!path_cache->len) {
		RBNode *cur = (RBNode *)&node->node;
		// go down to the leftmost child that has the same start
		while (cur) {
			path_cache->path[path_cache->len++] = cur;
			if (incoming_node->start <= unwrap (cur)->start) {
				cur = cur->child[0];
			} else {
				cur = cur->child[1];
			}
		}
		// iterate through all children with the same start and stop when the pointer is identical.
		// The RBIter works a bit differently than usual here: we store each node in the path, including right-descended ones,
		// because we want to get the full path in the end.
		while (r_rbtree_iter_has (path_cache)) {
			RIntervalNode *intervalnode = r_rbtree_iter_get (path_cache, RIntervalNode, node);
			if (intervalnode == incoming_node || intervalnode->start > incoming_node->start) {
				break;
			}
			// r_rbtree_iter_next does not work here
			RBNode *rbnode = &intervalnode->node;
			if (rbnode->child[1]) {
				// the next node after the current one is always the leftmost in the right branch
				for (rbnode = rbnode->child[1]; rbnode; rbnode = rbnode->child[0]) {
					path_cache->path[path_cache->len++] = rbnode;
				}
			} else {
				// if there is no right branch, go up
				do {
					rbnode = path_cache->path[--path_cache->len];
				} while (path_cache->len && path_cache->path[path_cache->len - 1]->child[1] == rbnode);
			}
		}
	}

	RBNode *next_child = NULL;
	// Go through the path to find the next node one step down
	size_t i;
	for (i = 0; i < path_cache->len - 1; i++) {
		if (unwrap (path_cache->path[i]) == node) {
			next_child = path_cache->path[i + 1];
			break;
		}
	}

	// Determine the direction from the next child node
	return (next_child && node->node.child[0] == next_child) ? -1 : 1;
}

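// Initialize an empty tree. free (may be NULL) is the destructor applied to a node's data
// when the tree is finalized or when a node is deleted with free == true.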
R_API void r_interval_tree_init(RIntervalTree *tree, RIntervalNodeFree free) {
	tree->root = NULL;
	tree->free = free;
}

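// RBNodeFree callback: frees the interval node itself and, if a destructor was passed as user, its data as well.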
static void interval_node_free(RBNode *node, void *user) {
	RIntervalNode *ragenode /* >:-O */ = unwrap (node);
	if (user) {
		((RContRBFree)user) (ragenode->data);
	}
	free (ragenode);
}

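// Free all nodes of the tree, applying tree->free to each node's data if it was set.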
R_API void r_interval_tree_fini(RIntervalTree *tree) {
	if (!tree || !tree->root) {
		return;
	}
	r_rbtree_free (&tree->root->node, interval_node_free, tree->free);
}

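// Insert a new node covering [start, end] with the given data.
// Returns false if allocation or the underlying rbtree insertion fails.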
R_API bool r_interval_tree_insert(RIntervalTree *tree, ut64 start, ut64 end, void *data) {
	r_return_val_if_fail (end >= start, false);
	RIntervalNode *node = R_NEW0 (RIntervalNode);
	if (!node) {
		return false;
	}
	node->start = start;
	node->end = end;
	node->data = data;
	RBNode *root = tree->root ? &tree->root->node : NULL;
	bool r = r_rbtree_aug_insert (&root, &start, &node->node, cmp, NULL, node_max);
	tree->root = unwrap (root);
	if (!r) {
		free (node);
	}
	return r;
}

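// Remove the given node from the tree. If free is true, tree->free (if set) is applied
// to the node's data; the node itself is always freed on successful deletion.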
R_API bool r_interval_tree_delete(RIntervalTree *tree, RIntervalNode *node, bool free) {
	r_return_val_if_fail (tree && tree->root && node, false);
	RBNode *root = &tree->root->node;
	RBIter path_cache = { 0 };
	bool r = r_rbtree_aug_delete (&root, node, cmp_exact_node, &path_cache, interval_node_free, free ? tree->free : NULL, node_max);
	tree->root = root ? unwrap (root) : NULL;
	return r;
}

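// Change the interval of an existing node to [new_start, new_end].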
R_API bool r_interval_tree_resize(RIntervalTree *tree, RIntervalNode *node, ut64 new_start, ut64 new_end) {
	r_return_val_if_fail (new_end >= new_start, false);
	if (node->start != new_start) {
		// A changed start means the node may have to move in the tree, so remove and re-insert it
		void *data = node->data;
		if (!r_interval_tree_delete (tree, node, false)) {
			return false;
		}
		return r_interval_tree_insert (tree, new_start, new_end, data);
	}
	if (node->end != new_end) {
		// If only the end changes, it is enough to propagate the updated augmented max value upwards
		node->end = new_end;
		RBIter path_cache = { 0 };
		return r_rbtree_aug_update_sum (&tree->root->node, node, &node->node, cmp_exact_node, &path_cache, node_max);
	}
	// no change
	return true;
}

// This must always return the topmost node that matches start!
// Otherwise r_interval_tree_first_at will break!!!
R_API RIntervalNode *r_interval_tree_node_at(RIntervalTree *tree, ut64 start) {
	RIntervalNode *node = tree->root;
	while (node) {
		if (start < node->start) {
			node = unwrap (node->node.child[0]);
		} else if (start > node->start) {
			node = unwrap (node->node.child[1]);
		} else {
			return node;
		}
	}
	return NULL;
}

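// Return an iterator positioned at the leftmost node whose interval starts at start.
// Advancing the iterator yields the remaining nodes in order; callers stop once the
// start value differs (see r_interval_tree_all_at).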
R_API RBIter r_interval_tree_first_at(RIntervalTree *tree, ut64 start) {
	RBIter it = { 0 };

	// Find the topmost node matching start so we have a sub-tree with all entries that we want to find.
	RIntervalNode *top_intervalnode = r_interval_tree_node_at (tree, start);
	if (!top_intervalnode) {
		return it;
	}

	// If there are more nodes with the same key, they can be in both children.
	RBNode *node = &top_intervalnode->node;
	while (node) {
		if (start <= unwrap (node)->start) {
			it.path[it.len++] = node;
			node = node->child[0];
		} else {
			node = node->child[1];
		}
	}

	return it;
}

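// Find the node that starts at start and carries exactly the given data pointer, or NULL.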
R_API RIntervalNode *r_interval_tree_node_at_data(RIntervalTree *tree, ut64 start, void *data) {
	RBIter it = r_interval_tree_first_at (tree, start);
	while (r_rbtree_iter_has (&it)) {
		RIntervalNode *intervalnode = r_rbtree_iter_get (&it, RIntervalNode, node);
		if (intervalnode->start != start) {
			break;
		}
		if (intervalnode->data == data) {
			return intervalnode;
		}
		r_rbtree_iter_next (&it);
	}
	return NULL;
}

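// Call cb for every node whose interval starts exactly at start.
// Stops early and returns false as soon as cb returns false.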
R_API bool r_interval_tree_all_at(RIntervalTree *tree, ut64 start, RIntervalIterCb cb, void *user) {
	RBIter it = r_interval_tree_first_at (tree, start);
	bool ret = true;
	while (r_rbtree_iter_has (&it)) {
		RIntervalNode *intervalnode = r_rbtree_iter_get (&it, RIntervalNode, node);
		if (intervalnode->start != start) {
			break;
		}
		ret = cb (intervalnode, user);
		if (!ret) {
			break;
		}
		r_rbtree_iter_next (&it);
	}
	return ret;
}

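// Call cb for every interval in this subtree that contains value
// ([start, end] if end_inclusive, otherwise [start, end)), pruning subtrees via max_end.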
R_API bool r_interval_node_all_in(RIntervalNode *node, ut64 value, bool end_inclusive, RIntervalIterCb cb, void *user) {
	while (node && value < node->start) {
		// less than the current node, but might still be contained further down
		node = unwrap (node->node.child[0]);
	}
	if (!node) {
		return true;
	}
	if (end_inclusive ? value > node->max_end : value >= node->max_end) {
		return true;
	}
	if (end_inclusive ? value <= node->end : value < node->end) {
		if (!cb (node, user)) {
			return false;
		}
	}
	// This can be done more efficiently by building the stack manually
	bool ret = r_interval_node_all_in (unwrap (node->node.child[0]), value, end_inclusive, cb, user);
	if (!ret) {
		return false;
	}
	return r_interval_node_all_in (unwrap (node->node.child[1]), value, end_inclusive, cb, user);
}

R_API bool r_interval_tree_all_in(RIntervalTree *tree, ut64 value, bool end_inclusive, RIntervalIterCb cb, void *user) {
	// all in!
	return r_interval_node_all_in (tree->root, value, end_inclusive, cb, user);
}

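// Call cb for every interval in this subtree that intersects [start, end]
// (with end and the nodes' ends treated as inclusive if end_inclusive is set).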
static bool r_interval_node_all_intersect(RIntervalNode *node, ut64 start, ut64 end, bool end_inclusive, RIntervalIterCb cb, void *user) {
	r_return_val_if_fail (end >= start, true);
	while (node && (end_inclusive ? end < node->start : end <= node->start)) {
		// less than the current node, but might still be contained further down
		node = unwrap (node->node.child[0]);
	}
	if (!node) {
		return true;
	}
	if (end_inclusive ? start > node->max_end : start >= node->max_end) {
		return true;
	}
	if (end_inclusive ? start <= node->end : start < node->end) {
		if (!cb (node, user)) {
			return false;
		}
	}
	// This can be done more efficiently by building the stack manually
	if (!r_interval_node_all_intersect (unwrap (node->node.child[0]), start, end, end_inclusive, cb, user)) {
		return false;
	}
	return r_interval_node_all_intersect (unwrap (node->node.child[1]), start, end, end_inclusive, cb, user);
}

R_API bool r_interval_tree_all_intersect(RIntervalTree *tree, ut64 start, ut64 end, bool end_inclusive, RIntervalIterCb cb, void *user) {
	return r_interval_node_all_intersect (tree->root, start, end, end_inclusive, cb, user);
}
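
/* Example usage (a minimal sketch, not part of this file; it only uses the API defined
 * above and assumes the stored data is heap-allocated so that libc free() can serve as
 * the destructor; print_node_cb stands for a user-supplied RIntervalIterCb):
 *
 *   RIntervalTree tree;
 *   r_interval_tree_init (&tree, free);
 *   r_interval_tree_insert (&tree, 0x100, 0x1ff, strdup ("first"));
 *   r_interval_tree_insert (&tree, 0x180, 0x2ff, strdup ("second"));
 *   r_interval_tree_all_in (&tree, 0x1a0, true, print_node_cb, NULL); // visits both intervals
 *   r_interval_tree_fini (&tree);
 */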