/*	$NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $	*/
/*	$OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $	*/
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_SYS_TREE_H_
#define	_SYS_TREE_H_

#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif

/*
 * Spinlock helpers backing the RB_SCAN_LOCK/RB_SCAN_UNLOCK macros below;
 * they guard the list of in-progress scans hanging off each RB_HEAD.
 * NOTE(review): not implemented in this header — presumably provided by
 * kernel support code; confirm against the build.
 */
void rb_spin_lock(struct spinlock *spin);
void rb_spin_unlock(struct spinlock *spin);

/*
 * This file defines data structures for different types of trees:
 * splay trees and red-black trees.
 *
 * A splay tree is a self-organizing data structure.  Every operation
 * on the tree causes a splay to happen.  The splay moves the requested
 * node to the root of the tree and partly rebalances it.
 *
 * This has the benefit that request locality causes faster lookups as
 * the requested nodes move to the top of the tree.  On the other hand,
 * every lookup causes memory writes.
 *
 * The Balance Theorem bounds the total access time for m operations
 * and n inserts on an initially empty tree as O((m + n)lg n).  The
 * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
 *
 * A red-black tree is a binary search tree with the node color as an
 * extra attribute.  It fulfills a set of conditions:
 *	- every search path from the root to a leaf consists of the
 *	  same number of black nodes,
 *	- each red node (except for the root) has a black parent,
 *	- each leaf node is black.
 *
 * Every operation on a red-black tree is bounded as O(lg n).
 * The maximum height of a red-black tree is 2lg (n+1).
 */

#define SPLAY_HEAD(name, type)						\
struct name {								\
	struct type *sph_root; /* root of the tree */			\
}

#define SPLAY_INITIALIZER(root)						\
	{ NULL }

#define SPLAY_INIT(root) do {						\
	(root)->sph_root = NULL;					\
} while (/*CONSTCOND*/ 0)

#define SPLAY_ENTRY(type)						\
struct {								\
	struct type *spe_left; /* left element */			\
	struct type *spe_right; /* right element */			\
}

#define SPLAY_LEFT(elm, field)		(elm)->field.spe_left
#define SPLAY_RIGHT(elm, field)		(elm)->field.spe_right
#define SPLAY_ROOT(head)		(head)->sph_root
#define SPLAY_EMPTY(head)		(SPLAY_ROOT(head) == NULL)

/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
#define SPLAY_ROTATE_RIGHT(head, tmp, field) do {			\
	SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field);	\
	SPLAY_RIGHT(tmp, field) = (head)->sph_root;			\
	(head)->sph_root = tmp;						\
} while (/*CONSTCOND*/ 0)

#define SPLAY_ROTATE_LEFT(head, tmp, field) do {			\
	SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field);	\
	SPLAY_LEFT(tmp, field) = (head)->sph_root;			\
	(head)->sph_root = tmp;						\
} while (/*CONSTCOND*/ 0)

/* Descend left: hang the current root on the right-subtree hook (tmp). */
#define SPLAY_LINKLEFT(head, tmp, field) do {				\
	SPLAY_LEFT(tmp, field) = (head)->sph_root;			\
	tmp = (head)->sph_root;						\
	(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);		\
} while (/*CONSTCOND*/ 0)

/* Descend right: hang the current root on the left-subtree hook (tmp). */
#define SPLAY_LINKRIGHT(head, tmp, field) do {				\
	SPLAY_RIGHT(tmp, field) = (head)->sph_root;			\
	tmp = (head)->sph_root;						\
	(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);	\
} while (/*CONSTCOND*/ 0)

/* Reattach the accumulated left/right subtrees around the new root. */
#define SPLAY_ASSEMBLE(head, node, left, right, field) do {		\
	SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field);	\
	SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
	SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field);	\
	SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field);	\
} while (/*CONSTCOND*/ 0)

/* Generates prototypes and inline functions */

#define SPLAY_PROTOTYPE(name, type, field, cmp)				\
void name##_SPLAY(struct name *, struct type *);			\
void name##_SPLAY_MINMAX(struct name *, int);				\
struct type *name##_SPLAY_INSERT(struct name *, struct type *);		\
struct type *name##_SPLAY_REMOVE(struct name *, struct type *);		\
									\
/* Finds the node with the same key as elm */				\
static __inline struct type *						\
name##_SPLAY_FIND(struct name *head, struct type *elm)			\
{									\
	if (SPLAY_EMPTY(head))						\
		return(NULL);						\
	name##_SPLAY(head, elm);					\
	if ((cmp)(elm, (head)->sph_root) == 0)				\
		return (head->sph_root);				\
	return (NULL);							\
}									\
									\
/* In-order successor of elm; splays elm to the root first. */		\
static __inline struct type *						\
name##_SPLAY_NEXT(struct name *head, struct type *elm)			\
{									\
	name##_SPLAY(head, elm);					\
	if (SPLAY_RIGHT(elm, field) != NULL) {				\
		elm = SPLAY_RIGHT(elm, field);				\
		while (SPLAY_LEFT(elm, field) != NULL) {		\
			elm = SPLAY_LEFT(elm, field);			\
		}							\
	} else								\
		elm = NULL;						\
	return (elm);							\
}									\
									\
static __inline struct type *						\
name##_SPLAY_MIN_MAX(struct name *head, int val)			\
{									\
	name##_SPLAY_MINMAX(head, val);					\
	return (SPLAY_ROOT(head));					\
}

/* Main splay operation.
 * Moves node close to the key of elm to top
 */
#define SPLAY_GENERATE(name, type, field, cmp)				\
struct type *								\
name##_SPLAY_INSERT(struct name *head, struct type *elm)		\
{									\
	if (SPLAY_EMPTY(head)) {					\
		SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL;\
	} else {							\
		int __comp;						\
		name##_SPLAY(head, elm);				\
		__comp = (cmp)(elm, (head)->sph_root);			\
		if(__comp < 0) {					\
			SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
			SPLAY_RIGHT(elm, field) = (head)->sph_root;	\
			SPLAY_LEFT((head)->sph_root, field) = NULL;	\
		} else if (__comp > 0) {				\
			SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
			SPLAY_LEFT(elm, field) = (head)->sph_root;	\
			SPLAY_RIGHT((head)->sph_root, field) = NULL;	\
		} else							\
			return ((head)->sph_root);			\
	}								\
	(head)->sph_root = (elm);					\
	return (NULL);							\
}									\
									\
struct type *								\
name##_SPLAY_REMOVE(struct name *head, struct type *elm)		\
{									\
	struct type *__tmp;						\
	if (SPLAY_EMPTY(head))						\
		return (NULL);						\
	name##_SPLAY(head, elm);					\
	if ((cmp)(elm, (head)->sph_root) == 0) {			\
		if (SPLAY_LEFT((head)->sph_root, field) == NULL) {	\
			(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
		} else {						\
			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
			(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
			name##_SPLAY(head, elm);			\
			SPLAY_RIGHT((head)->sph_root, field) = __tmp;	\
		}							\
		return (elm);						\
	}								\
	return (NULL);							\
}									\
									\
/* Top-down splay: __node collects the left/right partition trees. */	\
void									\
name##_SPLAY(struct name *head, struct type *elm)			\
{									\
	struct type __node, *__left, *__right, *__tmp;			\
	int __comp;							\
									\
	SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
	__left = __right = &__node;					\
									\
	while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) {		\
		if (__comp < 0) {					\
			__tmp = SPLAY_LEFT((head)->sph_root, field);	\
			if (__tmp == NULL)				\
				break;					\
			if ((cmp)(elm, __tmp) < 0){			\
				SPLAY_ROTATE_RIGHT(head, __tmp, field);	\
				if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
					break;				\
			}						\
			SPLAY_LINKLEFT(head, __right, field);		\
		} else if (__comp > 0) {				\
			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
			if (__tmp == NULL)				\
				break;					\
			if ((cmp)(elm, __tmp) > 0){			\
				SPLAY_ROTATE_LEFT(head, __tmp, field);	\
				if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
					break;				\
			}						\
			SPLAY_LINKRIGHT(head, __left, field);		\
		}							\
	}								\
	SPLAY_ASSEMBLE(head, &__node, __left, __right, field);		\
}									\
									\
/* Splay with either the minimum or the maximum element			\
 * Used to find minimum or maximum element in tree.			\
 */									\
void name##_SPLAY_MINMAX(struct name *head, int __comp)			\
{									\
	struct type __node, *__left, *__right, *__tmp;			\
									\
	SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
	__left = __right = &__node;					\
									\
	while (1) {							\
		if (__comp < 0) {					\
			__tmp = SPLAY_LEFT((head)->sph_root, field);	\
			if (__tmp == NULL)				\
				break;					\
			if (__comp < 0){				\
				SPLAY_ROTATE_RIGHT(head, __tmp, field);	\
				if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
					break;				\
			}						\
			SPLAY_LINKLEFT(head, __right, field);		\
		} else if (__comp > 0) {				\
			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
			if (__tmp == NULL)				\
				break;					\
			if (__comp > 0) {				\
				SPLAY_ROTATE_LEFT(head, __tmp, field);	\
				if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
					break;				\
			}						\
			SPLAY_LINKRIGHT(head, __left, field);		\
		}							\
	}								\
	SPLAY_ASSEMBLE(head, &__node, __left, __right, field);		\
}

#define SPLAY_NEGINF	-1
#define SPLAY_INF	1

#define SPLAY_INSERT(name, x, y)	name##_SPLAY_INSERT(x, y)
#define SPLAY_REMOVE(name, x, y)	name##_SPLAY_REMOVE(x, y)
#define SPLAY_FIND(name, x, y)		name##_SPLAY_FIND(x, y)
#define SPLAY_NEXT(name, x, y)		name##_SPLAY_NEXT(x, y)
#define SPLAY_MIN(name, x)		(SPLAY_EMPTY(x) ? NULL	\
					: name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
#define SPLAY_MAX(name, x)		(SPLAY_EMPTY(x) ? NULL	\
					: name##_SPLAY_MIN_MAX(x, SPLAY_INF))

#define SPLAY_FOREACH(x, name, head)					\
	for ((x) = SPLAY_MIN(name, head);				\
	     (x) != NULL;						\
	     (x) = SPLAY_NEXT(name, head, x))

/*
 * Macros that define a red-black tree
 */

/*
 * Per-scan bookkeeping node.  Active scans are chained off the tree
 * head (rbh_inprog) so that name##_RB_REMOVE can advance a scan whose
 * current node is being deleted out from under it.
 */
#define RB_SCAN_INFO(name, type)					\
struct name##_scan_info {						\
	struct name##_scan_info *link;					\
	struct type	*node;						\
}

#define RB_HEAD(name, type)						\
struct name {								\
	struct type *rbh_root;		     /* root of the tree */	\
	struct name##_scan_info *rbh_inprog; /* scans in progress */	\
	struct spinlock rbh_spin;					\
}

#define RB_INITIALIZER(root)						\
	{ NULL, NULL, SPINLOCK_INITIALIZER(root.spin, "root.spin") }

#define RB_INIT(root) do {						\
	(root)->rbh_root = NULL;					\
	(root)->rbh_inprog = NULL;					\
} while (/*CONSTCOND*/ 0)

/* The scan-in-progress list is only locked in the kernel. */
#ifdef _KERNEL
#define RB_SCAN_LOCK(spin)	rb_spin_lock(spin)
#define RB_SCAN_UNLOCK(spin)	rb_spin_unlock(spin)
#else
#define RB_SCAN_LOCK(spin)
#define RB_SCAN_UNLOCK(spin)
#endif

#define RB_BLACK	0
#define RB_RED		1
#define RB_ENTRY(type)							\
struct {								\
	struct type *rbe_left;		/* left element */		\
	struct type *rbe_right;		/* right element */		\
	struct type *rbe_parent;	/* parent element */		\
	int rbe_color;			/* node color */		\
}

#define RB_LEFT(elm, field)		(elm)->field.rbe_left
#define RB_RIGHT(elm, field)		(elm)->field.rbe_right
#define RB_PARENT(elm, field)		(elm)->field.rbe_parent
#define RB_COLOR(elm, field)		(elm)->field.rbe_color
#define RB_ROOT(head)			(head)->rbh_root
#define RB_INPROG(head)			(head)->rbh_inprog
#define RB_EMPTY(head)			(RB_ROOT(head) == NULL)

/* Initialize a freshly inserted node: leaf, red, given parent. */
#define RB_SET(elm, parent, field) do {					\
	RB_PARENT(elm, field) = parent;					\
	RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL;		\
	RB_COLOR(elm, field) = RB_RED;					\
} while (/*CONSTCOND*/ 0)

#define RB_SET_BLACKRED(black, red, field) do {				\
	RB_COLOR(black, field) = RB_BLACK;				\
	RB_COLOR(red, field) = RB_RED;					\
} while (/*CONSTCOND*/ 0)

/* User-overridable hook run on nodes whose subtrees changed. */
#ifndef RB_AUGMENT
#define RB_AUGMENT(x)	do {} while (0)
#endif

#define RB_ROTATE_LEFT(head, elm, tmp, field) do {			\
	(tmp) = RB_RIGHT(elm, field);					\
	if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) {	\
		RB_PARENT(RB_LEFT(tmp, field), field) = (elm);		\
	}								\
	RB_AUGMENT(elm);						\
	if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) {	\
		if ((elm) == RB_LEFT(RB_PARENT(elm, field), field))	\
			RB_LEFT(RB_PARENT(elm, field), field) = (tmp);	\
		else							\
			RB_RIGHT(RB_PARENT(elm, field), field) = (tmp);	\
	} else								\
		(head)->rbh_root = (tmp);				\
	RB_LEFT(tmp, field) = (elm);					\
	RB_PARENT(elm, field) = (tmp);					\
	RB_AUGMENT(tmp);						\
	if ((RB_PARENT(tmp, field)))					\
		RB_AUGMENT(RB_PARENT(tmp, field));			\
} while (/*CONSTCOND*/ 0)

#define RB_ROTATE_RIGHT(head, elm, tmp, field) do {			\
	(tmp) = RB_LEFT(elm, field);					\
	if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) {	\
		RB_PARENT(RB_RIGHT(tmp, field), field) = (elm);		\
	}								\
	RB_AUGMENT(elm);						\
	if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) {	\
		if ((elm) == RB_LEFT(RB_PARENT(elm, field), field))	\
			RB_LEFT(RB_PARENT(elm, field), field) = (tmp);	\
		else							\
			RB_RIGHT(RB_PARENT(elm, field), field) = (tmp);	\
	} else								\
		(head)->rbh_root = (tmp);				\
	RB_RIGHT(tmp, field) = (elm);					\
	RB_PARENT(elm, field) = (tmp);					\
	RB_AUGMENT(tmp);						\
	if ((RB_PARENT(tmp, field)))					\
		RB_AUGMENT(RB_PARENT(tmp, field));			\
} while (/*CONSTCOND*/ 0)

/* Generates prototypes and inline functions */
#define RB_PROTOTYPE(name, type, field, cmp)				\
	_RB_PROTOTYPE(name, type, field, cmp,)
#define RB_PROTOTYPE_STATIC(name, type, field, cmp)			\
	_RB_PROTOTYPE(name, type, field, cmp, __unused static)

#define _RB_PROTOTYPE(name, type, field, cmp, STORQUAL)			\
STORQUAL void name##_RB_INSERT_COLOR(struct name *, struct type *);	\
STORQUAL void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
STORQUAL struct type *name##_RB_REMOVE(struct name *, struct type *);	\
STORQUAL struct type *name##_RB_INSERT(struct name *, struct type *);	\
STORQUAL struct type *name##_RB_FIND(struct name *, struct type *);	\
STORQUAL int name##_RB_SCAN(struct name *, int (*)(struct type *, void *),\
			int (*)(struct type *, void *), void *);	\
STORQUAL int name##_RB_SCAN_NOLK(struct name *, int (*)(struct type *, void *),\
			int (*)(struct type *, void *), void *);	\
STORQUAL struct type *name##_RB_NEXT(struct type *);			\
STORQUAL struct type *name##_RB_PREV(struct type *);			\
STORQUAL struct type *name##_RB_MINMAX(struct name *, int);		\
RB_SCAN_INFO(name, type)						\

/*
 * A version which supplies a fast lookup routine for an exact match
 * on a numeric field.
 */
#define RB_PROTOTYPE2(name, type, field, cmp, datatype)			\
RB_PROTOTYPE(name, type, field, cmp);					\
struct type *name##_RB_LOOKUP(struct name *, datatype);			\
struct type *name##_RB_LOOKUP_REL(struct name *, datatype, struct type *) \

/*
 * A version which supplies a fast lookup routine for a numeric
 * field which resides within a ranged object, either using (begin,end),
 * or using (begin,size).
 */
#define RB_PROTOTYPE3(name, type, field, cmp, datatype)			\
RB_PROTOTYPE2(name, type, field, cmp, datatype);			\
struct type *name##_RB_RLOOKUP(struct name *, datatype)			\

#define RB_PROTOTYPE4(name, type, field, cmp, datatype)			\
RB_PROTOTYPE2(name, type, field, cmp, datatype);			\
struct type *name##_RB_RLOOKUP(struct name *, datatype)			\

#define RB_PROTOTYPEX(name, ext, type, field, cmp, datatype)		\
RB_PROTOTYPE(name, type, field, cmp);					\
struct type *name##_RB_LOOKUP_##ext (struct name *, datatype)		\

/* Main rb operation.
 * Moves node close to the key of elm to top
 */
#define RB_GENERATE(name, type, field, cmp)				\
	_RB_GENERATE(name, type, field, cmp,)

#define RB_GENERATE_STATIC(name, type, field, cmp)			\
	_RB_GENERATE(name, type, field, cmp, __unused static)

#define _RB_GENERATE(name, type, field, cmp, STORQUAL)			\
/* Restore red-black invariants after inserting (red) node elm. */	\
STORQUAL void								\
name##_RB_INSERT_COLOR(struct name *head, struct type *elm)		\
{									\
	struct type *parent, *gparent, *tmp;				\
	while ((parent = RB_PARENT(elm, field)) != NULL &&		\
	    RB_COLOR(parent, field) == RB_RED) {			\
		gparent = RB_PARENT(parent, field);			\
		if (parent == RB_LEFT(gparent, field)) {		\
			tmp = RB_RIGHT(gparent, field);			\
			if (tmp && RB_COLOR(tmp, field) == RB_RED) {	\
				RB_COLOR(tmp, field) = RB_BLACK;	\
				RB_SET_BLACKRED(parent, gparent, field);\
				elm = gparent;				\
				continue;				\
			}						\
			if (RB_RIGHT(parent, field) == elm) {		\
				RB_ROTATE_LEFT(head, parent, tmp, field);\
				tmp = parent;				\
				parent = elm;				\
				elm = tmp;				\
			}						\
			RB_SET_BLACKRED(parent, gparent, field);	\
			RB_ROTATE_RIGHT(head, gparent, tmp, field);	\
		} else {						\
			tmp = RB_LEFT(gparent, field);			\
			if (tmp && RB_COLOR(tmp, field) == RB_RED) {	\
				RB_COLOR(tmp, field) = RB_BLACK;	\
				RB_SET_BLACKRED(parent, gparent, field);\
				elm = gparent;				\
				continue;				\
			}						\
			if (RB_LEFT(parent, field) == elm) {		\
				RB_ROTATE_RIGHT(head, parent, tmp, field);\
				tmp = parent;				\
				parent = elm;				\
				elm = tmp;				\
			}						\
			RB_SET_BLACKRED(parent, gparent, field);	\
			RB_ROTATE_LEFT(head, gparent, tmp, field);	\
		}							\
	}								\
	RB_COLOR(head->rbh_root, field) = RB_BLACK;			\
}									\
									\
/* Restore red-black invariants after removing a black node. */		\
STORQUAL void								\
name##_RB_REMOVE_COLOR(struct name *head, struct type *parent,		\
    struct type *elm)							\
{									\
	struct type *tmp;						\
	while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) &&	\
	    elm != RB_ROOT(head)) {					\
		if (RB_LEFT(parent, field) == elm) {			\
			tmp = RB_RIGHT(parent, field);			\
			if (RB_COLOR(tmp, field) == RB_RED) {		\
				RB_SET_BLACKRED(tmp, parent, field);	\
				RB_ROTATE_LEFT(head, parent, tmp, field);\
				tmp = RB_RIGHT(parent, field);		\
			}						\
			if ((RB_LEFT(tmp, field) == NULL ||		\
			    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
			    (RB_RIGHT(tmp, field) == NULL ||		\
			    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
				RB_COLOR(tmp, field) = RB_RED;		\
				elm = parent;				\
				parent = RB_PARENT(elm, field);		\
			} else {					\
				if (RB_RIGHT(tmp, field) == NULL ||	\
				    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
					struct type *oleft;		\
					if ((oleft = RB_LEFT(tmp, field)) \
					    != NULL)			\
						RB_COLOR(oleft, field) = RB_BLACK;\
					RB_COLOR(tmp, field) = RB_RED;	\
					RB_ROTATE_RIGHT(head, tmp, oleft, field);\
					tmp = RB_RIGHT(parent, field);	\
				}					\
				RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
				RB_COLOR(parent, field) = RB_BLACK;	\
				if (RB_RIGHT(tmp, field))		\
					RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
				RB_ROTATE_LEFT(head, parent, tmp, field);\
				elm = RB_ROOT(head);			\
				break;					\
			}						\
		} else {						\
			tmp = RB_LEFT(parent, field);			\
			if (RB_COLOR(tmp, field) == RB_RED) {		\
				RB_SET_BLACKRED(tmp, parent, field);	\
				RB_ROTATE_RIGHT(head, parent, tmp, field);\
				tmp = RB_LEFT(parent, field);		\
			}						\
			if ((RB_LEFT(tmp, field) == NULL ||		\
			    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
			    (RB_RIGHT(tmp, field) == NULL ||		\
			    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
				RB_COLOR(tmp, field) = RB_RED;		\
				elm = parent;				\
				parent = RB_PARENT(elm, field);		\
			} else {					\
				if (RB_LEFT(tmp, field) == NULL ||	\
				    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
					struct type *oright;		\
					if ((oright = RB_RIGHT(tmp, field)) \
					    != NULL)			\
						RB_COLOR(oright, field) = RB_BLACK;\
					RB_COLOR(tmp, field) = RB_RED;	\
					RB_ROTATE_LEFT(head, tmp, oright, field);\
					tmp = RB_LEFT(parent, field);	\
				}					\
				RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
				RB_COLOR(parent, field) = RB_BLACK;	\
				if (RB_LEFT(tmp, field))		\
					RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
				RB_ROTATE_RIGHT(head, parent, tmp, field);\
				elm = RB_ROOT(head);			\
				break;					\
			}						\
		}							\
	}								\
	if (elm)							\
		RB_COLOR(elm, field) = RB_BLACK;			\
}									\
									\
STORQUAL struct type *							\
name##_RB_REMOVE(struct name *head, struct type *elm)			\
{									\
	struct type *child, *parent, *old;				\
	struct name##_scan_info *inprog;				\
	int color;							\
									\
	/* Advance any in-progress scan that is parked on elm. */	\
	for (inprog = RB_INPROG(head); inprog; inprog = inprog->link) {	\
		if (inprog->node == elm)				\
			inprog->node = RB_NEXT(name, head, elm);	\
	}								\
									\
	old = elm;							\
	if (RB_LEFT(elm, field) == NULL)				\
		child = RB_RIGHT(elm, field);				\
	else if (RB_RIGHT(elm, field) == NULL)				\
		child = RB_LEFT(elm, field);				\
	else {								\
		/* Two children: splice out the in-order successor	\
		 * and swap it into old's position and color. */	\
		struct type *left;					\
		elm = RB_RIGHT(elm, field);				\
		while ((left = RB_LEFT(elm, field)) != NULL)		\
			elm = left;					\
		child = RB_RIGHT(elm, field);				\
		parent = RB_PARENT(elm, field);				\
		color = RB_COLOR(elm, field);				\
		if (child)						\
			RB_PARENT(child, field) = parent;		\
		if (parent) {						\
			if (RB_LEFT(parent, field) == elm)		\
				RB_LEFT(parent, field) = child;		\
			else						\
				RB_RIGHT(parent, field) = child;	\
			RB_AUGMENT(parent);				\
		} else							\
			RB_ROOT(head) = child;				\
		if (RB_PARENT(elm, field) == old)			\
			parent = elm;					\
		(elm)->field = (old)->field;				\
		if (RB_PARENT(old, field)) {				\
			if (RB_LEFT(RB_PARENT(old, field), field) == old)\
				RB_LEFT(RB_PARENT(old, field), field) = elm;\
			else						\
				RB_RIGHT(RB_PARENT(old, field), field) = elm;\
			RB_AUGMENT(RB_PARENT(old, field));		\
		} else							\
			RB_ROOT(head) = elm;				\
		RB_PARENT(RB_LEFT(old, field), field) = elm;		\
		if (RB_RIGHT(old, field))				\
			RB_PARENT(RB_RIGHT(old, field), field) = elm;	\
		if (parent) {						\
			left = parent;					\
			do {						\
				RB_AUGMENT(left);			\
			} while ((left = RB_PARENT(left, field)) != NULL);\
		}							\
		goto color;						\
	}								\
	parent = RB_PARENT(elm, field);					\
	color = RB_COLOR(elm, field);					\
	if (child)							\
		RB_PARENT(child, field) = parent;			\
	if (parent) {							\
		if (RB_LEFT(parent, field) == elm)			\
			RB_LEFT(parent, field) = child;			\
		else							\
			RB_RIGHT(parent, field) = child;		\
		RB_AUGMENT(parent);					\
	} else								\
		RB_ROOT(head) = child;					\
color:									\
	if (color == RB_BLACK)						\
		name##_RB_REMOVE_COLOR(head, parent, child);		\
	return (old);							\
}									\
									\
/* Inserts a node into the RB tree */					\
STORQUAL struct type *							\
name##_RB_INSERT(struct name *head, struct type *elm)			\
{									\
	struct type *tmp;						\
	struct type *parent = NULL;					\
	int comp = 0;							\
	tmp = RB_ROOT(head);						\
	while (tmp) {							\
		parent = tmp;						\
		comp = (cmp)(elm, parent);				\
		if (comp < 0)						\
			tmp = RB_LEFT(tmp, field);			\
		else if (comp > 0)					\
			tmp = RB_RIGHT(tmp, field);			\
		else							\
			return(tmp);					\
	}								\
	RB_SET(elm, parent, field);					\
	if (parent != NULL) {						\
		if (comp < 0)						\
			RB_LEFT(parent, field) = elm;			\
		else							\
			RB_RIGHT(parent, field) = elm;			\
		RB_AUGMENT(parent);					\
	} else								\
		RB_ROOT(head) = elm;					\
	name##_RB_INSERT_COLOR(head, elm);				\
	return (NULL);							\
}									\
									\
/* Finds the node with the same key as elm */				\
STORQUAL struct type *							\
name##_RB_FIND(struct name *head, struct type *elm)			\
{									\
	struct type *tmp = RB_ROOT(head);				\
	int comp;							\
	while (tmp) {							\
		comp = cmp(elm, tmp);					\
		if (comp < 0)						\
			tmp = RB_LEFT(tmp, field);			\
		else if (comp > 0)					\
			tmp = RB_RIGHT(tmp, field);			\
		else							\
			return (tmp);					\
	}								\
	return (NULL);							\
}									\
									\
/*									\
 * Issue a callback for all matching items.  The scan function must	\
 * return < 0 for items below the desired range, 0 for items within	\
 * the range, and > 0 for items beyond the range.   Any item may be	\
 * deleted while the scan is in progress.				\
 */									\
static int								\
name##_SCANCMP_ALL(struct type *type __unused, void *data __unused)	\
{									\
	return(0);							\
}									\
									\
/* Register a scan on the head's in-progress list (see RB_REMOVE). */	\
static __inline void							\
name##_scan_info_link(struct name##_scan_info *scan, struct name *head) \
{									\
	RB_SCAN_LOCK(&head->rbh_spin);					\
	scan->link = RB_INPROG(head);					\
	RB_INPROG(head) = scan;						\
	RB_SCAN_UNLOCK(&head->rbh_spin);				\
}									\
									\
/* Unlink a completed scan from the head's in-progress list. */		\
static __inline void							\
name##_scan_info_done(struct name##_scan_info *scan, struct name *head) \
{									\
	struct name##_scan_info **infopp;				\
									\
	RB_SCAN_LOCK(&head->rbh_spin);					\
	infopp = &RB_INPROG(head);					\
	while (*infopp != scan)						\
		infopp = &(*infopp)->link;				\
	*infopp = scan->link;						\
	RB_SCAN_UNLOCK(&head->rbh_spin);				\
}									\
									\
static __inline int							\
_##name##_RB_SCAN(struct name *head,					\
		int (*scancmp)(struct type *, void *),			\
		int (*callback)(struct type *, void *),			\
		void *data, int uselock)				\
{									\
	struct name##_scan_info info;					\
	struct type *best;						\
	struct type *tmp;						\
	int count;							\
	int comp;							\
									\
	if (scancmp == NULL)						\
		scancmp = name##_SCANCMP_ALL;				\
									\
	/*								\
	 * Locate the first element.					\
	 */								\
	tmp = RB_ROOT(head);						\
	best = NULL;							\
	while (tmp) {							\
		comp = scancmp(tmp, data);				\
		if (comp < 0) {						\
			tmp = RB_RIGHT(tmp, field);			\
		} else if (comp > 0) {					\
			tmp = RB_LEFT(tmp, field);			\
		} else {						\
			best = tmp;					\
			if (RB_LEFT(tmp, field) == NULL)		\
				break;					\
			tmp = RB_LEFT(tmp, field);			\
		}							\
	}								\
	count = 0;							\
	if (best) {							\
		/* Pre-fetch the successor so the callback may safely	\
		 * delete best; RB_REMOVE fixes info.node if needed. */	\
		info.node = RB_NEXT(name, head, best);			\
		if (uselock)						\
			name##_scan_info_link(&info, head);		\
		while ((comp = callback(best, data)) >= 0) {		\
			count += comp;					\
			best = info.node;				\
			if (best == NULL || scancmp(best, data) != 0)	\
				break;					\
			info.node = RB_NEXT(name, head, best);		\
		}							\
		if (uselock)						\
			name##_scan_info_done(&info, head);		\
		if (comp < 0)	/* error or termination */		\
			count = comp;					\
	}								\
	return(count);							\
}									\
									\
STORQUAL int								\
name##_RB_SCAN(struct name *head,					\
		int (*scancmp)(struct type *, void *),			\
		int (*callback)(struct type *, void *),			\
		void *data)						\
{									\
	return _##name##_RB_SCAN(head, scancmp, callback, data, 1);	\
}									\
									\
STORQUAL int								\
name##_RB_SCAN_NOLK(struct name *head,					\
		int (*scancmp)(struct type *, void *),			\
		int (*callback)(struct type *, void *),			\
		void *data)						\
{									\
	return _##name##_RB_SCAN(head, scancmp, callback, data, 0);	\
}									\
									\
/* ARGSUSED */								\
STORQUAL struct type *							\
name##_RB_NEXT(struct type *elm)					\
{									\
	if (RB_RIGHT(elm, field)) {					\
		elm = RB_RIGHT(elm, field);				\
		while (RB_LEFT(elm, field))				\
			elm = RB_LEFT(elm, field);			\
	} else {							\
		if (RB_PARENT(elm, field) &&				\
		    (elm == RB_LEFT(RB_PARENT(elm, field), field)))	\
			elm = RB_PARENT(elm, field);			\
		else {							\
			while (RB_PARENT(elm, field) &&			\
			    (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
				elm = RB_PARENT(elm, field);		\
			elm = RB_PARENT(elm, field);			\
		}							\
	}								\
	return (elm);							\
}									\
									\
/* ARGSUSED */								\
STORQUAL struct type *							\
name##_RB_PREV(struct type *elm)					\
{									\
	if (RB_LEFT(elm, field)) {					\
		elm = RB_LEFT(elm, field);				\
		while (RB_RIGHT(elm, field))				\
			elm = RB_RIGHT(elm, field);			\
	} else {							\
		if (RB_PARENT(elm, field) &&				\
		    (elm == RB_RIGHT(RB_PARENT(elm, field), field)))	\
			elm = RB_PARENT(elm, field);			\
		else {							\
			while (RB_PARENT(elm, field) &&			\
			    (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
				elm = RB_PARENT(elm, field);		\
			elm = RB_PARENT(elm, field);			\
		}							\
	}								\
	return (elm);							\
}									\
									\
STORQUAL struct type *							\
name##_RB_MINMAX(struct name *head, int val)				\
{									\
	struct type *tmp = RB_ROOT(head);				\
	struct type *parent = NULL;					\
	while (tmp) {							\
		parent = tmp;						\
		if (val < 0)						\
			tmp = RB_LEFT(tmp, field);			\
		else							\
			tmp = RB_RIGHT(tmp, field);			\
	}								\
	return (parent);						\
}

/*
 * This extended version implements a fast LOOKUP function given
 * a numeric data type.
 *
 * The element whose index/offset field is exactly the specified value
 * will be returned, or NULL.
 */
#define RB_GENERATE2(name, type, field, cmp, datatype, indexfield)	\
RB_GENERATE(name, type, field, cmp)					\
									\
struct type *								\
name##_RB_LOOKUP(struct name *head, datatype value)			\
{									\
	struct type *tmp;						\
									\
	tmp = RB_ROOT(head);						\
	while (tmp) {							\
		if (value > tmp->indexfield)				\
			tmp = RB_RIGHT(tmp, field);			\
		else if (value < tmp->indexfield)			\
			tmp = RB_LEFT(tmp, field);			\
		else							\
			return(tmp);					\
	}								\
	return(NULL);							\
}									\
									\
/* Like LOOKUP but tries rel's neighbors first for +/-1 locality. */	\
struct type *								\
name##_RB_LOOKUP_REL(struct name *head, datatype value, struct type *rel)\
{									\
	struct type *tmp;						\
									\
	if (value == rel->indexfield - 1) {				\
		tmp = name##_RB_PREV(rel);				\
		if (tmp && value != tmp->indexfield)			\
			tmp = NULL;					\
		return tmp;						\
	}								\
	if (value == rel->indexfield + 1) {				\
		tmp = name##_RB_NEXT(rel);				\
		if (tmp && value != tmp->indexfield)			\
			tmp = NULL;					\
		return tmp;						\
	}								\
									\
	tmp = RB_ROOT(head);						\
	while (tmp) {							\
		if (value > tmp->indexfield)				\
			tmp = RB_RIGHT(tmp, field);			\
		else if (value < tmp->indexfield)			\
			tmp = RB_LEFT(tmp, field);			\
		else							\
			return(tmp);					\
	}								\
	return(NULL);							\
}									\

/*
 * This extended version implements a fast ranged-based LOOKUP function
 * given a numeric data type, for data types with a beginning and end
 * (end is inclusive).
 *
 * The element whose range contains the specified value is returned, or NULL
 */
#define RB_GENERATE3(name, type, field, cmp, datatype, begfield, endfield) \
RB_GENERATE2(name, type, field, cmp, datatype, begfield)		\
									\
struct type *								\
name##_RB_RLOOKUP(struct name *head, datatype value)			\
{									\
	struct type *tmp;						\
									\
	tmp = RB_ROOT(head);						\
	while (tmp) {							\
		if (value >= tmp->begfield && value <= tmp->endfield)	\
			return(tmp);					\
		if (value > tmp->begfield)				\
			tmp = RB_RIGHT(tmp, field);			\
		else							\
			tmp = RB_LEFT(tmp, field);			\
	}								\
	return(NULL);							\
}									\

/*
 * This extended version implements a fast ranged-based LOOKUP function
 * given a numeric data type, for data types with a beginning and size.
 *
 * WARNING: The full range of the data type is not supported due to a
 * boundary condition at the end, where (beginning + size) might overflow.
 *
 * The element whose range contains the specified value is returned, or NULL
 */
#define RB_GENERATE4(name, type, field, cmp, datatype, begfield, sizefield) \
RB_GENERATE2(name, type, field, cmp, datatype, begfield)		\
									\
struct type *								\
name##_RB_RLOOKUP(struct name *head, datatype value)			\
{									\
	struct type *tmp;						\
									\
	tmp = RB_ROOT(head);						\
	while (tmp) {							\
		if (value >= tmp->begfield &&				\
		    value < tmp->begfield + tmp->sizefield) {		\
			return(tmp);					\
		}							\
		if (value > tmp->begfield)				\
			tmp = RB_RIGHT(tmp, field);			\
		else							\
			tmp = RB_LEFT(tmp, field);			\
	}								\
	return(NULL);							\
}									\

/*
 * This generates a custom lookup function for a red-black tree.
 * Note that the macro may be used with a storage qualifier.
 */

#define RB_GENERATE_XLOOKUP(name, ext, type, field, xcmp, datatype)	\
	_RB_GENERATE_XLOOKUP(name, ext, type, field, xcmp, datatype,)
#define RB_GENERATE_XLOOKUP_STATIC(name, ext, type, field, xcmp, datatype) \
	_RB_GENERATE_XLOOKUP(name, ext, type, field, xcmp, datatype, __unused static)

#define _RB_GENERATE_XLOOKUP(name, ext, type, field, xcmp, datatype, STORQUAL)\
									\
STORQUAL struct type *							\
name##_RB_LOOKUP_##ext (struct name *head, datatype value)		\
{									\
	struct type *tmp;						\
	int r;								\
									\
	tmp = RB_ROOT(head);						\
	while (tmp) {							\
		r = xcmp(value, tmp);					\
		if (r == 0)						\
			return(tmp);					\
		if (r > 0)						\
			tmp = RB_RIGHT(tmp, field);			\
		else							\
			tmp = RB_LEFT(tmp, field);			\
	}								\
	return(NULL);							\
}									\


#define RB_NEGINF	-1
#define RB_INF	1

#define RB_INSERT(name, root, elm)	name##_RB_INSERT(root, elm)
#define RB_REMOVE(name, root, elm)	name##_RB_REMOVE(root, elm)
#define RB_FIND(name, root, elm)	name##_RB_FIND(root, elm)
#define RB_LOOKUP(name, root, value)	name##_RB_LOOKUP(root, value)
#define RB_RLOOKUP(name, root, value)	name##_RB_RLOOKUP(root, value)
#define RB_SCAN(name, root, cmp, callback, data)			\
				name##_RB_SCAN(root, cmp, callback, data)
#define RB_SCAN_NOLK(name, root, cmp, callback, data)			\
				name##_RB_SCAN_NOLK(root, cmp, callback, data)
#define RB_FIRST(name, root)		name##_RB_MINMAX(root, RB_NEGINF)
#define RB_NEXT(name, root, elm)	name##_RB_NEXT(elm)
#define RB_PREV(name, root, elm)	name##_RB_PREV(elm)
#define RB_MIN(name, root)		name##_RB_MINMAX(root, RB_NEGINF)
#define RB_MAX(name, root)		name##_RB_MINMAX(root, RB_INF)

#define RB_FOREACH(x, name, head)					\
	for ((x) = RB_MIN(name, head);					\
	     (x) != NULL;						\
	     (x) = name##_RB_NEXT(x))

#define RB_FOREACH_FROM(x, name, y)					\
	for ((x) = (y);							\
	    ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL);	\
	     (x) = (y))

#define RB_FOREACH_SAFE(x, name, head, y)				\
	for ((x) = RB_MIN(name, head);					\
	    ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL);	\
	     (x) = (y))

#define RB_FOREACH_REVERSE(x, name, head)				\
	for ((x) = RB_MAX(name, head);					\
	     (x) != NULL;						\
	     (x) = name##_RB_PREV(x))

#define RB_FOREACH_REVERSE_FROM(x, name, y)				\
	for ((x) = (y);							\
	    ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL);	\
	     (x) = (y))

#define RB_FOREACH_REVERSE_SAFE(x, name, head, y)			\
	for ((x) = RB_MAX(name, head);					\
	    ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL);	\
	     (x) = (y))

#endif	/* _SYS_TREE_H_ */