/*
 * Copyright (c) 1988, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)radix.c	7.14 (Berkeley) 05/21/92
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 *
 * Keys and masks are byte strings whose first byte is the total
 * length of the string (see the *(u_char *) dereferences of v and
 * netmask throughout).
 */
#ifndef RNF_NORMAL
#include "param.h"
#include "radix.h"
#include "malloc.h"
#define	M_DONTWAIT M_NOWAIT
#endif
struct radix_mask *rn_mkfreelist;	/* free list of radix_mask structs (used via MKGet/MKFree) */
struct radix_node_head *mask_rnhead;	/* separate tree holding all netmasks, shared by all trees */
#define rn_maskhead (mask_rnhead->rnh_treetop)
#undef Bcmp
/* Bcmp that tolerates a zero length (bcmp of 0 bytes would be fine, but be explicit). */
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
/*
 * The data structure for the keys is a radix tree with one way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical and of the key and mask to be the key.
 * We define the index of a route to associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit is 0, past the index of the mask.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If the index(m) < rn_b, this implies the trailing last few bits of k
 * before bit b are all 0, (and hence consequently true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * The present version of the code makes no use of normal routes,
 * but similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 */

/*
 * rn_search: descend the tree from head, testing the bit selected by
 * each internal node (rn_bmask within byte rn_off of key v), going right
 * when the bit is set.  Stops at the first leaf (rn_b < 0) and returns it.
 * No key comparison is done here; the caller must verify the match.
 */
struct radix_node *
rn_search(v, head)
	struct radix_node *head;
	register caddr_t v;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
};

/*
 * rn_search_m: like rn_search, but only branch right when the mask m
 * also has the tested bit set; bits masked out always branch left.
 * Returns the leaf reached (again unverified).
 */
struct radix_node *
rn_search_m(v, head, m)
	struct radix_node *head;
	register caddr_t v, m;
{
	register struct radix_node *x;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
};

/*
 * rn_refines: return nonzero iff mask m is strictly more specific than
 * mask n, i.e. every bit set in n is also set in m and the two are not
 * equal.  Both arguments are length-prefixed byte strings; the first
 * byte of each is its length.  If n is longer than m, n's extra trailing
 * bytes must be zero for m to refine n; if m is longer and the common
 * prefix is equal, m refines n only if one of its extra bytes is nonzero.
 */
rn_refines(m, n)
	register caddr_t m, n;
{
	/* lim/lim2 are computed from n's length byte before n is advanced. */
	register caddr_t lim, lim2 = lim = n + *(u_char *)n;
	int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;	/* only compare over m's (shorter) extent */
	while (n < lim) {
		if (*n & ~(*m))
			return 0;	/* n has a bit m lacks: m does not refine n */
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	/* Any nonzero byte in n beyond m's length also disqualifies m. */
	while (n < lim2)
		if (*n++)
			return 0;
	/* Equal over the common prefix: m refines n iff m has extra set bits. */
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}


#define MAXKEYLEN 52

static int gotOddMasks;			/* set when a non-contiguous mask is entered */
/*
 * NOTE(review): single static scratch buffer makes rn_match non-reentrant;
 * presumably callers serialize at splnet or similar -- confirm before use
 * in any concurrent context.
 */
static char maskedKey[MAXKEYLEN];

/*
 * rn_match: look up key v in the tree rooted at head and return the
 * best-matching route leaf, or 0 if none.  Tries, in order:
 * (1) an exact (host) match at the leaf rn_search lands on;
 * (2) a masked match against that leaf and its rn_dupedkey chain;
 * (3) walking back up toward the root, for each ancestor's rm_mklist
 *     of candidate masks, masking v and re-searching the subtree.
 */
struct radix_node *
rn_match(v, head)
	struct radix_node *head;
	caddr_t v;
{
	register struct radix_node *t = head, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, head) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;	/* offset of first differing byte */
	saved_t = t;
	do {
		if (t->rn_mask) {
			/*
			 * Even if we don't match exactly as a host,
			 * we may match if the leaf we wound up at is
			 * a route to a net: compare only the bits the
			 * route's mask covers, starting at the point
			 * where the exact comparison first failed.
			 */
			cp3 = matched_off + t->rn_mask;
			cp2 = matched_off + t->rn_key;
			for (; cp < cplim; cp++)
				if ((*cp2++ ^ *cp) & *cp3++)
					break;
			if (cp == cplim)
				return t;
			cp = matched_off + v;	/* reset for next dupedkey */
		}
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				/* Build v & rm_mask into the static buffer,
				 * then search and verify against each route
				 * carrying exactly this mask pointer. */
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					return x;
			} while (m = m->rm_mklist);
		}
	} while (t != head);
	return 0;
};

#ifdef RN_DEBUG
int	rn_nodenum;			/* next serial number for created nodes */
struct	radix_node *rn_clist;		/* list of all nodes in creation order */
int	rn_saveinfo;
#endif

/*
 * rn_newpair: initialize the caller-supplied two-element node array as a
 * (leaf, interior) pair for key v with bit index b.  nodes[0] becomes the
 * leaf (rn_b = -1, key v), nodes[1] the interior node testing bit b with
 * the leaf as its left child.  Returns the interior node; the leaf is
 * reachable as its rn_l.
 */
struct radix_node *
rn_newpair(v, b, nodes)
	caddr_t v;
	int b;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

int rn_debug = 1;

/*
 * rn_insert: insert key v into the tree at head using the caller's
 * two-node array.  If an identical key already exists, set *dupentry
 * and return the existing leaf; otherwise clear *dupentry, splice the
 * new interior/leaf pair in at the first differing bit position, and
 * return the new leaf.
 */
struct radix_node *
rn_insert(v, head, dupentry, nodes)
	caddr_t v;
	struct radix_node *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	int head_off = head->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v, head);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	/* b counts down from the bit after the differing byte until the
	 * highest-order differing bit is found. */
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = head;
	cp = v;
	/* Re-descend until reaching a node whose index is >= b (or a leaf,
	 * whose negative rn_b compares large as unsigned); p is its parent
	 * and the new pair is spliced between them. */
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	/* Put the displaced subtree x on whichever side v's bit b says
	 * the new leaf does NOT go. */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
    }
	return (tt);
}

/*
 * rn_addmask: enter netmask into the shared mask tree and return its
 * node (an existing one when search is set and an equal mask is already
 * present).  The mask bytes are copied into storage allocated past the
 * two radix_nodes.  rn_b of the returned node is set to -1 - index(mask),
 * where the index is the position of the first zero bit at or after byte
 * offset skip; gotOddMasks is set if any bit follows that zero.
 * Returns 0 on allocation failure.
 */
struct radix_node *
rn_addmask(netmask, search, skip)
	caddr_t netmask;
	int search, skip;
{
	register struct radix_node *x;
	register caddr_t cp, cplim;
	register int b, mlen, j;
	int maskduplicated;

	mlen = *(u_char *)netmask;
	if (search) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) == 0)
			return (x);
	}
	R_Malloc(x, struct radix_node *, MAXKEYLEN + 2 * sizeof (*x));
	if (x == 0)
		return (0);
	Bzero(x, MAXKEYLEN + 2 * sizeof (*x));
	cp = (caddr_t)(x + 2);	/* mask copy lives just past the node pair */
	Bcopy(netmask, cp, mlen);
	netmask = cp;
	x = rn_insert(netmask, rn_maskhead, &maskduplicated, x);
	/*
	 * Calculate index of mask: skip leading 0xff bytes, then scan the
	 * first partial byte for its highest clear bit.
	 */
	cplim = netmask + mlen;
	for (cp = netmask + skip; cp < cplim; cp++)
		if (*(u_char *)cp != 0xff)
			break;
	b = (cp - netmask) << 3;
	if (cp != cplim) {
		if (*cp != 0) {
			gotOddMasks = 1;
			for (j = 0x80; j; b++, j >>= 1)
				if ((j & *cp) == 0)
					break;
		}
	}
	x->rn_b = -1 - b;
	return (x);
}

/*
 * rn_addroute: add the route (v, netmask) to the tree at head using the
 * caller's two-node array.  Handles: interning the mask via the mask
 * tree / rn_addmask; chaining duplicated keys sorted most- to least-
 * specific mask; and attaching the route's mask annotation (radix_mask)
 * to the highest ancestor whose index permits it, so rn_match can find
 * masked matches while backtracking.  Returns the new leaf, or 0 if an
 * identical (key, mask) route already exists or allocation fails.
 */
struct radix_node *
rn_addroute(v, netmask, head, treenodes)
	caddr_t v, netmask;
	struct radix_node *head;
	struct radix_node treenodes[2];
{
	register int j;
	register caddr_t cp;
	register struct radix_node *t, *x, *tt;
	short b = 0, b_leaf;
	int vlen = *(u_char *)v, mlen, keyduplicated;
	caddr_t cplim; unsigned char *maskp;
	struct radix_mask *m, **mp;
	struct radix_node *saved_tt;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask) {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) != 0) {
			x = rn_addmask(netmask, 0, head->rn_off);
			if (x == 0)
				return (0);
		}
		netmask = x->rn_key;	/* canonical interned mask pointer */
		b = -1 - x->rn_b;	/* index of the mask */
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		do {
			if (tt->rn_mask == netmask)
				return (0);	/* exact duplicate route */
			t = tt;
			if (netmask == 0 ||
			    (tt->rn_mask && rn_refines(netmask, tt->rn_mask)))
				break;
		} while (tt = tt->rn_dupedkey);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 */
		if (tt && t == saved_tt) {
			struct	radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_p = x = t->rn_p;
			if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
			saved_tt = tt; x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
		}
#ifdef RN_DEBUG
		t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;	/* leaf carries the mask's index */
	}
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		/* x is our sibling leaf: give it (and our parent) a mask
		 * annotation if its mask could apply at this level. */
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				x->rn_mklist = t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b ))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	/* Climb while the mask index still covers the ancestor's index. */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			/* Same interned mask already listed: share it. */
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		if (rn_refines(netmask, m->rm_mask))
			break;
	}
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}

/*
 * rn_delete: remove the route matching (v, netmask) from the tree at
 * head.  Undoes rn_addroute's work: drops or unrefs the route's mask
 * annotation from the ancestor it was lifted to, unchains the leaf from
 * any duplicated-key list, splices the leaf and its paired interior
 * node out of the tree (relocating node contents when the pair being
 * freed includes a still-active interior node elsewhere in the tree),
 * and demotes masks that were attached to the removed interior node.
 * Returns the deleted leaf pair (for the caller to free), or 0 if no
 * such route exists or the target is a ROOT node.
 */
struct radix_node *
rn_delete(v, netmask, head)
	caddr_t v, netmask;
	struct radix_node *head;
{
	register struct radix_node *t, *p, *x = head;
	register struct radix_node *tt = rn_search(v, x);
	int b, head_off = x->rn_off, vlen = * (u_char *) v;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt = tt;

	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	if (dupedkey = tt->rn_dupedkey) {
		/* Find the entry on the dupedkey chain whose mask pointer
		 * matches the canonical (interned) netmask. */
		if (netmask)
			netmask = rn_search(netmask, rn_maskhead)->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (m->rm_mask != tt->rn_mask) {
		printf("rn_delete: inconsistent annotation\n");
		goto on1;
	}
	if (--m->rm_refs >= 0)
		goto on1;	/* annotation still shared by another route */
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	/* Climb to the ancestor the annotation was lifted to (mirrors
	 * the loop in rn_addroute). */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif RN_DEBUG
	t = tt->rn_p;
	if (dupedkey) {
		if (tt == saved_tt) {
			/* Deleting the head of the dupedkey chain: promote
			 * the next entry into our place in the tree. */
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
		} else {
			for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		/* If our paired interior node (tt+1) is active elsewhere
		 * in the tree, move its contents into the promoted node's
		 * twin so the freed pair is fully dead. */
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t; p = t->rn_p;
#else
			b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		}
		goto out;
	}
	/* No duplicates: splice our parent t out, connecting our sibling x
	 * directly to the grandparent p. */
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			/* Sibling is an interior node: append our mask list
			 * to the end of its list. */
			for (mp = &x->rn_mklist; m = *mp;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			/* Sibling is a leaf: each annotation either belongs
			 * to it (drop a reference) or is orphaned. */
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
					x->rn_mklist = 0;
					MKFree(m);
				} else
					printf("%s %x at %x\n",
					    "rn_delete: Orphaned Mask", m, x);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
		/* Move the spliced-out parent's slot contents (x = our twin)
		 * into t so the pair (tt, tt+1) can be freed together. */
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

/*
 * rn_walk: apply f(leaf, w) to every non-ROOT leaf in the tree,
 * including duplicated-key chains, in left-to-right order.  Stops and
 * returns f's value the first time it is nonzero; otherwise returns 0
 * after the whole tree has been visited.  rn must be the tree top.
 */
rn_walk(rn, f, w)
	register struct radix_node *rn;
	register int (*f)();
	caddr_t w;
{
	int error;
	struct radix_node *orn;
	for (;;) {
		while (rn->rn_b >= 0)
			rn = rn->rn_l;	/* First time through node, go left */
		for (orn = rn; rn; rn = rn->rn_dupedkey) /* Process Leaves */
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		for (rn = orn; rn->rn_p->rn_r == rn; ) { /* If at right child */
			rn = rn->rn_p;	/* go back up */
			if (rn->rn_flags & RNF_ROOT)
				return 0;	/* climbed past the top: done */
		}
		rn = rn->rn_p->rn_r;	/* otherwise, go right */
	}
}

char rn_zeros[MAXKEYLEN], rn_ones[MAXKEYLEN];

/*
 * rn_inithead: allocate and initialize a radix_node_head at *head
 * (no-op, returning 1, if *head is already set).  Builds the three
 * boundary nodes: an interior top node testing bit `off', a left-end
 * leaf keyed on all-zeros and a right-end leaf keyed on all-ones, all
 * marked RNF_ROOT.  Installs the rnh_* method pointers.  On first use
 * also fills rn_ones and recursively creates the shared mask tree.
 * Returns 1 on success, 0 on allocation failure.
 */
rn_inithead(head, off)
	struct radix_node_head **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;	/* top node is its own parent */
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_add = rn_addroute;
	rnh->rnh_delete = rn_delete;
	rnh->rnh_match = rn_match;
	rnh->rnh_walk = rn_walk;
	rnh->rnh_treetop = t;
	if (mask_rnhead == 0) {
		caddr_t cp = rn_ones, cplim = rn_ones + MAXKEYLEN;
		while (cp < cplim)
			*cp++ = -1;
		if (rn_inithead(&mask_rnhead, 0) == 0) {
			Free(rnh);
			*head = 0;
			return (0);
		}
	}
	return (1);
}