xref: /original-bsd/sys/net/radix.c (revision d272e02a)
1 /*
2  * Copyright (c) 1988, 1989  Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)radix.c	7.9 (Berkeley) 02/04/91
8  */
9 
10 /*
11  * Routines to build and maintain radix trees for routing lookups.
12  */
#ifndef RNF_NORMAL
#include "param.h"
#include "radix.h"
#include "malloc.h"
#define	M_DONTWAIT M_NOWAIT
#endif
/* Head of the tree holding one shared copy of every netmask in use. */
struct radix_node_head *mask_rnhead;
#define rn_maskhead mask_rnhead->rnh_treetop
/* Free list of radix_mask structures; presumably recycled by MKGet/MKFree (radix.h) — verify. */
struct radix_mask *rn_mkfreelist;
/* First tree head on the rnh_next chain; doubles as the mask tree (see rn_inithead). */
struct radix_node_head *radix_node_head;
#undef Bcmp
/* bcmp wrapper that treats a zero length as "equal" instead of calling bcmp. */
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
25 /*
26  * The data structure for the keys is a radix tree with one way
27  * branching removed.  The index rn_b at an internal node n represents a bit
28  * position to be tested.  The tree is arranged so that all descendants
29  * of a node n have keys whose bits all agree up to position rn_b - 1.
30  * (We say the index of n is rn_b.)
31  *
32  * There is at least one descendant which has a one bit at position rn_b,
33  * and at least one with a zero there.
34  *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical and of the key and mask be equal to the key.
 * We define the index of the route associated with the mask to be
38  * the first bit number in the mask where 0 occurs (with bit number 0
39  * representing the highest order bit).
40  *
41  * We say a mask is normal if every bit is 0, past the index of the mask.
42  * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
43  * and m is a normal mask, then the route applies to every descendant of n.
44  * If the index(m) < rn_b, this implies the trailing last few bits of k
45  * before bit b are all 0, (and hence consequently true of every descendant
46  * of n), so the route applies to all descendants of the node as well.
47  *
48  * The present version of the code makes no use of normal routes,
49  * but similar logic shows that a non-normal mask m such that
50  * index(m) <= index(n) could potentially apply to many children of n.
51  * Thus, for each non-host route, we attach its mask to a list at an internal
52  * node as high in the tree as we can go.
53  */
54 
55 struct radix_node *
56 rn_search(v, head)
57 	struct radix_node *head;
58 	register caddr_t v;
59 {
60 	register struct radix_node *x;
61 
62 	for (x = head; x->rn_b >= 0;) {
63 		if (x->rn_bmask & v[x->rn_off])
64 			x = x->rn_r;
65 		else
66 			x = x->rn_l;
67 	}
68 	return x;
69 };
70 
71 struct radix_node *
72 rn_search_m(v, head, m)
73 	struct radix_node *head;
74 	register caddr_t v, m;
75 {
76 	register struct radix_node *x;
77 
78 	for (x = head; x->rn_b >= 0;) {
79 		if ((x->rn_bmask & m[x->rn_off]) &&
80 		    (x->rn_bmask & v[x->rn_off]))
81 			x = x->rn_r;
82 		else
83 			x = x->rn_l;
84 	}
85 	return x;
86 };
87 
88 
/* Set by rn_addmask when a non-contiguous (bits after a 0) mask is seen. */
static int gotOddMasks;
/* Scratch buffer holding (key & mask) during rn_match backtracking;
 * NOTE(review): static, so rn_match is not reentrant. */
static char maskedKey[MAXKEYLEN];
91 
/*
 * Best-match lookup of destination key v in the tree under "head".
 * Strategy: (1) descend to a leaf and try an exact host match;
 * (2) failing that, walk the leaf's duplicate-key chain applying each
 * entry's mask (a net route may still cover v); (3) failing that,
 * backtrack toward the root, and at each ancestor try every mask on
 * its rn_mklist by masking v and re-searching.  Returns the matching
 * leaf, or 0 if no route applies.  The first byte of v is its length.
 */
struct radix_node *
rn_match(v, head)
	struct radix_node *head;
	caddr_t v;
{
	register struct radix_node *t = head, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, head) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;	/* offset of first byte that differed */
	saved_t = t;
	do {
	    if (t->rn_mask) {
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		cp3 = matched_off + t->rn_mask;
		cp2 = matched_off + t->rn_key;
		for (; cp < cplim; cp++)
			if ((*cp2++ ^ *cp) & *cp3++)
				break;
		if (cp == cplim)
			return t;	/* all bytes agree under the mask */
		cp = matched_off + v;	/* reset for the next duplicate */
	    }
	} while (t = t->rn_dupedkey);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		if (m = t->rn_mklist) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				/* Build (v & mask) into the static buffer... */
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ =  *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				/* ...then find a duplicate leaf carrying this mask. */
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					    return x;
			} while (m = m->rm_mklist);
		}
	} while (t != head);
	return 0;
};
178 
#ifdef RN_DEBUG
int	rn_nodenum;		/* serial number assigned to each new node's rn_info */
struct	radix_node *rn_clist;	/* chain of all nodes in creation order (rn_ybro) */
int	rn_saveinfo;		/* NOTE(review): not referenced in this file — confirm use */
#endif
184 
185 struct radix_node *
186 rn_newpair(v, b, nodes)
187 	caddr_t v;
188 	struct radix_node nodes[2];
189 {
190 	register struct radix_node *tt = nodes, *t = tt + 1;
191 	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
192 	t->rn_l = tt; t->rn_off = b >> 3;
193 	tt->rn_b = -1; tt->rn_key = v; tt->rn_p = t;
194 	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
195 #ifdef RN_DEBUG
196 	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
197 	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
198 #endif
199 	return t;
200 }
201 
int rn_debug =  1;	/* enables the traverse() dumps in rn_insert under RN_DEBUG */
/*
 * Insert key v into the tree under "head", using the two caller-supplied
 * nodes for the new leaf and interior node.  If an identical key is
 * already present, *dupentry is set to 1 and the existing leaf is
 * returned; otherwise *dupentry is 0 and the new leaf is returned.
 * The first byte of v is its length.
 */
struct radix_node *
rn_insert(v, head, dupentry, nodes)
	caddr_t v;
	struct radix_node *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	int head_off = head->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v, head);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
    	/*
	 *find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	/* b counts down from the end of the differing byte to the first
	 * differing bit (bit 0 = high-order bit of the key). */
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = head;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
		/* the (unsigned) cast makes a leaf's rn_b == -1 compare huge,
		 * terminating the descent at any leaf */
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	/* Splice the new interior node t between p and x, hanging the new
	 * leaf tt off whichever side of t matches v's bit b. */
	t = rn_newpair(v, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
    }
	return (tt);
}
265 
266 struct radix_node *
267 rn_addmask(netmask, search, skip)
268 caddr_t netmask;
269 {
270 	register struct radix_node *x;
271 	register caddr_t cp, cplim;
272 	register int b, mlen, j;
273 	int maskduplicated;
274 
275 	mlen = *(u_char *)netmask;
276 	if (search) {
277 		x = rn_search(netmask, rn_maskhead);
278 		mlen = *(u_char *)netmask;
279 		if (Bcmp(netmask, x->rn_key, mlen) == 0)
280 			return (x);
281 	}
282 	R_Malloc(x, struct radix_node *, MAXKEYLEN + 2 * sizeof (*x));
283 	if (x == 0)
284 		return (0);
285 	Bzero(x, MAXKEYLEN + 2 * sizeof (*x));
286 	cp = (caddr_t)(x + 2);
287 	Bcopy(netmask, cp, mlen);
288 	netmask = cp;
289 	x = rn_insert(netmask, rn_maskhead, &maskduplicated, x);
290 	/*
291 	 * Calculate index of mask.
292 	 */
293 	cplim = netmask + mlen;
294 	for (cp = netmask + skip; cp < cplim; cp++)
295 		if (*(u_char *)cp != 0xff)
296 			break;
297 	b = (cp - netmask) << 3;
298 	if (cp != cplim) {
299 		if (*cp != 0) {
300 			gotOddMasks = 1;
301 			for (j = 0x80; j; b++, j >>= 1)
302 				if ((j & *cp) == 0)
303 					break;
304 		}
305 	}
306 	x->rn_b = -1 - b;
307 	return (x);
308 }
309 
/*
 * Add the route (v, netmask) to the tree under "head", using the two
 * caller-supplied nodes.  The netmask is first canonicalized to its
 * unique copy in the mask tree; duplicate keys are chained on
 * rn_dupedkey; and the route's mask is attached (as a radix_mask) to
 * the highest ancestor whose index permits it, so rn_match can find it
 * while backtracking.  Returns the new leaf, or 0 if the identical
 * (key, mask) route already exists or allocation fails.
 */
struct radix_node *
rn_addroute(v, netmask, head, treenodes)
struct radix_node *head;
	caddr_t netmask, v;
	struct radix_node treenodes[2];
{
	register int j;
	register caddr_t cp;
	register struct radix_node *t, *x, *tt;
	short b = 0, b_leaf;
	int vlen = *(u_char *)v, mlen, keyduplicated;
	caddr_t cplim; unsigned char *maskp;
	struct radix_mask *m, **mp;
	struct radix_node *saved_tt;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask to speed avoiding duplicate references at
	 * nodes and possibly save time in calculating indices.
	 */
	if (netmask)  {
		x = rn_search(netmask, rn_maskhead);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) != 0) {
			x = rn_addmask(netmask, 0, head->rn_off);
			if (x == 0)
				return (0);
		}
		netmask = x->rn_key;	/* canonical copy from the mask tree */
		b = -1 - x->rn_b;	/* index (first 0-bit position) of the mask */
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		do {
			if (tt->rn_mask == netmask)
				return (0);	/* exact (key, mask) already present */
			t = tt;
		} while (tt = tt->rn_dupedkey);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * XXX: we really ought to sort the masks
		 * for a duplicated key the same way as in a masklist.
		 * It is an unfortunate pain having to relocate
		 * the head of the list.
		 */
		t->rn_dupedkey = tt = treenodes;
#ifdef RN_DEBUG
		t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;	/* mask's index, as set by rn_addmask */
	}
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		/* sibling is a leaf: hang its mask on the parent if it applies */
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				x->rn_mklist = t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b ))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	/* climb while the mask's index still covers the ancestor */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != head);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			m->rm_refs++;	/* mask already on the list: share it */
			tt->rn_mklist = m;
			return tt;
		}
		maskp = (u_char *)m->rm_mask;
		for (cp = netmask; cp < cplim; cp++)
			if (*(u_char *)cp > *maskp++)
				goto on2;
	}
on2:
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}
447 
448 struct radix_node *
449 rn_delete(v, netmask, head)
450 	caddr_t v, netmask;
451 	struct radix_node *head;
452 {
453 	register struct radix_node *t, *p, *x = head;
454 	register struct radix_node *tt = rn_search(v, x);
455 	int b, head_off = x->rn_off, vlen =  * (u_char *) v;
456 	struct radix_mask *m, *saved_m, **mp;
457 	struct radix_node *dupedkey, *saved_tt = tt;
458 
459 	if (tt == 0 ||
460 	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
461 		return (0);
462 	/*
463 	 * Delete our route from mask lists.
464 	 */
465 	if (dupedkey = tt->rn_dupedkey) {
466 		if (netmask)
467 			netmask = rn_search(netmask, rn_maskhead)->rn_key;
468 		while (tt->rn_mask != netmask)
469 			if ((tt = tt->rn_dupedkey) == 0)
470 				return (0);
471 	}
472 	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
473 		goto on1;
474 	if (m->rm_mask != tt->rn_mask) {
475 		printf("rn_delete: inconsistent annotation\n");
476 		goto on1;
477 	}
478 	if (--m->rm_refs >= 0)
479 		goto on1;
480 	b = -1 - tt->rn_b;
481 	t = saved_tt->rn_p;
482 	if (b > t->rn_b)
483 		goto on1; /* Wasn't lifted at all */
484 	do {
485 		x = t;
486 		t = t->rn_p;
487 	} while (b <= t->rn_b && x != head);
488 	for (mp = &x->rn_mklist; m = *mp; mp = &m->rm_mklist)
489 		if (m == saved_m) {
490 			*mp = m->rm_mklist;
491 			MKFree(m);
492 			break;
493 		}
494 	if (m == 0)
495 		printf("rn_delete: couldn't find our annotation\n");
496 on1:
497 	/*
498 	 * Eliminate us from tree
499 	 */
500 	if (tt->rn_flags & RNF_ROOT)
501 		return (0);
502 #ifdef RN_DEBUG
503 	/* Get us out of the creation list */
504 	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
505 	if (t) t->rn_ybro = tt->rn_ybro;
506 #endif RN_DEBUG
507 	t = tt->rn_p;
508 	if (dupedkey) {
509 		if (tt == saved_tt) {
510 			x = dupedkey; x->rn_p = t;
511 			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
512 #ifndef RN_DEBUG
513 			x++; t = tt + 1; *x = *t; p = t->rn_p;
514 #else
515 			x++; b = x->rn_info; t = tt + 1; *x = *t; p = t->rn_p;
516 			x->rn_info = b;
517 #endif
518 			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
519 			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
520 		} else {
521 			for (p = saved_tt; p && p->rn_dupedkey != tt;)
522 				p = p->rn_dupedkey;
523 			if (p) p->rn_dupedkey = tt->rn_dupedkey;
524 			else printf("rn_delete: couldn't find us\n");
525 		}
526 		goto out;
527 	}
528 	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
529 	p = t->rn_p;
530 	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
531 	x->rn_p = p;
532 	/*
533 	 * Demote routes attached to us.
534 	 */
535 	if (t->rn_mklist) {
536 		if (x->rn_b >= 0) {
537 			for (mp = &x->rn_mklist; m = *mp;)
538 				mp = &m->rm_mklist;
539 			*mp = t->rn_mklist;
540 		} else {
541 			for (m = t->rn_mklist; m;) {
542 				struct radix_mask *mm = m->rm_mklist;
543 				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
544 					x->rn_mklist = 0;
545 					MKFree(m);
546 				} else
547 					printf("%s %x at %x\n",
548 					    "rn_delete: Orphaned Mask", m, x);
549 				m = mm;
550 			}
551 		}
552 	}
553 	/*
554 	 * We may be holding an active internal node in the tree.
555 	 */
556 	x = tt + 1;
557 	if (t != x) {
558 #ifndef RN_DEBUG
559 		*t = *x;
560 #else
561 		b = t->rn_info; *t = *x; t->rn_info = b;
562 #endif
563 		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
564 		p = x->rn_p;
565 		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
566 	}
567 out:
568 	tt->rn_flags &= ~RNF_ACTIVE;
569 	tt[1].rn_flags &= ~RNF_ACTIVE;
570 	return (tt);
571 }
/* All-zeros and all-ones keys for the leftmost/rightmost end-marker leaves. */
char rn_zeros[MAXKEYLEN], rn_ones[MAXKEYLEN];
573 
/*
 * Allocate and initialize a radix tree head for address family af,
 * with keys starting at byte offset "off".  Builds the two-leaf
 * skeleton (all-zeros left leaf, all-ones right leaf, both RNF_ROOT),
 * bootstrapping the shared mask tree on first call via a recursive
 * rn_inithead of radix_node_head itself, and links the new head onto
 * the rnh_next chain.  Returns 1 on success (or if *head is already
 * set), 0 on allocation failure.  Implicit int return (K&R style).
 */
rn_inithead(head, off, af)
struct radix_node_head **head;
int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);	/* already initialized */
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	/* t = interior root; tt = left (all-zeros) leaf; ttt = right leaf */
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;	/* root is its own parent */
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_af = af;
	rnh->rnh_treetop = t;
	if (radix_node_head == 0) {
		/* first call ever: fill rn_ones and create the mask tree */
		caddr_t cp = rn_ones, cplim = rn_ones + MAXKEYLEN;
		while (cp < cplim)
			*cp++ = -1;
		if (rn_inithead(&radix_node_head, 0, 0) == 0) {
			Free(rnh);
			*head = 0;
			return (0);
		}
		mask_rnhead = radix_node_head;
	}
	rnh->rnh_next = radix_node_head->rnh_next;
	if (radix_node_head != rnh)
		radix_node_head->rnh_next = rnh;
	return (1);
}
614