xref: /dragonfly/sys/kern/subr_rman.c (revision 28c7b939)
1 /*
2  * Copyright 1998 Massachusetts Institute of Technology
3  *
4  * Permission to use, copy, modify, and distribute this software and
5  * its documentation for any purpose and without fee is hereby
6  * granted, provided that both the above copyright notice and this
7  * permission notice appear in all copies, that both the above
8  * copyright notice and this permission notice appear in all
9  * supporting documentation, and that the name of M.I.T. not be used
10  * in advertising or publicity pertaining to distribution of the
11  * software without specific, written prior permission.  M.I.T. makes
12  * no representations about the suitability of this software for any
13  * purpose.  It is provided "as is" without express or implied
14  * warranty.
15  *
16  * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
17  * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
18  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
20  * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/kern/subr_rman.c,v 1.10.2.1 2001/06/05 08:06:08 imp Exp $
30  * $DragonFly: src/sys/kern/subr_rman.c,v 1.5 2003/11/15 21:05:43 dillon Exp $
31  */
32 
33 /*
34  * The kernel resource manager.  This code is responsible for keeping track
35  * of hardware resources which are apportioned out to various drivers.
36  * It does not actually assign those resources, and it is not expected
37  * that end-device drivers will call into this code directly.  Rather,
38  * the code which implements the buses that those devices are attached to,
39  * and the code which manages CPU resources, will call this code, and the
40  * end-device drivers will make upcalls to that code to actually perform
41  * the allocation.
42  *
43  * There are two sorts of resources managed by this code.  The first is
44  * the more familiar array (RMAN_ARRAY) type; resources in this class
45  * consist of a sequence of individually-allocatable objects which have
46  * been numbered in some well-defined order.  Most of the resources
47  * are of this type, as it is the most familiar.  The second type is
48  * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
49  * resources in which each instance is indistinguishable from every
50  * other instance).  The principal anticipated application of gauges
51  * is in the context of power consumption, where a bus may have a specific
52  * power budget which all attached devices share.  RMAN_GAUGE is not
53  * implemented yet.
54  *
55  * For array resources, we make one simplifying assumption: two clients
56  * sharing the same resource must use the same range of indices.  That
57  * is to say, sharing of overlapping-but-not-identical regions is not
58  * permitted.
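 *
 * A minimal usage sketch: roughly how a bus driver might employ this
 * interface to manage an I/O port space.  The rman_* calls and RF_* flags
 * are the ones implemented below; the identifiers mybus_port_rman and
 * child, and the 0x0-0xffff port range, are purely illustrative.
 *
 *	static struct rman mybus_port_rman;
 *
 *	mybus_port_rman.rm_type = RMAN_ARRAY;
 *	mybus_port_rman.rm_descr = "mybus I/O ports";
 *	if (rman_init(&mybus_port_rman) != 0 ||
 *	    rman_manage_region(&mybus_port_rman, 0x0ul, 0xfffful) != 0)
 *		panic("mybus: cannot set up port rman");
 *
 * Later, on behalf of a child device, eight ports may be reserved anywhere
 * in that space and activated in one step:
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&mybus_port_rman, 0x0ul, 0xfffful, 8,
 *				    RF_ACTIVE, child);
 *	if (res == NULL)
 *		return (ENXIO);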
59  */
60 
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/kernel.h>
64 #include <sys/lock.h>
65 #include <sys/malloc.h>
66 #include <sys/bus.h>		/* XXX debugging */
67 #include <machine/bus.h>
68 #include <sys/rman.h>
69 
70 static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
71 
72 struct	rman_head rman_head;
73 static	struct lwkt_token rman_tok; /* token protecting rman_head */
74 static	int int_rman_activate_resource(struct rman *rm, struct resource *r,
75 				       struct resource **whohas);
76 static	int int_rman_deactivate_resource(struct resource *r);
77 static	int int_rman_release_resource(struct rman *rm, struct resource *r);
78 
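/*
 * True once a CIRCLEQ iteration has wrapped back around to the list head,
 * i.e. the iteration variable no longer points at a real element.
 */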
79 #define	CIRCLEQ_TERMCOND(var, head)	(var == (void *)&(head))
80 
81 int
82 rman_init(struct rman *rm)
83 {
84 	static int once;
85 
86 	if (once == 0) {
87 		once = 1;
88 		TAILQ_INIT(&rman_head);
89 		lwkt_inittoken(&rman_tok);
90 	}
91 
92 	if (rm->rm_type == RMAN_UNINIT)
93 		panic("rman_init");
94 	if (rm->rm_type == RMAN_GAUGE)
95 		panic("implement RMAN_GAUGE");
96 
97 	CIRCLEQ_INIT(&rm->rm_list);
98 	rm->rm_slock = malloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
99 	if (rm->rm_slock == NULL)
100 		return ENOMEM;
101 	lwkt_inittoken(rm->rm_slock);
102 
103 	lwkt_gettoken(&rman_tok);
104 	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
105 	lwkt_reltoken(&rman_tok);
106 	return 0;
107 }
108 
109 /*
110  * NB: this interface is not robust against programming errors which
111  * add multiple copies of the same region.
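 * For instance, managing [0x3f8, 0x3ff] twice simply leaves two identical,
 * overlapping entries on rm_list; the insertion loop below orders regions
 * by their endpoints but never checks for overlap.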
112  */
113 int
114 rman_manage_region(struct rman *rm, u_long start, u_long end)
115 {
116 	struct resource *r, *s;
117 
118 	r = malloc(sizeof *r, M_RMAN, M_NOWAIT);
119 	if (r == 0)
120 		return ENOMEM;
121 	bzero(r, sizeof *r);
122 	r->r_sharehead = 0;
123 	r->r_start = start;
124 	r->r_end = end;
125 	r->r_flags = 0;
126 	r->r_dev = 0;
127 	r->r_rm = rm;
128 
129 	lwkt_gettoken(rm->rm_slock);
130 	for (s = CIRCLEQ_FIRST(&rm->rm_list);
131 	     !CIRCLEQ_TERMCOND(s, rm->rm_list) && s->r_end < r->r_start;
132 	     s = CIRCLEQ_NEXT(s, r_link))
133 		;
134 
135 	if (CIRCLEQ_TERMCOND(s, rm->rm_list)) {
136 		CIRCLEQ_INSERT_TAIL(&rm->rm_list, r, r_link);
137 	} else {
138 		CIRCLEQ_INSERT_BEFORE(&rm->rm_list, s, r, r_link);
139 	}
140 
141 	lwkt_reltoken(rm->rm_slock);
142 	return 0;
143 }
144 
145 int
146 rman_fini(struct rman *rm)
147 {
148 	struct resource *r;
149 
150 	lwkt_gettoken(rm->rm_slock);
151 	CIRCLEQ_FOREACH(r, &rm->rm_list, r_link) {
152 		if (r->r_flags & RF_ALLOCATED) {
153 			lwkt_reltoken(rm->rm_slock);
154 			return EBUSY;
155 		}
156 	}
157 
158 	/*
159 	 * There really should only be one region left on the list if we are
160 	 * in this state and the code is working properly, but it can't hurt.
161 	 */
162 	while (!CIRCLEQ_EMPTY(&rm->rm_list)) {
163 		r = CIRCLEQ_FIRST(&rm->rm_list);
164 		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
165 		free(r, M_RMAN);
166 	}
167 	lwkt_reltoken(rm->rm_slock);
168 	lwkt_gettoken(&rman_tok);
169 	TAILQ_REMOVE(&rman_head, rm, rm_link);
170 	lwkt_reltoken(&rman_tok);
171 	free(rm->rm_slock, M_RMAN);
172 
173 	return 0;
174 }
175 
176 struct resource *
177 rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
178 		      u_int flags, struct device *dev)
179 {
180 	u_int	want_activate;
181 	struct	resource *r, *s, *rv;
182 	u_long	rstart, rend;
183 
184 	rv = 0;
185 
186 #ifdef RMAN_DEBUG
187 	printf("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
188 	       "%#lx, flags %u, device %s%d\n", rm->rm_descr, start, end,
189 	       count, flags, device_get_name(dev), device_get_unit(dev));
190 #endif /* RMAN_DEBUG */
191 	want_activate = (flags & RF_ACTIVE);
192 	flags &= ~RF_ACTIVE;
193 
194 	lwkt_gettoken(rm->rm_slock);
195 
196 	for (r = CIRCLEQ_FIRST(&rm->rm_list);
197 	     !CIRCLEQ_TERMCOND(r, rm->rm_list) && r->r_end < start;
198 	     r = CIRCLEQ_NEXT(r, r_link))
199 		;
200 
201 	if (CIRCLEQ_TERMCOND(r, rm->rm_list)) {
202 #ifdef RMAN_DEBUG
203 		printf("could not find a region\n");
204 #endif
205 		goto out;
206 	}
207 
208 	/*
209 	 * First try to find an acceptable totally-unshared region.
210 	 */
211 	for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list);
212 	     s = CIRCLEQ_NEXT(s, r_link)) {
213 #ifdef RMAN_DEBUG
214 		printf("considering [%#lx, %#lx]\n", s->r_start, s->r_end);
215 #endif /* RMAN_DEBUG */
216 		if (s->r_start > end) {
217 #ifdef RMAN_DEBUG
218 			printf("s->r_start (%#lx) > end (%#lx)\n", s->r_start, end);
219 #endif /* RMAN_DEBUG */
220 			break;
221 		}
222 		if (s->r_flags & RF_ALLOCATED) {
223 #ifdef RMAN_DEBUG
224 			printf("region is allocated\n");
225 #endif /* RMAN_DEBUG */
226 			continue;
227 		}
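		/*
		 * Clip the candidate start to the request window, then round
		 * it up to the requested alignment.  For example, with
		 * RF_ALIGNMENT(flags) == 12 (4KB alignment), an rstart of
		 * 0x10001 becomes (0x10001 + 0xfff) & ~0xfff = 0x11000.
		 */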
228 		rstart = max(s->r_start, start);
229 		rstart = (rstart + ((1ul << RF_ALIGNMENT(flags))) - 1) &
230 		    ~((1ul << RF_ALIGNMENT(flags)) - 1);
231 		rend = min(s->r_end, max(start + count, end));
232 #ifdef RMAN_DEBUG
233 		printf("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
234 		       rstart, rend, (rend - rstart + 1), count);
235 #endif /* RMAN_DEBUG */
236 
237 		if ((rend - rstart + 1) >= count) {
238 #ifdef RMAN_DEBUG
239 			printf("candidate region: [%#lx, %#lx], size %#lx\n",
240 			       rstart, rend, (rend - rstart + 1));
241 #endif /* RMAN_DEBUG */
242 			if ((s->r_end - s->r_start + 1) == count) {
243 #ifdef RMAN_DEBUG
244 				printf("candidate region is entire chunk\n");
245 #endif /* RMAN_DEBUG */
246 				rv = s;
247 				rv->r_flags |= RF_ALLOCATED | flags;
248 				rv->r_dev = dev;
249 				goto out;
250 			}
251 
252 			/*
253 			 * If s->r_start < rstart and
254 			 *    s->r_end > rstart + count - 1, then
255 			 * we need to split the region into three pieces
256 			 * (the middle one will get returned to the user).
257 			 * Otherwise, we are allocating at either the
258 			 * beginning or the end of s, so we only need to
259 			 * split it in two.  The first case requires
260 			 * two new allocations; the second requires but one.
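			 * Concretely, with invented numbers: if s spans
			 * [0x0000, 0xffff] and the aligned request is 0x100
			 * bytes starting at 0x4000, s is trimmed to
			 * [0x0000, 0x3fff], rv gets [0x4000, 0x40ff], and a
			 * new region r covers [0x4100, 0xffff].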
261 			 */
262 			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT);
263 			if (rv == 0)
264 				goto out;
265 			bzero(rv, sizeof *rv);
266 			rv->r_start = rstart;
267 			rv->r_end = rstart + count - 1;
268 			rv->r_flags = flags | RF_ALLOCATED;
269 			rv->r_dev = dev;
270 			rv->r_sharehead = 0;
271 			rv->r_rm = rm;
272 
273 			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
274 #ifdef RMAN_DEBUG
275 				printf("splitting region in three parts: "
276 				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
277 				       s->r_start, rv->r_start - 1,
278 				       rv->r_start, rv->r_end,
279 				       rv->r_end + 1, s->r_end);
280 #endif /* RMAN_DEBUG */
281 				/*
282 				 * We are allocating in the middle.
283 				 */
284 				r = malloc(sizeof *r, M_RMAN, M_NOWAIT);
285 				if (r == 0) {
286 					free(rv, M_RMAN);
287 					rv = 0;
288 					goto out;
289 				}
290 				bzero(r, sizeof *r);
291 				r->r_start = rv->r_end + 1;
292 				r->r_end = s->r_end;
293 				r->r_flags = s->r_flags;
294 				r->r_dev = 0;
295 				r->r_sharehead = 0;
296 				r->r_rm = rm;
297 				s->r_end = rv->r_start - 1;
298 				CIRCLEQ_INSERT_AFTER(&rm->rm_list, s, rv,
299 						     r_link);
300 				CIRCLEQ_INSERT_AFTER(&rm->rm_list, rv, r,
301 						     r_link);
302 			} else if (s->r_start == rv->r_start) {
303 #ifdef RMAN_DEBUG
304 				printf("allocating from the beginning\n");
305 #endif /* RMAN_DEBUG */
306 				/*
307 				 * We are allocating at the beginning.
308 				 */
309 				s->r_start = rv->r_end + 1;
310 				CIRCLEQ_INSERT_BEFORE(&rm->rm_list, s, rv,
311 						      r_link);
312 			} else {
313 #ifdef RMAN_DEBUG
314 				printf("allocating at the end\n");
315 #endif /* RMAN_DEBUG */
316 				/*
317 				 * We are allocating at the end.
318 				 */
319 				s->r_end = rv->r_start - 1;
320 				CIRCLEQ_INSERT_AFTER(&rm->rm_list, s, rv,
321 						     r_link);
322 			}
323 			goto out;
324 		}
325 	}
326 
327 	/*
328 	 * Now find an acceptable shared region, if the client's requirements
329 	 * allow sharing.  By our implementation restriction, a candidate
330 	 * region must match exactly by both size and sharing type in order
331 	 * to be considered compatible with the client's request.  (The
332 	 * former restriction could probably be lifted without too much
333 	 * additional work, but this does not seem warranted.)
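	 * For example, a request for 0x10 shareable ports can only piggyback
	 * on an existing region that is itself shareable, exactly 0x10 ports
	 * long, and wholly contained in the requested [start, end] window.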
334 	 */
335 #ifdef RMAN_DEBUG
336 	printf("no unshared regions found\n");
337 #endif /* RMAN_DEBUG */
338 	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
339 		goto out;
340 
341 	for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list);
342 	     s = CIRCLEQ_NEXT(s, r_link)) {
343 		if (s->r_start > end)
344 			break;
345 		if ((s->r_flags & flags) != flags)
346 			continue;
347 		rstart = max(s->r_start, start);
348 		rend = min(s->r_end, max(start + count, end));
349 		if (s->r_start >= start && s->r_end <= end
350 		    && (s->r_end - s->r_start + 1) == count) {
351 			rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT);
352 			if (rv == 0)
353 				goto out;
354 			bzero(rv, sizeof *rv);
355 			rv->r_start = s->r_start;
356 			rv->r_end = s->r_end;
357 			rv->r_flags = s->r_flags &
358 				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
359 			rv->r_dev = dev;
360 			rv->r_rm = rm;
361 			if (s->r_sharehead == 0) {
362 				s->r_sharehead = malloc(sizeof *s->r_sharehead,
363 							M_RMAN, M_NOWAIT);
364 				if (s->r_sharehead == 0) {
365 					free(rv, M_RMAN);
366 					rv = 0;
367 					goto out;
368 				}
369 				bzero(s->r_sharehead, sizeof *s->r_sharehead);
370 				LIST_INIT(s->r_sharehead);
371 				LIST_INSERT_HEAD(s->r_sharehead, s,
372 						 r_sharelink);
373 				s->r_flags |= RF_FIRSTSHARE;
374 			}
375 			rv->r_sharehead = s->r_sharehead;
376 			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
377 			goto out;
378 		}
379 	}
380 
381 	/*
382 	 * We couldn't find anything.
383 	 */
384 out:
385 	/*
386 	 * If the user specified RF_ACTIVE in the initial flags,
387 	 * which is reflected in `want_activate', we attempt to atomically
388 	 * activate the resource.  If this fails, we release the resource
389 	 * and indicate overall failure.  (This behavior probably doesn't
390 	 * make sense for RF_TIMESHARE-type resources.)
391 	 */
392 	if (rv && want_activate) {
393 		struct resource *whohas;
394 		if (int_rman_activate_resource(rm, rv, &whohas)) {
395 			int_rman_release_resource(rm, rv);
396 			rv = 0;
397 		}
398 	}
399 
400 	lwkt_reltoken(rm->rm_slock);
401 	return (rv);
402 }
403 
404 static int
405 int_rman_activate_resource(struct rman *rm, struct resource *r,
406 			   struct resource **whohas)
407 {
408 	struct resource *s;
409 	int ok;
410 
411 	/*
412 	 * If we are not timesharing, then there is nothing much to do.
413 	 * If we already have the resource, then there is nothing at all to do.
414 	 * If we are not on a sharing list with anybody else, then there is
415 	 * little to do.
416 	 */
417 	if ((r->r_flags & RF_TIMESHARE) == 0
418 	    || (r->r_flags & RF_ACTIVE) != 0
419 	    || r->r_sharehead == 0) {
420 		r->r_flags |= RF_ACTIVE;
421 		return 0;
422 	}
423 
424 	ok = 1;
425 	for (s = LIST_FIRST(r->r_sharehead); s && ok;
426 	     s = LIST_NEXT(s, r_sharelink)) {
427 		if ((s->r_flags & RF_ACTIVE) != 0) {
428 			ok = 0;
429 			*whohas = s;
430 		}
431 	}
432 	if (ok) {
433 		r->r_flags |= RF_ACTIVE;
434 		return 0;
435 	}
436 	return EBUSY;
437 }
438 
439 int
440 rman_activate_resource(struct resource *r)
441 {
442 	int rv;
443 	struct resource *whohas;
444 	struct rman *rm;
445 
446 	rm = r->r_rm;
447 	lwkt_gettoken(rm->rm_slock);
448 	rv = int_rman_activate_resource(rm, r, &whohas);
449 	lwkt_reltoken(rm->rm_slock);
450 	return rv;
451 }
452 
453 int
454 rman_await_resource(struct resource *r, int slpflags, int timo)
455 {
456 	int	rv, s;
457 	struct	resource *whohas;
458 	struct	rman *rm;
459 
460 	rm = r->r_rm;
461 	for (;;) {
462 		lwkt_gettoken(rm->rm_slock);
463 		rv = int_rman_activate_resource(rm, r, &whohas);
464 		if (rv != EBUSY)
465 			return (rv);	/* returns with rm_slock token held */
466 
467 		if (r->r_sharehead == 0)
468 			panic("rman_await_resource");
469 		/*
470 		 * splhigh hopefully will prevent a race between
471 		 * lwkt_reltoken and tsleep where a process
472 		 * could conceivably get in and release the resource
473 		 * before we have a chance to sleep on it. YYY
474 		 */
475 		s = splhigh();
476 		whohas->r_flags |= RF_WANTED;
477 		lwkt_reltoken(rm->rm_slock);	/* YYY */
478 		rv = tsleep(r->r_sharehead, slpflags, "rmwait", timo);
479 		if (rv) {
480 			splx(s);
481 			return rv;
482 		}
483 		lwkt_gettoken(rm->rm_slock);
484 		splx(s);
485 	}
486 }
487 
488 static int
489 int_rman_deactivate_resource(struct resource *r)
490 {
491 	struct	rman *rm;
492 
493 	rm = r->r_rm;
494 	r->r_flags &= ~RF_ACTIVE;
495 	if (r->r_flags & RF_WANTED) {
496 		r->r_flags &= ~RF_WANTED;
497 		wakeup(r->r_sharehead);
498 	}
499 	return 0;
500 }
501 
502 int
503 rman_deactivate_resource(struct resource *r)
504 {
505 	struct	rman *rm;
506 
507 	rm = r->r_rm;
508 	lwkt_gettoken(rm->rm_slock);
509 	int_rman_deactivate_resource(r);
510 	lwkt_reltoken(rm->rm_slock);
511 	return 0;
512 }
513 
514 static int
515 int_rman_release_resource(struct rman *rm, struct resource *r)
516 {
517 	struct	resource *s, *t;
518 
519 	if (r->r_flags & RF_ACTIVE)
520 		int_rman_deactivate_resource(r);
521 
522 	/*
523 	 * Check for a sharing list first.  If there is one, then we don't
524 	 * have to think as hard.
525 	 */
526 	if (r->r_sharehead) {
527 		/*
528 		 * If a sharing list exists, then we know there are at
529 		 * least two sharers.
530 		 *
531 		 * If we are in the main circleq, appoint someone else.
532 		 */
533 		LIST_REMOVE(r, r_sharelink);
534 		s = LIST_FIRST(r->r_sharehead);
535 		if (r->r_flags & RF_FIRSTSHARE) {
536 			s->r_flags |= RF_FIRSTSHARE;
537 			CIRCLEQ_INSERT_BEFORE(&rm->rm_list, r, s, r_link);
538 			CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
539 		}
540 
541 		/*
542 		 * Make sure that the sharing list goes away completely
543 		 * if the resource is no longer being shared at all.
544 		 */
545 		if (LIST_NEXT(s, r_sharelink) == 0) {
546 			free(s->r_sharehead, M_RMAN);
547 			s->r_sharehead = 0;
548 			s->r_flags &= ~RF_FIRSTSHARE;
549 		}
550 		goto out;
551 	}
552 
553 	/*
554 	 * Look at the adjacent resources in the list and see if our
555 	 * segment can be merged with any of them.
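	 * For example, releasing [0x100, 0x1ff] when its free neighbours are
	 * [0x000, 0x0ff] and [0x200, 0x2ff] collapses all three into a single
	 * free region [0x000, 0x2ff]; if only one neighbour is free the two
	 * are merged, and if neither is, r is simply marked unallocated below.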
556 	 */
557 	s = CIRCLEQ_PREV(r, r_link);
558 	t = CIRCLEQ_NEXT(r, r_link);
559 
560 	if (s != (void *)&rm->rm_list && (s->r_flags & RF_ALLOCATED) == 0
561 	    && t != (void *)&rm->rm_list && (t->r_flags & RF_ALLOCATED) == 0) {
562 		/*
563 		 * Merge all three segments.
564 		 */
565 		s->r_end = t->r_end;
566 		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
567 		CIRCLEQ_REMOVE(&rm->rm_list, t, r_link);
568 		free(t, M_RMAN);
569 	} else if (s != (void *)&rm->rm_list
570 		   && (s->r_flags & RF_ALLOCATED) == 0) {
571 		/*
572 		 * Merge previous segment with ours.
573 		 */
574 		s->r_end = r->r_end;
575 		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
576 	} else if (t != (void *)&rm->rm_list
577 		   && (t->r_flags & RF_ALLOCATED) == 0) {
578 		/*
579 		 * Merge next segment with ours.
580 		 */
581 		t->r_start = r->r_start;
582 		CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
583 	} else {
584 		/*
585 		 * At this point, we know there is nothing we
586 		 * can potentially merge with, because on each
587 		 * side, there is either nothing there or what is
588 		 * there is still allocated.  In that case, we don't
589 		 * want to remove r from the list; we simply want to
590 		 * change it to an unallocated region and return
591 		 * without freeing anything.
592 		 */
593 		r->r_flags &= ~RF_ALLOCATED;
594 		return 0;
595 	}
596 
597 out:
598 	free(r, M_RMAN);
599 	return 0;
600 }
601 
602 int
603 rman_release_resource(struct resource *r)
604 {
605 	int	rv;
606 	struct	rman *rm = r->r_rm;
607 
608 	lwkt_gettoken(rm->rm_slock);
609 	rv = int_rman_release_resource(rm, r);
610 	lwkt_reltoken(rm->rm_slock);
611 	return (rv);
612 }
613 
614 uint32_t
615 rman_make_alignment_flags(uint32_t size)
616 {
617 	int	i;
618 
619 	/*
620 	 * Find the highest bit set, and add one if more than one bit
621 	 * set.  We're effectively computing the ceil(log2(size)) here.
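	 * For example, size 0x1000 has a single bit set, so i ends up as 12
	 * (a 4KB alignment); size 0x1800 has two bits set, so i is bumped
	 * from 12 to 13 and allocations are aligned to 0x2000.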
622 	 */
623 	for (i = 32; i > 0; i--)
624 		if ((1 << i) & size)
625 			break;
626 	if (~(1 << i) & size)
627 		i++;
628 
629 	return(RF_ALIGNMENT_LOG2(i));
630 }
631