/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order, and most resources are of
 * this type.  The second type is called a gauge (RMAN_GAUGE), and models
 * fungible resources (i.e., resources in which each instance is
 * indistinguishable from every other instance).  The principal
 * anticipated application of gauges is in the context of power
 * consumption, where a bus may have a specific power budget which all
 * attached devices share.  RMAN_GAUGE is not implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
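
/*
 * Illustrative usage sketch (the names, address range, and error
 * handling here are hypothetical, not part of this file): a bus driver
 * typically embeds a struct rman in its softc, initializes and seeds it
 * with the region it decodes, and then carves ranges out of it on
 * behalf of child devices:
 *
 *	static struct rman mem_rman;
 *	struct resource *res;
 *
 *	mem_rman.rm_type = RMAN_ARRAY;
 *	mem_rman.rm_descr = "example memory window";
 *	if (rman_init(&mem_rman) != 0 ||
 *	    rman_manage_region(&mem_rman, 0x80000000ul, 0x8ffffffful) != 0)
 *		panic("mem_rman setup failed");
 *	res = rman_reserve_resource(&mem_rman, 0x80000000ul, 0x8ffffffful,
 *	    0x1000, 0, dev);	/* dev: requesting device, may be NULL */
 *
 * On success, [rman_get_start(res), rman_get_end(res)] is marked
 * allocated; rman_release_resource(res) gives it back and allows
 * adjacent free regions to merge again.
 */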

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	void	*r_irq_cookie;	/* interrupt cookie for this (interrupt) resource */
	device_t r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");
#define	DPRINTF(params)	do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx; /* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
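
/*
 * For illustration (hypothetical values): starting from an empty rman,
 * rman_manage_region(rm, 0x0, 0xf) followed by
 * rman_manage_region(rm, 0x10, 0x1f) leaves a single free region
 * [0x0, 0x1f], because the two ranges are exactly adjacent and
 * unallocated; a third call overlapping either range fails with EBUSY.
 */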

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
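/*
 * For illustration (hypothetical values): a driver holding the allocated
 * range [0x8000, 0x9fff] could grow it downward with
 * rman_adjust_resource(res, 0x7000, 0x9fff); this succeeds only if
 * [0x7000, 0x7fff] is free in the same rman.  Shrinking with
 * rman_adjust_resource(res, 0x8000, 0x8fff) returns [0x9000, 0x9fff]
 * to the free list.
 */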
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
			    rman_res_t count, rman_res_t bound, u_int flags,
			    device_t dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
	       "length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT(count != 0, ("%s: attempted to allocate an empty range",
	    __func__));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL) {
	    DPRINTF(("NULL list head\n"));
	} else {
	    DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link)) {
		DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
			r->r_end, start, count-1));
	}

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
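		 *
		 * For example (hypothetical values), with amask == 0xf
		 * (16-byte alignment), bound == 0x100 and count == 0x20, a
		 * candidate start of 0x1f0 is already aligned but
		 * [0x1f0, 0x20f] crosses a 0x100 boundary, so rstart is
		 * advanced by bound - (rstart & ~bmask) == 0x10 to 0x200,
		 * which satisfies both constraints.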
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart) >= (count - 1)) {
			DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
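	 *
	 * For example, a request for the single index [0x10, 0x10] with
	 * RF_SHAREABLE set can be satisfied by an existing shared region
	 * only if that region is exactly [0x10, 0x10] and was reserved
	 * with the same RF_SHAREABLE | RF_PREFETCHABLE bits.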
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
		      rman_res_t count, u_int flags, device_t dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are on the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit is
	 * set.  We're effectively computing ceil(log2(size)) here.
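	 *
	 * For example, size 0x1000 has a single bit set, so the loop
	 * stops at i == 12; size 0x1800 has a second bit set and is
	 * rounded up to i == 13 (a 0x2000-byte alignment).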
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}

void
rman_set_start(struct resource *r, rman_res_t start)
{

	r->__r_i->r_start = start;
}

rman_res_t
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, rman_res_t end)
{

	r->__r_i->r_end = end;
}

rman_res_t
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_irq_cookie(struct resource *r, void *c)
{

	r->__r_i->r_irq_cookie = c;
}

void *
rman_get_irq_cookie(struct resource *r)
{

	return (r->__r_i->r_irq_cookie);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

	KASSERT(rman_get_size(r) == map->r_size,
	    ("rman_set_mapping: size mismatch"));
	rman_set_bustag(r, map->r_bustag);
	rman_set_bushandle(r, map->r_bushandle);
	rman_set_virtual(r, map->r_vaddr);
}

void
rman_get_mapping(struct resource *r, struct resource_map *map)
{

	map->r_bustag = rman_get_bustag(r);
	map->r_bushandle = rman_get_bushandle(r);
	map->r_size = rman_get_size(r);
	map->r_vaddr = rman_get_virtual(r);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, device_t dev)
{

	r->__r_i->r_dev = dev;
}

device_t
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation, the index
 * into the list of resource managers, and the resource index within
 * that manager's list (-1 to return details of the manager itself).
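 *
 * For example (indices hypothetical), userland can fetch the second
 * resource of the first resource manager by reading the sysctl OID
 * hw.bus.rman.<generation>.0.1, which returns a struct u_resource.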
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND_FLAGS(rmans, db_show_rmans, DB_CMD_MEMSAFE)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS_FLAGS(allrman, db_show_all_rman, DB_CMD_MEMSAFE);
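
/*
 * From the DDB prompt, "show rman <addr>" dumps the resource manager at
 * <addr>, "show rmans" lists the headers of all resource managers, and
 * "show all rman" (alias "show allrman") dumps everything.
 */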
#endif