/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
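
/*
 * Illustrative usage sketch (not part of the original file; all names
 * and addresses are hypothetical).  A bus driver typically creates an
 * rman describing a region it decodes and then carves allocations out
 * of it on behalf of its children:
 *
 *	static struct rman mem_rman;
 *
 *	mem_rman.rm_start = 0;
 *	mem_rman.rm_end = ~0;
 *	mem_rman.rm_type = RMAN_ARRAY;
 *	mem_rman.rm_descr = "example memory window";
 *	if (rman_init(&mem_rman) != 0 ||
 *	    rman_manage_region(&mem_rman, 0xf0000000, 0xf7ffffff) != 0)
 *		panic("mem_rman setup failed");
 *
 *	// later, reserving 0x1000 addresses for a child device:
 *	struct resource *res = rman_reserve_resource(&mem_rman,
 *	    0xf0000000, 0xf7ffffff, 0x1000, RF_ACTIVE, child);
 */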

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	void	*r_irq_cookie;	/* interrupt cookie for this (interrupt) resource */
	device_t r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
	int	r_type;		/* optional type for this resource. */
};

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");
#define	DPRINTF(params)	do { if (rman_debug) printf params; } while (0)
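
/*
 * Note the doubled parentheses at DPRINTF call sites, e.g.
 * DPRINTF(("x is %d\n", x)): the entire printf argument list travels
 * through the single macro parameter.
 */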

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx; /* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
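/*
 * For example (hypothetical addresses): growing a resource spanning
 * [0x1000, 0x1fff] to [0x1000, 0x2fff] succeeds only if [0x2000, 0x2fff]
 * lies in an unallocated region of the same rman, while shrinking it to
 * [0x1800, 0x1fff] always succeeds and returns [0x1000, 0x17ff] to the
 * free list.
 */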
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))
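/*
 * For example, SHARE_TYPE(RF_SHAREABLE | RF_ACTIVE) evaluates to
 * RF_SHAREABLE: only the sharing-related bits participate in the
 * compatibility check for shared allocations below.
 */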

struct resource *
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
			    rman_res_t count, rman_res_t bound, u_int flags,
			    device_t dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
	       "length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT(count != 0, ("%s: attempted to allocate an empty range",
	    __func__));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL) {
		DPRINTF(("NULL list head\n"));
	} else {
		DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link)) {
		DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count-1));
	}

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
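		/*
		 * Worked example (hypothetical request): with
		 * RF_ALIGNMENT_LOG2(4) in the flags, amask is 0xf; with
		 * bound = 0x100, bmask is ~0xff.  A candidate rstart of
		 * 0x1f0 is already 16-byte aligned, but a count of 0x20
		 * would cross the 0x200 boundary (0x1f0 ^ 0x20f has bits
		 * set under bmask), so rstart advances to 0x200 and the
		 * loop re-checks alignment.
		 */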
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart) >= (count - 1)) {
			DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
		      rman_res_t count, u_int flags, device_t dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit is
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
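	/*
	 * For example (hypothetical sizes), rman_make_alignment_flags(0x1000)
	 * encodes an alignment of 12 bits (a 4 KiB boundary), while a
	 * non-power-of-two size such as 0x1800 rounds up to 13 bits (8 KiB).
	 */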
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}

rman_res_t
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

rman_res_t
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_irq_cookie(struct resource *r, void *c)
{

	r->__r_i->r_irq_cookie = c;
}

void *
rman_get_irq_cookie(struct resource *r)
{

	return (r->__r_i->r_irq_cookie);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

	KASSERT(rman_get_size(r) == map->r_size,
	    ("rman_set_mapping: size mismatch"));
	rman_set_bustag(r, map->r_bustag);
	rman_set_bushandle(r, map->r_bushandle);
	rman_set_virtual(r, map->r_vaddr);
}

void
rman_get_mapping(struct resource *r, struct resource_map *map)
{

	map->r_bustag = rman_get_bustag(r);
	map->r_bushandle = rman_get_bushandle(r);
	map->r_size = rman_get_size(r);
	map->r_vaddr = rman_get_virtual(r);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_type(struct resource *r, int type)
{
	r->__r_i->r_type = type;
}

int
rman_get_type(struct resource *r)
{
	return (r->__r_i->r_type);
}

void
rman_set_device(struct resource *r, device_t dev)
{

	r->__r_i->r_dev = dev;
}

device_t
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input components: the bus data generation count, the
 * index into the list of resource managers, and the resource index
 * within that manager (-1 to return details of the manager itself).
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND_FLAGS(rmans, db_show_rmans, DB_CMD_MEMSAFE)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS_FLAGS(allrman, db_show_all_rman, DB_CMD_MEMSAFE);
#endif