/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/subr_rman.c,v 1.10.2.1 2001/06/05 08:06:08 imp Exp $
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
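
/*
 * A minimal usage sketch, kept disabled; the "mybus" names and the
 * address range below are hypothetical, not part of this file.  A bus
 * driver typically sets rm_type and rm_descr, initializes the rman,
 * registers the ranges it decodes, and then carves allocations for its
 * children out of those ranges.
 */
#if 0
static struct rman mybus_mem_rman;	/* hypothetical bus-memory rman */

static int
mybus_rman_example(device_t dev)
{
	struct resource *res;

	mybus_mem_rman.rm_type = RMAN_ARRAY;
	mybus_mem_rman.rm_descr = "mybus memory window";
	if (rman_init(&mybus_mem_rman, 0) != 0)	/* cpu 0 for brevity */
		return (ENXIO);
	if (rman_manage_region(&mybus_mem_rman, 0xa0000000ul, 0xaffffffful))
		return (ENXIO);

	/* Any 4KB-aligned, 4KB-long chunk of the managed window will do. */
	res = rman_reserve_resource(&mybus_mem_rman, 0xa0000000ul,
	    0xaffffffful, 0x1000, rman_make_alignment_flags(0x1000), dev);
	if (res == NULL)
		return (ENXIO);
	rman_release_resource(res);
	return (0);
}
#endif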

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/bus.h>		/* XXX debugging */
#include <sys/rman.h>
#include <sys/sysctl.h>

static int rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params)	do { if (rman_debug) kprintf params; } while (0)
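/* Note the double parentheses in usage: DPRINTF(("format %d\n", val)); */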

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

TAILQ_HEAD(rman_head, rman);
static struct rman_head rman_head;
static struct lwkt_token rman_tok;	/* token protecting rman_head */
static int int_rman_activate_resource(struct rman *rm, struct resource *r,
				       struct resource **whohas);
static int int_rman_deactivate_resource(struct resource *r);
static int int_rman_release_resource(struct rman *rm, struct resource *r);

int
rman_init(struct rman *rm, int cpuid)
{
	static int once;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		lwkt_token_init(&rman_tok, "rman");
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_slock = kmalloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
	if (rm->rm_slock == NULL)
		return ENOMEM;
	lwkt_token_init(rm->rm_slock, "rmanslock");

	rm->rm_cpuid = cpuid;
	rm->rm_hold = 0;

	lwkt_gettoken(&rman_tok);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	lwkt_reltoken(&rman_tok);

	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource *r, *s;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = kmalloc(sizeof *r, M_RMAN, M_NOWAIT | M_ZERO);
	if (r == NULL)
		return ENOMEM;
	r->r_sharehead = 0;
	r->r_start = start;
	r->r_end = end;
	r->r_flags = 0;
	r->r_dev = 0;
	r->r_rm = rm;

	lwkt_gettoken(rm->rm_slock);
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
		;

	if (s == NULL)
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	else
		TAILQ_INSERT_BEFORE(s, r, r_link);

	lwkt_reltoken(rm->rm_slock);
	return 0;
}

int
rman_fini(struct rman *rm)
{
	struct resource *r;

	/*
	 * All resources must already have been deallocated.
	 */
	lwkt_gettoken(rm->rm_slock);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			lwkt_reltoken(rm->rm_slock);
			return EBUSY;
		}
	}

	/*
	 * Protected list removal.  Once removed, wait for any temporary
	 * holds to be dropped before actually destroying the resource.
	 */
	lwkt_gettoken(&rman_tok);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	lwkt_reltoken(&rman_tok);

	if (rm->rm_hold) {
		kprintf("debug: rman_fini(): rm_hold race fixed on %s\n",
			rm->rm_descr);
		while (rm->rm_hold)
			tsleep(rm, 0, "rmfree", 2);
	}

	/*
	 * Destroy all elements remaining on rm_list
	 */
	while ((r = TAILQ_FIRST(&rm->rm_list)) != NULL) {
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		kfree(r, M_RMAN);
	}
	lwkt_reltoken(rm->rm_slock);

	/*
	 * Final cleanup
	 */
	lwkt_token_uninit(rm->rm_slock);
	kfree(rm->rm_slock, M_RMAN);
	rm->rm_slock = NULL;

	return 0;
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, device_t dev)
{
	u_int	want_activate;
	struct	resource *r, *s, *rv;
	u_long	rstart, rend;

	rv = NULL;

	DPRINTF(("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
	       "%#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	lwkt_gettoken(rm->rm_slock);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#lx) > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
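		/*
		 * Note: rounddown2(x + align - 1, align) rounds rstart up
		 * to the next multiple of the (power-of-2) alignment.
		 */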
		rstart = rounddown2(rstart + (1ul << RF_ALIGNMENT(flags)) - 1,
		    1ul << RF_ALIGNMENT(flags));
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
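			 *
			 * For example (illustrative numbers): carving
			 * [0x140, 0x17f] out of a free region [0x100, 0x1ff]
			 * leaves [0x100, 0x13f] in s, returns [0x140, 0x17f]
			 * as rv, and creates a new element for [0x180, 0x1ff].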
			 */
			rv = kmalloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_sharehead = 0;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = kmalloc(sizeof *r, M_RMAN,
				    M_NOWAIT | M_ZERO);
				if (r == NULL) {
					kfree(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_dev = 0;
				r->r_sharehead = 0;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
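	 *
	 * For example (illustrative numbers): a shareable request for
	 * exactly [0x3f8, 0x3ff] can only piggyback on an existing
	 * shareable allocation covering exactly [0x3f8, 0x3ff].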
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count) {
			rv = kmalloc(sizeof *rv, M_RMAN, M_NOWAIT | M_ZERO);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == 0) {
				s->r_sharehead = kmalloc(sizeof *s->r_sharehead,
							M_RMAN,
							M_NOWAIT | M_ZERO);
				if (s->r_sharehead == 0) {
					kfree(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
	DPRINTF(("no region found\n"));
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource *whohas;
		DPRINTF(("activating region\n"));
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}
	lwkt_reltoken(rm->rm_slock);
	return (rv);
}

static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
			   struct resource **whohas)
{
	struct resource *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == 0) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
	int rv;
	struct resource *whohas;
	struct rman *rm;

	rm = r->r_rm;
	lwkt_gettoken(rm->rm_slock);
	rv = int_rman_activate_resource(rm, r, &whohas);
	lwkt_reltoken(rm->rm_slock);
	return rv;
}

#if 0

/* XXX */
int
rman_await_resource(struct resource *r, int slpflags, int timo)
{
	int	rv;
	struct	resource *whohas;
	struct	rman *rm;

	rm = r->r_rm;
	for (;;) {
		lwkt_gettoken(rm->rm_slock);
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with rm_slock held */

		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/*
		 * A critical section will hopefully prevent a race
		 * between lwkt_reltoken and tsleep where a process
		 * could conceivably get in and release the resource
		 * before we have a chance to sleep on it. YYY
		 */
		crit_enter();
		whohas->r_flags |= RF_WANTED;
		rv = tsleep(r->r_sharehead, slpflags, "rmwait", timo);
		if (rv) {
			lwkt_reltoken(rm->rm_slock);
			crit_exit();
			return rv;
		}
		crit_exit();
	}
}

#endif

static int
int_rman_deactivate_resource(struct resource *r)
{
	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->r_rm;
	lwkt_gettoken(rm->rm_slock);
	int_rman_deactivate_resource(r);
	lwkt_reltoken(rm->rm_slock);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
	struct	resource *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are the entry linked into the main resource
		 * list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == 0) {
			kfree(s->r_sharehead, M_RMAN);
			s->r_sharehead = 0;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.
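	 *
	 * For example (illustrative numbers): freeing [0x140, 0x17f] while
	 * both neighbors [0x100, 0x13f] and [0x180, 0x1ff] are unallocated
	 * collapses all three entries into one free region [0x100, 0x1ff].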
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);

	if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0
	    && t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		kfree(t, M_RMAN);
	} else if (s != NULL && (s->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL && (t->r_flags & RF_ALLOCATED) == 0) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	kfree(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *r)
{
	struct	rman *rm = r->r_rm;
	int	rv;

	lwkt_gettoken(rm->rm_slock);
	rv = int_rman_release_resource(rm, r);
	lwkt_reltoken(rm->rm_slock);
	return (rv);
}

/*
 * Find the highest bit set, and add one if more than one bit
 * set.  We're effectively computing the ceil(log2(size)) here.
 *
 * This function cannot compute alignments above (1LU<<63)+1
 * as this would require returning '64' which will not fit in
 * the flags field and doesn't work well for calculations either.
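 *
 * For example: a size of 0x1000 (a single bit set) yields
 * RF_ALIGNMENT_LOG2(12), while 0x1001 (more than one bit set) rounds
 * up to RF_ALIGNMENT_LOG2(13).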
 */
uint32_t
rman_make_alignment_flags(size_t size)
{
	int i;

	for (i = 63; i; --i) {
		if ((1LU << i) & size)
			break;
	}
	if (~(1LU << i) & size)
		++i;
	if (i == 64)
		i = 63;
	return(RF_ALIGNMENT_LOG2(i));
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation (validated
 * against the current generation), the index into the list of resource
 * managers, and the resource offset into that manager's list.  A
 * resource offset of -1 requests details on the manager itself.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource		*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	error = ENOENT;
	lwkt_gettoken(&rman_tok);

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	if (rm == NULL)
		goto done;

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		goto done;
	}

	/*
	 * Find the indexed resource and return it.
	 */
	atomic_add_int(&rm->rm_hold, 1);	/* temp prevent destruction */
	lwkt_gettoken(rm->rm_slock);

	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					ksnprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			break;
		}
	}
	lwkt_reltoken(rm->rm_slock);
	atomic_add_int(&rm->rm_hold, -1);
done:
	lwkt_reltoken(&rman_tok);

	return (error);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");