1 /*-
2  * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
3  * Copyright (c) 2017 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Landon Fuller
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17  *    redistribution must be conditioned upon including a substantially
18  *    similar Disclaimer requirement for further binary redistribution.
19  *
20  * NO WARRANTY
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGES.
32  */
33 
34 #include <sys/cdefs.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/limits.h>
38 
39 #include "bhndb_private.h"
40 #include "bhndbvar.h"
41 
42 static int	bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat,
43 		    const struct bhnd_dma_translation *translation,
44 		    bus_dma_tag_t *dmat);
45 
46 /**
47  * Attach a BHND bridge device to @p parent.
48  *
49  * @param parent A parent PCI device.
50  * @param[out] bhndb On success, the probed and attached bhndb bridge device.
51  * @param unit The device unit number, or -1 to select the next available unit
52  * number.
53  *
54  * @retval 0 success
55  * @retval non-zero Failed to attach the bhndb device.
56  */
57 int
58 bhndb_attach_bridge(device_t parent, device_t *bhndb, int unit)
59 {
60 	int error;
61 
62 	*bhndb = device_add_child(parent, "bhndb", unit);
63 	if (*bhndb == NULL)
64 		return (ENXIO);
65 
66 	if (!(error = device_probe_and_attach(*bhndb)))
67 		return (0);
68 
69 	if ((device_delete_child(parent, *bhndb)))
70 		device_printf(parent, "failed to detach bhndb child\n");
71 
72 	return (error);
73 }
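/*
 * Example (illustrative sketch only, not part of this driver): a host
 * bridge driver's attach routine might add its bhndb child roughly as
 * follows. The "sc" softc and its fields are hypothetical.
 *
 *	device_t	bhndb_dev;
 *	int		error;
 *
 *	error = bhndb_attach_bridge(sc->dev, &bhndb_dev, -1);
 *	if (error) {
 *		device_printf(sc->dev, "bhndb attach failed: %d\n", error);
 *		return (error);
 *	}
 */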
74 
75 /*
76  * Call BHNDB_SUSPEND_RESOURCE() for all resources in @p rl.
77  */
78 static void
79 bhndb_do_suspend_resources(device_t dev, struct resource_list *rl)
80 {
81 	struct resource_list_entry *rle;
82 
83 	/* Suspend all child resources. */
84 	STAILQ_FOREACH(rle, rl, link) {
85 		/* Skip non-allocated resources */
86 		if (rle->res == NULL)
87 			continue;
88 
89 		BHNDB_SUSPEND_RESOURCE(device_get_parent(dev), dev, rle->type,
90 		    rle->res);
91 	}
92 }
93 
94 /**
95  * Helper function for implementing BUS_SUSPEND_CHILD() on bridged
96  * bhnd(4) buses.
97  *
98  * This implementation of BUS_SUSPEND_CHILD() uses BUS_GET_RESOURCE_LIST()
99  * to find the child's resources and call BHNDB_SUSPEND_RESOURCE() for all
100  * child resources, ensuring that the device's allocated bridge resources
101  * will be available to other devices during bus resumption.
102  *
103  * Before suspending any resources, @p child is suspended by
104  * calling bhnd_generic_suspend_child().
105  *
106  * If @p child is not a direct child of @p dev, suspension is delegated to
107  * the @p dev parent.
108  */
109 int
110 bhnd_generic_br_suspend_child(device_t dev, device_t child)
111 {
112 	struct resource_list		*rl;
113 	int				 error;
114 
115 	if (device_get_parent(child) != dev)
116 		BUS_SUSPEND_CHILD(device_get_parent(dev), child);
117 
118 	if (device_is_suspended(child))
119 		return (EBUSY);
120 
121 	/* Suspend the child device */
122 	if ((error = bhnd_generic_suspend_child(dev, child)))
123 		return (error);
124 
125 	/* Fetch the resource list. If none, there's nothing else to do */
126 	rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child);
127 	if (rl == NULL)
128 		return (0);
129 
130 	/* Suspend all child resources. */
131 	bhndb_do_suspend_resources(dev, rl);
132 
133 	return (0);
134 }
135 
136 /**
137  * Helper function for implementing BUS_RESUME_CHILD() on bridged
138  * bhnd(4) bus devices.
139  *
140  * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST()
141  * to find the child's resources and call BHNDB_RESUME_RESOURCE() for all
142  * child resources, before delegating to bhnd_generic_resume_child().
143  *
144  * If resource resumption fails, @p child will not be resumed.
145  *
146  * If @p child is not a direct child of @p dev, resumption is delegated to
147  * the @p dev parent.
148  */
149 int
150 bhnd_generic_br_resume_child(device_t dev, device_t child)
151 {
152 	struct resource_list		*rl;
153 	struct resource_list_entry	*rle;
154 	int				 error;
155 
156 	if (device_get_parent(child) != dev)
157 		BUS_RESUME_CHILD(device_get_parent(dev), child);
158 
159 	if (!device_is_suspended(child))
160 		return (EBUSY);
161 
162 	/* Fetch the resource list. If none, there's nothing else to do */
163 	rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child);
164 	if (rl == NULL)
165 		return (bhnd_generic_resume_child(dev, child));
166 
167 	/* Resume all resources */
168 	STAILQ_FOREACH(rle, rl, link) {
169 		/* Skip non-allocated resources */
170 		if (rle->res == NULL)
171 			continue;
172 
173 		error = BHNDB_RESUME_RESOURCE(device_get_parent(dev), dev,
174 		    rle->type, rle->res);
175 		if (error) {
176 			/* Put all resources back into a suspend state */
177 			bhndb_do_suspend_resources(dev, rl);
178 			return (error);
179 		}
180 	}
181 
182 	/* Now that all resources are resumed, resume child */
183 	if ((error = bhnd_generic_resume_child(dev, child))) {
184 		/* Put all resources back into a suspend state */
185 		bhndb_do_suspend_resources(dev, rl);
186 	}
187 
188 	return (error);
189 }
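/*
 * Illustrative sketch (not taken from this file): a bridged bhnd(4) bus
 * driver would typically wire these helpers into its device method table,
 * for example:
 *
 *	DEVMETHOD(bus_suspend_child,	bhnd_generic_br_suspend_child),
 *	DEVMETHOD(bus_resume_child,	bhnd_generic_br_resume_child),
 *
 * The exact method table contents depend on the individual bridge driver.
 */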
190 
191 /**
192  * Find a host resource of @p type that maps the given range.
193  *
194  * @param hr The resource state to search.
195  * @param type The resource type to search for (see SYS_RES_*).
196  * @param start The start address of the range to search for.
197  * @param count The size of the range to search for.
198  *
199  * @retval resource the host resource containing the requested range.
200  * @retval NULL if no resource containing the requested range can be found.
201  */
202 struct resource *
203 bhndb_host_resource_for_range(struct bhndb_host_resources *hr, int type,
204     rman_res_t start, rman_res_t count)
205 {
206 	for (u_int i = 0; hr->resource_specs[i].type != -1; i++) {
207 		struct resource *r = hr->resources[i];
208 
209 		if (hr->resource_specs[i].type != type)
210 			continue;
211 
212 		/* Verify range */
213 		if (rman_get_start(r) > start)
214 			continue;
215 
216 		if (rman_get_end(r) < (start + count - 1))
217 			continue;
218 
219 		return (r);
220 	}
221 
222 	return (NULL);
223 }
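/*
 * Example (hedged sketch): mapping a bridged address range back to the
 * host resource that backs it. The "hr", "addr", and "size" values are
 * assumed to be supplied by the caller.
 *
 *	struct resource *res;
 *
 *	res = bhndb_host_resource_for_range(hr, SYS_RES_MEMORY, addr, size);
 *	if (res == NULL)
 *		return (ENXIO);
 */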
224 
225 /**
226  * Find a host resource of that matches the given register window definition.
227  *
228  * @param hr The resource state to search.
229  * @param win A register window definition.
230  *
231  * @retval resource the host resource corresponding to @p win.
232  * @retval NULL if no resource corresponding to @p win can be found.
233  */
234 struct resource *
235 bhndb_host_resource_for_regwin(struct bhndb_host_resources *hr,
236     const struct bhndb_regwin *win)
237 {
238 	const struct resource_spec *rspecs;
239 
240 	rspecs = hr->resource_specs;
241 	for (u_int i = 0; rspecs[i].type != -1; i++) {
242 		if (win->res.type != rspecs[i].type)
243 			continue;
244 
245 		if (win->res.rid != rspecs[i].rid)
246 			continue;
247 
248 		/* Found declared resource */
249 		return (hr->resources[i]);
250 	}
251 
252 	device_printf(hr->owner, "missing regwin resource spec "
253 	    "(type=%d, rid=%d)\n", win->res.type, win->res.rid);
254 
255 	return (NULL);
256 }
257 
258 /**
259  * Allocate and initialize a new resource state structure.
260  *
261  * @param dev The bridge device.
262  * @param parent_dev The parent device from which host resources should be
263  * allocated.
264  * @param cfg The hardware configuration to be used.
265  */
266 struct bhndb_resources *
267 bhndb_alloc_resources(device_t dev, device_t parent_dev,
268     const struct bhndb_hwcfg *cfg)
269 {
270 	struct bhndb_resources		*r;
271 	const struct bhndb_regwin	*win;
272 	bus_size_t			 last_window_size;
273 	int				 rnid;
274 	int				 error;
275 	bool				 free_ht_mem, free_br_mem, free_br_irq;
276 
277 	free_ht_mem = false;
278 	free_br_mem = false;
279 	free_br_irq = false;
280 
281 	r = malloc(sizeof(*r), M_BHND, M_NOWAIT|M_ZERO);
282 	if (r == NULL)
283 		return (NULL);
284 
285 	/* Basic initialization */
286 	r->dev = dev;
287 	r->cfg = cfg;
288 	r->res = NULL;
289 	r->min_prio = BHNDB_PRIORITY_NONE;
290 	STAILQ_INIT(&r->bus_regions);
291 	STAILQ_INIT(&r->bus_intrs);
292 
293 	mtx_init(&r->dw_steal_mtx, device_get_nameunit(dev),
294 	    "bhndb dwa_steal lock", MTX_SPIN);
295 
296 	/* Initialize host address space resource manager. */
297 	r->ht_mem_rman.rm_start = 0;
298 	r->ht_mem_rman.rm_end = ~0;
299 	r->ht_mem_rman.rm_type = RMAN_ARRAY;
300 	r->ht_mem_rman.rm_descr = "BHNDB host memory";
301 	if ((error = rman_init(&r->ht_mem_rman))) {
302 		device_printf(r->dev, "could not initialize ht_mem_rman\n");
303 		goto failed;
304 	}
305 	free_ht_mem = true;
306 
307 	/* Initialize resource manager for the bridged address space. */
308 	r->br_mem_rman.rm_start = 0;
309 	r->br_mem_rman.rm_end = BUS_SPACE_MAXADDR_32BIT;
310 	r->br_mem_rman.rm_type = RMAN_ARRAY;
311 	r->br_mem_rman.rm_descr = "BHNDB bridged memory";
312 
313 	if ((error = rman_init(&r->br_mem_rman))) {
314 		device_printf(r->dev, "could not initialize br_mem_rman\n");
315 		goto failed;
316 	}
317 	free_br_mem = true;
318 
319 	error = rman_manage_region(&r->br_mem_rman, 0, BUS_SPACE_MAXADDR_32BIT);
320 	if (error) {
321 		device_printf(r->dev, "could not configure br_mem_rman\n");
322 		goto failed;
323 	}
324 
325 	/* Initialize resource manager for the bridged interrupt controller. */
326 	r->br_irq_rman.rm_start = 0;
327 	r->br_irq_rman.rm_end = RM_MAX_END;
328 	r->br_irq_rman.rm_type = RMAN_ARRAY;
329 	r->br_irq_rman.rm_descr = "BHNDB bridged interrupts";
330 
331 	if ((error = rman_init(&r->br_irq_rman))) {
332 		device_printf(r->dev, "could not initialize br_irq_rman\n");
333 		goto failed;
334 	}
335 	free_br_irq = true;
336 
337 	error = rman_manage_region(&r->br_irq_rman, 0, RM_MAX_END);
338 	if (error) {
339 		device_printf(r->dev, "could not configure br_irq_rman\n");
340 		goto failed;
341 	}
342 
343 	/* Fetch the dynamic regwin count and verify that it does not exceed
344 	 * what is representable via our freelist bitstring. */
345 	r->dwa_count = bhndb_regwin_count(cfg->register_windows,
346 	    BHNDB_REGWIN_T_DYN);
347 	if (r->dwa_count >= INT_MAX) {
348 		device_printf(r->dev, "max dynamic regwin count exceeded\n");
349 		goto failed;
350 	}
351 
352 	/* Allocate the dynamic window allocation table. */
353 	r->dw_alloc = malloc(sizeof(r->dw_alloc[0]) * r->dwa_count, M_BHND,
354 	    M_NOWAIT);
355 	if (r->dw_alloc == NULL)
356 		goto failed;
357 
358 	/* Allocate the dynamic window allocation freelist */
359 	r->dwa_freelist = bit_alloc(r->dwa_count, M_BHND, M_NOWAIT);
360 	if (r->dwa_freelist == NULL)
361 		goto failed;
362 
363 	/* Initialize the dynamic window table */
364 	rnid = 0;
365 	last_window_size = 0;
366 	for (win = cfg->register_windows;
367 	    win->win_type != BHNDB_REGWIN_T_INVALID; win++)
368 	{
369 		struct bhndb_dw_alloc *dwa;
370 
371 		/* Skip non-DYN windows */
372 		if (win->win_type != BHNDB_REGWIN_T_DYN)
373 			continue;
374 
375 		/* Validate the window size */
376 		if (win->win_size == 0) {
377 			device_printf(r->dev, "ignoring zero-length dynamic "
378 			    "register window\n");
379 			continue;
380 		} else if (last_window_size == 0) {
381 			last_window_size = win->win_size;
382 		} else if (last_window_size != win->win_size) {
383 			/*
384 			 * No existing hardware should trigger this.
385 			 *
386 			 * If you run into this in the future, the dynamic
387 			 * window allocator and the resource priority system
388 			 * will need to be extended to support multiple register
389 			 * window allocation pools.
390 			 */
391 			device_printf(r->dev, "devices that vend multiple "
392 			    "dynamic register window sizes are not currently "
393 			    "supported\n");
394 			goto failed;
395 		}
396 
397 		dwa = &r->dw_alloc[rnid];
398 		dwa->win = win;
399 		dwa->parent_res = NULL;
400 		dwa->rnid = rnid;
401 		dwa->target = 0x0;
402 
403 		LIST_INIT(&dwa->refs);
404 		rnid++;
405 	}
406 
407 	/* Allocate host resources */
408 	error = bhndb_alloc_host_resources(&r->res, dev, parent_dev, r->cfg);
409 	if (error) {
410 		device_printf(r->dev,
411 		    "could not allocate host resources on %s: %d\n",
412 		    device_get_nameunit(parent_dev), error);
413 		goto failed;
414 	}
415 
416 	/* Populate (and validate) parent resource references for all
417 	 * dynamic windows */
418 	for (size_t i = 0; i < r->dwa_count; i++) {
419 		struct bhndb_dw_alloc		*dwa;
420 		const struct bhndb_regwin	*win;
421 
422 		dwa = &r->dw_alloc[i];
423 		win = dwa->win;
424 
425 		/* Find and validate corresponding resource. */
426 		dwa->parent_res = bhndb_host_resource_for_regwin(r->res, win);
427 		if (dwa->parent_res == NULL) {
428 			device_printf(r->dev, "no host resource found for %u "
429 			    "register window with offset %#jx and "
430 			    "size %#jx\n",
431 			    win->win_type,
432 			    (uintmax_t)win->win_offset,
433 			    (uintmax_t)win->win_size);
434 
435 			error = ENXIO;
436 			goto failed;
437 		}
438 
439 		if (rman_get_size(dwa->parent_res) < win->win_offset +
440 		    win->win_size)
441 		{
442 			device_printf(r->dev, "resource %d too small for "
443 			    "register window with offset %llx and size %llx\n",
444 			    rman_get_rid(dwa->parent_res),
445 			    (unsigned long long) win->win_offset,
446 			    (unsigned long long) win->win_size);
447 
448 			error = EINVAL;
449 			goto failed;
450 		}
451 	}
452 
453 	/* Add allocated memory resources to our host memory resource manager */
454 	for (u_int i = 0; r->res->resource_specs[i].type != -1; i++) {
455 		struct resource *res;
456 
457 		/* skip non-memory resources */
458 		if (r->res->resource_specs[i].type != SYS_RES_MEMORY)
459 			continue;
460 
461 		/* add host resource to set of managed regions */
462 		res = r->res->resources[i];
463 		error = rman_manage_region(&r->ht_mem_rman,
464 		    rman_get_start(res), rman_get_end(res));
465 		if (error) {
466 			device_printf(r->dev,
467 			    "could not register host memory region with "
468 			    "ht_mem_rman: %d\n", error);
469 			goto failed;
470 		}
471 	}
472 
473 	return (r);
474 
475 failed:
476 	if (free_ht_mem)
477 		rman_fini(&r->ht_mem_rman);
478 
479 	if (free_br_mem)
480 		rman_fini(&r->br_mem_rman);
481 
482 	if (free_br_irq)
483 		rman_fini(&r->br_irq_rman);
484 
485 	if (r->dw_alloc != NULL)
486 		free(r->dw_alloc, M_BHND);
487 
488 	if (r->dwa_freelist != NULL)
489 		free(r->dwa_freelist, M_BHND);
490 
491 	if (r->res != NULL)
492 		bhndb_release_host_resources(r->res);
493 
494 	mtx_destroy(&r->dw_steal_mtx);
495 
496 	free(r, M_BHND);
497 
498 	return (NULL);
499 }
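/*
 * Usage sketch (illustrative, not part of this file): the bridge driver
 * allocates its resource state during attach and releases it with
 * bhndb_free_resources() on detach or attach failure. The "sc" softc and
 * "parent" device are hypothetical.
 *
 *	sc->bus_res = bhndb_alloc_resources(dev, parent, sc->cfg);
 *	if (sc->bus_res == NULL)
 *		return (ENXIO);
 *	...
 *	bhndb_free_resources(sc->bus_res);
 */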
500 
501 /**
502  * Create a new DMA tag for the given @p translation.
503  *
504  * @param	dev		The bridge device.
505  * @param	parent_dmat	The parent DMA tag, or NULL if none.
506  * @param	translation	The DMA translation for which a DMA tag will
507  *				be created.
508  * @param[out]	dmat		On success, the newly created DMA tag.
509  *
510  * @retval 0		success
511  * @retval non-zero	if creating the new DMA tag fails, a regular
512  *			unix error code will be returned.
513  */
514 static int
515 bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat,
516     const struct bhnd_dma_translation *translation, bus_dma_tag_t *dmat)
517 {
518 	bus_dma_tag_t	translation_tag;
519 	bhnd_addr_t	dt_mask;
520 	bus_addr_t	lowaddr, highaddr;
521 	bus_size_t	maxsegsz;
522 	int		error;
523 
524 	highaddr = BUS_SPACE_MAXADDR;
525 	maxsegsz = BUS_SPACE_MAXSIZE;
526 
527 	/* Determine full addressable mask */
528 	dt_mask = (translation->addr_mask | translation->addrext_mask);
529 	KASSERT(dt_mask != 0, ("DMA addr_mask invalid: %#jx",
530 		(uintmax_t)dt_mask));
531 
532 	/* (addr_mask|addrext_mask) is our maximum supported address */
533 	lowaddr = MIN(dt_mask, BUS_SPACE_MAXADDR);
534 
535 	/* Constrain to translation window size */
536 	if (translation->addr_mask < maxsegsz)
537 		maxsegsz = translation->addr_mask;
538 
539 	/* Create our DMA tag */
540 	error = bus_dma_tag_create(parent_dmat,
541 	    1, 0,			/* alignment, boundary */
542 	    lowaddr, highaddr,
543 	    NULL, NULL,			/* filter, filterarg */
544 	    BUS_SPACE_MAXSIZE, 0,	/* maxsize, nsegments */
545 	    maxsegsz, 0,		/* maxsegsize, flags */
546 	    NULL, NULL,			/* lockfunc, lockarg */
547 	    &translation_tag);
548 	if (error) {
549 		device_printf(dev, "failed to create bridge DMA tag: %d\n",
550 		    error);
551 		return (error);
552 	}
553 
554 	*dmat = translation_tag;
555 	return (0);
556 }
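/*
 * Worked example of the mask arithmetic above (hypothetical translation
 * values, not taken from any particular hardware): with an addr_mask of
 * 0x3FFFFFFF and an addrext_mask of 0xC0000000, dt_mask is 0xFFFFFFFF,
 * lowaddr becomes MIN(0xFFFFFFFF, BUS_SPACE_MAXADDR), and maxsegsz is
 * clamped to the addr_mask value 0x3FFFFFFF.
 */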
557 
558 /**
559  * Deallocate the given bridge resource structure and any associated resources.
560  *
561  * @param br Resource state to be deallocated.
562  */
563 void
564 bhndb_free_resources(struct bhndb_resources *br)
565 {
566 	struct bhndb_region		*region, *r_next;
567 	struct bhndb_dw_alloc		*dwa;
568 	struct bhndb_dw_rentry		*dwr, *dwr_next;
569 	struct bhndb_intr_handler	*ih;
570 	bool				 leaked_regions, leaked_intrs;
571 
572 	leaked_regions = false;
573 	leaked_intrs = false;
574 
575 	/* No window regions may still be held */
576 	if (!bhndb_dw_all_free(br)) {
577 		for (int i = 0; i < br->dwa_count; i++) {
578 			dwa = &br->dw_alloc[i];
579 
580 			/* Skip free dynamic windows */
581 			if (bhndb_dw_is_free(br, dwa))
582 				continue;
583 
584 			device_printf(br->dev,
585 			    "leaked dynamic register window %d\n", dwa->rnid);
586 			leaked_regions = true;
587 		}
588 	}
589 
590 	/* There should be no interrupt handlers still registered */
591 	STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
592 		device_printf(br->dev, "interrupt handler leaked %p\n",
593 		    ih->ih_cookiep);
		leaked_intrs = true;
594 	}
595 
596 	if (leaked_intrs || leaked_regions) {
597 		panic("leaked%s%s", leaked_intrs ? " active interrupts" : "",
598 		    leaked_regions ? " active register windows" : "");
599 	}
600 
601 	/* Release host resources allocated through our parent. */
602 	if (br->res != NULL)
603 		bhndb_release_host_resources(br->res);
604 
605 	/* Clean up resource reservations */
606 	for (size_t i = 0; i < br->dwa_count; i++) {
607 		dwa = &br->dw_alloc[i];
608 
609 		LIST_FOREACH_SAFE(dwr, &dwa->refs, dw_link, dwr_next) {
610 			LIST_REMOVE(dwr, dw_link);
611 			free(dwr, M_BHND);
612 		}
613 	}
614 
615 	/* Release bus regions */
616 	STAILQ_FOREACH_SAFE(region, &br->bus_regions, link, r_next) {
617 		STAILQ_REMOVE(&br->bus_regions, region, bhndb_region, link);
618 		free(region, M_BHND);
619 	}
620 
621 	/* Release our resource managers */
622 	rman_fini(&br->ht_mem_rman);
623 	rman_fini(&br->br_mem_rman);
624 	rman_fini(&br->br_irq_rman);
625 
626 	free(br->dw_alloc, M_BHND);
627 	free(br->dwa_freelist, M_BHND);
628 
629 	mtx_destroy(&br->dw_steal_mtx);
630 
631 	free(br, M_BHND);
632 }
633 
634 /**
635  * Allocate host bus resources defined by @p hwcfg.
636  *
637  * On success, the caller assumes ownership of the allocated host resources,
638  * which must be freed via bhndb_release_host_resources().
639  *
640  * @param[out]	resources	On success, the allocated host resources.
641  * @param	dev		The bridge device.
642  * @param	parent_dev	The parent device from which host resources
643  *				should be allocated (e.g. via
644  *				bus_alloc_resources()).
645  * @param	hwcfg		The hardware configuration defining the host
646  *				resources to be allocated
647  */
648 int
649 bhndb_alloc_host_resources(struct bhndb_host_resources **resources,
650     device_t dev, device_t parent_dev, const struct bhndb_hwcfg *hwcfg)
651 {
652 	struct bhndb_host_resources		*hr;
653 	const struct bhnd_dma_translation	*dt;
654 	bus_dma_tag_t				 parent_dmat;
655 	size_t					 nres, ndt;
656 	int					 error;
657 
658 	parent_dmat = bus_get_dma_tag(parent_dev);
659 
660 	hr = malloc(sizeof(*hr), M_BHND, M_WAITOK);
661 	hr->owner = parent_dev;
662 	hr->cfg = hwcfg;
663 	hr->resource_specs = NULL;
664 	hr->resources = NULL;
665 	hr->dma_tags = NULL;
666 	hr->num_dma_tags = 0;
667 
668 	/* Determine our bridge resource count from the hardware config. */
669 	nres = 0;
670 	for (size_t i = 0; hwcfg->resource_specs[i].type != -1; i++)
671 		nres++;
672 
673 	/* Determine the total count and validate our DMA translation table. */
674 	ndt = 0;
675 	for (dt = hwcfg->dma_translations; dt != NULL &&
676 	    !BHND_DMA_IS_TRANSLATION_TABLE_END(dt); dt++)
677 	{
678 		/* Validate the defined translation */
679 		if ((dt->base_addr & dt->addr_mask) != 0) {
680 			device_printf(dev, "invalid DMA translation; base "
681 			    "address %#jx overlaps address mask %#jx",
682 			    (uintmax_t)dt->base_addr, (uintmax_t)dt->addr_mask);
683 
684 			error = EINVAL;
685 			goto failed;
686 		}
687 
688 		if ((dt->addrext_mask & dt->addr_mask) != 0) {
689 			device_printf(dev, "invalid DMA translation; addrext "
690 			    "mask %#jx overlaps address mask %#jx",
691 			    (uintmax_t)dt->addrext_mask,
692 			    (uintmax_t)dt->addr_mask);
693 
694 			error = EINVAL;
695 			goto failed;
696 		}
697 
698 		/* Increment our entry count */
699 		ndt++;
700 	}
701 
702 	/* Allocate our DMA tags */
703 	hr->dma_tags = malloc(sizeof(*hr->dma_tags) * ndt, M_BHND,
704 	    M_WAITOK|M_ZERO);
705 	for (size_t i = 0; i < ndt; i++) {
706 		error = bhndb_dma_tag_create(dev, parent_dmat,
707 		    &hwcfg->dma_translations[i], &hr->dma_tags[i]);
708 		if (error)
709 			goto failed;
710 
711 		hr->num_dma_tags++;
712 	}
713 
714 	/* Allocate space for a non-const copy of our resource_spec
715 	 * table; this will be updated with the RIDs assigned by
716 	 * bus_alloc_resources. */
717 	hr->resource_specs = malloc(sizeof(hr->resource_specs[0]) * (nres + 1),
718 	    M_BHND, M_WAITOK);
719 
720 	/* Initialize and terminate the table */
721 	for (size_t i = 0; i < nres; i++)
722 		hr->resource_specs[i] = hwcfg->resource_specs[i];
723 
724 	hr->resource_specs[nres].type = -1;
725 
726 	/* Allocate space for our resource references */
727 	hr->resources = malloc(sizeof(hr->resources[0]) * nres, M_BHND,
728 	    M_WAITOK);
729 
730 	/* Allocate host resources */
731 	error = bus_alloc_resources(hr->owner, hr->resource_specs,
732 	    hr->resources);
733 	if (error) {
734 		device_printf(dev, "could not allocate bridge resources via "
735 		    "%s: %d\n", device_get_nameunit(parent_dev), error);
736 		goto failed;
737 	}
738 
739 	*resources = hr;
740 	return (0);
741 
742 failed:
743 	if (hr->resource_specs != NULL)
744 		free(hr->resource_specs, M_BHND);
745 
746 	if (hr->resources != NULL)
747 		free(hr->resources, M_BHND);
748 
749 	for (size_t i = 0; i < hr->num_dma_tags; i++)
750 		bus_dma_tag_destroy(hr->dma_tags[i]);
751 
752 	if (hr->dma_tags != NULL)
753 		free(hr->dma_tags, M_BHND);
754 
755 	free(hr, M_BHND);
756 
757 	return (error);
758 }
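/*
 * Example (hedged sketch): allocating and later releasing host resources
 * for a candidate hardware configuration; "parent" and "hwcfg" are assumed
 * to be provided by the caller.
 *
 *	struct bhndb_host_resources	*hr;
 *	int				 error;
 *
 *	error = bhndb_alloc_host_resources(&hr, dev, parent, hwcfg);
 *	if (error)
 *		return (error);
 *	...
 *	bhndb_release_host_resources(hr);
 */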
759 
760 /**
761  * Deallocate a set of bridge host resources.
762  *
763  * @param hr The resources to be freed.
764  */
765 void
766 bhndb_release_host_resources(struct bhndb_host_resources *hr)
767 {
768 	bus_release_resources(hr->owner, hr->resource_specs, hr->resources);
769 
770 	for (size_t i = 0; i < hr->num_dma_tags; i++)
771 		bus_dma_tag_destroy(hr->dma_tags[i]);
772 
773 	free(hr->resources, M_BHND);
774 	free(hr->resource_specs, M_BHND);
775 	free(hr->dma_tags, M_BHND);
776 	free(hr, M_BHND);
777 }
778 
779 /**
780  * Search @p cores for the core serving as the bhnd host bridge.
781  *
782  * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged
783  * bhnd(4) devices to determine the hostb core:
784  *
785  * - The core must have a Broadcom vendor ID.
786  * - The core devclass must match the bridge type.
787  * - The core must be the first device on the bus with the bridged device
788  *   class.
789  *
790  * @param	cores		The core table to search.
791  * @param	ncores		The number of cores in @p cores.
792  * @param	bridge_devclass	The expected device class of the bridge core.
793  * @param[out]	core		If found, the matching host bridge core info.
794  *
795  * @retval 0		success
796  * @retval ENOENT	not found
797  */
798 int
799 bhndb_find_hostb_core(struct bhnd_core_info *cores, u_int ncores,
800     bhnd_devclass_t bridge_devclass, struct bhnd_core_info *core)
801 {
802 	struct bhnd_core_match	 md;
803 	struct bhnd_core_info	*match;
804 	u_int			 match_core_idx;
805 
806 	/* Set up a match descriptor for the required device class. */
807 	md = (struct bhnd_core_match) {
808 		BHND_MATCH_CORE_CLASS(bridge_devclass),
809 		BHND_MATCH_CORE_UNIT(0)
810 	};
811 
812 	/* Find the matching core with the lowest core index */
813 	match = NULL;
814 	match_core_idx = UINT_MAX;
815 
816 	for (u_int i = 0; i < ncores; i++) {
817 		if (!bhnd_core_matches(&cores[i], &md))
818 			continue;
819 
820 		/* Lower core indices take precedence */
821 		if (match != NULL && match_core_idx < match->core_idx)
822 			continue;
823 
824 		match = &cores[i];
825 		match_core_idx = match->core_idx;
826 	}
827 
828 	if (match == NULL)
829 		return (ENOENT);
830 
831 	*core = *match;
832 	return (0);
833 }
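/*
 * Example (illustrative only): locating a PCIe host bridge core within a
 * previously read core table. The "cores" array and "ncores" count are
 * assumed to have been obtained from the device erom by the caller.
 *
 *	struct bhnd_core_info	hostb_core;
 *	int			error;
 *
 *	error = bhndb_find_hostb_core(cores, ncores, BHND_DEVCLASS_PCIE,
 *	    &hostb_core);
 *	if (error == ENOENT)
 *		device_printf(dev, "no host bridge core found\n");
 */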
834 
835 /**
836  * Allocate a host interrupt source and its backing SYS_RES_IRQ host resource.
837  *
838  * @param owner	The device to be used to allocate a SYS_RES_IRQ
839  *		resource with @p rid.
840  * @param rid	The resource ID of the IRQ to be allocated.
841  * @param start	The start value to be passed to bus_alloc_resource().
842  * @param end	The end value to be passed to bus_alloc_resource().
843  * @param count	The count to be passed to bus_alloc_resource().
844  * @param flags	The flags to be passed to bus_alloc_resource().
845  *
846  * @retval non-NULL	success
847  * @retval NULL		if allocation fails.
848  */
849 struct bhndb_intr_isrc *
850 bhndb_alloc_intr_isrc(device_t owner, int rid, rman_res_t start, rman_res_t end,
851     rman_res_t count, u_int flags)
852 {
853 	struct bhndb_intr_isrc *isrc;
854 
855 	isrc = malloc(sizeof(*isrc), M_BHND, M_NOWAIT);
856 	if (isrc == NULL)
857 		return (NULL);
858 
859 	isrc->is_owner = owner;
860 	isrc->is_rid = rid;
861 	isrc->is_res = bus_alloc_resource(owner, SYS_RES_IRQ, &isrc->is_rid,
862 	    start, end, count, flags);
863 	if (isrc->is_res == NULL) {
864 		free(isrc, M_BHND);
865 		return (NULL);
866 	}
867 
868 	return (isrc);
869 }
870 
871 /**
872  * Free a host interrupt source and its backing host resource.
873  *
874  * @param isrc	The interrupt source to be freed.
875  */
876 void
877 bhndb_free_intr_isrc(struct bhndb_intr_isrc *isrc)
878 {
879 	bus_release_resource(isrc->is_owner, SYS_RES_IRQ, isrc->is_rid,
880 	    isrc->is_res);
881 	free(isrc, M_BHND);
882 }
883 
884 /**
885  * Allocate and initialize a new interrupt handler entry.
886  *
887  * @param owner	The child device that owns this entry.
888  * @param r	The child's interrupt resource.
889  * @param isrc	The isrc mapped for this entry.
890  *
891  * @retval non-NULL	success
892  * @retval NULL		if allocation fails.
893  */
894 struct bhndb_intr_handler *
895 bhndb_alloc_intr_handler(device_t owner, struct resource *r,
896     struct bhndb_intr_isrc *isrc)
897 {
898 	struct bhndb_intr_handler *ih;
899 
900 	ih = malloc(sizeof(*ih), M_BHND, M_NOWAIT | M_ZERO);
901 	ih = malloc(sizeof(*ih), M_BHND, M_NOWAIT | M_ZERO);
	if (ih == NULL)
		return (NULL);

902 	ih->ih_owner = owner;
903 	ih->ih_isrc = isrc;
904 	ih->ih_cookiep = NULL;
905 	ih->ih_active = false;
906 
907 	return (ih);
908 }
909 
910 /**
911  * Free an interrupt handler entry.
912  *
913  * @param br The resource state owning @p ih.
914  * @param ih The interrupt handler entry to be removed.
915  */
916 void
917 bhndb_free_intr_handler(struct bhndb_intr_handler *ih)
918 {
919 	KASSERT(!ih->ih_active, ("free of active interrupt handler %p",
920 	    ih->ih_cookiep));
921 
922 	free(ih, M_BHND);
923 }
924 
925 /**
926  * Add an active interrupt handler to the given resource state.
927  *
928  * @param br The resource state to be modified.
929  * @param ih The interrupt handler entry to be added.
930  */
931 void
932 bhndb_register_intr_handler(struct bhndb_resources *br,
933     struct bhndb_intr_handler *ih)
934 {
935 	KASSERT(!ih->ih_active, ("duplicate registration of interrupt "
936 	    "handler %p", ih->ih_cookiep));
937 	KASSERT(ih->ih_cookiep != NULL, ("missing cookiep"));
938 
939 	ih->ih_active = true;
940 	STAILQ_INSERT_HEAD(&br->bus_intrs, ih, ih_link);
941 }
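/*
 * Sketch of the expected setup flow (assumptions: "child", "r", "isrc",
 * "br", and the bus_setup_intr() arguments are supplied by the caller;
 * this is not the bridge's actual BUS_SETUP_INTR() implementation):
 *
 *	struct bhndb_intr_handler *ih;
 *
 *	ih = bhndb_alloc_intr_handler(child, r, isrc);
 *	if (ih == NULL)
 *		return (ENOMEM);
 *
 *	error = bus_setup_intr(isrc->is_owner, isrc->is_res, flags, filter,
 *	    handler, arg, &ih->ih_cookiep);
 *	if (error) {
 *		bhndb_free_intr_handler(ih);
 *		return (error);
 *	}
 *
 *	bhndb_register_intr_handler(br, ih);
 */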
942 
943 /**
944  * Remove an interrupt handler from the given resource state.
945  *
946  * @param br The resource state containing @p ih.
947  * @param ih The interrupt handler entry to be removed.
948  */
949 void
950 bhndb_deregister_intr_handler(struct bhndb_resources *br,
951     struct bhndb_intr_handler *ih)
952 {
953 	KASSERT(ih->ih_active, ("duplicate deregistration of interrupt "
954 	    "handler %p", ih->ih_cookiep));
955 
956 	KASSERT(bhndb_find_intr_handler(br, ih) == ih,
957 	    ("unknown interrupt handler %p", ih));
958 
959 	STAILQ_REMOVE(&br->bus_intrs, ih, bhndb_intr_handler, ih_link);
960 	ih->ih_active = false;
961 }
962 
963 /**
964  * Return the interrupt handler entry corresponding to @p cookiep, or NULL
965  * if no entry is found.
966  *
967  * @param br The resource state to search for the given @p cookiep.
968  * @param cookiep The interrupt handler's bus-assigned cookiep value.
969  */
970 struct bhndb_intr_handler *
971 bhndb_find_intr_handler(struct bhndb_resources *br, void *cookiep)
972 {
973 	struct bhndb_intr_handler *ih;
974 
975 	STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
976 		if (ih == cookiep)
977 			return (ih);
978 	}
979 
980 	/* Not found */
981 	return (NULL);
982 }
983 
984 /**
985  * Find the maximum start and end limits of the bridged resource @p r.
986  *
987  * If the resource is not currently mapped by the bridge, ENOENT will be
988  * returned.
989  *
990  * @param	br		The resource state to search.
991  * @param	type		The resource type (see SYS_RES_*).
992  * @param	r		The resource to search for in @p br.
993  * @param[out]	start	On success, the minimum supported start address.
994  * @param[out]	end	On success, the maximum supported end address.
995  *
996  * @retval 0		success
997  * @retval ENOENT	no active mapping found for @p r of @p type
998  */
999 int
1000 bhndb_find_resource_limits(struct bhndb_resources *br, int type,
1001     struct resource *r, rman_res_t *start, rman_res_t *end)
1002 {
1003 	struct bhndb_dw_alloc		*dynamic;
1004 	struct bhndb_region		*sregion;
1005 	struct bhndb_intr_handler	*ih;
1006 
1007 	switch (type) {
1008 	case SYS_RES_IRQ:
1009 		/* Is this one of ours? */
1010 		STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
1011 			if (ih->ih_res != r)
1012 				continue;
1013 
1014 			/* We don't support adjusting IRQ resource limits */
1015 			*start = rman_get_start(r);
1016 			*end = rman_get_end(r);
1017 			return (0);
1018 		}
1019 
1020 		/* Not found */
1021 		return (ENOENT);
1022 
1023 	case SYS_RES_MEMORY: {
1024 		/* Check for an enclosing dynamic register window */
1025 		if ((dynamic = bhndb_dw_find_resource(br, r))) {
1026 			*start = dynamic->target;
1027 			*end = dynamic->target + dynamic->win->win_size - 1;
1028 			return (0);
1029 		}
1030 
1031 		/* Check for a static region */
1032 		sregion = bhndb_find_resource_region(br, rman_get_start(r),
1033 		    rman_get_size(r));
1034 		if (sregion != NULL && sregion->static_regwin != NULL) {
1035 			*start = sregion->addr;
1036 			*end = sregion->addr + sregion->size - 1;
1037 
1038 			return (0);
1039 		}
1040 
1041 		/* Not found */
1042 		return (ENOENT);
1043 	}
1044 
1045 	default:
1046 		device_printf(br->dev, "unknown resource type: %d\n", type);
1047 		return (ENOENT);
1048 	}
1049 }
1050 
1051 /**
1052  * Add a bus region entry to @p r for the given base @p addr and @p size.
1053  *
1054  * @param br The resource state to which the bus region entry will be added.
1055  * @param addr The base address of this region.
1056  * @param size The size of this region.
1057  * @param priority The resource priority to be assigned to allocations
1058  * made within this bus region.
1059  * @param alloc_flags resource allocation flags (@see bhndb_alloc_flags)
1060  * @param static_regwin If available, a static register window mapping this
1061  * bus region entry. If not available, NULL.
1062  *
1063  * @retval 0 success
1064  * @retval non-zero if adding the bus region fails.
1065  */
1066 int
1067 bhndb_add_resource_region(struct bhndb_resources *br, bhnd_addr_t addr,
1068     bhnd_size_t size, bhndb_priority_t priority, uint32_t alloc_flags,
1069     const struct bhndb_regwin *static_regwin)
1070 {
1071 	struct bhndb_region	*reg;
1072 
1073 	/* Insert in the bus resource list */
1074 	reg = malloc(sizeof(*reg), M_BHND, M_NOWAIT);
1075 	if (reg == NULL)
1076 		return (ENOMEM);
1077 
1078 	*reg = (struct bhndb_region) {
1079 		.addr = addr,
1080 		.size = size,
1081 		.priority = priority,
1082 		.alloc_flags = alloc_flags,
1083 		.static_regwin = static_regwin
1084 	};
1085 
1086 	STAILQ_INSERT_HEAD(&br->bus_regions, reg, link);
1087 
1088 	return (0);
1089 }
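/*
 * Example (hedged sketch): registering a statically mapped bus region
 * during bridge initialization. The address, size, and register window
 * values are placeholders.
 *
 *	int error;
 *
 *	error = bhndb_add_resource_region(br, addr, size,
 *	    BHNDB_PRIORITY_NONE, 0, static_regwin);
 *	if (error)
 *		return (error);
 */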
1090 
1091 /**
1092  * Return true if a mapping of @p size bytes at @p addr is provided by either
1093  * one contiguous bus region, or by multiple discontiguous regions.
1094  *
1095  * @param br The resource state to query.
1096  * @param addr The requested starting address.
1097  * @param size The requested size.
1098  */
1099 bool
1100 bhndb_has_static_region_mapping(struct bhndb_resources *br,
1101     bhnd_addr_t addr, bhnd_size_t size)
1102 {
1103 	struct bhndb_region	*region;
1104 	bhnd_addr_t		 r_addr;
1105 
1106 	r_addr = addr;
1107 	while ((region = bhndb_find_resource_region(br, r_addr, 1)) != NULL) {
1108 		/* Must be backed by a static register window */
1109 		if (region->static_regwin == NULL)
1110 			return (false);
1111 
1112 		/* Adjust the search offset */
1113 		r_addr += region->size;
1114 
1115 		/* Have we traversed a complete (if discontiguous) mapping? */
1116 		if (r_addr == addr + size)
1117 			return (true);
1118 	}
1119 
1120 	/* No complete mapping found */
1121 	return (false);
1122 }
1123 
1124 /**
1125  * Find the bus region that maps @p size bytes at @p addr.
1126  *
1127  * @param br The resource state to search.
1128  * @param addr The requested starting address.
1129  * @param size The requested size.
1130  *
1131  * @retval bhndb_region A region that fully contains the requested range.
1132  * @retval NULL If no mapping region can be found.
1133  */
1134 struct bhndb_region *
1135 bhndb_find_resource_region(struct bhndb_resources *br, bhnd_addr_t addr,
1136     bhnd_size_t size)
1137 {
1138 	struct bhndb_region *region;
1139 
1140 	STAILQ_FOREACH(region, &br->bus_regions, link) {
1141 		/* Request must fit within the region's mapping  */
1142 		if (addr < region->addr)
1143 			continue;
1144 
1145 		if (addr + size > region->addr + region->size)
1146 			continue;
1147 
1148 		return (region);
1149 	}
1150 
1151 	/* Not found */
1152 	return (NULL);
1153 }
1154 
1155 /**
1156  * Find the entry matching @p r in @p dwa's references, if any.
1157  *
1158  * @param dwa The dynamic window allocation to search
1159  * @param r The resource to search for in @p dwa.
1160  */
1161 static struct bhndb_dw_rentry *
1162 bhndb_dw_find_resource_entry(struct bhndb_dw_alloc *dwa, struct resource *r)
1163 {
1164 	struct bhndb_dw_rentry	*rentry;
1165 
1166 	LIST_FOREACH(rentry, &dwa->refs, dw_link) {
1167 		struct resource *dw_res = rentry->dw_res;
1168 
1169 		/* Match dev/rid/addr/size */
1170 		if (rman_get_device(dw_res)	!= rman_get_device(r) ||
1171 			rman_get_rid(dw_res)	!= rman_get_rid(r) ||
1172 			rman_get_start(dw_res)	!= rman_get_start(r) ||
1173 			rman_get_size(dw_res)	!= rman_get_size(r))
1174 		{
1175 			continue;
1176 		}
1177 
1178 		/* Matching allocation found */
1179 		return (rentry);
1180 	}
1181 
1182 	return (NULL);
1183 }
1184 
1185 /**
1186  * Find the dynamic region allocated for @p r, if any.
1187  *
1188  * @param br The resource state to search.
1189  * @param r The resource to search for.
1190  *
1191  * @retval bhndb_dw_alloc The allocation record for @p r.
1192  * @retval NULL if no dynamic window is allocated for @p r.
1193  */
1194 struct bhndb_dw_alloc *
1195 bhndb_dw_find_resource(struct bhndb_resources *br, struct resource *r)
1196 {
1197 	struct bhndb_dw_alloc	*dwa;
1198 
1199 	for (size_t i = 0; i < br->dwa_count; i++) {
1200 		dwa = &br->dw_alloc[i];
1201 
1202 		/* Skip free dynamic windows */
1203 		if (bhndb_dw_is_free(br, dwa))
1204 			continue;
1205 
1206 		/* Matching allocation found? */
1207 		if (bhndb_dw_find_resource_entry(dwa, r) != NULL)
1208 			return (dwa);
1209 	}
1210 
1211 	return (NULL);
1212 }
1213 
1214 /**
1215  * Find an existing dynamic window mapping @p size bytes
1216  * at @p addr. The window may or may not be free.
1217  *
1218  * @param br The resource state to search.
1219  * @param addr The requested starting address.
1220  * @param size The requested size.
1221  *
1222  * @retval bhndb_dw_alloc A window allocation that fully contains the requested
1223  * range.
1224  * @retval NULL If no mapping region can be found.
1225  */
1226 struct bhndb_dw_alloc *
1227 bhndb_dw_find_mapping(struct bhndb_resources *br, bhnd_addr_t addr,
1228     bhnd_size_t size)
1229 {
1230 	struct bhndb_dw_alloc		*dwr;
1231 	const struct bhndb_regwin	*win;
1232 
1233 	/* Search for an existing dynamic mapping of this address range. */
1234 	for (size_t i = 0; i < br->dwa_count; i++) {
1235 		dwr = &br->dw_alloc[i];
1236 		win = dwr->win;
1237 
1238 		/* Verify the range */
1239 		if (addr < dwr->target)
1240 			continue;
1241 
1242 		if (addr + size > dwr->target + win->win_size)
1243 			continue;
1244 
1245 		/* Found a usable mapping */
1246 		return (dwr);
1247 	}
1248 
1249 	/* not found */
1250 	return (NULL);
1251 }
1252 
1253 /**
1254  * Retain a reference to @p dwa for use by @p res.
1255  *
1256  * @param br The resource state owning @p dwa.
1257  * @param dwa The allocation record to be retained.
1258  * @param res The resource that will own a reference to @p dwa.
1259  *
1260  * @retval 0 success
1261  * @retval ENOMEM Failed to allocate a new reference structure.
1262  */
1263 int
1264 bhndb_dw_retain(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa,
1265     struct resource *res)
1266 {
1267 	struct bhndb_dw_rentry *rentry;
1268 
1269 	KASSERT(bhndb_dw_find_resource_entry(dwa, res) == NULL,
1270 	    ("double-retain of dynamic window for same resource"));
1271 
1272 	/* Insert a reference entry; we use M_NOWAIT to allow use from
1273 	 * within a non-sleepable lock */
1274 	rentry = malloc(sizeof(*rentry), M_BHND, M_NOWAIT);
1275 	if (rentry == NULL)
1276 		return (ENOMEM);
1277 
1278 	rentry->dw_res = res;
1279 	LIST_INSERT_HEAD(&dwa->refs, rentry, dw_link);
1280 
1281 	/* Update the free list */
1282 	bit_set(br->dwa_freelist, dwa->rnid);
1283 
1284 	return (0);
1285 }
1286 
1287 /**
1288  * Release a reference to @p dwa previously retained by @p res. If the
1289  * reference count of @p dwa reaches zero, it will be added to the
1290  * free list.
1291  *
1292  * @param br The resource state owning @p dwa.
1293  * @param dwa The allocation record to be released.
1294  * @param res The resource that currently owns a reference to @p dwa.
1295  */
1296 void
1297 bhndb_dw_release(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa,
1298     struct resource *r)
1299 {
1300 	struct bhndb_dw_rentry	*rentry;
1301 
1302 	/* Find the rentry */
1303 	rentry = bhndb_dw_find_resource_entry(dwa, r);
1304 	KASSERT(rentry != NULL, ("over release of resource entry"));
1305 
1306 	LIST_REMOVE(rentry, dw_link);
1307 	free(rentry, M_BHND);
1308 
1309 	/* If this was the last reference, update the free list */
1310 	if (LIST_EMPTY(&dwa->refs))
1311 		bit_clear(br->dwa_freelist, dwa->rnid);
1312 }
1313 
1314 /**
1315  * Attempt to set (or reset) the target address of @p dwa to map @p size bytes
1316  * at @p addr.
1317  *
1318  * This will apply any necessary window alignment and verify that
1319  * the window is capable of mapping the requested range prior to modifying
1320  * the record.
1321  *
1322  * @param dev The device on which to issue the BHNDB_SET_WINDOW_ADDR() request.
1323  * @param br The resource state owning @p dwa.
1324  * @param dwa The allocation record to be configured.
1325  * @param addr The address to be mapped via @p dwa.
1326  * @param size The number of bytes to be mapped at @p addr.
1327  *
1328  * @retval 0 success
1329  * @retval non-zero no usable register window available.
1330  */
1331 int
1332 bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br,
1333     struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size)
1334 {
1335 	const struct bhndb_regwin	*rw;
1336 	bus_addr_t			 offset;
1337 	int				 error;
1338 
1339 	rw = dwa->win;
1340 
1341 	KASSERT(bhndb_dw_is_free(br, dwa) || mtx_owned(&br->dw_steal_mtx),
1342 	    ("attempting to set the target address on an in-use window"));
1343 
1344 	/* Page-align the target address */
1345 	offset = addr % rw->win_size;
1346 	dwa->target = addr - offset;
1347 
1348 	/* Verify that the window is large enough for the full target */
1349 	if (rw->win_size - offset < size)
1350 		return (ENOMEM);
1351 
1352 	/* Update the window target */
1353 	error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target);
1354 	if (error) {
1355 		dwa->target = 0x0;
1356 		return (error);
1357 	}
1358 
1359 	return (0);
1360 }
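/*
 * Sketch of the dynamic window life cycle (illustrative; "dev", "br", "r",
 * "addr", and "size" are assumed to be provided by the resource activation
 * path):
 *
 *	struct bhndb_dw_alloc	*dwa;
 *	int			 error;
 *
 *	if ((dwa = bhndb_dw_next_free(br)) == NULL)
 *		return (ENOMEM);
 *
 *	if ((error = bhndb_dw_set_addr(dev, br, dwa, addr, size)))
 *		return (error);
 *
 *	if ((error = bhndb_dw_retain(br, dwa, r)))
 *		return (error);
 *	...
 *	bhndb_dw_release(br, dwa, r);
 */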
1361 
1362 /**
1363  * Steal an in-use allocation record from @p br, returning the record's current
1364  * target in @p saved on success.
1365  *
1366  * This function acquires a mutex and disables interrupts; callers should
1367  * avoid holding a stolen window longer than required to issue an I/O
1368  * request.
1369  *
1370  * A successful call to bhndb_dw_steal() must be balanced with a call to
1371  * bhndb_dw_return_stolen().
1372  *
1373  * @param br The resource state from which a window should be stolen.
1374  * @param saved The stolen window's saved target address.
1375  *
1376  * @retval non-NULL success
1377  * @retval NULL no dynamic window regions are defined.
1378  */
1379 struct bhndb_dw_alloc *
1380 bhndb_dw_steal(struct bhndb_resources *br, bus_addr_t *saved)
1381 {
1382 	struct bhndb_dw_alloc *dw_stolen;
1383 
1384 	KASSERT(bhndb_dw_next_free(br) == NULL,
1385 	    ("attempting to steal an in-use window while free windows remain"));
1386 
1387 	/* Nothing to steal from? */
1388 	if (br->dwa_count == 0)
1389 		return (NULL);
1390 
1391 	/*
1392 	 * Acquire our steal spinlock; this will be released in
1393 	 * bhndb_dw_return_stolen().
1394 	 *
1395 	 * Acquiring also disables interrupts, which is required when one is
1396 	 * stealing an in-use existing register window.
1397 	 */
1398 	mtx_lock_spin(&br->dw_steal_mtx);
1399 
1400 	dw_stolen = &br->dw_alloc[0];
1401 	*saved = dw_stolen->target;
1402 	return (dw_stolen);
1403 }
1404 
1405 /**
1406  * Return an allocation record previously stolen using bhndb_dw_steal().
1407  *
1408  * @param dev The device on which to issue a BHNDB_SET_WINDOW_ADDR() request.
1409  * @param br The resource state owning @p dwa.
1410  * @param dwa The allocation record to be returned.
1411  * @param saved The original target address provided by bhndb_dw_steal().
1412  */
1413 void
1414 bhndb_dw_return_stolen(device_t dev, struct bhndb_resources *br,
1415     struct bhndb_dw_alloc *dwa, bus_addr_t saved)
1416 {
1417 	int error;
1418 
1419 	mtx_assert(&br->dw_steal_mtx, MA_OWNED);
1420 
1421 	error = bhndb_dw_set_addr(dev, br, dwa, saved, 0);
1422 	if (error) {
1423 		panic("failed to restore register window target %#jx: %d\n",
1424 		    (uintmax_t)saved, error);
1425 	}
1426 
1427 	mtx_unlock_spin(&br->dw_steal_mtx);
1428 }
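/*
 * Sketch of a borrowed-window I/O sequence (illustrative only; the actual
 * users of this API live in the bridge's I/O paths, and "addr"/"size" are
 * placeholders):
 *
 *	struct bhndb_dw_alloc	*dwa;
 *	bus_addr_t		 saved;
 *	int			 error;
 *
 *	if ((dwa = bhndb_dw_steal(br, &saved)) != NULL) {
 *		error = bhndb_dw_set_addr(dev, br, dwa, addr, size);
 *		... perform the I/O via dwa->parent_res ...
 *		bhndb_dw_return_stolen(dev, br, dwa, saved);
 *	}
 */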
1429 
1430 /**
1431  * Return the count of @p type register windows in @p table.
1432  *
1433  * @param table The table to search.
1434  * @param type The required window type, or BHNDB_REGWIN_T_INVALID to
1435  * count all register window types.
1436  */
1437 size_t
1438 bhndb_regwin_count(const struct bhndb_regwin *table,
1439     bhndb_regwin_type_t type)
1440 {
1441 	const struct bhndb_regwin	*rw;
1442 	size_t				 count;
1443 
1444 	count = 0;
1445 	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) {
1446 		if (type == BHNDB_REGWIN_T_INVALID || rw->win_type == type)
1447 			count++;
1448 	}
1449 
1450 	return (count);
1451 }
1452 
1453 /**
1454  * Search @p table for the first window with the given @p type.
1455  *
1456  * @param table The table to search.
1457  * @param type The required window type.
1458  * @param min_size The minimum window size.
1459  *
1460  * @retval bhndb_regwin The first matching window.
1461  * @retval NULL If no window of the requested type could be found.
1462  */
1463 const struct bhndb_regwin *
1464 bhndb_regwin_find_type(const struct bhndb_regwin *table,
1465     bhndb_regwin_type_t type, bus_size_t min_size)
1466 {
1467 	const struct bhndb_regwin *rw;
1468 
1469 	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++)
1470 	{
1471 		if (rw->win_type == type && rw->win_size >= min_size)
1472 			return (rw);
1473 	}
1474 
1475 	return (NULL);
1476 }
1477 
1478 /**
1479  * Search @p windows for the first matching core window.
1480  *
1481  * @param table The table to search.
1482  * @param class The required core class.
1483  * @param unit The required core unit, or -1.
1484  * @param port_type The required port type.
1485  * @param port The required port.
1486  * @param region The required region.
1487  * @param offset The required readable core register block offset.
1488  * @param min_size The required minimum readable size at @p offset.
1489  *
1490  * @retval bhndb_regwin The first matching window.
1491  * @retval NULL If no matching window was found.
1492  */
1493 const struct bhndb_regwin *
1494 bhndb_regwin_find_core(const struct bhndb_regwin *table, bhnd_devclass_t class,
1495     int unit, bhnd_port_type port_type, u_int port, u_int region,
1496     bus_size_t offset, bus_size_t min_size)
1497 {
1498 	const struct bhndb_regwin *rw;
1499 
1500 	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++)
1501 	{
1502 		bus_size_t rw_offset;
1503 
1504 		/* Match on core, port, and region attributes */
1505 		if (rw->win_type != BHNDB_REGWIN_T_CORE)
1506 			continue;
1507 
1508 		if (rw->d.core.class != class)
1509 			continue;
1510 
1511 		if (unit != -1 && rw->d.core.unit != unit)
1512 			continue;
1513 
1514 		if (rw->d.core.port_type != port_type)
1515 			continue;
1516 
1517 		if (rw->d.core.port != port)
1518 			continue;
1519 
1520 		if (rw->d.core.region != region)
1521 			continue;
1522 
1523 		/* Verify that the requested range is mapped within
1524 		 * this register window */
1525 		if (rw->d.core.offset > offset)
1526 			continue;
1527 
1528 		rw_offset = offset - rw->d.core.offset;
1529 
1530 		if (rw->win_size < rw_offset)
1531 			continue;
1532 
1533 		if (rw->win_size - rw_offset < min_size)
1534 			continue;
1535 
1536 		return (rw);
1537 	}
1538 
1539 	return (NULL);
1540 }
1541 
1542 /**
1543  * Search @p table for the best available window of at least @p min_size.
1544  *
1545  * Search order:
1546  * - BHND_REGWIN_T_CORE
1547  * - BHND_REGWIN_T_DYN
1548  *
1549  * @param table The table to search.
1550  * @param class The required core class.
1551  * @param unit The required core unit, or -1.
1552  * @param port_type The required port type.
1553  * @param port The required port.
1554  * @param region The required region.
1555  * @param offset The required readable core register block offset.
1556  * @param min_size The required minimum readable size at @p offset.
1557  *
1558  * @retval bhndb_regwin The first matching window.
1559  * @retval NULL If no matching window was found.
1560  */
1561 const struct bhndb_regwin *
1562 bhndb_regwin_find_best(const struct bhndb_regwin *table,
1563     bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port,
1564     u_int region, bus_size_t offset, bus_size_t min_size)
1565 {
1566 	const struct bhndb_regwin *rw;
1567 
1568 	/* Prefer a fixed core mapping */
1569 	rw = bhndb_regwin_find_core(table, class, unit, port_type,
1570 	    port, region, offset, min_size);
1571 	if (rw != NULL)
1572 		return (rw);
1573 
1574 	/* Fall back on a generic dynamic window */
1575 	return (bhndb_regwin_find_type(table, BHNDB_REGWIN_T_DYN, min_size));
1576 }
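/*
 * Example (hedged sketch): looking up a window capable of mapping a
 * ChipCommon register block. The port/region/offset values shown are
 * placeholders.
 *
 *	const struct bhndb_regwin *rw;
 *
 *	rw = bhndb_regwin_find_best(cfg->register_windows, BHND_DEVCLASS_CC,
 *	    0, BHND_PORT_DEVICE, 0, 0, 0, BHND_DEFAULT_CORE_SIZE);
 *	if (rw == NULL)
 *		return (ENXIO);
 */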
1577 
1578 /**
1579  * Return true if @p regw defines a BHNDB_REGWIN_T_CORE register window
1580  * that matches against @p core.
1581  *
1582  * @param regw A register window to match against.
1583  * @param core The bhnd(4) core info to match against @p regw.
1584  */
1585 bool
1586 bhndb_regwin_match_core(const struct bhndb_regwin *regw,
1587     struct bhnd_core_info *core)
1588 {
1589 	/* Only core windows are supported */
1590 	if (regw->win_type != BHNDB_REGWIN_T_CORE)
1591 		return (false);
1592 
1593 	/* Device class must match */
1594 	if (bhnd_core_class(core) != regw->d.core.class)
1595 		return (false);
1596 
1597 	/* Device unit must match */
1598 	if (core->unit != regw->d.core.unit)
1599 		return (false);
1600 
1601 	/* Matches */
1602 	return (true);
1603 }
1604 
1605 /**
1606  * Search for a core resource priority descriptor in @p table that matches
1607  * @p core.
1608  *
1609  * @param table The table to search.
1610  * @param core The core to match against @p table.
1611  */
1612 const struct bhndb_hw_priority *
1613 bhndb_hw_priority_find_core(const struct bhndb_hw_priority *table,
1614     struct bhnd_core_info *core)
1615 {
1616 	const struct bhndb_hw_priority	*hp;
1617 
1618 	for (hp = table; hp->ports != NULL; hp++) {
1619 		if (bhnd_core_matches(core, &hp->match))
1620 			return (hp);
1621 	}
1622 
1623 	/* not found */
1624 	return (NULL);
1625 }
1626 
1627 /**
1628  * Search for a port resource priority descriptor in @p table.
1629  *
1630  * @param table The table to search.
1631  * @param core The core to match against @p table.
1632  * @param port_type The required port type.
1633  * @param port The required port.
1634  * @param region The required region.
1635  */
1636 const struct bhndb_port_priority *
1637 bhndb_hw_priorty_find_port(const struct bhndb_hw_priority *table,
1638     struct bhnd_core_info *core, bhnd_port_type port_type, u_int port,
1639     u_int region)
1640 {
1641 	const struct bhndb_hw_priority		*hp;
1642 
1643 	if ((hp = bhndb_hw_priority_find_core(table, core)) == NULL)
1644 		return (NULL);
1645 
1646 	for (u_int i = 0; i < hp->num_ports; i++) {
1647 		const struct bhndb_port_priority *pp = &hp->ports[i];
1648 
1649 		if (pp->type != port_type)
1650 			continue;
1651 
1652 		if (pp->port != port)
1653 			continue;
1654 
1655 		if (pp->region != region)
1656 			continue;
1657 
1658 		return (pp);
1659 	}
1660 
1661 	/* not found */
1662 	return (NULL);
1663 }
1664