xref: /freebsd/sys/arm/mv/mv_pci.c (revision 8a0a413e)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008 MARVELL INTERNATIONAL LTD.
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2015 Semihalf
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Marvell integrated PCI/PCI-Express controller driver.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/devmap.h>

#include <machine/fdt.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>

#include "ofw_bus_if.h"
#include "pcib_if.h"

#include <machine/resource.h>
#include <machine/bus.h>

#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>
#include <arm/mv/mvwin.h>

#ifdef DEBUG
#define debugf(fmt, args...) do { printf(fmt, ##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

/*
 * Code and data related to fdt-based PCI configuration.
 *
 * This stuff used to be in dev/fdt/fdt_pci.c and fdt_common.h, but it was
 * always Marvell-specific so that was deleted and the code now lives here.
 */

struct mv_pci_range {
	u_long	base_pci;
	u_long	base_parent;
	u_long	len;
};

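/*
 * Worst-case 'ranges' layout for this node: each entry is
 * <3 child address cells> <up to 3 parent address cells> <2 size cells>,
 * and two entries (I/O and MEM) are expected.
 */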
#define FDT_RANGES_CELLS	((3 + 3 + 2) * 2)

static void
mv_pci_range_dump(struct mv_pci_range *range)
{
#ifdef DEBUG
	printf("\n");
	printf("  base_pci = 0x%08lx\n", range->base_pci);
	printf("  base_par = 0x%08lx\n", range->base_parent);
	printf("  len      = 0x%08lx\n", range->len);
#endif
}

static int
mv_pci_ranges_decode(phandle_t node, struct mv_pci_range *io_space,
    struct mv_pci_range *mem_space)
{
	pcell_t ranges[FDT_RANGES_CELLS];
	struct mv_pci_range *pci_space;
	pcell_t addr_cells, size_cells, par_addr_cells;
	pcell_t *rangesptr;
	pcell_t cell0, cell1, cell2;
	int tuple_size, tuples, i, rv, offset_cells, len;

	/*
	 * Retrieve 'ranges' property.
	 */
	if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0)
		return (EINVAL);
	if (addr_cells != 3 || size_cells != 2)
		return (ERANGE);

	par_addr_cells = fdt_parent_addr_cells(node);
	if (par_addr_cells > 3)
		return (ERANGE);

	len = OF_getproplen(node, "ranges");
	if (len > sizeof(ranges))
		return (ENOMEM);

	if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0)
		return (EINVAL);

	tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells +
	    size_cells);
	tuples = len / tuple_size;

	/*
	 * Initialize the ranges so that we don't have to worry about
	 * having them all defined in the FDT. In particular, it is
	 * perfectly fine not to want I/O space on PCI buses.
	 */
	bzero(io_space, sizeof(*io_space));
	bzero(mem_space, sizeof(*mem_space));

	rangesptr = &ranges[0];
	offset_cells = 0;
	for (i = 0; i < tuples; i++) {
		cell0 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;
		cell1 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;
		cell2 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;

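		/*
		 * Bits 25:24 of the first (phys.hi) cell encode the PCI
		 * address space of the entry: 0x02000000 marks 32-bit
		 * memory space, 0x01000000 marks I/O space.
		 */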
		if (cell0 & 0x02000000) {
			pci_space = mem_space;
		} else if (cell0 & 0x01000000) {
			pci_space = io_space;
		} else {
			rv = ERANGE;
			goto out;
		}

		if (par_addr_cells == 3) {
			/*
			 * This is a PCI subnode 'ranges'. Skip cell0 and
			 * cell1 of this entry and only use cell2.
			 */
			offset_cells = 2;
			rangesptr += offset_cells;
		}

		if ((par_addr_cells - offset_cells) > 2) {
			rv = ERANGE;
			goto out;
		}
		pci_space->base_parent = fdt_data_get((void *)rangesptr,
		    par_addr_cells - offset_cells);
		rangesptr += par_addr_cells - offset_cells;

		if (size_cells > 2) {
			rv = ERANGE;
			goto out;
		}
		pci_space->len = fdt_data_get((void *)rangesptr, size_cells);
		rangesptr += size_cells;

		pci_space->base_pci = cell2;
	}
	rv = 0;
out:
	return (rv);
}

static int
mv_pci_ranges(phandle_t node, struct mv_pci_range *io_space,
    struct mv_pci_range *mem_space)
{
	int err;

	debugf("Processing PCI node: %x\n", node);
	if ((err = mv_pci_ranges_decode(node, io_space, mem_space)) != 0) {
		debugf("could not decode parent PCI node 'ranges'\n");
		return (err);
	}

	debugf("Post fixup dump:\n");
	mv_pci_range_dump(io_space);
	mv_pci_range_dump(mem_space);
	return (0);
}

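/*
 * Fill two devmap entries (I/O followed by MEM) for the given PCI node.
 * If no virtual address is supplied, the parent-bus physical address is
 * used, i.e. the region is mapped 1:1.
 */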
int
mv_pci_devmap(phandle_t node, struct devmap_entry *devmap, vm_offset_t io_va,
    vm_offset_t mem_va)
{
	struct mv_pci_range io_space, mem_space;
	int error;

	if ((error = mv_pci_ranges_decode(node, &io_space, &mem_space)) != 0)
		return (error);

	devmap->pd_va = (io_va ? io_va : io_space.base_parent);
	devmap->pd_pa = io_space.base_parent;
	devmap->pd_size = io_space.len;
	devmap++;

	devmap->pd_va = (mem_va ? mem_va : mem_space.base_parent);
	devmap->pd_pa = mem_space.base_parent;
	devmap->pd_size = mem_space.len;
	return (0);
}

/*
 * Code and data related to the Marvell pcib driver.
 */

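/*
 * Configuration address register layout (standard type 1 style):
 * enable in bit 31, bus in bits 23:16, device in bits 15:11,
 * function in bits 10:8, dword-aligned register in bits 7:2.
 */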
#define PCI_CFG_ENA		(1U << 31)
#define PCI_CFG_BUS(bus)	(((bus) & 0xff) << 16)
#define PCI_CFG_DEV(dev)	(((dev) & 0x1f) << 11)
#define PCI_CFG_FUN(fun)	(((fun) & 0x7) << 8)
#define PCI_CFG_PCIE_REG(reg)	((reg) & 0xfc)

#define PCI_REG_CFG_ADDR	0x0C78
#define PCI_REG_CFG_DATA	0x0C7C

#define PCIE_REG_CFG_ADDR	0x18F8
#define PCIE_REG_CFG_DATA	0x18FC
#define PCIE_REG_CONTROL	0x1A00
#define   PCIE_CTRL_LINK1X	0x00000001
#define PCIE_REG_STATUS		0x1A04
#define PCIE_REG_IRQ_MASK	0x1910

#define PCIE_CONTROL_ROOT_CMPLX	(1 << 1)
#define PCIE_CONTROL_HOT_RESET	(1 << 24)

#define PCIE_LINK_TIMEOUT	1000000

#define PCIE_STATUS_LINK_DOWN	1
#define PCIE_STATUS_DEV_OFFS	16

/* Minimum PCI Memory and I/O allocations taken from PCI spec (in bytes) */
#define PCI_MIN_IO_ALLOC	4
#define PCI_MIN_MEM_ALLOC	16

#define BITS_PER_UINT32		(NBBY * sizeof(uint32_t))

struct mv_pcib_softc {
	device_t	sc_dev;

	struct rman	sc_mem_rman;
	bus_addr_t	sc_mem_base;
	bus_addr_t	sc_mem_size;
	uint32_t	sc_mem_map[MV_PCI_MEM_SLICE_SIZE /
	    (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)];
	int		sc_win_target;
	int		sc_mem_win_attr;

	struct rman	sc_io_rman;
	bus_addr_t	sc_io_base;
	bus_addr_t	sc_io_size;
	uint32_t	sc_io_map[MV_PCI_IO_SLICE_SIZE /
	    (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)];
	int		sc_io_win_attr;

	struct resource	*sc_res;
	bus_space_handle_t sc_bsh;
	bus_space_tag_t	sc_bst;
	int		sc_rid;

	struct mtx	sc_msi_mtx;
	uint32_t	sc_msi_bitmap;

	int		sc_busnr;		/* Host bridge bus number */
	int		sc_devnr;		/* Host bridge device number */
	int		sc_type;
	int		sc_mode;		/* Endpoint / Root Complex */

	struct ofw_bus_iinfo	sc_pci_iinfo;
};

/* Local forward prototypes */
static int mv_pcib_decode_win(phandle_t, struct mv_pcib_softc *);
static void mv_pcib_hw_cfginit(void);
static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *, u_int, u_int,
    u_int, u_int, int);
static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *, u_int, u_int,
    u_int, u_int, uint32_t, int);
static int mv_pcib_init(struct mv_pcib_softc *, int, int);
static int mv_pcib_init_all_bars(struct mv_pcib_softc *, int, int, int, int);
static void mv_pcib_init_bridge(struct mv_pcib_softc *, int, int, int);
static inline void pcib_write_irq_mask(struct mv_pcib_softc *, uint32_t);
static void mv_pcib_enable(struct mv_pcib_softc *, uint32_t);
static int mv_pcib_mem_init(struct mv_pcib_softc *);

/* Forward prototypes */
static int mv_pcib_probe(device_t);
static int mv_pcib_attach(device_t);

static struct resource *mv_pcib_alloc_resource(device_t, device_t, int, int *,
    rman_res_t, rman_res_t, rman_res_t, u_int);
static int mv_pcib_release_resource(device_t, device_t, int, int,
    struct resource *);
static int mv_pcib_read_ivar(device_t, device_t, int, uintptr_t *);
static int mv_pcib_write_ivar(device_t, device_t, int, uintptr_t);

static int mv_pcib_maxslots(device_t);
static uint32_t mv_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int);
static void mv_pcib_write_config(device_t, u_int, u_int, u_int, u_int,
    uint32_t, int);
static int mv_pcib_route_interrupt(device_t, device_t, int);
#if defined(SOC_MV_ARMADAXP)
static int mv_pcib_alloc_msi(device_t, device_t, int, int, int *);
static int mv_pcib_map_msi(device_t, device_t, int, uint64_t *, uint32_t *);
static int mv_pcib_release_msi(device_t, device_t, int, int *);
#endif

/*
 * Bus interface definitions.
 */
static device_method_t mv_pcib_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			mv_pcib_probe),
	DEVMETHOD(device_attach,		mv_pcib_attach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,		mv_pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,		mv_pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource,		mv_pcib_alloc_resource),
	DEVMETHOD(bus_release_resource,		mv_pcib_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		mv_pcib_maxslots),
	DEVMETHOD(pcib_read_config,		mv_pcib_read_config),
	DEVMETHOD(pcib_write_config,		mv_pcib_write_config),
	DEVMETHOD(pcib_route_interrupt,		mv_pcib_route_interrupt),
	DEVMETHOD(pcib_request_feature,		pcib_request_feature_allow),
#if defined(SOC_MV_ARMADAXP)
	DEVMETHOD(pcib_alloc_msi,		mv_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,		mv_pcib_release_msi),
	DEVMETHOD(pcib_map_msi,			mv_pcib_map_msi),
#endif

	/* OFW bus interface */
	DEVMETHOD(ofw_bus_get_compat,   ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,    ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,     ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,     ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,     ofw_bus_gen_get_type),

	DEVMETHOD_END
};

static driver_t mv_pcib_driver = {
	"pcib",
	mv_pcib_methods,
	sizeof(struct mv_pcib_softc),
};

devclass_t pcib_devclass;

DRIVER_MODULE(pcib, ofwbus, mv_pcib_driver, pcib_devclass, 0, 0);
DRIVER_MODULE(pcib, pcib_ctrl, mv_pcib_driver, pcib_devclass, 0, 0);

static struct mtx pcicfg_mtx;

static int
mv_pcib_probe(device_t self)
{
	phandle_t node;

	node = ofw_bus_get_node(self);
	if (!fdt_is_type(node, "pci"))
		return (ENXIO);

	if (!(ofw_bus_is_compatible(self, "mrvl,pcie") ||
	    ofw_bus_is_compatible(self, "mrvl,pci")))
		return (ENXIO);

	device_set_desc(self, "Marvell Integrated PCI/PCI-E Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mv_pcib_attach(device_t self)
{
	struct mv_pcib_softc *sc;
	phandle_t node, parnode;
	uint32_t val, reg0;
	int err, bus, devfn, port_id;

	sc = device_get_softc(self);
	sc->sc_dev = self;

	node = ofw_bus_get_node(self);
	parnode = OF_parent(node);

	if (OF_getencprop(node, "marvell,pcie-port", &port_id,
	    sizeof(port_id)) <= 0) {
		/* If the port ID property is absent from the FDT, default to 0. */
		if (!OF_hasprop(node, "marvell,pcie-port"))
			port_id = 0;
		else
			return (ENXIO);
	}

	if (ofw_bus_node_is_compatible(node, "mrvl,pcie")) {
		sc->sc_type = MV_TYPE_PCIE;
		sc->sc_win_target = MV_WIN_PCIE_TARGET(port_id);
		sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR(port_id);
		sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR(port_id);
	} else if (ofw_bus_node_is_compatible(node, "mrvl,pci")) {
		sc->sc_type = MV_TYPE_PCI;
		sc->sc_win_target = MV_WIN_PCI_TARGET;
		sc->sc_mem_win_attr = MV_WIN_PCI_MEM_ATTR;
		sc->sc_io_win_attr = MV_WIN_PCI_IO_ATTR;
	} else
		return (ENXIO);

	/*
	 * Retrieve our mem-mapped registers range.
	 */
	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(self, "could not map memory\n");
		return (ENXIO);
	}
	sc->sc_bst = rman_get_bustag(sc->sc_res);
	sc->sc_bsh = rman_get_bushandle(sc->sc_res);

	val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_CONTROL);
	sc->sc_mode = (val & PCIE_CONTROL_ROOT_CMPLX ? MV_MODE_ROOT :
	    MV_MODE_ENDPOINT);

	/*
	 * Get PCI interrupt info.
	 */
	if (sc->sc_mode == MV_MODE_ROOT)
		ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(pcell_t));

	/*
	 * Configure decode windows for PCI(E) access.
	 */
	if (mv_pcib_decode_win(node, sc) != 0)
		return (ENXIO);

	mv_pcib_hw_cfginit();

	/*
	 * Enable PCIE device.
	 */
	mv_pcib_enable(sc, port_id);

	/*
	 * Memory management.
	 */
	err = mv_pcib_mem_init(sc);
	if (err)
		return (err);

	/*
	 * Preliminary bus enumeration: find the first device with an active
	 * link and record the bus number from which the actual enumeration
	 * should start.
	 */
	for (bus = 0; bus < PCI_BUSMAX; bus++) {
		for (devfn = 0; devfn < mv_pcib_maxslots(self); devfn++) {
			reg0 = mv_pcib_read_config(self, bus, devfn, devfn & 0x7, 0x0, 4);
			if (reg0 == (~0U))
				continue; /* no device */
			else {
				sc->sc_busnr = bus; /* update bus number */
				break;
			}
		}
	}

	if (sc->sc_mode == MV_MODE_ROOT) {
		err = mv_pcib_init(sc, sc->sc_busnr,
		    mv_pcib_maxslots(sc->sc_dev));
		if (err)
			goto error;

		device_add_child(self, "pci", -1);
	} else {
		sc->sc_devnr = 1;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    PCIE_REG_STATUS, 1 << PCIE_STATUS_DEV_OFFS);
		device_add_child(self, "pci_ep", -1);
	}

	mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
	return (bus_generic_attach(self));

error:
	/* XXX SYS_RES_ should be released here */
	rman_fini(&sc->sc_mem_rman);
	rman_fini(&sc->sc_io_rman);

	return (err);
}

static void
mv_pcib_enable(struct mv_pcib_softc *sc, uint32_t unit)
{
	uint32_t val;
#if !defined(SOC_MV_ARMADAXP)
	int timeout;

	/*
	 * Check if PCIE device is enabled.
	 */
	if (read_cpu_ctrl(CPU_CONTROL) & CPU_CONTROL_PCIE_DISABLE(unit)) {
		write_cpu_ctrl(CPU_CONTROL, read_cpu_ctrl(CPU_CONTROL) &
		    ~(CPU_CONTROL_PCIE_DISABLE(unit)));

		timeout = PCIE_LINK_TIMEOUT;
		val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    PCIE_REG_STATUS);
		while (((val & PCIE_STATUS_LINK_DOWN) == 1) && (timeout > 0)) {
			DELAY(1000);
			timeout -= 1000;
			val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    PCIE_REG_STATUS);
		}
	}
#endif

	if (sc->sc_mode == MV_MODE_ROOT) {
		/*
		 * Enable PCI bridge.
		 */
		val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND);
		val |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN |
		    PCIM_CMD_MEMEN | PCIM_CMD_PORTEN;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND, val);
	}
}

static int
mv_pcib_mem_init(struct mv_pcib_softc *sc)
{
	int err;

	/*
	 * Memory management.
	 */
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	err = rman_init(&sc->sc_mem_rman);
	if (err)
		return (err);

	sc->sc_io_rman.rm_type = RMAN_ARRAY;
	err = rman_init(&sc->sc_io_rman);
	if (err) {
		rman_fini(&sc->sc_mem_rman);
		return (err);
	}

	err = rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base,
	    sc->sc_mem_base + sc->sc_mem_size - 1);
	if (err)
		goto error;

	err = rman_manage_region(&sc->sc_io_rman, sc->sc_io_base,
	    sc->sc_io_base + sc->sc_io_size - 1);
	if (err)
		goto error;

	return (0);

error:
	rman_fini(&sc->sc_mem_rman);
	rman_fini(&sc->sc_io_rman);

	return (err);
}

static inline uint32_t
pcib_bit_get(uint32_t *map, uint32_t bit)
{
	uint32_t n = bit / BITS_PER_UINT32;

	bit = bit % BITS_PER_UINT32;
	return (map[n] & (1 << bit));
}

static inline void
pcib_bit_set(uint32_t *map, uint32_t bit)
{
	uint32_t n = bit / BITS_PER_UINT32;

	bit = bit % BITS_PER_UINT32;
	map[n] |= (1 << bit);
}

static inline uint32_t
pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits)
{
	uint32_t i;

	for (i = start; i < start + bits; i++)
		if (pcib_bit_get(map, i))
			return (0);

	return (1);
}

static inline void
pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits)
{
	uint32_t i;

	for (i = start; i < start + bits; i++)
		pcib_bit_set(map, i);
}

/*
 * The idea of this allocator is taken from ARM No-Cache memory
 * management code (sys/arm/arm/vm_machdep.c).
 */
static bus_addr_t
pcib_alloc(struct mv_pcib_softc *sc, uint32_t smask)
{
	uint32_t bits, bits_limit, i, *map, min_alloc, size;
	bus_addr_t addr = 0;
	bus_addr_t base;

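	/*
	 * 'smask' is the raw value read back from a BAR after writing ~0:
	 * bit 0 distinguishes I/O from memory space, and once the low type
	 * bits are cleared the decoded size is ~smask + 1.
	 */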
	if (smask & 1) {
		base = sc->sc_io_base;
		min_alloc = PCI_MIN_IO_ALLOC;
		bits_limit = sc->sc_io_size / min_alloc;
		map = sc->sc_io_map;
		smask &= ~0x3;
	} else {
		base = sc->sc_mem_base;
		min_alloc = PCI_MIN_MEM_ALLOC;
		bits_limit = sc->sc_mem_size / min_alloc;
		map = sc->sc_mem_map;
		smask &= ~0xF;
	}

	size = ~smask + 1;
	bits = size / min_alloc;

	for (i = 0; i + bits <= bits_limit; i += bits)
		if (pcib_map_check(map, i, bits)) {
			pcib_map_set(map, i, bits);
			addr = base + (i * min_alloc);
			return (addr);
		}

	return (addr);
}

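/*
 * Size and program a single BAR.  Returns the number of 32-bit BAR slots
 * consumed (2 for a 64-bit memory BAR, otherwise 1), 1 for an unimplemented
 * BAR, or -1 if the address space pool is exhausted.
 */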
static int
mv_pcib_init_bar(struct mv_pcib_softc *sc, int bus, int slot, int func,
    int barno)
{
	uint32_t addr, bar;
	int reg, width;

	reg = PCIR_BAR(barno);

	/*
	 * Need to init the BAR register with 0xffffffff before correct
	 * value can be read.
	 */
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, ~0, 4);
	bar = mv_pcib_read_config(sc->sc_dev, bus, slot, func, reg, 4);
	if (bar == 0)
		return (1);

	/* Calculate BAR size: 64 or 32 bit (in 32-bit units) */
	width = ((bar & 7) == 4) ? 2 : 1;

	addr = pcib_alloc(sc, bar);
	if (!addr)
		return (-1);

	if (bootverbose)
		printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n",
		    bus, slot, func, reg, bar, addr);

	mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, addr, 4);
	if (width == 2)
		mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg + 4,
		    0, 4);

	return (width);
}

static void
mv_pcib_init_bridge(struct mv_pcib_softc *sc, int bus, int slot, int func)
{
	bus_addr_t io_base, mem_base;
	uint32_t io_limit, mem_limit;
	int secbus;

	io_base = sc->sc_io_base;
	io_limit = io_base + sc->sc_io_size - 1;
	mem_base = sc->sc_mem_base;
	mem_limit = mem_base + sc->sc_mem_size - 1;

	/* Configure I/O decode registers */
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEL_1,
	    io_base >> 8, 1);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEH_1,
	    io_base >> 16, 2);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITL_1,
	    io_limit >> 8, 1);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITH_1,
	    io_limit >> 16, 2);

	/* Configure memory decode registers */
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMBASE_1,
	    mem_base >> 16, 2);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMLIMIT_1,
	    mem_limit >> 16, 2);

	/* Disable memory prefetch decode */
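	/* (programming a base above the limit leaves the window empty) */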
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEL_1,
	    0x10, 2);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEH_1,
	    0x0, 4);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITL_1,
	    0xF, 2);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITH_1,
	    0x0, 4);

	secbus = mv_pcib_read_config(sc->sc_dev, bus, slot, func,
	    PCIR_SECBUS_1, 1);

	/* Configure buses behind the bridge */
	mv_pcib_init(sc, secbus, PCI_SLOTMAX);
}

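/*
 * Enumerate a single bus: size and program the BARs of every function
 * found, then recurse into any PCI-PCI bridges.  Memory and I/O decoding
 * is kept disabled while the BARs are being sized.
 */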
static int
mv_pcib_init(struct mv_pcib_softc *sc, int bus, int maxslot)
{
	int slot, func, maxfunc, error;
	uint8_t hdrtype, command, class, subclass;

	for (slot = 0; slot <= maxslot; slot++) {
		maxfunc = 0;
		for (func = 0; func <= maxfunc; func++) {
			hdrtype = mv_pcib_read_config(sc->sc_dev, bus, slot,
			    func, PCIR_HDRTYPE, 1);

			if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
				continue;

			if (func == 0 && (hdrtype & PCIM_MFDEV))
				maxfunc = PCI_FUNCMAX;

			command = mv_pcib_read_config(sc->sc_dev, bus, slot,
			    func, PCIR_COMMAND, 1);
			command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN);
			mv_pcib_write_config(sc->sc_dev, bus, slot, func,
			    PCIR_COMMAND, command, 1);

			error = mv_pcib_init_all_bars(sc, bus, slot, func,
			    hdrtype);

			if (error)
				return (error);

			command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN |
			    PCIM_CMD_PORTEN;
			mv_pcib_write_config(sc->sc_dev, bus, slot, func,
			    PCIR_COMMAND, command, 1);

			/* Handle PCI-PCI bridges */
			class = mv_pcib_read_config(sc->sc_dev, bus, slot,
			    func, PCIR_CLASS, 1);
			subclass = mv_pcib_read_config(sc->sc_dev, bus, slot,
			    func, PCIR_SUBCLASS, 1);

			if (class != PCIC_BRIDGE ||
			    subclass != PCIS_BRIDGE_PCI)
				continue;

			mv_pcib_init_bridge(sc, bus, slot, func);
		}
	}

	/* Enable all ABCD interrupts */
	pcib_write_irq_mask(sc, (0xF << 24));

	return (0);
}

static int
mv_pcib_init_all_bars(struct mv_pcib_softc *sc, int bus, int slot,
    int func, int hdrtype)
{
	int maxbar, bar, i;

	maxbar = (hdrtype & PCIM_HDRTYPE) ? 0 : 6;
	bar = 0;

	/* Program the base address registers */
	while (bar < maxbar) {
		i = mv_pcib_init_bar(sc, bus, slot, func, bar);
		bar += i;
		if (i < 0) {
			device_printf(sc->sc_dev,
			    "PCI IO/Memory space exhausted\n");
			return (ENOMEM);
		}
	}

	return (0);
}

static struct resource *
mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);
	struct rman *rm = NULL;
	struct resource *res;

	switch (type) {
	case SYS_RES_IOPORT:
		rm = &sc->sc_io_rman;
		break;
	case SYS_RES_MEMORY:
		rm = &sc->sc_mem_rman;
		break;
	default:
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
		    type, rid, start, end, count, flags));
	}

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		start = sc->sc_mem_base;
		end = sc->sc_mem_base + sc->sc_mem_size - 1;
		count = sc->sc_mem_size;
	}

	if ((start < sc->sc_mem_base) || (start + count - 1 != end) ||
	    (end > sc->sc_mem_base + sc->sc_mem_size - 1))
		return (NULL);

	res = rman_reserve_resource(rm, start, end, count, flags, child);
	if (res == NULL)
		return (NULL);

	rman_set_rid(res, *rid);
	rman_set_bustag(res, fdtbus_bs_tag);
	rman_set_bushandle(res, start);

	if (flags & RF_ACTIVE)
		if (bus_activate_resource(child, type, *rid, res)) {
			rman_release_resource(res);
			return (NULL);
		}

	return (res);
}

static int
mv_pcib_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *res)
{

	if (type != SYS_RES_IOPORT && type != SYS_RES_MEMORY)
		return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
		    type, rid, res));

	return (rman_release_resource(res));
}

static int
mv_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (which) {
	case PCIB_IVAR_BUS:
		*result = sc->sc_busnr;
		return (0);
	case PCIB_IVAR_DOMAIN:
		*result = device_get_unit(dev);
		return (0);
	}

	return (ENOENT);
}

static int
mv_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (which) {
	case PCIB_IVAR_BUS:
		sc->sc_busnr = value;
		return (0);
	}

	return (ENOENT);
}

static inline void
pcib_write_irq_mask(struct mv_pcib_softc *sc, uint32_t mask)
{

	if (sc->sc_type != MV_TYPE_PCIE)
		return;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_IRQ_MASK, mask);
}

static void
mv_pcib_hw_cfginit(void)
{
	static int opened = 0;

	if (opened)
		return;

	mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN);
	opened = 1;
}

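/*
 * Indirect configuration access: the bus/device/function/register address
 * is written to the CFG_ADDR register and the payload is then transferred
 * through CFG_DATA, with sub-word accesses steered onto the proper byte
 * lanes by the low register bits.
 */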
static uint32_t
mv_pcib_hw_cfgread(struct mv_pcib_softc *sc, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	uint32_t addr, data, ca, cd;

	ca = (sc->sc_type != MV_TYPE_PCI) ?
	    PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR;
	cd = (sc->sc_type != MV_TYPE_PCI) ?
	    PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA;
	addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) |
	    PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg);

	mtx_lock_spin(&pcicfg_mtx);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr);

	data = ~0;
	switch (bytes) {
	case 1:
		data = bus_space_read_1(sc->sc_bst, sc->sc_bsh,
		    cd + (reg & 3));
		break;
	case 2:
		data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh,
		    cd + (reg & 2)));
		break;
	case 4:
		data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    cd));
		break;
	}
	mtx_unlock_spin(&pcicfg_mtx);
	return (data);
}

static void
mv_pcib_hw_cfgwrite(struct mv_pcib_softc *sc, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t data, int bytes)
{
	uint32_t addr, ca, cd;

	ca = (sc->sc_type != MV_TYPE_PCI) ?
	    PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR;
	cd = (sc->sc_type != MV_TYPE_PCI) ?
	    PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA;
	addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) |
	    PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg);

	mtx_lock_spin(&pcicfg_mtx);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr);

	switch (bytes) {
	case 1:
		bus_space_write_1(sc->sc_bst, sc->sc_bsh,
		    cd + (reg & 3), data);
		break;
	case 2:
		bus_space_write_2(sc->sc_bst, sc->sc_bsh,
		    cd + (reg & 2), htole16(data));
		break;
	case 4:
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    cd, htole32(data));
		break;
	}
	mtx_unlock_spin(&pcicfg_mtx);
}

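/*
 * Maximum slot number: 1 for a PCIe port (a single point-to-point link),
 * PCI_SLOTMAX for legacy PCI.
 */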
static int
mv_pcib_maxslots(device_t dev)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	return ((sc->sc_type != MV_TYPE_PCI) ? 1 : PCI_SLOTMAX);
}

static int
mv_pcib_root_slot(device_t dev, u_int bus, u_int slot, u_int func)
{
#if defined(SOC_MV_ARMADA38X)
	struct mv_pcib_softc *sc = device_get_softc(dev);
	uint32_t vendor, device;

	vendor = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_VENDOR,
	    PCIR_VENDOR_LENGTH);
	device = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_DEVICE,
	    PCIR_DEVICE_LENGTH) & MV_DEV_FAMILY_MASK;

	return (vendor == PCI_VENDORID_MRVL && device == MV_DEV_ARMADA38X);
#else
	/* On platforms other than Armada38x, root link is always at slot 0 */
	return (slot == 0);
#endif
}

static uint32_t
mv_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, int bytes)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	/* Return ~0 if link is inactive or trying to read from Root */
	if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) &
	    PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func))
		return (~0U);

	return (mv_pcib_hw_cfgread(sc, bus, slot, func, reg, bytes));
}

static void
mv_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, uint32_t val, int bytes)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	/* Return if link is inactive or trying to write to Root */
	if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) &
	    PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func))
		return;

	mv_pcib_hw_cfgwrite(sc, bus, slot, func, reg, val, bytes);
}

static int
mv_pcib_route_interrupt(device_t bus, device_t dev, int pin)
{
	struct mv_pcib_softc *sc;
	struct ofw_pci_register reg;
	uint32_t pintr, mintr[4];
	int icells;
	phandle_t iparent;

	sc = device_get_softc(bus);
	pintr = pin;

	/* Fabricate imap information in case this isn't an OFW device */
	bzero(&reg, sizeof(reg));
	reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) |
	    (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) |
	    (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT);

	icells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo,
	    &reg, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr),
	    &iparent);
	if (icells > 0)
		return (ofw_bus_map_intr(dev, iparent, icells, mintr));

	/* Maybe it's a real interrupt, not an intpin */
	if (pin > 4)
		return (pin);

	device_printf(bus, "could not route pin %d for device %d.%d\n",
	    pin, pci_get_slot(dev), pci_get_function(dev));
	return (PCI_INVALID_IRQ);
}

static int
mv_pcib_decode_win(phandle_t node, struct mv_pcib_softc *sc)
{
	struct mv_pci_range io_space, mem_space;
	device_t dev;
	int error;

	dev = sc->sc_dev;

	if ((error = mv_pci_ranges(node, &io_space, &mem_space)) != 0) {
		device_printf(dev, "could not retrieve 'ranges' data\n");
		return (error);
	}

	/* Configure CPU decoding windows */
	error = decode_win_cpu_set(sc->sc_win_target,
	    sc->sc_io_win_attr, io_space.base_parent, io_space.len, ~0);
	if (error < 0) {
		device_printf(dev, "could not set up CPU decode "
		    "window for PCI IO\n");
		return (ENXIO);
	}
	error = decode_win_cpu_set(sc->sc_win_target,
	    sc->sc_mem_win_attr, mem_space.base_parent, mem_space.len,
	    mem_space.base_parent);
	if (error < 0) {
		device_printf(dev, "could not set up CPU decode "
		    "windows for PCI MEM\n");
		return (ENXIO);
	}

	sc->sc_io_base = io_space.base_parent;
	sc->sc_io_size = io_space.len;

	sc->sc_mem_base = mem_space.base_parent;
	sc->sc_mem_size = mem_space.len;

	return (0);
}

#if defined(SOC_MV_ARMADAXP)
static int
mv_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	struct mv_pcib_softc *sc;

	sc = device_get_softc(dev);
	irq = irq - MSI_IRQ;

	/* validate parameters */
	if (isclr(&sc->sc_msi_bitmap, irq)) {
		device_printf(dev, "invalid MSI 0x%x\n", irq);
		return (EINVAL);
	}

	mv_msi_data(irq, addr, data);

	debugf("%s: irq: %d addr: %jx data: %x\n",
	    __func__, irq, *addr, *data);

	return (0);
}

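/*
 * First-fit search for 'count' contiguous free entries in the MSI bitmap;
 * the IRQ numbers handed back to the child are offset by MSI_IRQ.
 */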
static int
mv_pcib_alloc_msi(device_t dev, device_t child, int count,
    int maxcount __unused, int *irqs)
{
	struct mv_pcib_softc *sc;
	u_int start = 0, i;

	if (powerof2(count) == 0 || count > MSI_IRQ_NUM)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->sc_msi_mtx);

	for (start = 0; (start + count) < MSI_IRQ_NUM; start++) {
		for (i = start; i < start + count; i++) {
			if (isset(&sc->sc_msi_bitmap, i))
				break;
		}
		if (i == start + count)
			break;
	}

	if ((start + count) == MSI_IRQ_NUM) {
		mtx_unlock(&sc->sc_msi_mtx);
		return (ENXIO);
	}

	for (i = start; i < start + count; i++) {
		setbit(&sc->sc_msi_bitmap, i);
		*irqs++ = MSI_IRQ + i;
	}
	debugf("%s: start: %x count: %x\n", __func__, start, count);

	mtx_unlock(&sc->sc_msi_mtx);
	return (0);
}

static int
mv_pcib_release_msi(device_t dev, device_t child, int count, int *irqs)
{
	struct mv_pcib_softc *sc;
	u_int i;

	sc = device_get_softc(dev);
	mtx_lock(&sc->sc_msi_mtx);

	for (i = 0; i < count; i++)
		clrbit(&sc->sc_msi_bitmap, irqs[i] - MSI_IRQ);

	mtx_unlock(&sc->sc_msi_mtx);
	return (0);
}
#endif