xref: /openbsd/sys/dev/fdt/mvkpcie.c (revision d89ec533)
1 /*	$OpenBSD: mvkpcie.c,v 1.11 2021/10/04 19:04:12 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/extent.h>
23 #include <sys/malloc.h>
24 #include <sys/evcount.h>
25 
26 #include <machine/intr.h>
27 #include <machine/bus.h>
28 #include <machine/fdt.h>
29 
30 #include <dev/pci/pcidevs.h>
31 #include <dev/pci/pcireg.h>
32 #include <dev/pci/pcivar.h>
33 #include <dev/pci/ppbreg.h>
34 
35 #include <dev/ofw/openfirm.h>
36 #include <dev/ofw/ofw_clock.h>
37 #include <dev/ofw/ofw_gpio.h>
38 #include <dev/ofw/ofw_misc.h>
39 #include <dev/ofw/ofw_pinctrl.h>
40 #include <dev/ofw/ofw_power.h>
41 #include <dev/ofw/fdt.h>
42 
/* Registers */
#define PCIE_DEV_ID			0x0000
#define PCIE_CMD			0x0004
#define PCIE_DEV_REV			0x0008
#define PCIE_DEV_CTRL_STATS		0x00c8
#define  PCIE_DEV_CTRL_STATS_SNOOP		(1 << 1)
#define  PCIE_DEV_CTRL_STATS_RELAX_ORDER	(1 << 4)
#define  PCIE_DEV_CTRL_STATS_MAX_PAYLOAD_7	(0x7 << 5)
#define  PCIE_DEV_CTRL_STATS_MAX_RD_REQ_SZ	(0x2 << 12)
#define PCIE_LINK_CTRL_STAT		0x00d0
#define  PCIE_LINK_CTRL_STAT_LINK_L0S_ENTRY	(1 << 0)
#define  PCIE_LINK_CTRL_STAT_LINK_TRAINING	(1 << 5)
#define  PCIE_LINK_CTRL_STAT_LINK_WIDTH_1	(1 << 20)
#define PCIE_ERR_CAPCTL			0x0118
#define  PCIE_ERR_CAPCTL_ECRC_CHK_TX		(1 << 5)
#define  PCIE_ERR_CAPCTL_ECRC_CHK_TX_EN		(1 << 6)
#define  PCIE_ERR_CAPCTL_ECRC_CHCK		(1 << 7)
#define  PCIE_ERR_CAPCTL_ECRC_CHCK_RCV		(1 << 8)
/* PIO engine: performs single config/IO transactions on the link. */
#define PIO_CTRL			0x4000
#define  PIO_CTRL_TYPE_MASK			(0xf << 0)
#define  PIO_CTRL_TYPE_RD0			(0x8 << 0)
#define  PIO_CTRL_TYPE_RD1			(0x9 << 0)
#define  PIO_CTRL_TYPE_WR0			(0xa << 0)
#define  PIO_CTRL_TYPE_WR1			(0xb << 0)
#define  PIO_CTRL_ADDR_WIN_DISABLE		(1 << 24)
#define PIO_STAT			0x4004
#define  PIO_STAT_COMP_STATUS			(0x7 << 7)
#define PIO_ADDR_LS			0x4008
#define PIO_ADDR_MS			0x400c
#define PIO_WR_DATA			0x4010
#define PIO_WR_DATA_STRB		0x4014
#define  PIO_WR_DATA_STRB_VALUE			0xf
#define PIO_RD_DATA			0x4018
#define PIO_START			0x401c
#define  PIO_START_STOP				(0 << 0)
#define  PIO_START_START			(1 << 0)
#define PIO_ISR				0x4020
#define  PIO_ISR_CLEAR				(1 << 0)
#define PIO_ISRM			0x4024
/* Core control and interrupt status/mask registers. */
#define PCIE_CORE_CTRL0			0x4800
#define  PCIE_CORE_CTRL0_GEN_1			(0 << 0)
#define  PCIE_CORE_CTRL0_GEN_2			(1 << 0)
#define  PCIE_CORE_CTRL0_GEN_3			(2 << 0)
#define  PCIE_CORE_CTRL0_GEN_MASK		(0x3 << 0)
#define  PCIE_CORE_CTRL0_IS_RC			(1 << 2)
#define  PCIE_CORE_CTRL0_LANE_1			(0 << 3)
#define  PCIE_CORE_CTRL0_LANE_2			(1 << 3)
#define  PCIE_CORE_CTRL0_LANE_4			(2 << 3)
#define  PCIE_CORE_CTRL0_LANE_8			(3 << 3)
#define  PCIE_CORE_CTRL0_LANE_MASK		(0x3 << 3)
#define  PCIE_CORE_CTRL0_LINK_TRAINING		(1 << 6)
#define PCIE_CORE_CTRL2			0x4808
#define  PCIE_CORE_CTRL2_RESERVED		(0x7 << 0)
#define  PCIE_CORE_CTRL2_TD_ENABLE		(1 << 4)
#define  PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	(1 << 5)
#define  PCIE_CORE_CTRL2_OB_WIN_ENABLE		(1 << 6)
#define  PCIE_CORE_CTRL2_MSI_ENABLE		(1 << 10)
#define PCIE_CORE_ISR0_STATUS		0x4840
#define PCIE_CORE_ISR0_MASK		0x4844
#define  PCIE_CORE_ISR0_MASK_MSI_INT		(1 << 24)
#define  PCIE_CORE_ISR0_MASK_ALL		0x07ffffff
#define PCIE_CORE_ISR1_STATUS		0x4848
#define PCIE_CORE_ISR1_MASK		0x484c
#define  PCIE_CORE_ISR1_MASK_ALL		0x00000ff0
#define  PCIE_CORE_ISR1_MASK_INTX(x)		(1 << (x + 8))
#define PCIE_CORE_MSI_ADDR_LOW		0x4850
#define PCIE_CORE_MSI_ADDR_HIGH		0x4854
#define PCIE_CORE_MSI_STATUS		0x4858
#define PCIE_CORE_MSI_MASK		0x485c
#define PCIE_CORE_MSI_PAYLOAD		0x489c
#define LMI_CFG				0x6000
#define  LMI_CFG_LTSSM_VAL(x)			(((x) >> 24) & 0x3f)
#define  LMI_CFG_LTSSM_L0			0x10
#define LMI_DEBUG_CTRL			0x6208
#define  LMI_DEBUG_CTRL_DIS_ORD_CHK		(1 << 30)
#define CTRL_CORE_CONFIG		0x18000
#define  CTRL_CORE_CONFIG_MODE_DIRECT		(0 << 0)
#define  CTRL_CORE_CONFIG_MODE_COMMAND		(1 << 0)
#define  CTRL_CORE_CONFIG_MODE_MASK		(1 << 0)
#define HOST_CTRL_INT_STATUS		0x1b000
#define HOST_CTRL_INT_MASK		0x1b004
#define  HOST_CTRL_INT_MASK_CORE_INT		(1 << 16)
#define  HOST_CTRL_INT_MASK_ALL			0xfff0fb

/* Register access helpers for the controller's MMIO window. */
#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
135 
/*
 * A single-segment, coherent DMA allocation together with its map
 * and kernel mapping (see mvkpcie_dmamem_alloc()).
 */
struct mvkpcie_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded DMA map */
	bus_dma_segment_t	mdm_seg;	/* backing segment */
	size_t			mdm_size;	/* size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual mapping */
};

/* Convenience accessors for the map, size, device address and KVA. */
#define MVKPCIE_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVKPCIE_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVKPCIE_DMA_DVA(_mdm)	((uint64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVKPCIE_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
147 
/* Per-handler state for both INTx and MSI interrupts. */
struct intrhand {
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;	/* interrupt statistics */
	char *ih_name;			/* device name for evcount */
	void *ih_sc;			/* back pointer to softc */
};
157 
/* One decoded entry of the FDT "ranges" translation property. */
struct mvkpcie_range {
	uint32_t		flags;		/* PCI address space flags */
	uint64_t		pci_base;	/* address on the PCI bus */
	uint64_t		phys_base;	/* corresponding CPU address */
	uint64_t		size;		/* window size in bytes */
};
164 
struct mvkpcie_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	/* CPU/PCI addresses and sizes of the I/O and memory windows. */
	bus_addr_t		sc_io_base;
	bus_addr_t		sc_io_bus_addr;
	bus_size_t		sc_io_size;
	bus_addr_t		sc_mem_base;
	bus_addr_t		sc_mem_bus_addr;
	bus_size_t		sc_mem_size;

	int			sc_node;
	int			sc_acells;
	int			sc_scells;
	int			sc_pacells;
	int			sc_pscells;
	struct mvkpcie_range	*sc_ranges;	/* decoded "ranges" entries */
	int			sc_nranges;

	/* Bus space tags handed to child devices; they translate
	 * PCI bus addresses through sc_ranges on map. */
	struct bus_space	sc_bus_iot;
	struct bus_space	sc_bus_memt;

	struct machine_pci_chipset sc_pc;
	int			sc_bus;		/* primary (root) bus number */

	/* Shadow registers for the emulated PCI-PCI bridge config space. */
	uint32_t		sc_bridge_command;
	uint32_t		sc_bridge_businfo;
	uint32_t		sc_bridge_iostatus;
	uint32_t		sc_bridge_io_hi;
	uint32_t		sc_bridge_mem;

	struct interrupt_controller sc_ic;	/* legacy INTx controller */
	struct intrhand		*sc_intx_handlers[4];
	struct interrupt_controller sc_msi_ic;	/* MSI controller */
	struct intrhand		*sc_msi_handlers[32];
	struct mvkpcie_dmamem	*sc_msi_addr;	/* MSI doorbell DMA buffer */
	void			*sc_ih;		/* parent FDT interrupt */
	int			sc_ipl;		/* IPL of sc_ih */
};
206 
int mvkpcie_match(struct device *, void *, void *);
void mvkpcie_attach(struct device *, struct device *, void *);

/* autoconf(9) glue */
struct cfattach mvkpcie_ca = {
	sizeof (struct mvkpcie_softc), mvkpcie_match, mvkpcie_attach
};

struct cfdriver mvkpcie_cd = {
	NULL, "mvkpcie", DV_DULL
};
217 
/* Match the Armada 3700 PCIe controller node in the device tree. */
int
mvkpcie_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "marvell,armada-3700-pcie");
}
225 
int	mvkpcie_link_up(struct mvkpcie_softc *);

/* pci_chipset_tag_t (config space) methods */
void	mvkpcie_attach_hook(struct device *, struct device *,
	    struct pcibus_attach_args *);
int	mvkpcie_bus_maxdevs(void *, int);
pcitag_t mvkpcie_make_tag(void *, int, int, int);
void	mvkpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
int	mvkpcie_conf_size(void *, pcitag_t);
pcireg_t mvkpcie_conf_read(void *, pcitag_t, int);
void	mvkpcie_conf_write(void *, pcitag_t, int, pcireg_t);
int	mvkpcie_probe_device_hook(void *, struct pci_attach_args *);

/* PCI interrupt mapping/establishment methods */
int	mvkpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
const char *mvkpcie_intr_string(void *, pci_intr_handle_t);
void	*mvkpcie_intr_establish(void *, pci_intr_handle_t, int,
	    struct cpu_info *, int (*)(void *), void *, char *);
void	mvkpcie_intr_disestablish(void *, void *);

/* bus_space mapping hooks translating PCI bus addresses */
int	mvkpcie_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
int	mvkpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);

/* Built-in INTx/MSI interrupt controller */
int	mvkpcie_intc_intr(void *);
void	*mvkpcie_intc_intr_establish(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	mvkpcie_intc_intr_disestablish(void *);
void	*mvkpcie_intc_intr_establish_msi(void *, uint64_t *, uint64_t *,
	    int , struct cpu_info *, int (*)(void *), void *, char *);
void	mvkpcie_intc_intr_disestablish_msi(void *);
void	mvkpcie_intc_intr_barrier(void *);
void	mvkpcie_intc_recalc_ipl(struct mvkpcie_softc *);

struct mvkpcie_dmamem *mvkpcie_dmamem_alloc(struct mvkpcie_softc *, bus_size_t,
	    bus_size_t);
void	mvkpcie_dmamem_free(struct mvkpcie_softc *, struct mvkpcie_dmamem *);
262 
/*
 * Attach the Armada 3700 PCIe root complex: parse the FDT "ranges"
 * property, bring the controller up in root-complex mode, train the
 * link, set up the emulated PCI-PCI bridge state and attach the PCI
 * bus, then register the built-in INTx and MSI interrupt controllers.
 */
void
mvkpcie_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct pcibus_attach_args pba;
	uint32_t *reset_gpio;
	ssize_t reset_gpiolen;
	bus_addr_t iobase, iolimit;
	bus_addr_t membase, memlimit;
	uint32_t bus_range[2];
	uint32_t *ranges;
	int i, j, nranges, rangeslen;
	pcireg_t csr, bir, blr;
	uint32_t reg;
	int node;
	int timo;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;

	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	/* "ranges" must be a whole number of (child, parent, size) tuples. */
	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	     (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	     sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	/* DMA buffer endpoints target with MSI writes (16-bit payload). */
	sc->sc_msi_addr = mvkpcie_dmamem_alloc(sc, sizeof(uint16_t),
	    sizeof(uint64_t));
	if (sc->sc_msi_addr == NULL) {
		printf(": cannot allocate MSI address\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges,
	    sizeof(struct mvkpcie_range), M_TEMP, M_WAITOK);
	sc->sc_nranges = nranges;

	/*
	 * Decode each tuple; the first child cell holds the PCI address
	 * space flags, the remaining cells are 32 or 64-bit addresses.
	 */
	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct mvkpcie_range));
		printf(": can't map ctrl registers\n");
		return;
	}

	printf("\n");

	pinctrl_byname(sc->sc_node, "default");

	clock_set_assigned(sc->sc_node);
	clock_enable_all(sc->sc_node);

	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
	if (reset_gpiolen > 0) {
		/* Link training needs to be disabled during PCIe reset. */
		HCLR4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_LINK_TRAINING);

		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
		    reset_gpiolen);

		/* Issue PCIe reset. */
		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(reset_gpio, 1);
		delay(10000);
		gpio_controller_set_pin(reset_gpio, 0);

		free(reset_gpio, M_TEMP, reset_gpiolen);
	}

	/* Put the controller in direct (root complex) mode. */
	reg = HREAD4(sc, CTRL_CORE_CONFIG);
	reg &= ~CTRL_CORE_CONFIG_MODE_MASK;
	reg |= CTRL_CORE_CONFIG_MODE_DIRECT;
	HWRITE4(sc, CTRL_CORE_CONFIG, reg);

	HSET4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_IS_RC);

	HWRITE4(sc, PCIE_ERR_CAPCTL,
	    PCIE_ERR_CAPCTL_ECRC_CHK_TX |
	    PCIE_ERR_CAPCTL_ECRC_CHK_TX_EN |
	    PCIE_ERR_CAPCTL_ECRC_CHCK |
	    PCIE_ERR_CAPCTL_ECRC_CHCK_RCV);

	HWRITE4(sc, PCIE_DEV_CTRL_STATS,
	    PCIE_DEV_CTRL_STATS_MAX_PAYLOAD_7 |
	    PCIE_DEV_CTRL_STATS_MAX_RD_REQ_SZ);

	HWRITE4(sc, PCIE_CORE_CTRL2,
	    PCIE_CORE_CTRL2_RESERVED |
	    PCIE_CORE_CTRL2_TD_ENABLE);

	reg = HREAD4(sc, LMI_DEBUG_CTRL);
	reg |= LMI_DEBUG_CTRL_DIS_ORD_CHK;
	HWRITE4(sc, LMI_DEBUG_CTRL, reg);

	/* Advertise Gen 2 speed on a single lane. */
	reg = HREAD4(sc, PCIE_CORE_CTRL0);
	reg &= ~PCIE_CORE_CTRL0_GEN_MASK;
	reg |= PCIE_CORE_CTRL0_GEN_2;
	HWRITE4(sc, PCIE_CORE_CTRL0, reg);

	reg = HREAD4(sc, PCIE_CORE_CTRL0);
	reg &= ~PCIE_CORE_CTRL0_LANE_MASK;
	reg |= PCIE_CORE_CTRL0_LANE_1;
	HWRITE4(sc, PCIE_CORE_CTRL0, reg);

	HSET4(sc, PCIE_CORE_CTRL2, PCIE_CORE_CTRL2_MSI_ENABLE);

	/* Ack any stale interrupts, then unmask only MSI and core. */
	HWRITE4(sc, PCIE_CORE_ISR0_STATUS, PCIE_CORE_ISR0_MASK_ALL);
	HWRITE4(sc, PCIE_CORE_ISR1_STATUS, PCIE_CORE_ISR1_MASK_ALL);
	HWRITE4(sc, HOST_CTRL_INT_STATUS, HOST_CTRL_INT_MASK_ALL);

	HWRITE4(sc, PCIE_CORE_ISR0_MASK, PCIE_CORE_ISR0_MASK_ALL &
	    ~PCIE_CORE_ISR0_MASK_MSI_INT);
	HWRITE4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_ALL);
	HWRITE4(sc, PCIE_CORE_MSI_MASK, 0);
	HWRITE4(sc, HOST_CTRL_INT_MASK, HOST_CTRL_INT_MASK_ALL &
	    ~HOST_CTRL_INT_MASK_CORE_INT);

	HSET4(sc, PCIE_CORE_CTRL2, PCIE_CORE_CTRL2_OB_WIN_ENABLE);
	HSET4(sc, PIO_CTRL, PIO_CTRL_ADDR_WIN_DISABLE);

	/* Give the endpoint time to come out of reset. */
	delay(100 * 1000);

	HSET4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_LINK_TRAINING);
	HSET4(sc, PCIE_LINK_CTRL_STAT, PCIE_LINK_CTRL_STAT_LINK_TRAINING);

	/* Wait up to 40ms for the LTSSM to reach L0. */
	for (timo = 40; timo > 0; timo--) {
		if (mvkpcie_link_up(sc))
			break;
		delay(1000);
	}
	if (timo == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	HWRITE4(sc, PCIE_LINK_CTRL_STAT,
	    PCIE_LINK_CTRL_STAT_LINK_L0S_ENTRY |
	    PCIE_LINK_CTRL_STAT_LINK_WIDTH_1);

	HSET4(sc, PCIE_CMD, PCI_COMMAND_IO_ENABLE |
	    PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	/* Point the MSI doorbell at our DMA buffer. */
	HWRITE4(sc, PCIE_CORE_MSI_ADDR_LOW,
	    MVKPCIE_DMA_DVA(sc->sc_msi_addr) & 0xffffffff);
	HWRITE4(sc, PCIE_CORE_MSI_ADDR_HIGH,
	    MVKPCIE_DMA_DVA(sc->sc_msi_addr) >> 32);

	/* Set up address translation for I/O space. */
	sc->sc_io_bus_addr = sc->sc_mem_bus_addr = -1;
	for (i = 0; i < sc->sc_nranges; i++) {
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_io_base = sc->sc_ranges[i].phys_base;
			sc->sc_io_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_io_size = sc->sc_ranges[i].size;
		}
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_mem_base = sc->sc_ranges[i].phys_base;
			sc->sc_mem_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_mem_size = sc->sc_ranges[i].size;
		}
	}

	/* Set up bus range. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range) ||
	    bus_range[0] >= 256 || bus_range[1] >= 256) {
		bus_range[0] = 0;
		bus_range[1] = 255;
	}
	sc->sc_bus = bus_range[0];

	/* Initialize command/status. */
	csr = PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_io_size > 0)
		csr |= PCI_COMMAND_IO_ENABLE;
	if (sc->sc_mem_size > 0)
		csr |= PCI_COMMAND_MEM_ENABLE;
	sc->sc_bridge_command = csr;

	/* Initialize bus range. */
	bir = bus_range[0];
	bir |= ((bus_range[0] + 1) << 8);
	bir |= (bus_range[1] << 16);
	sc->sc_bridge_businfo = bir;

	/* Initialize I/O window. */
	iobase = sc->sc_io_bus_addr;
	iolimit = iobase + sc->sc_io_size - 1;
	blr = (iolimit & PPB_IO_MASK) | (PPB_IO_32BIT << PPB_IOLIMIT_SHIFT);
	blr |= ((iobase & PPB_IO_MASK) >> PPB_IO_SHIFT) | PPB_IO_32BIT;
	sc->sc_bridge_iostatus = blr;
	blr = (iobase & 0xffff0000) >> 16;
	blr |= iolimit & 0xffff0000;
	sc->sc_bridge_io_hi = blr;

	/* Initialize memory mapped I/O window. */
	membase = sc->sc_mem_bus_addr;
	memlimit = membase + sc->sc_mem_size - 1;
	blr = memlimit & PPB_MEM_MASK;
	blr |= (membase >> PPB_MEM_SHIFT);
	sc->sc_bridge_mem = blr;

	/* Clone the parent tags, hooking our address-translating maps. */
	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = mvkpcie_bs_iomap;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = mvkpcie_bs_memmap;

	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = mvkpcie_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = mvkpcie_bus_maxdevs;
	sc->sc_pc.pc_make_tag = mvkpcie_make_tag;
	sc->sc_pc.pc_decompose_tag = mvkpcie_decompose_tag;
	sc->sc_pc.pc_conf_size = mvkpcie_conf_size;
	sc->sc_pc.pc_conf_read = mvkpcie_conf_read;
	sc->sc_pc.pc_conf_write = mvkpcie_conf_write;
	sc->sc_pc.pc_probe_device_hook = mvkpcie_probe_device_hook;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = mvkpcie_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = mvkpcie_intr_string;
	sc->sc_pc.pc_intr_establish = mvkpcie_intr_establish;
	sc->sc_pc.pc_intr_disestablish = mvkpcie_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = faa->fa_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
	pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	/* Register the legacy INTx interrupt controller, if described. */
	node = OF_getnodebyname(faa->fa_node, "interrupt-controller");
	if (node) {
		sc->sc_ic.ic_node = node;
		sc->sc_ic.ic_cookie = self;
		sc->sc_ic.ic_establish = mvkpcie_intc_intr_establish;
		sc->sc_ic.ic_disestablish = mvkpcie_intc_intr_disestablish;
		arm_intr_register_fdt(&sc->sc_ic);
	}

	/* Register the MSI interrupt controller. */
	sc->sc_msi_ic.ic_node = faa->fa_node;
	sc->sc_msi_ic.ic_cookie = self;
	sc->sc_msi_ic.ic_establish_msi = mvkpcie_intc_intr_establish_msi;
	sc->sc_msi_ic.ic_disestablish = mvkpcie_intc_intr_disestablish_msi;
	sc->sc_msi_ic.ic_barrier = mvkpcie_intc_intr_barrier;
	arm_intr_register_fdt(&sc->sc_msi_ic);

	config_found(self, &pba, NULL);
}
564 
565 int
566 mvkpcie_link_up(struct mvkpcie_softc *sc)
567 {
568 	uint32_t reg;
569 
570 	reg = HREAD4(sc, LMI_CFG);
571 	return LMI_CFG_LTSSM_VAL(reg) >= LMI_CFG_LTSSM_L0;
572 }
573 
/* Chipset attach hook; nothing to do for this controller. */
void
mvkpcie_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}
579 
580 int
581 mvkpcie_bus_maxdevs(void *v, int bus)
582 {
583 	struct mvkpcie_softc *sc = v;
584 
585 	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
586 		return 1;
587 	return 32;
588 }
589 
590 pcitag_t
591 mvkpcie_make_tag(void *v, int bus, int device, int function)
592 {
593 	return ((bus << 20) | (device << 15) | (function << 12));
594 }
595 
596 void
597 mvkpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
598 {
599 	if (bp != NULL)
600 		*bp = (tag >> 20) & 0xff;
601 	if (dp != NULL)
602 		*dp = (tag >> 15) & 0x1f;
603 	if (fp != NULL)
604 		*fp = (tag >> 12) & 0x7;
605 }
606 
/* All devices get the full extended (PCIe) config space. */
int
mvkpcie_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}
612 
/*
 * Config space reads for the emulated root-port bridge.  The
 * controller has no real type-1 header, so synthesize one from the
 * shadow registers computed at attach time.  Unhandled registers
 * read as zero.
 */
pcireg_t
mvkpcie_conf_read_bridge(struct mvkpcie_softc *sc, int reg)
{
	switch (reg) {
	case PCI_ID_REG:
		/* Marvell vendor ID; device ID from the hardware. */
		return PCI_VENDOR_MARVELL |
		    (HREAD4(sc, PCIE_DEV_ID) & 0xffff0000);
	case PCI_COMMAND_STATUS_REG:
		return sc->sc_bridge_command;
	case PCI_CLASS_REG:
		/* PCI-PCI bridge class; revision from the hardware. */
		return PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
		    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT |
		    (HREAD4(sc, PCIE_DEV_REV) & 0xff);
	case PCI_BHLC_REG:
		/* Type-1 header, 64-byte cache line. */
		return 1 << PCI_HDRTYPE_SHIFT |
		    0x10 << PCI_CACHELINE_SHIFT;
	case PPB_REG_BUSINFO:
		return sc->sc_bridge_businfo;
	case PPB_REG_IOSTATUS:
		return sc->sc_bridge_iostatus;
	case PPB_REG_MEM:
		return sc->sc_bridge_mem;
	case PPB_REG_IO_HI:
		return sc->sc_bridge_io_hi;
	case PPB_REG_PREFMEM:
	case PPB_REG_PREFBASE_HI32:
	case PPB_REG_PREFLIM_HI32:
	case PPB_REG_BRIDGECONTROL:
		/* No prefetchable window, no bridge control. */
		return 0;
	default:
		break;
	}
	return 0;
}
647 
/* Config space writes for the emulated root-port bridge. */
void
mvkpcie_conf_write_bridge(struct mvkpcie_softc *sc, int reg, pcireg_t data)
{
	/* Treat emulated bridge registers as read-only. */
}
653 
/*
 * Read a config space register through the PIO engine.  Accesses to
 * the root bus are redirected to the emulated bridge.  Type 0 reads
 * go to the device directly behind the root port; anything further
 * downstream uses type 1.  Returns all-ones on timeout, like a
 * master abort.
 */
pcireg_t
mvkpcie_conf_read(void *v, pcitag_t tag, int off)
{
	struct mvkpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg;
	int i;

	mvkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		return mvkpcie_conf_read_bridge(sc, off);
	}

	/* Idle the engine, clear the completion interrupt. */
	HWRITE4(sc, PIO_START, PIO_START_STOP);
	HWRITE4(sc, PIO_ISR, PIO_ISR_CLEAR);
	reg = HREAD4(sc, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus == sc->sc_bus + 1)
		reg |= PIO_CTRL_TYPE_RD0;
	else
		reg |= PIO_CTRL_TYPE_RD1;
	HWRITE4(sc, PIO_CTRL, reg);
	HWRITE4(sc, PIO_ADDR_LS, tag | off);
	HWRITE4(sc, PIO_ADDR_MS, 0);
	HWRITE4(sc, PIO_WR_DATA_STRB, PIO_WR_DATA_STRB_VALUE);
	HWRITE4(sc, PIO_START, PIO_START_START);

	/* Poll up to ~1ms for the transaction to complete. */
	for (i = 500; i > 0; i--) {
		if (HREAD4(sc, PIO_START) == 0 &&
		    HREAD4(sc, PIO_ISR) != 0)
			break;
		delay(2);
	}
	if (i == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return 0xffffffff;
	}

	return HREAD4(sc, PIO_RD_DATA);
}
695 
/*
 * Write a config space register through the PIO engine; the mirror
 * image of mvkpcie_conf_read().  Writes to the root bus hit the
 * (read-only) emulated bridge.  A timeout is reported but otherwise
 * silently dropped.
 */
void
mvkpcie_conf_write(void *v, pcitag_t tag, int off, pcireg_t data)
{
	struct mvkpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg;
	int i;

	mvkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		mvkpcie_conf_write_bridge(sc, off, data);
		return;
	}

	/* Idle the engine, clear the completion interrupt. */
	HWRITE4(sc, PIO_START, PIO_START_STOP);
	HWRITE4(sc, PIO_ISR, PIO_ISR_CLEAR);
	reg = HREAD4(sc, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus == sc->sc_bus + 1)
		reg |= PIO_CTRL_TYPE_WR0;
	else
		reg |= PIO_CTRL_TYPE_WR1;
	HWRITE4(sc, PIO_CTRL, reg);
	HWRITE4(sc, PIO_ADDR_LS, tag | off);
	HWRITE4(sc, PIO_ADDR_MS, 0);
	HWRITE4(sc, PIO_WR_DATA, data);
	HWRITE4(sc, PIO_WR_DATA_STRB, PIO_WR_DATA_STRB_VALUE);
	HWRITE4(sc, PIO_START, PIO_START_START);

	/* Poll up to ~1ms for the transaction to complete. */
	for (i = 500; i > 0; i--) {
		if (HREAD4(sc, PIO_START) == 0 &&
		    HREAD4(sc, PIO_ISR) != 0)
			break;
		delay(2);
	}
	if (i == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return;
	}
}
737 
/* Per-device probe hook; no quirks needed, accept all devices. */
int
mvkpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
{
	return 0;
}
743 
/*
 * Map a device's legacy interrupt pin to an INTx handle.  Returns
 * -1 if the device has no interrupt pin or an invalid tag.
 */
int
mvkpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
{
	int pin = pa->pa_rawintrpin;

	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
		return -1;

	if (pa->pa_tag == 0)
		return -1;

	ihp->ih_pc = pa->pa_pc;
	ihp->ih_tag = pa->pa_intrtag;
	ihp->ih_intrpin = pa->pa_intrpin;
	ihp->ih_type = PCI_INTX;

	return 0;
}
762 
763 const char *
764 mvkpcie_intr_string(void *v, pci_intr_handle_t ih)
765 {
766 	switch (ih.ih_type) {
767 	case PCI_MSI:
768 		return "msi";
769 	case PCI_MSIX:
770 		return "msix";
771 	}
772 
773 	return "intx";
774 }
775 
/*
 * Establish a PCI interrupt.  MSI/MSI-X vectors are allocated from
 * our built-in MSI controller and programmed into the device; INTx
 * interrupts are routed through the FDT interrupt map.
 */
void *
mvkpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct mvkpcie_softc *sc = v;
	void *cookie;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		uint64_t addr, data;

		/* Assume hardware passes Requester ID as sideband data. */
		data = pci_requester_id(ih.ih_pc, ih.ih_tag);
		cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
		    &data, level, ci, func, arg, (void *)name);
		if (cookie == NULL)
			return NULL;

		/* TODO: translate address to the PCI device's view */

		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		int bus, dev, fn;
		uint32_t reg[4];

		mvkpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

		/* Build the interrupt-map lookup key: phys.hi + pin. */
		reg[0] = bus << 16 | dev << 11 | fn << 8;
		reg[1] = reg[2] = 0;
		reg[3] = ih.ih_intrpin;

		cookie = fdt_intr_establish_imap_cpu(sc->sc_node, reg,
		    sizeof(reg), level, ci, func, arg, name);
	}

	return cookie;
}
818 
/* Tearing down PCI interrupts is not implemented. */
void
mvkpcie_intr_disestablish(void *v, void *cookie)
{
	panic("%s", __func__);
}
824 
825 int
826 mvkpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
827     int flags, bus_space_handle_t *bshp)
828 {
829 	struct mvkpcie_softc *sc = t->bus_private;
830 	int i;
831 
832 	for (i = 0; i < sc->sc_nranges; i++) {
833 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
834 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
835 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
836 
837 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
838 		    addr >= pci_start && addr + size <= pci_end) {
839 			return bus_space_map(sc->sc_iot,
840 			    addr - pci_start + phys_start, size, flags, bshp);
841 		}
842 	}
843 
844 	return ENXIO;
845 }
846 
847 int
848 mvkpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
849     int flags, bus_space_handle_t *bshp)
850 {
851 	struct mvkpcie_softc *sc = t->bus_private;
852 	int i;
853 
854 	for (i = 0; i < sc->sc_nranges; i++) {
855 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
856 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
857 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
858 
859 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
860 		    addr >= pci_start && addr + size <= pci_end) {
861 			return bus_space_map(sc->sc_iot,
862 			    addr - pci_start + phys_start, size, flags, bshp);
863 		}
864 	}
865 
866 	return ENXIO;
867 }
868 
/*
 * Core interrupt handler.  Demultiplexes the single parent interrupt
 * into MSI vectors (via the doorbell payload register) and the four
 * legacy INTx sources, dispatching each to its established handler
 * at the handler's own IPL.
 */
int
mvkpcie_intc_intr(void *cookie)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
	struct intrhand *ih;
	uint32_t pending;
	int i, s;

	if (!(HREAD4(sc, HOST_CTRL_INT_STATUS) & HOST_CTRL_INT_MASK_CORE_INT))
		return 0;

	if (HREAD4(sc, PCIE_CORE_ISR0_STATUS) & PCIE_CORE_ISR0_MASK_MSI_INT) {
		pending = HREAD4(sc, PCIE_CORE_MSI_STATUS);
		while (pending) {
			/* Ack each pending MSI bit before dispatch. */
			i = ffs(pending) - 1;
			HWRITE4(sc, PCIE_CORE_MSI_STATUS, (1 << i));
			pending &= ~(1 << i);

			/* The vector number arrives in the payload register. */
			i = HREAD4(sc, PCIE_CORE_MSI_PAYLOAD) & 0xff;
			if ((ih = sc->sc_msi_handlers[i]) != NULL) {
				s = splraise(ih->ih_ipl);
				if (ih->ih_func(ih->ih_arg))
					ih->ih_count.ec_count++;
				splx(s);
			}
		}
		HWRITE4(sc, PCIE_CORE_ISR0_STATUS, PCIE_CORE_ISR0_MASK_MSI_INT);
	}

	pending = HREAD4(sc, PCIE_CORE_ISR1_STATUS);
	for (i = 0; i < nitems(sc->sc_intx_handlers); i++) {
		if (pending & PCIE_CORE_ISR1_MASK_INTX(i)) {
			if ((ih = sc->sc_intx_handlers[i]) != NULL) {
				s = splraise(ih->ih_ipl);
				if (ih->ih_func(ih->ih_arg))
					ih->ih_count.ec_count++;
				splx(s);
			}
		}
	}
	HWRITE4(sc, PCIE_CORE_ISR1_STATUS, pending);

	/* Finally ack the core interrupt itself. */
	HWRITE4(sc, HOST_CTRL_INT_STATUS, HOST_CTRL_INT_MASK_CORE_INT);
	return 1;
}
914 
915 void *
916 mvkpcie_intc_intr_establish(void *cookie, int *cell, int level,
917     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
918 {
919 	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
920 	struct intrhand *ih;
921 	int irq = cell[0];
922 	int s;
923 
924 	if (ci != NULL && !CPU_IS_PRIMARY(ci))
925 		return NULL;
926 
927 	if (irq < 0 || irq > nitems(sc->sc_intx_handlers))
928 		return NULL;
929 
930 	/* Don't allow shared interrupts for now. */
931 	if (sc->sc_intx_handlers[irq])
932 		return NULL;
933 
934 	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
935 	ih->ih_func = func;
936 	ih->ih_arg = arg;
937 	ih->ih_ipl = level & IPL_IRQMASK;
938 	ih->ih_irq = irq;
939 	ih->ih_name = name;
940 	ih->ih_sc = sc;
941 
942 	s = splhigh();
943 
944 	sc->sc_intx_handlers[irq] = ih;
945 
946 	if (name != NULL)
947 		evcount_attach(&ih->ih_count, name, &ih->ih_irq);
948 
949 	mvkpcie_intc_recalc_ipl(sc);
950 
951 	splx(s);
952 
953 	HCLR4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_INTX(irq));
954 
955 	return (ih);
956 }
957 
/* Tear down a legacy INTx handler established above. */
void
mvkpcie_intc_intr_disestablish(void *cookie)
{
	struct intrhand *ih = cookie;
	struct mvkpcie_softc *sc = ih->ih_sc;
	int s;

	/* Mask the source before removing its handler. */
	HSET4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_INTX(ih->ih_irq));

	s = splhigh();

	sc->sc_intx_handlers[ih->ih_irq] = NULL;
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));

	/* The parent interrupt IPL may drop now. */
	mvkpcie_intc_recalc_ipl(sc);

	splx(s);
}
978 
/*
 * Allocate an MSI vector: pick the first free slot, install the
 * handler and hand the caller the doorbell address and vector
 * number to program into the device.  Returns NULL when all 32
 * vectors are in use or a non-primary CPU is requested.
 */
void *
mvkpcie_intc_intr_establish_msi(void *cookie, uint64_t *addr, uint64_t *data,
    int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
	struct intrhand *ih;
	int i, s;

	if (ci != NULL && !CPU_IS_PRIMARY(ci))
		return NULL;

	/* Find a free vector. */
	for (i = 0; i < nitems(sc->sc_msi_handlers); i++) {
		if (sc->sc_msi_handlers[i] == NULL)
			break;
	}

	if (i == nitems(sc->sc_msi_handlers))
		return NULL;

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_irq = i;
	ih->ih_name = name;
	ih->ih_sc = sc;

	s = splhigh();

	sc->sc_msi_handlers[i] = ih;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	mvkpcie_intc_recalc_ipl(sc);

	/* MSI target is the doorbell DMA address; data is the vector. */
	*addr = MVKPCIE_DMA_DVA(sc->sc_msi_addr);
	*data = i;

	splx(s);
	return (ih);
}
1021 
/* Release an MSI vector allocated above. */
void
mvkpcie_intc_intr_disestablish_msi(void *cookie)
{
	struct intrhand *ih = cookie;
	struct mvkpcie_softc *sc = ih->ih_sc;
	int s;

	s = splhigh();

	sc->sc_msi_handlers[ih->ih_irq] = NULL;
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF, sizeof(*ih));

	/* The parent interrupt IPL may drop now. */
	mvkpcie_intc_recalc_ipl(sc);

	splx(s);
}
1040 
/*
 * Wait for any in-flight invocation of an MSI handler to finish by
 * barriering on the shared parent interrupt.
 */
void
mvkpcie_intc_intr_barrier(void *cookie)
{
	struct intrhand *ih = cookie;
	struct mvkpcie_softc *sc = ih->ih_sc;

	intr_barrier(sc->sc_ih);
}
1049 
1050 void
1051 mvkpcie_intc_recalc_ipl(struct mvkpcie_softc *sc)
1052 {
1053 	struct intrhand *ih;
1054 	int max = IPL_NONE;
1055 	int min = IPL_HIGH;
1056 	int irq;
1057 
1058 	for (irq = 0; irq < nitems(sc->sc_intx_handlers); irq++) {
1059 		ih = sc->sc_intx_handlers[irq];
1060 		if (ih == NULL)
1061 			continue;
1062 
1063 		if (ih->ih_ipl > max)
1064 			max = ih->ih_ipl;
1065 
1066 		if (ih->ih_ipl < min)
1067 			min = ih->ih_ipl;
1068 	}
1069 
1070 	for (irq = 0; irq < nitems(sc->sc_msi_handlers); irq++) {
1071 		ih = sc->sc_msi_handlers[irq];
1072 		if (ih == NULL)
1073 			continue;
1074 
1075 		if (ih->ih_ipl > max)
1076 			max = ih->ih_ipl;
1077 
1078 		if (ih->ih_ipl < min)
1079 			min = ih->ih_ipl;
1080 	}
1081 
1082 	if (max == IPL_NONE)
1083 		min = IPL_NONE;
1084 
1085 	if (sc->sc_ipl != max) {
1086 		sc->sc_ipl = max;
1087 
1088 		if (sc->sc_ih != NULL)
1089 			fdt_intr_disestablish(sc->sc_ih);
1090 
1091 		if (sc->sc_ipl != IPL_NONE)
1092 			sc->sc_ih = fdt_intr_establish(sc->sc_node, sc->sc_ipl,
1093 			    mvkpcie_intc_intr, sc, sc->sc_dev.dv_xname);
1094 	}
1095 }
1096 
/*
 * Allocate "size" bytes of single-segment, coherent DMA memory with
 * the given alignment, map it into the kernel and load a DMA map.
 * Only used for the MSI doorbell buffer: endpoints write their
 * 16-bit MSI payload to its device address.  Returns NULL on
 * failure, releasing everything acquired so far.
 */
struct mvkpcie_dmamem *
mvkpcie_dmamem_alloc(struct mvkpcie_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvkpcie_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof(*mdm));

	return (NULL);
}
1138 
/*
 * Release everything mvkpcie_dmamem_alloc() acquired, in reverse
 * order: unmap the KVA, free the segment, destroy the map, then
 * free the bookkeeping structure.
 */
void
mvkpcie_dmamem_free(struct mvkpcie_softc *sc, struct mvkpcie_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof(*mdm));
}
1147