xref: /openbsd/sys/dev/fdt/mvkpcie.c (revision 56d02c00)
1 /*	$OpenBSD: mvkpcie.c,v 1.14 2024/02/03 10:37:26 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
4  * Copyright (c) 2020 Patrick Wildt <patrick@blueri.se>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/device.h>
22 #include <sys/extent.h>
23 #include <sys/malloc.h>
24 #include <sys/evcount.h>
25 
26 #include <machine/intr.h>
27 #include <machine/bus.h>
28 #include <machine/fdt.h>
29 
30 #include <dev/pci/pcidevs.h>
31 #include <dev/pci/pcireg.h>
32 #include <dev/pci/pcivar.h>
33 #include <dev/pci/ppbreg.h>
34 
35 #include <dev/ofw/openfirm.h>
36 #include <dev/ofw/ofw_clock.h>
37 #include <dev/ofw/ofw_gpio.h>
38 #include <dev/ofw/ofw_misc.h>
39 #include <dev/ofw/ofw_pinctrl.h>
40 #include <dev/ofw/ofw_power.h>
41 #include <dev/ofw/fdt.h>
42 
/*
 * Registers.
 *
 * Offsets below 0x4000 are the root port's own PCIe capability/config
 * registers; 0x4000-0x402x is the PIO engine used for config space
 * accesses; 0x48xx is the PCIe core control/interrupt block; 0x6xxx is
 * the LMI block; 0x18000/0x1bxxx are the host controller blocks.
 */
#define PCIE_DEV_ID			0x0000
#define PCIE_CMD			0x0004
#define PCIE_DEV_REV			0x0008
#define PCIE_DEV_CTRL_STATS		0x00c8
#define  PCIE_DEV_CTRL_STATS_SNOOP		(1 << 1)
#define  PCIE_DEV_CTRL_STATS_RELAX_ORDER	(1 << 4)
#define  PCIE_DEV_CTRL_STATS_MAX_PAYLOAD_7	(0x7 << 5)
#define  PCIE_DEV_CTRL_STATS_MAX_RD_REQ_SZ	(0x2 << 12)
#define PCIE_LINK_CTRL_STAT		0x00d0
#define  PCIE_LINK_CTRL_STAT_LINK_L0S_ENTRY	(1 << 0)
#define  PCIE_LINK_CTRL_STAT_LINK_TRAINING	(1 << 5)
#define  PCIE_LINK_CTRL_STAT_LINK_WIDTH_1	(1 << 20)
#define PCIE_ERR_CAPCTL			0x0118
#define  PCIE_ERR_CAPCTL_ECRC_CHK_TX		(1 << 5)
#define  PCIE_ERR_CAPCTL_ECRC_CHK_TX_EN		(1 << 6)
#define  PCIE_ERR_CAPCTL_ECRC_CHCK		(1 << 7)
#define  PCIE_ERR_CAPCTL_ECRC_CHCK_RCV		(1 << 8)
/* PIO engine: performs one config read/write transaction at a time. */
#define PIO_CTRL			0x4000
#define  PIO_CTRL_TYPE_MASK			(0xf << 0)
#define  PIO_CTRL_TYPE_RD0			(0x8 << 0)
#define  PIO_CTRL_TYPE_RD1			(0x9 << 0)
#define  PIO_CTRL_TYPE_WR0			(0xa << 0)
#define  PIO_CTRL_TYPE_WR1			(0xb << 0)
#define  PIO_CTRL_ADDR_WIN_DISABLE		(1 << 24)
#define PIO_STAT			0x4004
#define  PIO_STAT_COMP_STATUS			(0x7 << 7)
#define PIO_ADDR_LS			0x4008
#define PIO_ADDR_MS			0x400c
#define PIO_WR_DATA			0x4010
#define PIO_WR_DATA_STRB		0x4014
#define  PIO_WR_DATA_STRB_VALUE			0xf
#define PIO_RD_DATA			0x4018
#define PIO_START			0x401c
#define  PIO_START_STOP				(0 << 0)
#define  PIO_START_START			(1 << 0)
#define PIO_ISR				0x4020
#define  PIO_ISR_CLEAR				(1 << 0)
#define PIO_ISRM			0x4024
/* PCIe core control and interrupt status/mask registers. */
#define PCIE_CORE_CTRL0			0x4800
#define  PCIE_CORE_CTRL0_GEN_1			(0 << 0)
#define  PCIE_CORE_CTRL0_GEN_2			(1 << 0)
#define  PCIE_CORE_CTRL0_GEN_3			(2 << 0)
#define  PCIE_CORE_CTRL0_GEN_MASK		(0x3 << 0)
#define  PCIE_CORE_CTRL0_IS_RC			(1 << 2)
#define  PCIE_CORE_CTRL0_LANE_1			(0 << 3)
#define  PCIE_CORE_CTRL0_LANE_2			(1 << 3)
#define  PCIE_CORE_CTRL0_LANE_4			(2 << 3)
#define  PCIE_CORE_CTRL0_LANE_8			(3 << 3)
#define  PCIE_CORE_CTRL0_LANE_MASK		(0x3 << 3)
#define  PCIE_CORE_CTRL0_LINK_TRAINING		(1 << 6)
#define PCIE_CORE_CTRL2			0x4808
#define  PCIE_CORE_CTRL2_RESERVED		(0x7 << 0)
#define  PCIE_CORE_CTRL2_TD_ENABLE		(1 << 4)
#define  PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	(1 << 5)
#define  PCIE_CORE_CTRL2_OB_WIN_ENABLE		(1 << 6)
#define  PCIE_CORE_CTRL2_MSI_ENABLE		(1 << 10)
#define PCIE_CORE_ISR0_STATUS		0x4840
#define PCIE_CORE_ISR0_MASK		0x4844
#define  PCIE_CORE_ISR0_MASK_MSI_INT		(1 << 24)
#define  PCIE_CORE_ISR0_MASK_ALL		0x07ffffff
#define PCIE_CORE_ISR1_STATUS		0x4848
#define PCIE_CORE_ISR1_MASK		0x484c
#define  PCIE_CORE_ISR1_MASK_ALL		0x00000ff0
#define  PCIE_CORE_ISR1_MASK_INTX(x)		(1 << (x + 8))
#define PCIE_CORE_MSI_ADDR_LOW		0x4850
#define PCIE_CORE_MSI_ADDR_HIGH		0x4854
#define PCIE_CORE_MSI_STATUS		0x4858
#define PCIE_CORE_MSI_MASK		0x485c
#define PCIE_CORE_MSI_PAYLOAD		0x489c
#define LMI_CFG				0x6000
#define  LMI_CFG_LTSSM_VAL(x)			(((x) >> 24) & 0x3f)
#define  LMI_CFG_LTSSM_L0			0x10
#define LMI_DEBUG_CTRL			0x6208
#define  LMI_DEBUG_CTRL_DIS_ORD_CHK		(1 << 30)
#define CTRL_CORE_CONFIG		0x18000
#define  CTRL_CORE_CONFIG_MODE_DIRECT		(0 << 0)
#define  CTRL_CORE_CONFIG_MODE_COMMAND		(1 << 0)
#define  CTRL_CORE_CONFIG_MODE_MASK		(1 << 0)
#define HOST_CTRL_INT_STATUS		0x1b000
#define HOST_CTRL_INT_MASK		0x1b004
#define  HOST_CTRL_INT_MASK_CORE_INT		(1 << 16)
#define  HOST_CTRL_INT_MASK_ALL			0xfff0fb

/* Register access helpers operating on the softc's bus space handle. */
#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
135 
/*
 * A single-segment DMA-safe allocation; used for the MSI doorbell
 * address that the controller compares inbound writes against.
 */
struct mvkpcie_dmamem {
	bus_dmamap_t		mdm_map;	/* loaded DMA map */
	bus_dma_segment_t	mdm_seg;	/* the single segment */
	size_t			mdm_size;	/* allocation size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual mapping */
};

/* Accessors for a struct mvkpcie_dmamem. */
#define MVKPCIE_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVKPCIE_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVKPCIE_DMA_DVA(_mdm)	((uint64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVKPCIE_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
147 
/*
 * Per-interrupt handler state for both the INTx and MSI interrupt
 * controllers implemented by this driver.
 */
struct intrhand {
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_irq;			/* IRQ number */
	struct evcount	ih_count;	/* interrupt statistics */
	char *ih_name;			/* device name for evcount */
	void *ih_sc;			/* back pointer to the softc */
};
157 
/*
 * One decoded entry of the FDT "ranges" property: a PCI-to-CPU
 * address translation window.
 */
struct mvkpcie_range {
	uint32_t		flags;		/* phys.hi cell (space code etc.) */
	uint64_t		pci_base;	/* PCI bus address */
	uint64_t		phys_base;	/* CPU physical address */
	uint64_t		size;		/* window size */
};
164 
/* Driver instance state. */
struct mvkpcie_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;		/* controller register tag */
	bus_space_handle_t	sc_ioh;		/* controller register handle */
	bus_dma_tag_t		sc_dmat;

	/* Windows picked out of sc_ranges for I/O and memory space. */
	bus_addr_t		sc_io_base;
	bus_addr_t		sc_io_bus_addr;
	bus_size_t		sc_io_size;
	bus_addr_t		sc_mem_base;
	bus_addr_t		sc_mem_bus_addr;
	bus_size_t		sc_mem_size;

	int			sc_node;	/* FDT node */
	int			sc_acells;	/* our #address-cells */
	int			sc_scells;	/* our #size-cells */
	int			sc_pacells;	/* parent #address-cells */
	int			sc_pscells;	/* parent #size-cells */
	struct mvkpcie_range	*sc_ranges;	/* decoded "ranges" */
	int			sc_nranges;

	/* Copies of the parent bus space tags with our map functions. */
	struct bus_space	sc_bus_iot;
	struct bus_space	sc_bus_memt;

	struct machine_pci_chipset sc_pc;	/* chipset tag we export */
	int			sc_bus;		/* root bus number */

	/* Shadow registers for the emulated (read-only) host bridge. */
	uint32_t		sc_bridge_command;
	uint32_t		sc_bridge_businfo;
	uint32_t		sc_bridge_iostatus;
	uint32_t		sc_bridge_io_hi;
	uint32_t		sc_bridge_mem;

	/* Legacy INTx and MSI interrupt controllers. */
	struct interrupt_controller sc_ic;
	struct intrhand		*sc_intx_handlers[4];
	struct interrupt_controller sc_msi_ic;
	struct intrhand		*sc_msi_handlers[32];
	struct mvkpcie_dmamem	*sc_msi_addr;	/* MSI doorbell target */
	void			*sc_ih;
	int			sc_ipl;
};
206 
/* Autoconf glue. */
int mvkpcie_match(struct device *, void *, void *);
void mvkpcie_attach(struct device *, struct device *, void *);

const struct cfattach mvkpcie_ca = {
	sizeof (struct mvkpcie_softc), mvkpcie_match, mvkpcie_attach
};

struct cfdriver mvkpcie_cd = {
	NULL, "mvkpcie", DV_DULL
};
217 
/* Match the Armada 3700 (Aardvark) PCIe controller node. */
int
mvkpcie_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "marvell,armada-3700-pcie");
}
225 
int	mvkpcie_link_up(struct mvkpcie_softc *);

/* pci_chipset configuration access methods. */
void	mvkpcie_attach_hook(struct device *, struct device *,
	    struct pcibus_attach_args *);
int	mvkpcie_bus_maxdevs(void *, int);
pcitag_t mvkpcie_make_tag(void *, int, int, int);
void	mvkpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
int	mvkpcie_conf_size(void *, pcitag_t);
pcireg_t mvkpcie_conf_read(void *, pcitag_t, int);
void	mvkpcie_conf_write(void *, pcitag_t, int, pcireg_t);
int	mvkpcie_probe_device_hook(void *, struct pci_attach_args *);

/* pci_chipset interrupt methods. */
int	mvkpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
const char *mvkpcie_intr_string(void *, pci_intr_handle_t);
void	*mvkpcie_intr_establish(void *, pci_intr_handle_t, int,
	    struct cpu_info *, int (*)(void *), void *, char *);
void	mvkpcie_intr_disestablish(void *, void *);

/* bus_space map methods translating PCI addresses via "ranges". */
int	mvkpcie_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
int	mvkpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);

/* Built-in INTx/MSI interrupt controller methods. */
int	mvkpcie_intc_intr(void *);
void	*mvkpcie_intc_intr_establish(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	mvkpcie_intc_intr_disestablish(void *);
void	*mvkpcie_intc_intr_establish_msi(void *, uint64_t *, uint64_t *,
	    int , struct cpu_info *, int (*)(void *), void *, char *);
void	mvkpcie_intc_intr_disestablish_msi(void *);
void	mvkpcie_intc_intr_barrier(void *);
void	mvkpcie_intc_recalc_ipl(struct mvkpcie_softc *);

struct mvkpcie_dmamem *mvkpcie_dmamem_alloc(struct mvkpcie_softc *, bus_size_t,
	    bus_size_t);
void	mvkpcie_dmamem_free(struct mvkpcie_softc *, struct mvkpcie_dmamem *);
262 
/*
 * Attach: decode the FDT properties, bring up the PCIe link and
 * attach the PCI bus.  The hardware register sequence below follows a
 * fixed bring-up order; do not reorder.
 */
void
mvkpcie_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)self;
	struct fdt_attach_args *faa = aux;
	struct pcibus_attach_args pba;
	uint32_t *reset_gpio;
	ssize_t reset_gpiolen;
	bus_addr_t iobase, iolimit;
	bus_addr_t membase, memlimit;
	uint32_t bus_range[2];
	uint32_t *ranges;
	int i, j, nranges, rangeslen;
	pcireg_t csr, bir, blr;
	uint32_t reg;
	int node;
	int timo;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;

	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	/* "ranges" must be a whole number of (child, parent, size) tuples. */
	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	     (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	     sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	/* DMA-safe doorbell location the controller matches MSI writes on. */
	sc->sc_msi_addr = mvkpcie_dmamem_alloc(sc, sizeof(uint16_t),
	    sizeof(uint64_t));
	if (sc->sc_msi_addr == NULL) {
		printf(": cannot allocate MSI address\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges,
	    sizeof(struct mvkpcie_range), M_TEMP, M_WAITOK);
	sc->sc_nranges = nranges;

	/*
	 * Decode each range.  The first child cell is the PCI "phys.hi"
	 * flags word, so the child address proper is sc_acells - 1 cells.
	 */
	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct mvkpcie_range));
		printf(": can't map ctrl registers\n");
		return;
	}

	printf("\n");

	pinctrl_byname(sc->sc_node, "default");

	clock_set_assigned(sc->sc_node);
	clock_enable_all(sc->sc_node);

	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
	if (reset_gpiolen > 0) {
		/* Link training needs to be disabled during PCIe reset. */
		HCLR4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_LINK_TRAINING);

		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
		    reset_gpiolen);

		/* Issue PCIe reset. */
		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(reset_gpio, 1);
		delay(10000);
		gpio_controller_set_pin(reset_gpio, 0);

		free(reset_gpio, M_TEMP, reset_gpiolen);
	}

	/* Put the controller in direct (non-command) mode. */
	reg = HREAD4(sc, CTRL_CORE_CONFIG);
	reg &= ~CTRL_CORE_CONFIG_MODE_MASK;
	reg |= CTRL_CORE_CONFIG_MODE_DIRECT;
	HWRITE4(sc, CTRL_CORE_CONFIG, reg);

	/* Operate as a root complex. */
	HSET4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_IS_RC);

	HWRITE4(sc, PCIE_ERR_CAPCTL,
	    PCIE_ERR_CAPCTL_ECRC_CHK_TX |
	    PCIE_ERR_CAPCTL_ECRC_CHK_TX_EN |
	    PCIE_ERR_CAPCTL_ECRC_CHCK |
	    PCIE_ERR_CAPCTL_ECRC_CHCK_RCV);

	HWRITE4(sc, PCIE_DEV_CTRL_STATS,
	    PCIE_DEV_CTRL_STATS_MAX_PAYLOAD_7 |
	    PCIE_DEV_CTRL_STATS_MAX_RD_REQ_SZ);

	HWRITE4(sc, PCIE_CORE_CTRL2,
	    PCIE_CORE_CTRL2_RESERVED |
	    PCIE_CORE_CTRL2_TD_ENABLE);

	reg = HREAD4(sc, LMI_DEBUG_CTRL);
	reg |= LMI_DEBUG_CTRL_DIS_ORD_CHK;
	HWRITE4(sc, LMI_DEBUG_CTRL, reg);

	/* Gen2, single lane. */
	reg = HREAD4(sc, PCIE_CORE_CTRL0);
	reg &= ~PCIE_CORE_CTRL0_GEN_MASK;
	reg |= PCIE_CORE_CTRL0_GEN_2;
	HWRITE4(sc, PCIE_CORE_CTRL0, reg);

	reg = HREAD4(sc, PCIE_CORE_CTRL0);
	reg &= ~PCIE_CORE_CTRL0_LANE_MASK;
	reg |= PCIE_CORE_CTRL0_LANE_1;
	HWRITE4(sc, PCIE_CORE_CTRL0, reg);

	HSET4(sc, PCIE_CORE_CTRL2, PCIE_CORE_CTRL2_MSI_ENABLE);

	/* Clear all pending interrupts (status regs are write-1-to-clear). */
	HWRITE4(sc, PCIE_CORE_ISR0_STATUS, PCIE_CORE_ISR0_MASK_ALL);
	HWRITE4(sc, PCIE_CORE_ISR1_STATUS, PCIE_CORE_ISR1_MASK_ALL);
	HWRITE4(sc, HOST_CTRL_INT_STATUS, HOST_CTRL_INT_MASK_ALL);

	/* Mask everything except the MSI summary and core interrupt. */
	HWRITE4(sc, PCIE_CORE_ISR0_MASK, PCIE_CORE_ISR0_MASK_ALL &
	    ~PCIE_CORE_ISR0_MASK_MSI_INT);
	HWRITE4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_ALL);
	HWRITE4(sc, PCIE_CORE_MSI_MASK, 0);
	HWRITE4(sc, HOST_CTRL_INT_MASK, HOST_CTRL_INT_MASK_ALL &
	    ~HOST_CTRL_INT_MASK_CORE_INT);

	HSET4(sc, PCIE_CORE_CTRL2, PCIE_CORE_CTRL2_OB_WIN_ENABLE);
	HSET4(sc, PIO_CTRL, PIO_CTRL_ADDR_WIN_DISABLE);

	delay(100 * 1000);

	/* Start link training and wait up to 40ms for the link. */
	HSET4(sc, PCIE_CORE_CTRL0, PCIE_CORE_CTRL0_LINK_TRAINING);
	HSET4(sc, PCIE_LINK_CTRL_STAT, PCIE_LINK_CTRL_STAT_LINK_TRAINING);

	for (timo = 40; timo > 0; timo--) {
		if (mvkpcie_link_up(sc))
			break;
		delay(1000);
	}
	if (timo == 0) {
		/*
		 * NOTE(review): sc_ranges, sc_msi_addr and the register
		 * mapping are not released on this failure path — confirm
		 * whether that leak is acceptable for a one-shot attach.
		 */
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	HWRITE4(sc, PCIE_LINK_CTRL_STAT,
	    PCIE_LINK_CTRL_STAT_LINK_L0S_ENTRY |
	    PCIE_LINK_CTRL_STAT_LINK_WIDTH_1);

	HSET4(sc, PCIE_CMD, PCI_COMMAND_IO_ENABLE |
	    PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE);

	/* Point MSI writes at our doorbell allocation. */
	HWRITE4(sc, PCIE_CORE_MSI_ADDR_LOW,
	    MVKPCIE_DMA_DVA(sc->sc_msi_addr) & 0xffffffff);
	HWRITE4(sc, PCIE_CORE_MSI_ADDR_HIGH,
	    MVKPCIE_DMA_DVA(sc->sc_msi_addr) >> 32);

	/* Set up address translation for I/O space. */
	sc->sc_io_bus_addr = sc->sc_mem_bus_addr = -1;
	for (i = 0; i < sc->sc_nranges; i++) {
		/* 0x01000000: PCI I/O space; 0x02000000: 32-bit memory. */
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_io_base = sc->sc_ranges[i].phys_base;
			sc->sc_io_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_io_size = sc->sc_ranges[i].size;
		}
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_mem_base = sc->sc_ranges[i].phys_base;
			sc->sc_mem_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_mem_size = sc->sc_ranges[i].size;
		}
	}

	/* Set up bus range. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range) ||
	    bus_range[0] >= 256 || bus_range[1] >= 256) {
		bus_range[0] = 0;
		bus_range[1] = 255;
	}
	sc->sc_bus = bus_range[0];

	/* Initialize command/status. */
	csr = PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_io_size > 0)
		csr |= PCI_COMMAND_IO_ENABLE;
	if (sc->sc_mem_size > 0)
		csr |= PCI_COMMAND_MEM_ENABLE;
	sc->sc_bridge_command = csr;

	/* Initialize bus range. */
	bir = bus_range[0];
	bir |= ((bus_range[0] + 1) << 8);
	bir |= (bus_range[1] << 16);
	sc->sc_bridge_businfo = bir;

	/* Initialize I/O window. */
	iobase = sc->sc_io_bus_addr;
	iolimit = iobase + sc->sc_io_size - 1;
	blr = (iolimit & PPB_IO_MASK) | (PPB_IO_32BIT << PPB_IOLIMIT_SHIFT);
	blr |= ((iobase & PPB_IO_MASK) >> PPB_IO_SHIFT) | PPB_IO_32BIT;
	sc->sc_bridge_iostatus = blr;
	blr = (iobase & 0xffff0000) >> 16;
	blr |= iolimit & 0xffff0000;
	sc->sc_bridge_io_hi = blr;

	/* Initialize memory mapped I/O window. */
	membase = sc->sc_mem_bus_addr;
	memlimit = membase + sc->sc_mem_size - 1;
	blr = memlimit & PPB_MEM_MASK;
	blr |= (membase >> PPB_MEM_SHIFT);
	sc->sc_bridge_mem = blr;

	/* Clone the parent tags with our window-translating map methods. */
	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = mvkpcie_bs_iomap;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = mvkpcie_bs_memmap;

	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = mvkpcie_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = mvkpcie_bus_maxdevs;
	sc->sc_pc.pc_make_tag = mvkpcie_make_tag;
	sc->sc_pc.pc_decompose_tag = mvkpcie_decompose_tag;
	sc->sc_pc.pc_conf_size = mvkpcie_conf_size;
	sc->sc_pc.pc_conf_read = mvkpcie_conf_read;
	sc->sc_pc.pc_conf_write = mvkpcie_conf_write;
	sc->sc_pc.pc_probe_device_hook = mvkpcie_probe_device_hook;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = mvkpcie_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = mvkpcie_intr_string;
	sc->sc_pc.pc_intr_establish = mvkpcie_intr_establish;
	sc->sc_pc.pc_intr_disestablish = mvkpcie_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = faa->fa_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;
	pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	/* Register the legacy INTx interrupt controller, if described. */
	node = OF_getnodebyname(faa->fa_node, "interrupt-controller");
	if (node) {
		sc->sc_ic.ic_node = node;
		sc->sc_ic.ic_cookie = self;
		sc->sc_ic.ic_establish = mvkpcie_intc_intr_establish;
		sc->sc_ic.ic_disestablish = mvkpcie_intc_intr_disestablish;
		arm_intr_register_fdt(&sc->sc_ic);
	}

	/* Register the MSI interrupt controller. */
	sc->sc_msi_ic.ic_node = faa->fa_node;
	sc->sc_msi_ic.ic_cookie = self;
	sc->sc_msi_ic.ic_establish_msi = mvkpcie_intc_intr_establish_msi;
	sc->sc_msi_ic.ic_disestablish = mvkpcie_intc_intr_disestablish_msi;
	sc->sc_msi_ic.ic_barrier = mvkpcie_intc_intr_barrier;
	arm_intr_register_fdt(&sc->sc_msi_ic);

	config_found(self, &pba, NULL);
}
565 
/*
 * Return non-zero once the LTSSM has reached L0 (or a later state),
 * i.e. the PCIe link is trained and usable.
 */
int
mvkpcie_link_up(struct mvkpcie_softc *sc)
{
	uint32_t reg;

	reg = HREAD4(sc, LMI_CFG);
	return LMI_CFG_LTSSM_VAL(reg) >= LMI_CFG_LTSSM_L0;
}
574 
/* Chipset attach hook: nothing to do for this controller. */
void
mvkpcie_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}
580 
581 int
mvkpcie_bus_maxdevs(void * v,int bus)582 mvkpcie_bus_maxdevs(void *v, int bus)
583 {
584 	struct mvkpcie_softc *sc = v;
585 
586 	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
587 		return 1;
588 	return 32;
589 }
590 
591 pcitag_t
mvkpcie_make_tag(void * v,int bus,int device,int function)592 mvkpcie_make_tag(void *v, int bus, int device, int function)
593 {
594 	return ((bus << 20) | (device << 15) | (function << 12));
595 }
596 
597 void
mvkpcie_decompose_tag(void * v,pcitag_t tag,int * bp,int * dp,int * fp)598 mvkpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
599 {
600 	if (bp != NULL)
601 		*bp = (tag >> 20) & 0xff;
602 	if (dp != NULL)
603 		*dp = (tag >> 15) & 0x1f;
604 	if (fp != NULL)
605 		*fp = (tag >> 12) & 0x7;
606 }
607 
/* All devices get the full extended (4K) PCIe config space. */
int
mvkpcie_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}
613 
/*
 * Config read for the emulated root-port bridge.  The hardware does
 * not expose a real PCI-PCI bridge header, so we synthesize one from
 * shadow registers set up at attach time; unknown registers read as 0.
 */
pcireg_t
mvkpcie_conf_read_bridge(struct mvkpcie_softc *sc, int reg)
{
	switch (reg) {
	case PCI_ID_REG:
		/* Marvell vendor ID, device ID from the hardware. */
		return PCI_VENDOR_MARVELL |
		    (HREAD4(sc, PCIE_DEV_ID) & 0xffff0000);
	case PCI_COMMAND_STATUS_REG:
		return sc->sc_bridge_command;
	case PCI_CLASS_REG:
		/* PCI-PCI bridge class, revision from the hardware. */
		return PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
		    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT |
		    (HREAD4(sc, PCIE_DEV_REV) & 0xff);
	case PCI_BHLC_REG:
		/* Type 1 header. */
		return 1 << PCI_HDRTYPE_SHIFT |
		    0x10 << PCI_CACHELINE_SHIFT;
	case PPB_REG_BUSINFO:
		return sc->sc_bridge_businfo;
	case PPB_REG_IOSTATUS:
		return sc->sc_bridge_iostatus;
	case PPB_REG_MEM:
		return sc->sc_bridge_mem;
	case PPB_REG_IO_HI:
		return sc->sc_bridge_io_hi;
	case PPB_REG_PREFMEM:
	case PPB_REG_PREFBASE_HI32:
	case PPB_REG_PREFLIM_HI32:
	case PPB_REG_BRIDGECONTROL:
		/* No prefetchable window, no bridge control. */
		return 0;
	default:
		break;
	}
	return 0;
}
648 
void
mvkpcie_conf_write_bridge(struct mvkpcie_softc *sc, int reg, pcireg_t data)
{
	/* Treat emulated bridge registers as read-only. */
}
654 
/*
 * Config space read.  Accesses to the root bus hit the emulated
 * bridge; everything else goes through the PIO engine, which performs
 * one transaction at a time: program type/address, kick PIO_START and
 * poll for completion.  Type 0 is used for the bus directly behind the
 * root port, type 1 for buses further downstream.
 */
pcireg_t
mvkpcie_conf_read(void *v, pcitag_t tag, int off)
{
	struct mvkpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg;
	int i;

	mvkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		return mvkpcie_conf_read_bridge(sc, off);
	}

	/* Reset the engine and clear any stale completion status. */
	HWRITE4(sc, PIO_START, PIO_START_STOP);
	HWRITE4(sc, PIO_ISR, PIO_ISR_CLEAR);
	reg = HREAD4(sc, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus == sc->sc_bus + 1)
		reg |= PIO_CTRL_TYPE_RD0;
	else
		reg |= PIO_CTRL_TYPE_RD1;
	HWRITE4(sc, PIO_CTRL, reg);
	HWRITE4(sc, PIO_ADDR_LS, tag | off);
	HWRITE4(sc, PIO_ADDR_MS, 0);
	HWRITE4(sc, PIO_WR_DATA_STRB, PIO_WR_DATA_STRB_VALUE);
	HWRITE4(sc, PIO_START, PIO_START_START);

	/* Poll up to ~1ms for the transaction to complete. */
	for (i = 500; i > 0; i--) {
		if (HREAD4(sc, PIO_START) == 0 &&
		    HREAD4(sc, PIO_ISR) != 0)
			break;
		delay(2);
	}
	if (i == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return 0xffffffff;
	}

	return HREAD4(sc, PIO_RD_DATA);
}
696 
/*
 * Config space write.  Mirrors mvkpcie_conf_read(): root-bus accesses
 * go to the (read-only) emulated bridge, everything else is a PIO
 * engine write transaction with all four byte strobes enabled.
 */
void
mvkpcie_conf_write(void *v, pcitag_t tag, int off, pcireg_t data)
{
	struct mvkpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t reg;
	int i;

	mvkpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		mvkpcie_conf_write_bridge(sc, off, data);
		return;
	}

	/* Reset the engine and clear any stale completion status. */
	HWRITE4(sc, PIO_START, PIO_START_STOP);
	HWRITE4(sc, PIO_ISR, PIO_ISR_CLEAR);
	reg = HREAD4(sc, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (bus == sc->sc_bus + 1)
		reg |= PIO_CTRL_TYPE_WR0;
	else
		reg |= PIO_CTRL_TYPE_WR1;
	HWRITE4(sc, PIO_CTRL, reg);
	HWRITE4(sc, PIO_ADDR_LS, tag | off);
	HWRITE4(sc, PIO_ADDR_MS, 0);
	HWRITE4(sc, PIO_WR_DATA, data);
	HWRITE4(sc, PIO_WR_DATA_STRB, PIO_WR_DATA_STRB_VALUE);
	HWRITE4(sc, PIO_START, PIO_START_START);

	/* Poll up to ~1ms for the transaction to complete. */
	for (i = 500; i > 0; i--) {
		if (HREAD4(sc, PIO_START) == 0 &&
		    HREAD4(sc, PIO_ISR) != 0)
			break;
		delay(2);
	}
	if (i == 0) {
		printf("%s: timeout\n", sc->sc_dev.dv_xname);
		return;
	}
}
738 
/* Per-device probe hook: no special handling required. */
int
mvkpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
{
	return 0;
}
744 
/*
 * Map a legacy INTx interrupt.  Records the tag and pin; the actual
 * routing is resolved through the FDT interrupt map at establish time.
 * Returns -1 if the device has no interrupt pin.
 */
int
mvkpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
{
	int pin = pa->pa_rawintrpin;

	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
		return -1;

	if (pa->pa_tag == 0)
		return -1;

	ihp->ih_pc = pa->pa_pc;
	ihp->ih_tag = pa->pa_intrtag;
	ihp->ih_intrpin = pa->pa_intrpin;
	ihp->ih_type = PCI_INTX;

	return 0;
}
763 
764 const char *
mvkpcie_intr_string(void * v,pci_intr_handle_t ih)765 mvkpcie_intr_string(void *v, pci_intr_handle_t ih)
766 {
767 	switch (ih.ih_type) {
768 	case PCI_MSI:
769 		return "msi";
770 	case PCI_MSIX:
771 		return "msix";
772 	}
773 
774 	return "intx";
775 }
776 
/*
 * Establish a PCI interrupt.  MSI/MSI-X handles are routed through the
 * FDT MSI controller (our own sc_msi_ic); INTx handles are resolved
 * through the FDT "interrupt-map" using the device's bus/dev/fn and
 * pin.  Returns an opaque cookie, or NULL on failure.
 */
void *
mvkpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct mvkpcie_softc *sc = v;
	void *cookie;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		uint64_t addr = 0, data;

		/* Assume hardware passes Requester ID as sideband data. */
		data = pci_requester_id(ih.ih_pc, ih.ih_tag);
		cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
		    &data, level, ci, func, arg, (void *)name);
		if (cookie == NULL)
			return NULL;

		/* TODO: translate address to the PCI device's view */

		/* Program the device's MSI/MSI-X capability with the vector. */
		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		int bus, dev, fn;
		uint32_t reg[4];

		mvkpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

		/* Standard PCI interrupt-map key: phys.hi + pin. */
		reg[0] = bus << 16 | dev << 11 | fn << 8;
		reg[1] = reg[2] = 0;
		reg[3] = ih.ih_intrpin;

		cookie = fdt_intr_establish_imap_cpu(sc->sc_node, reg,
		    sizeof(reg), level, ci, func, arg, name);
	}

	return cookie;
}
819 
/* Tearing down PCI interrupts is not implemented. */
void
mvkpcie_intr_disestablish(void *v, void *cookie)
{
	panic("%s", __func__);
}
825 
/*
 * bus_space map method for PCI I/O space: translate the PCI bus
 * address to a CPU physical address through the matching I/O range
 * (space code 0x01 in the phys.hi flags) and map it with the parent
 * tag.  Returns ENXIO if no range covers the request.
 */
int
mvkpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct mvkpcie_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}
847 
/*
 * bus_space map method for PCI memory space: same as
 * mvkpcie_bs_iomap() but matches 32-bit memory ranges (space code
 * 0x02 in the phys.hi flags).
 */
int
mvkpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp)
{
	struct mvkpcie_softc *sc = t->bus_private;
	int i;

	for (i = 0; i < sc->sc_nranges; i++) {
		uint64_t pci_start = sc->sc_ranges[i].pci_base;
		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
		uint64_t phys_start = sc->sc_ranges[i].phys_base;

		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    addr >= pci_start && addr + size <= pci_end) {
			return bus_space_map(sc->sc_iot,
			    addr - pci_start + phys_start, size, flags, bshp);
		}
	}

	return ENXIO;
}
869 
/*
 * Top-level interrupt handler: demultiplex MSI and INTx interrupts and
 * dispatch to the registered handlers.  Returns 0 if the interrupt was
 * not ours.
 */
int
mvkpcie_intc_intr(void *cookie)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
	struct intrhand *ih;
	uint32_t pending;
	int i, s;

	if (!(HREAD4(sc, HOST_CTRL_INT_STATUS) & HOST_CTRL_INT_MASK_CORE_INT))
		return 0;

	/* MSI: drain the per-vector status, reading the payload for each. */
	if (HREAD4(sc, PCIE_CORE_ISR0_STATUS) & PCIE_CORE_ISR0_MASK_MSI_INT) {
		pending = HREAD4(sc, PCIE_CORE_MSI_STATUS);
		while (pending) {
			i = ffs(pending) - 1;
			/* Write-1-to-clear before reading the payload. */
			HWRITE4(sc, PCIE_CORE_MSI_STATUS, (1 << i));
			pending &= ~(1 << i);

			/* The payload is the vector number we handed out. */
			i = HREAD4(sc, PCIE_CORE_MSI_PAYLOAD) & 0xff;
			if ((ih = sc->sc_msi_handlers[i]) != NULL) {
				s = splraise(ih->ih_ipl);
				if (ih->ih_func(ih->ih_arg))
					ih->ih_count.ec_count++;
				splx(s);
			}
		}
		HWRITE4(sc, PCIE_CORE_ISR0_STATUS, PCIE_CORE_ISR0_MASK_MSI_INT);
	}

	/* Legacy INTx: one status bit per pin. */
	pending = HREAD4(sc, PCIE_CORE_ISR1_STATUS);
	for (i = 0; i < nitems(sc->sc_intx_handlers); i++) {
		if (pending & PCIE_CORE_ISR1_MASK_INTX(i)) {
			if ((ih = sc->sc_intx_handlers[i]) != NULL) {
				s = splraise(ih->ih_ipl);
				if (ih->ih_func(ih->ih_arg))
					ih->ih_count.ec_count++;
				splx(s);
			}
		}
	}
	HWRITE4(sc, PCIE_CORE_ISR1_STATUS, pending);

	/* Acknowledge the summary interrupt last. */
	HWRITE4(sc, HOST_CTRL_INT_STATUS, HOST_CTRL_INT_MASK_CORE_INT);
	return 1;
}
915 
/*
 * Establish a legacy INTx handler.  cell[0] is the INTx pin index
 * (0-3).  Only the primary CPU is supported and pins cannot be
 * shared.  Returns the handler cookie or NULL on failure.
 */
void *
mvkpcie_intc_intr_establish(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
	struct intrhand *ih;
	int irq = cell[0];
	int s;

	if (ci != NULL && !CPU_IS_PRIMARY(ci))
		return NULL;

	if (irq < 0 || irq >= nitems(sc->sc_intx_handlers))
		return NULL;

	/* Don't allow shared interrupts for now. */
	if (sc->sc_intx_handlers[irq])
		return NULL;

	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_irq = irq;
	ih->ih_name = name;
	ih->ih_sc = sc;

	s = splhigh();

	sc->sc_intx_handlers[irq] = ih;

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

	/* The parent interrupt may need to be re-established at a new IPL. */
	mvkpcie_intc_recalc_ipl(sc);

	splx(s);

	/* Unmask the pin in hardware. */
	HCLR4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_INTX(irq));

	return (ih);
}
958 
959 void
mvkpcie_intc_intr_disestablish(void * cookie)960 mvkpcie_intc_intr_disestablish(void *cookie)
961 {
962 	struct intrhand *ih = cookie;
963 	struct mvkpcie_softc *sc = ih->ih_sc;
964 	int s;
965 
966 	HSET4(sc, PCIE_CORE_ISR1_MASK, PCIE_CORE_ISR1_MASK_INTX(ih->ih_irq));
967 
968 	s = splhigh();
969 
970 	sc->sc_intx_handlers[ih->ih_irq] = NULL;
971 	if (ih->ih_name != NULL)
972 		evcount_detach(&ih->ih_count);
973 	free(ih, M_DEVBUF, sizeof(*ih));
974 
975 	mvkpcie_intc_recalc_ipl(sc);
976 
977 	splx(s);
978 }
979 
980 void *
mvkpcie_intc_intr_establish_msi(void * cookie,uint64_t * addr,uint64_t * data,int level,struct cpu_info * ci,int (* func)(void *),void * arg,char * name)981 mvkpcie_intc_intr_establish_msi(void *cookie, uint64_t *addr, uint64_t *data,
982     int level, struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
983 {
984 	struct mvkpcie_softc *sc = (struct mvkpcie_softc *)cookie;
985 	struct intrhand *ih;
986 	int i, s;
987 
988 	if (ci != NULL && !CPU_IS_PRIMARY(ci))
989 		return NULL;
990 
991 	for (i = 0; i < nitems(sc->sc_msi_handlers); i++) {
992 		if (sc->sc_msi_handlers[i] == NULL)
993 			break;
994 	}
995 
996 	if (i == nitems(sc->sc_msi_handlers))
997 		return NULL;
998 
999 	ih = malloc(sizeof(*ih), M_DEVBUF, M_WAITOK);
1000 	ih->ih_func = func;
1001 	ih->ih_arg = arg;
1002 	ih->ih_ipl = level & IPL_IRQMASK;
1003 	ih->ih_irq = i;
1004 	ih->ih_name = name;
1005 	ih->ih_sc = sc;
1006 
1007 	s = splhigh();
1008 
1009 	sc->sc_msi_handlers[i] = ih;
1010 
1011 	if (name != NULL)
1012 		evcount_attach(&ih->ih_count, name, &ih->ih_irq);
1013 
1014 	mvkpcie_intc_recalc_ipl(sc);
1015 
1016 	*addr = MVKPCIE_DMA_DVA(sc->sc_msi_addr);
1017 	*data = i;
1018 
1019 	splx(s);
1020 	return (ih);
1021 }
1022 
1023 void
mvkpcie_intc_intr_disestablish_msi(void * cookie)1024 mvkpcie_intc_intr_disestablish_msi(void *cookie)
1025 {
1026 	struct intrhand *ih = cookie;
1027 	struct mvkpcie_softc *sc = ih->ih_sc;
1028 	int s;
1029 
1030 	s = splhigh();
1031 
1032 	sc->sc_msi_handlers[ih->ih_irq] = NULL;
1033 	if (ih->ih_name != NULL)
1034 		evcount_detach(&ih->ih_count);
1035 	free(ih, M_DEVBUF, sizeof(*ih));
1036 
1037 	mvkpcie_intc_recalc_ipl(sc);
1038 
1039 	splx(s);
1040 }
1041 
1042 void
mvkpcie_intc_intr_barrier(void * cookie)1043 mvkpcie_intc_intr_barrier(void *cookie)
1044 {
1045 	struct intrhand *ih = cookie;
1046 	struct mvkpcie_softc *sc = ih->ih_sc;
1047 
1048 	intr_barrier(sc->sc_ih);
1049 }
1050 
1051 void
mvkpcie_intc_recalc_ipl(struct mvkpcie_softc * sc)1052 mvkpcie_intc_recalc_ipl(struct mvkpcie_softc *sc)
1053 {
1054 	struct intrhand *ih;
1055 	int max = IPL_NONE;
1056 	int min = IPL_HIGH;
1057 	int irq;
1058 
1059 	for (irq = 0; irq < nitems(sc->sc_intx_handlers); irq++) {
1060 		ih = sc->sc_intx_handlers[irq];
1061 		if (ih == NULL)
1062 			continue;
1063 
1064 		if (ih->ih_ipl > max)
1065 			max = ih->ih_ipl;
1066 
1067 		if (ih->ih_ipl < min)
1068 			min = ih->ih_ipl;
1069 	}
1070 
1071 	for (irq = 0; irq < nitems(sc->sc_msi_handlers); irq++) {
1072 		ih = sc->sc_msi_handlers[irq];
1073 		if (ih == NULL)
1074 			continue;
1075 
1076 		if (ih->ih_ipl > max)
1077 			max = ih->ih_ipl;
1078 
1079 		if (ih->ih_ipl < min)
1080 			min = ih->ih_ipl;
1081 	}
1082 
1083 	if (max == IPL_NONE)
1084 		min = IPL_NONE;
1085 
1086 	if (sc->sc_ipl != max) {
1087 		sc->sc_ipl = max;
1088 
1089 		if (sc->sc_ih != NULL)
1090 			fdt_intr_disestablish(sc->sc_ih);
1091 
1092 		if (sc->sc_ipl != IPL_NONE)
1093 			sc->sc_ih = fdt_intr_establish(sc->sc_node, sc->sc_ipl,
1094 			    mvkpcie_intc_intr, sc, sc->sc_dev.dv_xname);
1095 	}
1096 }
1097 
/*
 * DMA memory allocation; only needed for the 16-bit MSI address.
 * Allocates a single physically contiguous, zeroed segment of `size'
 * bytes aligned to `align', maps it coherently into kernel virtual
 * address space (mdm_kva) and loads it into a DMA map so the device
 * address is available via MVKPCIE_DMA_DVA().  Returns NULL on
 * failure.  The caller owns the result and frees it with
 * mvkpcie_dmamem_free().
 */
struct mvkpcie_dmamem *
mvkpcie_dmamem_alloc(struct mvkpcie_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvkpcie_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	/* Request exactly one segment so the buffer is contiguous. */
	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

	/* Unwind in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof(*mdm));

	return (NULL);
}
1139 
/*
 * Free a buffer obtained from mvkpcie_dmamem_alloc(), releasing the
 * resources in reverse order of acquisition.
 *
 * NOTE(review): the map is destroyed without a preceding
 * bus_dmamap_unload(); presumably harmless for this driver since the
 * buffer lives for the controller's lifetime — confirm against
 * bus_dmamap_destroy(9) expectations.
 */
void
mvkpcie_dmamem_free(struct mvkpcie_softc *sc, struct mvkpcie_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof(*mdm));
}
1148