1 /*	$OpenBSD: dwpcie.c,v 1.53 2024/03/29 12:45:13 kettenis Exp $	*/
2 /*
3  * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/device.h>
21 #include <sys/evcount.h>
22 #include <sys/extent.h>
23 #include <sys/malloc.h>
24 
25 #include <machine/intr.h>
26 #include <machine/bus.h>
27 #include <machine/fdt.h>
28 
29 #include <dev/pci/pcidevs.h>
30 #include <dev/pci/pcireg.h>
31 #include <dev/pci/pcivar.h>
32 #include <dev/pci/ppbreg.h>
33 
34 #include <dev/ofw/openfirm.h>
35 #include <dev/ofw/ofw_clock.h>
36 #include <dev/ofw/ofw_gpio.h>
37 #include <dev/ofw/ofw_misc.h>
38 #include <dev/ofw/ofw_pinctrl.h>
39 #include <dev/ofw/ofw_power.h>
40 #include <dev/ofw/ofw_regulator.h>
41 #include <dev/ofw/fdt.h>
42 
43 /* Registers */
44 #define PCIE_PORT_LINK_CTRL		0x710
45 #define  PCIE_PORT_LINK_CTRL_LANES_MASK			(0x3f << 16)
46 #define  PCIE_PORT_LINK_CTRL_LANES_1			(0x1 << 16)
47 #define  PCIE_PORT_LINK_CTRL_LANES_2			(0x3 << 16)
48 #define  PCIE_PORT_LINK_CTRL_LANES_4			(0x7 << 16)
49 #define  PCIE_PORT_LINK_CTRL_LANES_8			(0xf << 16)
50 #define PCIE_PHY_DEBUG_R1		0x72c
51 #define  PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
52 #define  PCIE_PHY_DEBUG_R1_XMLH_LINK_UP			(1 << 4)
53 #define PCIE_LINK_WIDTH_SPEED_CTRL	0x80c
54 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_MASK		(0x1f << 8)
55 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_1		(0x1 << 8)
56 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_2		(0x2 << 8)
57 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_4		(0x4 << 8)
58 #define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_8		(0x8 << 8)
59 #define  PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE		(1 << 17)
60 
61 #define PCIE_MSI_ADDR_LO	0x820
62 #define PCIE_MSI_ADDR_HI	0x824
63 #define PCIE_MSI_INTR0_ENABLE	0x828
64 #define PCIE_MSI_INTR0_MASK	0x82c
65 #define PCIE_MSI_INTR0_STATUS	0x830
66 
67 #define MISC_CONTROL_1		0x8bc
68 #define  MISC_CONTROL_1_DBI_RO_WR_EN	(1 << 0)
69 #define IATU_VIEWPORT		0x900
70 #define  IATU_VIEWPORT_INDEX0		0
71 #define  IATU_VIEWPORT_INDEX1		1
72 #define  IATU_VIEWPORT_INDEX2		2
73 #define  IATU_VIEWPORT_INDEX3		3
74 #define IATU_OFFSET_VIEWPORT	0x904
75 #define IATU_OFFSET_UNROLL(x)	(0x200 * (x))
76 #define IATU_REGION_CTRL_1	0x000
77 #define  IATU_REGION_CTRL_1_TYPE_MEM	0
78 #define  IATU_REGION_CTRL_1_TYPE_IO	2
79 #define  IATU_REGION_CTRL_1_TYPE_CFG0	4
80 #define  IATU_REGION_CTRL_1_TYPE_CFG1	5
81 #define IATU_REGION_CTRL_2	0x004
82 #define  IATU_REGION_CTRL_2_REGION_EN	(1U << 31)
83 #define IATU_LWR_BASE_ADDR	0x08
84 #define IATU_UPPER_BASE_ADDR	0x0c
85 #define IATU_LIMIT_ADDR		0x10
86 #define IATU_LWR_TARGET_ADDR	0x14
87 #define IATU_UPPER_TARGET_ADDR	0x18
88 
89 /* Marvell ARMADA 8k registers */
90 #define PCIE_GLOBAL_CTRL	0x8000
91 #define  PCIE_GLOBAL_CTRL_APP_LTSSM_EN		(1 << 2)
92 #define  PCIE_GLOBAL_CTRL_DEVICE_TYPE_MASK	(0xf << 4)
93 #define  PCIE_GLOBAL_CTRL_DEVICE_TYPE_RC	(0x4 << 4)
94 #define PCIE_GLOBAL_STATUS	0x8008
95 #define  PCIE_GLOBAL_STATUS_RDLH_LINK_UP	(1 << 1)
96 #define  PCIE_GLOBAL_STATUS_PHY_LINK_UP		(1 << 9)
97 #define PCIE_PM_STATUS		0x8014
98 #define PCIE_GLOBAL_INT_CAUSE	0x801c
99 #define PCIE_GLOBAL_INT_MASK	0x8020
100 #define  PCIE_GLOBAL_INT_MASK_INT_A		(1 << 9)
101 #define  PCIE_GLOBAL_INT_MASK_INT_B		(1 << 10)
102 #define  PCIE_GLOBAL_INT_MASK_INT_C		(1 << 11)
103 #define  PCIE_GLOBAL_INT_MASK_INT_D		(1 << 12)
104 #define PCIE_ARCACHE_TRC	0x8050
105 #define  PCIE_ARCACHE_TRC_DEFAULT		0x3511
106 #define PCIE_AWCACHE_TRC	0x8054
107 #define  PCIE_AWCACHE_TRC_DEFAULT		0x5311
108 #define PCIE_ARUSER		0x805c
109 #define PCIE_AWUSER		0x8060
110 #define  PCIE_AXUSER_DOMAIN_MASK		(0x3 << 4)
111 #define  PCIE_AXUSER_DOMAIN_INNER_SHARABLE	(0x1 << 4)
112 #define  PCIE_AXUSER_DOMAIN_OUTER_SHARABLE	(0x2 << 4)
113 #define PCIE_STREAMID		0x8064
114 #define  PCIE_STREAMID_FUNC_BITS(x)		((x) << 0)
115 #define  PCIE_STREAMID_DEV_BITS(x)		((x) << 4)
116 #define  PCIE_STREAMID_BUS_BITS(x)		((x) << 8)
117 #define  PCIE_STREAMID_ROOTPORT(x)		((x) << 12)
118 #define  PCIE_STREAMID_8040			\
119     (PCIE_STREAMID_ROOTPORT(0x80) | PCIE_STREAMID_BUS_BITS(2) | \
120      PCIE_STREAMID_DEV_BITS(2) | PCIE_STREAMID_FUNC_BITS(3))
121 
122 /* Amlogic G12A registers */
123 #define PCIE_CFG0		0x0000
124 #define  PCIE_CFG0_APP_LTSSM_EN			(1 << 7)
125 #define PCIE_STATUS12		0x0030
126 #define  PCIE_STATUS12_RDLH_LINK_UP		(1 << 16)
127 #define  PCIE_STATUS12_LTSSM_MASK		(0x1f << 10)
128 #define  PCIE_STATUS12_LTSSM_UP			(0x11 << 10)
129 #define  PCIE_STATUS12_SMLH_LINK_UP		(1 << 6)
130 
131 /* NXP i.MX8MQ registers */
132 #define PCIE_RC_LCR				0x7c
133 #define  PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1		0x1
134 #define  PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2		0x2
135 #define  PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK		0xf
136 #define  PCIE_RC_LCR_L1EL_MASK				(0x7 << 15)
137 #define  PCIE_RC_LCR_L1EL_64US				(0x6 << 15)
138 
139 #define IOMUXC_GPR12				0x30
140 #define  IMX8MQ_GPR_PCIE2_DEVICE_TYPE_MASK		(0xf << 8)
141 #define  IMX8MQ_GPR_PCIE2_DEVICE_TYPE_RC		(0x4 << 8)
142 #define  IMX8MQ_GPR_PCIE1_DEVICE_TYPE_MASK		(0xf << 12)
143 #define  IMX8MQ_GPR_PCIE1_DEVICE_TYPE_RC		(0x4 << 12)
144 #define IOMUXC_GPR14				0x38
145 #define IOMUXC_GPR16				0x40
146 #define  IMX8MQ_GPR_PCIE_REF_USE_PAD			(1 << 9)
147 #define  IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN		(1 << 10)
148 #define  IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE		(1 << 11)
149 #define  IMX8MM_GPR_PCIE_SSC_EN				(1 << 16)
150 #define  IMX8MM_GPR_PCIE_POWER_OFF			(1 << 17)
151 #define  IMX8MM_GPR_PCIE_CMN_RST			(1 << 18)
152 #define  IMX8MM_GPR_PCIE_AUX_EN				(1 << 19)
153 #define  IMX8MM_GPR_PCIE_REF_CLK_MASK			(0x3 << 24)
154 #define  IMX8MM_GPR_PCIE_REF_CLK_PLL			(0x3 << 24)
155 #define  IMX8MM_GPR_PCIE_REF_CLK_EXT			(0x2 << 24)
156 
157 #define IMX8MM_PCIE_PHY_CMN_REG62			0x188
158 #define  IMX8MM_PCIE_PHY_CMN_REG62_PLL_CLK_OUT			0x08
159 #define IMX8MM_PCIE_PHY_CMN_REG64			0x190
160 #define  IMX8MM_PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM		0x8c
161 #define IMX8MM_PCIE_PHY_CMN_REG75			0x1d4
162 #define  IMX8MM_PCIE_PHY_CMN_REG75_PLL_DONE			0x3
163 #define IMX8MM_PCIE_PHY_TRSV_REG5			0x414
164 #define  IMX8MM_PCIE_PHY_TRSV_REG5_GEN1_DEEMP			0x2d
165 #define IMX8MM_PCIE_PHY_TRSV_REG6			0x418
166 #define  IMX8MM_PCIE_PHY_TRSV_REG6_GEN2_DEEMP			0xf
167 
168 #define ANATOP_PLLOUT_CTL			0x74
169 #define  ANATOP_PLLOUT_CTL_CKE				(1 << 4)
170 #define  ANATOP_PLLOUT_CTL_SEL_SYSPLL1			0xb
171 #define  ANATOP_PLLOUT_CTL_SEL_MASK			0xf
172 #define ANATOP_PLLOUT_DIV			0x7c
173 #define  ANATOP_PLLOUT_DIV_SYSPLL1			0x7
174 
175 /* Rockchip RK3568/RK3588 registers */
176 #define PCIE_CLIENT_GENERAL_CON			0x0000
177 #define  PCIE_CLIENT_DEV_TYPE_RC		((0xf << 4) << 16 | (0x4 << 4))
178 #define  PCIE_CLIENT_LINK_REQ_RST_GRT		((1 << 3) << 16 | (1 << 3))
179 #define  PCIE_CLIENT_APP_LTSSM_ENABLE		((1 << 2) << 16 | (1 << 2))
180 #define PCIE_CLIENT_INTR_STATUS_LEGACY		0x0008
181 #define PCIE_CLIENT_INTR_MASK_LEGACY		0x001c
182 #define PCIE_CLIENT_HOT_RESET_CTRL		0x0180
183 #define  PCIE_CLIENT_APP_LTSSM_ENABLE_ENHANCE	((1 << 4) << 16 | (1 << 4))
184 #define PCIE_CLIENT_LTSSM_STATUS		0x0300
185 #define  PCIE_CLIENT_RDLH_LINK_UP		(1 << 17)
186 #define  PCIE_CLIENT_SMLH_LINK_UP		(1 << 16)
187 #define  PCIE_CLIENT_LTSSM_MASK			(0x1f << 0)
188 #define  PCIE_CLIENT_LTSSM_UP			(0x11 << 0)
189 
190 #define HREAD4(sc, reg)							\
191 	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
192 #define HWRITE4(sc, reg, val)						\
193 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
194 #define HSET4(sc, reg, bits)						\
195 	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
196 #define HCLR4(sc, reg, bits)						\
197 	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
198 
199 struct dwpcie_range {
200 	uint32_t		flags;
201 	uint64_t		pci_base;
202 	uint64_t		phys_base;
203 	uint64_t		size;
204 };
205 
206 struct dwpcie_intx {
207 	int			(*di_func)(void *);
208 	void			*di_arg;
209 	int			di_ipl;
210 	int			di_flags;
211 	int			di_pin;
212 	struct evcount		di_count;
213 	char			*di_name;
214 	struct dwpcie_softc	*di_sc;
215 	TAILQ_ENTRY(dwpcie_intx) di_next;
216 };
217 
218 #define DWPCIE_NUM_MSI		32
219 
220 struct dwpcie_msi {
221 	int			(*dm_func)(void *);
222 	void			*dm_arg;
223 	int			dm_ipl;
224 	int			dm_flags;
225 	int			dm_vec;
226 	struct evcount		dm_count;
227 	char			*dm_name;
228 };
229 
230 struct dwpcie_softc {
231 	struct device		sc_dev;
232 	bus_space_tag_t		sc_iot;
233 	bus_space_handle_t	sc_ioh;
234 	bus_dma_tag_t		sc_dmat;
235 
236 	bus_addr_t		sc_ctrl_base;
237 	bus_size_t		sc_ctrl_size;
238 
239 	bus_addr_t		sc_conf_base;
240 	bus_size_t		sc_conf_size;
241 	bus_space_handle_t	sc_conf_ioh;
242 
243 	bus_addr_t		sc_glue_base;
244 	bus_size_t		sc_glue_size;
245 	bus_space_handle_t	sc_glue_ioh;
246 
247 	bus_addr_t		sc_atu_base;
248 	bus_size_t		sc_atu_size;
249 	bus_space_handle_t	sc_atu_ioh;
250 
251 	bus_addr_t		sc_io_base;
252 	bus_addr_t		sc_io_bus_addr;
253 	bus_size_t		sc_io_size;
254 	bus_addr_t		sc_mem_base;
255 	bus_addr_t		sc_mem_bus_addr;
256 	bus_size_t		sc_mem_size;
257 	bus_addr_t		sc_pmem_base;
258 	bus_addr_t		sc_pmem_bus_addr;
259 	bus_size_t		sc_pmem_size;
260 
261 	int			sc_node;
262 	int			sc_acells;
263 	int			sc_scells;
264 	int			sc_pacells;
265 	int			sc_pscells;
266 	struct dwpcie_range	*sc_ranges;
267 	int			sc_nranges;
268 
269 	struct bus_space	sc_bus_iot;
270 	struct bus_space	sc_bus_memt;
271 
272 	struct machine_pci_chipset sc_pc;
273 	int			sc_bus;
274 
275 	int			sc_num_viewport;
276 	int			sc_atu_unroll;
277 	int			sc_atu_viewport;
278 
279 	void			*sc_ih;
280 	struct interrupt_controller sc_ic;
281 	TAILQ_HEAD(,dwpcie_intx) sc_intx[4];
282 
283 	uint64_t		sc_msi_addr;
284 	struct dwpcie_msi	sc_msi[DWPCIE_NUM_MSI];
285 };
286 
287 struct dwpcie_intr_handle {
288 	struct machine_intr_handle pih_ih;
289 	struct dwpcie_softc	*pih_sc;
290 	struct dwpcie_msi	*pih_dm;
291 	bus_dma_tag_t		pih_dmat;
292 	bus_dmamap_t		pih_map;
293 };
294 
295 int dwpcie_match(struct device *, void *, void *);
296 void dwpcie_attach(struct device *, struct device *, void *);
297 
298 const struct cfattach	dwpcie_ca = {
299 	sizeof (struct dwpcie_softc), dwpcie_match, dwpcie_attach
300 };
301 
302 struct cfdriver dwpcie_cd = {
303 	NULL, "dwpcie", DV_DULL
304 };
305 
306 int
307 dwpcie_match(struct device *parent, void *match, void *aux)
308 {
309 	struct fdt_attach_args *faa = aux;
310 
311 	return (OF_is_compatible(faa->fa_node, "amlogic,g12a-pcie") ||
312 	    OF_is_compatible(faa->fa_node, "baikal,bm1000-pcie") ||
313 	    OF_is_compatible(faa->fa_node, "fsl,imx8mm-pcie") ||
314 	    OF_is_compatible(faa->fa_node, "fsl,imx8mq-pcie") ||
315 	    OF_is_compatible(faa->fa_node, "marvell,armada8k-pcie") ||
316 	    OF_is_compatible(faa->fa_node, "qcom,pcie-sc8280xp") ||
317 	    OF_is_compatible(faa->fa_node, "rockchip,rk3568-pcie") ||
318 	    OF_is_compatible(faa->fa_node, "rockchip,rk3588-pcie") ||
319 	    OF_is_compatible(faa->fa_node, "sifive,fu740-pcie"));
320 }
321 
322 void	dwpcie_attach_deferred(struct device *);
323 
324 void	dwpcie_atu_disable(struct dwpcie_softc *, int);
325 void	dwpcie_atu_config(struct dwpcie_softc *, int, int,
326 	    uint64_t, uint64_t, uint64_t);
327 void	dwpcie_link_config(struct dwpcie_softc *);
328 int	dwpcie_link_up(struct dwpcie_softc *);
329 
330 int	dwpcie_armada8k_init(struct dwpcie_softc *);
331 int	dwpcie_armada8k_link_up(struct dwpcie_softc *);
332 int	dwpcie_armada8k_intr(void *);
333 
334 int	dwpcie_g12a_init(struct dwpcie_softc *);
335 int	dwpcie_g12a_link_up(struct dwpcie_softc *);
336 
337 int	dwpcie_imx8mq_init(struct dwpcie_softc *);
338 int	dwpcie_imx8mq_intr(void *);
339 
340 int	dwpcie_fu740_init(struct dwpcie_softc *);
341 
342 int	dwpcie_rk3568_init(struct dwpcie_softc *);
343 int	dwpcie_rk3568_intr(void *);
344 void	*dwpcie_rk3568_intr_establish(void *, int *, int,
345  	    struct cpu_info *, int (*)(void *), void *, char *);
346 void	dwpcie_rk3568_intr_disestablish(void *);
347 void	dwpcie_rk3568_intr_barrier(void *);
348 
349 int	dwpcie_sc8280xp_init(struct dwpcie_softc *);
350 
351 void	dwpcie_attach_hook(struct device *, struct device *,
352 	    struct pcibus_attach_args *);
353 int	dwpcie_bus_maxdevs(void *, int);
354 pcitag_t dwpcie_make_tag(void *, int, int, int);
355 void	dwpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
356 int	dwpcie_conf_size(void *, pcitag_t);
357 pcireg_t dwpcie_conf_read(void *, pcitag_t, int);
358 void	dwpcie_conf_write(void *, pcitag_t, int, pcireg_t);
359 int	dwpcie_probe_device_hook(void *, struct pci_attach_args *);
360 
361 int	dwpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
362 const char *dwpcie_intr_string(void *, pci_intr_handle_t);
363 void	*dwpcie_intr_establish(void *, pci_intr_handle_t, int,
364 	    struct cpu_info *, int (*)(void *), void *, char *);
365 void	dwpcie_intr_disestablish(void *, void *);
366 
367 int	dwpcie_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
368 	    bus_space_handle_t *);
369 int	dwpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
370 	    bus_space_handle_t *);
371 
372 struct interrupt_controller dwpcie_ic = {
373 	.ic_barrier = intr_barrier
374 };
375 
376 void
377 dwpcie_attach(struct device *parent, struct device *self, void *aux)
378 {
379 	struct dwpcie_softc *sc = (struct dwpcie_softc *)self;
380 	struct fdt_attach_args *faa = aux;
381 	uint32_t *ranges;
382 	int i, j, nranges, rangeslen;
383 	int atu, config, ctrl, glue;
384 
385 	if (faa->fa_nreg < 2) {
386 		printf(": no registers\n");
387 		return;
388 	}
389 
390 	sc->sc_ctrl_base = faa->fa_reg[0].addr;
391 	sc->sc_ctrl_size = faa->fa_reg[0].size;
392 
393 	ctrl = OF_getindex(faa->fa_node, "dbi", "reg-names");
394 	if (ctrl >= 0 && ctrl < faa->fa_nreg) {
395 		sc->sc_ctrl_base = faa->fa_reg[ctrl].addr;
396 		sc->sc_ctrl_size = faa->fa_reg[ctrl].size;
397 	}
398 
399 	config = OF_getindex(faa->fa_node, "config", "reg-names");
400 	if (config < 0 || config >= faa->fa_nreg) {
401 		printf(": no config registers\n");
402 		return;
403 	}
404 
405 	sc->sc_conf_base = faa->fa_reg[config].addr;
406 	sc->sc_conf_size = faa->fa_reg[config].size;
407 
408 	sc->sc_atu_base = sc->sc_ctrl_base + 0x300000;
409 	sc->sc_atu_size = sc->sc_ctrl_size - 0x300000;
410 
411 	atu = OF_getindex(faa->fa_node, "atu", "reg-names");
412 	if (atu >= 0 && atu < faa->fa_nreg) {
413 		sc->sc_atu_base = faa->fa_reg[atu].addr;
414 		sc->sc_atu_size = faa->fa_reg[atu].size;
415 	}
416 
417 	if (OF_is_compatible(faa->fa_node, "amlogic,g12a-pcie")) {
418 		glue = OF_getindex(faa->fa_node, "cfg", "reg-names");
419 		if (glue < 0 || glue >= faa->fa_nreg) {
420 			printf(": no glue registers\n");
421 			return;
422 		}
423 
424 		sc->sc_glue_base = faa->fa_reg[glue].addr;
425 		sc->sc_glue_size = faa->fa_reg[glue].size;
426 	}
427 
428 	if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-pcie") ||
429 	    OF_is_compatible(faa->fa_node, "rockchip,rk3588-pcie")) {
430 		glue = OF_getindex(faa->fa_node, "apb", "reg-names");
431 		if (glue < 0 || glue >= faa->fa_nreg) {
432 			printf(": no glue registers\n");
433 			return;
434 		}
435 
436 		sc->sc_glue_base = faa->fa_reg[glue].addr;
437 		sc->sc_glue_size = faa->fa_reg[glue].size;
438 	}
439 
440 	sc->sc_iot = faa->fa_iot;
441 	sc->sc_dmat = faa->fa_dmat;
442 	sc->sc_node = faa->fa_node;
443 
444 	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
445 	    faa->fa_acells);
446 	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
447 	    faa->fa_scells);
448 	sc->sc_pacells = faa->fa_acells;
449 	sc->sc_pscells = faa->fa_scells;
450 
451 	rangeslen = OF_getproplen(sc->sc_node, "ranges");
452 	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
453 	     (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
454 	     sc->sc_pacells + sc->sc_scells)) {
455 		printf(": invalid ranges property\n");
456 		return;
457 	}
458 
459 	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
460 	OF_getpropintarray(sc->sc_node, "ranges", ranges,
461 	    rangeslen);
462 
463 	nranges = (rangeslen / sizeof(uint32_t)) /
464 	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
465 	sc->sc_ranges = mallocarray(nranges,
466 	    sizeof(struct dwpcie_range), M_TEMP, M_WAITOK);
467 	sc->sc_nranges = nranges;
468 
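	/*
	 * Decode each "ranges" entry into its flags, PCI bus address,
	 * CPU physical address and size, joining 64-bit values from
	 * two cells where the cell counts call for it.
	 */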
469 	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
470 		sc->sc_ranges[i].flags = ranges[j++];
471 		sc->sc_ranges[i].pci_base = ranges[j++];
472 		if (sc->sc_acells - 1 == 2) {
473 			sc->sc_ranges[i].pci_base <<= 32;
474 			sc->sc_ranges[i].pci_base |= ranges[j++];
475 		}
476 		sc->sc_ranges[i].phys_base = ranges[j++];
477 		if (sc->sc_pacells == 2) {
478 			sc->sc_ranges[i].phys_base <<= 32;
479 			sc->sc_ranges[i].phys_base |= ranges[j++];
480 		}
481 		sc->sc_ranges[i].size = ranges[j++];
482 		if (sc->sc_scells == 2) {
483 			sc->sc_ranges[i].size <<= 32;
484 			sc->sc_ranges[i].size |= ranges[j++];
485 		}
486 	}
487 
488 	free(ranges, M_TEMP, rangeslen);
489 
490 	if (bus_space_map(sc->sc_iot, sc->sc_ctrl_base,
491 	    sc->sc_ctrl_size, 0, &sc->sc_ioh)) {
492 		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
493 		    sizeof(struct dwpcie_range));
494 		printf(": can't map ctrl registers\n");
495 		return;
496 	}
497 
498 	if (bus_space_map(sc->sc_iot, sc->sc_conf_base,
499 	    sc->sc_conf_size, 0, &sc->sc_conf_ioh)) {
500 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ctrl_size);
501 		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
502 		    sizeof(struct dwpcie_range));
503 		printf(": can't map config registers\n");
504 		return;
505 	}
506 
507 	sc->sc_num_viewport = OF_getpropint(sc->sc_node, "num-viewport", 2);
508 
509 	printf("\n");
510 
511 	pinctrl_byname(sc->sc_node, "default");
512 	clock_set_assigned(sc->sc_node);
513 
514 	config_defer(self, dwpcie_attach_deferred);
515 }
516 
517 void
518 dwpcie_attach_deferred(struct device *self)
519 {
520 	struct dwpcie_softc *sc = (struct dwpcie_softc *)self;
521 	struct pcibus_attach_args pba;
522 	bus_addr_t iobase, iolimit;
523 	bus_addr_t membase, memlimit;
524 	bus_addr_t pmembase, pmemlimit;
525 	uint32_t bus_range[2];
526 	pcireg_t bir, blr, csr;
527 	int i, error = 0;
528 
529 	if (OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie"))
530 		error = dwpcie_armada8k_init(sc);
531 	if (OF_is_compatible(sc->sc_node, "amlogic,g12a-pcie"))
532 		error = dwpcie_g12a_init(sc);
533 	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie") ||
534 	    OF_is_compatible(sc->sc_node, "fsl,imx8mq-pcie"))
535 		error = dwpcie_imx8mq_init(sc);
536 	if (OF_is_compatible(sc->sc_node, "qcom,pcie-sc8280xp"))
537 		error = dwpcie_sc8280xp_init(sc);
538 	if (OF_is_compatible(sc->sc_node, "rockchip,rk3568-pcie") ||
539 	    OF_is_compatible(sc->sc_node, "rockchip,rk3588-pcie"))
540 		error = dwpcie_rk3568_init(sc);
541 	if (OF_is_compatible(sc->sc_node, "sifive,fu740-pcie"))
542 		error = dwpcie_fu740_init(sc);
543 	if (error != 0) {
544 		bus_space_unmap(sc->sc_iot, sc->sc_conf_ioh, sc->sc_conf_size);
545 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ctrl_size);
546 		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
547 		    sizeof(struct dwpcie_range));
548 		printf("%s: can't initialize hardware\n",
549 		    sc->sc_dev.dv_xname);
550 		return;
551 	}
552 
553 	sc->sc_atu_viewport = -1;
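	/*
	 * An all-ones read of the viewport register means this controller
	 * uses the "unrolled" iATU layout, with the iATU registers in
	 * their own window that needs a separate mapping.
	 */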
554 	if (HREAD4(sc, IATU_VIEWPORT) == 0xffffffff) {
555 		sc->sc_atu_unroll = 1;
556 		if (bus_space_map(sc->sc_iot, sc->sc_atu_base,
557 		    sc->sc_atu_size, 0, &sc->sc_atu_ioh)) {
558 			bus_space_unmap(sc->sc_iot, sc->sc_conf_ioh,
559 			    sc->sc_conf_size);
560 			bus_space_unmap(sc->sc_iot, sc->sc_ioh,
561 			    sc->sc_ctrl_size);
562 			free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
563 			    sizeof(struct dwpcie_range));
564 			printf("%s: can't map atu registers\n",
565 			    sc->sc_dev.dv_xname);
566 			return;
567 		}
568 	}
569 
 570 	/* Extract the I/O, memory and prefetchable memory windows. */
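	/*
	 * The space code in bits 24-25 of the flags cell selects the
	 * window: 0x1 is I/O space, 0x2 is 32-bit memory and 0x3 is
	 * 64-bit memory, which we use as the prefetchable window.
	 */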
571 	for (i = 0; i < sc->sc_nranges; i++) {
572 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
573 		    sc->sc_ranges[i].size > 0) {
574 			sc->sc_io_base = sc->sc_ranges[i].phys_base;
575 			sc->sc_io_bus_addr = sc->sc_ranges[i].pci_base;
576 			sc->sc_io_size = sc->sc_ranges[i].size;
577 		}
578 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
579 		    sc->sc_ranges[i].size > 0) {
580 			sc->sc_mem_base = sc->sc_ranges[i].phys_base;
581 			sc->sc_mem_bus_addr = sc->sc_ranges[i].pci_base;
582 			sc->sc_mem_size = sc->sc_ranges[i].size;
583 		}
584 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x03000000 &&
585 		    sc->sc_ranges[i].size > 0) {
586 			sc->sc_pmem_base = sc->sc_ranges[i].phys_base;
587 			sc->sc_pmem_bus_addr = sc->sc_ranges[i].pci_base;
588 			sc->sc_pmem_size = sc->sc_ranges[i].size;
589 		}
590 	}
591 	if (sc->sc_mem_size == 0) {
592 		printf("%s: no memory mapped I/O window\n",
593 		    sc->sc_dev.dv_xname);
594 		return;
595 	}
596 
597 	/*
598 	 * Disable prefetchable memory mapped I/O window if we don't
599 	 * have enough viewports to enable it.
600 	 */
601 	if (sc->sc_num_viewport < 4)
602 		sc->sc_pmem_size = 0;
603 
604 	for (i = 0; i < sc->sc_num_viewport; i++)
605 		dwpcie_atu_disable(sc, i);
606 
607 	dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX0,
608 	    IATU_REGION_CTRL_1_TYPE_MEM, sc->sc_mem_base,
609 	    sc->sc_mem_bus_addr, sc->sc_mem_size);
610 	if (sc->sc_num_viewport > 2 && sc->sc_io_size > 0)
611 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX2,
612 		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
613 		    sc->sc_io_bus_addr, sc->sc_io_size);
614 	if (sc->sc_num_viewport > 3 && sc->sc_pmem_size > 0)
615 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX3,
616 		    IATU_REGION_CTRL_1_TYPE_MEM, sc->sc_pmem_base,
617 		    sc->sc_pmem_bus_addr, sc->sc_pmem_size);
618 
619 	/* Enable modification of read-only bits. */
620 	HSET4(sc, MISC_CONTROL_1, MISC_CONTROL_1_DBI_RO_WR_EN);
621 
622 	/* A Root Port is a PCI-PCI Bridge. */
623 	HWRITE4(sc, PCI_CLASS_REG,
624 	    PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
625 	    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT);
626 
627 	/* Clear BAR as U-Boot seems to leave garbage in it. */
628 	HWRITE4(sc, PCI_MAPREG_START, PCI_MAPREG_MEM_TYPE_64BIT);
629 	HWRITE4(sc, PCI_MAPREG_START + 4, 0);
630 
631 	/* Enable 32-bit I/O addressing. */
632 	HSET4(sc, PPB_REG_IOSTATUS,
633 	    PPB_IO_32BIT | (PPB_IO_32BIT << PPB_IOLIMIT_SHIFT));
634 
635 	/* Make sure read-only bits are write-protected. */
636 	HCLR4(sc, MISC_CONTROL_1, MISC_CONTROL_1_DBI_RO_WR_EN);
637 
638 	/* Set up bus range. */
639 	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
640 	    sizeof(bus_range)) != sizeof(bus_range)) {
641 		bus_range[0] = 0;
642 		bus_range[1] = 31;
643 	}
644 	sc->sc_bus = bus_range[0];
645 
646 	/* Initialize bus range. */
647 	bir = bus_range[0];
648 	bir |= ((bus_range[0] + 1) << 8);
649 	bir |= (bus_range[1] << 16);
650 	HWRITE4(sc, PPB_REG_BUSINFO, bir);
651 
652 	/* Initialize memory mapped I/O window. */
653 	membase = sc->sc_mem_bus_addr;
654 	memlimit = membase + sc->sc_mem_size - 1;
655 	blr = memlimit & PPB_MEM_MASK;
656 	blr |= (membase >> PPB_MEM_SHIFT);
657 	HWRITE4(sc, PPB_REG_MEM, blr);
658 
659 	/* Initialize I/O window. */
660 	if (sc->sc_io_size > 0) {
661 		iobase = sc->sc_io_bus_addr;
662 		iolimit = iobase + sc->sc_io_size - 1;
663 		blr = iolimit & PPB_IO_MASK;
664 		blr |= (iobase >> PPB_IO_SHIFT);
665 		HWRITE4(sc, PPB_REG_IOSTATUS, blr);
666 		blr = (iobase & 0xffff0000) >> 16;
667 		blr |= iolimit & 0xffff0000;
668 		HWRITE4(sc, PPB_REG_IO_HI, blr);
669 	} else {
670 		HWRITE4(sc, PPB_REG_IOSTATUS, 0x000000ff);
671 		HWRITE4(sc, PPB_REG_IO_HI, 0x0000ffff);
672 	}
673 
674 	/* Initialize prefetchable memory mapped I/O window. */
675 	if (sc->sc_pmem_size > 0) {
676 		pmembase = sc->sc_pmem_bus_addr;
677 		pmemlimit = pmembase + sc->sc_pmem_size - 1;
678 		blr = pmemlimit & PPB_MEM_MASK;
679 		blr |= ((pmembase & PPB_MEM_MASK) >> PPB_MEM_SHIFT);
680 		HWRITE4(sc, PPB_REG_PREFMEM, blr);
681 		HWRITE4(sc, PPB_REG_PREFBASE_HI32, pmembase >> 32);
682 		HWRITE4(sc, PPB_REG_PREFLIM_HI32, pmemlimit >> 32);
683 	} else {
684 		HWRITE4(sc, PPB_REG_PREFMEM, 0x0000ffff);
685 		HWRITE4(sc, PPB_REG_PREFBASE_HI32, 0);
686 		HWRITE4(sc, PPB_REG_PREFLIM_HI32, 0);
687 	}
688 
689 	csr = PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
690 	if (sc->sc_io_size > 0)
691 		csr |= PCI_COMMAND_IO_ENABLE;
692 	HWRITE4(sc, PCI_COMMAND_STATUS_REG, csr);
693 
694 	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
695 	sc->sc_bus_iot.bus_private = sc;
696 	sc->sc_bus_iot._space_map = dwpcie_bs_iomap;
697 	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
698 	sc->sc_bus_memt.bus_private = sc;
699 	sc->sc_bus_memt._space_map = dwpcie_bs_memmap;
700 
701 	sc->sc_pc.pc_conf_v = sc;
702 	sc->sc_pc.pc_attach_hook = dwpcie_attach_hook;
703 	sc->sc_pc.pc_bus_maxdevs = dwpcie_bus_maxdevs;
704 	sc->sc_pc.pc_make_tag = dwpcie_make_tag;
705 	sc->sc_pc.pc_decompose_tag = dwpcie_decompose_tag;
706 	sc->sc_pc.pc_conf_size = dwpcie_conf_size;
707 	sc->sc_pc.pc_conf_read = dwpcie_conf_read;
708 	sc->sc_pc.pc_conf_write = dwpcie_conf_write;
709 	sc->sc_pc.pc_probe_device_hook = dwpcie_probe_device_hook;
710 
711 	sc->sc_pc.pc_intr_v = sc;
712 	sc->sc_pc.pc_intr_map = dwpcie_intr_map;
713 	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
714 	sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
715 	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
716 	sc->sc_pc.pc_intr_string = dwpcie_intr_string;
717 	sc->sc_pc.pc_intr_establish = dwpcie_intr_establish;
718 	sc->sc_pc.pc_intr_disestablish = dwpcie_intr_disestablish;
719 
720 	memset(&pba, 0, sizeof(pba));
721 	pba.pba_busname = "pci";
722 	pba.pba_iot = &sc->sc_bus_iot;
723 	pba.pba_memt = &sc->sc_bus_memt;
724 	pba.pba_dmat = sc->sc_dmat;
725 	pba.pba_pc = &sc->sc_pc;
726 	pba.pba_domain = pci_ndomains++;
727 	pba.pba_bus = sc->sc_bus;
728 	if (OF_is_compatible(sc->sc_node, "baikal,bm1000-pcie") ||
729 	    OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie") ||
730 	    OF_getproplen(sc->sc_node, "msi-map") > 0 ||
731 	    sc->sc_msi_addr)
732 		pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;
733 	if (OF_getproplen(sc->sc_node, "msi-map") > 0)
734 		pba.pba_flags |= PCI_FLAGS_MSIVEC_ENABLED;
735 
736 	pci_dopm = 1;
737 
738 	config_found(self, &pba, NULL);
739 }
740 
741 void
742 dwpcie_link_config(struct dwpcie_softc *sc)
743 {
744 	uint32_t mode, width, reg;
745 	int lanes;
746 
747 	lanes = OF_getpropint(sc->sc_node, "num-lanes", 0);
748 
749 	switch (lanes) {
750 	case 1:
751 		mode = PCIE_PORT_LINK_CTRL_LANES_1;
752 		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_1;
753 		break;
754 	case 2:
755 		mode = PCIE_PORT_LINK_CTRL_LANES_2;
756 		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_2;
757 		break;
758 	case 4:
759 		mode = PCIE_PORT_LINK_CTRL_LANES_4;
760 		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_4;
761 		break;
762 	case 8:
763 		mode = PCIE_PORT_LINK_CTRL_LANES_8;
764 		width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_8;
765 		break;
766 	default:
767 		printf("%s: %d lanes not supported\n", __func__, lanes);
768 		return;
769 	}
770 
771 	reg = HREAD4(sc, PCIE_PORT_LINK_CTRL);
772 	reg &= ~PCIE_PORT_LINK_CTRL_LANES_MASK;
773 	reg |= mode;
774 	HWRITE4(sc, PCIE_PORT_LINK_CTRL, reg);
775 
776 	reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
777 	reg &= ~PCIE_LINK_WIDTH_SPEED_CTRL_LANES_MASK;
778 	reg |= width;
779 	HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
780 
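	/* Kick off a directed speed change so the new link settings take effect. */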
781 	reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
782 	reg |= PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE;
783 	HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
784 }
785 
786 int
787 dwpcie_msi_intr(void *arg)
788 {
789 	struct dwpcie_softc *sc = arg;
790 	struct dwpcie_msi *dm;
791 	uint32_t status;
792 	int vec, s;
793 
794 	status = HREAD4(sc, PCIE_MSI_INTR0_STATUS);
795 	if (status == 0)
796 		return 0;
797 
798 	HWRITE4(sc, PCIE_MSI_INTR0_STATUS, status);
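	/* Dispatch every pending MSI vector to its registered handler. */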
799 	while (status) {
800 		vec = ffs(status) - 1;
801 		status &= ~(1U << vec);
802 
803 		dm = &sc->sc_msi[vec];
804 		if (dm->dm_func == NULL)
805 			continue;
806 
807 		if ((dm->dm_flags & IPL_MPSAFE) == 0)
808 			KERNEL_LOCK();
809 		s = splraise(dm->dm_ipl);
810 		if (dm->dm_func(dm->dm_arg))
811 			dm->dm_count.ec_count++;
812 		splx(s);
813 		if ((dm->dm_flags & IPL_MPSAFE) == 0)
814 			KERNEL_UNLOCK();
815 	}
816 
817 	return 1;
818 }
819 
820 int
821 dwpcie_msi_init(struct dwpcie_softc *sc)
822 {
823 	bus_dma_segment_t seg;
824 	bus_dmamap_t map;
825 	uint64_t addr;
826 	int error, rseg;
827 
828 	/*
829 	 * Allocate some DMA memory such that we have a "safe" target
830 	 * address for MSIs.
831 	 */
832 	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(uint32_t),
833 	    sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_WAITOK);
834 	if (error)
835 		return error;
836 
837 	/*
838 	 * Translate the CPU address into a bus address that we can
839 	 * program into the hardware.
840 	 */
841 	error = bus_dmamap_create(sc->sc_dmat, sizeof(uint32_t), 1,
842 	    sizeof(uint32_t), 0, BUS_DMA_WAITOK, &map);
843 	if (error) {
844 		bus_dmamem_free(sc->sc_dmat, &seg, 1);
845 		return error;
846 	}
847 	error = bus_dmamap_load_raw(sc->sc_dmat, map, &seg, 1,
848 	    sizeof(uint32_t), BUS_DMA_WAITOK);
849 	if (error) {
850 		bus_dmamap_destroy(sc->sc_dmat, map);
851 		bus_dmamem_free(sc->sc_dmat, &seg, 1);
852 		return error;
853 	}
854 
855 	addr = map->dm_segs[0].ds_addr;
856 	HWRITE4(sc, PCIE_MSI_ADDR_LO, addr);
857 	HWRITE4(sc, PCIE_MSI_ADDR_HI, addr >> 32);
858 
859 	bus_dmamap_unload(sc->sc_dmat, map);
860 	bus_dmamap_destroy(sc->sc_dmat, map);
861 
862 	/* Enable, mask and clear all MSIs. */
863 	HWRITE4(sc, PCIE_MSI_INTR0_ENABLE, 0xffffffff);
864 	HWRITE4(sc, PCIE_MSI_INTR0_MASK, 0xffffffff);
865 	HWRITE4(sc, PCIE_MSI_INTR0_STATUS, 0xffffffff);
866 
867 	KASSERT(sc->sc_ih == NULL);
868 	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_BIO | IPL_MPSAFE,
869 	    dwpcie_msi_intr, sc, sc->sc_dev.dv_xname);
870 	if (sc->sc_ih == NULL) {
871 		bus_dmamem_free(sc->sc_dmat, &seg, 1);
872 		return EINVAL;
873 	}
874 
875 	/*
876 	 * Hold on to the DMA memory such that nobody can use it to
877 	 * actually do DMA transfers.
878 	 */
879 
880 	sc->sc_msi_addr = addr;
881 	return 0;
882 }
883 
884 int
885 dwpcie_armada8k_init(struct dwpcie_softc *sc)
886 {
887 	uint32_t reg;
888 	int timo;
889 
890 	clock_enable_all(sc->sc_node);
891 
892 	dwpcie_link_config(sc);
893 
894 	if (!dwpcie_armada8k_link_up(sc)) {
895 		reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
896 		reg &= ~PCIE_GLOBAL_CTRL_APP_LTSSM_EN;
897 		HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
898 	}
899 
900 	/*
901 	 * Setup Requester-ID to Stream-ID mapping
 902 	 * Set up Requester-ID to Stream-ID mapping
903 	 */
904 	HWRITE4(sc, PCIE_STREAMID, PCIE_STREAMID_8040);
905 
906 	/* Enable Root Complex mode. */
907 	reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
908 	reg &= ~PCIE_GLOBAL_CTRL_DEVICE_TYPE_MASK;
909 	reg |= PCIE_GLOBAL_CTRL_DEVICE_TYPE_RC;
910 	HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
911 
912 	HWRITE4(sc, PCIE_ARCACHE_TRC, PCIE_ARCACHE_TRC_DEFAULT);
913 	HWRITE4(sc, PCIE_AWCACHE_TRC, PCIE_AWCACHE_TRC_DEFAULT);
914 	reg = HREAD4(sc, PCIE_ARUSER);
915 	reg &= ~PCIE_AXUSER_DOMAIN_MASK;
916 	reg |= PCIE_AXUSER_DOMAIN_OUTER_SHARABLE;
917 	HWRITE4(sc, PCIE_ARUSER, reg);
918 	reg = HREAD4(sc, PCIE_AWUSER);
919 	reg &= ~PCIE_AXUSER_DOMAIN_MASK;
920 	reg |= PCIE_AXUSER_DOMAIN_OUTER_SHARABLE;
921 	HWRITE4(sc, PCIE_AWUSER, reg);
922 
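	/* Enable the LTSSM to start link training if the link isn't up yet. */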
923 	if (!dwpcie_armada8k_link_up(sc)) {
924 		reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
925 		reg |= PCIE_GLOBAL_CTRL_APP_LTSSM_EN;
926 		HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
927 	}
928 
929 	for (timo = 40; timo > 0; timo--) {
930 		if (dwpcie_armada8k_link_up(sc))
931 			break;
932 		delay(1000);
933 	}
934 	if (timo == 0)
935 		return ETIMEDOUT;
936 
937 	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_AUDIO | IPL_MPSAFE,
938 	    dwpcie_armada8k_intr, sc, sc->sc_dev.dv_xname);
939 
940 	/* Unmask INTx interrupts. */
941 	HWRITE4(sc, PCIE_GLOBAL_INT_MASK,
942 	    PCIE_GLOBAL_INT_MASK_INT_A | PCIE_GLOBAL_INT_MASK_INT_B |
943 	    PCIE_GLOBAL_INT_MASK_INT_C | PCIE_GLOBAL_INT_MASK_INT_D);
944 
945 	return 0;
946 }
947 
948 int
949 dwpcie_armada8k_link_up(struct dwpcie_softc *sc)
950 {
951 	uint32_t reg, mask;
952 
953 	mask = PCIE_GLOBAL_STATUS_RDLH_LINK_UP;
954 	mask |= PCIE_GLOBAL_STATUS_PHY_LINK_UP;
955 	reg = HREAD4(sc, PCIE_GLOBAL_STATUS);
956 	return ((reg & mask) == mask);
957 }
958 
959 int
960 dwpcie_armada8k_intr(void *arg)
961 {
962 	struct dwpcie_softc *sc = arg;
963 	uint32_t cause;
964 
965 	/* Acknowledge interrupts. */
966 	cause = HREAD4(sc, PCIE_GLOBAL_INT_CAUSE);
967 	HWRITE4(sc, PCIE_GLOBAL_INT_CAUSE, cause);
968 
969 	/* INTx interrupt, so not really ours. */
970 	return 0;
971 }
972 
973 int
974 dwpcie_g12a_init(struct dwpcie_softc *sc)
975 {
976 	uint32_t *reset_gpio;
977 	ssize_t reset_gpiolen;
978 	uint32_t reg;
979 	int error, timo;
980 
981 	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
982 	if (reset_gpiolen <= 0)
983 		return ENXIO;
984 
985 	if (bus_space_map(sc->sc_iot, sc->sc_glue_base,
986 	    sc->sc_glue_size, 0, &sc->sc_glue_ioh))
987 		return ENOMEM;
988 
989 	power_domain_enable(sc->sc_node);
990 
991 	phy_enable(sc->sc_node, "pcie");
992 
993 	reset_assert_all(sc->sc_node);
994 	delay(500);
995 	reset_deassert_all(sc->sc_node);
996 	delay(500);
997 
998 	clock_set_frequency(sc->sc_node, "port", 100000000UL);
999 	clock_enable_all(sc->sc_node);
1000 
1001 	reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
1002 	OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
1003 	    reset_gpiolen);
1004 	gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
1005 	gpio_controller_set_pin(reset_gpio, 1);
1006 
1007 	dwpcie_link_config(sc);
1008 
1009 	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CFG0);
1010 	reg |= PCIE_CFG0_APP_LTSSM_EN;
1011 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CFG0, reg);
1012 
1013 	gpio_controller_set_pin(reset_gpio, 1);
1014 	delay(500);
1015 	gpio_controller_set_pin(reset_gpio, 0);
1016 
1017 	free(reset_gpio, M_TEMP, reset_gpiolen);
1018 
1019 	for (timo = 40; timo > 0; timo--) {
1020 		if (dwpcie_g12a_link_up(sc))
1021 			break;
1022 		delay(1000);
1023 	}
1024 	if (timo == 0)
1025 		return ETIMEDOUT;
1026 
1027 	error = dwpcie_msi_init(sc);
1028 	if (error)
1029 		return error;
1030 
1031 	return 0;
1032 }
1033 
1034 int
1035 dwpcie_g12a_link_up(struct dwpcie_softc *sc)
1036 {
1037 	uint32_t reg;
1038 
1039 	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_STATUS12);
1040 	if ((reg & PCIE_STATUS12_SMLH_LINK_UP) &&
1041 	    (reg & PCIE_STATUS12_RDLH_LINK_UP) &&
1042 	    (reg & PCIE_STATUS12_LTSSM_MASK) == PCIE_STATUS12_LTSSM_UP)
1043 		return 1;
1044 	return 0;
1045 }
1046 
1047 int
1048 dwpcie_imx8mq_init(struct dwpcie_softc *sc)
1049 {
1050 	uint32_t *clkreq_gpio, *disable_gpio, *reset_gpio;
1051 	ssize_t clkreq_gpiolen, disable_gpiolen, reset_gpiolen;
1052 	struct regmap *anatop, *gpr, *phy;
1053 	uint32_t off, reg;
1054 	int error, timo;
1055 
1056 	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
1057 		anatop = regmap_bycompatible("fsl,imx8mm-anatop");
1058 		gpr = regmap_bycompatible("fsl,imx8mm-iomuxc-gpr");
1059 		phy = regmap_bycompatible("fsl,imx7d-pcie-phy");
1060 		KASSERT(phy != NULL);
1061 	} else {
1062 		anatop = regmap_bycompatible("fsl,imx8mq-anatop");
1063 		gpr = regmap_bycompatible("fsl,imx8mq-iomuxc-gpr");
1064 	}
1065 	KASSERT(anatop != NULL);
1066 	KASSERT(gpr != NULL);
1067 
1068 	clkreq_gpiolen = OF_getproplen(sc->sc_node, "clkreq-gpio");
1069 	disable_gpiolen = OF_getproplen(sc->sc_node, "disable-gpio");
1070 	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpio");
1071 
1072 	if (clkreq_gpiolen > 0) {
1073 		clkreq_gpio = malloc(clkreq_gpiolen, M_TEMP, M_WAITOK);
1074 		OF_getpropintarray(sc->sc_node, "clkreq-gpio", clkreq_gpio,
1075 		    clkreq_gpiolen);
1076 		gpio_controller_config_pin(clkreq_gpio, GPIO_CONFIG_OUTPUT);
1077 		gpio_controller_set_pin(clkreq_gpio, 1);
1078 	}
1079 
1080 	if (disable_gpiolen > 0) {
1081 		disable_gpio = malloc(disable_gpiolen, M_TEMP, M_WAITOK);
1082 		OF_getpropintarray(sc->sc_node, "disable-gpio", disable_gpio,
1083 		    disable_gpiolen);
1084 		gpio_controller_config_pin(disable_gpio, GPIO_CONFIG_OUTPUT);
1085 		gpio_controller_set_pin(disable_gpio, 0);
1086 	}
1087 
1088 	if (reset_gpiolen > 0) {
1089 		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
1090 		OF_getpropintarray(sc->sc_node, "reset-gpio", reset_gpio,
1091 		    reset_gpiolen);
1092 		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
1093 		gpio_controller_set_pin(reset_gpio, 1);
1094 	}
1095 
1096 	power_domain_enable(sc->sc_node);
1097 	reset_assert(sc->sc_node, "pciephy");
1098 	reset_assert(sc->sc_node, "apps");
1099 
1100 	reg = regmap_read_4(gpr, IOMUXC_GPR12);
1101 	if (OF_getpropint(sc->sc_node, "ctrl-id", 0) == 0) {
1102 		off = IOMUXC_GPR14;
1103 		reg &= ~IMX8MQ_GPR_PCIE1_DEVICE_TYPE_MASK;
1104 		reg |= IMX8MQ_GPR_PCIE1_DEVICE_TYPE_RC;
1105 	} else {
1106 		off = IOMUXC_GPR16;
1107 		reg &= ~IMX8MQ_GPR_PCIE2_DEVICE_TYPE_MASK;
1108 		reg |= IMX8MQ_GPR_PCIE2_DEVICE_TYPE_RC;
1109 	}
1110 	regmap_write_4(gpr, IOMUXC_GPR12, reg);
1111 
1112 	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
1113 		if (OF_getproplen(sc->sc_node, "ext_osc") == 0 ||
1114 		    OF_getpropint(sc->sc_node, "ext_osc", 0)) {
1115 			reg = regmap_read_4(gpr, off);
1116 			reg &= ~(IMX8MQ_GPR_PCIE_REF_USE_PAD |
1117 			    IMX8MM_GPR_PCIE_SSC_EN |
1118 			    IMX8MM_GPR_PCIE_POWER_OFF |
1119 			    IMX8MM_GPR_PCIE_REF_CLK_MASK);
1120 			reg |= (IMX8MM_GPR_PCIE_AUX_EN |
1121 			    IMX8MM_GPR_PCIE_REF_CLK_EXT);
1122 			regmap_write_4(gpr, off, reg);
1123 			delay(100);
1124 			reg = regmap_read_4(gpr, off);
1125 			reg |= IMX8MM_GPR_PCIE_CMN_RST;
1126 			regmap_write_4(gpr, off, reg);
1127 			delay(200);
1128 		} else {
1129 			reg = regmap_read_4(gpr, off);
1130 			reg &= ~(IMX8MQ_GPR_PCIE_REF_USE_PAD |
1131 			    IMX8MM_GPR_PCIE_SSC_EN |
1132 			    IMX8MM_GPR_PCIE_POWER_OFF |
1133 			    IMX8MM_GPR_PCIE_REF_CLK_MASK);
1134 			reg |= (IMX8MM_GPR_PCIE_AUX_EN |
1135 			    IMX8MM_GPR_PCIE_REF_CLK_PLL);
1136 			regmap_write_4(gpr, off, reg);
1137 			delay(100);
1138 			regmap_write_4(phy, IMX8MM_PCIE_PHY_CMN_REG62,
1139 			    IMX8MM_PCIE_PHY_CMN_REG62_PLL_CLK_OUT);
1140 			regmap_write_4(phy, IMX8MM_PCIE_PHY_CMN_REG64,
1141 			    IMX8MM_PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM);
1142 			reg = regmap_read_4(gpr, off);
1143 			reg |= IMX8MM_GPR_PCIE_CMN_RST;
1144 			regmap_write_4(gpr, off, reg);
1145 			delay(200);
1146 			regmap_write_4(phy, IMX8MM_PCIE_PHY_TRSV_REG5,
1147 			    IMX8MM_PCIE_PHY_TRSV_REG5_GEN1_DEEMP);
1148 			regmap_write_4(phy, IMX8MM_PCIE_PHY_TRSV_REG6,
1149 			    IMX8MM_PCIE_PHY_TRSV_REG6_GEN2_DEEMP);
1150 		}
1151 	} else {
1152 		if (OF_getproplen(sc->sc_node, "ext_osc") == 0 ||
1153 		    OF_getpropint(sc->sc_node, "ext_osc", 0)) {
1154 			reg = regmap_read_4(gpr, off);
1155 			reg |= IMX8MQ_GPR_PCIE_REF_USE_PAD;
1156 			regmap_write_4(gpr, off, reg);
1157 		} else {
1158 			reg = regmap_read_4(gpr, off);
1159 			reg &= ~IMX8MQ_GPR_PCIE_REF_USE_PAD;
1160 			regmap_write_4(gpr, off, reg);
1161 
1162 			regmap_write_4(anatop, ANATOP_PLLOUT_CTL,
1163 			    ANATOP_PLLOUT_CTL_CKE |
1164 			    ANATOP_PLLOUT_CTL_SEL_SYSPLL1);
1165 			regmap_write_4(anatop, ANATOP_PLLOUT_DIV,
1166 			    ANATOP_PLLOUT_DIV_SYSPLL1);
1167 		}
1168 	}
1169 
1170 	clock_enable(sc->sc_node, "pcie_phy");
1171 	clock_enable(sc->sc_node, "pcie_bus");
1172 	clock_enable(sc->sc_node, "pcie");
1173 	clock_enable(sc->sc_node, "pcie_aux");
1174 
1175 	/* Allow clocks to stabilize. */
1176 	delay(200);
1177 
1178 	if (reset_gpiolen > 0) {
1179 		gpio_controller_set_pin(reset_gpio, 1);
1180 		delay(100000);
1181 		gpio_controller_set_pin(reset_gpio, 0);
1182 	}
1183 
1184 	reset_deassert(sc->sc_node, "pciephy");
1185 
1186 	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
1187 		for (timo = 2000; timo > 0; timo--) {
1188 			if (regmap_read_4(phy, IMX8MM_PCIE_PHY_CMN_REG75) ==
1189 			    IMX8MM_PCIE_PHY_CMN_REG75_PLL_DONE)
1190 				break;
1191 			delay(10);
1192 		}
1193 		if (timo == 0) {
1194 			error = ETIMEDOUT;
1195 			goto err;
1196 		}
1197 	}
1198 
1199 	reg = HREAD4(sc, 0x100000 + PCIE_RC_LCR);
1200 	reg &= ~PCIE_RC_LCR_L1EL_MASK;
1201 	reg |= PCIE_RC_LCR_L1EL_64US;
1202 	HWRITE4(sc, 0x100000 + PCIE_RC_LCR, reg);
1203 
1204 	dwpcie_link_config(sc);
1205 
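	/* Bring the link up at Gen1 first; switch to Gen2 below once it trains. */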
1206 	reg = HREAD4(sc, PCIE_RC_LCR);
1207 	reg &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
1208 	reg |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
1209 	HWRITE4(sc, PCIE_RC_LCR, reg);
1210 
1211 	reset_deassert(sc->sc_node, "apps");
1212 
1213 	for (timo = 20000; timo > 0; timo--) {
1214 		if (dwpcie_link_up(sc))
1215 			break;
1216 		delay(10);
1217 	}
1218 	if (timo == 0) {
1219 		error = ETIMEDOUT;
1220 		goto err;
1221 	}
1222 
1223 	if (OF_getpropint(sc->sc_node, "fsl,max-link-speed", 1) >= 2) {
1224 		reg = HREAD4(sc, PCIE_RC_LCR);
1225 		reg &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
1226 		reg |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
1227 		HWRITE4(sc, PCIE_RC_LCR, reg);
1228 
1229 		reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
1230 		reg |= PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE;
1231 		HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
1232 
1233 		for (timo = 20000; timo > 0; timo--) {
1234 			if (dwpcie_link_up(sc))
1235 				break;
1236 			delay(10);
1237 		}
1238 		if (timo == 0) {
1239 			error = ETIMEDOUT;
1240 			goto err;
1241 		}
1242 	}
1243 
1244 	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_AUDIO | IPL_MPSAFE,
1245 	    dwpcie_imx8mq_intr, sc, sc->sc_dev.dv_xname);
1246 
1247 	/* Unmask INTx interrupts. */
1248 	HWRITE4(sc, PCIE_GLOBAL_INT_MASK,
1249 	    PCIE_GLOBAL_INT_MASK_INT_A | PCIE_GLOBAL_INT_MASK_INT_B |
1250 	    PCIE_GLOBAL_INT_MASK_INT_C | PCIE_GLOBAL_INT_MASK_INT_D);
1251 
1252 	error = 0;
1253 err:
1254 	if (clkreq_gpiolen > 0)
1255 		free(clkreq_gpio, M_TEMP, clkreq_gpiolen);
1256 	if (disable_gpiolen > 0)
1257 		free(disable_gpio, M_TEMP, disable_gpiolen);
1258 	if (reset_gpiolen > 0)
1259 		free(reset_gpio, M_TEMP, reset_gpiolen);
1260 	return error;
1261 }
1262 
1263 int
1264 dwpcie_imx8mq_intr(void *arg)
1265 {
1266 	struct dwpcie_softc *sc = arg;
1267 	uint32_t cause;
1268 
1269 	/* Acknowledge interrupts. */
1270 	cause = HREAD4(sc, PCIE_GLOBAL_INT_CAUSE);
1271 	HWRITE4(sc, PCIE_GLOBAL_INT_CAUSE, cause);
1272 
1273 	/* INTx interrupt, so not really ours. */
1274 	return 0;
1275 }
1276 
1277 int
1278 dwpcie_fu740_init(struct dwpcie_softc *sc)
1279 {
1280 	sc->sc_num_viewport = 8;
1281 
1282 	return 0;
1283 }
1284 
1285 int
1286 dwpcie_rk3568_link_up(struct dwpcie_softc *sc)
1287 {
1288 	uint32_t reg;
1289 
1290 	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh,
1291 	    PCIE_CLIENT_LTSSM_STATUS);
1292 	if ((reg & PCIE_CLIENT_SMLH_LINK_UP) &&
1293 	    (reg & PCIE_CLIENT_RDLH_LINK_UP) &&
1294 	    (reg & PCIE_CLIENT_LTSSM_MASK) == PCIE_CLIENT_LTSSM_UP)
1295 		return 1;
1296 	return 0;
1297 }
1298 
1299 int
1300 dwpcie_rk3568_init(struct dwpcie_softc *sc)
1301 {
1302 	uint32_t *reset_gpio;
1303 	ssize_t reset_gpiolen;
1304 	int error, idx, node;
1305 	int pin, timo;
1306 
1307 	sc->sc_num_viewport = 8;
1308 
1309 	if (bus_space_map(sc->sc_iot, sc->sc_glue_base,
1310 	    sc->sc_glue_size, 0, &sc->sc_glue_ioh))
1311 		return ENOMEM;
1312 
1313 	reset_assert_all(sc->sc_node);
1314 	/* Power must be enabled before initializing the PHY. */
1315 	regulator_enable(OF_getpropint(sc->sc_node, "vpcie3v3-supply", 0));
1316 	phy_enable(sc->sc_node, "pcie-phy");
1317 	reset_deassert_all(sc->sc_node);
1318 
1319 	clock_enable_all(sc->sc_node);
1320 
1321 	if (dwpcie_rk3568_link_up(sc))
1322 		return 0;
1323 
1324 	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
1325 	if (reset_gpiolen > 0) {
1326 		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
1327 		OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
1328 		    reset_gpiolen);
1329 		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
1330 		gpio_controller_set_pin(reset_gpio, 1);
1331 	}
1332 
1333 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1334 	    PCIE_CLIENT_HOT_RESET_CTRL, PCIE_CLIENT_APP_LTSSM_ENABLE_ENHANCE);
1335 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1336 	    PCIE_CLIENT_GENERAL_CON, PCIE_CLIENT_DEV_TYPE_RC);
1337 
1338 	/* Assert PERST#. */
1339 	if (reset_gpiolen > 0)
1340 		gpio_controller_set_pin(reset_gpio, 0);
1341 
1342 	dwpcie_link_config(sc);
1343 
1344 	/* Enable LTSSM. */
1345 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CLIENT_GENERAL_CON,
1346 	    PCIE_CLIENT_LINK_REQ_RST_GRT | PCIE_CLIENT_APP_LTSSM_ENABLE);
1347 
1348 	/*
1349 	 * PERST# must remain asserted for at least 100us after the
 1350 	 * reference clock becomes stable, and for at least 100ms after
 1351 	 * power up.  Since we may have just powered on the device,
 1352 	 * play it safe and use 100ms.
1353 	 */
1354 	delay(100000);
1355 
1356 	/* Deassert PERST#. */
1357 	if (reset_gpiolen > 0)
1358 		gpio_controller_set_pin(reset_gpio, 1);
1359 
1360 	/* Wait for the link to come up. */
1361 	for (timo = 100; timo > 0; timo--) {
1362 		if (dwpcie_rk3568_link_up(sc))
1363 			break;
1364 		delay(10000);
1365 	}
1366 	if (timo == 0) {
1367 		error = ETIMEDOUT;
1368 		goto err;
1369 	}
1370 
1371 	node = OF_getnodebyname(sc->sc_node, "legacy-interrupt-controller");
1372 	idx = OF_getindex(sc->sc_node, "legacy", "interrupt-names");
1373 	if (node && idx != -1) {
1374 		sc->sc_ih = fdt_intr_establish_idx(sc->sc_node, idx,
1375 		    IPL_BIO | IPL_MPSAFE, dwpcie_rk3568_intr, sc,
1376 		    sc->sc_dev.dv_xname);
1377 	}
1378 
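	/*
	 * If we managed to establish the legacy interrupt, register an
	 * interrupt controller so downstream INTx handlers are routed
	 * through dwpcie_rk3568_intr_establish().
	 */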
1379 	if (sc->sc_ih) {
1380 		for (pin = 0; pin < nitems(sc->sc_intx); pin++)
1381 			TAILQ_INIT(&sc->sc_intx[pin]);
1382 		sc->sc_ic.ic_node = node;
1383 		sc->sc_ic.ic_cookie = sc;
1384 		sc->sc_ic.ic_establish = dwpcie_rk3568_intr_establish;
1385 		sc->sc_ic.ic_disestablish = dwpcie_rk3568_intr_disestablish;
1386 		sc->sc_ic.ic_barrier = dwpcie_rk3568_intr_barrier;
1387 		fdt_intr_register(&sc->sc_ic);
1388 	}
1389 
1390 	error = 0;
1391 err:
1392 	if (reset_gpiolen > 0)
1393 		free(reset_gpio, M_TEMP, reset_gpiolen);
1394 
1395 	return error;
1396 }
1397 
1398 int
1399 dwpcie_rk3568_intr(void *arg)
1400 {
1401 	struct dwpcie_softc *sc = arg;
1402 	struct dwpcie_intx *di;
1403 	uint32_t status;
1404 	int pin, s;
1405 
1406 	status = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh,
1407 	    PCIE_CLIENT_INTR_STATUS_LEGACY);
1408 	for (pin = 0; pin < nitems(sc->sc_intx); pin++) {
1409 		if ((status & (1 << pin)) == 0)
1410 			continue;
1411 
1412 		TAILQ_FOREACH(di, &sc->sc_intx[pin], di_next) {
1413 			if ((di->di_flags & IPL_MPSAFE) == 0)
1414 				KERNEL_LOCK();
1415 			s = splraise(di->di_ipl);
1416 			if (di->di_func(di->di_arg))
1417 				di->di_count.ec_count++;
1418 			splx(s);
1419 			if ((di->di_flags & IPL_MPSAFE) == 0)
1420 				KERNEL_UNLOCK();
1421 		}
1422 	}
1423 
1424 	return 1;
1425 }
1426 
1427 void *
1428 dwpcie_rk3568_intr_establish(void *cookie, int *cell, int level,
1429     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
1430 {
1431 	struct dwpcie_softc *sc = (struct dwpcie_softc *)cookie;
1432 	struct dwpcie_intx *di;
1433 	int pin = cell[0];
1434 	uint32_t mask = (1U << pin);
1435 
1436 	if (ci != NULL && !CPU_IS_PRIMARY(ci))
1437 		return NULL;
1438 
1439 	if (pin < 0 || pin >= nitems(sc->sc_intx))
1440 		return NULL;
1441 
1442 	/* Mask the interrupt. */
1443 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1444 	    PCIE_CLIENT_INTR_MASK_LEGACY, (mask << 16) | mask);
1445 	intr_barrier(sc->sc_ih);
1446 
1447 	di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO);
1448 	di->di_func = func;
1449 	di->di_arg = arg;
1450 	di->di_ipl = level & IPL_IRQMASK;
1451 	di->di_flags = level & IPL_FLAGMASK;
1452 	di->di_pin = pin;
1453 	di->di_name = name;
1454 	if (name != NULL)
1455 		evcount_attach(&di->di_count, name, &di->di_pin);
1456 	di->di_sc = sc;
1457 	TAILQ_INSERT_TAIL(&sc->sc_intx[pin], di, di_next);
1458 
1459 	/* Unmask the interrupt. */
1460 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1461 	    PCIE_CLIENT_INTR_MASK_LEGACY, mask << 16);
1462 
1463 	return di;
1464 }
1465 
1466 void
1467 dwpcie_rk3568_intr_disestablish(void *cookie)
1468 {
1469 	struct dwpcie_intx *di = cookie;
1470 	struct dwpcie_softc *sc = di->di_sc;
1471 	uint32_t mask = (1U << di->di_pin);
1472 
1473 	/* Mask the interrupt. */
1474 	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1475 	    PCIE_CLIENT_INTR_MASK_LEGACY, (mask << 16) | mask);
1476 	intr_barrier(sc->sc_ih);
1477 
1478 	if (di->di_name)
1479 		evcount_detach(&di->di_count);
1480 
1481 	TAILQ_REMOVE(&sc->sc_intx[di->di_pin], di, di_next);
1482 
1483 	if (!TAILQ_EMPTY(&sc->sc_intx[di->di_pin])) {
1484 		/* Unmask the interrupt. */
1485 		bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
1486 		    PCIE_CLIENT_INTR_MASK_LEGACY, mask << 16);
1487 	}
1488 
1489 	free(di, M_DEVBUF, sizeof(*di));
1490 }
1491 
1492 void
1493 dwpcie_rk3568_intr_barrier(void *cookie)
1494 {
1495 	struct dwpcie_intx *di = cookie;
1496 	struct dwpcie_softc *sc = di->di_sc;
1497 
1498 	intr_barrier(sc->sc_ih);
1499 }
1500 
1501 int
1502 dwpcie_sc8280xp_init(struct dwpcie_softc *sc)
1503 {
1504 	sc->sc_num_viewport = 8;
1505 
1506 	return 0;
1507 }
1508 
1509 void
1510 dwpcie_atu_write(struct dwpcie_softc *sc, int index, off_t reg,
1511     uint32_t val)
1512 {
1513 	if (sc->sc_atu_unroll) {
1514 		bus_space_write_4(sc->sc_iot, sc->sc_atu_ioh,
1515 		    IATU_OFFSET_UNROLL(index) + reg, val);
1516 		return;
1517 	}
1518 
1519 	if (sc->sc_atu_viewport != index) {
1520 		HWRITE4(sc, IATU_VIEWPORT, index);
1521 		sc->sc_atu_viewport = index;
1522 	}
1523 
1524 	HWRITE4(sc, IATU_OFFSET_VIEWPORT + reg, val);
1525 }
1526 
1527 uint32_t
1528 dwpcie_atu_read(struct dwpcie_softc *sc, int index, off_t reg)
1529 {
1530 	if (sc->sc_atu_unroll) {
1531 		return bus_space_read_4(sc->sc_iot, sc->sc_atu_ioh,
1532 		    IATU_OFFSET_UNROLL(index) + reg);
1533 	}
1534 
1535 	if (sc->sc_atu_viewport != index) {
1536 		HWRITE4(sc, IATU_VIEWPORT, index);
1537 		sc->sc_atu_viewport = index;
1538 	}
1539 
1540 	return HREAD4(sc, IATU_OFFSET_VIEWPORT + reg);
1541 }
1542 
1543 void
1544 dwpcie_atu_disable(struct dwpcie_softc *sc, int index)
1545 {
1546 	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_2, 0);
1547 }
1548 
1549 void
1550 dwpcie_atu_config(struct dwpcie_softc *sc, int index, int type,
1551     uint64_t cpu_addr, uint64_t pci_addr, uint64_t size)
1552 {
1553 	uint32_t reg;
1554 	int timo;
1555 
1556 	dwpcie_atu_write(sc, index, IATU_LWR_BASE_ADDR, cpu_addr);
1557 	dwpcie_atu_write(sc, index, IATU_UPPER_BASE_ADDR, cpu_addr >> 32);
1558 	dwpcie_atu_write(sc, index, IATU_LIMIT_ADDR, cpu_addr + size - 1);
1559 	dwpcie_atu_write(sc, index, IATU_LWR_TARGET_ADDR, pci_addr);
1560 	dwpcie_atu_write(sc, index, IATU_UPPER_TARGET_ADDR, pci_addr >> 32);
1561 	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_1, type);
1562 	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_2,
1563 	    IATU_REGION_CTRL_2_REGION_EN);
1564 
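	/* Wait for the hardware to report the region as enabled. */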
1565 	for (timo = 5; timo > 0; timo--) {
1566 		reg = dwpcie_atu_read(sc, index, IATU_REGION_CTRL_2);
1567 		if (reg & IATU_REGION_CTRL_2_REGION_EN)
1568 			break;
1569 		delay(9000);
1570 	}
1571 	if (timo == 0)
1572 		printf("%s:%d: timeout\n", __func__, __LINE__);
1573 }
1574 
1575 int
1576 dwpcie_link_up(struct dwpcie_softc *sc)
1577 {
1578 	uint32_t reg;
1579 
1580 	reg = HREAD4(sc, PCIE_PHY_DEBUG_R1);
1581 	if ((reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) != 0 &&
1582 	    (reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING) == 0)
1583 		return 1;
1584 	return 0;
1585 }
1586 
1587 void
1588 dwpcie_attach_hook(struct device *parent, struct device *self,
1589     struct pcibus_attach_args *pba)
1590 {
1591 }
1592 
1593 int
1594 dwpcie_bus_maxdevs(void *v, int bus)
1595 {
1596 	struct dwpcie_softc *sc = v;
1597 
1598 	if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
1599 		return 1;
1600 	return 32;
1601 }
1602 
1603 int
1604 dwpcie_find_node(int node, int bus, int device, int function)
1605 {
1606 	uint32_t reg[5];
1607 	uint32_t phys_hi;
1608 	int child;
1609 
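	/* Match bus/device/function against the phys.hi cell of each child's "reg". */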
1610 	phys_hi = ((bus << 16) | (device << 11) | (function << 8));
1611 
1612 	for (child = OF_child(node); child; child = OF_peer(child)) {
1613 		if (OF_getpropintarray(child, "reg",
1614 		    reg, sizeof(reg)) != sizeof(reg))
1615 			continue;
1616 
1617 		if (reg[0] == phys_hi)
1618 			return child;
1619 
1620 		node = dwpcie_find_node(child, bus, device, function);
1621 		if (node)
1622 			return node;
1623 	}
1624 
1625 	return 0;
1626 }
1627 
1628 pcitag_t
1629 dwpcie_make_tag(void *v, int bus, int device, int function)
1630 {
1631 	struct dwpcie_softc *sc = v;
1632 	int node;
1633 
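	/* Stash the matching OF node in the upper half of the tag. */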
1634 	node = dwpcie_find_node(sc->sc_node, bus, device, function);
1635 	return (((pcitag_t)node << 32) |
1636 	    (bus << 24) | (device << 19) | (function << 16));
1637 }
1638 
1639 void
1640 dwpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
1641 {
1642 	if (bp != NULL)
1643 		*bp = (tag >> 24) & 0xff;
1644 	if (dp != NULL)
1645 		*dp = (tag >> 19) & 0x1f;
1646 	if (fp != NULL)
1647 		*fp = (tag >> 16) & 0x7;
1648 }
1649 
1650 int
1651 dwpcie_conf_size(void *v, pcitag_t tag)
1652 {
1653 	return PCIE_CONFIG_SPACE_SIZE;
1654 }
1655 
1656 pcireg_t
1657 dwpcie_conf_read(void *v, pcitag_t tag, int reg)
1658 {
1659 	struct dwpcie_softc *sc = v;
1660 	int bus, dev, fn;
1661 	uint32_t ret;
1662 
1663 	dwpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
1664 	if (bus == sc->sc_bus) {
1665 		KASSERT(dev == 0);
1666 		tag = dwpcie_make_tag(sc, 0, dev, fn);
1667 		return HREAD4(sc, PCITAG_OFFSET(tag) | reg);
1668 	}
1669 
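	/*
	 * Reuse viewport 1 for configuration cycles: Type 0 for the bus
	 * directly below the Root Port, Type 1 for buses further down.
	 */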
1670 	if (bus == sc->sc_bus + 1) {
1671 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1672 		    IATU_REGION_CTRL_1_TYPE_CFG0,
1673 		    sc->sc_conf_base, PCITAG_OFFSET(tag),
1674 		    sc->sc_conf_size);
1675 	} else {
1676 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1677 		    IATU_REGION_CTRL_1_TYPE_CFG1,
1678 		    sc->sc_conf_base, PCITAG_OFFSET(tag),
1679 		    sc->sc_conf_size);
1680 	}
1681 
1682 	ret = bus_space_read_4(sc->sc_iot, sc->sc_conf_ioh, reg);
1683 
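	/*
	 * With only two viewports, viewport 1 doubles as the I/O window,
	 * so restore the I/O mapping after the configuration access.
	 */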
1684 	if (sc->sc_num_viewport <= 2 && sc->sc_io_size > 0) {
1685 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1686 		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
1687 		    sc->sc_io_bus_addr, sc->sc_io_size);
1688 	}
1689 
1690 	return ret;
1691 }
1692 
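/* Config space write; same viewport handling as dwpcie_conf_read(). */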
1693 void
1694 dwpcie_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
1695 {
1696 	struct dwpcie_softc *sc = v;
1697 	int bus, dev, fn;
1698 
1699 	dwpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
1700 	if (bus == sc->sc_bus) {
1701 		KASSERT(dev == 0);
1702 		tag = dwpcie_make_tag(sc, 0, dev, fn);
1703 		HWRITE4(sc, PCITAG_OFFSET(tag) | reg, data);
1704 		return;
1705 	}
1706 
1707 	if (bus == sc->sc_bus + 1) {
1708 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1709 		    IATU_REGION_CTRL_1_TYPE_CFG0,
1710 		    sc->sc_conf_base, PCITAG_OFFSET(tag),
1711 		    sc->sc_conf_size);
1712 	} else {
1713 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1714 		    IATU_REGION_CTRL_1_TYPE_CFG1,
1715 		    sc->sc_conf_base, PCITAG_OFFSET(tag),
1716 		    sc->sc_conf_size);
1717 	}
1718 
1719 	bus_space_write_4(sc->sc_iot, sc->sc_conf_ioh, reg, data);
1720 
1721 	if (sc->sc_num_viewport <= 2 && sc->sc_io_size > 0) {
1722 		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
1723 		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
1724 		    sc->sc_io_bus_addr, sc->sc_io_size);
1725 	}
1726 }
1727 
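/*
 * Per-device attach hook: hand out an IOMMU-aware DMA tag keyed on the
 * requester ID and reserve the bus addresses used by our outbound
 * ranges so DMA translations do not collide with them.
 */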
1728 int
1729 dwpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
1730 {
1731 	struct dwpcie_softc *sc = v;
1732 	uint16_t rid;
1733 	int i;
1734 
1735 	rid = pci_requester_id(pa->pa_pc, pa->pa_tag);
1736 	pa->pa_dmat = iommu_device_map_pci(sc->sc_node, rid, pa->pa_dmat);
1737 
1738 	for (i = 0; i < sc->sc_nranges; i++) {
1739 		iommu_reserve_region_pci(sc->sc_node, rid,
1740 		    sc->sc_ranges[i].pci_base, sc->sc_ranges[i].size);
1741 	}
1742 
1743 	return 0;
1744 }
1745 
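/* Map a legacy INTx interrupt after validating the interrupt pin. */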
1746 int
1747 dwpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
1748 {
1749 	int pin = pa->pa_rawintrpin;
1750 
1751 	if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
1752 		return -1;
1753 
1754 	if (pa->pa_tag == 0)
1755 		return -1;
1756 
1757 	ihp->ih_pc = pa->pa_pc;
1758 	ihp->ih_tag = pa->pa_intrtag;
1759 	ihp->ih_intrpin = pa->pa_intrpin;
1760 	ihp->ih_type = PCI_INTX;
1761 
1762 	return 0;
1763 }
1764 
1765 const char *
1766 dwpcie_intr_string(void *v, pci_intr_handle_t ih)
1767 {
1768 	switch (ih.ih_type) {
1769 	case PCI_MSI:
1770 		return "msi";
1771 	case PCI_MSIX:
1772 		return "msix";
1773 	}
1774 
1775 	return "intx";
1776 }
1777 
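/*
 * Allocate a free vector on the internal MSI controller, record the
 * handler and unmask the vector.
 */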
1778 struct dwpcie_msi *
1779 dwpcie_msi_establish(struct dwpcie_softc *sc, int level,
1780     int (*func)(void *), void *arg, char *name)
1781 {
1782 	struct dwpcie_msi *dm;
1783 	int vec;
1784 
1785 	for (vec = 0; vec < DWPCIE_NUM_MSI; vec++) {
1786 		dm = &sc->sc_msi[vec];
1787 		if (dm->dm_func == NULL)
1788 			break;
1789 	}
1790 	if (vec == DWPCIE_NUM_MSI)
1791 		return NULL;
1792 
1793 	dm->dm_func = func;
1794 	dm->dm_arg = arg;
1795 	dm->dm_ipl = level & IPL_IRQMASK;
1796 	dm->dm_flags = level & IPL_FLAGMASK;
1797 	dm->dm_vec = vec;
1798 	dm->dm_name = name;
1799 	if (name != NULL)
1800 		evcount_attach(&dm->dm_count, name, &dm->dm_vec);
1801 
1802 	/* Unmask the MSI. */
1803 	HCLR4(sc, PCIE_MSI_INTR0_MASK, (1U << vec));
1804 
1805 	return dm;
1806 }
1807 
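/* Mask the vector and release it. */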
1808 void
1809 dwpcie_msi_disestablish(struct dwpcie_softc *sc, struct dwpcie_msi *dm)
1810 {
1811 	/* Mask the MSI. */
1812 	HSET4(sc, PCIE_MSI_INTR0_MASK, (1U << dm->dm_vec));
1813 
1814 	if (dm->dm_name)
1815 		evcount_detach(&dm->dm_count);
1816 	dm->dm_func = NULL;
1817 }
1818 
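/*
 * Establish an interrupt.  MSI/MSI-X either targets the internal MSI
 * doorbell (a single vector per device for plain MSI) or is delegated
 * to an external MSI controller via the device tree, in which case the
 * doorbell address is wrapped in a dummy DMA map so it gets translated
 * like any other bus address before it is programmed into the device.
 * Legacy INTx is routed through the FDT interrupt map.
 */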
1819 void *
1820 dwpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
1821     struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
1822 {
1823 	struct dwpcie_softc *sc = v;
1824 	struct dwpcie_intr_handle *pih;
1825 	void *cookie = NULL;
1826 
1827 	KASSERT(ih.ih_type != PCI_NONE);
1828 
1829 	if (ih.ih_type != PCI_INTX) {
1830 		struct dwpcie_msi *dm = NULL;
1831 		bus_dma_tag_t dmat = ih.ih_dmat;
1832 		bus_dma_segment_t seg;
1833 		bus_dmamap_t map;
1834 		uint64_t addr, data;
1835 
1836 		if (sc->sc_msi_addr) {
1837 			if (ih.ih_type == PCI_MSI && ih.ih_intrpin > 0)
1838 				return NULL;
1839 			dm = dwpcie_msi_establish(sc, level, func, arg, name);
1840 			if (dm == NULL)
1841 				return NULL;
1842 			addr = sc->sc_msi_addr;
1843 			data = dm->dm_vec;
1844 		} else {
1845 			/*
1846 			 * Assume hardware passes Requester ID as
1847 			 * sideband data.
1848 			 */
1849 			addr = ih.ih_intrpin;
1850 			data = pci_requester_id(ih.ih_pc, ih.ih_tag);
1851 			cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
1852 			    &data, level, ci, func, arg, (void *)name);
1853 			if (cookie == NULL)
1854 				return NULL;
1855 		}
1856 
1857 		pih = malloc(sizeof(*pih), M_DEVBUF, M_WAITOK | M_ZERO);
1858 		pih->pih_ih.ih_ic = &dwpcie_ic;
1859 		pih->pih_ih.ih_ih = cookie;
1860 		pih->pih_sc = sc;
1861 		pih->pih_dm = dm;
1862 
1863 		if (sc->sc_msi_addr == 0) {
1864 			if (bus_dmamap_create(dmat, sizeof(uint32_t), 1,
1865 			    sizeof(uint32_t), 0, BUS_DMA_WAITOK, &map)) {
1866 				free(pih, M_DEVBUF, sizeof(*pih));
1867 				fdt_intr_disestablish(cookie);
1868 				return NULL;
1869 			}
1870 
1871 			memset(&seg, 0, sizeof(seg));
1872 			seg.ds_addr = addr;
1873 			seg.ds_len = sizeof(uint32_t);
1874 
1875 			if (bus_dmamap_load_raw(dmat, map, &seg, 1,
1876 			    sizeof(uint32_t), BUS_DMA_WAITOK)) {
1877 				bus_dmamap_destroy(dmat, map);
1878 				free(pih, M_DEVBUF, sizeof(*pih));
1879 				fdt_intr_disestablish(cookie);
1880 				return NULL;
1881 			}
1882 
1883 			addr = map->dm_segs[0].ds_addr;
1884 			pih->pih_dmat = dmat;
1885 			pih->pih_map = map;
1886 		}
1887 
1888 		if (ih.ih_type == PCI_MSIX) {
1889 			pci_msix_enable(ih.ih_pc, ih.ih_tag,
1890 			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
1891 		} else
1892 			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
1893 	} else {
1894 		int bus, dev, fn;
1895 		uint32_t reg[4];
1896 
1897 		dwpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);
1898 
1899 		reg[0] = bus << 16 | dev << 11 | fn << 8;
1900 		reg[1] = reg[2] = 0;
1901 		reg[3] = ih.ih_intrpin;
1902 
1903 		cookie = fdt_intr_establish_imap_cpu(sc->sc_node, reg,
1904 		    sizeof(reg), level, ci, func, arg, name);
1905 		if (cookie == NULL)
1906 			return NULL;
1907 
1908 		pih = malloc(sizeof(*pih), M_DEVBUF, M_WAITOK | M_ZERO);
1909 		pih->pih_ih.ih_ic = &dwpcie_ic;
1910 		pih->pih_ih.ih_ih = cookie;
1911 	}
1912 
1913 	return pih;
1914 }
1915 
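/* Tear down an interrupt and release the doorbell DMA map, if any. */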
1916 void
1917 dwpcie_intr_disestablish(void *v, void *cookie)
1918 {
1919 	struct dwpcie_intr_handle *pih = cookie;
1920 
1921 	if (pih->pih_dm)
1922 		dwpcie_msi_disestablish(pih->pih_sc, pih->pih_dm);
1923 	else
1924 		fdt_intr_disestablish(pih->pih_ih.ih_ih);
1925 
1926 	if (pih->pih_dmat) {
1927 		bus_dmamap_unload(pih->pih_dmat, pih->pih_map);
1928 		bus_dmamap_destroy(pih->pih_dmat, pih->pih_map);
1929 	}
1930 
1931 	free(pih, M_DEVBUF, sizeof(*pih));
1932 }
1933 
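/*
 * Map a PCI I/O space address: find the I/O range (space code 0x01 in
 * the phys.hi flags of "ranges") that covers it and map the
 * corresponding CPU address.
 */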
1934 int
1935 dwpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
1936     int flags, bus_space_handle_t *bshp)
1937 {
1938 	struct dwpcie_softc *sc = t->bus_private;
1939 	int i;
1940 
1941 	for (i = 0; i < sc->sc_nranges; i++) {
1942 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
1943 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
1944 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
1945 
1946 		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
1947 		    addr >= pci_start && addr + size <= pci_end) {
1948 			return bus_space_map(sc->sc_iot,
1949 			    addr - pci_start + phys_start, size, flags, bshp);
1950 		}
1951 	}
1952 
1953 	return ENXIO;
1954 }
1955 
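/*
 * Map a PCI memory space address: same as above, but matching 32-bit
 * and 64-bit memory ranges.
 */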
1956 int
1957 dwpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
1958     int flags, bus_space_handle_t *bshp)
1959 {
1960 	struct dwpcie_softc *sc = t->bus_private;
1961 	int i;
1962 
1963 	for (i = 0; i < sc->sc_nranges; i++) {
1964 		uint64_t pci_start = sc->sc_ranges[i].pci_base;
1965 		uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
1966 		uint64_t phys_start = sc->sc_ranges[i].phys_base;
1967 
1968 		if ((sc->sc_ranges[i].flags & 0x02000000) == 0x02000000 &&
1969 		    addr >= pci_start && addr + size <= pci_end) {
1970 			return bus_space_map(sc->sc_iot,
1971 			    addr - pci_start + phys_start, size, flags, bshp);
1972 		}
1973 	}
1974 
1975 	return ENXIO;
1976 }
1977