1 /* $OpenBSD: dwpcie.c,v 1.57 2024/09/01 03:08:56 jsg Exp $ */
2 /*
3 * Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/device.h>
21 #include <sys/evcount.h>
22 #include <sys/extent.h>
23 #include <sys/malloc.h>
24
25 #include <machine/intr.h>
26 #include <machine/bus.h>
27 #include <machine/fdt.h>
28
29 #include <dev/pci/pcidevs.h>
30 #include <dev/pci/pcireg.h>
31 #include <dev/pci/pcivar.h>
32 #include <dev/pci/ppbreg.h>
33
34 #include <dev/ofw/openfirm.h>
35 #include <dev/ofw/ofw_clock.h>
36 #include <dev/ofw/ofw_gpio.h>
37 #include <dev/ofw/ofw_misc.h>
38 #include <dev/ofw/ofw_pinctrl.h>
39 #include <dev/ofw/ofw_power.h>
40 #include <dev/ofw/ofw_regulator.h>
41 #include <dev/ofw/fdt.h>
42
/*
 * Synopsys DesignWare PCIe core registers, accessed through the
 * DBI ("ctrl") register window.
 */
#define PCIE_PORT_LINK_CTRL			0x710
#define  PCIE_PORT_LINK_CTRL_LANES_MASK		(0x3f << 16)
#define  PCIE_PORT_LINK_CTRL_LANES_1		(0x1 << 16)
#define  PCIE_PORT_LINK_CTRL_LANES_2		(0x3 << 16)
#define  PCIE_PORT_LINK_CTRL_LANES_4		(0x7 << 16)
#define  PCIE_PORT_LINK_CTRL_LANES_8		(0xf << 16)
#define PCIE_PHY_DEBUG_R1			0x72c
#define  PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
#define  PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)
#define PCIE_LINK_WIDTH_SPEED_CTRL		0x80c
#define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_MASK	(0x1f << 8)
#define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_1	(0x1 << 8)
#define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_2	(0x2 << 8)
#define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_4	(0x4 << 8)
#define  PCIE_LINK_WIDTH_SPEED_CTRL_LANES_8	(0x8 << 8)
#define  PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE	(1 << 17)

/*
 * Integrated MSI controller.  Vectors are organized in groups of 32;
 * each group has its own enable/mask/status registers at a stride of
 * 12 bytes (hence the "(x) * 12" below, with x the group index).
 */
#define PCIE_MSI_ADDR_LO			0x820
#define PCIE_MSI_ADDR_HI			0x824
#define PCIE_MSI_INTR_ENABLE(x)			(0x828 + (x) * 12)
#define PCIE_MSI_INTR_MASK(x)			(0x82c + (x) * 12)
#define PCIE_MSI_INTR_STATUS(x)			(0x830 + (x) * 12)

#define MISC_CONTROL_1				0x8bc
#define  MISC_CONTROL_1_DBI_RO_WR_EN		(1 << 0)
/*
 * iATU (internal Address Translation Unit) registers.  Older cores
 * use an indirect "viewport" scheme: write the region index to
 * IATU_VIEWPORT, then access the region registers at a fixed offset.
 * Newer cores are "unrolled": each region's registers appear at a
 * separate 0x200-byte slot in a dedicated register space.
 */
#define IATU_VIEWPORT				0x900
#define  IATU_VIEWPORT_INDEX0			0
#define  IATU_VIEWPORT_INDEX1			1
#define  IATU_VIEWPORT_INDEX2			2
#define  IATU_VIEWPORT_INDEX3			3
#define IATU_OFFSET_VIEWPORT			0x904
#define IATU_OFFSET_UNROLL(x)			(0x200 * (x))
#define IATU_REGION_CTRL_1			0x000
#define  IATU_REGION_CTRL_1_TYPE_MEM		0
#define  IATU_REGION_CTRL_1_TYPE_IO		2
#define  IATU_REGION_CTRL_1_TYPE_CFG0		4
#define  IATU_REGION_CTRL_1_TYPE_CFG1		5
#define IATU_REGION_CTRL_2			0x004
#define  IATU_REGION_CTRL_2_REGION_EN		(1U << 31)
#define IATU_LWR_BASE_ADDR			0x08
#define IATU_UPPER_BASE_ADDR			0x0c
#define IATU_LIMIT_ADDR				0x10
#define IATU_LWR_TARGET_ADDR			0x14
#define IATU_UPPER_TARGET_ADDR			0x18
88
/* Marvell ARMADA 8k registers (SoC glue, also in the DBI window). */
#define PCIE_GLOBAL_CTRL			0x8000
#define  PCIE_GLOBAL_CTRL_APP_LTSSM_EN		(1 << 2)
#define  PCIE_GLOBAL_CTRL_DEVICE_TYPE_MASK	(0xf << 4)
#define  PCIE_GLOBAL_CTRL_DEVICE_TYPE_RC	(0x4 << 4)
#define PCIE_GLOBAL_STATUS			0x8008
#define  PCIE_GLOBAL_STATUS_RDLH_LINK_UP	(1 << 1)
#define  PCIE_GLOBAL_STATUS_PHY_LINK_UP		(1 << 9)
#define PCIE_PM_STATUS				0x8014
#define PCIE_GLOBAL_INT_CAUSE			0x801c
#define PCIE_GLOBAL_INT_MASK			0x8020
#define  PCIE_GLOBAL_INT_MASK_INT_A		(1 << 9)
#define  PCIE_GLOBAL_INT_MASK_INT_B		(1 << 10)
#define  PCIE_GLOBAL_INT_MASK_INT_C		(1 << 11)
#define  PCIE_GLOBAL_INT_MASK_INT_D		(1 << 12)
#define PCIE_ARCACHE_TRC			0x8050
#define  PCIE_ARCACHE_TRC_DEFAULT		0x3511
#define PCIE_AWCACHE_TRC			0x8054
#define  PCIE_AWCACHE_TRC_DEFAULT		0x5311
#define PCIE_ARUSER				0x805c
#define PCIE_AWUSER				0x8060
#define  PCIE_AXUSER_DOMAIN_MASK		(0x3 << 4)
#define  PCIE_AXUSER_DOMAIN_INNER_SHARABLE	(0x1 << 4)
#define  PCIE_AXUSER_DOMAIN_OUTER_SHARABLE	(0x2 << 4)
/* Requester-ID to SMMU Stream-ID mapping; see dwpcie_armada8k_init(). */
#define PCIE_STREAMID				0x8064
#define  PCIE_STREAMID_FUNC_BITS(x)		((x) << 0)
#define  PCIE_STREAMID_DEV_BITS(x)		((x) << 4)
#define  PCIE_STREAMID_BUS_BITS(x)		((x) << 8)
#define  PCIE_STREAMID_ROOTPORT(x)		((x) << 12)
#define  PCIE_STREAMID_8040 \
    (PCIE_STREAMID_ROOTPORT(0x80) | PCIE_STREAMID_BUS_BITS(2) | \
     PCIE_STREAMID_DEV_BITS(2) | PCIE_STREAMID_FUNC_BITS(3))

/* Amlogic G12A registers (separate "cfg" glue window). */
#define PCIE_CFG0				0x0000
#define  PCIE_CFG0_APP_LTSSM_EN			(1 << 7)
#define PCIE_STATUS12				0x0030
#define  PCIE_STATUS12_RDLH_LINK_UP		(1 << 16)
#define  PCIE_STATUS12_LTSSM_MASK		(0x1f << 10)
#define  PCIE_STATUS12_LTSSM_UP			(0x11 << 10)
#define  PCIE_STATUS12_SMLH_LINK_UP		(1 << 6)

/* NXP i.MX8MQ registers */
#define PCIE_RC_LCR				0x7c
#define  PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define  PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define  PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf
#define  PCIE_RC_LCR_L1EL_MASK			(0x7 << 15)
#define  PCIE_RC_LCR_L1EL_64US			(0x6 << 15)

/* i.MX8M IOMUXC general purpose registers (external syscon). */
#define IOMUXC_GPR12				0x30
#define  IMX8MQ_GPR_PCIE2_DEVICE_TYPE_MASK	(0xf << 8)
#define  IMX8MQ_GPR_PCIE2_DEVICE_TYPE_RC	(0x4 << 8)
#define  IMX8MQ_GPR_PCIE1_DEVICE_TYPE_MASK	(0xf << 12)
#define  IMX8MQ_GPR_PCIE1_DEVICE_TYPE_RC	(0x4 << 12)
#define IOMUXC_GPR14				0x38
#define IOMUXC_GPR16				0x40
#define  IMX8MQ_GPR_PCIE_REF_USE_PAD		(1 << 9)
#define  IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN	(1 << 10)
#define  IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE	(1 << 11)
#define  IMX8MM_GPR_PCIE_SSC_EN			(1 << 16)
#define  IMX8MM_GPR_PCIE_POWER_OFF		(1 << 17)
#define  IMX8MM_GPR_PCIE_CMN_RST		(1 << 18)
#define  IMX8MM_GPR_PCIE_AUX_EN			(1 << 19)
#define  IMX8MM_GPR_PCIE_REF_CLK_MASK		(0x3 << 24)
#define  IMX8MM_GPR_PCIE_REF_CLK_PLL		(0x3 << 24)
#define  IMX8MM_GPR_PCIE_REF_CLK_EXT		(0x2 << 24)

/* i.MX8MM PCIe PHY registers. */
#define IMX8MM_PCIE_PHY_CMN_REG62		0x188
#define  IMX8MM_PCIE_PHY_CMN_REG62_PLL_CLK_OUT		0x08
#define IMX8MM_PCIE_PHY_CMN_REG64		0x190
#define  IMX8MM_PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM	0x8c
#define IMX8MM_PCIE_PHY_CMN_REG75		0x1d4
#define  IMX8MM_PCIE_PHY_CMN_REG75_PLL_DONE		0x3
#define IMX8MM_PCIE_PHY_TRSV_REG5		0x414
#define  IMX8MM_PCIE_PHY_TRSV_REG5_GEN1_DEEMP		0x2d
#define IMX8MM_PCIE_PHY_TRSV_REG6		0x418
#define  IMX8MM_PCIE_PHY_TRSV_REG6_GEN2_DEEMP		0xf

/* i.MX8M ANATOP (analog/PLL) registers. */
#define ANATOP_PLLOUT_CTL			0x74
#define  ANATOP_PLLOUT_CTL_CKE			(1 << 4)
#define  ANATOP_PLLOUT_CTL_SEL_SYSPLL1		0xb
#define  ANATOP_PLLOUT_CTL_SEL_MASK		0xf
#define ANATOP_PLLOUT_DIV			0x7c
#define  ANATOP_PLLOUT_DIV_SYSPLL1		0x7

/*
 * Rockchip RK3568/RK3588 registers ("apb" glue window).  These follow
 * the usual Rockchip convention where the upper 16 bits of a written
 * value act as a write-enable mask for the lower 16 bits.
 */
#define PCIE_CLIENT_GENERAL_CON			0x0000
#define  PCIE_CLIENT_DEV_TYPE_RC		((0xf << 4) << 16 | (0x4 << 4))
#define  PCIE_CLIENT_LINK_REQ_RST_GRT		((1 << 3) << 16 | (1 << 3))
#define  PCIE_CLIENT_APP_LTSSM_ENABLE		((1 << 2) << 16 | (1 << 2))
#define PCIE_CLIENT_INTR_STATUS_LEGACY		0x0008
#define PCIE_CLIENT_INTR_MASK_LEGACY		0x001c
#define PCIE_CLIENT_HOT_RESET_CTRL		0x0180
#define  PCIE_CLIENT_APP_LTSSM_ENABLE_ENHANCE	((1 << 4) << 16 | (1 << 4))
#define PCIE_CLIENT_LTSSM_STATUS		0x0300
#define  PCIE_CLIENT_RDLH_LINK_UP		(1 << 17)
#define  PCIE_CLIENT_SMLH_LINK_UP		(1 << 16)
#define  PCIE_CLIENT_LTSSM_MASK			(0x1f << 0)
#define  PCIE_CLIENT_LTSSM_UP			(0x11 << 0)
189
/*
 * Register access helpers for the DBI ("ctrl") window mapped at
 * sc_ioh.  HSET4/HCLR4 do non-atomic read-modify-write cycles.
 */
#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))
198
/*
 * One decoded entry of the device tree "ranges" property: maps a
 * region of PCI bus address space to CPU physical address space.
 */
struct dwpcie_range {
	uint32_t		flags;		/* OF PCI address flags cell */
	uint64_t		pci_base;	/* address on the PCI bus */
	uint64_t		phys_base;	/* CPU physical address */
	uint64_t		size;		/* region size in bytes */
};
205
/* State for one established INTx (legacy) interrupt handler. */
struct dwpcie_intx {
	int			(*di_func)(void *);	/* handler function */
	void			*di_arg;		/* handler argument */
	int			di_ipl;			/* interrupt priority level */
	int			di_flags;		/* IPL_* flags, e.g. IPL_MPSAFE */
	int			di_pin;			/* interrupt pin */
	struct evcount		di_count;		/* interrupt statistics */
	char			*di_name;		/* name shown in statistics */
	struct dwpcie_softc	*di_sc;			/* back pointer to softc */
	TAILQ_ENTRY(dwpcie_intx) di_next;		/* entry on an sc_intx[] list */
};
217
/* Maximum MSI vectors supported by this driver: two groups of 32. */
#define DWPCIE_MAX_MSI		64

/* State for one established MSI handler. */
struct dwpcie_msi {
	int			(*dm_func)(void *);	/* handler function */
	void			*dm_arg;		/* handler argument */
	int			dm_ipl;			/* interrupt priority level */
	int			dm_flags;		/* IPL_* flags, e.g. IPL_MPSAFE */
	int			dm_vec;			/* vector number (assigned at establish; not visible here) */
	int			dm_nvec;		/* number of contiguous vectors */
	struct evcount		dm_count;		/* interrupt statistics */
	char			*dm_name;		/* name shown in statistics */
};
230
/* Per-controller driver state. */
struct dwpcie_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;		/* mapped DBI registers */
	bus_dma_tag_t		sc_dmat;

	/* DBI ("ctrl") register region. */
	bus_addr_t		sc_ctrl_base;
	bus_size_t		sc_ctrl_size;

	/* Window through which PCI config cycles are issued. */
	bus_addr_t		sc_conf_base;
	bus_size_t		sc_conf_size;
	bus_space_handle_t	sc_conf_ioh;

	/* SoC-specific glue registers (Amlogic "cfg", Rockchip "apb"). */
	bus_addr_t		sc_glue_base;
	bus_size_t		sc_glue_size;
	bus_space_handle_t	sc_glue_ioh;

	/* Unrolled iATU register region (only mapped if sc_atu_unroll). */
	bus_addr_t		sc_atu_base;
	bus_size_t		sc_atu_size;
	bus_space_handle_t	sc_atu_ioh;

	/* CPU-side windows: PCI I/O, memory and prefetchable memory. */
	bus_addr_t		sc_io_base;
	bus_addr_t		sc_io_bus_addr;
	bus_size_t		sc_io_size;
	bus_addr_t		sc_mem_base;
	bus_addr_t		sc_mem_bus_addr;
	bus_size_t		sc_mem_size;
	bus_addr_t		sc_pmem_base;
	bus_addr_t		sc_pmem_bus_addr;
	bus_size_t		sc_pmem_size;

	/* Device tree node, cell geometry and decoded "ranges". */
	int			sc_node;
	int			sc_acells;
	int			sc_scells;
	int			sc_pacells;
	int			sc_pscells;
	struct dwpcie_range	*sc_ranges;
	int			sc_nranges;

	/* Bus space tags handed down to child PCI devices. */
	struct bus_space	sc_bus_iot;
	struct bus_space	sc_bus_memt;

	struct machine_pci_chipset sc_pc;
	int			sc_bus;		/* root bus number */

	int			sc_num_viewport;	/* usable iATU viewports */
	int			sc_atu_unroll;		/* nonzero: unrolled iATU */
	int			sc_atu_viewport;	/* -1 after init; presumably caches the selected viewport -- confirm in atu code */

	void			*sc_ih;
	struct interrupt_controller sc_ic;
	TAILQ_HEAD(,dwpcie_intx) sc_intx[4];		/* handlers per INTx pin */

	/* Integrated MSI controller state (up to 2 groups of 32). */
	void			*sc_msi_ih[2];
	uint64_t		sc_msi_addr;		/* MSI target bus address */
	uint64_t		sc_msi_mask;
	struct dwpcie_msi	sc_msi[DWPCIE_MAX_MSI];
	int			sc_num_msi;
};
290
/*
 * Cookie returned by the interrupt establish routines; wraps the
 * machine-dependent handle plus MSI/DMA state where applicable.
 */
struct dwpcie_intr_handle {
	struct machine_intr_handle pih_ih;
	struct dwpcie_softc	*pih_sc;
	struct dwpcie_msi	*pih_dm;	/* NULL unless an internal MSI */
	bus_dma_tag_t		pih_dmat;
	bus_dmamap_t		pih_map;
};
298
int	dwpcie_match(struct device *, void *, void *);
void	dwpcie_attach(struct device *, struct device *, void *);

/* autoconf(9) glue. */
const struct cfattach dwpcie_ca = {
	sizeof (struct dwpcie_softc), dwpcie_match, dwpcie_attach
};

struct cfdriver dwpcie_cd = {
	NULL, "dwpcie", DV_DULL
};
309
310 int
dwpcie_match(struct device * parent,void * match,void * aux)311 dwpcie_match(struct device *parent, void *match, void *aux)
312 {
313 struct fdt_attach_args *faa = aux;
314
315 return (OF_is_compatible(faa->fa_node, "amlogic,g12a-pcie") ||
316 OF_is_compatible(faa->fa_node, "baikal,bm1000-pcie") ||
317 OF_is_compatible(faa->fa_node, "fsl,imx8mm-pcie") ||
318 OF_is_compatible(faa->fa_node, "fsl,imx8mq-pcie") ||
319 OF_is_compatible(faa->fa_node, "marvell,armada8k-pcie") ||
320 OF_is_compatible(faa->fa_node, "qcom,pcie-sc8280xp") ||
321 OF_is_compatible(faa->fa_node, "qcom,pcie-x1e80100") ||
322 OF_is_compatible(faa->fa_node, "rockchip,rk3568-pcie") ||
323 OF_is_compatible(faa->fa_node, "rockchip,rk3588-pcie") ||
324 OF_is_compatible(faa->fa_node, "sifive,fu740-pcie"));
325 }
326
void	dwpcie_attach_deferred(struct device *);

/* iATU programming. */
void	dwpcie_atu_disable(struct dwpcie_softc *, int);
void	dwpcie_atu_config(struct dwpcie_softc *, int, int,
	    uint64_t, uint64_t, uint64_t);
void	dwpcie_link_config(struct dwpcie_softc *);
int	dwpcie_link_up(struct dwpcie_softc *);

/* SoC-specific bring-up routines. */
int	dwpcie_armada8k_init(struct dwpcie_softc *);
int	dwpcie_armada8k_link_up(struct dwpcie_softc *);
int	dwpcie_armada8k_intr(void *);

int	dwpcie_g12a_init(struct dwpcie_softc *);
int	dwpcie_g12a_link_up(struct dwpcie_softc *);

int	dwpcie_imx8mq_init(struct dwpcie_softc *);
int	dwpcie_imx8mq_intr(void *);

int	dwpcie_fu740_init(struct dwpcie_softc *);

int	dwpcie_rk3568_init(struct dwpcie_softc *);
int	dwpcie_rk3568_intr(void *);
void	*dwpcie_rk3568_intr_establish(void *, int *, int,
	    struct cpu_info *, int (*)(void *), void *, char *);
void	dwpcie_rk3568_intr_disestablish(void *);
void	dwpcie_rk3568_intr_barrier(void *);

int	dwpcie_sc8280xp_init(struct dwpcie_softc *);

/* pci_chipset_tag_t methods. */
void	dwpcie_attach_hook(struct device *, struct device *,
	    struct pcibus_attach_args *);
int	dwpcie_bus_maxdevs(void *, int);
pcitag_t dwpcie_make_tag(void *, int, int, int);
void	dwpcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
int	dwpcie_conf_size(void *, pcitag_t);
pcireg_t dwpcie_conf_read(void *, pcitag_t, int);
void	dwpcie_conf_write(void *, pcitag_t, int, pcireg_t);
int	dwpcie_probe_device_hook(void *, struct pci_attach_args *);

int	dwpcie_intr_map(struct pci_attach_args *, pci_intr_handle_t *);
const char *dwpcie_intr_string(void *, pci_intr_handle_t);
void	*dwpcie_intr_establish(void *, pci_intr_handle_t, int,
	    struct cpu_info *, int (*)(void *), void *, char *);
void	dwpcie_intr_disestablish(void *, void *);

int	dwpcie_bs_iomap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);
int	dwpcie_bs_memmap(bus_space_tag_t, bus_addr_t, bus_size_t, int,
	    bus_space_handle_t *);

/* Shared interrupt controller template; only the barrier op is set. */
struct interrupt_controller dwpcie_ic = {
	.ic_barrier = intr_barrier
};
380
/*
 * First-stage attach: locate and map the register regions named in
 * the device tree, decode the "ranges" property, then defer the
 * hardware bring-up until all other drivers (clocks, regulators,
 * pinctrl providers) have attached.
 */
void
dwpcie_attach(struct device *parent, struct device *self, void *aux)
{
	struct dwpcie_softc *sc = (struct dwpcie_softc *)self;
	struct fdt_attach_args *faa = aux;
	uint32_t *ranges;
	int i, j, nranges, rangeslen;
	int atu, config, ctrl, glue;

	/* We need at least the DBI and config space regions. */
	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	/*
	 * Default to the first "reg" entry for the DBI registers, but
	 * prefer an entry explicitly named "dbi" if there is one.
	 */
	sc->sc_ctrl_base = faa->fa_reg[0].addr;
	sc->sc_ctrl_size = faa->fa_reg[0].size;

	ctrl = OF_getindex(faa->fa_node, "dbi", "reg-names");
	if (ctrl >= 0 && ctrl < faa->fa_nreg) {
		sc->sc_ctrl_base = faa->fa_reg[ctrl].addr;
		sc->sc_ctrl_size = faa->fa_reg[ctrl].size;
	}

	/* The "config" window is mandatory. */
	config = OF_getindex(faa->fa_node, "config", "reg-names");
	if (config < 0 || config >= faa->fa_nreg) {
		printf(": no config registers\n");
		return;
	}

	sc->sc_conf_base = faa->fa_reg[config].addr;
	sc->sc_conf_size = faa->fa_reg[config].size;

	/*
	 * Default location for the unrolled iATU registers; a named
	 * "atu" reg entry overrides it below.
	 */
	sc->sc_atu_base = sc->sc_ctrl_base + 0x300000;
	sc->sc_atu_size = sc->sc_ctrl_size - 0x300000;

	atu = OF_getindex(faa->fa_node, "atu", "reg-names");
	if (atu >= 0 && atu < faa->fa_nreg) {
		sc->sc_atu_base = faa->fa_reg[atu].addr;
		sc->sc_atu_size = faa->fa_reg[atu].size;
	}

	/* Amlogic G12A keeps its SoC glue in a "cfg" region. */
	if (OF_is_compatible(faa->fa_node, "amlogic,g12a-pcie")) {
		glue = OF_getindex(faa->fa_node, "cfg", "reg-names");
		if (glue < 0 || glue >= faa->fa_nreg) {
			printf(": no glue registers\n");
			return;
		}

		sc->sc_glue_base = faa->fa_reg[glue].addr;
		sc->sc_glue_size = faa->fa_reg[glue].size;
	}

	/* Rockchip RK3568/RK3588 keep their glue in an "apb" region. */
	if (OF_is_compatible(faa->fa_node, "rockchip,rk3568-pcie") ||
	    OF_is_compatible(faa->fa_node, "rockchip,rk3588-pcie")) {
		glue = OF_getindex(faa->fa_node, "apb", "reg-names");
		if (glue < 0 || glue >= faa->fa_nreg) {
			printf(": no glue registers\n");
			return;
		}

		sc->sc_glue_base = faa->fa_reg[glue].addr;
		sc->sc_glue_size = faa->fa_reg[glue].size;
	}

	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;

	/* Address/size cell counts for this node and its parent bus. */
	sc->sc_acells = OF_getpropint(sc->sc_node, "#address-cells",
	    faa->fa_acells);
	sc->sc_scells = OF_getpropint(sc->sc_node, "#size-cells",
	    faa->fa_scells);
	sc->sc_pacells = faa->fa_acells;
	sc->sc_pscells = faa->fa_scells;

	/* "ranges" must be a whole number of (child, parent, size) tuples. */
	rangeslen = OF_getproplen(sc->sc_node, "ranges");
	if (rangeslen <= 0 || (rangeslen % sizeof(uint32_t)) ||
	    (rangeslen / sizeof(uint32_t)) % (sc->sc_acells +
	    sc->sc_pacells + sc->sc_scells)) {
		printf(": invalid ranges property\n");
		return;
	}

	ranges = malloc(rangeslen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "ranges", ranges,
	    rangeslen);

	nranges = (rangeslen / sizeof(uint32_t)) /
	    (sc->sc_acells + sc->sc_pacells + sc->sc_scells);
	sc->sc_ranges = mallocarray(nranges,
	    sizeof(struct dwpcie_range), M_TEMP, M_WAITOK);
	sc->sc_nranges = nranges;

	/*
	 * Decode each range.  The first child-address cell carries the
	 * PCI address space flags; remaining cells are combined into
	 * 64-bit addresses/sizes where the cell counts call for it.
	 */
	for (i = 0, j = 0; i < sc->sc_nranges; i++) {
		sc->sc_ranges[i].flags = ranges[j++];
		sc->sc_ranges[i].pci_base = ranges[j++];
		if (sc->sc_acells - 1 == 2) {
			sc->sc_ranges[i].pci_base <<= 32;
			sc->sc_ranges[i].pci_base |= ranges[j++];
		}
		sc->sc_ranges[i].phys_base = ranges[j++];
		if (sc->sc_pacells == 2) {
			sc->sc_ranges[i].phys_base <<= 32;
			sc->sc_ranges[i].phys_base |= ranges[j++];
		}
		sc->sc_ranges[i].size = ranges[j++];
		if (sc->sc_scells == 2) {
			sc->sc_ranges[i].size <<= 32;
			sc->sc_ranges[i].size |= ranges[j++];
		}
	}

	free(ranges, M_TEMP, rangeslen);

	if (bus_space_map(sc->sc_iot, sc->sc_ctrl_base,
	    sc->sc_ctrl_size, 0, &sc->sc_ioh)) {
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct dwpcie_range));
		printf(": can't map ctrl registers\n");
		return;
	}

	if (bus_space_map(sc->sc_iot, sc->sc_conf_base,
	    sc->sc_conf_size, 0, &sc->sc_conf_ioh)) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ctrl_size);
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct dwpcie_range));
		printf(": can't map config registers\n");
		return;
	}

	sc->sc_num_viewport = OF_getpropint(sc->sc_node, "num-viewport", 2);

	printf("\n");

	pinctrl_byname(sc->sc_node, "default");
	clock_set_assigned(sc->sc_node);

	/* Finish once clock/regulator providers have had a chance to attach. */
	config_defer(self, dwpcie_attach_deferred);
}
521
/*
 * Second-stage attach: run the SoC-specific hardware bring-up,
 * program the iATU outbound windows, configure the root port's
 * PCI-PCI bridge registers and finally attach the PCI bus.
 */
void
dwpcie_attach_deferred(struct device *self)
{
	struct dwpcie_softc *sc = (struct dwpcie_softc *)self;
	struct pcibus_attach_args pba;
	bus_addr_t iobase, iolimit;
	bus_addr_t membase, memlimit;
	bus_addr_t pmembase, pmemlimit;
	uint32_t bus_range[2];
	pcireg_t bir, blr, csr;
	int i, error = 0;

	/* Dispatch to the matching SoC-specific init routine. */
	if (OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie"))
		error = dwpcie_armada8k_init(sc);
	if (OF_is_compatible(sc->sc_node, "amlogic,g12a-pcie"))
		error = dwpcie_g12a_init(sc);
	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie") ||
	    OF_is_compatible(sc->sc_node, "fsl,imx8mq-pcie"))
		error = dwpcie_imx8mq_init(sc);
	if (OF_is_compatible(sc->sc_node, "qcom,pcie-sc8280xp") ||
	    OF_is_compatible(sc->sc_node, "qcom,pcie-x1e80100"))
		error = dwpcie_sc8280xp_init(sc);
	if (OF_is_compatible(sc->sc_node, "rockchip,rk3568-pcie") ||
	    OF_is_compatible(sc->sc_node, "rockchip,rk3588-pcie"))
		error = dwpcie_rk3568_init(sc);
	if (OF_is_compatible(sc->sc_node, "sifive,fu740-pcie"))
		error = dwpcie_fu740_init(sc);
	if (error != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_conf_ioh, sc->sc_conf_size);
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ctrl_size);
		free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
		    sizeof(struct dwpcie_range));
		printf("%s: can't initialize hardware\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Detect the iATU flavor: on cores with an unrolled iATU the
	 * legacy viewport register reads back as all-ones, in which
	 * case the separate iATU register region must be mapped.
	 */
	sc->sc_atu_viewport = -1;
	if (HREAD4(sc, IATU_VIEWPORT) == 0xffffffff) {
		sc->sc_atu_unroll = 1;
		if (bus_space_map(sc->sc_iot, sc->sc_atu_base,
		    sc->sc_atu_size, 0, &sc->sc_atu_ioh)) {
			bus_space_unmap(sc->sc_iot, sc->sc_conf_ioh,
			    sc->sc_conf_size);
			bus_space_unmap(sc->sc_iot, sc->sc_ioh,
			    sc->sc_ctrl_size);
			free(sc->sc_ranges, M_TEMP, sc->sc_nranges *
			    sizeof(struct dwpcie_range));
			printf("%s: can't map atu registers\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	/*
	 * Pick the I/O, memory and prefetchable memory windows out of
	 * the decoded ranges.  The top flags byte encodes the OF PCI
	 * address space: 0x01 = I/O, 0x02 = 32-bit mem, 0x03 = 64-bit
	 * mem (used here as the prefetchable window).
	 */
	for (i = 0; i < sc->sc_nranges; i++) {
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_io_base = sc->sc_ranges[i].phys_base;
			sc->sc_io_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_io_size = sc->sc_ranges[i].size;
		}
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x02000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_mem_base = sc->sc_ranges[i].phys_base;
			sc->sc_mem_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_mem_size = sc->sc_ranges[i].size;
		}
		if ((sc->sc_ranges[i].flags & 0x03000000) == 0x03000000 &&
		    sc->sc_ranges[i].size > 0) {
			sc->sc_pmem_base = sc->sc_ranges[i].phys_base;
			sc->sc_pmem_bus_addr = sc->sc_ranges[i].pci_base;
			sc->sc_pmem_size = sc->sc_ranges[i].size;
		}
	}
	/*
	 * NOTE(review): unlike the error paths above, this early
	 * return leaves the register mappings and sc_ranges allocated
	 * -- confirm whether that is intentional.
	 */
	if (sc->sc_mem_size == 0) {
		printf("%s: no memory mapped I/O window\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Disable prefetchable memory mapped I/O window if we don't
	 * have enough viewports to enable it.
	 */
	if (sc->sc_num_viewport < 4)
		sc->sc_pmem_size = 0;

	for (i = 0; i < sc->sc_num_viewport; i++)
		dwpcie_atu_disable(sc, i);

	/*
	 * Outbound windows: viewport 0 translates memory space,
	 * viewport 2 I/O space, viewport 3 prefetchable memory.
	 * (Viewport 1 is presumably reserved for config cycles by the
	 * conf read/write code, which is not visible here.)
	 */
	dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX0,
	    IATU_REGION_CTRL_1_TYPE_MEM, sc->sc_mem_base,
	    sc->sc_mem_bus_addr, sc->sc_mem_size);
	if (sc->sc_num_viewport > 2 && sc->sc_io_size > 0)
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX2,
		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
		    sc->sc_io_bus_addr, sc->sc_io_size);
	if (sc->sc_num_viewport > 3 && sc->sc_pmem_size > 0)
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX3,
		    IATU_REGION_CTRL_1_TYPE_MEM, sc->sc_pmem_base,
		    sc->sc_pmem_bus_addr, sc->sc_pmem_size);

	/* Enable modification of read-only bits. */
	HSET4(sc, MISC_CONTROL_1, MISC_CONTROL_1_DBI_RO_WR_EN);

	/* A Root Port is a PCI-PCI Bridge. */
	HWRITE4(sc, PCI_CLASS_REG,
	    PCI_CLASS_BRIDGE << PCI_CLASS_SHIFT |
	    PCI_SUBCLASS_BRIDGE_PCI << PCI_SUBCLASS_SHIFT);

	/* Clear BAR as U-Boot seems to leave garbage in it. */
	HWRITE4(sc, PCI_MAPREG_START, PCI_MAPREG_MEM_TYPE_64BIT);
	HWRITE4(sc, PCI_MAPREG_START + 4, 0);

	/* Enable 32-bit I/O addressing. */
	HSET4(sc, PPB_REG_IOSTATUS,
	    PPB_IO_32BIT | (PPB_IO_32BIT << PPB_IOLIMIT_SHIFT));

	/* Make sure read-only bits are write-protected. */
	HCLR4(sc, MISC_CONTROL_1, MISC_CONTROL_1_DBI_RO_WR_EN);

	/* Set up bus range; default to the full 0-31 if not given. */
	if (OF_getpropintarray(sc->sc_node, "bus-range", bus_range,
	    sizeof(bus_range)) != sizeof(bus_range)) {
		bus_range[0] = 0;
		bus_range[1] = 31;
	}
	sc->sc_bus = bus_range[0];

	/* Initialize bus range: primary, secondary, subordinate. */
	bir = bus_range[0];
	bir |= ((bus_range[0] + 1) << 8);
	bir |= (bus_range[1] << 16);
	HWRITE4(sc, PPB_REG_BUSINFO, bir);

	/* Initialize memory mapped I/O window. */
	membase = sc->sc_mem_bus_addr;
	memlimit = membase + sc->sc_mem_size - 1;
	blr = memlimit & PPB_MEM_MASK;
	blr |= (membase >> PPB_MEM_SHIFT);
	HWRITE4(sc, PPB_REG_MEM, blr);

	/* Initialize I/O window, or close it if we have none. */
	if (sc->sc_io_size > 0) {
		iobase = sc->sc_io_bus_addr;
		iolimit = iobase + sc->sc_io_size - 1;
		blr = iolimit & PPB_IO_MASK;
		blr |= (iobase >> PPB_IO_SHIFT);
		HWRITE4(sc, PPB_REG_IOSTATUS, blr);
		blr = (iobase & 0xffff0000) >> 16;
		blr |= iolimit & 0xffff0000;
		HWRITE4(sc, PPB_REG_IO_HI, blr);
	} else {
		HWRITE4(sc, PPB_REG_IOSTATUS, 0x000000ff);
		HWRITE4(sc, PPB_REG_IO_HI, 0x0000ffff);
	}

	/* Initialize prefetchable window, or close it if disabled. */
	if (sc->sc_pmem_size > 0) {
		pmembase = sc->sc_pmem_bus_addr;
		pmemlimit = pmembase + sc->sc_pmem_size - 1;
		blr = pmemlimit & PPB_MEM_MASK;
		blr |= ((pmembase & PPB_MEM_MASK) >> PPB_MEM_SHIFT);
		HWRITE4(sc, PPB_REG_PREFMEM, blr);
		HWRITE4(sc, PPB_REG_PREFBASE_HI32, pmembase >> 32);
		HWRITE4(sc, PPB_REG_PREFLIM_HI32, pmemlimit >> 32);
	} else {
		HWRITE4(sc, PPB_REG_PREFMEM, 0x0000ffff);
		HWRITE4(sc, PPB_REG_PREFBASE_HI32, 0);
		HWRITE4(sc, PPB_REG_PREFLIM_HI32, 0);
	}

	/* Enable bus mastering and the decoders we actually use. */
	csr = PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
	if (sc->sc_io_size > 0)
		csr |= PCI_COMMAND_IO_ENABLE;
	HWRITE4(sc, PCI_COMMAND_STATUS_REG, csr);

	/* Child bus space tags route mappings through our windows. */
	memcpy(&sc->sc_bus_iot, sc->sc_iot, sizeof(sc->sc_bus_iot));
	sc->sc_bus_iot.bus_private = sc;
	sc->sc_bus_iot._space_map = dwpcie_bs_iomap;
	memcpy(&sc->sc_bus_memt, sc->sc_iot, sizeof(sc->sc_bus_memt));
	sc->sc_bus_memt.bus_private = sc;
	sc->sc_bus_memt._space_map = dwpcie_bs_memmap;

	/* Fill in the chipset tag used by the generic PCI code. */
	sc->sc_pc.pc_conf_v = sc;
	sc->sc_pc.pc_attach_hook = dwpcie_attach_hook;
	sc->sc_pc.pc_bus_maxdevs = dwpcie_bus_maxdevs;
	sc->sc_pc.pc_make_tag = dwpcie_make_tag;
	sc->sc_pc.pc_decompose_tag = dwpcie_decompose_tag;
	sc->sc_pc.pc_conf_size = dwpcie_conf_size;
	sc->sc_pc.pc_conf_read = dwpcie_conf_read;
	sc->sc_pc.pc_conf_write = dwpcie_conf_write;
	sc->sc_pc.pc_probe_device_hook = dwpcie_probe_device_hook;

	sc->sc_pc.pc_intr_v = sc;
	sc->sc_pc.pc_intr_map = dwpcie_intr_map;
	sc->sc_pc.pc_intr_map_msi = _pci_intr_map_msi;
	sc->sc_pc.pc_intr_map_msivec = _pci_intr_map_msivec;
	sc->sc_pc.pc_intr_map_msix = _pci_intr_map_msix;
	sc->sc_pc.pc_intr_string = dwpcie_intr_string;
	sc->sc_pc.pc_intr_establish = dwpcie_intr_establish;
	sc->sc_pc.pc_intr_disestablish = dwpcie_intr_disestablish;

	memset(&pba, 0, sizeof(pba));
	pba.pba_busname = "pci";
	pba.pba_iot = &sc->sc_bus_iot;
	pba.pba_memt = &sc->sc_bus_memt;
	pba.pba_dmat = sc->sc_dmat;
	pba.pba_pc = &sc->sc_pc;
	pba.pba_domain = pci_ndomains++;
	pba.pba_bus = sc->sc_bus;

	/*
	 * Advertise MSI support when an external MSI controller is
	 * available ("msi-map" or known-good SoCs) or when our own
	 * internal MSI controller was set up (sc_msi_addr != 0).
	 */
	if (OF_is_compatible(sc->sc_node, "baikal,bm1000-pcie") ||
	    OF_is_compatible(sc->sc_node, "marvell,armada8k-pcie") ||
	    OF_getproplen(sc->sc_node, "msi-map") > 0 ||
	    sc->sc_msi_addr)
		pba.pba_flags |= PCI_FLAGS_MSI_ENABLED;

	/*
	 * Only support multiple MSI vectors if we have enough MSI
	 * interrupts (or are using an external interrupt controller
	 * that hopefully supports plenty of MSI interrupts).
	 */
	if (OF_getproplen(sc->sc_node, "msi-map") > 0 ||
	    sc->sc_num_msi > 32)
		pba.pba_flags |= PCI_FLAGS_MSIVEC_ENABLED;

	/* Allow PCI power management on this bus. */
	pci_dopm = 1;

	config_found(self, &pba, NULL);
}
754
755 void
dwpcie_link_config(struct dwpcie_softc * sc)756 dwpcie_link_config(struct dwpcie_softc *sc)
757 {
758 uint32_t mode, width, reg;
759 int lanes;
760
761 lanes = OF_getpropint(sc->sc_node, "num-lanes", 0);
762
763 switch (lanes) {
764 case 1:
765 mode = PCIE_PORT_LINK_CTRL_LANES_1;
766 width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_1;
767 break;
768 case 2:
769 mode = PCIE_PORT_LINK_CTRL_LANES_2;
770 width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_2;
771 break;
772 case 4:
773 mode = PCIE_PORT_LINK_CTRL_LANES_4;
774 width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_4;
775 break;
776 case 8:
777 mode = PCIE_PORT_LINK_CTRL_LANES_8;
778 width = PCIE_LINK_WIDTH_SPEED_CTRL_LANES_8;
779 break;
780 default:
781 printf("%s: %d lanes not supported\n", __func__, lanes);
782 return;
783 }
784
785 reg = HREAD4(sc, PCIE_PORT_LINK_CTRL);
786 reg &= ~PCIE_PORT_LINK_CTRL_LANES_MASK;
787 reg |= mode;
788 HWRITE4(sc, PCIE_PORT_LINK_CTRL, reg);
789
790 reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
791 reg &= ~PCIE_LINK_WIDTH_SPEED_CTRL_LANES_MASK;
792 reg |= width;
793 HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
794
795 reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
796 reg |= PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE;
797 HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);
798 }
799
800 int
dwpcie_msi_intr(struct dwpcie_softc * sc,int idx)801 dwpcie_msi_intr(struct dwpcie_softc *sc, int idx)
802 {
803 struct dwpcie_msi *dm;
804 uint32_t status;
805 int vec, s;
806
807 status = HREAD4(sc, PCIE_MSI_INTR_STATUS(idx));
808 if (status == 0)
809 return 0;
810
811 HWRITE4(sc, PCIE_MSI_INTR_STATUS(idx), status);
812 while (status) {
813 vec = ffs(status) - 1;
814 status &= ~(1U << vec);
815
816 dm = &sc->sc_msi[idx * 32 + vec];
817 if (dm->dm_func == NULL)
818 continue;
819
820 if ((dm->dm_flags & IPL_MPSAFE) == 0)
821 KERNEL_LOCK();
822 s = splraise(dm->dm_ipl);
823 if (dm->dm_func(dm->dm_arg))
824 dm->dm_count.ec_count++;
825 splx(s);
826 if ((dm->dm_flags & IPL_MPSAFE) == 0)
827 KERNEL_UNLOCK();
828 }
829
830 return 1;
831 }
832
/* Interrupt handler for MSI vectors 0-31 (group 0). */
int
dwpcie_msi0_intr(void *arg)
{
	return dwpcie_msi_intr(arg, 0);
}
838
/* Interrupt handler for MSI vectors 32-63 (group 1). */
int
dwpcie_msi1_intr(void *arg)
{
	return dwpcie_msi_intr(arg, 1);
}
844
/*
 * Set up the integrated MSI controller: pick a DMA-safe target
 * address, program it into the hardware, enable the vector groups
 * and hook up the per-group interrupt handlers.
 * Returns 0 on success or an errno value.
 */
int
dwpcie_msi_init(struct dwpcie_softc *sc)
{
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	uint64_t addr;
	int error, rseg;
	int idx;

	/*
	 * Allocate some DMA memory such that we have a "safe" target
	 * address for MSIs.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(uint32_t),
	    sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_WAITOK);
	if (error)
		return error;

	/*
	 * Translate the CPU address into a bus address that we can
	 * program into the hardware.
	 */
	error = bus_dmamap_create(sc->sc_dmat, sizeof(uint32_t), 1,
	    sizeof(uint32_t), 0, BUS_DMA_WAITOK, &map);
	if (error) {
		bus_dmamem_free(sc->sc_dmat, &seg, 1);
		return error;
	}
	error = bus_dmamap_load_raw(sc->sc_dmat, map, &seg, 1,
	    sizeof(uint32_t), BUS_DMA_WAITOK);
	if (error) {
		bus_dmamap_destroy(sc->sc_dmat, map);
		bus_dmamem_free(sc->sc_dmat, &seg, 1);
		return error;
	}

	/* Program the MSI target address into the controller. */
	addr = map->dm_segs[0].ds_addr;
	HWRITE4(sc, PCIE_MSI_ADDR_LO, addr);
	HWRITE4(sc, PCIE_MSI_ADDR_HI, addr >> 32);

	/* The map was only needed to learn the bus address. */
	bus_dmamap_unload(sc->sc_dmat, map);
	bus_dmamap_destroy(sc->sc_dmat, map);

	/*
	 * See if the device tree indicates that the hardware supports
	 * more than 32 vectors. Some hardware supports more than 64,
	 * but 64 is good enough for now.
	 */
	idx = OF_getindex(sc->sc_node, "msi1", "interrupt-names");
	if (idx == -1)
		sc->sc_num_msi = 32;
	else
		sc->sc_num_msi = 64;
	KASSERT(sc->sc_num_msi <= DWPCIE_MAX_MSI);

	/* Enable, mask and clear all MSIs. */
	for (idx = 0; idx < sc->sc_num_msi / 32; idx++) {
		HWRITE4(sc, PCIE_MSI_INTR_ENABLE(idx), 0xffffffff);
		HWRITE4(sc, PCIE_MSI_INTR_MASK(idx), 0xffffffff);
		HWRITE4(sc, PCIE_MSI_INTR_STATUS(idx), 0xffffffff);
	}

	/* Group 0 handler; fall back to interrupt index 0 if unnamed. */
	idx = OF_getindex(sc->sc_node, "msi0", "interrupt-names");
	if (idx == -1)
		idx = 0;

	sc->sc_msi_ih[0] = fdt_intr_establish_idx(sc->sc_node, idx,
	    IPL_BIO | IPL_MPSAFE, dwpcie_msi0_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_msi_ih[0] == NULL) {
		bus_dmamem_free(sc->sc_dmat, &seg, 1);
		return EINVAL;
	}

	/* Group 1 handler is optional. */
	idx = OF_getindex(sc->sc_node, "msi1", "interrupt-names");
	if (idx == -1)
		goto finish;

	sc->sc_msi_ih[1] = fdt_intr_establish_idx(sc->sc_node, idx,
	    IPL_BIO | IPL_MPSAFE, dwpcie_msi1_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_msi_ih[1] == NULL)
		sc->sc_num_msi = 32;

finish:
	/*
	 * Hold on to the DMA memory such that nobody can use it to
	 * actually do DMA transfers.  (The segment is deliberately
	 * never freed on the success path.)
	 */

	sc->sc_msi_addr = addr;
	return 0;
}
936
/*
 * Marvell ARMADA 8k bring-up: configure Root Complex mode and the
 * AXI attributes, then start link training and wait for the link.
 * Returns 0 on success or ETIMEDOUT if the link never comes up.
 */
int
dwpcie_armada8k_init(struct dwpcie_softc *sc)
{
	uint32_t reg;
	int timo;

	clock_enable_all(sc->sc_node);

	dwpcie_link_config(sc);

	/* If the link isn't up yet, hold off LTSSM while we configure. */
	if (!dwpcie_armada8k_link_up(sc)) {
		reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
		reg &= ~PCIE_GLOBAL_CTRL_APP_LTSSM_EN;
		HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
	}

	/*
	 * Setup Requester-ID to Stream-ID mapping
	 * XXX: TF-A is supposed to set this up, but doesn't!
	 */
	HWRITE4(sc, PCIE_STREAMID, PCIE_STREAMID_8040);

	/* Enable Root Complex mode. */
	reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
	reg &= ~PCIE_GLOBAL_CTRL_DEVICE_TYPE_MASK;
	reg |= PCIE_GLOBAL_CTRL_DEVICE_TYPE_RC;
	HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);

	/* AXI read/write cache attributes and shareability domain. */
	HWRITE4(sc, PCIE_ARCACHE_TRC, PCIE_ARCACHE_TRC_DEFAULT);
	HWRITE4(sc, PCIE_AWCACHE_TRC, PCIE_AWCACHE_TRC_DEFAULT);
	reg = HREAD4(sc, PCIE_ARUSER);
	reg &= ~PCIE_AXUSER_DOMAIN_MASK;
	reg |= PCIE_AXUSER_DOMAIN_OUTER_SHARABLE;
	HWRITE4(sc, PCIE_ARUSER, reg);
	reg = HREAD4(sc, PCIE_AWUSER);
	reg &= ~PCIE_AXUSER_DOMAIN_MASK;
	reg |= PCIE_AXUSER_DOMAIN_OUTER_SHARABLE;
	HWRITE4(sc, PCIE_AWUSER, reg);

	/* Kick off link training. */
	if (!dwpcie_armada8k_link_up(sc)) {
		reg = HREAD4(sc, PCIE_GLOBAL_CTRL);
		reg |= PCIE_GLOBAL_CTRL_APP_LTSSM_EN;
		HWRITE4(sc, PCIE_GLOBAL_CTRL, reg);
	}

	/* Wait up to 40ms for the link to come up. */
	for (timo = 40; timo > 0; timo--) {
		if (dwpcie_armada8k_link_up(sc))
			break;
		delay(1000);
	}
	if (timo == 0)
		return ETIMEDOUT;

	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_AUDIO | IPL_MPSAFE,
	    dwpcie_armada8k_intr, sc, sc->sc_dev.dv_xname);

	/* Unmask INTx interrupts. */
	HWRITE4(sc, PCIE_GLOBAL_INT_MASK,
	    PCIE_GLOBAL_INT_MASK_INT_A | PCIE_GLOBAL_INT_MASK_INT_B |
	    PCIE_GLOBAL_INT_MASK_INT_C | PCIE_GLOBAL_INT_MASK_INT_D);

	return 0;
}
1000
1001 int
dwpcie_armada8k_link_up(struct dwpcie_softc * sc)1002 dwpcie_armada8k_link_up(struct dwpcie_softc *sc)
1003 {
1004 uint32_t reg, mask;
1005
1006 mask = PCIE_GLOBAL_STATUS_RDLH_LINK_UP;
1007 mask |= PCIE_GLOBAL_STATUS_PHY_LINK_UP;
1008 reg = HREAD4(sc, PCIE_GLOBAL_STATUS);
1009 return ((reg & mask) == mask);
1010 }
1011
1012 int
dwpcie_armada8k_intr(void * arg)1013 dwpcie_armada8k_intr(void *arg)
1014 {
1015 struct dwpcie_softc *sc = arg;
1016 uint32_t cause;
1017
1018 /* Acknowledge interrupts. */
1019 cause = HREAD4(sc, PCIE_GLOBAL_INT_CAUSE);
1020 HWRITE4(sc, PCIE_GLOBAL_INT_CAUSE, cause);
1021
1022 /* INTx interrupt, so not really ours. */
1023 return 0;
1024 }
1025
/*
 * Bring up the PCIe controller on Amlogic G12A SoCs.  The glue
 * registers live in a separate "glue" register block; PERST# is
 * driven through a GPIO from the "reset-gpios" property.  Returns 0
 * on success or an errno on failure.
 */
int
dwpcie_g12a_init(struct dwpcie_softc *sc)
{
	uint32_t *reset_gpio;
	ssize_t reset_gpiolen;
	uint32_t reg;
	int error, timo;

	/* PERST# GPIO is mandatory on this platform. */
	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
	if (reset_gpiolen <= 0)
		return ENXIO;

	if (bus_space_map(sc->sc_iot, sc->sc_glue_base,
	    sc->sc_glue_size, 0, &sc->sc_glue_ioh))
		return ENOMEM;

	power_domain_enable(sc->sc_node);

	phy_enable(sc->sc_node, "pcie");

	/* Pulse all resets with settle time on either side. */
	reset_assert_all(sc->sc_node);
	delay(500);
	reset_deassert_all(sc->sc_node);
	delay(500);

	/* 100MHz reference clock. */
	clock_set_frequency(sc->sc_node, "port", 100000000UL);
	clock_enable_all(sc->sc_node);

	/* Assert PERST# before starting link training. */
	reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
	OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
	    reset_gpiolen);
	gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
	gpio_controller_set_pin(reset_gpio, 1);

	dwpcie_link_config(sc);

	/* Enable the LTSSM through the glue registers. */
	reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CFG0);
	reg |= PCIE_CFG0_APP_LTSSM_EN;
	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CFG0, reg);

	/*
	 * Deassert PERST#.  The gpio_controller API applies the
	 * active-low flag from the device tree, so "1" asserts and
	 * "0" deasserts regardless of polarity.
	 */
	gpio_controller_set_pin(reset_gpio, 1);
	delay(500);
	gpio_controller_set_pin(reset_gpio, 0);

	free(reset_gpio, M_TEMP, reset_gpiolen);

	/* Wait up to 40ms for the link to come up. */
	for (timo = 40; timo > 0; timo--) {
		if (dwpcie_g12a_link_up(sc))
			break;
		delay(1000);
	}
	if (timo == 0)
		return ETIMEDOUT;

	error = dwpcie_msi_init(sc);
	if (error)
		return error;

	return 0;
}
1086
1087 int
dwpcie_g12a_link_up(struct dwpcie_softc * sc)1088 dwpcie_g12a_link_up(struct dwpcie_softc *sc)
1089 {
1090 uint32_t reg;
1091
1092 reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_STATUS12);
1093 if ((reg & PCIE_STATUS12_SMLH_LINK_UP) &&
1094 (reg & PCIE_STATUS12_RDLH_LINK_UP) &&
1095 (reg & PCIE_STATUS12_LTSSM_MASK) == PCIE_STATUS12_LTSSM_UP)
1096 return 1;
1097 return 0;
1098 }
1099
/*
 * Bring up the PCIe controller on NXP i.MX8MQ and i.MX8MM SoCs.
 * This involves routing the reference clock (external pad or
 * internal PLL), configuring the PHY via the IOMUXC GPR (and, on
 * i.MX8MM, a dedicated PHY regmap), releasing the resets and
 * training the link, optionally retraining at Gen2 speed.
 * Returns 0 on success or an errno on failure.
 */
int
dwpcie_imx8mq_init(struct dwpcie_softc *sc)
{
	uint32_t *clkreq_gpio, *disable_gpio, *reset_gpio;
	ssize_t clkreq_gpiolen, disable_gpiolen, reset_gpiolen;
	struct regmap *anatop, *gpr, *phy;
	uint32_t off, reg;
	int error, timo;

	/*
	 * Look up the syscon regmaps we need; the i.MX8MM variant has
	 * a separate PHY regmap (compatible with the i.MX7D PHY).
	 * Note: "phy" is only dereferenced on the i.MX8MM paths below.
	 */
	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
		anatop = regmap_bycompatible("fsl,imx8mm-anatop");
		gpr = regmap_bycompatible("fsl,imx8mm-iomuxc-gpr");
		phy = regmap_bycompatible("fsl,imx7d-pcie-phy");
		KASSERT(phy != NULL);
	} else {
		anatop = regmap_bycompatible("fsl,imx8mq-anatop");
		gpr = regmap_bycompatible("fsl,imx8mq-iomuxc-gpr");
	}
	KASSERT(anatop != NULL);
	KASSERT(gpr != NULL);

	clkreq_gpiolen = OF_getproplen(sc->sc_node, "clkreq-gpio");
	disable_gpiolen = OF_getproplen(sc->sc_node, "disable-gpio");
	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpio");

	/* Drive CLKREQ# active to request the reference clock. */
	if (clkreq_gpiolen > 0) {
		clkreq_gpio = malloc(clkreq_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "clkreq-gpio", clkreq_gpio,
		    clkreq_gpiolen);
		gpio_controller_config_pin(clkreq_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(clkreq_gpio, 1);
	}

	/* Deassert the (optional) disable line. */
	if (disable_gpiolen > 0) {
		disable_gpio = malloc(disable_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "disable-gpio", disable_gpio,
		    disable_gpiolen);
		gpio_controller_config_pin(disable_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(disable_gpio, 0);
	}

	/* Assert PERST# while we set things up. */
	if (reset_gpiolen > 0) {
		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "reset-gpio", reset_gpio,
		    reset_gpiolen);
		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(reset_gpio, 1);
	}

	power_domain_enable(sc->sc_node);
	reset_assert(sc->sc_node, "pciephy");
	reset_assert(sc->sc_node, "apps");

	/* Select Root Complex mode for the right controller instance. */
	reg = regmap_read_4(gpr, IOMUXC_GPR12);
	if (OF_getpropint(sc->sc_node, "ctrl-id", 0) == 0) {
		off = IOMUXC_GPR14;
		reg &= ~IMX8MQ_GPR_PCIE1_DEVICE_TYPE_MASK;
		reg |= IMX8MQ_GPR_PCIE1_DEVICE_TYPE_RC;
	} else {
		off = IOMUXC_GPR16;
		reg &= ~IMX8MQ_GPR_PCIE2_DEVICE_TYPE_MASK;
		reg |= IMX8MQ_GPR_PCIE2_DEVICE_TYPE_RC;
	}
	regmap_write_4(gpr, IOMUXC_GPR12, reg);

	/*
	 * Reference clock routing.  An empty "ext_osc" property (or a
	 * non-zero value) selects the external oscillator; otherwise
	 * the clock is generated from an internal PLL.
	 */
	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
		if (OF_getproplen(sc->sc_node, "ext_osc") == 0 ||
		    OF_getpropint(sc->sc_node, "ext_osc", 0)) {
			reg = regmap_read_4(gpr, off);
			reg &= ~(IMX8MQ_GPR_PCIE_REF_USE_PAD |
			    IMX8MM_GPR_PCIE_SSC_EN |
			    IMX8MM_GPR_PCIE_POWER_OFF |
			    IMX8MM_GPR_PCIE_REF_CLK_MASK);
			reg |= (IMX8MM_GPR_PCIE_AUX_EN |
			    IMX8MM_GPR_PCIE_REF_CLK_EXT);
			regmap_write_4(gpr, off, reg);
			delay(100);
			/* Release the PHY common reset. */
			reg = regmap_read_4(gpr, off);
			reg |= IMX8MM_GPR_PCIE_CMN_RST;
			regmap_write_4(gpr, off, reg);
			delay(200);
		} else {
			reg = regmap_read_4(gpr, off);
			reg &= ~(IMX8MQ_GPR_PCIE_REF_USE_PAD |
			    IMX8MM_GPR_PCIE_SSC_EN |
			    IMX8MM_GPR_PCIE_POWER_OFF |
			    IMX8MM_GPR_PCIE_REF_CLK_MASK);
			reg |= (IMX8MM_GPR_PCIE_AUX_EN |
			    IMX8MM_GPR_PCIE_REF_CLK_PLL);
			regmap_write_4(gpr, off, reg);
			delay(100);
			/* Route the internal PLL clock out of the PHY. */
			regmap_write_4(phy, IMX8MM_PCIE_PHY_CMN_REG62,
			    IMX8MM_PCIE_PHY_CMN_REG62_PLL_CLK_OUT);
			regmap_write_4(phy, IMX8MM_PCIE_PHY_CMN_REG64,
			    IMX8MM_PCIE_PHY_CMN_REG64_AUX_RX_TX_TERM);
			reg = regmap_read_4(gpr, off);
			reg |= IMX8MM_GPR_PCIE_CMN_RST;
			regmap_write_4(gpr, off, reg);
			delay(200);
			/* Tune transmitter de-emphasis for Gen1/Gen2. */
			regmap_write_4(phy, IMX8MM_PCIE_PHY_TRSV_REG5,
			    IMX8MM_PCIE_PHY_TRSV_REG5_GEN1_DEEMP);
			regmap_write_4(phy, IMX8MM_PCIE_PHY_TRSV_REG6,
			    IMX8MM_PCIE_PHY_TRSV_REG6_GEN2_DEEMP);
		}
	} else {
		if (OF_getproplen(sc->sc_node, "ext_osc") == 0 ||
		    OF_getpropint(sc->sc_node, "ext_osc", 0)) {
			reg = regmap_read_4(gpr, off);
			reg |= IMX8MQ_GPR_PCIE_REF_USE_PAD;
			regmap_write_4(gpr, off, reg);
		} else {
			reg = regmap_read_4(gpr, off);
			reg &= ~IMX8MQ_GPR_PCIE_REF_USE_PAD;
			regmap_write_4(gpr, off, reg);

			/* Generate the reference clock from SYSPLL1. */
			regmap_write_4(anatop, ANATOP_PLLOUT_CTL,
			    ANATOP_PLLOUT_CTL_CKE |
			    ANATOP_PLLOUT_CTL_SEL_SYSPLL1);
			regmap_write_4(anatop, ANATOP_PLLOUT_DIV,
			    ANATOP_PLLOUT_DIV_SYSPLL1);
		}
	}

	clock_enable(sc->sc_node, "pcie_phy");
	clock_enable(sc->sc_node, "pcie_bus");
	clock_enable(sc->sc_node, "pcie");
	clock_enable(sc->sc_node, "pcie_aux");

	/* Allow clocks to stabilize. */
	delay(200);

	/* Pulse PERST#; 100ms satisfies the post-power-up minimum. */
	if (reset_gpiolen > 0) {
		gpio_controller_set_pin(reset_gpio, 1);
		delay(100000);
		gpio_controller_set_pin(reset_gpio, 0);
	}

	reset_deassert(sc->sc_node, "pciephy");

	/* On i.MX8MM, wait (up to 20ms) for the PHY PLL to lock. */
	if (OF_is_compatible(sc->sc_node, "fsl,imx8mm-pcie")) {
		for (timo = 2000; timo > 0; timo--) {
			if (regmap_read_4(phy, IMX8MM_PCIE_PHY_CMN_REG75) ==
			    IMX8MM_PCIE_PHY_CMN_REG75_PLL_DONE)
				break;
			delay(10);
		}
		if (timo == 0) {
			error = ETIMEDOUT;
			goto err;
		}
	}

	/*
	 * Set the L1 exit latency to 64us.  NOTE(review): the 0x100000
	 * offset presumably addresses the second controller's shadow of
	 * the config space -- confirm against the i.MX8M reference
	 * manual.
	 */
	reg = HREAD4(sc, 0x100000 + PCIE_RC_LCR);
	reg &= ~PCIE_RC_LCR_L1EL_MASK;
	reg |= PCIE_RC_LCR_L1EL_64US;
	HWRITE4(sc, 0x100000 + PCIE_RC_LCR, reg);

	dwpcie_link_config(sc);

	/* Train at Gen1 first; upgrade to Gen2 afterwards if allowed. */
	reg = HREAD4(sc, PCIE_RC_LCR);
	reg &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	reg |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	HWRITE4(sc, PCIE_RC_LCR, reg);

	reset_deassert(sc->sc_node, "apps");

	/* Wait up to 200ms for the Gen1 link. */
	for (timo = 20000; timo > 0; timo--) {
		if (dwpcie_link_up(sc))
			break;
		delay(10);
	}
	if (timo == 0) {
		error = ETIMEDOUT;
		goto err;
	}

	if (OF_getpropint(sc->sc_node, "fsl,max-link-speed", 1) >= 2) {
		reg = HREAD4(sc, PCIE_RC_LCR);
		reg &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		reg |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		HWRITE4(sc, PCIE_RC_LCR, reg);

		/* Trigger a directed speed change. */
		reg = HREAD4(sc, PCIE_LINK_WIDTH_SPEED_CTRL);
		reg |= PCIE_LINK_WIDTH_SPEED_CTRL_CHANGE;
		HWRITE4(sc, PCIE_LINK_WIDTH_SPEED_CTRL, reg);

		for (timo = 20000; timo > 0; timo--) {
			if (dwpcie_link_up(sc))
				break;
			delay(10);
		}
		if (timo == 0) {
			error = ETIMEDOUT;
			goto err;
		}
	}

	sc->sc_ih = fdt_intr_establish(sc->sc_node, IPL_AUDIO | IPL_MPSAFE,
	    dwpcie_imx8mq_intr, sc, sc->sc_dev.dv_xname);

	/* Unmask INTx interrupts. */
	HWRITE4(sc, PCIE_GLOBAL_INT_MASK,
	    PCIE_GLOBAL_INT_MASK_INT_A | PCIE_GLOBAL_INT_MASK_INT_B |
	    PCIE_GLOBAL_INT_MASK_INT_C | PCIE_GLOBAL_INT_MASK_INT_D);

	error = 0;
err:
	/* GPIO arrays were only allocated when the property exists. */
	if (clkreq_gpiolen > 0)
		free(clkreq_gpio, M_TEMP, clkreq_gpiolen);
	if (disable_gpiolen > 0)
		free(disable_gpio, M_TEMP, disable_gpiolen);
	if (reset_gpiolen > 0)
		free(reset_gpio, M_TEMP, reset_gpiolen);
	return error;
}
1315
1316 int
dwpcie_imx8mq_intr(void * arg)1317 dwpcie_imx8mq_intr(void *arg)
1318 {
1319 struct dwpcie_softc *sc = arg;
1320 uint32_t cause;
1321
1322 /* Acknowledge interrupts. */
1323 cause = HREAD4(sc, PCIE_GLOBAL_INT_CAUSE);
1324 HWRITE4(sc, PCIE_GLOBAL_INT_CAUSE, cause);
1325
1326 /* INTx interrupt, so not really ours. */
1327 return 0;
1328 }
1329
/*
 * SiFive FU740 glue: no controller-specific bring-up is needed here;
 * just record that the iATU provides eight viewports.
 */
int
dwpcie_fu740_init(struct dwpcie_softc *sc)
{
	sc->sc_num_viewport = 8;

	return 0;
}
1337
1338 int
dwpcie_rk3568_link_up(struct dwpcie_softc * sc)1339 dwpcie_rk3568_link_up(struct dwpcie_softc *sc)
1340 {
1341 uint32_t reg;
1342
1343 reg = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh,
1344 PCIE_CLIENT_LTSSM_STATUS);
1345 if ((reg & PCIE_CLIENT_SMLH_LINK_UP) &&
1346 (reg & PCIE_CLIENT_RDLH_LINK_UP) &&
1347 (reg & PCIE_CLIENT_LTSSM_MASK) == PCIE_CLIENT_LTSSM_UP)
1348 return 1;
1349 return 0;
1350 }
1351
/*
 * Bring up the PCIe controller on Rockchip RK3568/RK3588 class SoCs:
 * power and PHY first, then PERST# sequencing around LTSSM enable,
 * and finally registration of the optional per-port INTx interrupt
 * controller.  Returns 0 on success or an errno on failure.
 */
int
dwpcie_rk3568_init(struct dwpcie_softc *sc)
{
	uint32_t *reset_gpio;
	ssize_t reset_gpiolen;
	int error, idx, node;
	int pin, timo;

	sc->sc_num_viewport = 8;

	if (bus_space_map(sc->sc_iot, sc->sc_glue_base,
	    sc->sc_glue_size, 0, &sc->sc_glue_ioh))
		return ENOMEM;

	reset_assert_all(sc->sc_node);
	/* Power must be enabled before initializing the PHY. */
	regulator_enable(OF_getpropint(sc->sc_node, "vpcie3v3-supply", 0));
	phy_enable(sc->sc_node, "pcie-phy");
	reset_deassert_all(sc->sc_node);

	clock_enable_all(sc->sc_node);

	/* If the firmware already trained the link, leave it alone. */
	if (dwpcie_rk3568_link_up(sc))
		return 0;

	/* Assert PERST# (value 1 = asserted via active-low flag). */
	reset_gpiolen = OF_getproplen(sc->sc_node, "reset-gpios");
	if (reset_gpiolen > 0) {
		reset_gpio = malloc(reset_gpiolen, M_TEMP, M_WAITOK);
		OF_getpropintarray(sc->sc_node, "reset-gpios", reset_gpio,
		    reset_gpiolen);
		gpio_controller_config_pin(reset_gpio, GPIO_CONFIG_OUTPUT);
		gpio_controller_set_pin(reset_gpio, 1);
	}

	/* Select Root Complex mode with enhanced hot-reset behavior. */
	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
	    PCIE_CLIENT_HOT_RESET_CTRL, PCIE_CLIENT_APP_LTSSM_ENABLE_ENHANCE);
	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
	    PCIE_CLIENT_GENERAL_CON, PCIE_CLIENT_DEV_TYPE_RC);

	/* Assert PERST#. */
	if (reset_gpiolen > 0)
		gpio_controller_set_pin(reset_gpio, 0);

	dwpcie_link_config(sc);

	/* Enable LTSSM. */
	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh, PCIE_CLIENT_GENERAL_CON,
	    PCIE_CLIENT_LINK_REQ_RST_GRT | PCIE_CLIENT_APP_LTSSM_ENABLE);

	/*
	 * PERST# must remain asserted for at least 100us after the
	 * reference clock becomes stable.  But also has to remain
	 * active at least 100ms after power up.  Since we may have
	 * just powered on the device, play it safe and use 100ms.
	 */
	delay(100000);

	/* Deassert PERST#. */
	if (reset_gpiolen > 0)
		gpio_controller_set_pin(reset_gpio, 1);

	/* Wait for the link to come up. */
	for (timo = 100; timo > 0; timo--) {
		if (dwpcie_rk3568_link_up(sc))
			break;
		delay(10000);
	}
	if (timo == 0) {
		error = ETIMEDOUT;
		goto err;
	}

	/* Hook up the per-port INTx interrupt controller, if present. */
	node = OF_getnodebyname(sc->sc_node, "legacy-interrupt-controller");
	idx = OF_getindex(sc->sc_node, "legacy", "interrupt-names");
	if (node && idx != -1) {
		sc->sc_ih = fdt_intr_establish_idx(sc->sc_node, idx,
		    IPL_BIO | IPL_MPSAFE, dwpcie_rk3568_intr, sc,
		    sc->sc_dev.dv_xname);
	}

	if (sc->sc_ih) {
		for (pin = 0; pin < nitems(sc->sc_intx); pin++)
			TAILQ_INIT(&sc->sc_intx[pin]);
		sc->sc_ic.ic_node = node;
		sc->sc_ic.ic_cookie = sc;
		sc->sc_ic.ic_establish = dwpcie_rk3568_intr_establish;
		sc->sc_ic.ic_disestablish = dwpcie_rk3568_intr_disestablish;
		sc->sc_ic.ic_barrier = dwpcie_rk3568_intr_barrier;
		fdt_intr_register(&sc->sc_ic);
	}

	error = 0;
err:
	if (reset_gpiolen > 0)
		free(reset_gpio, M_TEMP, reset_gpiolen);

	return error;
}
1450
/*
 * Demultiplex the shared legacy (INTx) interrupt: for every pending
 * pin, run all handlers registered on that pin at their own IPL,
 * taking the kernel lock for handlers that are not MPSAFE.
 */
int
dwpcie_rk3568_intr(void *arg)
{
	struct dwpcie_softc *sc = arg;
	struct dwpcie_intx *di;
	uint32_t status;
	int pin, s;

	status = bus_space_read_4(sc->sc_iot, sc->sc_glue_ioh,
	    PCIE_CLIENT_INTR_STATUS_LEGACY);
	for (pin = 0; pin < nitems(sc->sc_intx); pin++) {
		if ((status & (1 << pin)) == 0)
			continue;

		TAILQ_FOREACH(di, &sc->sc_intx[pin], di_next) {
			/* Lock before raising IPL; unlock after splx. */
			if ((di->di_flags & IPL_MPSAFE) == 0)
				KERNEL_LOCK();
			s = splraise(di->di_ipl);
			if (di->di_func(di->di_arg))
				di->di_count.ec_count++;
			splx(s);
			if ((di->di_flags & IPL_MPSAFE) == 0)
				KERNEL_UNLOCK();
		}
	}

	/* Always claim the interrupt; INTx is level-triggered. */
	return 1;
}
1479
/*
 * Establish a handler on one INTx pin of the per-port interrupt
 * controller.  The pin is masked around the list insertion (with an
 * interrupt barrier) so the demux loop never sees a half-inserted
 * entry.  Returns an opaque cookie, or NULL on invalid arguments.
 */
void *
dwpcie_rk3568_intr_establish(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct dwpcie_softc *sc = (struct dwpcie_softc *)cookie;
	struct dwpcie_intx *di;
	int pin = cell[0];
	uint32_t mask = (1U << pin);

	/* INTx handlers always run from the primary CPU's handler. */
	if (ci != NULL && !CPU_IS_PRIMARY(ci))
		return NULL;

	if (pin < 0 || pin >= nitems(sc->sc_intx))
		return NULL;

	/* Mask the interrupt (high half is the write-enable bits). */
	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
	    PCIE_CLIENT_INTR_MASK_LEGACY, (mask << 16) | mask);
	intr_barrier(sc->sc_ih);

	di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO);
	di->di_func = func;
	di->di_arg = arg;
	di->di_ipl = level & IPL_IRQMASK;
	di->di_flags = level & IPL_FLAGMASK;
	di->di_pin = pin;
	di->di_name = name;
	if (name != NULL)
		evcount_attach(&di->di_count, name, &di->di_pin);
	di->di_sc = sc;
	TAILQ_INSERT_TAIL(&sc->sc_intx[pin], di, di_next);

	/* Unmask the interrupt. */
	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
	    PCIE_CLIENT_INTR_MASK_LEGACY, mask << 16);

	return di;
}
1518
/*
 * Remove an INTx handler established above.  The pin is masked while
 * the entry is unlinked and only unmasked again if other handlers
 * remain on the same pin.
 */
void
dwpcie_rk3568_intr_disestablish(void *cookie)
{
	struct dwpcie_intx *di = cookie;
	struct dwpcie_softc *sc = di->di_sc;
	uint32_t mask = (1U << di->di_pin);

	/* Mask the interrupt and wait for in-flight handlers. */
	bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
	    PCIE_CLIENT_INTR_MASK_LEGACY, (mask << 16) | mask);
	intr_barrier(sc->sc_ih);

	if (di->di_name)
		evcount_detach(&di->di_count);

	TAILQ_REMOVE(&sc->sc_intx[di->di_pin], di, di_next);

	if (!TAILQ_EMPTY(&sc->sc_intx[di->di_pin])) {
		/* Unmask the interrupt. */
		bus_space_write_4(sc->sc_iot, sc->sc_glue_ioh,
		    PCIE_CLIENT_INTR_MASK_LEGACY, mask << 16);
	}

	free(di, M_DEVBUF, sizeof(*di));
}
1544
1545 void
dwpcie_rk3568_intr_barrier(void * cookie)1546 dwpcie_rk3568_intr_barrier(void *cookie)
1547 {
1548 struct dwpcie_intx *di = cookie;
1549 struct dwpcie_softc *sc = di->di_sc;
1550
1551 intr_barrier(sc->sc_ih);
1552 }
1553
1554 int
dwpcie_sc8280xp_init(struct dwpcie_softc * sc)1555 dwpcie_sc8280xp_init(struct dwpcie_softc *sc)
1556 {
1557 sc->sc_num_viewport = 8;
1558
1559 if (OF_getproplen(sc->sc_node, "msi-map") <= 0)
1560 return dwpcie_msi_init(sc);
1561
1562 return 0;
1563 }
1564
1565 void
dwpcie_atu_write(struct dwpcie_softc * sc,int index,off_t reg,uint32_t val)1566 dwpcie_atu_write(struct dwpcie_softc *sc, int index, off_t reg,
1567 uint32_t val)
1568 {
1569 if (sc->sc_atu_unroll) {
1570 bus_space_write_4(sc->sc_iot, sc->sc_atu_ioh,
1571 IATU_OFFSET_UNROLL(index) + reg, val);
1572 return;
1573 }
1574
1575 if (sc->sc_atu_viewport != index) {
1576 HWRITE4(sc, IATU_VIEWPORT, index);
1577 sc->sc_atu_viewport = index;
1578 }
1579
1580 HWRITE4(sc, IATU_OFFSET_VIEWPORT + reg, val);
1581 }
1582
1583 uint32_t
dwpcie_atu_read(struct dwpcie_softc * sc,int index,off_t reg)1584 dwpcie_atu_read(struct dwpcie_softc *sc, int index, off_t reg)
1585 {
1586 if (sc->sc_atu_unroll) {
1587 return bus_space_read_4(sc->sc_iot, sc->sc_atu_ioh,
1588 IATU_OFFSET_UNROLL(index) + reg);
1589 }
1590
1591 if (sc->sc_atu_viewport != index) {
1592 HWRITE4(sc, IATU_VIEWPORT, index);
1593 sc->sc_atu_viewport = index;
1594 }
1595
1596 return HREAD4(sc, IATU_OFFSET_VIEWPORT + reg);
1597 }
1598
/* Disable an iATU region by clearing its region-enable bit. */
void
dwpcie_atu_disable(struct dwpcie_softc *sc, int index)
{
	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_2, 0);
}
1604
/*
 * Program one iATU outbound region to translate the CPU address
 * window [cpu_addr, cpu_addr + size) to pci_addr, with the given
 * transaction type (MEM/IO/CFG0/CFG1).  Polls until the hardware
 * acknowledges the enable; a timeout is only logged, not returned.
 */
void
dwpcie_atu_config(struct dwpcie_softc *sc, int index, int type,
    uint64_t cpu_addr, uint64_t pci_addr, uint64_t size)
{
	uint32_t reg;
	int timo;

	dwpcie_atu_write(sc, index, IATU_LWR_BASE_ADDR, cpu_addr);
	dwpcie_atu_write(sc, index, IATU_UPPER_BASE_ADDR, cpu_addr >> 32);
	dwpcie_atu_write(sc, index, IATU_LIMIT_ADDR, cpu_addr + size - 1);
	dwpcie_atu_write(sc, index, IATU_LWR_TARGET_ADDR, pci_addr);
	dwpcie_atu_write(sc, index, IATU_UPPER_TARGET_ADDR, pci_addr >> 32);
	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_1, type);
	dwpcie_atu_write(sc, index, IATU_REGION_CTRL_2,
	    IATU_REGION_CTRL_2_REGION_EN);

	/* Wait (up to ~45ms) for the region enable to take effect. */
	for (timo = 5; timo > 0; timo--) {
		reg = dwpcie_atu_read(sc, index, IATU_REGION_CTRL_2);
		if (reg & IATU_REGION_CTRL_2_REGION_EN)
			break;
		delay(9000);
	}
	if (timo == 0)
		printf("%s:%d: timeout\n", __func__, __LINE__);
}
1630
1631 int
dwpcie_link_up(struct dwpcie_softc * sc)1632 dwpcie_link_up(struct dwpcie_softc *sc)
1633 {
1634 uint32_t reg;
1635
1636 reg = HREAD4(sc, PCIE_PHY_DEBUG_R1);
1637 if ((reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) != 0 &&
1638 (reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING) == 0)
1639 return 1;
1640 return 0;
1641 }
1642
/* pci_chipset attach hook; nothing to do for this controller. */
void
dwpcie_attach_hook(struct device *parent, struct device *self,
    struct pcibus_attach_args *pba)
{
}
1648
1649 int
dwpcie_bus_maxdevs(void * v,int bus)1650 dwpcie_bus_maxdevs(void *v, int bus)
1651 {
1652 struct dwpcie_softc *sc = v;
1653
1654 if (bus == sc->sc_bus || bus == sc->sc_bus + 1)
1655 return 1;
1656 return 32;
1657 }
1658
1659 int
dwpcie_find_node(int node,int bus,int device,int function)1660 dwpcie_find_node(int node, int bus, int device, int function)
1661 {
1662 uint32_t reg[5];
1663 uint32_t phys_hi;
1664 int child;
1665
1666 phys_hi = ((bus << 16) | (device << 11) | (function << 8));
1667
1668 for (child = OF_child(node); child; child = OF_peer(child)) {
1669 if (OF_getpropintarray(child, "reg",
1670 reg, sizeof(reg)) != sizeof(reg))
1671 continue;
1672
1673 if (reg[0] == phys_hi)
1674 return child;
1675
1676 node = dwpcie_find_node(child, bus, device, function);
1677 if (node)
1678 return node;
1679 }
1680
1681 return 0;
1682 }
1683
1684 pcitag_t
dwpcie_make_tag(void * v,int bus,int device,int function)1685 dwpcie_make_tag(void *v, int bus, int device, int function)
1686 {
1687 struct dwpcie_softc *sc = v;
1688 int node;
1689
1690 node = dwpcie_find_node(sc->sc_node, bus, device, function);
1691 return (((pcitag_t)node << 32) |
1692 (bus << 24) | (device << 19) | (function << 16));
1693 }
1694
1695 void
dwpcie_decompose_tag(void * v,pcitag_t tag,int * bp,int * dp,int * fp)1696 dwpcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
1697 {
1698 if (bp != NULL)
1699 *bp = (tag >> 24) & 0xff;
1700 if (dp != NULL)
1701 *dp = (tag >> 19) & 0x1f;
1702 if (fp != NULL)
1703 *fp = (tag >> 16) & 0x7;
1704 }
1705
/* All devices get the full 4KB extended configuration space. */
int
dwpcie_conf_size(void *v, pcitag_t tag)
{
	return PCIE_CONFIG_SPACE_SIZE;
}
1711
/*
 * Read a configuration register.  The root port's own config space
 * is memory-mapped directly; everything else goes through an iATU
 * region temporarily re-targeted at the device's config space
 * (Type 0 for the bus directly behind the root port, Type 1 beyond).
 */
pcireg_t
dwpcie_conf_read(void *v, pcitag_t tag, int reg)
{
	struct dwpcie_softc *sc = v;
	int bus, dev, fn;
	uint32_t ret;

	dwpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		tag = dwpcie_make_tag(sc, 0, dev, fn);
		return HREAD4(sc, PCITAG_OFFSET(tag) | reg);
	}

	if (bus == sc->sc_bus + 1) {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_CFG0,
		    sc->sc_conf_base, PCITAG_OFFSET(tag),
		    sc->sc_conf_size);
	} else {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_CFG1,
		    sc->sc_conf_base, PCITAG_OFFSET(tag),
		    sc->sc_conf_size);
	}

	ret = bus_space_read_4(sc->sc_iot, sc->sc_conf_ioh, reg);

	/*
	 * With only two viewports, config access borrows the I/O
	 * viewport; restore the I/O mapping afterwards.
	 */
	if (sc->sc_num_viewport <= 2 && sc->sc_io_size > 0) {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
		    sc->sc_io_bus_addr, sc->sc_io_size);
	}

	return ret;
}
1748
/*
 * Write a configuration register; mirror image of dwpcie_conf_read()
 * including the iATU viewport borrow-and-restore dance.
 */
void
dwpcie_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
{
	struct dwpcie_softc *sc = v;
	int bus, dev, fn;

	dwpcie_decompose_tag(sc, tag, &bus, &dev, &fn);
	if (bus == sc->sc_bus) {
		KASSERT(dev == 0);
		tag = dwpcie_make_tag(sc, 0, dev, fn);
		HWRITE4(sc, PCITAG_OFFSET(tag) | reg, data);
		return;
	}

	if (bus == sc->sc_bus + 1) {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_CFG0,
		    sc->sc_conf_base, PCITAG_OFFSET(tag),
		    sc->sc_conf_size);
	} else {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_CFG1,
		    sc->sc_conf_base, PCITAG_OFFSET(tag),
		    sc->sc_conf_size);
	}

	bus_space_write_4(sc->sc_iot, sc->sc_conf_ioh, reg, data);

	/* Restore the I/O viewport if config access borrowed it. */
	if (sc->sc_num_viewport <= 2 && sc->sc_io_size > 0) {
		dwpcie_atu_config(sc, IATU_VIEWPORT_INDEX1,
		    IATU_REGION_CTRL_1_TYPE_IO, sc->sc_io_base,
		    sc->sc_io_bus_addr, sc->sc_io_size);
	}
}
1783
1784 int
dwpcie_probe_device_hook(void * v,struct pci_attach_args * pa)1785 dwpcie_probe_device_hook(void *v, struct pci_attach_args *pa)
1786 {
1787 struct dwpcie_softc *sc = v;
1788 uint16_t rid;
1789 int i;
1790
1791 rid = pci_requester_id(pa->pa_pc, pa->pa_tag);
1792 pa->pa_dmat = iommu_device_map_pci(sc->sc_node, rid, pa->pa_dmat);
1793
1794 for (i = 0; i < sc->sc_nranges; i++) {
1795 iommu_reserve_region_pci(sc->sc_node, rid,
1796 sc->sc_ranges[i].pci_base, sc->sc_ranges[i].size);
1797 }
1798
1799 return 0;
1800 }
1801
1802 int
dwpcie_intr_map(struct pci_attach_args * pa,pci_intr_handle_t * ihp)1803 dwpcie_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
1804 {
1805 int pin = pa->pa_rawintrpin;
1806
1807 if (pin == 0 || pin > PCI_INTERRUPT_PIN_MAX)
1808 return -1;
1809
1810 if (pa->pa_tag == 0)
1811 return -1;
1812
1813 ihp->ih_pc = pa->pa_pc;
1814 ihp->ih_tag = pa->pa_intrtag;
1815 ihp->ih_intrpin = pa->pa_intrpin;
1816 ihp->ih_type = PCI_INTX;
1817
1818 return 0;
1819 }
1820
1821 const char *
dwpcie_intr_string(void * v,pci_intr_handle_t ih)1822 dwpcie_intr_string(void *v, pci_intr_handle_t ih)
1823 {
1824 switch (ih.ih_type) {
1825 case PCI_MSI:
1826 return "msi";
1827 case PCI_MSIX:
1828 return "msix";
1829 }
1830
1831 return "intx";
1832 }
1833
1834 struct dwpcie_msi *
dwpcie_msi_establish(struct dwpcie_softc * sc,pci_intr_handle_t * ihp,int level,int (* func)(void *),void * arg,char * name)1835 dwpcie_msi_establish(struct dwpcie_softc *sc, pci_intr_handle_t *ihp,
1836 int level, int (*func)(void *), void *arg, char *name)
1837 {
1838 pci_chipset_tag_t pc = ihp->ih_pc;
1839 pcitag_t tag = ihp->ih_tag;
1840 struct dwpcie_msi *dm;
1841 uint64_t msi_mask;
1842 int vec = ihp->ih_intrpin;
1843 int base, mme, nvec, off;
1844 pcireg_t reg;
1845
1846 if (ihp->ih_type == PCI_MSI) {
1847 if (pci_get_capability(pc, tag, PCI_CAP_MSI, &off, ®) == 0)
1848 panic("%s: no msi capability", __func__);
1849
1850 reg = pci_conf_read(ihp->ih_pc, ihp->ih_tag, off);
1851 mme = ((reg & PCI_MSI_MC_MME_MASK) >> PCI_MSI_MC_MME_SHIFT);
1852 if (vec >= (1 << mme))
1853 return NULL;
1854 if (reg & PCI_MSI_MC_C64)
1855 base = pci_conf_read(pc, tag, off + PCI_MSI_MD64);
1856 else
1857 base = pci_conf_read(pc, tag, off + PCI_MSI_MD32);
1858 } else {
1859 mme = 0;
1860 base = 0;
1861 }
1862
1863 if (vec == 0) {
1864 /*
1865 * Pre-allocate all the requested vectors. Remember
1866 * the number of requested vectors such that we can
1867 * deallocate them in one go.
1868 */
1869 msi_mask = (1ULL << (1 << mme)) - 1;
1870 while (vec <= sc->sc_num_msi - (1 << mme)) {
1871 if ((sc->sc_msi_mask & (msi_mask << vec)) == 0) {
1872 sc->sc_msi_mask |= (msi_mask << vec);
1873 break;
1874 }
1875 vec += (1 << mme);
1876 }
1877 base = vec;
1878 nvec = (1 << mme);
1879 } else {
1880 KASSERT(ihp->ih_type == PCI_MSI);
1881 vec += base;
1882 nvec = 0;
1883 }
1884
1885 if (vec >= sc->sc_num_msi)
1886 return NULL;
1887
1888 if (ihp->ih_type == PCI_MSI) {
1889 if (reg & PCI_MSI_MC_C64)
1890 pci_conf_write(pc, tag, off + PCI_MSI_MD64, base);
1891 else
1892 pci_conf_write(pc, tag, off + PCI_MSI_MD32, base);
1893 }
1894
1895 dm = &sc->sc_msi[vec];
1896 KASSERT(dm->dm_func == NULL);
1897
1898 dm->dm_func = func;
1899 dm->dm_arg = arg;
1900 dm->dm_ipl = level & IPL_IRQMASK;
1901 dm->dm_flags = level & IPL_FLAGMASK;
1902 dm->dm_vec = vec;
1903 dm->dm_nvec = nvec;
1904 dm->dm_name = name;
1905 if (name != NULL)
1906 evcount_attach(&dm->dm_count, name, &dm->dm_vec);
1907
1908 /* Unmask the MSI. */
1909 HCLR4(sc, PCIE_MSI_INTR_MASK(vec / 32), (1U << (vec % 32)));
1910
1911 return dm;
1912 }
1913
/*
 * Tear down an MSI vector established by dwpcie_msi_establish().
 * Only the vector that performed the group allocation (dm_nvec > 0)
 * releases vectors from the allocation mask; follow-up vectors have
 * dm_nvec == 0 and release nothing.
 */
void
dwpcie_msi_disestablish(struct dwpcie_softc *sc, struct dwpcie_msi *dm)
{
	uint64_t msi_mask = (1ULL << dm->dm_nvec) - 1;

	/* Mask the MSI. */
	HSET4(sc, PCIE_MSI_INTR_MASK(dm->dm_vec / 32),
	    (1U << (dm->dm_vec % 32)));

	if (dm->dm_name)
		evcount_detach(&dm->dm_count);
	dm->dm_func = NULL;

	/*
	 * Deallocate all allocated vectors if this is the first
	 * vector for the device.
	 */
	sc->sc_msi_mask &= ~(msi_mask << dm->dm_vec);
}
1933
/*
 * Establish a PCI interrupt.  Three cases:
 *  - MSI/MSI-X on controllers with a built-in MSI block
 *    (sc_msi_addr != 0): allocate a vector locally;
 *  - MSI/MSI-X forwarded to an external interrupt controller:
 *    establish through the FDT MSI framework and wrap the doorbell
 *    address in a DMA map so the range stays reserved;
 *  - INTx: route through the device tree interrupt map.
 * Returns an opaque handle for dwpcie_intr_disestablish(), or NULL.
 */
void *
dwpcie_intr_establish(void *v, pci_intr_handle_t ih, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	struct dwpcie_softc *sc = v;
	struct dwpcie_intr_handle *pih;
	void *cookie = NULL;

	KASSERT(ih.ih_type != PCI_NONE);

	if (ih.ih_type != PCI_INTX) {
		struct dwpcie_msi *dm = NULL;
		bus_dma_tag_t dmat = ih.ih_dmat;
		bus_dma_segment_t seg;
		bus_dmamap_t map;
		uint64_t addr, data;

		if (sc->sc_msi_addr) {
			/* Controller-internal MSI block. */
			dm = dwpcie_msi_establish(sc, &ih, level, func, arg, name);
			if (dm == NULL)
				return NULL;
			addr = sc->sc_msi_addr;
			data = dm->dm_vec;
		} else {
			/*
			 * Assume hardware passes Requester ID as
			 * sideband data.
			 */
			addr = ih.ih_intrpin;
			data = pci_requester_id(ih.ih_pc, ih.ih_tag);
			cookie = fdt_intr_establish_msi_cpu(sc->sc_node, &addr,
			    &data, level, ci, func, arg, (void *)name);
			if (cookie == NULL)
				return NULL;
		}

		pih = malloc(sizeof(*pih), M_DEVBUF, M_WAITOK | M_ZERO);
		pih->pih_ih.ih_ic = &dwpcie_ic;
		pih->pih_ih.ih_ih = cookie;
		pih->pih_sc = sc;
		pih->pih_dm = dm;

		if (sc->sc_msi_addr == 0) {
			/*
			 * Wrap the doorbell address in a DMA map so the
			 * page cannot be allocated for real DMA.
			 */
			if (bus_dmamap_create(dmat, sizeof(uint32_t), 1,
			    sizeof(uint32_t), 0, BUS_DMA_WAITOK, &map)) {
				free(pih, M_DEVBUF, sizeof(*pih));
				fdt_intr_disestablish(cookie);
				return NULL;
			}

			memset(&seg, 0, sizeof(seg));
			seg.ds_addr = addr;
			seg.ds_len = sizeof(uint32_t);

			if (bus_dmamap_load_raw(dmat, map, &seg, 1,
			    sizeof(uint32_t), BUS_DMA_WAITOK)) {
				bus_dmamap_destroy(dmat, map);
				free(pih, M_DEVBUF, sizeof(*pih));
				fdt_intr_disestablish(cookie);
				return NULL;
			}

			/* Use the (possibly translated) DVA as doorbell. */
			addr = map->dm_segs[0].ds_addr;
			pih->pih_dmat = dmat;
			pih->pih_map = map;
		}

		if (ih.ih_type == PCI_MSIX) {
			pci_msix_enable(ih.ih_pc, ih.ih_tag,
			    &sc->sc_bus_memt, ih.ih_intrpin, addr, data);
		} else
			pci_msi_enable(ih.ih_pc, ih.ih_tag, addr, data);
	} else {
		int bus, dev, fn;
		uint32_t reg[4];

		dwpcie_decompose_tag(sc, ih.ih_tag, &bus, &dev, &fn);

		/* Interrupt-map lookup key: phys.hi + INTx pin. */
		reg[0] = bus << 16 | dev << 11 | fn << 8;
		reg[1] = reg[2] = 0;
		reg[3] = ih.ih_intrpin;

		cookie = fdt_intr_establish_imap_cpu(sc->sc_node, reg,
		    sizeof(reg), level, ci, func, arg, name);
		if (cookie == NULL)
			return NULL;

		pih = malloc(sizeof(*pih), M_DEVBUF, M_WAITOK | M_ZERO);
		pih->pih_ih.ih_ic = &dwpcie_ic;
		pih->pih_ih.ih_ih = cookie;
	}

	return pih;
}
2028
2029 void
dwpcie_intr_disestablish(void * v,void * cookie)2030 dwpcie_intr_disestablish(void *v, void *cookie)
2031 {
2032 struct dwpcie_intr_handle *pih = cookie;
2033
2034 if (pih->pih_dm)
2035 dwpcie_msi_disestablish(pih->pih_sc, pih->pih_dm);
2036 else
2037 fdt_intr_disestablish(pih->pih_ih.ih_ih);
2038
2039 if (pih->pih_dmat) {
2040 bus_dmamap_unload(pih->pih_dmat, pih->pih_map);
2041 bus_dmamap_destroy(pih->pih_dmat, pih->pih_map);
2042 }
2043
2044 free(pih, M_DEVBUF, sizeof(*pih));
2045 }
2046
2047 int
dwpcie_bs_iomap(bus_space_tag_t t,bus_addr_t addr,bus_size_t size,int flags,bus_space_handle_t * bshp)2048 dwpcie_bs_iomap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
2049 int flags, bus_space_handle_t *bshp)
2050 {
2051 struct dwpcie_softc *sc = t->bus_private;
2052 int i;
2053
2054 for (i = 0; i < sc->sc_nranges; i++) {
2055 uint64_t pci_start = sc->sc_ranges[i].pci_base;
2056 uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
2057 uint64_t phys_start = sc->sc_ranges[i].phys_base;
2058
2059 if ((sc->sc_ranges[i].flags & 0x03000000) == 0x01000000 &&
2060 addr >= pci_start && addr + size <= pci_end) {
2061 return bus_space_map(sc->sc_iot,
2062 addr - pci_start + phys_start, size, flags, bshp);
2063 }
2064 }
2065
2066 return ENXIO;
2067 }
2068
2069 int
dwpcie_bs_memmap(bus_space_tag_t t,bus_addr_t addr,bus_size_t size,int flags,bus_space_handle_t * bshp)2070 dwpcie_bs_memmap(bus_space_tag_t t, bus_addr_t addr, bus_size_t size,
2071 int flags, bus_space_handle_t *bshp)
2072 {
2073 struct dwpcie_softc *sc = t->bus_private;
2074 int i;
2075
2076 for (i = 0; i < sc->sc_nranges; i++) {
2077 uint64_t pci_start = sc->sc_ranges[i].pci_base;
2078 uint64_t pci_end = pci_start + sc->sc_ranges[i].size;
2079 uint64_t phys_start = sc->sc_ranges[i].phys_base;
2080
2081 if ((sc->sc_ranges[i].flags & 0x02000000) == 0x02000000 &&
2082 addr >= pci_start && addr + size <= pci_end) {
2083 return bus_space_map(sc->sc_iot,
2084 addr - pci_start + phys_start, size, flags, bshp);
2085 }
2086 }
2087
2088 return ENXIO;
2089 }
2090