xref: /freebsd/sys/dev/pci/pci_dw_mv.c (revision a91a2465)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Michal Meloun <mmel@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/* Armada 8k DesignWare PCIe driver */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/devmap.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>

#include <dev/clk/clk.h>
#include <dev/phy/phy.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/ofw/ofwpci.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_dw.h>

#include "pcib_if.h"
#include "pci_dw_if.h"

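/*
 * Marvell-specific "glue" registers.  They live in the same DBI
 * aperture as the standard DesignWare registers (offsets 0x8000 and
 * above) and are accessed with pci_dw_dbi_rd4()/pci_dw_dbi_wr4().
 */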
#define MV_GLOBAL_CONTROL_REG		0x8000
#define  PCIE_APP_LTSSM_EN			(1 << 2)

#define MV_GLOBAL_STATUS_REG		0x8008
#define  MV_STATUS_RDLH_LINK_UP			(1 << 1)
#define  MV_STATUS_PHY_LINK_UP			(1 << 9)

#define MV_INT_CAUSE1			0x801C
#define MV_INT_MASK1			0x8020
#define  INT_A_ASSERT_MASK			(1 <<  9)
#define  INT_B_ASSERT_MASK			(1 << 10)
#define  INT_C_ASSERT_MASK			(1 << 11)
#define  INT_D_ASSERT_MASK			(1 << 12)

#define MV_INT_CAUSE2			0x8024
#define MV_INT_MASK2			0x8028
#define MV_ERR_INT_CAUSE		0x802C
#define MV_ERR_INT_MASK			0x8030

#define MV_ARCACHE_TRC_REG		0x8050
#define MV_AWCACHE_TRC_REG		0x8054
#define MV_ARUSER_REG			0x805C
#define MV_AWUSER_REG			0x8060

#define	MV_MAX_LANES	8
struct pci_mv_softc {
	struct pci_dw_softc	dw_sc;
	device_t		dev;
	phandle_t		node;
	struct resource		*irq_res;
	void			*intr_cookie;
	phy_t			phy[MV_MAX_LANES];
	clk_t			clk_core;
	clk_t			clk_reg;
};

/* Compatible devices. */
static struct ofw_compat_data compat_data[] = {
	{"marvell,armada8k-pcie",	1},
	{NULL,				0},
};

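/*
 * Enable all PCIe PHYs described in the controller's DT node.  Missing
 * PHYs (ENOENT) are skipped; other lookup failures are tolerated for
 * now (see the XXX below) until a PHY driver is implemented.
 */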
static int
pci_mv_phy_init(struct pci_mv_softc *sc)
{
	int i, rv;

	for (i = 0; i < MV_MAX_LANES; i++) {
		rv = phy_get_by_ofw_idx(sc->dev, sc->node, i, &(sc->phy[i]));
		if (rv != 0 && rv != ENOENT) {
			device_printf(sc->dev, "Cannot get phy[%d]\n", i);
/* XXX revert to failing hard once a PHY driver is implemented */
#if 0
			goto fail;
#else
			continue;
#endif
		}
		if (sc->phy[i] == NULL)
			continue;
		rv = phy_enable(sc->phy[i]);
		if (rv != 0) {
			device_printf(sc->dev, "Cannot enable phy[%d]\n", i);
			goto fail;
		}
	}
	return (0);

fail:
	for (i = 0; i < MV_MAX_LANES; i++) {
		if (sc->phy[i] == NULL)
			continue;
		phy_release(sc->phy[i]);
	}

	return (rv);
}

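/*
 * One-time initialization of the Marvell-specific glue registers:
 * put the controller into root complex (RC) mode, set the AXI master
 * cache/domain attributes and unmask the INTx and local interrupts.
 */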
static void
pci_mv_init(struct pci_mv_softc *sc)
{
	uint32_t reg;

	/* Set device configuration to RC */
	reg = pci_dw_dbi_rd4(sc->dev, MV_GLOBAL_CONTROL_REG);
	reg &= ~0x000000F0;
	reg |= 0x00000040;
	pci_dw_dbi_wr4(sc->dev, MV_GLOBAL_CONTROL_REG, reg);

	/* AxCache master transaction attributes */
	pci_dw_dbi_wr4(sc->dev, MV_ARCACHE_TRC_REG, 0x3511);
	pci_dw_dbi_wr4(sc->dev, MV_AWCACHE_TRC_REG, 0x5311);

	/* AxDomain master transaction attributes */
	pci_dw_dbi_wr4(sc->dev, MV_ARUSER_REG, 0x0002);
	pci_dw_dbi_wr4(sc->dev, MV_AWUSER_REG, 0x0002);

	/* Enable all INTx interrupt (virtual) pins */
	reg = pci_dw_dbi_rd4(sc->dev, MV_INT_MASK1);
	reg |= INT_A_ASSERT_MASK | INT_B_ASSERT_MASK |
	       INT_C_ASSERT_MASK | INT_D_ASSERT_MASK;
	pci_dw_dbi_wr4(sc->dev, MV_INT_MASK1, reg);

	/* Enable local interrupts */
	pci_dw_dbi_wr4(sc->dev, DW_MSI_INTR0_MASK, 0xFFFFFFFF);
	pci_dw_dbi_wr4(sc->dev, MV_INT_MASK1, 0x0001FE00);
	pci_dw_dbi_wr4(sc->dev, MV_INT_MASK2, 0x00000000);
	pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE1, 0xFFFFFFFF);
	pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE2, 0xFFFFFFFF);

	/* Errors have their own interrupt, which is not yet populated in the DT */
	pci_dw_dbi_wr4(sc->dev, MV_ERR_INT_MASK, 0);
}

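/*
 * Summary interrupt filter: acknowledge every pending source by
 * writing the cause bits read from MV_INT_CAUSE1/2 back to the same
 * registers.
 */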
static int
pci_mv_intr(void *arg)
{
	struct pci_mv_softc *sc = arg;
	uint32_t cause1, cause2;

	/* Ack all interrupts */
	cause1 = pci_dw_dbi_rd4(sc->dev, MV_INT_CAUSE1);
	cause2 = pci_dw_dbi_rd4(sc->dev, MV_INT_CAUSE2);

	pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE1, cause1);
	pci_dw_dbi_wr4(sc->dev, MV_INT_CAUSE2, cause2);
	return (FILTER_HANDLED);
}

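/*
 * pci_dw(4) link-state callback: the link is reported up only when
 * both the PHY and the RDLH (data link layer) status bits are set.
 */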
static int
pci_mv_get_link(device_t dev, bool *status)
{
	uint32_t reg;

	reg = pci_dw_dbi_rd4(dev, MV_GLOBAL_STATUS_REG);
	if ((reg & (MV_STATUS_RDLH_LINK_UP | MV_STATUS_PHY_LINK_UP)) ==
	    (MV_STATUS_RDLH_LINK_UP | MV_STATUS_PHY_LINK_UP))
		*status = true;
	else
		*status = false;

	return (0);
}

static int
pci_mv_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Marvell Armada8K PCI-E Controller");
	return (BUS_PROBE_DEFAULT);
}

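/*
 * Attach: map the DBI register window, enable the 'core'/'reg' clocks
 * and the PHYs, run the generic DesignWare initialization and finally
 * program the Marvell-specific registers and hook up the interrupt
 * handler.
 */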
static int
pci_mv_attach(device_t dev)
{
	struct resource_map_request req;
	struct resource_map map;
	struct pci_mv_softc *sc;
	phandle_t node;
	int rv;
	int rid;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	sc->dev = dev;
	sc->node = node;

	rid = 0;
	sc->dw_sc.dbi_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | RF_UNMAPPED);
	if (sc->dw_sc.dbi_res == NULL) {
		device_printf(dev, "Cannot allocate DBI memory\n");
		rv = ENXIO;
		goto out;
	}

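	/*
	 * Map the DBI window using the non-posted device memory attribute
	 * (VM_MEMATTR_DEVICE_NP), presumably required for correct
	 * config-space access semantics.
	 */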
	resource_init_map_request(&req);
	req.memattr = VM_MEMATTR_DEVICE_NP;
	rv = bus_map_resource(dev, SYS_RES_MEMORY, sc->dw_sc.dbi_res, &req,
	    &map);
	if (rv != 0) {
		device_printf(dev, "Cannot map DBI memory\n");
		goto out;
	}
	rman_set_mapping(sc->dw_sc.dbi_res, &map);

	/* PCI interrupt */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "Cannot allocate IRQ resources\n");
		rv = ENXIO;
		goto out;
	}

	/* Clocks */
	rv = clk_get_by_ofw_name(sc->dev, 0, "core", &sc->clk_core);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'core' clock\n");
		rv = ENXIO;
		goto out;
	}

	rv = clk_get_by_ofw_name(sc->dev, 0, "reg", &sc->clk_reg);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get 'reg' clock\n");
		rv = ENXIO;
		goto out;
	}

	rv = clk_enable(sc->clk_core);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot enable 'core' clock\n");
		rv = ENXIO;
		goto out;
	}

	rv = clk_enable(sc->clk_reg);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot enable 'reg' clock\n");
		rv = ENXIO;
		goto out;
	}

	rv = pci_mv_phy_init(sc);
	if (rv != 0)
		goto out;

	rv = pci_dw_init(dev);
	if (rv != 0)
		goto out;

	pci_mv_init(sc);

	/* Setup interrupt */
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    pci_mv_intr, NULL, sc, &sc->intr_cookie)) {
		device_printf(dev, "cannot setup interrupt handler\n");
		rv = ENXIO;
		goto out;
	}

	return (bus_generic_attach(dev));
out:
	/* XXX Cleanup */
	return (rv);
}

static device_method_t pci_mv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pci_mv_probe),
	DEVMETHOD(device_attach,	pci_mv_attach),

	DEVMETHOD(pci_dw_get_link,	pci_mv_get_link),

	DEVMETHOD_END
};

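/*
 * Inherit the generic DesignWare PCIe (pci_dw) methods; only probe,
 * attach and the link-state query are overridden here.
 */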
DEFINE_CLASS_1(pcib, pci_mv_driver, pci_mv_methods,
    sizeof(struct pci_mv_softc), pci_dw_driver);
DRIVER_MODULE(pci_mv, simplebus, pci_mv_driver, NULL, NULL);