1 /* $OpenBSD: if_dwge.c,v 1.22 2024/02/08 20:50:34 kettenis Exp $ */
2 /*
3 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 /*
20 * Driver for the Synopsys Designware ethernet controller.
21 */
22
23 #include "bpfilter.h"
24 #include "kstat.h"
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/device.h>
29 #include <sys/kernel.h>
30 #include <sys/malloc.h>
31 #include <sys/mbuf.h>
32 #include <sys/queue.h>
33 #include <sys/socket.h>
34 #include <sys/sockio.h>
35 #include <sys/timeout.h>
36
37 #include <machine/bus.h>
38 #include <machine/fdt.h>
39
40 #include <net/if.h>
41 #include <net/if_media.h>
42
43 #include <dev/ofw/openfirm.h>
44 #include <dev/ofw/ofw_clock.h>
45 #include <dev/ofw/ofw_gpio.h>
46 #include <dev/ofw/ofw_misc.h>
47 #include <dev/ofw/ofw_pinctrl.h>
48 #include <dev/ofw/ofw_regulator.h>
49 #include <dev/ofw/fdt.h>
50
51 #include <dev/mii/mii.h>
52 #include <dev/mii/miivar.h>
53
54 #if NBPFILTER > 0
55 #include <net/bpf.h>
56 #endif
57
58 #if NKSTAT > 0
59 #include <sys/kstat.h>
60 #endif
61
62 #include <netinet/in.h>
63 #include <netinet/if_ether.h>
64
65 /* Registers */
66
67 #define GMAC_MAC_CONF 0x0000
68 #define GMAC_MAC_CONF_JD (1 << 22)
69 #define GMAC_MAC_CONF_BE (1 << 21)
70 #define GMAC_MAC_CONF_DCRS (1 << 16)
71 #define GMAC_MAC_CONF_PS (1 << 15)
72 #define GMAC_MAC_CONF_FES (1 << 14)
73 #define GMAC_MAC_CONF_LM (1 << 12)
74 #define GMAC_MAC_CONF_DM (1 << 11)
75 #define GMAC_MAC_CONF_TE (1 << 3)
76 #define GMAC_MAC_CONF_RE (1 << 2)
77 #define GMAC_MAC_FRM_FILT 0x0004
78 #define GMAC_MAC_FRM_FILT_PM (1 << 4)
79 #define GMAC_MAC_FRM_FILT_HMC (1 << 2)
80 #define GMAC_MAC_FRM_FILT_PR (1 << 0)
81 #define GMAC_HASH_TAB_HI 0x0008
82 #define GMAC_HASH_TAB_LO 0x000c
83 #define GMAC_GMII_ADDR 0x0010
84 #define GMAC_GMII_ADDR_PA_SHIFT 11
85 #define GMAC_GMII_ADDR_GR_SHIFT 6
86 #define GMAC_GMII_ADDR_CR_SHIFT 2
87 #define GMAC_GMII_ADDR_CR_MASK 0xf
88 #define GMAC_GMII_ADDR_CR_DIV_42 0
89 #define GMAC_GMII_ADDR_CR_DIV_62 1
90 #define GMAC_GMII_ADDR_CR_DIV_16 2
91 #define GMAC_GMII_ADDR_CR_DIV_26 3
92 #define GMAC_GMII_ADDR_CR_DIV_102 4
93 #define GMAC_GMII_ADDR_CR_DIV_124 5
94 #define GMAC_GMII_ADDR_GW (1 << 1)
95 #define GMAC_GMII_ADDR_GB (1 << 0)
96 #define GMAC_GMII_DATA 0x0014
97 #define GMAC_VERSION 0x0020
98 #define GMAC_VERSION_SNPS_MASK 0xff
99 #define GMAC_INT_MASK 0x003c
100 #define GMAC_INT_MASK_LPIIM (1 << 10)
101 #define GMAC_INT_MASK_PIM (1 << 3)
102 #define GMAC_INT_MASK_RIM (1 << 0)
103 #define GMAC_MAC_ADDR0_HI 0x0040
104 #define GMAC_MAC_ADDR0_LO 0x0044
105 #define GMAC_MAC_MMC_CTRL 0x0100
106 #define GMAC_MAC_MMC_CTRL_ROR (1 << 2)
107 #define GMAC_MAC_MMC_CTRL_CR (1 << 0)
108 #define GMAC_MMC_RX_INT_MSK 0x010c
109 #define GMAC_MMC_TX_INT_MSK 0x0110
110 #define GMAC_MMC_TXOCTETCNT_GB 0x0114
111 #define GMAC_MMC_TXFRMCNT_GB 0x0118
112 #define GMAC_MMC_TXUNDFLWERR 0x0148
113 #define GMAC_MMC_TXCARERR 0x0160
114 #define GMAC_MMC_TXOCTETCNT_G 0x0164
115 #define GMAC_MMC_TXFRMCNT_G 0x0168
116 #define GMAC_MMC_RXFRMCNT_GB 0x0180
117 #define GMAC_MMC_RXOCTETCNT_GB 0x0184
118 #define GMAC_MMC_RXOCTETCNT_G 0x0188
119 #define GMAC_MMC_RXMCFRMCNT_G 0x0190
120 #define GMAC_MMC_RXCRCERR 0x0194
121 #define GMAC_MMC_RXLENERR 0x01c8
122 #define GMAC_MMC_RXFIFOOVRFLW 0x01d4
123 #define GMAC_MMC_IPC_INT_MSK 0x0200
124 #define GMAC_BUS_MODE 0x1000
125 #define GMAC_BUS_MODE_8XPBL (1 << 24)
126 #define GMAC_BUS_MODE_USP (1 << 23)
127 #define GMAC_BUS_MODE_RPBL_MASK (0x3f << 17)
128 #define GMAC_BUS_MODE_RPBL_SHIFT 17
129 #define GMAC_BUS_MODE_FB (1 << 16)
130 #define GMAC_BUS_MODE_PBL_MASK (0x3f << 8)
131 #define GMAC_BUS_MODE_PBL_SHIFT 8
132 #define GMAC_BUS_MODE_SWR (1 << 0)
133 #define GMAC_TX_POLL_DEMAND 0x1004
134 #define GMAC_RX_DESC_LIST_ADDR 0x100c
135 #define GMAC_TX_DESC_LIST_ADDR 0x1010
136 #define GMAC_STATUS 0x1014
137 #define GMAC_STATUS_MMC (1 << 27)
138 #define GMAC_STATUS_RI (1 << 6)
139 #define GMAC_STATUS_TU (1 << 2)
140 #define GMAC_STATUS_TI (1 << 0)
141 #define GMAC_OP_MODE 0x1018
142 #define GMAC_OP_MODE_RSF (1 << 25)
143 #define GMAC_OP_MODE_TSF (1 << 21)
144 #define GMAC_OP_MODE_FTF (1 << 20)
145 #define GMAC_OP_MODE_TTC_MASK (0x7 << 14)
146 #define GMAC_OP_MODE_TTC_64 (0x0 << 14)
147 #define GMAC_OP_MODE_TTC_128 (0x1 << 14)
148 #define GMAC_OP_MODE_ST (1 << 13)
149 #define GMAC_OP_MODE_RTC_MASK (0x3 << 3)
150 #define GMAC_OP_MODE_RTC_64 (0x0 << 3)
151 #define GMAC_OP_MODE_RTC_128 (0x3 << 3)
152 #define GMAC_OP_MODE_OSF (1 << 2)
153 #define GMAC_OP_MODE_SR (1 << 1)
154 #define GMAC_INT_ENA 0x101c
155 #define GMAC_INT_ENA_NIE (1 << 16)
156 #define GMAC_INT_ENA_RIE (1 << 6)
157 #define GMAC_INT_ENA_TUE (1 << 2)
158 #define GMAC_INT_ENA_TIE (1 << 0)
159 #define GMAC_AXI_BUS_MODE 0x1028
160 #define GMAC_AXI_BUS_MODE_WR_OSR_LMT_MASK (0xf << 20)
161 #define GMAC_AXI_BUS_MODE_WR_OSR_LMT_SHIFT 20
162 #define GMAC_AXI_BUS_MODE_RD_OSR_LMT_MASK (0xf << 16)
163 #define GMAC_AXI_BUS_MODE_RD_OSR_LMT_SHIFT 16
164 #define GMAC_AXI_BUS_MODE_BLEN_256 (1 << 7)
165 #define GMAC_AXI_BUS_MODE_BLEN_128 (1 << 6)
166 #define GMAC_AXI_BUS_MODE_BLEN_64 (1 << 5)
167 #define GMAC_AXI_BUS_MODE_BLEN_32 (1 << 4)
168 #define GMAC_AXI_BUS_MODE_BLEN_16 (1 << 3)
169 #define GMAC_AXI_BUS_MODE_BLEN_8 (1 << 2)
170 #define GMAC_AXI_BUS_MODE_BLEN_4 (1 << 1)
171 #define GMAC_HW_FEATURE 0x1058
172 #define GMAC_HW_FEATURE_ENHDESSEL (1 << 24)
173
174 /*
175 * DWGE descriptors.
176 */
177
178 struct dwge_desc {
179 uint32_t sd_status;
180 uint32_t sd_len;
181 uint32_t sd_addr;
182 uint32_t sd_next;
183 };
184
185 /* Tx status bits. */
186 #define TDES0_DB (1 << 0)
187 #define TDES0_UF (1 << 1)
188 #define TDES0_ED (1 << 2)
189 #define TDES0_CC_MASK (0xf << 3)
190 #define TDES0_CC_SHIFT 3
191 #define TDES0_EC (1 << 8)
192 #define TDES0_LC (1 << 9)
193 #define TDES0_NC (1 << 10)
194 #define TDES0_PCE (1 << 12)
195 #define TDES0_JT (1 << 14)
196 #define TDES0_IHE (1 << 16)
197 #define TDES0_OWN (1U << 31)
198
199 #define ETDES0_TCH (1 << 20)
200 #define ETDES0_FS (1 << 28)
201 #define ETDES0_LS (1 << 29)
202 #define ETDES0_IC (1 << 30)
203
204 /* Rx status bits */
205 #define RDES0_PE (1 << 0)
206 #define RDES0_CE (1 << 1)
207 #define RDES0_RE (1 << 3)
208 #define RDES0_RWT (1 << 4)
209 #define RDES0_FT (1 << 5)
210 #define RDES0_LC (1 << 6)
211 #define RDES0_IPC (1 << 7)
212 #define RDES0_LS (1 << 8)
213 #define RDES0_FS (1 << 9)
214 #define RDES0_OE (1 << 11)
215 #define RDES0_SAF (1 << 13)
216 #define RDES0_DE (1 << 14)
217 #define RDES0_ES (1 << 15)
218 #define RDES0_FL_MASK 0x3fff
219 #define RDES0_FL_SHIFT 16
220 #define RDES0_AFM (1 << 30)
221 #define RDES0_OWN (1U << 31)
222
223 /* Tx size bits */
224 #define TDES1_TBS1 (0xfff << 0)
225 #define TDES1_TCH (1 << 24)
226 #define TDES1_DC (1 << 26)
227 #define TDES1_CIC_MASK (0x3 << 27)
228 #define TDES1_CIC_IP (1 << 27)
229 #define TDES1_CIC_NO_PSE (2 << 27)
230 #define TDES1_CIC_FULL (3 << 27)
231 #define TDES1_FS (1 << 29)
232 #define TDES1_LS (1 << 30)
233 #define TDES1_IC (1U << 31)
234
235 /* Rx size bits */
236 #define RDES1_RBS1 (0xfff << 0)
237 #define RDES1_RCH (1 << 24)
238 #define RDES1_DIC (1U << 31)
239
240 #define ERDES1_RCH (1 << 14)
241
242 struct dwge_buf {
243 bus_dmamap_t tb_map;
244 struct mbuf *tb_m;
245 };
246
247 #define DWGE_NTXDESC 512
248 #define DWGE_NTXSEGS 16
249
250 #define DWGE_NRXDESC 512
251
252 struct dwge_dmamem {
253 bus_dmamap_t tdm_map;
254 bus_dma_segment_t tdm_seg;
255 size_t tdm_size;
256 caddr_t tdm_kva;
257 };
258 #define DWGE_DMA_MAP(_tdm) ((_tdm)->tdm_map)
259 #define DWGE_DMA_LEN(_tdm) ((_tdm)->tdm_size)
260 #define DWGE_DMA_DVA(_tdm) ((_tdm)->tdm_map->dm_segs[0].ds_addr)
261 #define DWGE_DMA_KVA(_tdm) ((void *)(_tdm)->tdm_kva)
262
263 struct dwge_softc {
264 struct device sc_dev;
265 int sc_node;
266 bus_space_tag_t sc_iot;
267 bus_space_handle_t sc_ioh;
268 bus_dma_tag_t sc_dmat;
269 void *sc_ih;
270
271 struct if_device sc_ifd;
272
273 struct arpcom sc_ac;
274 #define sc_lladdr sc_ac.ac_enaddr
275 struct mii_data sc_mii;
276 #define sc_media sc_mii.mii_media
277 uint64_t sc_fixed_media;
278 int sc_link;
279 int sc_phyloc;
280 int sc_force_thresh_dma_mode;
281 int sc_enh_desc;
282 int sc_defrag;
283
284 struct dwge_dmamem *sc_txring;
285 struct dwge_buf *sc_txbuf;
286 struct dwge_desc *sc_txdesc;
287 int sc_tx_prod;
288 int sc_tx_cons;
289
290 struct dwge_dmamem *sc_rxring;
291 struct dwge_buf *sc_rxbuf;
292 struct dwge_desc *sc_rxdesc;
293 int sc_rx_prod;
294 struct if_rxring sc_rx_ring;
295 int sc_rx_cons;
296
297 struct timeout sc_tick;
298 struct timeout sc_rxto;
299
300 uint32_t sc_clk;
301
302 bus_size_t sc_clk_sel;
303 uint32_t sc_clk_sel_125;
304 uint32_t sc_clk_sel_25;
305 uint32_t sc_clk_sel_2_5;
306
307 #if NKSTAT > 0
308 struct mutex sc_kstat_mtx;
309 struct kstat *sc_kstat;
310 #endif
311 };
312
313 #define DEVNAME(_s) ((_s)->sc_dev.dv_xname)
314
315 int dwge_match(struct device *, void *, void *);
316 void dwge_attach(struct device *, struct device *, void *);
317 void dwge_setup_allwinner(struct dwge_softc *);
318 void dwge_setup_rockchip(struct dwge_softc *);
319
320 const struct cfattach dwge_ca = {
321 sizeof(struct dwge_softc), dwge_match, dwge_attach
322 };
323
324 struct cfdriver dwge_cd = {
325 NULL, "dwge", DV_IFNET
326 };
327
328 void dwge_reset_phy(struct dwge_softc *);
329
330 uint32_t dwge_read(struct dwge_softc *, bus_addr_t);
331 void dwge_write(struct dwge_softc *, bus_addr_t, uint32_t);
332
333 int dwge_ioctl(struct ifnet *, u_long, caddr_t);
334 void dwge_start(struct ifqueue *);
335 void dwge_watchdog(struct ifnet *);
336
337 int dwge_media_change(struct ifnet *);
338 void dwge_media_status(struct ifnet *, struct ifmediareq *);
339
340 int dwge_mii_readreg(struct device *, int, int);
341 void dwge_mii_writereg(struct device *, int, int, int);
342 void dwge_mii_statchg(struct device *);
343
344 void dwge_lladdr_read(struct dwge_softc *, uint8_t *);
345 void dwge_lladdr_write(struct dwge_softc *);
346
347 void dwge_tick(void *);
348 void dwge_rxtick(void *);
349
350 int dwge_intr(void *);
351 void dwge_tx_proc(struct dwge_softc *);
352 void dwge_rx_proc(struct dwge_softc *);
353
354 void dwge_up(struct dwge_softc *);
355 void dwge_down(struct dwge_softc *);
356 void dwge_iff(struct dwge_softc *);
357 int dwge_encap(struct dwge_softc *, struct mbuf *, int *, int *);
358
359 void dwge_reset(struct dwge_softc *);
360 void dwge_stop_dma(struct dwge_softc *);
361
362 struct dwge_dmamem *
363 dwge_dmamem_alloc(struct dwge_softc *, bus_size_t, bus_size_t);
364 void dwge_dmamem_free(struct dwge_softc *, struct dwge_dmamem *);
365 struct mbuf *dwge_alloc_mbuf(struct dwge_softc *, bus_dmamap_t);
366 void dwge_fill_rx_ring(struct dwge_softc *);
367
368 #if NKSTAT > 0
369 int dwge_kstat_read(struct kstat *);
370 void dwge_kstat_attach(struct dwge_softc *);
371 #endif
372
373 int
374 dwge_match(struct device *parent, void *cfdata, void *aux)
375 {
376 struct fdt_attach_args *faa = aux;
377
378 return (OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-gmac") ||
379 OF_is_compatible(faa->fa_node, "amlogic,meson-axg-dwmac") ||
380 OF_is_compatible(faa->fa_node, "amlogic,meson-g12a-dwmac") ||
381 OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac") ||
382 OF_is_compatible(faa->fa_node, "rockchip,rk3308-mac") ||
383 OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac") ||
384 OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac") ||
385 OF_is_compatible(faa->fa_node, "snps,dwmac"));
386 }
387
388 void
389 dwge_attach(struct device *parent, struct device *self, void *aux)
390 {
391 struct dwge_softc *sc = (void *)self;
392 struct fdt_attach_args *faa = aux;
393 struct ifnet *ifp = &sc->sc_ac.ac_if;
394 uint32_t phy, phy_supply;
395 uint32_t axi_config;
396 uint32_t mode, pbl;
397 uint32_t version;
398 uint32_t feature;
399 int node;
400
401 sc->sc_node = faa->fa_node;
402 sc->sc_iot = faa->fa_iot;
403 if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
404 faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
405 printf("%s: cannot map registers\n", self->dv_xname);
406 return;
407 }
408 sc->sc_dmat = faa->fa_dmat;
409
410 /* Lookup PHY. */
411 phy = OF_getpropint(faa->fa_node, "phy", 0);
412 if (phy == 0)
413 phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
414 node = OF_getnodebyphandle(phy);
415 if (node)
416 sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
417 else
418 sc->sc_phyloc = MII_PHY_ANY;
419
420 pinctrl_byname(faa->fa_node, "default");
421
422 /* Enable clocks. */
423 clock_set_assigned(faa->fa_node);
424 clock_enable(faa->fa_node, "stmmaceth");
425 reset_deassert(faa->fa_node, "stmmaceth");
426 if (OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac") ||
427 OF_is_compatible(faa->fa_node, "rockchip,rk3308-mac") ||
428 OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac") ||
429 OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac")) {
430 clock_enable(faa->fa_node, "mac_clk_rx");
431 clock_enable(faa->fa_node, "mac_clk_tx");
432 clock_enable(faa->fa_node, "aclk_mac");
433 clock_enable(faa->fa_node, "pclk_mac");
434 }
435 delay(5000);
436
437 version = dwge_read(sc, GMAC_VERSION);
438 printf(": rev 0x%02x", version & GMAC_VERSION_SNPS_MASK);
439
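	/*
	 * Cores newer than 3.5 have a hardware feature register; use it
	 * to detect whether enhanced descriptors are in use.
	 */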
440 if ((version & GMAC_VERSION_SNPS_MASK) > 0x35) {
441 feature = dwge_read(sc, GMAC_HW_FEATURE);
442 if (feature & GMAC_HW_FEATURE_ENHDESSEL)
443 sc->sc_enh_desc = 1;
444 }
445
446 /*
447 * The GMAC on the StarFive JH7100 (core version 3.70)
448 * sometimes transmits corrupted packets. The exact
449 * conditions under which this happens are unclear, but
450 * defragmenting mbufs before transmitting them fixes the
451 * issue.
452 */
453 /* XXX drop "starfive,jh7100-gmac" in the future */
454 if (OF_is_compatible(faa->fa_node, "starfive,jh7100-gmac") ||
455 OF_is_compatible(faa->fa_node, "starfive,jh7100-dwmac"))
456 sc->sc_defrag = 1;
457
458 /* Power up PHY. */
459 phy_supply = OF_getpropint(faa->fa_node, "phy-supply", 0);
460 if (phy_supply)
461 regulator_enable(phy_supply);
462
463 /* Reset PHY */
464 dwge_reset_phy(sc);
465
466 node = OF_getnodebyname(faa->fa_node, "fixed-link");
467 if (node) {
468 ifp->if_baudrate = IF_Mbps(OF_getpropint(node, "speed", 0));
469
470 switch (OF_getpropint(node, "speed", 0)) {
471 case 1000:
472 sc->sc_fixed_media = IFM_ETHER | IFM_1000_T;
473 break;
474 case 100:
475 sc->sc_fixed_media = IFM_ETHER | IFM_100_TX;
476 break;
477 default:
478 sc->sc_fixed_media = IFM_ETHER | IFM_AUTO;
479 break;
480 }
481
482 if (OF_getpropbool(node, "full-duplex")) {
483 ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
484 sc->sc_fixed_media |= IFM_FDX;
485 } else {
486 ifp->if_link_state = LINK_STATE_UP;
487 }
488 }
489
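	/* Pick the MDIO clock divider (CR field) from the CSR clock frequency. */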
490 sc->sc_clk = clock_get_frequency(faa->fa_node, "stmmaceth");
491 if (sc->sc_clk > 250000000)
492 sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_124;
493 else if (sc->sc_clk > 150000000)
494 sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_102;
495 else if (sc->sc_clk > 100000000)
496 sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_62;
497 else if (sc->sc_clk > 60000000)
498 sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_42;
499 else if (sc->sc_clk > 35000000)
500 sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_26;
501 else
502 sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_16;
503
504 if (OF_getprop(faa->fa_node, "local-mac-address",
505 &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
506 dwge_lladdr_read(sc, sc->sc_lladdr);
507 printf(", address %s\n", ether_sprintf(sc->sc_lladdr));
508
509 timeout_set(&sc->sc_tick, dwge_tick, sc);
510 timeout_set(&sc->sc_rxto, dwge_rxtick, sc);
511
512 ifp->if_softc = sc;
513 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
514 ifp->if_xflags = IFXF_MPSAFE;
515 ifp->if_ioctl = dwge_ioctl;
516 ifp->if_qstart = dwge_start;
517 ifp->if_watchdog = dwge_watchdog;
518 ifq_init_maxlen(&ifp->if_snd, DWGE_NTXDESC - 1);
519 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
520
521 ifp->if_capabilities = IFCAP_VLAN_MTU;
522
523 sc->sc_mii.mii_ifp = ifp;
524 sc->sc_mii.mii_readreg = dwge_mii_readreg;
525 sc->sc_mii.mii_writereg = dwge_mii_writereg;
526 sc->sc_mii.mii_statchg = dwge_mii_statchg;
527
528 ifmedia_init(&sc->sc_media, 0, dwge_media_change, dwge_media_status);
529
530 /* Do hardware specific initializations. */
531 if (OF_is_compatible(faa->fa_node, "allwinner,sun7i-a20-gmac"))
532 dwge_setup_allwinner(sc);
533 if (OF_is_compatible(faa->fa_node, "rockchip,rk3288-gmac") ||
534 OF_is_compatible(faa->fa_node, "rockchip,rk3308-mac") ||
535 OF_is_compatible(faa->fa_node, "rockchip,rk3328-gmac") ||
536 OF_is_compatible(faa->fa_node, "rockchip,rk3399-gmac"))
537 dwge_setup_rockchip(sc);
538
539 if (OF_getpropbool(faa->fa_node, "snps,force_thresh_dma_mode"))
540 sc->sc_force_thresh_dma_mode = 1;
541
542 dwge_reset(sc);
543
544 /* Configure MAC. */
545 dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
546 GMAC_MAC_CONF_JD | GMAC_MAC_CONF_BE | GMAC_MAC_CONF_DCRS);
547
548 /* Configure DMA engine. */
549 mode = dwge_read(sc, GMAC_BUS_MODE);
550 mode |= GMAC_BUS_MODE_USP;
551 if (!OF_getpropbool(faa->fa_node, "snps,no-pbl-x8"))
552 mode |= GMAC_BUS_MODE_8XPBL;
553 mode &= ~(GMAC_BUS_MODE_RPBL_MASK | GMAC_BUS_MODE_PBL_MASK);
554 pbl = OF_getpropint(faa->fa_node, "snps,pbl", 8);
555 mode |= pbl << GMAC_BUS_MODE_RPBL_SHIFT;
556 mode |= pbl << GMAC_BUS_MODE_PBL_SHIFT;
557 if (OF_getpropbool(faa->fa_node, "snps,fixed-burst"))
558 mode |= GMAC_BUS_MODE_FB;
559 dwge_write(sc, GMAC_BUS_MODE, mode);
560
561 /* Configure AXI master. */
562 axi_config = OF_getpropint(faa->fa_node, "snps,axi-config", 0);
563 node = OF_getnodebyphandle(axi_config);
564 if (node) {
565 uint32_t blen[7] = { 0 };
566 uint32_t osr_lmt;
567 int i;
568
569 mode = dwge_read(sc, GMAC_AXI_BUS_MODE);
570
571 osr_lmt = OF_getpropint(node, "snps,wr_osr_lmt", 1);
572 mode &= ~GMAC_AXI_BUS_MODE_WR_OSR_LMT_MASK;
573 mode |= (osr_lmt << GMAC_AXI_BUS_MODE_WR_OSR_LMT_SHIFT);
574 osr_lmt = OF_getpropint(node, "snps,rd_osr_lmt", 1);
575 mode &= ~GMAC_AXI_BUS_MODE_RD_OSR_LMT_MASK;
576 mode |= (osr_lmt << GMAC_AXI_BUS_MODE_RD_OSR_LMT_SHIFT);
577
578 OF_getpropintarray(node, "snps,blen", blen, sizeof(blen));
579 for (i = 0; i < nitems(blen); i++) {
580 switch (blen[i]) {
581 case 256:
582 mode |= GMAC_AXI_BUS_MODE_BLEN_256;
583 break;
584 case 128:
585 mode |= GMAC_AXI_BUS_MODE_BLEN_128;
586 break;
587 case 64:
588 mode |= GMAC_AXI_BUS_MODE_BLEN_64;
589 break;
590 case 32:
591 mode |= GMAC_AXI_BUS_MODE_BLEN_32;
592 break;
593 case 16:
594 mode |= GMAC_AXI_BUS_MODE_BLEN_16;
595 break;
596 case 8:
597 mode |= GMAC_AXI_BUS_MODE_BLEN_8;
598 break;
599 case 4:
600 mode |= GMAC_AXI_BUS_MODE_BLEN_4;
601 break;
602 }
603 }
604
605 dwge_write(sc, GMAC_AXI_BUS_MODE, mode);
606 }
607
608 if (sc->sc_fixed_media == 0) {
609 mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
610 (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY, 0);
611 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
612 printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
613 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0,
614 NULL);
615 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
616 } else
617 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
618 } else {
619 ifmedia_add(&sc->sc_media, sc->sc_fixed_media, 0, NULL);
620 ifmedia_set(&sc->sc_media, sc->sc_fixed_media);
621
622 /* force a configuration of the clocks/mac */
623 sc->sc_mii.mii_statchg(self);
624 }
625
626 if_attach(ifp);
627 ether_ifattach(ifp);
628 #if NKSTAT > 0
629 dwge_kstat_attach(sc);
630 #endif
631
632 /* Disable interrupts. */
633 dwge_write(sc, GMAC_INT_ENA, 0);
634 dwge_write(sc, GMAC_INT_MASK,
635 GMAC_INT_MASK_LPIIM | GMAC_INT_MASK_PIM | GMAC_INT_MASK_RIM);
636 dwge_write(sc, GMAC_MMC_IPC_INT_MSK, 0xffffffff);
637
638 sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
639 dwge_intr, sc, sc->sc_dev.dv_xname);
640 if (sc->sc_ih == NULL)
641 printf("%s: can't establish interrupt\n", sc->sc_dev.dv_xname);
642
643 sc->sc_ifd.if_node = faa->fa_node;
644 sc->sc_ifd.if_ifp = ifp;
645 if_register(&sc->sc_ifd);
646 }
647
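/*
 * Toggle the PHY reset GPIO using the polarity and delays given by the
 * "snps,reset-*" device tree properties, if any.
 */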
648 void
649 dwge_reset_phy(struct dwge_softc *sc)
650 {
651 uint32_t *gpio;
652 uint32_t delays[3];
653 int active = 1;
654 int len;
655
656 len = OF_getproplen(sc->sc_node, "snps,reset-gpio");
657 if (len <= 0)
658 return;
659
660 gpio = malloc(len, M_TEMP, M_WAITOK);
661
662 /* Gather information. */
663 OF_getpropintarray(sc->sc_node, "snps,reset-gpio", gpio, len);
664 if (OF_getpropbool(sc->sc_node, "snps,reset-active-low"))
665 active = 0;
666 delays[0] = delays[1] = delays[2] = 0;
667 OF_getpropintarray(sc->sc_node, "snps,reset-delays-us", delays,
668 sizeof(delays));
669
670 /* Perform reset sequence. */
671 gpio_controller_config_pin(gpio, GPIO_CONFIG_OUTPUT);
672 gpio_controller_set_pin(gpio, !active);
673 delay(delays[0]);
674 gpio_controller_set_pin(gpio, active);
675 delay(delays[1]);
676 gpio_controller_set_pin(gpio, !active);
677 delay(delays[2]);
678
679 free(gpio, M_TEMP, len);
680 }
681
682 uint32_t
683 dwge_read(struct dwge_softc *sc, bus_addr_t addr)
684 {
685 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, addr);
686 }
687
688 void
689 dwge_write(struct dwge_softc *sc, bus_addr_t addr, uint32_t data)
690 {
691 bus_space_write_4(sc->sc_iot, sc->sc_ioh, addr, data);
692 }
693
694 void
695 dwge_lladdr_read(struct dwge_softc *sc, uint8_t *lladdr)
696 {
697 uint32_t machi, maclo;
698
699 machi = dwge_read(sc, GMAC_MAC_ADDR0_HI);
700 maclo = dwge_read(sc, GMAC_MAC_ADDR0_LO);
701
702 lladdr[0] = (maclo >> 0) & 0xff;
703 lladdr[1] = (maclo >> 8) & 0xff;
704 lladdr[2] = (maclo >> 16) & 0xff;
705 lladdr[3] = (maclo >> 24) & 0xff;
706 lladdr[4] = (machi >> 0) & 0xff;
707 lladdr[5] = (machi >> 8) & 0xff;
708 }
709
710 void
711 dwge_lladdr_write(struct dwge_softc *sc)
712 {
713 dwge_write(sc, GMAC_MAC_ADDR0_HI,
714 sc->sc_lladdr[5] << 8 | sc->sc_lladdr[4] << 0);
715 dwge_write(sc, GMAC_MAC_ADDR0_LO,
716 sc->sc_lladdr[3] << 24 | sc->sc_lladdr[2] << 16 |
717 sc->sc_lladdr[1] << 8 | sc->sc_lladdr[0] << 0);
718 }
719
720 void
721 dwge_start(struct ifqueue *ifq)
722 {
723 struct ifnet *ifp = ifq->ifq_if;
724 struct dwge_softc *sc = ifp->if_softc;
725 struct mbuf *m;
726 int error, idx, left, used;
727
728 if (!(ifp->if_flags & IFF_RUNNING))
729 return;
730 if (ifq_is_oactive(&ifp->if_snd))
731 return;
732 if (ifq_empty(&ifp->if_snd))
733 return;
734 if (!sc->sc_link)
735 return;
736
737 idx = sc->sc_tx_prod;
738 left = sc->sc_tx_cons;
739 if (left <= idx)
740 left += DWGE_NTXDESC;
741 left -= idx;
742 used = 0;
743
744 for (;;) {
745 if (used + DWGE_NTXSEGS + 1 > left) {
746 ifq_set_oactive(ifq);
747 break;
748 }
749
750 m = ifq_dequeue(ifq);
751 if (m == NULL)
752 break;
753
754 error = dwge_encap(sc, m, &idx, &used);
755 if (error == EFBIG) {
756 m_freem(m); /* give up: drop it */
757 ifp->if_oerrors++;
758 continue;
759 }
760
761 #if NBPFILTER > 0
762 if (ifp->if_bpf)
763 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
764 #endif
765 }
766
767 if (sc->sc_tx_prod != idx) {
768 sc->sc_tx_prod = idx;
769
770 /* Set a timeout in case the chip goes out to lunch. */
771 ifp->if_timer = 5;
772
773 dwge_write(sc, GMAC_TX_POLL_DEMAND, 0xffffffff);
774 }
775 }
776
777 int
778 dwge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
779 {
780 struct dwge_softc *sc = ifp->if_softc;
781 struct ifreq *ifr = (struct ifreq *)addr;
782 int error = 0, s;
783
784 s = splnet();
785
786 switch (cmd) {
787 case SIOCSIFADDR:
788 ifp->if_flags |= IFF_UP;
789 /* FALLTHROUGH */
790 case SIOCSIFFLAGS:
791 if (ifp->if_flags & IFF_UP) {
792 if (ifp->if_flags & IFF_RUNNING)
793 error = ENETRESET;
794 else
795 dwge_up(sc);
796 } else {
797 if (ifp->if_flags & IFF_RUNNING)
798 dwge_down(sc);
799 }
800 break;
801
802 case SIOCGIFMEDIA:
803 case SIOCSIFMEDIA:
804 if (sc->sc_fixed_media != 0)
805 error = ENOTTY;
806 else
807 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
808 break;
809
810 case SIOCGIFRXR:
811 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
812 NULL, MCLBYTES, &sc->sc_rx_ring);
813 break;
814
815 default:
816 error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
817 break;
818 }
819
820 if (error == ENETRESET) {
821 if (ifp->if_flags & IFF_RUNNING)
822 dwge_iff(sc);
823 error = 0;
824 }
825
826 splx(s);
827 return (error);
828 }
829
830 void
831 dwge_watchdog(struct ifnet *ifp)
832 {
833 printf("%s\n", __func__);
834 }
835
836 int
837 dwge_media_change(struct ifnet *ifp)
838 {
839 struct dwge_softc *sc = ifp->if_softc;
840
841 if (LIST_FIRST(&sc->sc_mii.mii_phys))
842 mii_mediachg(&sc->sc_mii);
843
844 return (0);
845 }
846
847 void
848 dwge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
849 {
850 struct dwge_softc *sc = ifp->if_softc;
851
852 if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
853 mii_pollstat(&sc->sc_mii);
854 ifmr->ifm_active = sc->sc_mii.mii_media_active;
855 ifmr->ifm_status = sc->sc_mii.mii_media_status;
856 }
857 }
858
859 int
860 dwge_mii_readreg(struct device *self, int phy, int reg)
861 {
862 struct dwge_softc *sc = (void *)self;
863 int n;
864
865 dwge_write(sc, GMAC_GMII_ADDR,
866 sc->sc_clk << GMAC_GMII_ADDR_CR_SHIFT |
867 phy << GMAC_GMII_ADDR_PA_SHIFT |
868 reg << GMAC_GMII_ADDR_GR_SHIFT |
869 GMAC_GMII_ADDR_GB);
870 for (n = 0; n < 1000; n++) {
871 if ((dwge_read(sc, GMAC_GMII_ADDR) & GMAC_GMII_ADDR_GB) == 0)
872 return dwge_read(sc, GMAC_GMII_DATA);
873 delay(10);
874 }
875
876 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
877 return (0);
878 }
879
880 void
881 dwge_mii_writereg(struct device *self, int phy, int reg, int val)
882 {
883 struct dwge_softc *sc = (void *)self;
884 int n;
885
886 dwge_write(sc, GMAC_GMII_DATA, val);
887 dwge_write(sc, GMAC_GMII_ADDR,
888 sc->sc_clk << GMAC_GMII_ADDR_CR_SHIFT |
889 phy << GMAC_GMII_ADDR_PA_SHIFT |
890 reg << GMAC_GMII_ADDR_GR_SHIFT |
891 GMAC_GMII_ADDR_GW | GMAC_GMII_ADDR_GB);
892 for (n = 0; n < 1000; n++) {
893 if ((dwge_read(sc, GMAC_GMII_ADDR) & GMAC_GMII_ADDR_GB) == 0)
894 return;
895 delay(10);
896 }
897
898 printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
899 }
900
901 void
902 dwge_mii_statchg(struct device *self)
903 {
904 struct dwge_softc *sc = (void *)self;
905 uint32_t conf;
906 uint64_t media_active;
907
908 conf = dwge_read(sc, GMAC_MAC_CONF);
909 conf &= ~(GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES);
910
911 media_active = sc->sc_fixed_media;
912 if (media_active == 0)
913 media_active = sc->sc_mii.mii_media_active;
914
915 switch (IFM_SUBTYPE(media_active)) {
916 case IFM_1000_SX:
917 case IFM_1000_LX:
918 case IFM_1000_CX:
919 case IFM_1000_T:
920 sc->sc_link = 1;
921 break;
922 case IFM_100_TX:
923 conf |= GMAC_MAC_CONF_PS | GMAC_MAC_CONF_FES;
924 sc->sc_link = 1;
925 break;
926 case IFM_10_T:
927 conf |= GMAC_MAC_CONF_PS;
928 sc->sc_link = 1;
929 break;
930 default:
931 sc->sc_link = 0;
932 return;
933 }
934
935 if (sc->sc_link == 0)
936 return;
937
938 conf &= ~GMAC_MAC_CONF_DM;
939 if ((media_active & IFM_GMASK) == IFM_FDX)
940 conf |= GMAC_MAC_CONF_DM;
941
942 /* XXX: RX/TX flow control? */
943
944 dwge_write(sc, GMAC_MAC_CONF, conf);
945 }
946
947 void
948 dwge_tick(void *arg)
949 {
950 struct dwge_softc *sc = arg;
951 int s;
952
953 s = splnet();
954 mii_tick(&sc->sc_mii);
955 splx(s);
956
957 timeout_add_sec(&sc->sc_tick, 1);
958 }
959
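/*
 * Recovery timeout for when the RX ring has run out of mbufs: stop the
 * receiver, rebuild the ring and restart DMA.
 */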
960 void
961 dwge_rxtick(void *arg)
962 {
963 struct dwge_softc *sc = arg;
964 uint32_t mode;
965 int s;
966
967 s = splnet();
968
969 mode = dwge_read(sc, GMAC_OP_MODE);
970 dwge_write(sc, GMAC_OP_MODE, mode & ~GMAC_OP_MODE_SR);
971
972 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
973 0, DWGE_DMA_LEN(sc->sc_rxring),
974 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
975
976 dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, 0);
977
978 sc->sc_rx_prod = sc->sc_rx_cons = 0;
979 dwge_fill_rx_ring(sc);
980
981 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
982 0, DWGE_DMA_LEN(sc->sc_rxring),
983 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
984
985 dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_rxring));
986 dwge_write(sc, GMAC_OP_MODE, mode);
987
988 splx(s);
989 }
990
991 int
992 dwge_intr(void *arg)
993 {
994 struct dwge_softc *sc = arg;
995 uint32_t reg;
996
997 reg = dwge_read(sc, GMAC_STATUS);
998 dwge_write(sc, GMAC_STATUS, reg);
999
1000 if (reg & GMAC_STATUS_RI)
1001 dwge_rx_proc(sc);
1002
1003 if (reg & GMAC_STATUS_TI ||
1004 reg & GMAC_STATUS_TU)
1005 dwge_tx_proc(sc);
1006
1007 #if NKSTAT > 0
1008 if (reg & GMAC_STATUS_MMC) {
1009 mtx_enter(&sc->sc_kstat_mtx);
1010 dwge_kstat_read(sc->sc_kstat);
1011 mtx_leave(&sc->sc_kstat_mtx);
1012 }
1013 #endif
1014
1015 return (1);
1016 }
1017
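/* Reclaim completed transmit descriptors and free their mbufs. */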
1018 void
1019 dwge_tx_proc(struct dwge_softc *sc)
1020 {
1021 struct ifnet *ifp = &sc->sc_ac.ac_if;
1022 struct dwge_desc *txd;
1023 struct dwge_buf *txb;
1024 int idx, txfree;
1025
1026 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring), 0,
1027 DWGE_DMA_LEN(sc->sc_txring),
1028 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1029
1030 txfree = 0;
1031 while (sc->sc_tx_cons != sc->sc_tx_prod) {
1032 idx = sc->sc_tx_cons;
1033 KASSERT(idx < DWGE_NTXDESC);
1034
1035 txd = &sc->sc_txdesc[idx];
1036 if (txd->sd_status & TDES0_OWN)
1037 break;
1038
1039 txb = &sc->sc_txbuf[idx];
1040 if (txb->tb_m) {
1041 bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
1042 txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1043 bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
1044
1045 m_freem(txb->tb_m);
1046 txb->tb_m = NULL;
1047 }
1048
1049 txfree++;
1050
1051 if (sc->sc_tx_cons == (DWGE_NTXDESC - 1))
1052 sc->sc_tx_cons = 0;
1053 else
1054 sc->sc_tx_cons++;
1055
1056 txd->sd_status = sc->sc_enh_desc ? ETDES0_TCH : 0;
1057 }
1058
1059 if (sc->sc_tx_cons == sc->sc_tx_prod)
1060 ifp->if_timer = 0;
1061
1062 if (txfree) {
1063 if (ifq_is_oactive(&ifp->if_snd))
1064 ifq_restart(&ifp->if_snd);
1065 }
1066 }
1067
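/* Hand received frames to the network stack and replenish the RX ring. */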
1068 void
1069 dwge_rx_proc(struct dwge_softc *sc)
1070 {
1071 struct ifnet *ifp = &sc->sc_ac.ac_if;
1072 struct dwge_desc *rxd;
1073 struct dwge_buf *rxb;
1074 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1075 struct mbuf *m;
1076 int idx, len, cnt, put;
1077
1078 if ((ifp->if_flags & IFF_RUNNING) == 0)
1079 return;
1080
1081 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring), 0,
1082 DWGE_DMA_LEN(sc->sc_rxring),
1083 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1084
1085 cnt = if_rxr_inuse(&sc->sc_rx_ring);
1086 put = 0;
1087 while (put < cnt) {
1088 idx = sc->sc_rx_cons;
1089 KASSERT(idx < DWGE_NRXDESC);
1090
1091 rxd = &sc->sc_rxdesc[idx];
1092 if (rxd->sd_status & RDES0_OWN)
1093 break;
1094
1095 len = (rxd->sd_status >> RDES0_FL_SHIFT) & RDES0_FL_MASK;
1096 rxb = &sc->sc_rxbuf[idx];
1097 KASSERT(rxb->tb_m);
1098
1099 bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
1100 len, BUS_DMASYNC_POSTREAD);
1101 bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
1102
1103 m = rxb->tb_m;
1104 rxb->tb_m = NULL;
1105 if (rxd->sd_status & RDES0_ES) {
1106 ifp->if_ierrors++;
1107 m_freem(m);
1108 } else {
1109 /* Strip off CRC. */
1110 len -= ETHER_CRC_LEN;
1111 KASSERT(len > 0);
1112
1113 m->m_pkthdr.len = m->m_len = len;
1114
1115 ml_enqueue(&ml, m);
1116 }
1117
1118 put++;
1119 if (sc->sc_rx_cons == (DWGE_NRXDESC - 1))
1120 sc->sc_rx_cons = 0;
1121 else
1122 sc->sc_rx_cons++;
1123 }
1124
1125 if_rxr_put(&sc->sc_rx_ring, put);
1126 if (ifiq_input(&ifp->if_rcv, &ml))
1127 if_rxr_livelocked(&sc->sc_rx_ring);
1128
1129 dwge_fill_rx_ring(sc);
1130
1131 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring), 0,
1132 DWGE_DMA_LEN(sc->sc_rxring),
1133 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1134
1135 }
1136
1137 void
1138 dwge_up(struct dwge_softc *sc)
1139 {
1140 struct ifnet *ifp = &sc->sc_ac.ac_if;
1141 struct dwge_buf *txb, *rxb;
1142 uint32_t mode;
1143 int i;
1144
1145 /* Allocate Tx descriptor ring. */
1146 sc->sc_txring = dwge_dmamem_alloc(sc,
1147 DWGE_NTXDESC * sizeof(struct dwge_desc), 8);
1148 sc->sc_txdesc = DWGE_DMA_KVA(sc->sc_txring);
1149
1150 sc->sc_txbuf = malloc(sizeof(struct dwge_buf) * DWGE_NTXDESC,
1151 M_DEVBUF, M_WAITOK);
1152 for (i = 0; i < DWGE_NTXDESC; i++) {
1153 txb = &sc->sc_txbuf[i];
1154 bus_dmamap_create(sc->sc_dmat, MCLBYTES, DWGE_NTXSEGS,
1155 MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
1156 txb->tb_m = NULL;
1157
1158 sc->sc_txdesc[i].sd_next =
1159 DWGE_DMA_DVA(sc->sc_txring) +
1160 ((i+1) % DWGE_NTXDESC) * sizeof(struct dwge_desc);
1161 if (sc->sc_enh_desc)
1162 sc->sc_txdesc[i].sd_status = ETDES0_TCH;
1163 else
1164 sc->sc_txdesc[i].sd_len = TDES1_TCH;
1165 }
1166
1167 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
1168 0, DWGE_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);
1169
1170 sc->sc_tx_prod = sc->sc_tx_cons = 0;
1171
1172 dwge_write(sc, GMAC_TX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_txring));
1173
1174 /* Allocate descriptor ring. */
1175 sc->sc_rxring = dwge_dmamem_alloc(sc,
1176 DWGE_NRXDESC * sizeof(struct dwge_desc), 8);
1177 sc->sc_rxdesc = DWGE_DMA_KVA(sc->sc_rxring);
1178
1179 sc->sc_rxbuf = malloc(sizeof(struct dwge_buf) * DWGE_NRXDESC,
1180 M_DEVBUF, M_WAITOK);
1181
1182 for (i = 0; i < DWGE_NRXDESC; i++) {
1183 rxb = &sc->sc_rxbuf[i];
1184 bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1185 MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
1186 rxb->tb_m = NULL;
1187
1188 sc->sc_rxdesc[i].sd_next =
1189 DWGE_DMA_DVA(sc->sc_rxring) +
1190 ((i+1) % DWGE_NRXDESC) * sizeof(struct dwge_desc);
1191 sc->sc_rxdesc[i].sd_len =
1192 sc->sc_enh_desc ? ERDES1_RCH : RDES1_RCH;
1193 }
1194
1195 if_rxr_init(&sc->sc_rx_ring, 2, DWGE_NRXDESC);
1196
1197 sc->sc_rx_prod = sc->sc_rx_cons = 0;
1198 dwge_fill_rx_ring(sc);
1199
1200 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_rxring),
1201 0, DWGE_DMA_LEN(sc->sc_rxring),
1202 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1203
1204 dwge_write(sc, GMAC_RX_DESC_LIST_ADDR, DWGE_DMA_DVA(sc->sc_rxring));
1205
1206 dwge_lladdr_write(sc);
1207
1208 /* Configure media. */
1209 if (LIST_FIRST(&sc->sc_mii.mii_phys))
1210 mii_mediachg(&sc->sc_mii);
1211
1212 /* Program promiscuous mode and multicast filters. */
1213 dwge_iff(sc);
1214
1215 ifp->if_flags |= IFF_RUNNING;
1216 ifq_clr_oactive(&ifp->if_snd);
1217
1218 dwge_write(sc, GMAC_INT_ENA, GMAC_INT_ENA_NIE |
1219 GMAC_INT_ENA_RIE | GMAC_INT_ENA_TIE | GMAC_INT_ENA_TUE);
1220
1221 mode = dwge_read(sc, GMAC_OP_MODE);
1222 if (sc->sc_force_thresh_dma_mode) {
1223 mode &= ~(GMAC_OP_MODE_TSF | GMAC_OP_MODE_TTC_MASK);
1224 mode |= GMAC_OP_MODE_TTC_128;
1225 mode &= ~(GMAC_OP_MODE_RSF | GMAC_OP_MODE_RTC_MASK);
1226 mode |= GMAC_OP_MODE_RTC_128;
1227 } else {
1228 mode |= GMAC_OP_MODE_TSF | GMAC_OP_MODE_OSF;
1229 mode |= GMAC_OP_MODE_RSF;
1230 }
1231 dwge_write(sc, GMAC_OP_MODE, mode | GMAC_OP_MODE_ST | GMAC_OP_MODE_SR);
1232
1233 dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc, GMAC_MAC_CONF) |
1234 GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE);
1235
1236 if (sc->sc_fixed_media == 0)
1237 timeout_add_sec(&sc->sc_tick, 1);
1238 }
1239
1240 void
1241 dwge_down(struct dwge_softc *sc)
1242 {
1243 struct ifnet *ifp = &sc->sc_ac.ac_if;
1244 struct dwge_buf *txb, *rxb;
1245 uint32_t dmactrl;
1246 int i;
1247
1248 timeout_del(&sc->sc_rxto);
1249 if (sc->sc_fixed_media == 0)
1250 timeout_del(&sc->sc_tick);
1251
1252 ifp->if_flags &= ~IFF_RUNNING;
1253 ifq_clr_oactive(&ifp->if_snd);
1254 ifp->if_timer = 0;
1255
1256 dwge_stop_dma(sc);
1257
1258 dwge_write(sc, GMAC_MAC_CONF, dwge_read(sc,
1259 GMAC_MAC_CONF) & ~(GMAC_MAC_CONF_TE | GMAC_MAC_CONF_RE));
1260
1261 dmactrl = dwge_read(sc, GMAC_OP_MODE);
1262 dmactrl &= ~(GMAC_OP_MODE_ST | GMAC_OP_MODE_SR);
1263 dwge_write(sc, GMAC_OP_MODE, dmactrl);
1264
1265 dwge_write(sc, GMAC_INT_ENA, 0);
1266
1267 intr_barrier(sc->sc_ih);
1268 ifq_barrier(&ifp->if_snd);
1269
1270 for (i = 0; i < DWGE_NTXDESC; i++) {
1271 txb = &sc->sc_txbuf[i];
1272 if (txb->tb_m) {
1273 bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
1274 txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1275 bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
1276 m_freem(txb->tb_m);
1277 }
1278 bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
1279 }
1280
1281 dwge_dmamem_free(sc, sc->sc_txring);
1282 free(sc->sc_txbuf, M_DEVBUF, 0);
1283
1284 for (i = 0; i < DWGE_NRXDESC; i++) {
1285 rxb = &sc->sc_rxbuf[i];
1286 if (rxb->tb_m) {
1287 bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
1288 rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1289 bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
1290 m_freem(rxb->tb_m);
1291 }
1292 bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
1293 }
1294
1295 dwge_dmamem_free(sc, sc->sc_rxring);
1296 free(sc->sc_rxbuf, M_DEVBUF, 0);
1297 }
1298
1299 /* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
1300 static uint32_t
1301 bitrev32(uint32_t x)
1302 {
1303 x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
1304 x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
1305 x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
1306 x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
1307
1308 return (x >> 16) | (x << 16);
1309 }
1310
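/*
 * Program the receive filter: promiscuous/all-multicast bits and the
 * 64-bit multicast hash table computed from the CRC-32 of each group
 * address.
 */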
1311 void
1312 dwge_iff(struct dwge_softc *sc)
1313 {
1314 struct arpcom *ac = &sc->sc_ac;
1315 struct ifnet *ifp = &sc->sc_ac.ac_if;
1316 struct ether_multi *enm;
1317 struct ether_multistep step;
1318 uint32_t crc, hash[2], hashbit, hashreg;
1319 uint32_t reg;
1320
1321 reg = 0;
1322
1323 ifp->if_flags &= ~IFF_ALLMULTI;
1324 bzero(hash, sizeof(hash));
1325 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1326 ifp->if_flags |= IFF_ALLMULTI;
1327 reg |= GMAC_MAC_FRM_FILT_PM;
1328 if (ifp->if_flags & IFF_PROMISC)
1329 reg |= GMAC_MAC_FRM_FILT_PR;
1330 } else {
1331 reg |= GMAC_MAC_FRM_FILT_HMC;
1332 ETHER_FIRST_MULTI(step, ac, enm);
1333 while (enm != NULL) {
1334 crc = ether_crc32_le(enm->enm_addrlo,
1335 ETHER_ADDR_LEN) & 0x7f;
1336
1337 crc = bitrev32(~crc) >> 26;
1338 hashreg = (crc >> 5);
1339 hashbit = (crc & 0x1f);
1340 hash[hashreg] |= (1 << hashbit);
1341
1342 ETHER_NEXT_MULTI(step, enm);
1343 }
1344 }
1345
1346 dwge_lladdr_write(sc);
1347
1348 dwge_write(sc, GMAC_HASH_TAB_HI, hash[1]);
1349 dwge_write(sc, GMAC_HASH_TAB_LO, hash[0]);
1350
1351 dwge_write(sc, GMAC_MAC_FRM_FILT, reg);
1352 }
1353
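/*
 * Load an mbuf for transmission and fill in its descriptors.  The OWN
 * bit on the first descriptor is set last so the DMA engine never sees
 * a partially built chain.
 */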
1354 int
1355 dwge_encap(struct dwge_softc *sc, struct mbuf *m, int *idx, int *used)
1356 {
1357 struct dwge_desc *txd, *txd_start;
1358 bus_dmamap_t map;
1359 int cur, frag, i;
1360
1361 cur = frag = *idx;
1362 map = sc->sc_txbuf[cur].tb_map;
1363
1364 if (sc->sc_defrag) {
1365 if (m_defrag(m, M_DONTWAIT))
1366 return (ENOBUFS);
1367 }
1368
1369 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
1370 if (m_defrag(m, M_DONTWAIT))
1371 return (EFBIG);
1372 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT))
1373 return (EFBIG);
1374 }
1375
1376 /* Sync the DMA map. */
1377 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1378 BUS_DMASYNC_PREWRITE);
1379
1380 txd = txd_start = &sc->sc_txdesc[frag];
1381 for (i = 0; i < map->dm_nsegs; i++) {
1382 txd->sd_addr = map->dm_segs[i].ds_addr;
1383 if (sc->sc_enh_desc) {
1384 txd->sd_status = ETDES0_TCH;
1385 txd->sd_len = map->dm_segs[i].ds_len;
1386 if (i == 0)
1387 txd->sd_status |= ETDES0_FS;
1388 if (i == (map->dm_nsegs - 1))
1389 txd->sd_status |= ETDES0_LS | ETDES0_IC;
1390 } else {
1391 txd->sd_status = 0;
1392 txd->sd_len = map->dm_segs[i].ds_len | TDES1_TCH;
1393 if (i == 0)
1394 txd->sd_len |= TDES1_FS;
1395 if (i == (map->dm_nsegs - 1))
1396 txd->sd_len |= TDES1_LS | TDES1_IC;
1397 }
1398 if (i != 0)
1399 txd->sd_status |= TDES0_OWN;
1400
1401 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
1402 frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);
1403
1404 cur = frag;
1405 if (frag == (DWGE_NTXDESC - 1)) {
1406 txd = &sc->sc_txdesc[0];
1407 frag = 0;
1408 } else {
1409 txd++;
1410 frag++;
1411 }
1412 KASSERT(frag != sc->sc_tx_cons);
1413 }
1414
1415 txd_start->sd_status |= TDES0_OWN;
1416 bus_dmamap_sync(sc->sc_dmat, DWGE_DMA_MAP(sc->sc_txring),
1417 *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);
1418
1419 KASSERT(sc->sc_txbuf[cur].tb_m == NULL);
1420 sc->sc_txbuf[*idx].tb_map = sc->sc_txbuf[cur].tb_map;
1421 sc->sc_txbuf[cur].tb_map = map;
1422 sc->sc_txbuf[cur].tb_m = m;
1423
1424 *idx = frag;
1425 *used += map->dm_nsegs;
1426
1427 return (0);
1428 }
1429
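/* Soft-reset the MAC/DMA core and wait for the reset bit to clear. */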
1430 void
1431 dwge_reset(struct dwge_softc *sc)
1432 {
1433 int n;
1434
1435 dwge_stop_dma(sc);
1436
1437 dwge_write(sc, GMAC_BUS_MODE, dwge_read(sc, GMAC_BUS_MODE) |
1438 GMAC_BUS_MODE_SWR);
1439
1440 for (n = 0; n < 30000; n++) {
1441 if ((dwge_read(sc, GMAC_BUS_MODE) &
1442 GMAC_BUS_MODE_SWR) == 0)
1443 return;
1444 delay(10);
1445 }
1446
1447 printf("%s: reset timeout\n", sc->sc_dev.dv_xname);
1448 }
1449
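/* Stop the transmit DMA engine and flush the transmit FIFO. */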
1450 void
1451 dwge_stop_dma(struct dwge_softc *sc)
1452 {
1453 uint32_t dmactrl;
1454
1455 /* Stop DMA. */
1456 dmactrl = dwge_read(sc, GMAC_OP_MODE);
1457 dmactrl &= ~GMAC_OP_MODE_ST;
1458 dmactrl |= GMAC_OP_MODE_FTF;
1459 dwge_write(sc, GMAC_OP_MODE, dmactrl);
1460 }
1461
1462 struct dwge_dmamem *
1463 dwge_dmamem_alloc(struct dwge_softc *sc, bus_size_t size, bus_size_t align)
1464 {
1465 struct dwge_dmamem *tdm;
1466 int nsegs;
1467
1468 tdm = malloc(sizeof(*tdm), M_DEVBUF, M_WAITOK | M_ZERO);
1469 tdm->tdm_size = size;
1470
1471 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
1472 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
1473 goto tdmfree;
1474
1475 if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &tdm->tdm_seg, 1,
1476 &nsegs, BUS_DMA_WAITOK) != 0)
1477 goto destroy;
1478
1479 if (bus_dmamem_map(sc->sc_dmat, &tdm->tdm_seg, nsegs, size,
1480 &tdm->tdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
1481 goto free;
1482
1483 if (bus_dmamap_load(sc->sc_dmat, tdm->tdm_map, tdm->tdm_kva, size,
1484 NULL, BUS_DMA_WAITOK) != 0)
1485 goto unmap;
1486
1487 bzero(tdm->tdm_kva, size);
1488
1489 return (tdm);
1490
1491 unmap:
1492 bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, size);
1493 free:
1494 bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
1495 destroy:
1496 bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
1497 tdmfree:
1498 free(tdm, M_DEVBUF, 0);
1499
1500 return (NULL);
1501 }
1502
1503 void
1504 dwge_dmamem_free(struct dwge_softc *sc, struct dwge_dmamem *tdm)
1505 {
1506 bus_dmamem_unmap(sc->sc_dmat, tdm->tdm_kva, tdm->tdm_size);
1507 bus_dmamem_free(sc->sc_dmat, &tdm->tdm_seg, 1);
1508 bus_dmamap_destroy(sc->sc_dmat, tdm->tdm_map);
1509 free(tdm, M_DEVBUF, 0);
1510 }
1511
1512 struct mbuf *
1513 dwge_alloc_mbuf(struct dwge_softc *sc, bus_dmamap_t map)
1514 {
1515 struct mbuf *m = NULL;
1516
1517 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1518 if (!m)
1519 return (NULL);
1520 m->m_len = m->m_pkthdr.len = MCLBYTES;
1521 m_adj(m, ETHER_ALIGN);
1522
1523 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1524 printf("%s: could not load mbuf DMA map", DEVNAME(sc));
1525 m_freem(m);
1526 return (NULL);
1527 }
1528
1529 bus_dmamap_sync(sc->sc_dmat, map, 0,
1530 m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1531
1532 return (m);
1533 }
1534
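/*
 * Refill the RX ring with fresh mbufs.  If the ring ends up completely
 * empty, arm a timeout so the receiver can be restarted later.
 */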
1535 void
1536 dwge_fill_rx_ring(struct dwge_softc *sc)
1537 {
1538 struct dwge_desc *rxd;
1539 struct dwge_buf *rxb;
1540 u_int slots;
1541
1542 for (slots = if_rxr_get(&sc->sc_rx_ring, DWGE_NRXDESC);
1543 slots > 0; slots--) {
1544 rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
1545 rxb->tb_m = dwge_alloc_mbuf(sc, rxb->tb_map);
1546 if (rxb->tb_m == NULL)
1547 break;
1548
1549 rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
1550 rxd->sd_len = rxb->tb_map->dm_segs[0].ds_len;
1551 rxd->sd_len |= sc->sc_enh_desc ? ERDES1_RCH : RDES1_RCH;
1552 rxd->sd_addr = rxb->tb_map->dm_segs[0].ds_addr;
1553 rxd->sd_status = RDES0_OWN;
1554
1555 if (sc->sc_rx_prod == (DWGE_NRXDESC - 1))
1556 sc->sc_rx_prod = 0;
1557 else
1558 sc->sc_rx_prod++;
1559 }
1560 if_rxr_put(&sc->sc_rx_ring, slots);
1561
1562 if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
1563 timeout_add(&sc->sc_rxto, 1);
1564 }
1565
1566 /*
1567 * Allwinner A20/A31.
1568 */
1569
1570 void
1571 dwge_setup_allwinner(struct dwge_softc *sc)
1572 {
1573 char phy_mode[8];
1574 uint32_t freq;
1575
1576 /* default to RGMII */
1577 OF_getprop(sc->sc_node, "phy-mode", phy_mode, sizeof(phy_mode));
1578 if (strcmp(phy_mode, "mii") == 0)
1579 freq = 25000000;
1580 else
1581 freq = 125000000;
1582 clock_set_frequency(sc->sc_node, "allwinner_gmac_tx", freq);
1583 }
1584
1585 /*
1586 * Rockchip RK3288/RK3399.
1587 */
1588
1589 /* RK3308 registers */
1590 #define RK3308_GRF_MAC_CON0 0x04a0
1591 #define RK3308_MAC_SPEED_100M ((0x1 << 0) << 16 | (0x1 << 0))
1592 #define RK3308_MAC_SPEED_10M ((0x1 << 0) << 16 | (0x0 << 0))
1593 #define RK3308_INTF_SEL_RMII ((0x1 << 4) << 16 | (0x1 << 4))
1594
1595 /* RK3288 registers */
1596 #define RK3288_GRF_SOC_CON1 0x0248
1597 #define RK3288_GMAC_PHY_INTF_SEL_RGMII ((0x7 << 6) << 16 | (0x1 << 6))
1598 #define RK3288_GMAC_PHY_INTF_SEL_RMII ((0x7 << 6) << 16 | (0x4 << 6))
1599 #define RK3288_RMII_MODE_RMII ((1 << 14) << 16 | (1 << 14))
1600 #define RK3288_RMII_MODE_MII ((1 << 14) << 16 | (0 << 14))
1601 #define RK3288_GMAC_CLK_SEL_125 ((0x3 << 12) << 16 | (0x0 << 12))
1602 #define RK3288_GMAC_CLK_SEL_25 ((0x3 << 12) << 16 | (0x3 << 12))
1603 #define RK3288_GMAC_CLK_SEL_2_5 ((0x3 << 12) << 16 | (0x2 << 12))
1604
1605 #define RK3288_GRF_SOC_CON3 0x0250
1606 #define RK3288_GMAC_RXCLK_DLY_ENA ((1 << 15) << 16 | (1 << 15))
1607 #define RK3288_GMAC_CLK_RX_DL_CFG(val) ((0x7f << 7) << 16 | ((val) << 7))
1608 #define RK3288_GMAC_TXCLK_DLY_ENA ((1 << 14) << 16 | (1 << 14))
1609 #define RK3288_GMAC_CLK_TX_DL_CFG(val) ((0x7f << 0) << 16 | ((val) << 0))
1610
1611 /* RK3328 registers */
1612 #define RK3328_GRF_MAC_CON0 0x0900
1613 #define RK3328_GMAC_CLK_RX_DL_CFG(val) ((0x7f << 7) << 16 | ((val) << 7))
1614 #define RK3328_GMAC_CLK_TX_DL_CFG(val) ((0x7f << 0) << 16 | ((val) << 0))
1615
1616 #define RK3328_GRF_MAC_CON1 0x0904
1617 #define RK3328_GMAC_PHY_INTF_SEL_RGMII ((0x7 << 4) << 16 | (0x1 << 4))
1618 #define RK3328_GMAC_PHY_INTF_SEL_RMII ((0x7 << 4) << 16 | (0x4 << 4))
1619 #define RK3328_RMII_MODE_RMII ((1 << 9) << 16 | (1 << 9))
1620 #define RK3328_RMII_MODE_MII ((1 << 9) << 16 | (0 << 9))
1621 #define RK3328_GMAC_CLK_SEL_125 ((0x3 << 11) << 16 | (0x0 << 11))
1622 #define RK3328_GMAC_CLK_SEL_25 ((0x3 << 11) << 16 | (0x3 << 11))
1623 #define RK3328_GMAC_CLK_SEL_2_5 ((0x3 << 11) << 16 | (0x2 << 11))
1624 #define RK3328_GMAC_RXCLK_DLY_ENA ((1 << 1) << 16 | (1 << 1))
1625 #define RK3328_GMAC_TXCLK_DLY_ENA ((1 << 0) << 16 | (1 << 0))
1626
1627 /* RK3399 registers */
1628 #define RK3399_GRF_SOC_CON5 0xc214
1629 #define RK3399_GMAC_PHY_INTF_SEL_RGMII ((0x7 << 9) << 16 | (0x1 << 9))
1630 #define RK3399_GMAC_PHY_INTF_SEL_RMII ((0x7 << 9) << 16 | (0x4 << 9))
1631 #define RK3399_RMII_MODE_RMII ((1 << 6) << 16 | (1 << 6))
1632 #define RK3399_RMII_MODE_MII ((1 << 6) << 16 | (0 << 6))
1633 #define RK3399_GMAC_CLK_SEL_125 ((0x3 << 4) << 16 | (0x0 << 4))
1634 #define RK3399_GMAC_CLK_SEL_25 ((0x3 << 4) << 16 | (0x3 << 4))
1635 #define RK3399_GMAC_CLK_SEL_2_5 ((0x3 << 4) << 16 | (0x2 << 4))
1636 #define RK3399_GRF_SOC_CON6 0xc218
1637 #define RK3399_GMAC_RXCLK_DLY_ENA ((1 << 15) << 16 | (1 << 15))
1638 #define RK3399_GMAC_CLK_RX_DL_CFG(val) ((0x7f << 8) << 16 | ((val) << 8))
1639 #define RK3399_GMAC_TXCLK_DLY_ENA ((1 << 7) << 16 | (1 << 7))
1640 #define RK3399_GMAC_CLK_TX_DL_CFG(val) ((0x7f << 0) << 16 | ((val) << 0))
1641
1642 void dwge_mii_statchg_rockchip(struct device *);
1643
1644 void
1645 dwge_setup_rockchip(struct dwge_softc *sc)
1646 {
1647 struct regmap *rm;
1648 uint32_t grf;
1649 int tx_delay, rx_delay;
1650 char clock_mode[8];
1651
1652 grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
1653 rm = regmap_byphandle(grf);
1654 if (rm == NULL)
1655 return;
1656
1657 tx_delay = OF_getpropint(sc->sc_node, "tx_delay", 0x30);
1658 rx_delay = OF_getpropint(sc->sc_node, "rx_delay", 0x10);
1659
1660 if (OF_is_compatible(sc->sc_node, "rockchip,rk3288-gmac")) {
1661 /* Use RGMII interface. */
1662 regmap_write_4(rm, RK3288_GRF_SOC_CON1,
1663 RK3288_GMAC_PHY_INTF_SEL_RGMII | RK3288_RMII_MODE_MII);
1664
1665 /* Program clock delay lines. */
1666 regmap_write_4(rm, RK3288_GRF_SOC_CON3,
1667 RK3288_GMAC_TXCLK_DLY_ENA | RK3288_GMAC_RXCLK_DLY_ENA |
1668 RK3288_GMAC_CLK_TX_DL_CFG(tx_delay) |
1669 RK3288_GMAC_CLK_RX_DL_CFG(rx_delay));
1670
1671 /* Clock speed bits. */
1672 sc->sc_clk_sel = RK3288_GRF_SOC_CON1;
1673 sc->sc_clk_sel_2_5 = RK3288_GMAC_CLK_SEL_2_5;
1674 sc->sc_clk_sel_25 = RK3288_GMAC_CLK_SEL_25;
1675 sc->sc_clk_sel_125 = RK3288_GMAC_CLK_SEL_125;
1676 } else if (OF_is_compatible(sc->sc_node, "rockchip,rk3308-mac")) {
1677 /* Use RMII interface. */
1678 regmap_write_4(rm, RK3308_GRF_MAC_CON0,
1679 RK3308_INTF_SEL_RMII | RK3308_MAC_SPEED_100M);
1680
1681 /* Adjust MAC clock if necessary. */
1682 OF_getprop(sc->sc_node, "clock_in_out", clock_mode,
1683 sizeof(clock_mode));
1684 if (strcmp(clock_mode, "output") == 0) {
1685 clock_set_frequency(sc->sc_node, "stmmaceth",
1686 50000000);
1687 sc->sc_clk = GMAC_GMII_ADDR_CR_DIV_26;
1688 }
1689
1690 /* Clock speed bits. */
1691 sc->sc_clk_sel = RK3308_GRF_MAC_CON0;
1692 sc->sc_clk_sel_2_5 = RK3308_MAC_SPEED_10M;
1693 sc->sc_clk_sel_25 = RK3308_MAC_SPEED_100M;
1694 } else if (OF_is_compatible(sc->sc_node, "rockchip,rk3328-gmac")) {
1695 /* Use RGMII interface. */
1696 regmap_write_4(rm, RK3328_GRF_MAC_CON1,
1697 RK3328_GMAC_PHY_INTF_SEL_RGMII | RK3328_RMII_MODE_MII);
1698
1699 /* Program clock delay lines. */
1700 regmap_write_4(rm, RK3328_GRF_MAC_CON0,
1701 RK3328_GMAC_CLK_TX_DL_CFG(tx_delay) |
1702 RK3328_GMAC_CLK_RX_DL_CFG(rx_delay));
1703 regmap_write_4(rm, RK3328_GRF_MAC_CON1,
1704 RK3328_GMAC_TXCLK_DLY_ENA | RK3328_GMAC_RXCLK_DLY_ENA);
1705
1706 /* Clock speed bits. */
1707 sc->sc_clk_sel = RK3328_GRF_MAC_CON1;
1708 sc->sc_clk_sel_2_5 = RK3328_GMAC_CLK_SEL_2_5;
1709 sc->sc_clk_sel_25 = RK3328_GMAC_CLK_SEL_25;
1710 sc->sc_clk_sel_125 = RK3328_GMAC_CLK_SEL_125;
1711 } else {
1712 /* Use RGMII interface. */
1713 regmap_write_4(rm, RK3399_GRF_SOC_CON5,
1714 RK3399_GMAC_PHY_INTF_SEL_RGMII | RK3399_RMII_MODE_MII);
1715
1716 /* Program clock delay lines. */
1717 regmap_write_4(rm, RK3399_GRF_SOC_CON6,
1718 RK3399_GMAC_TXCLK_DLY_ENA | RK3399_GMAC_RXCLK_DLY_ENA |
1719 RK3399_GMAC_CLK_TX_DL_CFG(tx_delay) |
1720 RK3399_GMAC_CLK_RX_DL_CFG(rx_delay));
1721
1722 /* Clock speed bits. */
1723 sc->sc_clk_sel = RK3399_GRF_SOC_CON5;
1724 sc->sc_clk_sel_2_5 = RK3399_GMAC_CLK_SEL_2_5;
1725 sc->sc_clk_sel_25 = RK3399_GMAC_CLK_SEL_25;
1726 sc->sc_clk_sel_125 = RK3399_GMAC_CLK_SEL_125;
1727 }
1728
1729 sc->sc_mii.mii_statchg = dwge_mii_statchg_rockchip;
1730 }
1731
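/*
 * On Rockchip SoCs the MAC clock speed selection lives in the GRF, so
 * it must be reprogrammed whenever the link speed changes.
 */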
1732 void
1733 dwge_mii_statchg_rockchip(struct device *self)
1734 {
1735 struct dwge_softc *sc = (void *)self;
1736 struct regmap *rm;
1737 uint32_t grf;
1738 uint32_t gmac_clk_sel = 0;
1739 uint64_t media_active;
1740
1741 dwge_mii_statchg(self);
1742
1743 grf = OF_getpropint(sc->sc_node, "rockchip,grf", 0);
1744 rm = regmap_byphandle(grf);
1745 if (rm == NULL)
1746 return;
1747
1748 media_active = sc->sc_fixed_media;
1749 if (media_active == 0)
1750 media_active = sc->sc_mii.mii_media_active;
1751
1752 switch (IFM_SUBTYPE(media_active)) {
1753 case IFM_10_T:
1754 gmac_clk_sel = sc->sc_clk_sel_2_5;
1755 break;
1756 case IFM_100_TX:
1757 gmac_clk_sel = sc->sc_clk_sel_25;
1758 break;
1759 case IFM_1000_T:
1760 gmac_clk_sel = sc->sc_clk_sel_125;
1761 break;
1762 }
1763
1764 regmap_write_4(rm, sc->sc_clk_sel, gmac_clk_sel);
1765 }
1766
1767 #if NKSTAT > 0
1768
1769 struct dwge_counter {
1770 const char *c_name;
1771 enum kstat_kv_unit c_unit;
1772 uint32_t c_reg;
1773 };
1774
1775 const struct dwge_counter dwge_counters[] = {
1776 { "tx octets total", KSTAT_KV_U_BYTES, GMAC_MMC_TXOCTETCNT_GB },
1777 { "tx frames total", KSTAT_KV_U_PACKETS, GMAC_MMC_TXFRMCNT_GB },
1778 { "tx underflow", KSTAT_KV_U_PACKETS, GMAC_MMC_TXUNDFLWERR },
1779 { "tx carrier err", KSTAT_KV_U_PACKETS, GMAC_MMC_TXCARERR },
1780 { "tx good octets", KSTAT_KV_U_BYTES, GMAC_MMC_TXOCTETCNT_G },
1781 { "tx good frames", KSTAT_KV_U_PACKETS, GMAC_MMC_TXFRMCNT_G },
1782 { "rx frames total", KSTAT_KV_U_PACKETS, GMAC_MMC_RXFRMCNT_GB },
1783 { "rx octets total", KSTAT_KV_U_BYTES, GMAC_MMC_RXOCTETCNT_GB },
1784 { "rx good octets", KSTAT_KV_U_BYTES, GMAC_MMC_RXOCTETCNT_G },
1785 { "rx good mcast", KSTAT_KV_U_PACKETS, GMAC_MMC_RXMCFRMCNT_G },
1786 { "rx crc errors", KSTAT_KV_U_PACKETS, GMAC_MMC_RXCRCERR },
1787 { "rx len errors", KSTAT_KV_U_PACKETS, GMAC_MMC_RXLENERR },
1788 { "rx fifo err", KSTAT_KV_U_PACKETS, GMAC_MMC_RXFIFOOVRFLW },
1789 };
1790
1791 void
1792 dwge_kstat_attach(struct dwge_softc *sc)
1793 {
1794 struct kstat *ks;
1795 struct kstat_kv *kvs;
1796 int i;
1797
1798 mtx_init(&sc->sc_kstat_mtx, IPL_NET);
1799
1800 /* clear counters, enable reset-on-read */
1801 dwge_write(sc, GMAC_MAC_MMC_CTRL, GMAC_MAC_MMC_CTRL_ROR |
1802 GMAC_MAC_MMC_CTRL_CR);
1803
1804 ks = kstat_create(DEVNAME(sc), 0, "dwge-stats", 0,
1805 KSTAT_T_KV, 0);
1806 if (ks == NULL)
1807 return;
1808
1809 kvs = mallocarray(nitems(dwge_counters), sizeof(*kvs), M_DEVBUF,
1810 M_WAITOK | M_ZERO);
1811 for (i = 0; i < nitems(dwge_counters); i++) {
1812 kstat_kv_unit_init(&kvs[i], dwge_counters[i].c_name,
1813 KSTAT_KV_T_COUNTER64, dwge_counters[i].c_unit);
1814 }
1815
1816 kstat_set_mutex(ks, &sc->sc_kstat_mtx);
1817 ks->ks_softc = sc;
1818 ks->ks_data = kvs;
1819 ks->ks_datalen = nitems(dwge_counters) * sizeof(*kvs);
1820 ks->ks_read = dwge_kstat_read;
1821 sc->sc_kstat = ks;
1822 kstat_install(ks);
1823 }
1824
1825 int
1826 dwge_kstat_read(struct kstat *ks)
1827 {
1828 struct kstat_kv *kvs = ks->ks_data;
1829 struct dwge_softc *sc = ks->ks_softc;
1830 int i;
1831
1832 for (i = 0; i < nitems(dwge_counters); i++)
1833 kstat_kv_u64(&kvs[i]) += dwge_read(sc, dwge_counters[i].c_reg);
1834
1835 getnanouptime(&ks->ks_updated);
1836 return 0;
1837 }
1838
1839 #endif
1840