/*	$OpenBSD: if_fec.c,v 1.14 2022/01/09 05:42:37 jsg Exp $	*/
/*
 * Copyright (c) 2012-2013,2019 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/socket.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <machine/intr.h>
#include <machine/bus.h>
#include <machine/fdt.h>

#include "bpfilter.h"

#include <net/if.h>
#include <net/if_media.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_clock.h>
#include <dev/ofw/ofw_gpio.h>
#include <dev/ofw/ofw_pinctrl.h>
#include <dev/ofw/fdt.h>
/* configuration registers */
#define ENET_EIR		0x004
#define ENET_EIMR		0x008
#define ENET_RDAR		0x010
#define ENET_TDAR		0x014
#define ENET_ECR		0x024
#define ENET_MMFR		0x040
#define ENET_MSCR		0x044
#define ENET_MIBC		0x064
#define ENET_RCR		0x084
#define ENET_TCR		0x0C4
#define ENET_PALR		0x0E4
#define ENET_PAUR		0x0E8
#define ENET_OPD		0x0EC
#define ENET_IAUR		0x118
#define ENET_IALR		0x11C
#define ENET_GAUR		0x120
#define ENET_GALR		0x124
#define ENET_TFWR		0x144
#define ENET_RDSR		0x180
#define ENET_TDSR		0x184
#define ENET_MRBR		0x188
#define ENET_RSFL		0x190
#define ENET_RSEM		0x194
#define ENET_RAEM		0x198
#define ENET_RAFL		0x19C
#define ENET_TSEM		0x1A0
#define ENET_TAEM		0x1A4
#define ENET_TAFL		0x1A8
#define ENET_TIPG		0x1AC
#define ENET_FTRL		0x1B0
#define ENET_TACC		0x1C0
#define ENET_RACC		0x1C4

#define ENET_RDAR_RDAR		(1 << 24)
#define ENET_TDAR_TDAR		(1 << 24)
#define ENET_ECR_RESET		(1 << 0)
#define ENET_ECR_ETHEREN	(1 << 1)
#define ENET_ECR_EN1588		(1 << 4)
#define ENET_ECR_SPEED		(1 << 5)
#define ENET_ECR_DBSWP		(1 << 8)
#define ENET_MMFR_TA		(2 << 16)
#define ENET_MMFR_RA_SHIFT	18
#define ENET_MMFR_PA_SHIFT	23
#define ENET_MMFR_OP_WR		(1 << 28)
#define ENET_MMFR_OP_RD		(2 << 28)
#define ENET_MMFR_ST		(1 << 30)
#define ENET_RCR_MII_MODE	(1 << 2)
#define ENET_RCR_PROM		(1 << 3)
#define ENET_RCR_FCE		(1 << 5)
#define ENET_RCR_RGMII_MODE	(1 << 6)
#define ENET_RCR_RMII_10T	(1 << 9)
#define ENET_RCR_MAX_FL(x)	(((x) & 0x3fff) << 16)
#define ENET_TCR_FDEN		(1 << 2)
#define ENET_EIR_MII		(1 << 23)
#define ENET_EIR_RXF		(1 << 25)
#define ENET_EIR_TXF		(1 << 27)
#define ENET_TFWR_STRFWD	(1 << 8)
#define ENET_RACC_SHIFT16	(1 << 7)
/* statistics counters */

/* 1588 control */
#define ENET_ATCR		0x400
#define ENET_ATVR		0x404
#define ENET_ATOFF		0x408
#define ENET_ATPER		0x40C
#define ENET_ATCOR		0x410
#define ENET_ATINC		0x414
#define ENET_ATSTMP		0x418

/* capture / compare block */
#define ENET_TGSR		0x604
#define ENET_TCSR0		0x608
#define ENET_TCCR0		0x60C
#define ENET_TCSR1		0x610
#define ENET_TCCR1		0x614
#define ENET_TCSR2		0x618
#define ENET_TCCR2		0x61C
#define ENET_TCSR3		0x620
#define ENET_TCCR3		0x624

#define ENET_MII_CLK		2500000
#define ENET_ALIGNMENT		16

#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))

#define ENET_MAX_BUF_SIZE	1522
#define ENET_MAX_PKT_SIZE	1536

#define ENET_ROUNDUP(size, unit) (((size) + (unit) - 1) & ~((unit) - 1))

/* buffer descriptor status bits */
#define ENET_RXD_EMPTY		(1 << 15)
#define ENET_RXD_WRAP		(1 << 13)
#define ENET_RXD_INTR		(1 << 12)
#define ENET_RXD_LAST		(1 << 11)
#define ENET_RXD_MISS		(1 << 8)
#define ENET_RXD_BC		(1 << 7)
#define ENET_RXD_MC		(1 << 6)
#define ENET_RXD_LG		(1 << 5)
#define ENET_RXD_NO		(1 << 4)
#define ENET_RXD_CR		(1 << 2)
#define ENET_RXD_OV		(1 << 1)
#define ENET_RXD_TR		(1 << 0)

#define ENET_TXD_READY		(1 << 15)
#define ENET_TXD_WRAP		(1 << 13)
#define ENET_TXD_INTR		(1 << 12)
#define ENET_TXD_LAST		(1 << 11)
#define ENET_TXD_TC		(1 << 10)
#define ENET_TXD_ABC		(1 << 9)
#define ENET_TXD_STATUS_MASK	0x3ff

#ifdef ENET_ENHANCED_BD
/* enhanced */
#define ENET_RXD_INT		(1 << 23)

#define ENET_TXD_INT		(1 << 30)
#endif

struct fec_buf {
	bus_dmamap_t	fb_map;
	struct mbuf	*fb_m;
	struct mbuf	*fb_m0;
};

/* what should we use? */
#define ENET_NTXDESC	256
#define ENET_NTXSEGS	16
#define ENET_NRXDESC	256

struct fec_dmamem {
	bus_dmamap_t		fdm_map;
	bus_dma_segment_t	fdm_seg;
	size_t			fdm_size;
	caddr_t			fdm_kva;
};
#define ENET_DMA_MAP(_fdm)	((_fdm)->fdm_map)
#define ENET_DMA_LEN(_fdm)	((_fdm)->fdm_size)
#define ENET_DMA_DVA(_fdm)	((_fdm)->fdm_map->dm_segs[0].ds_addr)
#define ENET_DMA_KVA(_fdm)	((void *)(_fdm)->fdm_kva)

struct fec_desc {
	uint16_t fd_len;		/* payload's length in bytes */
	uint16_t fd_status;		/* BD's status (see datasheet) */
	uint32_t fd_addr;		/* payload's buffer address */
#ifdef ENET_ENHANCED_BD
	uint32_t fd_enhanced_status;	/* enhanced status with IEEE 1588 */
	uint32_t fd_reserved0;		/* reserved */
	uint32_t fd_update_done;	/* buffer descriptor update done */
	uint32_t fd_timestamp;		/* IEEE 1588 timestamp */
	uint32_t fd_reserved1[2];	/* reserved */
#endif
};

struct fec_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;
	struct mii_data		sc_mii;
	int			sc_node;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	void			*sc_ih[3]; /* Interrupt handler */
	bus_dma_tag_t		sc_dmat;

	struct fec_dmamem	*sc_txring;
	struct fec_buf		*sc_txbuf;
	struct fec_desc		*sc_txdesc;
	int			sc_tx_prod;
	int			sc_tx_cnt;
	int			sc_tx_cons;
	int			sc_tx_bounce;

	struct fec_dmamem	*sc_rxring;
	struct fec_buf		*sc_rxbuf;
	struct fec_desc		*sc_rxdesc;
	int			sc_rx_prod;
	struct if_rxring	sc_rx_ring;
	int			sc_rx_cons;

	struct timeout		sc_tick;
	uint32_t		sc_phy_speed;
};

struct fec_softc *fec_sc;

int fec_match(struct device *, void *, void *);
void fec_attach(struct device *, struct device *, void *);
void fec_phy_init(struct fec_softc *, struct mii_softc *);
int fec_ioctl(struct ifnet *, u_long, caddr_t);
void fec_start(struct ifnet *);
int fec_encap(struct fec_softc *, struct mbuf *, int *);
void fec_init_txd(struct fec_softc *);
void fec_init_rxd(struct fec_softc *);
void fec_init(struct fec_softc *);
void fec_stop(struct fec_softc *);
void fec_iff(struct fec_softc *);
int fec_intr(void *);
void fec_tx_proc(struct fec_softc *);
void fec_rx_proc(struct fec_softc *);
void fec_tick(void *);
int fec_miibus_readreg(struct device *, int, int);
void fec_miibus_writereg(struct device *, int, int, int);
void fec_miibus_statchg(struct device *);
int fec_ifmedia_upd(struct ifnet *);
void fec_ifmedia_sts(struct ifnet *, struct ifmediareq *);
struct fec_dmamem *fec_dmamem_alloc(struct fec_softc *, bus_size_t, bus_size_t);
void fec_dmamem_free(struct fec_softc *, struct fec_dmamem *);
struct mbuf *fec_alloc_mbuf(struct fec_softc *, bus_dmamap_t);
void fec_fill_rx_ring(struct fec_softc *);

const struct cfattach fec_ca = {
	sizeof (struct fec_softc), fec_match, fec_attach
};

struct cfdriver fec_cd = {
	NULL, "fec", DV_IFNET
};

int
fec_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return (OF_is_compatible(faa->fa_node, "fsl,imx6q-fec") ||
	    OF_is_compatible(faa->fa_node, "fsl,imx6sx-fec") ||
	    OF_is_compatible(faa->fa_node, "fsl,imx8mq-fec"));
}

void
fec_attach(struct device *parent, struct device *self, void *aux)
{
	struct fec_softc *sc = (struct fec_softc *) self;
	struct fdt_attach_args *faa = aux;
	struct fec_buf *txb, *rxb;
	struct mii_data *mii;
	struct mii_softc *child;
	struct ifnet *ifp;
	uint32_t phy_reset_gpio[3];
	uint32_t phy_reset_duration;
	int i, s;

	if (faa->fa_nreg < 1)
		return;

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("fec_attach: bus_space_map failed!");

	sc->sc_dmat = faa->fa_dmat;

	pinctrl_byname(faa->fa_node, "default");

	/* power it up */
	clock_enable_all(faa->fa_node);

	/* reset PHY */
	if (OF_getpropintarray(faa->fa_node, "phy-reset-gpios", phy_reset_gpio,
	    sizeof(phy_reset_gpio)) == sizeof(phy_reset_gpio)) {
		phy_reset_duration = OF_getpropint(faa->fa_node,
		    "phy-reset-duration", 1);
		if (phy_reset_duration > 1000)
			phy_reset_duration = 1;

		/*
		 * The Linux people really screwed the pooch here.
		 * The Linux kernel always treats the gpio as
		 * active-low, even if it is marked as active-high in
		 * the device tree.  As a result the device tree for
		 * many boards incorrectly marks the gpio as
		 * active-high.
		 */
		phy_reset_gpio[2] = GPIO_ACTIVE_LOW;
		gpio_controller_config_pin(phy_reset_gpio, GPIO_CONFIG_OUTPUT);

		/*
		 * On some Cubox-i machines we need to hold the PHY in
		 * reset a little bit longer than specified.
		 */
		gpio_controller_set_pin(phy_reset_gpio, 1);
		delay((phy_reset_duration + 1) * 1000);
		gpio_controller_set_pin(phy_reset_gpio, 0);
		delay(1000);
	}
	printf("\n");

	/* Figure out the hardware address.  Must happen before reset. */
	OF_getprop(faa->fa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
	    sizeof(sc->sc_ac.ac_enaddr));

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	HWRITE4(sc, ENET_EIMR, 0);
	HWRITE4(sc, ENET_EIR, 0xffffffff);

	sc->sc_ih[0] = fdt_intr_establish_idx(faa->fa_node, 0, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);
	sc->sc_ih[1] = fdt_intr_establish_idx(faa->fa_node, 1, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);
	sc->sc_ih[2] = fdt_intr_establish_idx(faa->fa_node, 2, IPL_NET,
	    fec_intr, sc, sc->sc_dev.dv_xname);

	/* Tx bounce buffer to align to 16. */
	if (OF_is_compatible(faa->fa_node, "fsl,imx6q-fec"))
		sc->sc_tx_bounce = 1;

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = fec_dmamem_alloc(sc,
	    ENET_NTXDESC * sizeof(struct fec_desc), 64);
	if (sc->sc_txring == NULL) {
		printf("%s: could not allocate Tx descriptor ring\n",
		    sc->sc_dev.dv_xname);
		goto bad;
	}
	sc->sc_txdesc = ENET_DMA_KVA(sc->sc_txring);

	/* Allocate Tx descriptors. */
	sc->sc_txbuf = malloc(sizeof(struct fec_buf) * ENET_NTXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < ENET_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, ENET_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->fb_map);
		txb->fb_m = txb->fb_m0 = NULL;
	}

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = fec_dmamem_alloc(sc,
	    ENET_NRXDESC * sizeof(struct fec_desc), 64);
	if (sc->sc_rxring == NULL) {
		printf("%s: could not allocate Rx descriptor ring\n",
		    sc->sc_dev.dv_xname);
		for (i = 0; i < ENET_NTXDESC; i++) {
			txb = &sc->sc_txbuf[i];
			bus_dmamap_destroy(sc->sc_dmat, txb->fb_map);
		}
		free(sc->sc_txbuf, M_DEVBUF,
		    sizeof(struct fec_buf) * ENET_NTXDESC);
		fec_dmamem_free(sc, sc->sc_txring);
		goto bad;
	}
	sc->sc_rxdesc = ENET_DMA_KVA(sc->sc_rxring);

	/* Allocate Rx descriptors. */
	sc->sc_rxbuf = malloc(sizeof(struct fec_buf) * ENET_NRXDESC,
	    M_DEVBUF, M_WAITOK);
	for (i = 0; i < ENET_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->fb_map);
		rxb->fb_m = NULL;
	}

	s = splnet();

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fec_ioctl;
	ifp->if_start = fec_start;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf("%s: address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	/*
	 * Initialize the MII clock.  The formula is:
	 *
	 * ENET_MII_CLK = ref_freq / ((phy_speed + 1) x 2)
	 * phy_speed = (((ref_freq / ENET_MII_CLK) / 2) - 1)
	 */
	sc->sc_phy_speed = clock_get_frequency(sc->sc_node, "ipg");
	sc->sc_phy_speed = (sc->sc_phy_speed + (ENET_MII_CLK - 1)) / ENET_MII_CLK;
	sc->sc_phy_speed = (sc->sc_phy_speed / 2) - 1;
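	/*
	 * Worked example, assuming a 66 MHz ipg clock (typical on
	 * i.MX6): roundup(66 MHz / 2.5 MHz) = 27, 27 / 2 - 1 = 12, so
	 * MDC runs at 66 MHz / ((12 + 1) * 2), roughly 2.5 MHz.
	 */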
	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	/* Initialize MII/media info. */
	mii = &sc->sc_mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = fec_miibus_readreg;
	mii->mii_writereg = fec_miibus_writereg;
	mii->mii_statchg = fec_miibus_statchg;

	ifmedia_init(&mii->mii_media, 0, fec_ifmedia_upd, fec_ifmedia_sts);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child)
		fec_phy_init(sc, child);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);
	splx(s);

	timeout_set(&sc->sc_tick, fec_tick, sc);

	fec_sc = sc;
	return;

bad:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, faa->fa_reg[0].size);
}

void
fec_phy_init(struct fec_softc *sc, struct mii_softc *child)
{
	struct device *dev = (struct device *)sc;
	int phy = child->mii_phy;
	uint32_t reg;

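	/*
	 * The AR8035 fixups below use the MMD indirection registers
	 * (0x0d selects the device/function, 0x0e carries the data)
	 * and the Atheros debug registers (address via 0x1d, data via
	 * 0x1e).
	 */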
	if (child->mii_oui == MII_OUI_ATHEROS &&
	    child->mii_model == MII_MODEL_ATHEROS_AR8035) {
		/* disable SmartEEE */
		fec_miibus_writereg(dev, phy, 0x0d, 0x0003);
		fec_miibus_writereg(dev, phy, 0x0e, 0x805d);
		fec_miibus_writereg(dev, phy, 0x0d, 0x4003);
		reg = fec_miibus_readreg(dev, phy, 0x0e);
		fec_miibus_writereg(dev, phy, 0x0e, reg & ~0x0100);

		/* enable 125MHz clk output */
		fec_miibus_writereg(dev, phy, 0x0d, 0x0007);
		fec_miibus_writereg(dev, phy, 0x0e, 0x8016);
		fec_miibus_writereg(dev, phy, 0x0d, 0x4007);

		reg = fec_miibus_readreg(dev, phy, 0x0e) & 0xffe3;
		fec_miibus_writereg(dev, phy, 0x0e, reg | 0x18);

		/* tx clock delay */
		fec_miibus_writereg(dev, phy, 0x1d, 0x0005);
		reg = fec_miibus_readreg(dev, phy, 0x1e);
		fec_miibus_writereg(dev, phy, 0x1e, reg | 0x0100);

		PHY_RESET(child);
	}

	if (child->mii_oui == MII_OUI_MICREL &&
	    child->mii_model == MII_MODEL_MICREL_KSZ9021) {
		uint32_t rxc, rxdv, txc, txen;
		uint32_t rxd0, rxd1, rxd2, rxd3;
		uint32_t txd0, txd1, txd2, txd3;
		uint32_t val, phandle;
		int node;

		node = sc->sc_node;
		phandle = OF_getpropint(sc->sc_node, "phy-handle", 0);
		if (phandle)
			node = OF_getnodebyphandle(phandle);
		rxc = OF_getpropint(node, "rxc-skew-ps", 1400) / 200;
		rxdv = OF_getpropint(node, "rxdv-skew-ps", 1400) / 200;
		txc = OF_getpropint(node, "txc-skew-ps", 1400) / 200;
		txen = OF_getpropint(node, "txen-skew-ps", 1400) / 200;
		rxd0 = OF_getpropint(node, "rxd0-skew-ps", 1400) / 200;
		rxd1 = OF_getpropint(node, "rxd1-skew-ps", 1400) / 200;
		rxd2 = OF_getpropint(node, "rxd2-skew-ps", 1400) / 200;
		rxd3 = OF_getpropint(node, "rxd3-skew-ps", 1400) / 200;
		txd0 = OF_getpropint(node, "txd0-skew-ps", 1400) / 200;
		txd1 = OF_getpropint(node, "txd1-skew-ps", 1400) / 200;
		txd2 = OF_getpropint(node, "txd2-skew-ps", 1400) / 200;
		txd3 = OF_getpropint(node, "txd3-skew-ps", 1400) / 200;

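		/*
		 * KSZ9021 extended registers: write the register number
		 * (with bit 15 set for a write access) to 0x0b, then
		 * the data to 0x0c.  0x104-0x106 are the RGMII pad skew
		 * registers.
		 */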
		val = ((rxc & 0xf) << 12) | ((rxdv & 0xf) << 8) |
		    ((txc & 0xf) << 4) | ((txen & 0xf) << 0);
		fec_miibus_writereg(dev, phy, 0x0b, 0x8104);
		fec_miibus_writereg(dev, phy, 0x0c, val);

		val = ((rxd3 & 0xf) << 12) | ((rxd2 & 0xf) << 8) |
		    ((rxd1 & 0xf) << 4) | ((rxd0 & 0xf) << 0);
		fec_miibus_writereg(dev, phy, 0x0b, 0x8105);
		fec_miibus_writereg(dev, phy, 0x0c, val);

		val = ((txd3 & 0xf) << 12) | ((txd2 & 0xf) << 8) |
		    ((txd1 & 0xf) << 4) | ((txd0 & 0xf) << 0);
		fec_miibus_writereg(dev, phy, 0x0b, 0x8106);
		fec_miibus_writereg(dev, phy, 0x0c, val);
	}

	if (child->mii_oui == MII_OUI_MICREL &&
	    child->mii_model == MII_MODEL_MICREL_KSZ9031) {
		uint32_t rxc, rxdv, txc, txen;
		uint32_t rxd0, rxd1, rxd2, rxd3;
		uint32_t txd0, txd1, txd2, txd3;
		uint32_t val, phandle;
		int node;

		node = sc->sc_node;
		phandle = OF_getpropint(sc->sc_node, "phy-handle", 0);
		if (phandle)
			node = OF_getnodebyphandle(phandle);
		rxc = OF_getpropint(node, "rxc-skew-ps", 900) / 60;
		rxdv = OF_getpropint(node, "rxdv-skew-ps", 420) / 60;
		txc = OF_getpropint(node, "txc-skew-ps", 900) / 60;
		txen = OF_getpropint(node, "txen-skew-ps", 420) / 60;
		rxd0 = OF_getpropint(node, "rxd0-skew-ps", 420) / 60;
		rxd1 = OF_getpropint(node, "rxd1-skew-ps", 420) / 60;
		rxd2 = OF_getpropint(node, "rxd2-skew-ps", 420) / 60;
		rxd3 = OF_getpropint(node, "rxd3-skew-ps", 420) / 60;
		txd0 = OF_getpropint(node, "txd0-skew-ps", 420) / 60;
		txd1 = OF_getpropint(node, "txd1-skew-ps", 420) / 60;
		txd2 = OF_getpropint(node, "txd2-skew-ps", 420) / 60;
		txd3 = OF_getpropint(node, "txd3-skew-ps", 420) / 60;

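		/*
		 * The KSZ9031 pad skews live in MMD device 2: select
		 * the device via 0x0d and the register via 0x0e, then
		 * flip 0x0d to data mode (0x4002) and write the value
		 * through 0x0e.
		 */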
		val = ((rxdv & 0xf) << 4) | ((txen & 0xf) << 0);
		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
		fec_miibus_writereg(dev, phy, 0x0e, 0x0004);
		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
		fec_miibus_writereg(dev, phy, 0x0e, val);

		val = ((rxd3 & 0xf) << 12) | ((rxd2 & 0xf) << 8) |
		    ((rxd1 & 0xf) << 4) | ((rxd0 & 0xf) << 0);
		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
		fec_miibus_writereg(dev, phy, 0x0e, 0x0005);
		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
		fec_miibus_writereg(dev, phy, 0x0e, val);

		val = ((txd3 & 0xf) << 12) | ((txd2 & 0xf) << 8) |
		    ((txd1 & 0xf) << 4) | ((txd0 & 0xf) << 0);
		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
		fec_miibus_writereg(dev, phy, 0x0e, 0x0006);
		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
		fec_miibus_writereg(dev, phy, 0x0e, val);

		val = ((txc & 0x1f) << 5) | ((rxc & 0x1f) << 0);
		fec_miibus_writereg(dev, phy, 0x0d, 0x0002);
		fec_miibus_writereg(dev, phy, 0x0e, 0x0008);
		fec_miibus_writereg(dev, phy, 0x0d, 0x4002);
		fec_miibus_writereg(dev, phy, 0x0e, val);
	}
}

void
fec_init_rxd(struct fec_softc *sc)
{
	struct fec_desc *rxd;

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	memset(sc->sc_rxdesc, 0, ENET_DMA_LEN(sc->sc_rxring));
	rxd = &sc->sc_rxdesc[ENET_NRXDESC - 1];
	rxd->fd_status = ENET_RXD_WRAP;
}

void
fec_init_txd(struct fec_softc *sc)
{
	struct fec_desc *txd;

	sc->sc_tx_prod = sc->sc_tx_cons = 0;
	sc->sc_tx_cnt = 0;

	memset(sc->sc_txdesc, 0, ENET_DMA_LEN(sc->sc_txring));
	txd = &sc->sc_txdesc[ENET_NTXDESC - 1];
	txd->fd_status = ENET_TXD_WRAP;
}

void
fec_init(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int speed = 0;

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	/* set hw address */
	HWRITE4(sc, ENET_PALR,
	    (sc->sc_ac.ac_enaddr[0] << 24) |
	    (sc->sc_ac.ac_enaddr[1] << 16) |
	    (sc->sc_ac.ac_enaddr[2] << 8) |
	     sc->sc_ac.ac_enaddr[3]);
	HWRITE4(sc, ENET_PAUR,
	    (sc->sc_ac.ac_enaddr[4] << 24) |
	    (sc->sc_ac.ac_enaddr[5] << 16));

	/* clear outstanding interrupts */
	HWRITE4(sc, ENET_EIR, 0xffffffff);

	/* set max receive buffer size, 3-0 bits always zero for alignment */
	HWRITE4(sc, ENET_MRBR, ENET_MAX_PKT_SIZE);

	/* init descriptor */
	fec_init_txd(sc);
	fec_init_rxd(sc);

	/* fill RX ring */
	if_rxr_init(&sc->sc_rx_ring, 2, ENET_NRXDESC);
	fec_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_txring),
	    0, ENET_DMA_LEN(sc->sc_txring), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_rxring),
	    0, ENET_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREWRITE);

	/* set descriptor */
	HWRITE4(sc, ENET_TDSR, ENET_DMA_DVA(sc->sc_txring));
	HWRITE4(sc, ENET_RDSR, ENET_DMA_DVA(sc->sc_rxring));

	/* set it to full-duplex */
	HWRITE4(sc, ENET_TCR, ENET_TCR_FDEN);

	/*
	 * Set the max frame length to 1522 (1518 plus a VLAN tag) and
	 * enable flow control (pause frames).
	 * XXX: RGMII mode is PHY dependent.
	 */
	HWRITE4(sc, ENET_RCR,
	    ENET_RCR_MAX_FL(1522) | ENET_RCR_RGMII_MODE | ENET_RCR_MII_MODE |
	    ENET_RCR_FCE);

	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

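	/*
	 * RACC_SHIFT16 makes the MAC prepend two bytes of padding to
	 * each received frame so the IP header ends up 4-byte aligned;
	 * fec_rx_proc() removes it again with m_adj(m, ETHER_ALIGN).
	 */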
	HWRITE4(sc, ENET_RACC, ENET_RACC_SHIFT16);
	HWRITE4(sc, ENET_FTRL, ENET_MAX_BUF_SIZE);

	/* RX FIFO threshold and pause */
	HWRITE4(sc, ENET_RSEM, 0x84);
	HWRITE4(sc, ENET_RSFL, 16);
	HWRITE4(sc, ENET_RAEM, 8);
	HWRITE4(sc, ENET_RAFL, 8);
	HWRITE4(sc, ENET_OPD, 0xFFF0);

	/*
	 * Use store and forward mode.  Required at least on i.MX6;
	 * other variants would need a proper TX watermark instead.
	 */
	HWRITE4(sc, ENET_TFWR, ENET_TFWR_STRFWD);

	/* enable gigabit-ethernet and set it to support little-endian */
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:	/* Gigabit */
		speed |= ENET_ECR_SPEED;
		break;
	default:
		speed &= ~ENET_ECR_SPEED;
	}
	HWRITE4(sc, ENET_ECR, ENET_ECR_ETHEREN | speed | ENET_ECR_DBSWP);

#ifdef ENET_ENHANCED_BD
	HSET4(sc, ENET_ECR, ENET_ECR_EN1588);
#endif

	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);

	/* program promiscuous mode and multicast filters */
	fec_iff(sc);

	timeout_add_sec(&sc->sc_tick, 1);

	/* Indicate we are up and running. */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* enable interrupts for tx/rx */
	HWRITE4(sc, ENET_EIMR, ENET_EIR_TXF | ENET_EIR_RXF);

	fec_start(ifp);
}

void
fec_stop(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct fec_buf *txb, *rxb;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_del(&sc->sc_tick);

	/* reset the controller */
	HSET4(sc, ENET_ECR, ENET_ECR_RESET);
	while (HREAD4(sc, ENET_ECR) & ENET_ECR_ETHEREN)
		continue;

	HWRITE4(sc, ENET_MSCR, (sc->sc_phy_speed << 1) | 0x100);

	for (i = 0; i < ENET_NTXDESC; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->fb_m == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, txb->fb_map, 0,
		    txb->fb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txb->fb_map);
		m_freem(txb->fb_m);
		m_freem(txb->fb_m0);
		txb->fb_m = txb->fb_m0 = NULL;
	}
	for (i = 0; i < ENET_NRXDESC; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->fb_m == NULL)
			continue;
		bus_dmamap_sync(sc->sc_dmat, rxb->fb_map, 0,
		    rxb->fb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->fb_map);
		if_rxr_put(&sc->sc_rx_ring, 1);
		rxb->fb_m = NULL;
	}
}

void
fec_iff(struct fec_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint64_t ghash = 0, ihash = 0;
	uint32_t h;

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		ihash = 0xffffffffffffffffLLU;
	} else if (ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		ghash = 0xffffffffffffffffLLU;
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

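			/*
			 * ether_crc32_le() leaves the CRC in host byte
			 * order; on this little-endian hardware byte 3
			 * holds the most significant bits, so this
			 * picks the top six CRC bits as the index into
			 * the 64-bit group hash in GAUR/GALR.
			 */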
			ghash |= 1LLU << (((uint8_t *)&h)[3] >> 2);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	HWRITE4(sc, ENET_GAUR, (uint32_t)(ghash >> 32));
	HWRITE4(sc, ENET_GALR, (uint32_t)ghash);

	HWRITE4(sc, ENET_IAUR, (uint32_t)(ihash >> 32));
	HWRITE4(sc, ENET_IALR, (uint32_t)ihash);
}

int
fec_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct fec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			fec_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				fec_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fec_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			fec_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
fec_start(struct ifnet *ifp)
{
	struct fec_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int error, idx;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

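	/*
	 * Peek at packets with ifq_deq_begin(): on a transient ENOBUFS
	 * the mbuf is rolled back untouched, otherwise the dequeue is
	 * committed (and the packet dropped on a permanent EFBIG).
	 */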
	idx = sc->sc_tx_prod;
	while ((sc->sc_txdesc[idx].fd_status & ENET_TXD_READY) == 0) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		error = fec_encap(sc, m, &idx);
		if (error == ENOBUFS) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		if (error == EFBIG) {
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m); /* give up: drop it */
			ifp->if_oerrors++;
			continue;
		}

		ifq_deq_commit(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_tx_prod != idx) {
		sc->sc_tx_prod = idx;

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

int
fec_encap(struct fec_softc *sc, struct mbuf *m0, int *idx)
{
	struct fec_desc *txd, *txd_start;
	bus_dmamap_t map;
	struct mbuf *m;
	int cur, frag, i;
	int ret;

	m = m0;
	cur = frag = *idx;
	map = sc->sc_txbuf[cur].fb_map;

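	/*
	 * The i.MX6 ENET wants its Tx buffers 16-byte aligned, so
	 * bounce the chain into a freshly allocated (and therefore
	 * aligned) copy first.
	 */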
	if (sc->sc_tx_bounce) {
		m = m_dup_pkt(m0, 0, M_DONTWAIT);
		if (m == NULL) {
			ret = ENOBUFS;
			goto fail;
		}
	}

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
		if (m_defrag(m, M_DONTWAIT)) {
			ret = EFBIG;
			goto fail;
		}
		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT)) {
			ret = EFBIG;
			goto fail;
		}
	}

	if (map->dm_nsegs > (ENET_NTXDESC - sc->sc_tx_cnt - 2)) {
		bus_dmamap_unload(sc->sc_dmat, map);
		ret = ENOBUFS;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	txd = txd_start = &sc->sc_txdesc[frag];
	for (i = 0; i < map->dm_nsegs; i++) {
		txd->fd_addr = map->dm_segs[i].ds_addr;
		txd->fd_len = map->dm_segs[i].ds_len;
		txd->fd_status &= ENET_TXD_WRAP;
		if (i == (map->dm_nsegs - 1))
			txd->fd_status |= ENET_TXD_LAST | ENET_TXD_TC;
		if (i != 0)
			txd->fd_status |= ENET_TXD_READY;

		bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_txring),
		    frag * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

		cur = frag;
		if (frag == (ENET_NTXDESC - 1)) {
			txd = &sc->sc_txdesc[0];
			frag = 0;
		} else {
			txd++;
			frag++;
		}
		KASSERT(frag != sc->sc_tx_cons);
	}

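	/*
	 * Set READY on the first descriptor last, once the rest of the
	 * chain is in place, so the chip never picks up a half-built
	 * chain.
	 */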
	txd_start->fd_status |= ENET_TXD_READY;
	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_txring),
	    *idx * sizeof(*txd), sizeof(*txd), BUS_DMASYNC_PREWRITE);

	HWRITE4(sc, ENET_TDAR, ENET_TDAR_TDAR);

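	/*
	 * Park the loaded map at the last descriptor (cur), where
	 * fec_tx_proc() unloads it on completion; the spare map that
	 * lived there replaces the one consumed at *idx.
	 */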
	KASSERT(sc->sc_txbuf[cur].fb_m == NULL);
	KASSERT(sc->sc_txbuf[cur].fb_m0 == NULL);
	sc->sc_txbuf[*idx].fb_map = sc->sc_txbuf[cur].fb_map;
	sc->sc_txbuf[cur].fb_map = map;
	sc->sc_txbuf[cur].fb_m = m;
	if (m != m0)
		sc->sc_txbuf[cur].fb_m0 = m0;

	sc->sc_tx_cnt += map->dm_nsegs;
	*idx = frag;

	return (0);

fail:
	if (m != m0)
		m_freem(m);
	return (ret);
}

/*
 * Established by attachment driver at interrupt priority IPL_NET.
 */
int
fec_intr(void *arg)
{
	struct fec_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	u_int32_t status;

	/* Find out which interrupts are pending. */
	status = HREAD4(sc, ENET_EIR);

	/* Acknowledge the interrupts we are about to handle. */
	status &= (ENET_EIR_RXF | ENET_EIR_TXF);
	HWRITE4(sc, ENET_EIR, status);

	/*
	 * Handle incoming packets.
	 */
	if (ISSET(status, ENET_EIR_RXF))
		fec_rx_proc(sc);

	/*
	 * Handle transmitted packets.
	 */
	if (ISSET(status, ENET_EIR_TXF))
		fec_tx_proc(sc);

	/* Try to transmit. */
	if (ifp->if_flags & IFF_RUNNING && !ifq_empty(&ifp->if_snd))
		fec_start(ifp);

	return 1;
}

void
fec_tx_proc(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct fec_desc *txd;
	struct fec_buf *txb;
	int idx;

	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_txring), 0,
	    ENET_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (sc->sc_tx_cnt > 0) {
		idx = sc->sc_tx_cons;
		KASSERT(idx < ENET_NTXDESC);

		txd = &sc->sc_txdesc[idx];
		if (txd->fd_status & ENET_TXD_READY)
			break;

		txb = &sc->sc_txbuf[idx];
		if (txb->fb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->fb_map, 0,
			    txb->fb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->fb_map);

			m_freem(txb->fb_m);
			m_freem(txb->fb_m0);
			txb->fb_m = txb->fb_m0 = NULL;
		}

		ifq_clr_oactive(&ifp->if_snd);

		sc->sc_tx_cnt--;

		if (sc->sc_tx_cons == (ENET_NTXDESC - 1))
			sc->sc_tx_cons = 0;
		else
			sc->sc_tx_cons++;

		txd->fd_status &= ENET_TXD_WRAP;
	}

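	/*
	 * Workaround for i.MX6 erratum ERR006358: a TDAR write that
	 * races with the transmitter going idle can be ignored and
	 * stall the ring, so rewrite TDAR while descriptors are still
	 * outstanding.
	 */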
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;
	else /* ERR006358 */
		HWRITE4(sc, ENET_TDAR, ENET_TDAR_TDAR);
}

void
fec_rx_proc(struct fec_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct fec_desc *rxd;
	struct fec_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	int idx, len;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_rxring), 0,
	    ENET_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		idx = sc->sc_rx_cons;
		KASSERT(idx < ENET_NRXDESC);

		rxd = &sc->sc_rxdesc[idx];
		if (rxd->fd_status & ENET_RXD_EMPTY)
			break;

		len = rxd->fd_len;
		rxb = &sc->sc_rxbuf[idx];
		KASSERT(rxb->fb_m);

		bus_dmamap_sync(sc->sc_dmat, rxb->fb_map, 0,
		    len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->fb_map);

		/* Strip off CRC. */
		len -= ETHER_CRC_LEN;
		KASSERT(len > 0);

		m = rxb->fb_m;
		rxb->fb_m = NULL;

		m_adj(m, ETHER_ALIGN);
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);

		if_rxr_put(&sc->sc_rx_ring, 1);
		if (sc->sc_rx_cons == (ENET_NRXDESC - 1))
			sc->sc_rx_cons = 0;
		else
			sc->sc_rx_cons++;
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	fec_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, ENET_DMA_MAP(sc->sc_rxring), 0,
	    ENET_DMA_LEN(sc->sc_rxring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* rx descriptors are ready */
	HWRITE4(sc, ENET_RDAR, ENET_RDAR_RDAR);
}

void
fec_tick(void *arg)
{
	struct fec_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

/*
 * MII
 * Interrupts need ENET_ECR_ETHEREN to be set,
 * so we just read the interrupt status registers.
 */
int
fec_miibus_readreg(struct device *dev, int phy, int reg)
{
	int r = 0;
	struct fec_softc *sc = (struct fec_softc *)dev;

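	/*
	 * Compose an IEEE 802.3 clause 22 read frame in MMFR;
	 * completion is signalled by the MII bit in EIR, which is
	 * cleared first.
	 */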
	HWRITE4(sc, ENET_EIR, ENET_EIR_MII);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_RD | ENET_MMFR_TA |
	    phy << ENET_MMFR_PA_SHIFT | reg << ENET_MMFR_RA_SHIFT);

	while (!(HREAD4(sc, ENET_EIR) & ENET_EIR_MII))
		continue;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR);

	return (r & 0xffff);
}

void
fec_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct fec_softc *sc = (struct fec_softc *)dev;

	HWRITE4(sc, ENET_EIR, ENET_EIR_MII);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ENET_MMFR,
	    ENET_MMFR_ST | ENET_MMFR_OP_WR | ENET_MMFR_TA |
	    phy << ENET_MMFR_PA_SHIFT | reg << ENET_MMFR_RA_SHIFT |
	    (val & 0xffff));

	while (!(HREAD4(sc, ENET_EIR) & ENET_EIR_MII))
		continue;
}

void
fec_miibus_statchg(struct device *dev)
{
	struct fec_softc *sc = (struct fec_softc *)dev;
	uint32_t ecr, rcr;

	ecr = HREAD4(sc, ENET_ECR) & ~ENET_ECR_SPEED;
	rcr = HREAD4(sc, ENET_RCR) & ~ENET_RCR_RMII_10T;
	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:	/* Gigabit */
		ecr |= ENET_ECR_SPEED;
		break;
	case IFM_100_TX:
		break;
	case IFM_10_T:
		rcr |= ENET_RCR_RMII_10T;
		break;
	}
	HWRITE4(sc, ENET_ECR, ecr);
	HWRITE4(sc, ENET_RCR, rcr);
}

int
fec_ifmedia_upd(struct ifnet *ifp)
{
	struct fec_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	int err;

	if (mii->mii_instance) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	err = mii_mediachg(mii);
	return (err);
}

void
fec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct fec_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Manage DMA'able memory.
 */
struct fec_dmamem *
fec_dmamem_alloc(struct fec_softc *sc, bus_size_t size, bus_size_t align)
{
	struct fec_dmamem *fdm;
	int nsegs;

	fdm = malloc(sizeof(*fdm), M_DEVBUF, M_WAITOK | M_ZERO);
	fdm->fdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &fdm->fdm_map) != 0)
		goto fdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &fdm->fdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &fdm->fdm_seg, nsegs, size,
	    &fdm->fdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, fdm->fdm_map, fdm->fdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (fdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, fdm->fdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &fdm->fdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, fdm->fdm_map);
fdmfree:
	free(fdm, M_DEVBUF, sizeof(*fdm));

	return (NULL);
}

void
fec_dmamem_free(struct fec_softc *sc, struct fec_dmamem *fdm)
{
	bus_dmamem_unmap(sc->sc_dmat, fdm->fdm_kva, fdm->fdm_size);
	bus_dmamem_free(sc->sc_dmat, &fdm->fdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, fdm->fdm_map);
	free(fdm, M_DEVBUF, sizeof(*fdm));
}

struct mbuf *
fec_alloc_mbuf(struct fec_softc *sc, bus_dmamap_t map)
{
	struct mbuf *m = NULL;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (!m)
		return (NULL);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
		printf("%s: could not load mbuf DMA map\n",
		    sc->sc_dev.dv_xname);
		m_freem(m);
		return (NULL);
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0,
	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

	return (m);
}

void
fec_fill_rx_ring(struct fec_softc *sc)
{
	struct fec_desc *rxd;
	struct fec_buf *rxb;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, ENET_NRXDESC);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[sc->sc_rx_prod];
		rxb->fb_m = fec_alloc_mbuf(sc, rxb->fb_map);
		if (rxb->fb_m == NULL)
			break;
		rxd = &sc->sc_rxdesc[sc->sc_rx_prod];
		rxd->fd_len = rxb->fb_map->dm_segs[0].ds_len - 1;
		rxd->fd_addr = rxb->fb_map->dm_segs[0].ds_addr;
		rxd->fd_status &= ENET_RXD_WRAP;
		rxd->fd_status |= ENET_RXD_EMPTY;

		if (sc->sc_rx_prod == (ENET_NRXDESC - 1))
			sc->sc_rx_prod = 0;
		else
			sc->sc_rx_prod++;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);
}