/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc., Vortex86 RDC R6040 FastEthernet. */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
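/*
 * hw.vte.tx_deep_copy: when non-zero (the default), vte_encap() deep-copies
 * fragmented mbuf chains into pre-allocated clusters instead of calling
 * m_defrag(9); see the comment in vte_encap() for the rationale.
 */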
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);

/*
 * Devices supported by this driver.
 */
static const struct vte_ident vte_ident_table[] = {
        { VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
        { 0, 0, NULL}
};

static int vte_attach(device_t);
static int vte_detach(device_t);
static int vte_dma_alloc(struct vte_softc *);
static void vte_dma_free(struct vte_softc *);
static void vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
    vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
    vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
    vte_fixup_rx(if_t, struct mbuf *);
#endif
static void vte_get_macaddr(struct vte_softc *);
static void vte_init(void *);
static void vte_init_locked(struct vte_softc *);
static int vte_init_rx_ring(struct vte_softc *);
static int vte_init_tx_ring(struct vte_softc *);
static void vte_intr(void *);
static int vte_ioctl(if_t, u_long, caddr_t);
static uint64_t vte_get_counter(if_t, ift_counter);
static void vte_mac_config(struct vte_softc *);
static int vte_miibus_readreg(device_t, int, int);
static void vte_miibus_statchg(device_t);
static int vte_miibus_writereg(device_t, int, int, int);
static int vte_mediachange(if_t);
static int vte_mediachange_locked(if_t);
static void vte_mediastatus(if_t, struct ifmediareq *);
static int vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int vte_probe(device_t);
static void vte_reset(struct vte_softc *);
static int vte_resume(device_t);
static void vte_rxeof(struct vte_softc *);
static void vte_rxfilter(struct vte_softc *);
static int vte_shutdown(device_t);
static void vte_start(if_t);
static void vte_start_locked(struct vte_softc *);
static void vte_start_mac(struct vte_softc *);
static void vte_stats_clear(struct vte_softc *);
static void vte_stats_update(struct vte_softc *);
static void vte_stop(struct vte_softc *);
static void vte_stop_mac(struct vte_softc *);
static int vte_suspend(device_t);
static void vte_sysctl_node(struct vte_softc *);
static void vte_tick(void *);
static void vte_txeof(struct vte_softc *);
static void vte_watchdog(struct vte_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe, vte_probe),
        DEVMETHOD(device_attach, vte_attach),
        DEVMETHOD(device_detach, vte_detach),
        DEVMETHOD(device_shutdown, vte_shutdown),
        DEVMETHOD(device_suspend, vte_suspend),
        DEVMETHOD(device_resume, vte_resume),

        /* MII interface. */
        DEVMETHOD(miibus_readreg, vte_miibus_readreg),
        DEVMETHOD(miibus_writereg, vte_miibus_writereg),
        DEVMETHOD(miibus_statchg, vte_miibus_statchg),

        DEVMETHOD_END
};

static driver_t vte_driver = {
        "vte",
        vte_methods,
        sizeof(struct vte_softc)
};

DRIVER_MODULE(vte, pci, vte_driver, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, 0, 0);

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
        struct vte_softc *sc;
        int i;

        sc = device_get_softc(dev);

        CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
            (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
        for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
                DELAY(5);
                if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
                        break;
        }

        if (i == 0) {
                device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
                return (0);
        }

        return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct vte_softc *sc;
        int i;

        sc = device_get_softc(dev);

        CSR_WRITE_2(sc, VTE_MMWD, val);
        CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
            (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
        for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
                DELAY(5);
                if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
                        break;
        }

        if (i == 0)
                device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

        return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
        struct vte_softc *sc;
        struct mii_data *mii;
        if_t ifp;
        uint16_t val;

        sc = device_get_softc(dev);

        mii = device_get_softc(sc->vte_miibus);
        ifp = sc->vte_ifp;
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
                return;

        sc->vte_flags &= ~VTE_FLAG_LINK;
        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->vte_flags |= VTE_FLAG_LINK;
                        break;
                default:
                        break;
                }
        }

        /* Stop RX/TX MACs. */
        vte_stop_mac(sc);
        /* Program MACs with resolved duplex and flow control. */
        if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
                /*
                 * Timer waiting time : (63 + TIMER * 64) MII clock.
                 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
                 */
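                /*
                 * Worked example: at 100Mbps the MII clock is 25MHz, so
                 * TIMER = 18 gives (63 + 18 * 64) / 25MHz ~= 48.6us; at
                 * 10Mbps, TIMER = 1 gives (63 + 1 * 64) / 2.5MHz ~= 50.8us,
                 * matching the per-speed values noted below.
                 */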
                if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
                        val = 18 << VTE_IM_TIMER_SHIFT;
                else
                        val = 1 << VTE_IM_TIMER_SHIFT;
                val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
                /* 48.6us for 100Mbps, 50.8us for 10Mbps */
                CSR_WRITE_2(sc, VTE_MRICR, val);

                if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
                        val = 18 << VTE_IM_TIMER_SHIFT;
                else
                        val = 1 << VTE_IM_TIMER_SHIFT;
                val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
                /* 48.6us for 100Mbps, 50.8us for 10Mbps */
                CSR_WRITE_2(sc, VTE_MTICR, val);

                vte_mac_config(sc);
                vte_start_mac(sc);
        }
}

static void
vte_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
        struct vte_softc *sc;
        struct mii_data *mii;

        sc = if_getsoftc(ifp);
        VTE_LOCK(sc);
        if ((if_getflags(ifp) & IFF_UP) == 0) {
                VTE_UNLOCK(sc);
                return;
        }
        mii = device_get_softc(sc->vte_miibus);

        mii_pollstat(mii);
        ifmr->ifm_status = mii->mii_media_status;
        ifmr->ifm_active = mii->mii_media_active;
        VTE_UNLOCK(sc);
}

static int
vte_mediachange(if_t ifp)
{
        struct vte_softc *sc;
        int error;

        sc = if_getsoftc(ifp);
        VTE_LOCK(sc);
        error = vte_mediachange_locked(ifp);
        VTE_UNLOCK(sc);
        return (error);
}

static int
vte_mediachange_locked(if_t ifp)
{
        struct vte_softc *sc;
        struct mii_data *mii;
        struct mii_softc *miisc;
        int error;

        sc = if_getsoftc(ifp);
        mii = device_get_softc(sc->vte_miibus);
        LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                PHY_RESET(miisc);
        error = mii_mediachg(mii);

        return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
        const struct vte_ident *ident;
        uint16_t vendor, devid;

        vendor = pci_get_vendor(dev);
        devid = pci_get_device(dev);
        for (ident = vte_ident_table; ident->name != NULL; ident++) {
                if (vendor == ident->vendorid && devid == ident->deviceid)
                        return (ident);
        }

        return (NULL);
}

static int
vte_probe(device_t dev)
{
        const struct vte_ident *ident;

        ident = vte_find_ident(dev);
        if (ident != NULL) {
                device_set_desc(dev, ident->name);
                return (BUS_PROBE_DEFAULT);
        }

        return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
        uint16_t mid;

        /*
         * It seems there is no way to reload the station address;
         * it is supposed to have been set by the BIOS.
         */
        mid = CSR_READ_2(sc, VTE_MID0L);
        sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
        sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
        mid = CSR_READ_2(sc, VTE_MID0M);
        sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
        sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
        mid = CSR_READ_2(sc, VTE_MID0H);
        sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
        sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_attach(device_t dev)
{
        struct vte_softc *sc;
        if_t ifp;
        uint16_t macid;
        int error, rid;

        error = 0;
        sc = device_get_softc(dev);
        sc->vte_dev = dev;

        mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
        sc->vte_ident = vte_find_ident(dev);

        /* Map the device. */
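        /*
         * The controller exposes its registers through both a memory
         * BAR (BAR 1) and an I/O port BAR (BAR 0); prefer memory-mapped
         * access and fall back to I/O ports only if the memory mapping
         * fails.
         */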
        pci_enable_busmaster(dev);
        sc->vte_res_id = PCIR_BAR(1);
        sc->vte_res_type = SYS_RES_MEMORY;
        sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
            &sc->vte_res_id, RF_ACTIVE);
        if (sc->vte_res == NULL) {
                sc->vte_res_id = PCIR_BAR(0);
                sc->vte_res_type = SYS_RES_IOPORT;
                sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
                    &sc->vte_res_id, RF_ACTIVE);
                if (sc->vte_res == NULL) {
                        device_printf(dev, "cannot map memory/ports.\n");
                        mtx_destroy(&sc->vte_mtx);
                        return (ENXIO);
                }
        }
        if (bootverbose) {
                device_printf(dev, "using %s space register mapping\n",
                    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
                device_printf(dev, "MAC Identifier : 0x%04x\n",
                    CSR_READ_2(sc, VTE_MACID));
                macid = CSR_READ_2(sc, VTE_MACID_REV);
                device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
                    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
                    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
        }

        rid = 0;
        sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->vte_irq == NULL) {
                device_printf(dev, "cannot allocate IRQ resources.\n");
                error = ENXIO;
                goto fail;
        }

        /* Reset the ethernet controller. */
        vte_reset(sc);

        if ((error = vte_dma_alloc(sc)) != 0)
                goto fail;

        /* Create device sysctl node. */
        vte_sysctl_node(sc);

        /* Load station address. */
        vte_get_macaddr(sc);

        ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "cannot allocate ifnet structure.\n");
                error = ENXIO;
                goto fail;
        }

        if_setsoftc(ifp, sc);
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
        if_setioctlfn(ifp, vte_ioctl);
        if_setstartfn(ifp, vte_start);
        if_setinitfn(ifp, vte_init);
        if_setgetcounterfn(ifp, vte_get_counter);
        if_setsendqlen(ifp, VTE_TX_RING_CNT - 1);
        if_setsendqready(ifp);

        /*
         * Set up the MII bus.
         * The BIOS should have initialized VTE_MPSCCR to catch PHY
         * status changes, so the driver may be able to extract the
         * configured PHY address.  Since it is common for the BIOS
         * to fail to initialize the register (including on the
         * sample board I have), let mii(4) probe for the PHY
         * instead.  This is more reliable than relying on the BIOS
         * initialization.
         *
         * Advertising flow control capability to mii(4) was
         * intentionally disabled due to severe problems in TX
         * pause frame generation.  See vte_rxeof() for more
         * details.
         */
        error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
            vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }

        ether_ifattach(ifp, sc->vte_eaddr);

        /* VLAN capability setup. */
        if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
        if_setcapenable(ifp, if_getcapabilities(ifp));
        /* Tell the upper layer we support VLAN over-sized frames. */
        if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

        error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, vte_intr, sc, &sc->vte_intrhand);
        if (error != 0) {
                device_printf(dev, "could not set up interrupt handler.\n");
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        if (error != 0)
                vte_detach(dev);

        return (error);
}

static int
vte_detach(device_t dev)
{
        struct vte_softc *sc;
        if_t ifp;

        sc = device_get_softc(dev);

        ifp = sc->vte_ifp;
        if (device_is_attached(dev)) {
                VTE_LOCK(sc);
                vte_stop(sc);
                VTE_UNLOCK(sc);
                callout_drain(&sc->vte_tick_ch);
                ether_ifdetach(ifp);
        }

        if (sc->vte_miibus != NULL) {
                device_delete_child(dev, sc->vte_miibus);
                sc->vte_miibus = NULL;
        }
        bus_generic_detach(dev);

        if (sc->vte_intrhand != NULL) {
                bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
                sc->vte_intrhand = NULL;
        }
        if (sc->vte_irq != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
                sc->vte_irq = NULL;
        }
        if (sc->vte_res != NULL) {
                bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
                    sc->vte_res);
                sc->vte_res = NULL;
        }
        if (ifp != NULL) {
                if_free(ifp);
                sc->vte_ifp = NULL;
        }
        vte_dma_free(sc);
        mtx_destroy(&sc->vte_mtx);

        return (0);
}

#define VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)    \
        SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid_list *child, *parent;
        struct sysctl_oid *tree;
        struct vte_hw_stats *stats;
        int error;

        stats = &sc->vte_stats;
        ctx = device_get_sysctl_ctx(sc->vte_dev);
        child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            &sc->vte_int_rx_mod, 0, sysctl_hw_vte_int_mod, "I",
            "vte RX interrupt moderation");
        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            &sc->vte_int_tx_mod, 0, sysctl_hw_vte_int_mod, "I",
            "vte TX interrupt moderation");
        /* Pull in device tunables. */
        sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
        error = resource_int_value(device_get_name(sc->vte_dev),
            device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
        if (error == 0) {
                if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
                    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
                        device_printf(sc->vte_dev, "int_rx_mod value out of "
                            "range; using default: %d\n",
                            VTE_IM_RX_BUNDLE_DEFAULT);
                        sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
                }
        }

        sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
        error = resource_int_value(device_get_name(sc->vte_dev),
            device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
        if (error == 0) {
                if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
                    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
                        device_printf(sc->vte_dev, "int_tx_mod value out of "
                            "range; using default: %d\n",
                            VTE_IM_TX_BUNDLE_DEFAULT);
                        sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
                }
        }

        tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VTE statistics");
        parent = SYSCTL_CHILDREN(tree);

        /* RX statistics. */
        tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
        child = SYSCTL_CHILDREN(tree);
        VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
            &stats->rx_frames, "Good frames");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
            &stats->rx_bcast_frames, "Good broadcast frames");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
            &stats->rx_mcast_frames, "Good multicast frames");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
            &stats->rx_runts, "Too short frames");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
            &stats->rx_crcerrs, "CRC errors");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
            &stats->rx_long_frames,
            "Frames that have longer length than maximum packet length");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
            &stats->rx_fifo_full, "FIFO full");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
            &stats->rx_desc_unavail, "Descriptor unavailable frames");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
            &stats->rx_pause_frames, "Pause control frames");

        /* TX statistics. */
        tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
        child = SYSCTL_CHILDREN(tree);
        VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
            &stats->tx_frames, "Good frames");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
            &stats->tx_underruns, "FIFO underruns");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
            &stats->tx_late_colls, "Late collisions");
        VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
            &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
        bus_addr_t vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct vte_dmamap_arg *ctx;

        if (error != 0)
                return;

        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        ctx = (struct vte_dmamap_arg *)arg;
        ctx->vte_busaddr = segs[0].ds_addr;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
        struct vte_txdesc *txd;
        struct vte_rxdesc *rxd;
        struct vte_dmamap_arg ctx;
        int error, i;

        /* Create parent DMA tag. */
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->vte_dev),       /* parent */
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsize */
            0,                                  /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->vte_cdata.vte_parent_tag);
        if (error != 0) {
                device_printf(sc->vte_dev,
                    "could not create parent DMA tag.\n");
                goto fail;
        }

        /* Create DMA tag for TX descriptor ring. */
        error = bus_dma_tag_create(
            sc->vte_cdata.vte_parent_tag,       /* parent */
            VTE_TX_RING_ALIGN, 0,               /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            VTE_TX_RING_SZ,                     /* maxsize */
            1,                                  /* nsegments */
            VTE_TX_RING_SZ,                     /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->vte_cdata.vte_tx_ring_tag);
        if (error != 0) {
                device_printf(sc->vte_dev,
                    "could not create TX ring DMA tag.\n");
                goto fail;
        }

        /* Create DMA tag for RX free descriptor ring. */
        error = bus_dma_tag_create(
            sc->vte_cdata.vte_parent_tag,       /* parent */
            VTE_RX_RING_ALIGN, 0,               /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            VTE_RX_RING_SZ,                     /* maxsize */
            1,                                  /* nsegments */
            VTE_RX_RING_SZ,                     /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->vte_cdata.vte_rx_ring_tag);
        if (error != 0) {
                device_printf(sc->vte_dev,
                    "could not create RX ring DMA tag.\n");
                goto fail;
        }

        /* Allocate DMA'able memory and load the DMA map for TX ring. */
        error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
            (void **)&sc->vte_cdata.vte_tx_ring,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->vte_cdata.vte_tx_ring_map);
        if (error != 0) {
                device_printf(sc->vte_dev,
                    "could not allocate DMA'able memory for TX ring.\n");
                goto fail;
        }
        ctx.vte_busaddr = 0;
        error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
            sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
            VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
        if (error != 0 || ctx.vte_busaddr == 0) {
                device_printf(sc->vte_dev,
                    "could not load DMA'able memory for TX ring.\n");
                goto fail;
        }
        sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

        /* Allocate DMA'able memory and load the DMA map for RX ring. */
        error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
            (void **)&sc->vte_cdata.vte_rx_ring,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->vte_cdata.vte_rx_ring_map);
        if (error != 0) {
                device_printf(sc->vte_dev,
                    "could not allocate DMA'able memory for RX ring.\n");
                goto fail;
        }
        ctx.vte_busaddr = 0;
        error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
            sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
            VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
        if (error != 0 || ctx.vte_busaddr == 0) {
                device_printf(sc->vte_dev,
                    "could not load DMA'able memory for RX ring.\n");
                goto fail;
        }
        sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

        /* Create TX buffer parent tag. */
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->vte_dev),       /* parent */
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsize */
            0,                                  /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->vte_cdata.vte_buffer_tag);
        if (error != 0) {
                device_printf(sc->vte_dev,
                    "could not create parent buffer DMA tag.\n");
                goto fail;
        }

        /* Create DMA tag for TX buffers. */
        error = bus_dma_tag_create(
            sc->vte_cdata.vte_buffer_tag,       /* parent */
            1, 0,                               /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            MCLBYTES,                           /* maxsize */
            1,                                  /* nsegments */
            MCLBYTES,                           /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->vte_cdata.vte_tx_tag);
        if (error != 0) {
                device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
                goto fail;
        }

        /* Create DMA tag for RX buffers. */
        error = bus_dma_tag_create(
            sc->vte_cdata.vte_buffer_tag,       /* parent */
            VTE_RX_BUF_ALIGN, 0,                /* alignment, boundary */
            BUS_SPACE_MAXADDR,                  /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filter, filterarg */
            MCLBYTES,                           /* maxsize */
            1,                                  /* nsegments */
            MCLBYTES,                           /* maxsegsize */
            0,                                  /* flags */
            NULL, NULL,                         /* lockfunc, lockarg */
            &sc->vte_cdata.vte_rx_tag);
        if (error != 0) {
                device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
                goto fail;
        }
        /* Create DMA maps for TX buffers. */
        for (i = 0; i < VTE_TX_RING_CNT; i++) {
                txd = &sc->vte_cdata.vte_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_dmamap = NULL;
                error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
                    &txd->tx_dmamap);
                if (error != 0) {
                        device_printf(sc->vte_dev,
                            "could not create TX dmamap.\n");
                        goto fail;
                }
        }
        /* Create DMA maps for RX buffers. */
        if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
            &sc->vte_cdata.vte_rx_sparemap)) != 0) {
                device_printf(sc->vte_dev,
                    "could not create spare RX dmamap.\n");
                goto fail;
        }
        for (i = 0; i < VTE_RX_RING_CNT; i++) {
                rxd = &sc->vte_cdata.vte_rxdesc[i];
                rxd->rx_m = NULL;
                rxd->rx_dmamap = NULL;
                error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
                    &rxd->rx_dmamap);
                if (error != 0) {
                        device_printf(sc->vte_dev,
                            "could not create RX dmamap.\n");
                        goto fail;
                }
        }

fail:
        return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
        struct vte_txdesc *txd;
        struct vte_rxdesc *rxd;
        int i;

        /* TX buffers. */
        if (sc->vte_cdata.vte_tx_tag != NULL) {
                for (i = 0; i < VTE_TX_RING_CNT; i++) {
                        txd = &sc->vte_cdata.vte_txdesc[i];
                        if (txd->tx_dmamap != NULL) {
                                bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
                                    txd->tx_dmamap);
                                txd->tx_dmamap = NULL;
                        }
                }
                bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
                sc->vte_cdata.vte_tx_tag = NULL;
        }
        /* RX buffers */
        if (sc->vte_cdata.vte_rx_tag != NULL) {
                for (i = 0; i < VTE_RX_RING_CNT; i++) {
                        rxd = &sc->vte_cdata.vte_rxdesc[i];
                        if (rxd->rx_dmamap != NULL) {
                                bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
                                    rxd->rx_dmamap);
                                rxd->rx_dmamap = NULL;
                        }
                }
                if (sc->vte_cdata.vte_rx_sparemap != NULL) {
                        bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
                            sc->vte_cdata.vte_rx_sparemap);
                        sc->vte_cdata.vte_rx_sparemap = NULL;
                }
                bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
                sc->vte_cdata.vte_rx_tag = NULL;
        }
        /* TX descriptor ring. */
        if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
                if (sc->vte_cdata.vte_tx_ring_paddr != 0)
                        bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
                            sc->vte_cdata.vte_tx_ring_map);
                if (sc->vte_cdata.vte_tx_ring != NULL)
                        bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
                            sc->vte_cdata.vte_tx_ring,
                            sc->vte_cdata.vte_tx_ring_map);
                sc->vte_cdata.vte_tx_ring = NULL;
                sc->vte_cdata.vte_tx_ring_paddr = 0;
                bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
                sc->vte_cdata.vte_tx_ring_tag = NULL;
        }
        /* RX ring. */
        if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
                if (sc->vte_cdata.vte_rx_ring_paddr != 0)
                        bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
                            sc->vte_cdata.vte_rx_ring_map);
                if (sc->vte_cdata.vte_rx_ring != NULL)
                        bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
                            sc->vte_cdata.vte_rx_ring,
                            sc->vte_cdata.vte_rx_ring_map);
                sc->vte_cdata.vte_rx_ring = NULL;
                sc->vte_cdata.vte_rx_ring_paddr = 0;
                bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
                sc->vte_cdata.vte_rx_ring_tag = NULL;
        }
        if (sc->vte_cdata.vte_buffer_tag != NULL) {
                bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
                sc->vte_cdata.vte_buffer_tag = NULL;
        }
        if (sc->vte_cdata.vte_parent_tag != NULL) {
                bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
                sc->vte_cdata.vte_parent_tag = NULL;
        }
}

static int
vte_shutdown(device_t dev)
{

        return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
        struct vte_softc *sc;
        if_t ifp;

        sc = device_get_softc(dev);

        VTE_LOCK(sc);
        ifp = sc->vte_ifp;
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                vte_stop(sc);
        VTE_UNLOCK(sc);

        return (0);
}

static int
vte_resume(device_t dev)
{
        struct vte_softc *sc;
        if_t ifp;

        sc = device_get_softc(dev);

        VTE_LOCK(sc);
        ifp = sc->vte_ifp;
        if ((if_getflags(ifp) & IFF_UP) != 0) {
                if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
                vte_init_locked(sc);
        }
        VTE_UNLOCK(sc);

        return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
        struct vte_txdesc *txd;
        struct mbuf *m, *n;
        bus_dma_segment_t txsegs[1];
        int copy, error, nsegs, padlen;

        VTE_LOCK_ASSERT(sc);

        M_ASSERTPKTHDR((*m_head));

        txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
        m = *m_head;
        /*
         * The controller doesn't auto-pad, so we have to pad short
         * frames out to the minimum frame length ourselves.
         */
        if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
                padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
        else
                padlen = 0;
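        /*
         * For example, a runt frame such as a 42-byte ARP request is
         * extended below with zero padding until it reaches
         * VTE_MIN_FRAMELEN, the minimum Ethernet frame length sans FCS.
         */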

        /*
         * The controller does not support multi-fragmented TX buffers
         * and spends most of its TX processing time de-fragmenting
         * them; a faster CPU or a more advanced controller DMA engine
         * would be required to speed up TX path processing.
         * To mitigate the de-fragmenting issue, perform a deep copy
         * from the fragmented mbuf chain into a pre-allocated mbuf
         * cluster at the extra cost of kernel memory.  For frames
         * that are composed of a single TX buffer, the deep copy is
         * bypassed.
         */
        if (tx_deep_copy != 0) {
                copy = 0;
                if (m->m_next != NULL)
                        copy++;
                if (padlen > 0 && (M_WRITABLE(m) == 0 ||
                    padlen > M_TRAILINGSPACE(m)))
                        copy++;
                if (copy != 0) {
                        /* Avoid expensive m_defrag(9) and do deep copy. */
                        n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
                        m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
                        n->m_pkthdr.len = m->m_pkthdr.len;
                        n->m_len = m->m_pkthdr.len;
                        m = n;
                        txd->tx_flags |= VTE_TXMBUF;
                }

                if (padlen > 0) {
                        /* Zero out the bytes in the pad area. */
                        bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
                        m->m_pkthdr.len += padlen;
                        m->m_len = m->m_pkthdr.len;
                }
        } else {
                if (M_WRITABLE(m) == 0) {
                        if (m->m_next != NULL || padlen > 0) {
                                /* Get a writable copy. */
                                m = m_dup(*m_head, M_NOWAIT);
                                /* Release original mbuf chains. */
                                m_freem(*m_head);
                                if (m == NULL) {
                                        *m_head = NULL;
                                        return (NULL);
                                }
                                *m_head = m;
                        }
                }

                if (m->m_next != NULL) {
                        m = m_defrag(*m_head, M_NOWAIT);
                        if (m == NULL) {
                                m_freem(*m_head);
                                *m_head = NULL;
                                return (NULL);
                        }
                        *m_head = m;
                }

                if (padlen > 0) {
                        if (M_TRAILINGSPACE(m) < padlen) {
                                m = m_defrag(*m_head, M_NOWAIT);
                                if (m == NULL) {
                                        m_freem(*m_head);
                                        *m_head = NULL;
                                        return (NULL);
                                }
                                *m_head = m;
                        }
                        /* Zero out the bytes in the pad area. */
                        bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
                        m->m_pkthdr.len += padlen;
                        m->m_len = m->m_pkthdr.len;
                }
        }

        error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
            txd->tx_dmamap, m, txsegs, &nsegs, 0);
        if (error != 0) {
                txd->tx_flags &= ~VTE_TXMBUF;
                return (NULL);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
        bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
            BUS_DMASYNC_PREWRITE);

        txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
        txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
        sc->vte_cdata.vte_tx_cnt++;
        /* Update producer index. */
        VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

        /* Finally hand over ownership to controller. */
        txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
        txd->tx_m = m;

        return (txd);
}

static void
vte_start(if_t ifp)
{
        struct vte_softc *sc;

        sc = if_getsoftc(ifp);
        VTE_LOCK(sc);
        vte_start_locked(sc);
        VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
        if_t ifp;
        struct vte_txdesc *txd;
        struct mbuf *m_head;
        int enq;

        ifp = sc->vte_ifp;

        if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
                return;

        for (enq = 0; !if_sendq_empty(ifp); ) {
                /* Reserve one free TX descriptor. */
                if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
                        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                        break;
                }
                m_head = if_dequeue(ifp);
                if (m_head == NULL)
                        break;
                /*
                 * Pack the data into the transmit ring. If we
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
                if ((txd = vte_encap(sc, &m_head)) == NULL) {
                        if (m_head != NULL)
                                if_sendq_prepend(ifp, m_head);
                        break;
                }

                enq++;
                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                ETHER_BPF_MTAP(ifp, m_head);
                /* Free consumed TX frame. */
                if ((txd->tx_flags & VTE_TXMBUF) != 0)
                        m_freem(m_head);
        }

        if (enq > 0) {
                bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
                    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
                    BUS_DMASYNC_PREWRITE);
                CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
                sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
        }
}

static void
vte_watchdog(struct vte_softc *sc)
{
        if_t ifp;

        VTE_LOCK_ASSERT(sc);

        if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
                return;

        ifp = sc->vte_ifp;
        if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
        if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
        vte_init_locked(sc);
        if (!if_sendq_empty(ifp))
                vte_start_locked(sc);
}

static int
vte_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
        struct vte_softc *sc;
        struct ifreq *ifr;
        struct mii_data *mii;
        int error;

        sc = if_getsoftc(ifp);
        ifr = (struct ifreq *)data;
        error = 0;
        switch (cmd) {
        case SIOCSIFFLAGS:
                VTE_LOCK(sc);
                if ((if_getflags(ifp) & IFF_UP) != 0) {
                        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
                            ((if_getflags(ifp) ^ sc->vte_if_flags) &
                            (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                vte_rxfilter(sc);
                        else
                                vte_init_locked(sc);
                } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                        vte_stop(sc);
                sc->vte_if_flags = if_getflags(ifp);
                VTE_UNLOCK(sc);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                VTE_LOCK(sc);
                if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                        vte_rxfilter(sc);
                VTE_UNLOCK(sc);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                mii = device_get_softc(sc->vte_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
        struct mii_data *mii;
        uint16_t mcr;

        VTE_LOCK_ASSERT(sc);

        mii = device_get_softc(sc->vte_miibus);
        mcr = CSR_READ_2(sc, VTE_MCR0);
        mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
                        mcr |= MCR0_FC_ENB;
                /*
                 * The data sheet is not clear on whether the controller
                 * honors received pause frames.  There is no separate
                 * control bit for RX pause frames, so just enable the
                 * MCR0_FC_ENB bit.
                 */
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
                        mcr |= MCR0_FC_ENB;
#endif
        }
        CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

        /* Reading the counter registers clears their contents. */
        CSR_READ_2(sc, VTE_CNT_RX_DONE);
        CSR_READ_2(sc, VTE_CNT_MECNT0);
        CSR_READ_2(sc, VTE_CNT_MECNT1);
        CSR_READ_2(sc, VTE_CNT_MECNT2);
        CSR_READ_2(sc, VTE_CNT_MECNT3);
        CSR_READ_2(sc, VTE_CNT_TX_DONE);
        CSR_READ_2(sc, VTE_CNT_MECNT4);
        CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
        struct vte_hw_stats *stat;
        uint16_t value;

        VTE_LOCK_ASSERT(sc);

        stat = &sc->vte_stats;

        CSR_READ_2(sc, VTE_MECISR);
        /* RX stats. */
        stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
        value = CSR_READ_2(sc, VTE_CNT_MECNT0);
        stat->rx_bcast_frames += (value >> 8);
        stat->rx_mcast_frames += (value & 0xFF);
        value = CSR_READ_2(sc, VTE_CNT_MECNT1);
        stat->rx_runts += (value >> 8);
        stat->rx_crcerrs += (value & 0xFF);
        value = CSR_READ_2(sc, VTE_CNT_MECNT2);
        stat->rx_long_frames += (value & 0xFF);
        value = CSR_READ_2(sc, VTE_CNT_MECNT3);
        stat->rx_fifo_full += (value >> 8);
        stat->rx_desc_unavail += (value & 0xFF);

        /* TX stats. */
        stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
        value = CSR_READ_2(sc, VTE_CNT_MECNT4);
        stat->tx_underruns += (value >> 8);
        stat->tx_late_colls += (value & 0xFF);

        value = CSR_READ_2(sc, VTE_CNT_PAUSE);
        stat->tx_pause_frames += (value >> 8);
        stat->rx_pause_frames += (value & 0xFF);
}

static uint64_t
vte_get_counter(if_t ifp, ift_counter cnt)
{
        struct vte_softc *sc;
        struct vte_hw_stats *stat;

        sc = if_getsoftc(ifp);
        stat = &sc->vte_stats;

        switch (cnt) {
        case IFCOUNTER_OPACKETS:
                return (stat->tx_frames);
        case IFCOUNTER_COLLISIONS:
                return (stat->tx_late_colls);
        case IFCOUNTER_OERRORS:
                return (stat->tx_late_colls + stat->tx_underruns);
        case IFCOUNTER_IPACKETS:
                return (stat->rx_frames);
        case IFCOUNTER_IERRORS:
                return (stat->rx_crcerrs + stat->rx_runts +
                    stat->rx_long_frames + stat->rx_fifo_full);
        default:
                return (if_get_counter_default(ifp, cnt));
        }
}

static void
vte_intr(void *arg)
{
        struct vte_softc *sc;
        if_t ifp;
        uint16_t status;
        int n;

        sc = (struct vte_softc *)arg;
        VTE_LOCK(sc);

        ifp = sc->vte_ifp;
        /* Reading VTE_MISR acknowledges interrupts. */
        status = CSR_READ_2(sc, VTE_MISR);
        if ((status & VTE_INTRS) == 0) {
                /* Not ours. */
                VTE_UNLOCK(sc);
                return;
        }

        /* Disable interrupts. */
        CSR_WRITE_2(sc, VTE_MIER, 0);
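        /*
         * Bound the service loop to eight passes so a continuously
         * asserted status cannot keep the handler spinning forever;
         * interrupts are re-enabled below once the loop exits.
         */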
        for (n = 8; (status & VTE_INTRS) != 0;) {
                if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
                        break;
                if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
                    MISR_RX_FIFO_FULL)) != 0)
                        vte_rxeof(sc);
                if ((status & MISR_TX_DONE) != 0)
                        vte_txeof(sc);
                if ((status & MISR_EVENT_CNT_OFLOW) != 0)
                        vte_stats_update(sc);
                if (!if_sendq_empty(ifp))
                        vte_start_locked(sc);
                if (--n > 0)
                        status = CSR_READ_2(sc, VTE_MISR);
                else
                        break;
        }

        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
                /* Re-enable interrupts. */
                CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
        }
        VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
        if_t ifp;
        struct vte_txdesc *txd;
        uint16_t status;
        int cons, prog;

        VTE_LOCK_ASSERT(sc);

        ifp = sc->vte_ifp;

        if (sc->vte_cdata.vte_tx_cnt == 0)
                return;
        bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
            sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
            BUS_DMASYNC_POSTWRITE);
        cons = sc->vte_cdata.vte_tx_cons;
        /*
         * Go through our TX list and free mbufs for those
         * frames which have been transmitted.
         */
        for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
                txd = &sc->vte_cdata.vte_txdesc[cons];
                status = le16toh(txd->tx_desc->dtst);
                if ((status & VTE_DTST_TX_OWN) != 0)
                        break;
                sc->vte_cdata.vte_tx_cnt--;
                /* Reclaim transmitted mbufs. */
                bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
                if ((txd->tx_flags & VTE_TXMBUF) == 0)
                        m_freem(txd->tx_m);
                txd->tx_flags &= ~VTE_TXMBUF;
                txd->tx_m = NULL;
                prog++;
                VTE_DESC_INC(cons, VTE_TX_RING_CNT);
        }

        if (prog > 0) {
                if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
                sc->vte_cdata.vte_tx_cons = cons;
                /*
                 * Unarm the watchdog timer only when there are no
                 * pending frames in the TX queue.
                 */
                if (sc->vte_cdata.vte_tx_cnt == 0)
                        sc->vte_watchdog_timer = 0;
        }
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int nsegs;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        m->m_len = m->m_pkthdr.len = MCLBYTES;
        m_adj(m, sizeof(uint32_t));
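        /*
         * Reserving sizeof(uint32_t) bytes keeps the usable buffer
         * length at MCLBYTES - sizeof(uint32_t), the same length the
         * RX descriptors are re-armed with in vte_rxeof(), and keeps
         * the buffer size a multiple of 4 as the controller requires.
         */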

        if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
            sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
        sc->vte_cdata.vte_rx_sparemap = map;
        bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_PREREAD);
        rxd->rx_m = m;
        rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
        rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
        rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

        return (0);
}

/*
 * This controller is not expected to be found on strict-alignment
 * architectures, but make it work there for completeness.
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(if_t ifp, struct mbuf *m)
{
        uint16_t *src, *dst;
        int i;

        src = mtod(m, uint16_t *);
        dst = src - 1;
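        /*
         * Copy the frame backwards by ETHER_ALIGN (two bytes) using
         * 16-bit loads and stores so the IP header that follows the
         * 14-byte Ethernet header ends up 32-bit aligned.
         */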

        for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
                *dst++ = *src++;
        m->m_data -= ETHER_ALIGN;
        return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
        if_t ifp;
        struct vte_rxdesc *rxd;
        struct mbuf *m;
        uint16_t status, total_len;
        int cons, prog;

        bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
            sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
            BUS_DMASYNC_POSTWRITE);
        cons = sc->vte_cdata.vte_rx_cons;
        ifp = sc->vte_ifp;
        for (prog = 0; (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; prog++,
            VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
                rxd = &sc->vte_cdata.vte_rxdesc[cons];
                status = le16toh(rxd->rx_desc->drst);
                if ((status & VTE_DRST_RX_OWN) != 0)
                        break;
                total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
                m = rxd->rx_m;
                if ((status & VTE_DRST_RX_OK) == 0) {
                        /* Discard errored frame. */
                        rxd->rx_desc->drlen =
                            htole16(MCLBYTES - sizeof(uint32_t));
                        rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
                        continue;
                }
                if (vte_newbuf(sc, rxd) != 0) {
                        if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
                        rxd->rx_desc->drlen =
                            htole16(MCLBYTES - sizeof(uint32_t));
                        rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
                        continue;
                }

                /*
                 * It seems there is no way to strip FCS bytes.
                 */
                m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
                m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
                vte_fixup_rx(ifp, m);
#endif
                VTE_UNLOCK(sc);
                if_input(ifp, m);
                VTE_LOCK(sc);
        }

        if (prog > 0) {
                /* Update the consumer index. */
                sc->vte_cdata.vte_rx_cons = cons;
                /*
                 * Sync the updated RX descriptors so that the
                 * controller sees the modified RX buffer addresses.
                 */
                bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
                    sc->vte_cdata.vte_rx_ring_map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
                /*
                 * Update the residue counter.  The controller does not
                 * keep track of the number of available RX descriptors,
                 * so the driver would have to update VTE_MRDCR to let
                 * the controller know how many free RX descriptors were
                 * added.  This is a mechanism similar to the one used in
                 * VIA Velocity controllers, and it indicates that the
                 * controller just polls the OWN bit of the current RX
                 * descriptor pointer.  A couple of severe issues were
                 * seen on a sample board where the controller
                 * continuously emitted TX pause frames once the RX
                 * pause threshold was crossed.  Once triggered it never
                 * recovered from that state; I couldn't find a way to
                 * make it work again, at least.  This issue effectively
                 * disconnected the system from the network.  Also, the
                 * controller used 00:00:00:00:00:00 as the source
                 * station address of the TX pause frames, which is
                 * probably one of the reasons why the vendor recommends
                 * not enabling flow control on the R6040 controller.
                 */
                CSR_WRITE_2(sc, VTE_MRDCR, prog |
                    (((VTE_RX_RING_CNT * 2) / 10) <<
                    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
        }
}

static void
vte_tick(void *arg)
{
        struct vte_softc *sc;
        struct mii_data *mii;

        sc = (struct vte_softc *)arg;

        VTE_LOCK_ASSERT(sc);

        mii = device_get_softc(sc->vte_miibus);
        mii_tick(mii);
        vte_stats_update(sc);
        vte_txeof(sc);
        vte_watchdog(sc);
        callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
        uint16_t mcr, mdcsc;
        int i;

        mdcsc = CSR_READ_2(sc, VTE_MDCSC);
        mcr = CSR_READ_2(sc, VTE_MCR1);
        CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
        for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
                DELAY(10);
                if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
                        break;
        }
        if (i == 0)
                device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
        /*
         * Follow the vendor's recommended way to reset the MAC.  The
         * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
         * is not reliable, so manually reset the internal state
         * machine.
         */
        CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
        CSR_WRITE_2(sc, VTE_MACSM, 0);
        DELAY(5000);

        /*
         * On some SoCs (like Vortex86DX3) MDC speed control register value
         * needs to be restored to original value instead of default one,
         * otherwise some PHY registers may fail to be read.
         */
        if (mdcsc != MDCSC_DEFAULT)
                CSR_WRITE_2(sc, VTE_MDCSC, mdcsc);
}

static void
vte_init(void *xsc)
{
        struct vte_softc *sc;

        sc = (struct vte_softc *)xsc;
        VTE_LOCK(sc);
        vte_init_locked(sc);
        VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
        if_t ifp;
        bus_addr_t paddr;
        uint8_t *eaddr;

        VTE_LOCK_ASSERT(sc);

        ifp = sc->vte_ifp;

        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
                return;
        /*
         * Cancel any pending I/O.
         */
        vte_stop(sc);
        /*
         * Reset the chip to a known state.
         */
        vte_reset(sc);

        /* Initialize RX descriptors. */
        if (vte_init_rx_ring(sc) != 0) {
                device_printf(sc->vte_dev, "no memory for RX buffers.\n");
                vte_stop(sc);
                return;
        }
        if (vte_init_tx_ring(sc) != 0) {
                device_printf(sc->vte_dev, "no memory for TX buffers.\n");
                vte_stop(sc);
                return;
        }

        /*
         * Reprogram the station address.  The controller supports up
         * to 4 different station addresses, so the driver programs
         * the first station address as its own ethernet address and
         * configures the remaining three addresses as perfect
         * multicast addresses.
         */
        eaddr = if_getlladdr(sc->vte_ifp);
        CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
        CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
        CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

        /* Set TX descriptor base addresses. */
        paddr = sc->vte_cdata.vte_tx_ring_paddr;
        CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
        CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
        /* Set RX descriptor base addresses. */
        paddr = sc->vte_cdata.vte_rx_ring_paddr;
        CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
        CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
        /*
         * Initialize RX descriptor residue counter and set RX
         * pause threshold to 20% of available RX descriptors.
         * See comments on vte_rxeof() for details on flow control
         * issues.
         */
        CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
            (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

        /*
         * Always use the maximum frame size the controller can
         * support.  Otherwise received frames that have a longer
         * frame length than the vte(4) MTU would be silently dropped
         * by the controller, which would break path-MTU discovery as
         * the sender wouldn't get any responses from the receiver.
         * The RX buffer size should be a multiple of 4.
         * Note: jumbo frames are silently ignored by the controller
         * and even the MAC counters do not detect them.
         */
        CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

        /* Configure FIFO. */
        CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
            MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
            MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

        /*
         * Configure TX/RX MACs.  The actual resolved duplex and flow
         * control configuration is done after detecting a valid
         * link.  Note that we also do not enable early interrupts
         * here, since FreeBSD does not suffer from the interrupt
         * latency problems Windows does.
         */
        CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
        /*
         * We manually keep track of PHY status changes to
         * configure resolved duplex and flow control since only
         * duplex configuration can be automatically reflected to
         * MCR0.
         */
        CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
            MCR1_EXCESS_COL_RETRY_16);

        /* Initialize RX filter. */
        vte_rxfilter(sc);

        /* Disable TX/RX interrupt moderation control. */
        CSR_WRITE_2(sc, VTE_MRICR, 0);
        CSR_WRITE_2(sc, VTE_MTICR, 0);

        /* Enable MAC event counter interrupts. */
        CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
        /* Clear MAC statistics. */
        vte_stats_clear(sc);

        /* Acknowledge all pending interrupts and clear them. */
        CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
        CSR_WRITE_2(sc, VTE_MISR, 0);

        sc->vte_flags &= ~VTE_FLAG_LINK;
        /* Switch to the current media. */
        vte_mediachange_locked(ifp);

        callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

        if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}

static void
vte_stop(struct vte_softc *sc)
{
        if_t ifp;
        struct vte_txdesc *txd;
        struct vte_rxdesc *rxd;
        int i;

        VTE_LOCK_ASSERT(sc);
        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp = sc->vte_ifp;
        if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
        sc->vte_flags &= ~VTE_FLAG_LINK;
        callout_stop(&sc->vte_tick_ch);
        sc->vte_watchdog_timer = 0;
        vte_stats_update(sc);
        /* Disable interrupts. */
        CSR_WRITE_2(sc, VTE_MIER, 0);
        CSR_WRITE_2(sc, VTE_MECIER, 0);
        /* Stop RX/TX MACs. */
        vte_stop_mac(sc);
        /* Clear interrupts. */
        CSR_READ_2(sc, VTE_MISR);
        /*
         * Free TX/RX mbufs still in the queues.
         */
        for (i = 0; i < VTE_RX_RING_CNT; i++) {
                rxd = &sc->vte_cdata.vte_rxdesc[i];
                if (rxd->rx_m != NULL) {
                        bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
                            rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
                            rxd->rx_dmamap);
                        m_freem(rxd->rx_m);
                        rxd->rx_m = NULL;
                }
        }
        for (i = 0; i < VTE_TX_RING_CNT; i++) {
                txd = &sc->vte_cdata.vte_txdesc[i];
                if (txd->tx_m != NULL) {
                        bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
                            txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
                            txd->tx_dmamap);
                        if ((txd->tx_flags & VTE_TXMBUF) == 0)
                                m_freem(txd->tx_m);
                        txd->tx_m = NULL;
                        txd->tx_flags &= ~VTE_TXMBUF;
                }
        }
        /* Free TX mbuf pools used for deep copy. */
        for (i = 0; i < VTE_TX_RING_CNT; i++) {
                if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
                        m_freem(sc->vte_cdata.vte_txmbufs[i]);
                        sc->vte_cdata.vte_txmbufs[i] = NULL;
                }
        }
}

static void
vte_start_mac(struct vte_softc *sc)
{
        uint16_t mcr;
        int i;

        VTE_LOCK_ASSERT(sc);

        /* Enable RX/TX MACs. */
        mcr = CSR_READ_2(sc, VTE_MCR0);
        if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
            (MCR0_RX_ENB | MCR0_TX_ENB)) {
                mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
                CSR_WRITE_2(sc, VTE_MCR0, mcr);
                for (i = VTE_TIMEOUT; i > 0; i--) {
                        mcr = CSR_READ_2(sc, VTE_MCR0);
                        if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
                            (MCR0_RX_ENB | MCR0_TX_ENB))
                                break;
                        DELAY(10);
                }
                if (i == 0)
                        device_printf(sc->vte_dev,
                            "could not enable RX/TX MAC(0x%04x)!\n", mcr);
        }
}

static void
vte_stop_mac(struct vte_softc *sc)
{
        uint16_t mcr;
        int i;

        VTE_LOCK_ASSERT(sc);

        /* Disable RX/TX MACs. */
        mcr = CSR_READ_2(sc, VTE_MCR0);
        if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
                mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
                CSR_WRITE_2(sc, VTE_MCR0, mcr);
                for (i = VTE_TIMEOUT; i > 0; i--) {
                        mcr = CSR_READ_2(sc, VTE_MCR0);
                        if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
                                break;
                        DELAY(10);
                }
                if (i == 0)
                        device_printf(sc->vte_dev,
                            "could not disable RX/TX MAC(0x%04x)!\n", mcr);
        }
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
        struct vte_tx_desc *desc;
        struct vte_txdesc *txd;
        bus_addr_t addr;
        int i;

        VTE_LOCK_ASSERT(sc);

        sc->vte_cdata.vte_tx_prod = 0;
        sc->vte_cdata.vte_tx_cons = 0;
        sc->vte_cdata.vte_tx_cnt = 0;

        /* Pre-allocate TX mbufs for deep copy. */
        if (tx_deep_copy != 0) {
                for (i = 0; i < VTE_TX_RING_CNT; i++) {
                        sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT,
                            MT_DATA, M_PKTHDR);
                        if (sc->vte_cdata.vte_txmbufs[i] == NULL)
                                return (ENOBUFS);
                        sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
                        sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
                }
        }
        desc = sc->vte_cdata.vte_tx_ring;
        bzero(desc, VTE_TX_RING_SZ);
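        /*
         * Chain the descriptors into a ring through their next-descriptor
         * pointers (dtnp); the last descriptor points back at the first.
         * The RX ring in vte_init_rx_ring() is linked the same way via
         * drnp.
         */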
        for (i = 0; i < VTE_TX_RING_CNT; i++) {
                txd = &sc->vte_cdata.vte_txdesc[i];
                txd->tx_m = NULL;
                if (i != VTE_TX_RING_CNT - 1)
                        addr = sc->vte_cdata.vte_tx_ring_paddr +
                            sizeof(struct vte_tx_desc) * (i + 1);
                else
                        addr = sc->vte_cdata.vte_tx_ring_paddr +
                            sizeof(struct vte_tx_desc) * 0;
                desc = &sc->vte_cdata.vte_tx_ring[i];
                desc->dtnp = htole32(addr);
                txd->tx_desc = desc;
        }

        bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
            sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
            BUS_DMASYNC_PREWRITE);
        return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
        struct vte_rx_desc *desc;
        struct vte_rxdesc *rxd;
        bus_addr_t addr;
        int i;

        VTE_LOCK_ASSERT(sc);

        sc->vte_cdata.vte_rx_cons = 0;
        desc = sc->vte_cdata.vte_rx_ring;
        bzero(desc, VTE_RX_RING_SZ);
        for (i = 0; i < VTE_RX_RING_CNT; i++) {
                rxd = &sc->vte_cdata.vte_rxdesc[i];
                rxd->rx_m = NULL;
                if (i != VTE_RX_RING_CNT - 1)
                        addr = sc->vte_cdata.vte_rx_ring_paddr +
                            sizeof(struct vte_rx_desc) * (i + 1);
                else
                        addr = sc->vte_cdata.vte_rx_ring_paddr +
                            sizeof(struct vte_rx_desc) * 0;
                desc = &sc->vte_cdata.vte_rx_ring[i];
                desc->drnp = htole32(addr);
                rxd->rx_desc = desc;
                if (vte_newbuf(sc, rxd) != 0)
                        return (ENOBUFS);
        }

        bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
            sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
            BUS_DMASYNC_PREWRITE);

        return (0);
}

struct vte_maddr_ctx {
        uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
        uint16_t mchash[4];
        u_int nperf;
};

static u_int
vte_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
        struct vte_maddr_ctx *ctx = arg;
        uint8_t *eaddr;
        uint32_t crc;

        /*
         * Program the first 3 multicast groups into the perfect filter.
         * For all others, use the hash table.
         */
        if (ctx->nperf < VTE_RXFILT_PERFECT_CNT) {
                eaddr = LLADDR(sdl);
                ctx->rxfilt_perf[ctx->nperf][0] = eaddr[1] << 8 | eaddr[0];
                ctx->rxfilt_perf[ctx->nperf][1] = eaddr[3] << 8 | eaddr[2];
                ctx->rxfilt_perf[ctx->nperf][2] = eaddr[5] << 8 | eaddr[4];
                ctx->nperf++;

                return (1);
        }
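        /*
         * Hash filter: the top two bits of the big-endian CRC select
         * one of the four 16-bit hash registers and the next four bits
         * select a bit within it, i.e. the upper six CRC bits index a
         * 64-bit hash table.
         */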
        crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
        ctx->mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);

        return (1);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
        if_t ifp;
        struct vte_maddr_ctx ctx;
        uint16_t mcr;
        int i;

        VTE_LOCK_ASSERT(sc);

        ifp = sc->vte_ifp;

        bzero(ctx.mchash, sizeof(ctx.mchash));
        for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
                ctx.rxfilt_perf[i][0] = 0xFFFF;
                ctx.rxfilt_perf[i][1] = 0xFFFF;
                ctx.rxfilt_perf[i][2] = 0xFFFF;
        }
        ctx.nperf = 0;

        mcr = CSR_READ_2(sc, VTE_MCR0);
        mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
        mcr |= MCR0_BROADCAST_DIS;
        if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
                mcr &= ~MCR0_BROADCAST_DIS;
        if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
                if ((if_getflags(ifp) & IFF_PROMISC) != 0)
                        mcr |= MCR0_PROMISC;
                if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
                        mcr |= MCR0_MULTICAST;
                ctx.mchash[0] = 0xFFFF;
                ctx.mchash[1] = 0xFFFF;
                ctx.mchash[2] = 0xFFFF;
                ctx.mchash[3] = 0xFFFF;
                goto chipit;
        }

        if_foreach_llmaddr(ifp, vte_hash_maddr, &ctx);
        if (ctx.mchash[0] != 0 || ctx.mchash[1] != 0 ||
            ctx.mchash[2] != 0 || ctx.mchash[3] != 0)
                mcr |= MCR0_MULTICAST;

chipit:
        /* Program multicast hash table. */
        CSR_WRITE_2(sc, VTE_MAR0, ctx.mchash[0]);
        CSR_WRITE_2(sc, VTE_MAR1, ctx.mchash[1]);
        CSR_WRITE_2(sc, VTE_MAR2, ctx.mchash[2]);
        CSR_WRITE_2(sc, VTE_MAR3, ctx.mchash[3]);
        /* Program perfect filter table. */
        for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
                CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
                    ctx.rxfilt_perf[i][0]);
                CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
                    ctx.rxfilt_perf[i][1]);
                CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
                    ctx.rxfilt_perf[i][2]);
        }
        CSR_WRITE_2(sc, VTE_MCR0, mcr);
        CSR_READ_2(sc, VTE_MCR0);
}

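/*
 * Generic range-checked integer sysctl handler: accept a new value
 * only when it lies within [low, high]; otherwise return EINVAL.
 */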
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
        int error, value;

        if (arg1 == NULL)
                return (EINVAL);
        value = *(int *)arg1;
        error = sysctl_handle_int(oidp, &value, 0, req);
        if (error || req->newptr == NULL)
                return (error);
        if (value < low || value > high)
                return (EINVAL);
        *(int *)arg1 = value;

        return (0);
}

static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{

        return (sysctl_int_range(oidp, arg1, arg2, req,
            VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}
