/*-
 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
 *
 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
 *
 * $FreeBSD: src/sys/dev/ae/if_ae.c,v 1.1.2.3.2.1 2009/04/15 03:14:26 kensmith Exp $
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include <dev/netif/mii_layer/miivar.h>

#include <dev/netif/ae/if_aereg.h>
#include <dev/netif/ae/if_aevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Devices supported by this driver.
 */
static const struct ae_dev {
	uint16_t	ae_vendorid;
	uint16_t	ae_deviceid;
	const char	*ae_name;
} ae_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
	    "Attansic Technology Corp, L2 Fast Ethernet" },
	/* Required last entry */
	{ 0, 0, NULL }
};

static int	ae_probe(device_t);
static int	ae_attach(device_t);
static int	ae_detach(device_t);
static int	ae_shutdown(device_t);
static int	ae_suspend(device_t);
static int	ae_resume(device_t);
static int	ae_miibus_readreg(device_t, int, int);
static int	ae_miibus_writereg(device_t, int, int, int);
static void	ae_miibus_statchg(device_t);

static int	ae_mediachange(struct ifnet *);
static void	ae_mediastatus(struct ifnet *, struct ifmediareq *);
static void	ae_init(void *);
static void	ae_start(struct ifnet *, struct ifaltq_subque *);
static int	ae_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	ae_watchdog(struct ifnet *);
static void	ae_stop(struct ae_softc *);
static void	ae_tick(void *);

static void	ae_intr(void *);
static void	ae_tx_intr(struct ae_softc *);
static void	ae_rx_intr(struct ae_softc *);
static int	ae_rxeof(struct ae_softc *, struct ae_rxd *);

static int	ae_encap(struct ae_softc *, struct mbuf **);
static void	ae_sysctl_node(struct ae_softc *);
static void	ae_phy_reset(struct ae_softc *);
static int	ae_reset(struct ae_softc *);
static void	ae_pcie_init(struct ae_softc *);
static void	ae_get_eaddr(struct ae_softc *);
static void	ae_dma_free(struct ae_softc *);
static int	ae_dma_alloc(struct ae_softc *);
static void	ae_mac_config(struct ae_softc *);
static void	ae_stop_rxmac(struct ae_softc *);
static void	ae_stop_txmac(struct ae_softc *);
static void	ae_rxfilter(struct ae_softc *);
static void	ae_rxvlan(struct ae_softc *);
static void	ae_update_stats_rx(uint16_t, struct ae_stats *);
static void	ae_update_stats_tx(uint16_t, struct ae_stats *);
static void	ae_powersave_disable(struct ae_softc *);
static void	ae_powersave_enable(struct ae_softc *);

static device_method_t ae_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ae_probe),
	DEVMETHOD(device_attach,	ae_attach),
	DEVMETHOD(device_detach,	ae_detach),
	DEVMETHOD(device_shutdown,	ae_shutdown),
	DEVMETHOD(device_suspend,	ae_suspend),
	DEVMETHOD(device_resume,	ae_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),
	{ NULL, NULL }
};

static driver_t ae_driver = {
	"ae",
	ae_methods,
	sizeof(struct ae_softc)
};

static devclass_t ae_devclass;
DECLARE_DUMMY_MODULE(if_ae);
MODULE_DEPEND(if_ae, miibus, 1, 1, 1);
DRIVER_MODULE(if_ae, pci, ae_driver, ae_devclass, NULL, NULL);
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, NULL, NULL);

/* Register access macros. */
#define	AE_WRITE_4(_sc, reg, val) \
	bus_space_write_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define	AE_WRITE_2(_sc, reg, val) \
	bus_space_write_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define	AE_WRITE_1(_sc, reg, val) \
	bus_space_write_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define	AE_READ_4(_sc, reg) \
	bus_space_read_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define	AE_READ_2(_sc, reg) \
	bus_space_read_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define	AE_READ_1(_sc, reg) \
	bus_space_read_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))

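/* PHY register shorthands; the macros address the internal PHY as PHY 0. */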
#define	AE_PHY_READ(sc, reg) \
	ae_miibus_readreg(sc->ae_dev, 0, reg)
#define	AE_PHY_WRITE(sc, reg, val) \
	ae_miibus_writereg(sc->ae_dev, 0, reg, val)
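/*
 * A station address is considered invalid if it is all-zero or all-ones
 * (eaddr[1] carries only the two high-order bytes).
 */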
#define	AE_CHECK_EADDR_VALID(eaddr) \
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
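/*
 * The controller keeps the 802.1Q tag with the low nibble of the TCI
 * rotated to the top of the word: VID in bits 15-4, DEI in bit 3 and
 * PCP in bits 2-0.  The macros below convert between the host TCI
 * layout (PCP:3 DEI:1 VID:12) and the descriptor layout; they are
 * exact inverses of each other.
 */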
#define	AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define	AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))

/*
 * ae statistics.
 */
#define	STATS_ENTRY(node, desc, field) \
	{ node, desc, offsetof(struct ae_stats, field) }
struct {
	const char	*node;
	const char	*desc;
	intptr_t	offset;
} ae_stats_tx[] = {
	STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
	STATS_ENTRY("mcast", "multicast frames", tx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", tx_pause),
	STATS_ENTRY("control", "control frames", tx_ctrl),
	STATS_ENTRY("defers", "deferrals occurred", tx_defer),
	STATS_ENTRY("exc_defers", "excessive deferrals occurred", tx_excdefer),
	STATS_ENTRY("singlecols", "single collisions occurred", tx_singlecol),
	STATS_ENTRY("multicols", "multiple collisions occurred", tx_multicol),
	STATS_ENTRY("latecols", "late collisions occurred", tx_latecol),
	STATS_ENTRY("aborts", "transmit aborts due to collisions", tx_abortcol),
	STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
}, ae_stats_rx[] = {
	STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
	STATS_ENTRY("mcast", "multicast frames", rx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", rx_pause),
	STATS_ENTRY("control", "control frames", rx_ctrl),
	STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
	STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
	STATS_ENTRY("runt", "runt frames", rx_runt),
	STATS_ENTRY("frag", "fragmented frames", rx_frag),
	STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
	STATS_ENTRY("truncated", "frames truncated due to Rx FIFO overrun",
	    rx_trunc)
};
#define	AE_STATS_RX_LEN	NELEM(ae_stats_rx)
#define	AE_STATS_TX_LEN	NELEM(ae_stats_tx)

static void
ae_stop(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	sc->ae_flags &= ~AE_FLAG_LINK;
	callout_stop(&sc->ae_tick_ch);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		if_printf(ifp, "could not enter idle state in stop.\n");
}

static void
ae_stop_rxmac(struct ae_softc *sc)
{
	uint32_t val;
	int i;

	/*
	 * Stop Rx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_RX_EN) != 0) {
		val &= ~AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Rx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "timed out while stopping Rx MAC.\n");
	}
}

static void
ae_stop_txmac(struct ae_softc *sc)
{
	uint32_t val;
	int i;

	/*
	 * Stop Tx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_TX_EN) != 0) {
		val &= ~AE_MAC_TX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Tx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if,
			  "timed out while stopping Tx MAC.\n");
	}
}

/*
 * Callback from MII layer when media changes.
 */
static void
ae_miibus_statchg(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->ae_miibus);
	sc->ae_flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ae_flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ae_flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
}

static void
ae_sysctl_node(struct ae_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;
	unsigned int i;

	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->ae_dev);
	root = device_get_sysctl_tree(sc->ae_dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "ae statistics");
	if (stats == NULL) {
		device_printf(sc->ae_dev, "can't add stats sysctl node\n");
		return;
	}

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "Rx MAC statistics");
	if (stats_rx != NULL) {
		for (i = 0; i < AE_STATS_RX_LEN; i++) {
			SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx),
			    OID_AUTO, ae_stats_rx[i].node, CTLFLAG_RD,
			    (char *)ae_stats + ae_stats_rx[i].offset, 0,
			    ae_stats_rx[i].desc);
		}
	}

	/*
	 * Transmitter statistics.
	 */
	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "Tx MAC statistics");
	if (stats_tx != NULL) {
		for (i = 0; i < AE_STATS_TX_LEN; i++) {
			SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx),
			    OID_AUTO, ae_stats_tx[i].node, CTLFLAG_RD,
			    (char *)ae_stats + ae_stats_tx[i].offset, 0,
			    ae_stats_tx[i].desc);
		}
	}
}

static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ae_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/*
	 * Locking is done in upper layers.
	 */
	if (phy != sc->ae_phyaddr)
		return (0);
	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, val);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		val = AE_READ_4(sc, AE_MDIO_REG);
		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->ae_dev, "phy read timeout: %d.\n", reg);
		return (0);
	}
	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}

static int
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct ae_softc *sc = device_get_softc(dev);
	uint32_t aereg;
	int i;

	/*
	 * Locking is done in upper layers.
	 */
	if (phy != sc->ae_phyaddr)
		return (0);
	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, aereg);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		aereg = AE_READ_4(sc, AE_MDIO_REG);
		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT)
		device_printf(sc->ae_dev, "phy write timeout: %d.\n", reg);
	return (0);
}

static int
ae_probe(device_t dev)
{
	uint16_t vendor, devid;
	const struct ae_dev *sp;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (sp = ae_devs; sp->ae_name != NULL; sp++) {
		if (vendor == sp->ae_vendorid &&
		    devid == sp->ae_deviceid) {
			device_set_desc(dev, sp->ae_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
ae_dma_alloc(struct ae_softc *sc)
{
	bus_addr_t busaddr;
	int error;

	/*
	 * Create parent DMA tag.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->dma_parent_tag);
	if (error) {
		device_printf(sc->ae_dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA stuffs for TxD.
	 */
	sc->txd_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
	    AE_TXD_BUFSIZE_DEFAULT, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->dma_txd_tag, &sc->dma_txd_map,
	    &sc->dma_txd_busaddr);
	if (sc->txd_base == NULL) {
		device_printf(sc->ae_dev, "could not create TxD DMA stuffs.\n");
		return ENOMEM;
	}

	/*
	 * Create DMA stuffs for TxS.
	 */
	sc->txs_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
	    AE_TXS_COUNT_DEFAULT * 4, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->dma_txs_tag, &sc->dma_txs_map,
	    &sc->dma_txs_busaddr);
	if (sc->txs_base == NULL) {
		device_printf(sc->ae_dev, "could not create TxS DMA stuffs.\n");
		return ENOMEM;
	}

	/*
	 * Create DMA stuffs for RxD.
	 */
	sc->rxd_base_dma = bus_dmamem_coherent_any(sc->dma_parent_tag, 128,
	    AE_RXD_COUNT_DEFAULT * 1536 + 120,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->dma_rxd_tag, &sc->dma_rxd_map,
	    &busaddr);
	if (sc->rxd_base_dma == NULL) {
		device_printf(sc->ae_dev, "could not create RxD DMA stuffs.\n");
		return ENOMEM;
	}
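	/*
	 * The Rx ring proper begins 120 bytes into the allocation; this
	 * offset is apparently required by the hardware (it is inherited
	 * from the reference driver), so advance both the bus address and
	 * the host pointer past it.
	 */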
	sc->dma_rxd_busaddr = busaddr + 120;
	sc->rxd_base = (struct ae_rxd *)(sc->rxd_base_dma + 120);

	return (0);
}

static void
ae_mac_config(struct ae_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;

	mii = device_get_softc(sc->ae_miibus);
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_FULL_DUPLEX;
	/* XXX disable AE_MAC_TX_FLOW_EN? */
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= AE_MAC_FULL_DUPLEX;
	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static int
ae_rxeof(struct ae_softc *sc, struct ae_rxd *rxd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	unsigned int size;
	uint16_t flags;

	flags = le16toh(rxd->flags);
#ifdef AE_DEBUG
	if_printf(ifp, "Rx interrupt occurred.\n");
#endif
	size = le16toh(rxd->len) - ETHER_CRC_LEN;
	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN -
		    sizeof(struct ether_vlan_header))) {
		if_printf(ifp, "Runt frame received.\n");
		return (EIO);
	}

	m = m_devget(&rxd->data[0], size, 0, ifp);
	if (m == NULL)
		return (ENOBUFS);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
	    (flags & AE_RXD_HAS_VLAN)) {
		m->m_pkthdr.ether_vlantag = AE_RXD_VLAN(le16toh(rxd->vlan));
		m->m_flags |= M_VLANTAG;
	}
	ifp->if_input(ifp, m, NULL, -1);

	return (0);
}

static void
ae_rx_intr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ae_rxd *rxd;
	uint16_t flags;
	int error;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
			BUS_DMASYNC_POSTREAD);
	for (;;) {
		rxd = (struct ae_rxd *)(sc->rxd_base + sc->rxd_cur);

		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);

		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
		if ((flags & AE_RXD_SUCCESS) == 0) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			continue;
		}

		error = ae_rxeof(sc, rxd);
		if (error)
			IFNET_STAT_INC(ifp, ierrors, 1);
		else
			IFNET_STAT_INC(ifp, ipackets, 1);
	}

	/* Update Rx index. */
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}

static void
ae_tx_intr(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ae_txd *txd;
	struct ae_txs *txs;
	uint16_t flags;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;

		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);

		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->ae_flags |= AE_FLAG_TXAVAIL;
		txd = (struct ae_txd *)(sc->txd_base + sc->txd_ack);
		if (txs->len != txd->len) {
			device_printf(sc->ae_dev, "Size mismatch: "
				      "TxS:%d TxD:%d\n",
				      le16toh(txs->len), le16toh(txd->len));
		}

		/*
		 * Move txd ack and align on 4-byte boundary.
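		 * The extra 4 bytes account for the per-frame TxD header.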
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
		    AE_TXD_BUFSIZE_DEFAULT;
		if ((flags & AE_TXS_SUCCESS) != 0)
			IFNET_STAT_INC(ifp, opackets, 1);
		else
			IFNET_STAT_INC(ifp, oerrors, 1);
		sc->tx_inproc--;
	}

	if (sc->tx_inproc < 0) {
		/* XXX assert? */
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}
	if (sc->tx_inproc == 0)
		ifp->if_timer = 0;	/* Unarm watchdog. */
	if (sc->ae_flags & AE_FLAG_TXAVAIL) {
		ifq_clr_oactive(&ifp->if_snd);
		if (!ifq_is_empty(&ifp->if_snd))
#ifdef foo
			ae_intr(sc);
#else
			if_devstart(ifp);
#endif
	}

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
}

static void
ae_intr(void *xsc)
{
	struct ae_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	ASSERT_SERIALIZED(ifp->if_serializer);

	val = AE_READ_4(sc, AE_ISR_REG);
	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
		return;

#ifdef foo
	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
#endif

	/* Read interrupt status. */
	val = AE_READ_4(sc, AE_ISR_REG);

	/* Clear interrupts and disable them. */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

	if (ifp->if_flags & IFF_RUNNING) {
		if (val & (AE_ISR_DMAR_TIMEOUT |
			   AE_ISR_DMAW_TIMEOUT |
			   AE_ISR_PHY_LINKDOWN)) {
			ae_init(sc);
		}
		if (val & AE_ISR_TX_EVENT)
			ae_tx_intr(sc);
		if (val & AE_ISR_RX_EVENT)
			ae_rx_intr(sc);
	}

	/* Re-enable interrupts. */
	AE_WRITE_4(sc, AE_ISR_REG, 0);
}

static void
ae_init(void *xsc)
{
	struct ae_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->ae_miibus);
	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Set the MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
	AE_WRITE_4(sc, AE_EADDR0_REG, val);
	val = eaddr[0] << 8 | eaddr[1];
	AE_WRITE_4(sc, AE_EADDR1_REG, val);

	/*
	 * Set ring buffers base addresses.
	 */
	addr = sc->dma_rxd_busaddr;
	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txd_busaddr;
	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txs_busaddr;
	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

	/*
	 * Configure ring buffers sizes.
	 */
	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

	/*
	 * Configure interframe gap parameters.
	 */
	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
	    AE_IFG_TXIPG_MASK) |
	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
	    AE_IFG_RXIPG_MASK) |
	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
	    AE_IFG_IPGR1_MASK) |
	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
	    AE_IFG_IPGR2_MASK);
	AE_WRITE_4(sc, AE_IFG_REG, val);

	/*
	 * Configure half-duplex operation.
	 */
	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
	    AE_HDPX_LCOL_MASK) |
	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
	    AE_HDPX_RETRY_MASK) |
	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
	    AE_HDPX_ABEBT_MASK) |
	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
	AE_WRITE_4(sc, AE_HDPX_REG, val);

	/*
	 * Configure interrupt moderate timer.
	 */
	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
	val = AE_READ_4(sc, AE_MASTER_REG);
	val |= AE_MASTER_IMT_EN;
	AE_WRITE_4(sc, AE_MASTER_REG, val);

	/*
	 * Configure interrupt clearing timer.
	 */
	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

	/*
	 * Configure MTU.
	 */
	val = ifp->if_mtu + ETHER_HDR_LEN + sizeof(struct ether_vlan_header) +
	    ETHER_CRC_LEN;
	AE_WRITE_2(sc, AE_MTU_REG, val);

	/*
	 * Configure cut-through threshold.
	 */
	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

	/*
	 * Configure flow control.
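	 * The high watermark is set at 7/8 of the Rx ring, the low
	 * watermark at the larger of AE_RXD_COUNT_MIN / 8 and
	 * AE_RXD_COUNT_DEFAULT / 12.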
	 */
	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
	    (AE_RXD_COUNT_DEFAULT / 12));

	/*
	 * Init mailboxes.
	 */
	sc->txd_cur = sc->rxd_cur = 0;
	sc->txs_ack = sc->txd_ack = 0;
	sc->rxd_cur = 0;
	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
	sc->tx_inproc = 0;
	sc->ae_flags |= AE_FLAG_TXAVAIL;	/* Free Tx's available. */

	/*
	 * Enable DMA.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

	/*
	 * Check if everything is OK.
	 */
	val = AE_READ_4(sc, AE_ISR_REG);
	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
		device_printf(sc->ae_dev, "Initialization failed.\n");
		return;
	}

	/*
	 * Clear interrupt status.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
	AE_WRITE_4(sc, AE_ISR_REG, 0x0);

	/*
	 * Enable interrupts.
	 */
	val = AE_READ_4(sc, AE_MASTER_REG);
	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

	/*
	 * Disable WOL.
	 */
	AE_WRITE_4(sc, AE_WOL_REG, 0);

	/*
	 * Configure MAC.
	 */
	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
	    AE_MAC_PREAMBLE_MASK);
	AE_WRITE_4(sc, AE_MAC_REG, val);

	/*
	 * Configure Rx MAC.
	 */
	ae_rxfilter(sc);
	ae_rxvlan(sc);

	/*
	 * Enable Tx/Rx.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

	sc->ae_flags &= ~AE_FLAG_LINK;
	mii_mediachg(mii);	/* Switch to the current media. */

	callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

static void
ae_watchdog(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->ae_flags & AE_FLAG_LINK) == 0)
		if_printf(ifp, "watchdog timeout (missed link).\n");
	else
		if_printf(ifp, "watchdog timeout - resetting.\n");
	IFNET_STAT_INC(ifp, oerrors, 1);

	ae_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
ae_tick(void *xsc)
{
	struct ae_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->ae_miibus);

	lwkt_serialize_enter(ifp->if_serializer);
	mii_tick(mii);
	callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
ae_rxvlan(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;

	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_RMVLAN_EN;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		val |= AE_MAC_RMVLAN_EN;
	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static void
ae_rxfilter(struct ae_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = AE_READ_4(sc, AE_MAC_REG);
	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
	rxcfg |= AE_MAC_BCAST_EN;
	if (ifp->if_flags & IFF_PROMISC)
		rxcfg |= AE_MAC_PROMISC_EN;
	if (ifp->if_flags & IFF_ALLMULTI)
		rxcfg |= AE_MAC_MCAST_EN;

	/*
	 * Wipe old settings.
	 */
	AE_WRITE_4(sc, AE_REG_MHT0, 0);
	AE_WRITE_4(sc, AE_REG_MHT1, 0);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
		return;
	}

	/*
	 * Load multicast tables.
	 */
	bzero(mchash, sizeof(mchash));
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
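		/*
		 * The top six bits of the little-endian CRC select one
		 * of 64 hash bits: bit 31 picks the register, bits
		 * 30-26 the bit within it.
		 */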
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}
	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}

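/*
 * Return the number of bytes that can still be queued in the Tx ring,
 * not counting the 4-byte TxD header that precedes each frame.
 */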
static unsigned int
ae_tx_avail_size(struct ae_softc *sc)
{
	unsigned int avail;

	if (sc->txd_cur >= sc->txd_ack)
		avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
	else
		avail = sc->txd_ack - sc->txd_cur;
	return (avail - 4);	/* 4-byte header. */
}

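/*
 * Copy a frame into the Tx ring.  No scatter/gather is done here: each
 * packet is written into the single AE_TXD_BUFSIZE_DEFAULT-byte ring as
 * a 4-byte TxD header followed by the frame data, padded to a 4-byte
 * boundary and wrapping at the end of the ring.
 */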
static int
ae_encap(struct ae_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	struct ae_txd *hdr;
	unsigned int to_end;
	uint16_t len;

	M_ASSERTPKTHDR((*m_head));
	m0 = *m_head;
	len = m0->m_pkthdr.len;
	if ((sc->ae_flags & AE_FLAG_TXAVAIL) == 0 ||
	    ae_tx_avail_size(sc) < len) {
#ifdef AE_DEBUG
		if_printf(&sc->arpcom.ac_if, "No free Tx available.\n");
#endif
		return ENOBUFS;
	}

	hdr = (struct ae_txd *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));

	/* Header size. */
	sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT;

	/* Space available to the end of the ring */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;

	if (to_end >= len) {
		m_copydata(m0, 0, len, sc->txd_base + sc->txd_cur);
	} else {
		m_copydata(m0, 0, to_end, sc->txd_base + sc->txd_cur);
		m_copydata(m0, to_end, len - to_end, sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vlantag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->ae_flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(&sc->arpcom.ac_if, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->ae_flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
ae_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct ae_softc *sc = ifp->if_softc;
	int error, trans;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

#ifdef AE_DEBUG
	if_printf(ifp, "Start called.\n");
#endif
	if ((sc->ae_flags & AE_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	trans = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		struct mbuf *m0;

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;	/* Nothing to do. */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				ifq_prepend(&ifp->if_snd, m0);
				ifq_set_oactive(&ifp->if_snd);
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}
		trans++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);
		m_freem(m0);
	}
	if (trans) {	/* Something was dequeued. */
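		/* The Tx mailbox index is apparently in 4-byte (dword) units. */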
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		ifp->if_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", trans);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
}

static int
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ae_softc *sc = ifp->if_softc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifr = (struct ifreq *)data;
	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc->ae_if_flags)
				     & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ae_rxfilter(sc);
			} else {
				ae_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ae_stop(sc);
		}
		sc->ae_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			ae_rxfilter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->ae_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			ae_rxvlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static int
ae_attach(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;

	sc->ae_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->ae_tick_ch);

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate memory mapped IO
	 */
	sc->ae_mem_rid = PCIR_BAR(0);
	sc->ae_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->ae_mem_rid, RF_ACTIVE);
	if (sc->ae_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->ae_mem_bt = rman_get_bustag(sc->ae_mem_res);
	sc->ae_mem_bh = rman_get_bushandle(sc->ae_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->ae_irq_rid = 0;
	sc->ae_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->ae_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->ae_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Set PHY address. */
	sc->ae_phyaddr = AE_PHYADDR_DEFAULT;

	/* Create sysctl tree */
	ae_sysctl_node(sc);

	/* Reset PHY. */
	ae_phy_reset(sc);

	/*
	 * Reset the ethernet controller.
	 */
	ae_reset(sc);
	ae_pcie_init(sc);

	/*
	 * Get PCI and chip id/revision.
	 */
	sc->ae_rev = pci_get_revid(dev);
	sc->ae_chip_rev =
	    (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
	    AE_MASTER_REVNUM_MASK;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n", sc->ae_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->ae_chip_rev);
	}

	/*
	 * XXX
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.  It seems that
	 * an unplugged cable puts the hardware into automatic
	 * power-down mode, which in turn returns an invalid chip revision.
	 */
	if (sc->ae_chip_rev == 0xFFFF) {
		device_printf(dev, "invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->ae_chip_rev);
		error = ENXIO;
		goto fail;
	}
#if 0
	/* Get DMA parameters from PCIe device control register. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr) {
		uint16_t devctl;

		sc->ae_flags |= AE_FLAG_PCIE;
		devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		/* Max read request size. */
		sc->ae_dma_rd_burst = ((devctl >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->ae_dma_wr_burst = ((devctl >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((devctl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((devctl >> 5) & 0x07));
		}
	} else {
		sc->ae_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->ae_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}
#endif

	/* Create DMA stuffs */
	error = ae_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	ae_get_eaddr(sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_init = ae_init;
	ifp->if_watchdog = ae_watchdog;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN - 1);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = 0;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->ae_miibus,
			      ae_mediachange, ae_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}
	ether_ifattach(ifp, sc->ae_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ae_irq_res));

	error = bus_setup_intr(dev, sc->ae_irq_res, INTR_MPSAFE, ae_intr, sc,
			       &sc->ae_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}
	return 0;
fail:
	ae_detach(dev);
	return (error);
}

static int
ae_detach(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		sc->ae_flags |= AE_FLAG_DETACH;
		ae_stop(sc);
		bus_teardown_intr(dev, sc->ae_irq_res, sc->ae_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->ae_miibus != NULL)
		device_delete_child(dev, sc->ae_miibus);
	bus_generic_detach(dev);

	if (sc->ae_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->ae_irq_rid,
				     sc->ae_irq_res);
	}
	if (sc->ae_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->ae_mem_rid,
				     sc->ae_mem_res);
	}
	ae_dma_free(sc);

	return (0);
}

static void
ae_dma_free(struct ae_softc *sc)
{
	if (sc->dma_txd_tag != NULL) {
		bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
		bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
				sc->dma_txd_map);
		bus_dma_tag_destroy(sc->dma_txd_tag);
	}
	if (sc->dma_txs_tag != NULL) {
		bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
		bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
				sc->dma_txs_map);
		bus_dma_tag_destroy(sc->dma_txs_tag);
	}
	if (sc->dma_rxd_tag != NULL) {
		bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
		bus_dmamem_free(sc->dma_rxd_tag,
				sc->rxd_base_dma, sc->dma_rxd_map);
		bus_dma_tag_destroy(sc->dma_rxd_tag);
	}
	if (sc->dma_parent_tag != NULL)
		bus_dma_tag_destroy(sc->dma_parent_tag);
}

static void
ae_pcie_init(struct ae_softc *sc)
{
	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG,
		   AE_PCIE_LTSSM_TESTMODE_DEFAULT);
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG,
		   AE_PCIE_DLL_TX_CTRL_DEFAULT);
}

static void
ae_phy_reset(struct ae_softc *sc)
{
	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);	/* XXX: pause(9) ? */
}

static int
ae_reset(struct ae_softc *sc)
{
	int i;

	/*
	 * Issue a soft reset.
	 */
	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
	bus_space_barrier(sc->ae_mem_bt, sc->ae_mem_bh, AE_MASTER_REG, 4,
			  BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * Wait for reset to complete.
	 */
	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == AE_RESET_TIMEOUT) {
		device_printf(sc->ae_dev, "reset timeout.\n");
		return (ENXIO);
	}

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		device_printf(sc->ae_dev, "could not enter idle state.\n");
		return (ENXIO);
	}
	return (0);
}

static int
ae_check_eeprom_present(struct ae_softc *sc, int *vpdc)
{
	int error;
	uint32_t val;

	/*
	 * Not sure why, but Linux does this.
	 */
	val = AE_READ_4(sc, AE_SPICTL_REG);
	if ((val & AE_SPICTL_VPD_EN) != 0) {
		val &= ~AE_SPICTL_VPD_EN;
		AE_WRITE_4(sc, AE_SPICTL_REG, val);
	}
	error = pci_find_extcap(sc->ae_dev, PCIY_VPD, vpdc);
	return (error);
}

static int
ae_vpd_read_word(struct ae_softc *sc, int reg, uint32_t *word)
{
	uint32_t val;
	int i;

	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */

	/*
	 * VPD registers start at offset 0x100.  Read them.
	 */
	val = 0x100 + reg * 4;
	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
	    AE_VPD_CAP_ADDR_MASK);
	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
		DELAY(2000);
		val = AE_READ_4(sc, AE_VPD_CAP_REG);
		if ((val & AE_VPD_CAP_DONE) != 0)
			break;
	}
	if (i == AE_VPD_TIMEOUT) {
		device_printf(sc->ae_dev, "timeout reading VPD register %d.\n",
		    reg);
		return (ETIMEDOUT);
	}
	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
	return (0);
}

static int
ae_get_vpd_eaddr(struct ae_softc *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.
	 * Each register is prefixed with a signature,
	 * so we can check that it is valid.
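	 * Entries come in word pairs: a tag word carrying the signature
	 * and the target register, followed by the value word.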
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */
		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}
	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->ae_dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static int
ae_get_reg_eaddr(struct ae_softc *sc, uint32_t *eaddr)
{
	/*
	 * BIOS is supposed to set this.
	 */
	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->ae_dev,
			    "Ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static void
ae_get_eaddr(struct ae_softc *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for EEPROM.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error) {
		if (bootverbose)
			device_printf(sc->ae_dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = karc4random();
		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->ae_eaddr[0] = 0x02;	/* U/L bit set. */
		sc->ae_eaddr[1] = 0x1f;
		sc->ae_eaddr[2] = 0xc6;
		sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		sc->ae_eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->ae_eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->ae_eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}

static int
ae_mediachange(struct ifnet *ifp)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ae_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);
	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	return (error);
}

static void
ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ae_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ae_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static void
ae_update_stats_tx(uint16_t flags, struct ae_stats *stats)
{
	if ((flags & AE_TXS_BCAST) != 0)
		stats->tx_bcast++;
	if ((flags & AE_TXS_MCAST) != 0)
		stats->tx_mcast++;
	if ((flags & AE_TXS_PAUSE) != 0)
		stats->tx_pause++;
	if ((flags & AE_TXS_CTRL) != 0)
		stats->tx_ctrl++;
	if ((flags & AE_TXS_DEFER) != 0)
		stats->tx_defer++;
	if ((flags & AE_TXS_EXCDEFER) != 0)
		stats->tx_excdefer++;
	if ((flags & AE_TXS_SINGLECOL) != 0)
		stats->tx_singlecol++;
	if ((flags & AE_TXS_MULTICOL) != 0)
		stats->tx_multicol++;
	if ((flags & AE_TXS_LATECOL) != 0)
		stats->tx_latecol++;
	if ((flags & AE_TXS_ABORTCOL) != 0)
		stats->tx_abortcol++;
	if ((flags & AE_TXS_UNDERRUN) != 0)
		stats->tx_underrun++;
}

static void
ae_update_stats_rx(uint16_t flags, struct ae_stats *stats)
{
	if ((flags & AE_RXD_BCAST) != 0)
		stats->rx_bcast++;
	if ((flags & AE_RXD_MCAST) != 0)
		stats->rx_mcast++;
	if ((flags & AE_RXD_PAUSE) != 0)
		stats->rx_pause++;
	if ((flags & AE_RXD_CTRL) != 0)
		stats->rx_ctrl++;
	if ((flags & AE_RXD_CRCERR) != 0)
		stats->rx_crcerr++;
	if ((flags & AE_RXD_CODEERR) != 0)
		stats->rx_codeerr++;
	if ((flags & AE_RXD_RUNT) != 0)
		stats->rx_runt++;
	if ((flags & AE_RXD_FRAG) != 0)
		stats->rx_frag++;
	if ((flags & AE_RXD_TRUNC) != 0)
		stats->rx_trunc++;
	if ((flags & AE_RXD_ALIGN) != 0)
		stats->rx_align++;
}

static int
ae_resume(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
#if 0
	AE_READ_4(sc, AE_WOL_REG);	/* Clear WOL status. */
#endif
	ae_phy_reset(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		ae_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}

static int
ae_suspend(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	ae_stop(sc);
#if 0
	/* we don't use ae_pm_init because we don't want WOL */
	ae_pm_init(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}

static int
ae_shutdown(device_t dev)
{
	struct ae_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ae_suspend(dev);

	lwkt_serialize_enter(ifp->if_serializer);
	ae_powersave_enable(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static void
ae_powersave_disable(struct ae_softc *sc)
{
	uint32_t val;

	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	if (val & AE_PHY_DBG_POWERSAVE) {
		val &= ~AE_PHY_DBG_POWERSAVE;
		AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
		DELAY(1000);
	}
}

static void
ae_powersave_enable(struct ae_softc *sc)
{
	uint32_t val;

	/*
	 * XXX magic numbers.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}