/*	$OpenBSD: if_alc.c,v 1.59 2024/08/31 16:23:09 deraadt Exp $	*/
/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR813x/AR815x/AR816x/AR817x PCIe Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alcreg.h>

int	alc_match(struct device *, void *, void *);
void	alc_attach(struct device *, struct device *, void *);
int	alc_detach(struct device *, int);
int	alc_activate(struct device *, int);

int	alc_init(struct ifnet *);
void	alc_start(struct ifnet *);
int	alc_ioctl(struct ifnet *, u_long, caddr_t);
void	alc_watchdog(struct ifnet *);
int	alc_mediachange(struct ifnet *);
void	alc_mediastatus(struct ifnet *, struct ifmediareq *);

void	alc_aspm(struct alc_softc *, int, uint64_t);
void	alc_aspm_813x(struct alc_softc *, uint64_t);
void	alc_aspm_816x(struct alc_softc *, int);
void	alc_disable_l0s_l1(struct alc_softc *);
int	alc_dma_alloc(struct alc_softc *);
void	alc_dma_free(struct alc_softc *);
int	alc_encap(struct alc_softc *, struct mbuf *);
void	alc_get_macaddr(struct alc_softc *);
void	alc_get_macaddr_813x(struct alc_softc *);
void	alc_get_macaddr_816x(struct alc_softc *);
void	alc_get_macaddr_par(struct alc_softc *);
void	alc_init_cmb(struct alc_softc *);
void	alc_init_rr_ring(struct alc_softc *);
int	alc_init_rx_ring(struct alc_softc *);
void	alc_init_smb(struct alc_softc *);
void	alc_init_tx_ring(struct alc_softc *);
int	alc_intr(void *);
void	alc_mac_config(struct alc_softc *);
int	alc_mii_readreg_813x(struct device *, int, int);
int	alc_mii_readreg_816x(struct device *, int, int);
void	alc_mii_writereg_813x(struct device *, int, int, int);
void	alc_mii_writereg_816x(struct device *, int, int, int);
void	alc_dsp_fixup(struct alc_softc *, int);
int	alc_miibus_readreg(struct device *, int, int);
void	alc_miibus_statchg(struct device *);
void	alc_miibus_writereg(struct device *, int, int, int);
int	alc_miidbg_readreg(struct alc_softc *, int);
void	alc_miidbg_writereg(struct alc_softc *, int, int);
int	alc_miiext_readreg(struct alc_softc *, int, int);
void	alc_miiext_writereg(struct alc_softc *, int, int, int);
void	alc_phy_reset_813x(struct alc_softc *);
void	alc_phy_reset_816x(struct alc_softc *);
int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
void	alc_phy_down(struct alc_softc *);
void	alc_phy_reset(struct alc_softc *);
void	alc_reset(struct alc_softc *);
void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
int	alc_rxintr(struct alc_softc *);
void	alc_iff(struct alc_softc *);
void	alc_rxvlan(struct alc_softc *);
void	alc_start_queue(struct alc_softc *);
void	alc_stats_clear(struct alc_softc *);
void	alc_stats_update(struct alc_softc *);
void	alc_stop(struct alc_softc *);
void	alc_stop_mac(struct alc_softc *);
void	alc_stop_queue(struct alc_softc *);
void	alc_tick(void *);
void	alc_txeof(struct alc_softc *);
void	alc_init_pcie(struct alc_softc *, int);
void	alc_config_msi(struct alc_softc *);
int	alc_osc_reset(struct alc_softc *);

uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 };

const struct pci_matchid alc_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1C },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D_1 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_1 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_2 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8161 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8162 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8171 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8172 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2200 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2400 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2500 }
};

const struct cfattach alc_ca = {
	sizeof (struct alc_softc), alc_match, alc_attach, alc_detach,
	alc_activate
};

struct cfdriver alc_cd = {
	NULL, "alc", DV_IFNET
};

int alcdebug = 0;
#define	DPRINTF(x)	do { if (alcdebug) printf x; } while (0)

#define ALC_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)

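/*
 * MII bus glue. The AR813x/AR815x and AR816x/AR817x families use
 * different MDIO access sequences, so the miibus entry points below
 * simply dispatch on ALC_FLAG_AR816X_FAMILY.
 */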
int
alc_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t v;

	if (phy != sc->alc_phyaddr)
		return (0);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		v = alc_mii_readreg_816x(dev, phy, reg);
	else
		v = alc_mii_readreg_813x(dev, phy, reg);

	return (v);
}

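/*
 * Kick an MDIO read and poll ALC_MDIO until the controller clears the
 * busy bits, or until ALC_PHY_TIMEOUT iterations of 5us each elapse.
 */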
int
alc_mii_readreg_813x(struct device *dev, int phy, int reg)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t v;
	int i;

	/*
	 * For the AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though AR8132 uses the same
	 * model/revision number as the F1 gigabit PHY, the PHY has no
	 * ability to establish a 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

int
alc_mii_readreg_816x(struct device *dev, int phy, int reg)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

void
alc_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct alc_softc *sc = (struct alc_softc *)dev;

	if (phy != sc->alc_phyaddr)
		return;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_mii_writereg_816x(dev, phy, reg, val);
	else
		alc_mii_writereg_813x(dev, phy, reg, val);
}

void
alc_mii_writereg_813x(struct device *dev, int phy, int reg, int val)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t v;
	int i;

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
alc_mii_writereg_816x(struct device *dev, int phy, int reg, int val)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
	    MDIO_SUP_PREAMBLE | clk);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
alc_miibus_statchg(struct device *dev)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
}

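/*
 * Vendor debug registers are reached indirectly: write the debug
 * register number to ALC_MII_DBG_ADDR, then move data through
 * ALC_MII_DBG_DATA using the regular MII access routines.
 */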
int
alc_miidbg_readreg(struct alc_softc *sc, int reg)
{
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	return (alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA));
}

void
alc_miidbg_writereg(struct alc_softc *sc, int reg, int val)
{
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    val);
}

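/*
 * Extended PHY registers (the PCS/ANEG blocks on AR816x) are addressed
 * by device address and register number through ALC_EXT_MDIO, with the
 * MDIO command issued in extended mode (MDIO_MODE_EXT).
 */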
int
alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy ext read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, devaddr, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

void
alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy ext write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, devaddr, reg);
}

void
alc_dsp_fixup(struct alc_softc *sc, int media)
{
	uint16_t agc, len, val;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		return;
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
		return;

	/*
	 * Vendor PHY magic.
	 * 1000BT/AZ, wrong cable length
	 */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6);
		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
		    EXT_CLDCTL6_CAB_LEN_MASK;
		agc = alc_miidbg_readreg(sc, MII_DBG_AGC);
		agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK;
		if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
		    agc > DBG_AGC_LONG1G_LIMT) ||
		    (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT &&
		    agc > DBG_AGC_LONG1G_LIMT)) {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_LONG);
			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE);
			val |= ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE, val);
		} else {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_DEFAULT);
			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE);
			val &= ~ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
			    val);
		}
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			if (media == IFM_1000_T) {
				/*
				 * Giga link threshold, raise the tolerance of
				 * noise 50%.
				 */
				val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
				val &= ~DBG_MSE20DB_TH_MASK;
				val |= (DBG_MSE20DB_TH_HI <<
				    DBG_MSE20DB_TH_SHIFT);
				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
			} else if (media == IFM_100_TX)
				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
				    DBG_MSE16DB_UP);
		}
	} else {
		val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE);
		val &= ~ANEG_AFEE_10BT_100M_TH;
		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
			    DBG_MSE16DB_DOWN);
			val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
			val &= ~DBG_MSE20DB_TH_MASK;
			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
		}
	}
}

void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
alc_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, alc_devices,
	    nitems(alc_devices));
}

void
alc_get_macaddr(struct alc_softc *sc)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_get_macaddr_816x(sc);
	else
		alc_get_macaddr_813x(sc);
}

void
alc_get_macaddr_813x(struct alc_softc *sc)
{
	uint32_t opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		eeprom++;
		switch (sc->sc_product) {
		case PCI_PRODUCT_ATTANSIC_L1C:
		case PCI_PRODUCT_ATTANSIC_L2C:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_L1D:
		case PCI_PRODUCT_ATTANSIC_L1D_1:
		case PCI_PRODUCT_ATTANSIC_L2C_1:
		case PCI_PRODUCT_ATTANSIC_L2C_2:
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n", sc->sc_dev.dv_xname);
	}
	if (eeprom != 0) {
		switch (sc->sc_product) {
		case PCI_PRODUCT_ATTANSIC_L1C:
		case PCI_PRODUCT_ATTANSIC_L2C:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_L1D:
		case PCI_PRODUCT_ATTANSIC_L1D_1:
		case PCI_PRODUCT_ATTANSIC_L2C_1:
		case PCI_PRODUCT_ATTANSIC_L2C_2:
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	alc_get_macaddr_par(sc);
}

void
alc_get_macaddr_816x(struct alc_softc *sc)
{
	uint32_t reg;
	int i, reloaded;

	reloaded = 0;
	/* Try to reload station address via TWSI. */
	for (i = 100; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_SLD);
		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
			break;
		DELAY(1000);
	}
	if (i != 0) {
		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALC_SLD);
			if ((reg & SLD_START) == 0)
				break;
		}
		if (i != 0)
			reloaded++;
		else if (alcdebug)
672 printf("%s: reloading station address via TWSI timed"
673 "out!\n", sc->sc_dev.dv_xname);
	}

	/* Try to reload station address from EEPROM or FLASH. */
	if (reloaded == 0) {
		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
		if ((reg & (EEPROM_LD_EEPROM_EXIST |
		    EEPROM_LD_FLASH_EXIST)) != 0) {
			for (i = 100; i > 0; i--) {
				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
				if ((reg & (EEPROM_LD_PROGRESS |
				    EEPROM_LD_START)) == 0)
					break;
				DELAY(1000);
			}
			if (i != 0) {
				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
				    EEPROM_LD_START);
				for (i = 100; i > 0; i--) {
					DELAY(1000);
					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
					if ((reg & EEPROM_LD_START) == 0)
						break;
				}
			} else if (alcdebug)
				printf("%s: reloading EEPROM/FLASH timed out!\n",
				    sc->sc_dev.dv_xname);
		}
	}

	alc_get_macaddr_par(sc);
}

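/*
 * Read the station address from the parameter registers; PAR0 holds
 * the low four bytes and PAR1 the upper two, so the bytes are pulled
 * out in reverse order to build the canonical address.
 */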
void
alc_get_macaddr_par(struct alc_softc *sc)
{
	uint32_t ea[2];

	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Another magic from vendor. */
		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
	}
}

void
alc_phy_reset(struct alc_softc *sc)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_phy_reset_816x(sc);
	else
		alc_phy_reset_813x(sc);
}

void
alc_phy_reset_813x(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, Vendor magic. */
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);

	/* Disable hibernation. */
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x0029);
	data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);

	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x000B);
	data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);
}

void
alc_phy_reset_816x(struct alc_softc *sc)
{
	uint32_t val;

	val = CSR_READ_4(sc, ALC_GPHY_CFG);
	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
	val |= GPHY_CFG_SEL_ANA_RESET;
	/* Disable PHY hibernation. */
	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
	DELAY(10);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
	DELAY(800);
	/* Vendor PHY magic. */
	/* Disable PHY hibernation. */
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
	alc_miidbg_writereg(sc, MII_DBG_HIBNEG, DBG_HIBNEG_DEFAULT &
	    ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
	/* XXX Disable EEE. */
	val = CSR_READ_4(sc, ALC_LPI_CTL);
	val &= ~LPI_CTL_ENB;
	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);
	/* PHY power saving. */
	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
	val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
	val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
	/* RTL8139C, 120m issue. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
	    ANEG_NLP78_120M_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
	    ANEG_S3DIG10_DEFAULT);
	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
		/* Turn off half amplitude. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
		val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
		/* Turn off Green feature. */
		val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
		val |= DBG_GREENCFG2_BP_GREEN;
		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
		/* Turn off half bias. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
	}
}

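/*
 * Power down the PHY. AR816x family parts are put into IDDQ mode with
 * hibernation enabled via ALC_GPHY_CFG, the AR8151/AR8152 variants are
 * only isolated and powered down through BMCR (see the comment below),
 * and everything else is forced down through ALC_GPHY_CFG.
 */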
void
alc_phy_down(struct alc_softc *sc)
{
	uint32_t gphy;

	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_AR8161:
	case PCI_PRODUCT_ATTANSIC_E2200:
	case PCI_PRODUCT_ATTANSIC_E2400:
	case PCI_PRODUCT_ATTANSIC_E2500:
	case PCI_PRODUCT_ATTANSIC_AR8162:
	case PCI_PRODUCT_ATTANSIC_AR8171:
	case PCI_PRODUCT_ATTANSIC_AR8172:
		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET;
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
		break;
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system and only
		 * a cold boot recovered from it. I'm not sure whether
		 * AR8151 v1.0 also requires this; I don't have an AR8151
		 * v1.0 controller at hand. The only option left is to
		 * isolate the PHY and initiate a PHY power down, which in
		 * turn saves more power when the driver is unloaded.
		 */
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}

void
alc_aspm(struct alc_softc *sc, int init, uint64_t media)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_aspm_816x(sc, init);
	else
		alc_aspm_813x(sc, media);
}

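/*
 * Program ASPM for AR813x/AR815x. The L0s/L1 enables and the various
 * SerDes/clock-switch bits depend on link state, on whether the chip
 * advertises APS, and on per-product L1 entry timer quirks.
 */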
void
alc_aspm_813x(struct alc_softc *sc, uint64_t media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap + PCI_PCIE_LCSR);
	else
		linkcfg = 0;
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;
		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR, linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->sc_product) {
				case PCI_PRODUCT_ATTANSIC_L2C_1:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case PCI_PRODUCT_ATTANSIC_L1D_1:
				case PCI_PRODUCT_ATTANSIC_L2C_2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

void
alc_aspm_816x(struct alc_softc *sc, int init)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t pmcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
	    (sc->alc_rev & 0x01) != 0)
		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up, enable both L0s, L1s. */
		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK;
	} else {
		if (init != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
			    PM_CFG_MAC_ASPM_CHK;
		else if ((ifp->if_flags & IFF_RUNNING) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

void
alc_init_pcie(struct alc_softc *sc, int base)
{
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint32_t cap, ctl, val;
	int state;

	/* Clear data link and flow-control protocol error. */
	val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
	val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
	CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}
		/* Disable ASPM L0S and L1. */
		cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    base + PCI_PCIE_LCAP) >> 16;
		if ((cap & 0x00000c00) != 0) {
			ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
			    base + PCI_PCIE_LCSR) >> 16;
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (alcdebug)
				printf("%s: RCB %u bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if (state & 0x01)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & 0x02)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (alcdebug)
				printf("%s: ASPM %s %s\n",
				    sc->sc_dev.dv_xname,
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		}
	} else {
		val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
		val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
		CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
		val = CSR_READ_4(sc, ALC_MASTER_CFG);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) == 0) {
				val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		} else {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) != 0) {
				val |= MASTER_WAKEN_25M;
				val &= ~MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		}
	}
}

void
alc_config_msi(struct alc_softc *sc)
{
	uint32_t ctl, mod;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/*
		 * It seems interrupt moderation is controlled by
		 * ALC_MSI_RETRANS_TIMER register if MSI/MSIX is active.
		 * Driver uses RX interrupt moderation parameter to
		 * program ALC_MSI_RETRANS_TIMER register.
		 */
		ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER);
		ctl &= ~MSI_RETRANS_TIMER_MASK;
		ctl &= ~MSI_RETRANS_MASK_SEL_LINE;
		mod = ALC_USECS(sc->alc_int_rx_mod);
		if (mod == 0)
			mod = 1;
		ctl |= mod;
		if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
			    MSI_RETRANS_MASK_SEL_LINE);
		else
			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0);
	}
}

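/*
 * Attach: map registers, hook up the (MSI or legacy) interrupt, size
 * DMA parameters from the PCIe capability, reset the PHY/MAC, allocate
 * descriptor DMA memory and attach the ifnet and MII bus.
 */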
void
alc_attach(struct device *parent, struct device *self, void *aux)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	uint16_t burst;
	int base, error = 0;

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/* Get PCI and chip id/revision. */
	sc->sc_product = PCI_PRODUCT(pa->pa_id);
	sc->alc_rev = PCI_REVISION(pa->pa_class);

	/*
	 * One odd thing is that AR8132 uses the same PHY hardware (F1
	 * gigabit PHY) as AR8131. So atphy(4) on AR8132 reports that
	 * the PHY supports 1000Mbps, but that's not true: the PHY used
	 * in AR8132 can't establish a gigabit link even though it shows
	 * the same PHY model/revision number as AR8131.
	 */
	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_E2200:
	case PCI_PRODUCT_ATTANSIC_E2400:
	case PCI_PRODUCT_ATTANSIC_E2500:
		sc->alc_flags |= ALC_FLAG_E2X00;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8161:
		if (AR816X_REV(sc->alc_rev) == 0)
			sc->alc_flags |= ALC_FLAG_LINK_WAR;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8171:
		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8162:
	case PCI_PRODUCT_ATTANSIC_AR8172:
		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
		break;
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_L2C:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_JUMBO;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
	if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	sc->alc_flags |= ALC_FLAG_MSI;
	if (pci_intr_map_msi(pa, &ih) != 0) {
		if (pci_intr_map(pa, &ih) != 0) {
			printf(": can't map interrupt\n");
			goto fail;
		}
		sc->alc_flags &= ~ALC_FLAG_MSI;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	alc_config_msi(sc);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		sc->alc_max_framelen = 6 * 1024;
		break;
	default:
		sc->alc_max_framelen = 9 * 1024;
		break;
	}

	/*
	 * It seems that AR813x/AR815x has a silicon bug for SMB. In
	 * addition, Atheros said that enabling SMB wouldn't improve
	 * performance. However, I think it's bad to access lots of
	 * registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use Tx CMB. It is known to have silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (alcdebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->alc_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->alc_chip_rev);
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname,
		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
	}

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &base, NULL)) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
		burst = CSR_READ_2(sc, base + PCI_PCIE_DCSR);
		sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
		sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
		if (alcdebug) {
			printf("%s: Read request size : %u bytes.\n",
			    sc->sc_dev.dv_xname,
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			printf("%s: TLP payload size : %u bytes.\n",
			    sc->sc_dev.dv_xname,
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;
		/*
		 * Force maximum payload size to 128 bytes for
		 * E2200/E2400/E2500/AR8162/AR8171/AR8172.
		 * Otherwise it triggers DMA write error.
		 */
		if ((sc->alc_flags &
		    (ALC_FLAG_E2X00 | ALC_FLAG_AR816X_FAMILY)) != 0)
			sc->alc_dma_wr_burst = 0;
		alc_init_pcie(sc, base);
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_stop_mac(sc);
	alc_reset(sc);

	error = alc_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_watchdog = alc_watchdog;
	ifq_init_maxlen(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	bcopy(sc->alc_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef ALC_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = alc_miibus_readreg;
	sc->sc_miibus.mii_writereg = alc_miibus_writereg;
	sc->sc_miibus.mii_statchg = alc_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
	    alc_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->alc_tick_ch, alc_tick, sc);

	return;
fail:
	alc_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
alc_detach(struct device *self, int flags)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	alc_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	alc_dma_free(sc);

	alc_phy_down(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
alc_activate(struct device *self, int act)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			alc_stop(sc);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			alc_init(ifp);
		break;
	}
	return (0);
}

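/*
 * Allocate and map all descriptor DMA resources: the Tx, Rx and Rx
 * return rings, the CMB and SMB blocks, plus per-buffer DMA maps for
 * every Tx/Rx slot and one spare Rx map. Each ring follows the same
 * create/alloc/map/load sequence; alc_dma_free() tears it all down.
 */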
1482 int
alc_dma_alloc(struct alc_softc * sc)1483 alc_dma_alloc(struct alc_softc *sc)
1484 {
1485 struct alc_txdesc *txd;
1486 struct alc_rxdesc *rxd;
1487 int nsegs, error, i;
1488
1489 /*
1490 * Create DMA stuffs for TX ring
1491 */
1492 error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
1493 ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
1494 if (error)
1495 return (ENOBUFS);
1496
1497 /* Allocate DMA'able memory for TX ring */
1498 error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
1499 ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
1500 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1501 if (error) {
1502 printf("%s: could not allocate DMA'able memory for Tx ring.\n",
1503 sc->sc_dev.dv_xname);
1504 return (error);
1505 }
1506
1507 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
1508 nsegs, ALC_TX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_tx_ring,
1509 BUS_DMA_NOWAIT);
1510 if (error)
1511 return (ENOBUFS);
1512
1513 /* Load the DMA map for Tx ring. */
1514 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
1515 sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
1516 if (error) {
1517 printf("%s: could not load DMA'able memory for Tx ring.\n",
1518 sc->sc_dev.dv_xname);
1519 bus_dmamem_free(sc->sc_dmat,
1520 (bus_dma_segment_t *)&sc->alc_rdata.alc_tx_ring, 1);
1521 return (error);
1522 }
1523
1524 sc->alc_rdata.alc_tx_ring_paddr =
1525 sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
1526
1527 /*
1528 * Create DMA stuffs for RX ring
1529 */
1530 error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
1531 ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
1532 if (error)
1533 return (ENOBUFS);
1534
1535 /* Allocate DMA'able memory for RX ring */
1536 error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
1537 ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
1538 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1539 if (error) {
1540 printf("%s: could not allocate DMA'able memory for Rx ring.\n",
1541 sc->sc_dev.dv_xname);
1542 return (error);
1543 }
1544
1545 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
1546 nsegs, ALC_RX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rx_ring,
1547 BUS_DMA_NOWAIT);
1548 if (error)
1549 return (ENOBUFS);
1550
1551 /* Load the DMA map for Rx ring. */
1552 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
1553 sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
1554 if (error) {
1555 printf("%s: could not load DMA'able memory for Rx ring.\n",
1556 sc->sc_dev.dv_xname);
1557 bus_dmamem_free(sc->sc_dmat,
1558 (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
1559 return (error);
1560 }
1561
1562 sc->alc_rdata.alc_rx_ring_paddr =
1563 sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
1564
1565 /*
1566 * Create DMA stuffs for RX return ring
1567 */
1568 error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
1569 ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
1570 if (error)
1571 return (ENOBUFS);
1572
1573 /* Allocate DMA'able memory for RX return ring */
1574 error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
1575 ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
1576 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1577 if (error) {
1578 printf("%s: could not allocate DMA'able memory for Rx "
1579 "return ring.\n", sc->sc_dev.dv_xname);
1580 return (error);
1581 }
1582
1583 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
1584 nsegs, ALC_RR_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rr_ring,
1585 BUS_DMA_NOWAIT);
1586 if (error)
1587 return (ENOBUFS);
1588
1589 /* Load the DMA map for Rx return ring. */
1590 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
1591 sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
1592 if (error) {
1593 printf("%s: could not load DMA'able memory for Rx return ring."
1594 "\n", sc->sc_dev.dv_xname);
1595 bus_dmamem_free(sc->sc_dmat,
1596 (bus_dma_segment_t *)&sc->alc_rdata.alc_rr_ring, 1);
1597 return (error);
1598 }
1599
1600 sc->alc_rdata.alc_rr_ring_paddr =
1601 sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
1602
1603 /*
1604 * Create DMA stuffs for CMB block
1605 */
1606 error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
1607 ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
1608 &sc->alc_cdata.alc_cmb_map);
1609 if (error)
1610 return (ENOBUFS);
1611
1612 /* Allocate DMA'able memory for CMB block */
1613 error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
1614 ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
1615 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1616 if (error) {
1617 printf("%s: could not allocate DMA'able memory for "
1618 "CMB block\n", sc->sc_dev.dv_xname);
1619 return (error);
1620 }
1621
1622 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
1623 nsegs, ALC_CMB_SZ, (caddr_t *)&sc->alc_rdata.alc_cmb,
1624 BUS_DMA_NOWAIT);
1625 if (error)
1626 return (ENOBUFS);
1627
1628 /* Load the DMA map for CMB block. */
1629 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
1630 sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
1631 BUS_DMA_WAITOK);
1632 if (error) {
1633 printf("%s: could not load DMA'able memory for CMB block\n",
1634 sc->sc_dev.dv_xname);
1635 bus_dmamem_free(sc->sc_dmat,
1636 (bus_dma_segment_t *)&sc->alc_rdata.alc_cmb, 1);
1637 return (error);
1638 }
1639
1640 sc->alc_rdata.alc_cmb_paddr =
1641 sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
1642
1643 /*
1644 * Create DMA stuffs for SMB block
1645 */
1646 error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
1647 ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
1648 &sc->alc_cdata.alc_smb_map);
1649 if (error)
1650 return (ENOBUFS);
1651
1652 /* Allocate DMA'able memory for SMB block */
1653 error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
1654 ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
1655 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1656 if (error) {
1657 printf("%s: could not allocate DMA'able memory for "
1658 "SMB block\n", sc->sc_dev.dv_xname);
1659 return (error);
1660 }
1661
1662 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
1663 nsegs, ALC_SMB_SZ, (caddr_t *)&sc->alc_rdata.alc_smb,
1664 BUS_DMA_NOWAIT);
1665 if (error)
1666 return (ENOBUFS);
1667
1668 /* Load the DMA map for SMB block */
1669 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
1670 sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
1671 BUS_DMA_WAITOK);
1672 if (error) {
1673 printf("%s: could not load DMA'able memory for SMB block\n",
1674 sc->sc_dev.dv_xname);
1675 bus_dmamem_free(sc->sc_dmat,
1676 (bus_dma_segment_t *)&sc->alc_rdata.alc_smb, 1);
1677 return (error);
1678 }
1679
1680 sc->alc_rdata.alc_smb_paddr =
1681 sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
1682
1683
1684 /* Create DMA maps for Tx buffers. */
1685 for (i = 0; i < ALC_TX_RING_CNT; i++) {
1686 txd = &sc->alc_cdata.alc_txdesc[i];
1687 txd->tx_m = NULL;
1688 txd->tx_dmamap = NULL;
1689 error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
1690 ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1691 &txd->tx_dmamap);
1692 if (error) {
1693 printf("%s: could not create Tx dmamap.\n",
1694 sc->sc_dev.dv_xname);
1695 return (error);
1696 }
1697 }
1698
1699 /* Create DMA maps for Rx buffers. */
1700 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1701 BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
1702 if (error) {
1703 printf("%s: could not create spare Rx dmamap.\n",
1704 sc->sc_dev.dv_xname);
1705 return (error);
1706 }
1707
1708 for (i = 0; i < ALC_RX_RING_CNT; i++) {
1709 rxd = &sc->alc_cdata.alc_rxdesc[i];
1710 rxd->rx_m = NULL;
1711 rxd->rx_dmamap = NULL;
1712 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1713 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
1714 if (error) {
1715 printf("%s: could not create Rx dmamap.\n",
1716 sc->sc_dev.dv_xname);
1717 return (error);
1718 }
1719 }
1720
1721 return (0);
1722 }
1723
void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->alc_cdata.alc_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
		sc->alc_cdata.alc_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->alc_cdata.alc_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
	if (sc->alc_cdata.alc_tx_ring_map != NULL &&
	    sc->alc_rdata.alc_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_tx_ring, 1);
	sc->alc_rdata.alc_tx_ring = NULL;
	sc->alc_cdata.alc_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
	if (sc->alc_cdata.alc_rx_ring_map != NULL &&
	    sc->alc_rdata.alc_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
	sc->alc_rdata.alc_rx_ring = NULL;
	sc->alc_cdata.alc_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
	if (sc->alc_cdata.alc_rr_ring_map != NULL &&
	    sc->alc_rdata.alc_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_rr_ring, 1);
	sc->alc_rdata.alc_rr_ring = NULL;
	sc->alc_cdata.alc_rr_ring_map = NULL;

	/* CMB block */
	if (sc->alc_cdata.alc_cmb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
	if (sc->alc_cdata.alc_cmb_map != NULL &&
	    sc->alc_rdata.alc_cmb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_cmb, 1);
	sc->alc_rdata.alc_cmb = NULL;
	sc->alc_cdata.alc_cmb_map = NULL;

	/* SMB block */
	if (sc->alc_cdata.alc_smb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
	if (sc->alc_cdata.alc_smb_map != NULL &&
	    sc->alc_rdata.alc_smb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_smb, 1);
	sc->alc_rdata.alc_smb = NULL;
	sc->alc_cdata.alc_smb_map = NULL;
}

int
alc_encap(struct alc_softc *sc, struct mbuf *m)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, idx, prod;

	cflags = vtag = 0;
	poff = 0;

	prod = sc->alc_cdata.alc_tx_prod;
	txd = &sc->alc_cdata.alc_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

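	/*
	 * Note: load the frame as-is first.  If the mbuf chain has
	 * more segments than the map allows, bus_dmamap_load_mbuf()
	 * fails with EFBIG, so compact the chain with m_defrag() and
	 * retry once before giving up and dropping the frame.
	 */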
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	desc = NULL;
	idx = 0;
#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (m->m_flags & M_VLANTAG) {
		vtag = htons(m->m_pkthdr.ether_vtag);
		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
		cflags |= TD_INS_VLAN_TAG;
	}
#endif
	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
		cflags |= TD_CUSTOM_CSUM;
		/* Set checksum start offset. */
		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
		    TD_PLOAD_OFFSET_MASK;
	}

	for (; idx < map->dm_nsegs; idx++) {
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len =
		    htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
		desc->flags = htole32(cflags);
		desc->addr = htole64(map->dm_segs[idx].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->alc_cdata.alc_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
	desc = &sc->alc_rdata.alc_tx_ring[prod];
	desc->flags |= htole32(TD_EOP);

	/*
	 * Swap the dmamaps of the first and the last descriptor.
	 * The map was loaded through the first slot's dmamap, but the
	 * mbuf is recorded on the last slot, so the swap keeps the
	 * loaded map and the mbuf together for alc_txeof() to unload
	 * and free in one place.
	 */
	txd = &sc->alc_cdata.alc_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);

drop:
	m_freem(m);
	return (error);
}

void
alc_start(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq = 0;

	/* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

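	/*
	 * Note: the watermark below keeps ALC_MAXTXSEGS descriptors
	 * (plus a small reserve) free before each dequeue, so even a
	 * maximally fragmented frame cannot overrun the ring between
	 * this check and alc_encap().
	 */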
	for (;;) {
		if (sc->alc_cdata.alc_tx_cnt + ALC_MAXTXSEGS >=
		    ALC_TX_RING_CNT - 3) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		if (alc_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}
		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
		    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Kick. Assume we're using normal Tx priority queue. */
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX,
			    (uint16_t)sc->alc_cdata.alc_tx_prod);
		else
			CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
			    (sc->alc_cdata.alc_tx_prod <<
			    MBOX_TD_PROD_LO_IDX_SHIFT) &
			    MBOX_TD_PROD_LO_IDX_MASK);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALC_TX_TIMEOUT;
	}
}

void
alc_watchdog(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;

	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		alc_init(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	alc_init(ifp);
	alc_start(ifp);
}

int
alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			alc_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				alc_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				alc_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

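	/*
	 * ENETRESET follows the usual convention: the ioctl changed a
	 * setting that only requires reprogramming the receive filter
	 * rather than a full reinitialization, so refresh the filter
	 * here if the interface is running and report success.
	 */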
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			alc_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
alc_mac_config(struct alc_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	if ((sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2 ||
	    sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

void
alc_stats_clear(struct alc_softc *sc)
{
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
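		/*
		 * The MIB counters on chips with the SMB bug appear to
		 * be clear-on-read: walking the register window and
		 * discarding the values is enough to reset them.  The
		 * struct smb members only serve to size the loops.
		 */
		/* Read Rx statistics. */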
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}

void
alc_stats_update(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

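	/*
	 * Note: the collision count below is an estimate.  Multiple
	 * collisions are weighted as two each, and frames that hit
	 * excessive collisions are charged the full retry budget
	 * (HDPX_CFG_RETRY_DEFAULT), since the hardware only reports
	 * how many frames hit each condition, not the exact number
	 * of collisions.
	 */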
	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
	    smb->tx_underrun + smb->tx_pkts_truncated;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

int
alc_intr(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	if ((status & ALC_INTRS) == 0)
		return (0);

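	/*
	 * Writing INTR_DIS_INT masks further interrupts while the
	 * handler runs.  The status register is then re-read so that
	 * any event that raced in between the first read and the mask
	 * is still observed and acknowledged below.
	 */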
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	if ((status & ALC_INTRS) == 0)
		goto back;

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		int error = 0;

		if (status & INTR_RX_PKT) {
			error = alc_rxintr(sc);
			if (error) {
				alc_init(ifp);
				return (0);
			}
		}
		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_TXQ_TO_RST)
				printf("%s: TxQ reset! -- resetting\n",
				    sc->sc_dev.dv_xname);
			alc_init(ifp);
			return (0);
		}

		alc_txeof(sc);
		alc_start(ifp);
	}

	claimed = 1;
back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	return (claimed);
}

void
alc_txeof(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->alc_cdata.alc_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
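	/*
	 * The hardware's Tx consumer index comes from one of two
	 * places: on chips with a working CMB the hardware DMAs the
	 * index into the coalescing message block in host memory,
	 * while chips with the CMB bug have to read it back from the
	 * mailbox register instead.
	 */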
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		prod = sc->alc_rdata.alc_cmb->cons;
	} else {
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX);
		else {
			prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
			/* Assume we're using normal Tx priority queue. */
			prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
			    MBOX_TD_CONS_LO_IDX_SHIFT;
		}
	}
	cons = sc->alc_cdata.alc_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
		if (sc->alc_cdata.alc_tx_cnt <= 0)
			break;
		prog++;
		ifq_clr_oactive(&ifp->if_snd);
		sc->alc_cdata.alc_tx_cnt--;
		txd = &sc->alc_cdata.alc_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	sc->alc_cdata.alc_tx_cons = cons;
	/*
	 * Unarm the watchdog timer only when there are no pending
	 * frames in the Tx queue.
	 */
	if (sc->alc_cdata.alc_tx_cnt == 0)
		ifp->if_timer = 0;
}

int
alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

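	/*
	 * The new mbuf was loaded into the spare map above, so a load
	 * failure leaves the old buffer untouched.  Now that the load
	 * has succeeded, unload the old buffer and swap the
	 * descriptor's map with the spare; the old map becomes the
	 * spare for the next replenish.
	 */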
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
	sc->alc_cdata.alc_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
	    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	return (0);
}

int
alc_rxintr(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = letoh32(rrd->status);
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(letoh32(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			if (alcdebug)
				printf("%s: unexpected segment count -- "
				    "resetting\n", sc->sc_dev.dv_xname);
			return (EIO);
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors so the controller sees
		 * the modified buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/*
		 * Let the controller know about the availability of
		 * new Rx buffers.  Since alc(4) uses
		 * RXQ_CFG_RD_BURST_DEFAULT descriptors, it may be
		 * possible to update ALC_MBOX_RD0_PROD_IDX only when
		 * Rx buffer pre-fetching is required.  In addition, we
		 * already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors.  However,
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX,
			    (uint16_t)sc->alc_cdata.alc_rx_cons);
		else
			CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
			    sc->alc_cdata.alc_rx_cons);
	}

	return (0);
}

/* Receive a frame. */
void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_rxdesc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t rdinfo, status;
	int count, nsegs, rx_cons;

	status = letoh32(rrd->status);
	rdinfo = letoh32(rrd->rdinfo);
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
		/*
		 * We still want to pass the following frames to the
		 * upper layer regardless of the error status of the
		 * Rx return ring:
		 *
		 *  o frames with a bad IP/TCP/UDP checksum
		 *  o frames whose length does not match the protocol
		 *    specific length
		 *
		 * The network stack is forced to compute the checksum
		 * for such errored frames.
		 */
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

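	/*
	 * A frame larger than the Rx buffer size arrives spread over
	 * nsegs consecutive Rx descriptors.  The loop below pulls the
	 * mbuf off each descriptor, refills the slot, and chains the
	 * pieces together until the last segment completes the frame.
	 */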
	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full-sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs. */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			m->m_flags |= M_PKTHDR;
			/*
			 * It seems that the L1C/L2C controller has no
			 * way to tell hardware to strip CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
#if NVLAN > 0
			if (status & RRD_VLAN_TAG) {
				u_int32_t vtag = RRD_VLAN(letoh32(rrd->vtag));
				m->m_pkthdr.ether_vtag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}
#endif

			ml_enqueue(&ml, m);
		}
	}
	if_input(ifp, &ml);

	/* Reset mbuf chains. */
	ALC_RXCHAIN_RESET(sc);
}

void
alc_tick(void *xsc)
{
	struct alc_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	alc_stats_update(sc);

	timeout_add_sec(&sc->alc_tick_ch, 1);
	splx(s);
}

void
alc_osc_reset(struct alc_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, ALC_MISC3);
	reg &= ~MISC3_25M_BY_SW;
	reg |= MISC3_25M_NOTO_INTNL;
	CSR_WRITE_4(sc, ALC_MISC3, reg);
	reg = CSR_READ_4(sc, ALC_MISC);
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
		/*
		 * Restore over-current protection default value.
		 * This value could be reset by MAC reset.
		 */
		reg &= ~MISC_PSW_OCP_MASK;
		reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
		reg &= ~MISC_INTNLOSC_OPEN;
		CSR_WRITE_4(sc, ALC_MISC, reg);
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		reg = CSR_READ_4(sc, ALC_MISC2);
		reg &= ~MISC2_CALB_START;
		CSR_WRITE_4(sc, ALC_MISC2, reg);
		CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);
	} else {
		reg &= ~MISC_INTNLOSC_OPEN;
		/* Disable isolate for revision A devices. */
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		CSR_WRITE_4(sc, ALC_MISC, reg);
	}
	DELAY(20);
}

void
alc_reset(struct alc_softc *sc)
{
	uint32_t reg, pmcfg = 0;
	int i;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Reset workaround. */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			/* Disable L0s/L1s before reset. */
			pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB |
			    PM_CFG_ASPM_L1_ENB)) != 0) {
				pmcfg &= ~(PM_CFG_ASPM_L0S_ENB |
				    PM_CFG_ASPM_L1_ENB);
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
			}
		}
	}
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
			DELAY(10);
			if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
				break;
		}
		if (i == 0)
			printf("%s: MAC reset timeout!\n",
			    sc->sc_dev.dv_xname);
	}
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname);

	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
		    IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname,
		    reg);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			reg = CSR_READ_4(sc, ALC_MASTER_CFG);
			reg |= MASTER_CLK_SEL_DIS;
			CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
			/* Restore L0s/L1s config. */
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB |
			    PM_CFG_ASPM_L1_ENB)) != 0)
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
		}
		alc_osc_reset(sc);
		reg = CSR_READ_4(sc, ALC_MISC3);
		reg &= ~MISC3_25M_BY_SW;
		reg |= MISC3_25M_NOTO_INTNL;
		CSR_WRITE_4(sc, ALC_MISC3, reg);
		reg = CSR_READ_4(sc, ALC_MISC);
		reg &= ~MISC_INTNLOSC_OPEN;
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg);
		DELAY(20);
	}
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
		    CSR_READ_4(sc, ALC_SERDES_LOCK) |
		    SERDES_MAC_CLK_SLOWDOWN | SERDES_PHY_CLK_SLOWDOWN);
}

int
alc_init(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);

	/* Initialize Rx descriptors. */
	error = alc_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		alc_stop(sc);
		return (error);
	}
	alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);

	/* Enable all clocks. */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB |
		    CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB |
		    CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB |
		    CLK_GATING_RXMAC_ENB);
		if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0)
			CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER,
			    IDLE_DECISN_TIMER_DEFAULT_1MS);
	} else
		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALC_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear the WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALC_WOL_CFG);
	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
	/* Set Tx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_tx_ring_paddr;
	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We don't use high priority ring. */
	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
	/* Set Rx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_rx_ring_paddr;
	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* We use one Rx ring. */
		CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
	}
	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);

	/*
	 * Let the hardware split jumbo frames into alc_max_buf_size
	 * chunks when a frame does not fit in one buffer.  The Rx
	 * return descriptor holds a counter that indicates how many
	 * fragments were made by the hardware.  The buffer size should
	 * be a multiple of 8 bytes.  Since the hardware has a limit on
	 * the buffer size, always use the maximum value.
	 * For strict-alignment architectures, make sure to reduce the
	 * buffer size by 8 bytes to make room for the alignment fixup.
	 */
	sc->alc_buf_size = RX_BUF_SIZE_MAX;
	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);

	paddr = sc->alc_rdata.alc_rr_ring_paddr;
	/* Set Rx return descriptor base addresses. */
	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* We use one Rx return ring. */
		CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
	}
	/* Set Rx return descriptor counter. */
	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
	paddr = sc->alc_rdata.alc_cmb_paddr;
	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
	paddr = sc->alc_rdata.alc_smb_paddr;
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));

	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
		/* Reconfigure SRAM - Vendor magic. */
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
	}

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Configure interrupt moderation timer. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
	/*
	 * We don't want automatic interrupt clearing, as the task
	 * queue for the interrupt should know the interrupt status.
	 */
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	reg |= MASTER_SA_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
	    ALC_USECS(sc->alc_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	/*
	 * Disable interrupt re-trigger timer. We don't want automatic
	 * re-triggering of un-ACKed interrupts.
	 */
	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
	/* Configure CMB. */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
		    ALC_USECS(sc->alc_int_tx_mod));
	} else {
		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
		} else
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	}
	/*
	 * The hardware can be configured to issue SMB interrupts based
	 * on a programmed interval.  Since there is a callout that is
	 * invoked every hz in the driver, we use that instead of
	 * relying on periodic SMB interrupts.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use the maximum frame size that the controller can
	 * support.  Otherwise, received frames with a larger frame
	 * length than the alc(4) MTU would be silently dropped in
	 * hardware, which would make path-MTU discovery hard as the
	 * sender wouldn't get any responses from the receiver.
	 * alc(4) supports multi-fragmented frames on the Rx path, so
	 * it has no issue assembling fragmented frames.  Using the
	 * maximum frame size also removes the need to reinitialize
	 * the hardware when the interface MTU configuration changes.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_max_framelen);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable header split(?) */
		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
		/* Configure IPG/IFG parameters. */
		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
		    IPG_IFG_IPGT_MASK) |
		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
		    IPG_IFG_MIFG_MASK) |
		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
		    IPG_IFG_IPG1_MASK) |
		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
		    IPG_IFG_IPG2_MASK));
		/* Set parameters for half-duplex media. */
		CSR_WRITE_4(sc, ALC_HDPX_CFG,
		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
		    HDPX_CFG_LCOL_MASK) |
		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
		    HDPX_CFG_ABEBT_MASK) |
		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
		    HDPX_CFG_JAMIPG_MASK));
	}

	/*
	 * Set the TSO/checksum offload threshold.  For frames that
	 * are larger than this threshold, the hardware won't do
	 * TSO or checksum offloading.
	 */
	reg = (sc->alc_max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
	    TSO_OFFLOAD_THRESH_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
		reg >>= 1;
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
		    HQTD_CFG_BURST_ENB);
		CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
		reg = WRR_PRI_RESTRICT_NONE;
		reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
		CSR_WRITE_4(sc, ALC_WRR, reg);
	} else {
		/* Configure Rx free descriptor pre-fetching. */
		CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
		    ((RX_RD_FREE_THRESH_HI_DEFAULT <<
		    RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
		    ((RX_RD_FREE_THRESH_LO_DEFAULT <<
		    RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
	}

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 */
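	/*
	 * Worked example for the L1C/L2C branch below, assuming the
	 * FIFO length register reads 4096 units: rxf_hi =
	 * 4096 * 8 / 10 = 3276 (the 80% XON mark) and rxf_lo =
	 * 4096 * 3 / 10 = 1228 (the 30% XOFF mark).  The AR816x
	 * branch instead converts the FIFO length to bytes, reserves
	 * fixed headroom (RX_FIFO_PAUSE_816X_RSVD), and programs the
	 * result back in 8-byte units.
	 */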
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		reg &= SRAM_RX_FIFO_LEN_MASK;
		reg *= 8;
		if (reg > 8 * 1024)
			reg -= RX_FIFO_PAUSE_816X_RSVD;
		else
			reg -= RX_BUF_SIZE_MAX;
		reg /= 8;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
		    RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	} else if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 8) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable RSS until I understand L1C/L2C's RSS logic. */
		CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
		CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
	}

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
		    RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
		    RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	} else {
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
		    sc->sc_product != PCI_PRODUCT_ATTANSIC_L1D_1)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	}
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		switch (AR816X_REV(sc->alc_rev)) {
		case AR816X_REV_A0:
		case AR816X_REV_A1:
			reg |= DMA_CFG_RD_CHNL_SEL_2;
			break;
		case AR816X_REV_B0:
			/* FALLTHROUGH */
		default:
			reg |= DMA_CFG_RD_CHNL_SEL_4;
			break;
		}
	}
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Actual reconfiguration of the MAC for the resolved
	 * speed/duplex follows once link establishment is detected.
	 * AR813x/AR815x always performs checksum computation
	 * regardless of the MAC_CFG_RXCSUM_ENB bit.  The controller
	 * is also known to have a bug in the protocol field of the
	 * Rx return structure, so these controllers can't handle
	 * fragmented frames.  Disable Rx checksum offloading until
	 * there is a newer controller that has a sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_iff(sc);

	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	alc_mediachange(ifp);

	timeout_add_sec(&sc->alc_tick_ch, 1);

	return (0);
}

void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	timeout_del(&sc->alc_tick_ch);
	sc->alc_flags &= ~ALC_FLAG_LINK;

	alc_stats_update(sc);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);

	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Disable L0s/L1s */
	reg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((reg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) != 0) {
		reg &= ~(PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB);
		CSR_WRITE_4(sc, ALC_PM_CFG, reg);
	}

	/* Reclaim Rx buffers that have been processed. */
	m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
			    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	alc_stop_queue(sc);
	/* Disable Rx/Tx MAC. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
		    sc->sc_dev.dv_xname, reg);
}

void
alc_start_queue(struct alc_softc *sc)
{
	uint32_t qcfg[] = {
		0,
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_ENB
	};
	uint32_t cfg;

	/* Enable RxQ. */
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		cfg &= ~RXQ_CFG_ENB;
		cfg |= qcfg[1];
	} else
		cfg |= RXQ_CFG_QUEUE0_ENB;

	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	/* Enable TxQ. */
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}

void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable RxQ. */
	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		if ((reg & RXQ_CFG_ENB) != 0) {
			reg &= ~RXQ_CFG_ENB;
			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
		}
	} else {
		if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
			reg &= ~RXQ_CFG_QUEUE0_ENB;
			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
		}
	}
	/* Disable TxQ. */
	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
		reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
	DELAY(40);
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
		    sc->sc_dev.dv_xname, reg);
}

void
alc_init_tx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_txdesc *txd;
	int i;

	sc->alc_cdata.alc_tx_prod = 0;
	sc->alc_cdata.alc_tx_cons = 0;
	sc->alc_cdata.alc_tx_cnt = 0;

	rd = &sc->alc_rdata;
	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

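	/*
	 * Note: alc_rx_cons also serves as the producer index written
	 * to the ALC_MBOX_RD0_PROD_IDX mailbox below; starting it at
	 * ALC_RX_RING_CNT - 1 hands all but one of the freshly filled
	 * descriptors to the hardware.
	 */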
	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
	rd = &sc->alc_rdata;
	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since the controller does not update Rx descriptors, the
	 * driver does not have to read Rx descriptors back, so
	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	/* Let controller know availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}

void
alc_init_rr_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	sc->alc_cdata.alc_rr_cons = 0;
	ALC_RXCHAIN_RESET(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
alc_init_cmb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	rd = &sc->alc_rdata;
	bzero(rd->alc_cmb, ALC_CMB_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
	    sc->alc_cdata.alc_cmb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
alc_init_smb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	rd = &sc->alc_rdata;
	bzero(rd->alc_smb, ALC_SMB_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
	    sc->alc_cdata.alc_smb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
alc_rxvlan(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	else
		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

void
alc_iff(struct alc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		bzero(mchash, sizeof(mchash));

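		/*
		 * The 64-bit multicast hash filter is indexed by the
		 * top bits of the big-endian CRC32 of each address:
		 * bit 31 of the CRC selects one of the two 32-bit MAR
		 * words and bits 30-26 select the bit within it.
		 */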
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
}