1 /*-
2 * Copyright (c) 1997, 1998
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 /*
35 * RealTek 8129/8139 PCI NIC driver
36 *
37 * Supports several extremely cheap PCI 10/100 adapters based on
38 * the RealTek chipset. Datasheets can be obtained from
39 * www.realtek.com.tw.
40 *
41 * Written by Bill Paul <wpaul@ctr.columbia.edu>
42 * Electrical Engineering Department
43 * Columbia University, New York City
44 */
45 /*
46 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
47 * probably the worst PCI ethernet controller ever made, with the possible
48 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
49 * DMA, but it has a terrible interface that nullifies any performance
50 * gains that bus-master DMA usually offers.
51 *
52 * For transmission, the chip offers a series of four TX descriptor
53 * registers. Each transmit frame must be in a contiguous buffer, aligned
54 * on a longword (32-bit) boundary. This means we almost always have to
55 * do mbuf copies in order to transmit a frame, except in the unlikely
56 * case where a) the packet fits into a single mbuf, and b) the packet
57 * is 32-bit aligned within the mbuf's data area. The presence of only
58 * four descriptor registers means that we can never have more than four
59 * packets queued for transmission at any one time.
60 *
61 * Reception is not much better. The driver has to allocate a single large
62 * buffer area (up to 64K in size) into which the chip will DMA received
63 * frames. Because we don't know where within this region received packets
64 * will begin or end, we have no choice but to copy data from the buffer
65 * area into mbufs in order to pass the packets up to the higher protocol
66 * levels.
67 *
68 * It's impossible given this rotten design to really achieve decent
69 * performance at 100Mbps, unless you happen to have a 400Mhz PII or
70 * some equally overmuscled CPU to drive it.
71 *
72 * On the bright side, the 8139 does have a built-in PHY, although
73 * rather than using an MDIO serial interface like most other NICs, the
74 * PHY registers are directly accessible through the 8139's register
75 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
76 * filter.
77 *
78 * The 8129 chip is an older version of the 8139 that uses an external PHY
79 * chip. The 8129 has a serial MDIO interface for accessing the MII where
80 * the 8139 lets you directly access the on-board PHY registers. We need
81 * to select which interface to use depending on the chip type.
82 */
83
84 #ifdef HAVE_KERNEL_OPTION_HEADERS
85 #include "opt_device_polling.h"
86 #endif
87
88 #include <sys/param.h>
89 #include <sys/endian.h>
90 #include <sys/systm.h>
91 #include <sys/sockio.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sysctl.h>
98
99 #include <net/if.h>
100 #include <net/if_var.h>
101 #include <net/if_arp.h>
102 #include <net/ethernet.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_types.h>
106
107 #include <net/bpf.h>
108
109 #include <machine/bus.h>
110 #include <machine/resource.h>
111 #include <sys/bus.h>
112 #include <sys/rman.h>
113
114 #include <dev/mii/mii.h>
115 #include <dev/mii/mii_bitbang.h>
116 #include <dev/mii/miivar.h>
117
118 #include <dev/pci/pcireg.h>
119 #include <dev/pci/pcivar.h>
120
121 MODULE_DEPEND(rl, pci, 1, 1, 1);
122 MODULE_DEPEND(rl, ether, 1, 1, 1);
123 MODULE_DEPEND(rl, miibus, 1, 1, 1);
124
125 /* "device miibus" required. See GENERIC if you get errors here. */
126 #include "miibus_if.h"
127
128 #include <dev/rl/if_rlreg.h>
129
130 /*
131 * Various supported device vendors/types and their names.
132 */
133 static const struct rl_type rl_devs[] = {
134 { RT_VENDORID, RT_DEVICEID_8129, RL_8129,
135 "RealTek 8129 10/100BaseTX" },
136 { RT_VENDORID, RT_DEVICEID_8139, RL_8139,
137 "RealTek 8139 10/100BaseTX" },
138 { RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
139 "RealTek 8139 10/100BaseTX" },
140 { RT_VENDORID, RT_DEVICEID_8138, RL_8139,
141 "RealTek 8139 10/100BaseTX CardBus" },
142 { RT_VENDORID, RT_DEVICEID_8100, RL_8139,
143 "RealTek 8100 10/100BaseTX" },
144 { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
145 "Accton MPX 5030/5038 10/100BaseTX" },
146 { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
147 "Delta Electronics 8139 10/100BaseTX" },
148 { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
149 "Addtron Technology 8139 10/100BaseTX" },
150 { DLINK_VENDORID, DLINK_DEVICEID_520TX_REVC1, RL_8139,
151 "D-Link DFE-520TX (rev. C1) 10/100BaseTX" },
152 { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
153 "D-Link DFE-530TX+ 10/100BaseTX" },
154 { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
155 "D-Link DFE-690TXD 10/100BaseTX" },
156 { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
157 "Nortel Networks 10/100BaseTX" },
158 { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
159 "Corega FEther CB-TXD" },
160 { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
161 "Corega FEtherII CB-TXD" },
162 { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
163 "Peppercon AG ROL-F" },
164 { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
165 "Planex FNW-3603-TX" },
166 { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
167 "Planex FNW-3800-TX" },
168 { CP_VENDORID, RT_DEVICEID_8139, RL_8139,
169 "Compaq HNE-300" },
170 { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
171 "LevelOne FPC-0106TX" },
172 { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
173 "Edimax EP-4103DL CardBus" }
174 };
175
176 static int rl_attach(device_t);
177 static int rl_detach(device_t);
178 static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
179 static int rl_dma_alloc(struct rl_softc *);
180 static void rl_dma_free(struct rl_softc *);
181 static void rl_eeprom_putbyte(struct rl_softc *, int);
182 static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
183 static int rl_encap(struct rl_softc *, struct mbuf **);
184 static int rl_list_tx_init(struct rl_softc *);
185 static int rl_list_rx_init(struct rl_softc *);
186 static int rl_ifmedia_upd(if_t);
187 static void rl_ifmedia_sts(if_t, struct ifmediareq *);
188 static int rl_ioctl(if_t, u_long, caddr_t);
189 static void rl_intr(void *);
190 static void rl_init(void *);
191 static void rl_init_locked(struct rl_softc *sc);
192 static int rl_miibus_readreg(device_t, int, int);
193 static void rl_miibus_statchg(device_t);
194 static int rl_miibus_writereg(device_t, int, int, int);
195 #ifdef DEVICE_POLLING
196 static int rl_poll(if_t ifp, enum poll_cmd cmd, int count);
197 static int rl_poll_locked(if_t ifp, enum poll_cmd cmd, int count);
198 #endif
199 static int rl_probe(device_t);
200 static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
201 static void rl_reset(struct rl_softc *);
202 static int rl_resume(device_t);
203 static int rl_rxeof(struct rl_softc *);
204 static void rl_rxfilter(struct rl_softc *);
205 static int rl_shutdown(device_t);
206 static void rl_start(if_t);
207 static void rl_start_locked(if_t);
208 static void rl_stop(struct rl_softc *);
209 static int rl_suspend(device_t);
210 static void rl_tick(void *);
211 static void rl_txeof(struct rl_softc *);
212 static void rl_watchdog(struct rl_softc *);
213 static void rl_setwol(struct rl_softc *);
214 static void rl_clrwol(struct rl_softc *);
215
216 /*
217 * MII bit-bang glue
218 */
219 static uint32_t rl_mii_bitbang_read(device_t);
220 static void rl_mii_bitbang_write(device_t, uint32_t);
221
222 static const struct mii_bitbang_ops rl_mii_bitbang_ops = {
223 rl_mii_bitbang_read,
224 rl_mii_bitbang_write,
225 {
226 RL_MII_DATAOUT, /* MII_BIT_MDO */
227 RL_MII_DATAIN, /* MII_BIT_MDI */
228 RL_MII_CLK, /* MII_BIT_MDC */
229 RL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */
230 0, /* MII_BIT_DIR_PHY_HOST */
231 }
232 };
233
/* newbus method table: device lifecycle plus the miibus accessors. */
static device_method_t rl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rl_probe),
	DEVMETHOD(device_attach,	rl_attach),
	DEVMETHOD(device_detach,	rl_detach),
	DEVMETHOD(device_suspend,	rl_suspend),
	DEVMETHOD(device_resume,	rl_resume),
	DEVMETHOD(device_shutdown,	rl_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),

	DEVMETHOD_END
};
250
/* Driver declaration: name, methods and per-device softc size. */
static driver_t rl_driver = {
	"rl",
	rl_methods,
	sizeof(struct rl_softc)
};
256
/* Register on both the pci and cardbus buses; attach miibus beneath us. */
DRIVER_MODULE(rl, pci, rl_driver, 0, 0);
/*
 * NOTE(review): the table has no sentinel entry, so "nitems(rl_devs) - 1"
 * excludes the last (Edimax) entry from PNP matching — verify against
 * rl_probe(), which scans all nitems(rl_devs) entries.
 */
MODULE_PNP_INFO("U16:vendor;U16:device", pci, rl, rl_devs,
    nitems(rl_devs) - 1);
DRIVER_MODULE(rl, cardbus, rl_driver, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, 0, 0);
262
/*
 * Set/clear bits in the EEPROM command register via read-modify-write.
 * The argument is parenthesized in the expansion so that a compound bit
 * expression (e.g. EE_SET(A|B)) cannot mis-bind against the ~ or |
 * operators.  Both macros rely on a variable named 'sc' in the caller.
 */
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | (x))

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~(x))
270
271 /*
272 * Send a read command and address to the EEPROM, check for ACK.
273 */
274 static void
rl_eeprom_putbyte(struct rl_softc * sc,int addr)275 rl_eeprom_putbyte(struct rl_softc *sc, int addr)
276 {
277 int d, i;
278
279 d = addr | sc->rl_eecmd_read;
280
281 /*
282 * Feed in each bit and strobe the clock.
283 */
284 for (i = 0x400; i; i >>= 1) {
285 if (d & i) {
286 EE_SET(RL_EE_DATAIN);
287 } else {
288 EE_CLR(RL_EE_DATAIN);
289 }
290 DELAY(100);
291 EE_SET(RL_EE_CLK);
292 DELAY(150);
293 EE_CLR(RL_EE_CLK);
294 DELAY(100);
295 }
296 }
297
298 /*
299 * Read a word of data stored in the EEPROM at address 'addr.'
300 */
301 static void
rl_eeprom_getword(struct rl_softc * sc,int addr,uint16_t * dest)302 rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
303 {
304 int i;
305 uint16_t word = 0;
306
307 /* Enter EEPROM access mode. */
308 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
309
310 /*
311 * Send address of word we want to read.
312 */
313 rl_eeprom_putbyte(sc, addr);
314
315 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
316
317 /*
318 * Start reading bits from EEPROM.
319 */
320 for (i = 0x8000; i; i >>= 1) {
321 EE_SET(RL_EE_CLK);
322 DELAY(100);
323 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
324 word |= i;
325 EE_CLR(RL_EE_CLK);
326 DELAY(100);
327 }
328
329 /* Turn off EEPROM access mode. */
330 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
331
332 *dest = word;
333 }
334
335 /*
336 * Read a sequence of words from the EEPROM.
337 */
338 static void
rl_read_eeprom(struct rl_softc * sc,uint8_t * dest,int off,int cnt,int swap)339 rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
340 {
341 int i;
342 uint16_t word = 0, *ptr;
343
344 for (i = 0; i < cnt; i++) {
345 rl_eeprom_getword(sc, off + i, &word);
346 ptr = (uint16_t *)(dest + (i * 2));
347 if (swap)
348 *ptr = ntohs(word);
349 else
350 *ptr = word;
351 }
352 }
353
354 /*
355 * Read the MII serial port for the MII bit-bang module.
356 */
357 static uint32_t
rl_mii_bitbang_read(device_t dev)358 rl_mii_bitbang_read(device_t dev)
359 {
360 struct rl_softc *sc;
361 uint32_t val;
362
363 sc = device_get_softc(dev);
364
365 val = CSR_READ_1(sc, RL_MII);
366 CSR_BARRIER(sc, RL_MII, 1,
367 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
368
369 return (val);
370 }
371
372 /*
373 * Write the MII serial port for the MII bit-bang module.
374 */
375 static void
rl_mii_bitbang_write(device_t dev,uint32_t val)376 rl_mii_bitbang_write(device_t dev, uint32_t val)
377 {
378 struct rl_softc *sc;
379
380 sc = device_get_softc(dev);
381
382 CSR_WRITE_1(sc, RL_MII, val);
383 CSR_BARRIER(sc, RL_MII, 1,
384 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
385 }
386
387 static int
rl_miibus_readreg(device_t dev,int phy,int reg)388 rl_miibus_readreg(device_t dev, int phy, int reg)
389 {
390 struct rl_softc *sc;
391 uint16_t rl8139_reg;
392
393 sc = device_get_softc(dev);
394
395 if (sc->rl_type == RL_8139) {
396 switch (reg) {
397 case MII_BMCR:
398 rl8139_reg = RL_BMCR;
399 break;
400 case MII_BMSR:
401 rl8139_reg = RL_BMSR;
402 break;
403 case MII_ANAR:
404 rl8139_reg = RL_ANAR;
405 break;
406 case MII_ANER:
407 rl8139_reg = RL_ANER;
408 break;
409 case MII_ANLPAR:
410 rl8139_reg = RL_LPAR;
411 break;
412 case MII_PHYIDR1:
413 case MII_PHYIDR2:
414 return (0);
415 /*
416 * Allow the rlphy driver to read the media status
417 * register. If we have a link partner which does not
418 * support NWAY, this is the register which will tell
419 * us the results of parallel detection.
420 */
421 case RL_MEDIASTAT:
422 return (CSR_READ_1(sc, RL_MEDIASTAT));
423 default:
424 device_printf(sc->rl_dev, "bad phy register\n");
425 return (0);
426 }
427 return (CSR_READ_2(sc, rl8139_reg));
428 }
429
430 return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg));
431 }
432
433 static int
rl_miibus_writereg(device_t dev,int phy,int reg,int data)434 rl_miibus_writereg(device_t dev, int phy, int reg, int data)
435 {
436 struct rl_softc *sc;
437 uint16_t rl8139_reg;
438
439 sc = device_get_softc(dev);
440
441 if (sc->rl_type == RL_8139) {
442 switch (reg) {
443 case MII_BMCR:
444 rl8139_reg = RL_BMCR;
445 break;
446 case MII_BMSR:
447 rl8139_reg = RL_BMSR;
448 break;
449 case MII_ANAR:
450 rl8139_reg = RL_ANAR;
451 break;
452 case MII_ANER:
453 rl8139_reg = RL_ANER;
454 break;
455 case MII_ANLPAR:
456 rl8139_reg = RL_LPAR;
457 break;
458 case MII_PHYIDR1:
459 case MII_PHYIDR2:
460 return (0);
461 break;
462 default:
463 device_printf(sc->rl_dev, "bad phy register\n");
464 return (0);
465 }
466 CSR_WRITE_2(sc, rl8139_reg, data);
467 return (0);
468 }
469
470 mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);
471
472 return (0);
473 }
474
475 static void
rl_miibus_statchg(device_t dev)476 rl_miibus_statchg(device_t dev)
477 {
478 struct rl_softc *sc;
479 if_t ifp;
480 struct mii_data *mii;
481
482 sc = device_get_softc(dev);
483 mii = device_get_softc(sc->rl_miibus);
484 ifp = sc->rl_ifp;
485 if (mii == NULL || ifp == NULL ||
486 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
487 return;
488
489 sc->rl_flags &= ~RL_FLAG_LINK;
490 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
491 (IFM_ACTIVE | IFM_AVALID)) {
492 switch (IFM_SUBTYPE(mii->mii_media_active)) {
493 case IFM_10_T:
494 case IFM_100_TX:
495 sc->rl_flags |= RL_FLAG_LINK;
496 break;
497 default:
498 break;
499 }
500 }
501 /*
502 * RealTek controllers do not provide any interface to
503 * Tx/Rx MACs for resolved speed, duplex and flow-control
504 * parameters.
505 */
506 }
507
508 static u_int
rl_hash_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)509 rl_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
510 {
511 uint32_t *hashes = arg;
512 int h;
513
514 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
515 if (h < 32)
516 hashes[0] |= (1 << h);
517 else
518 hashes[1] |= (1 << (h - 32));
519
520 return (1);
521 }
522
523 /*
524 * Program the 64-bit multicast hash filter.
525 */
526 static void
rl_rxfilter(struct rl_softc * sc)527 rl_rxfilter(struct rl_softc *sc)
528 {
529 if_t ifp = sc->rl_ifp;
530 uint32_t hashes[2] = { 0, 0 };
531 uint32_t rxfilt;
532
533 RL_LOCK_ASSERT(sc);
534
535 rxfilt = CSR_READ_4(sc, RL_RXCFG);
536 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
537 RL_RXCFG_RX_MULTI);
538 /* Always accept frames destined for this host. */
539 rxfilt |= RL_RXCFG_RX_INDIV;
540 /* Set capture broadcast bit to capture broadcast frames. */
541 if (if_getflags(ifp) & IFF_BROADCAST)
542 rxfilt |= RL_RXCFG_RX_BROAD;
543 if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
544 rxfilt |= RL_RXCFG_RX_MULTI;
545 if (if_getflags(ifp) & IFF_PROMISC)
546 rxfilt |= RL_RXCFG_RX_ALLPHYS;
547 hashes[0] = 0xFFFFFFFF;
548 hashes[1] = 0xFFFFFFFF;
549 } else {
550 /* Now program new ones. */
551 if_foreach_llmaddr(ifp, rl_hash_maddr, hashes);
552 if (hashes[0] != 0 || hashes[1] != 0)
553 rxfilt |= RL_RXCFG_RX_MULTI;
554 }
555
556 CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
557 CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
558 CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
559 }
560
561 static void
rl_reset(struct rl_softc * sc)562 rl_reset(struct rl_softc *sc)
563 {
564 int i;
565
566 RL_LOCK_ASSERT(sc);
567
568 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
569
570 for (i = 0; i < RL_TIMEOUT; i++) {
571 DELAY(10);
572 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
573 break;
574 }
575 if (i == RL_TIMEOUT)
576 device_printf(sc->rl_dev, "reset never completed!\n");
577 }
578
579 /*
580 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
581 * IDs against our list and return a device name if we find a match.
582 */
583 static int
rl_probe(device_t dev)584 rl_probe(device_t dev)
585 {
586 const struct rl_type *t;
587 uint16_t devid, revid, vendor;
588 int i;
589
590 vendor = pci_get_vendor(dev);
591 devid = pci_get_device(dev);
592 revid = pci_get_revid(dev);
593
594 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
595 if (revid == 0x20) {
596 /* 8139C+, let re(4) take care of this device. */
597 return (ENXIO);
598 }
599 }
600 t = rl_devs;
601 for (i = 0; i < nitems(rl_devs); i++, t++) {
602 if (vendor == t->rl_vid && devid == t->rl_did) {
603 device_set_desc(dev, t->rl_name);
604 return (BUS_PROBE_DEFAULT);
605 }
606 }
607
608 return (ENXIO);
609 }
610
611 struct rl_dmamap_arg {
612 bus_addr_t rl_busaddr;
613 };
614
615 static void
rl_dmamap_cb(void * arg,bus_dma_segment_t * segs,int nsegs,int error)616 rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
617 {
618 struct rl_dmamap_arg *ctx;
619
620 if (error != 0)
621 return;
622
623 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
624
625 ctx = (struct rl_dmamap_arg *)arg;
626 ctx->rl_busaddr = segs[0].ds_addr;
627 }
628
629 /*
630 * Attach the interface. Allocate softc structures, do ifmedia
631 * setup and ethernet/BPF attach.
632 */
633 static int
rl_attach(device_t dev)634 rl_attach(device_t dev)
635 {
636 uint8_t eaddr[ETHER_ADDR_LEN];
637 uint16_t as[3];
638 if_t ifp;
639 struct rl_softc *sc;
640 const struct rl_type *t;
641 struct sysctl_ctx_list *ctx;
642 struct sysctl_oid_list *children;
643 int error = 0, hwrev, i, phy, pmc, rid;
644 int prefer_iomap, unit;
645 uint16_t rl_did = 0;
646 char tn[32];
647
648 sc = device_get_softc(dev);
649 unit = device_get_unit(dev);
650 sc->rl_dev = dev;
651
652 sc->rl_twister_enable = 0;
653 snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
654 TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
655 ctx = device_get_sysctl_ctx(sc->rl_dev);
656 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
657 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
658 &sc->rl_twister_enable, 0, "");
659
660 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
661 MTX_DEF);
662 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
663
664 pci_enable_busmaster(dev);
665
666 /*
667 * Map control/status registers.
668 * Default to using PIO access for this driver. On SMP systems,
669 * there appear to be problems with memory mapped mode: it looks
670 * like doing too many memory mapped access back to back in rapid
671 * succession can hang the bus. I'm inclined to blame this on
672 * crummy design/construction on the part of RealTek. Memory
673 * mapped mode does appear to work on uniprocessor systems though.
674 */
675 prefer_iomap = 1;
676 snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
677 TUNABLE_INT_FETCH(tn, &prefer_iomap);
678 if (prefer_iomap) {
679 sc->rl_res_id = PCIR_BAR(0);
680 sc->rl_res_type = SYS_RES_IOPORT;
681 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
682 &sc->rl_res_id, RF_ACTIVE);
683 }
684 if (prefer_iomap == 0 || sc->rl_res == NULL) {
685 sc->rl_res_id = PCIR_BAR(1);
686 sc->rl_res_type = SYS_RES_MEMORY;
687 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
688 &sc->rl_res_id, RF_ACTIVE);
689 }
690 if (sc->rl_res == NULL) {
691 device_printf(dev, "couldn't map ports/memory\n");
692 error = ENXIO;
693 goto fail;
694 }
695
696 #ifdef notdef
697 /*
698 * Detect the Realtek 8139B. For some reason, this chip is very
699 * unstable when left to autoselect the media
700 * The best workaround is to set the device to the required
701 * media type or to set it to the 10 Meg speed.
702 */
703 if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
704 device_printf(dev,
705 "Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
706 #endif
707
708 sc->rl_btag = rman_get_bustag(sc->rl_res);
709 sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
710
711 /* Allocate interrupt */
712 rid = 0;
713 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
714 RF_SHAREABLE | RF_ACTIVE);
715
716 if (sc->rl_irq[0] == NULL) {
717 device_printf(dev, "couldn't map interrupt\n");
718 error = ENXIO;
719 goto fail;
720 }
721
722 sc->rl_cfg0 = RL_8139_CFG0;
723 sc->rl_cfg1 = RL_8139_CFG1;
724 sc->rl_cfg2 = 0;
725 sc->rl_cfg3 = RL_8139_CFG3;
726 sc->rl_cfg4 = RL_8139_CFG4;
727 sc->rl_cfg5 = RL_8139_CFG5;
728
729 /*
730 * Reset the adapter. Only take the lock here as it's needed in
731 * order to call rl_reset().
732 */
733 RL_LOCK(sc);
734 rl_reset(sc);
735 RL_UNLOCK(sc);
736
737 sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
738 rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
739 if (rl_did != 0x8129)
740 sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
741
742 /*
743 * Get station address from the EEPROM.
744 */
745 rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
746 for (i = 0; i < 3; i++) {
747 eaddr[(i * 2) + 0] = as[i] & 0xff;
748 eaddr[(i * 2) + 1] = as[i] >> 8;
749 }
750
751 /*
752 * Now read the exact device type from the EEPROM to find
753 * out if it's an 8129 or 8139.
754 */
755 rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);
756
757 t = rl_devs;
758 sc->rl_type = 0;
759 while(t->rl_name != NULL) {
760 if (rl_did == t->rl_did) {
761 sc->rl_type = t->rl_basetype;
762 break;
763 }
764 t++;
765 }
766
767 if (sc->rl_type == 0) {
768 device_printf(dev, "unknown device ID: %x assuming 8139\n",
769 rl_did);
770 sc->rl_type = RL_8139;
771 /*
772 * Read RL_IDR register to get ethernet address as accessing
773 * EEPROM may not extract correct address.
774 */
775 for (i = 0; i < ETHER_ADDR_LEN; i++)
776 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
777 }
778
779 if ((error = rl_dma_alloc(sc)) != 0)
780 goto fail;
781
782 ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
783 if (ifp == NULL) {
784 device_printf(dev, "can not if_alloc()\n");
785 error = ENOSPC;
786 goto fail;
787 }
788
789 #define RL_PHYAD_INTERNAL 0
790
791 /* Do MII setup */
792 phy = MII_PHY_ANY;
793 if (sc->rl_type == RL_8139)
794 phy = RL_PHYAD_INTERNAL;
795 error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
796 rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
797 if (error != 0) {
798 device_printf(dev, "attaching PHYs failed\n");
799 goto fail;
800 }
801
802 if_setsoftc(ifp, sc);
803 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
804 if_setmtu(ifp, ETHERMTU);
805 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
806 if_setioctlfn(ifp, rl_ioctl);
807 if_setstartfn(ifp, rl_start);
808 if_setinitfn(ifp, rl_init);
809 if_setcapabilities(ifp, IFCAP_VLAN_MTU);
810 /* Check WOL for RTL8139B or newer controllers. */
811 if (sc->rl_type == RL_8139 &&
812 pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
813 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
814 switch (hwrev) {
815 case RL_HWREV_8139B:
816 case RL_HWREV_8130:
817 case RL_HWREV_8139C:
818 case RL_HWREV_8139D:
819 case RL_HWREV_8101:
820 case RL_HWREV_8100:
821 if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
822 /* Disable WOL. */
823 rl_clrwol(sc);
824 break;
825 default:
826 break;
827 }
828 }
829 if_setcapenable(ifp, if_getcapabilities(ifp));
830 if_setcapenablebit(ifp, 0, (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST));
831 #ifdef DEVICE_POLLING
832 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
833 #endif
834 if_setsendqlen(ifp, ifqmaxlen);
835 if_setsendqready(ifp);
836
837 /*
838 * Call MI attach routine.
839 */
840 ether_ifattach(ifp, eaddr);
841
842 /* Hook interrupt last to avoid having to lock softc */
843 error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
844 NULL, rl_intr, sc, &sc->rl_intrhand[0]);
845 if (error) {
846 device_printf(sc->rl_dev, "couldn't set up irq\n");
847 ether_ifdetach(ifp);
848 }
849
850 fail:
851 if (error)
852 rl_detach(dev);
853
854 return (error);
855 }
856
857 /*
858 * Shutdown hardware and free up resources. This can be called any
859 * time after the mutex has been initialized. It is called in both
860 * the error case in attach and the normal detach case so it needs
861 * to be careful about only freeing resources that have actually been
862 * allocated.
863 */
864 static int
rl_detach(device_t dev)865 rl_detach(device_t dev)
866 {
867 struct rl_softc *sc;
868 if_t ifp;
869
870 sc = device_get_softc(dev);
871 ifp = sc->rl_ifp;
872
873 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
874
875 #ifdef DEVICE_POLLING
876 if (if_getcapenable(ifp) & IFCAP_POLLING)
877 ether_poll_deregister(ifp);
878 #endif
879 /* These should only be active if attach succeeded */
880 if (device_is_attached(dev)) {
881 RL_LOCK(sc);
882 rl_stop(sc);
883 RL_UNLOCK(sc);
884 callout_drain(&sc->rl_stat_callout);
885 ether_ifdetach(ifp);
886 }
887 #if 0
888 sc->suspended = 1;
889 #endif
890 if (sc->rl_miibus)
891 device_delete_child(dev, sc->rl_miibus);
892 bus_generic_detach(dev);
893
894 if (sc->rl_intrhand[0])
895 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
896 if (sc->rl_irq[0])
897 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
898 if (sc->rl_res)
899 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
900 sc->rl_res);
901
902 if (ifp)
903 if_free(ifp);
904
905 rl_dma_free(sc);
906
907 mtx_destroy(&sc->rl_mtx);
908
909 return (0);
910 }
911
/*
 * Allocate all DMA resources: a parent tag, the Rx ring buffer (one
 * contiguous block the chip DMAs received frames into) and per-slot Tx
 * dmamaps.  On failure the partially allocated state is left in place;
 * the caller (rl_attach()) invokes rl_detach() -> rl_dma_free() to
 * clean up.  Returns 0 on success or a bus_dma error code.
 */
static int
rl_dma_alloc(struct rl_softc *sc)
{
	struct rl_dmamap_arg ctx;
	int error, i;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_parent_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create parent DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx memory block. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1, /* maxsize,nsegments */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_rx_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create Rx memory block DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Tx buffer. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_tx_tag);
	if (error) {
		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load DMA map for Rx memory block.
	 */
	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
	if (error != 0) {
		device_printf(sc->rl_dev,
		    "failed to allocate Rx DMA memory block.\n");
		goto fail;
	}
	/* 0 is the sentinel: rl_dmamap_cb() leaves it untouched on error. */
	ctx.rl_busaddr = 0;
	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.rl_busaddr == 0) {
		device_printf(sc->rl_dev,
		    "could not load Rx DMA memory block.\n");
		goto fail;
	}
	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		sc->rl_cdata.rl_tx_chain[i] = NULL;
		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
		    &sc->rl_cdata.rl_tx_dmamap[i]);
		if (error != 0) {
			device_printf(sc->rl_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/*
	 * Leave a few bytes before the start of the RX ring buffer.
	 * rl_rx_buf_ptr keeps the true allocation start (for free/bzero);
	 * rl_rx_buf is the offset pointer handed to the chip.
	 */
	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;

fail:
	return (error);
}
1011
1012 static void
rl_dma_free(struct rl_softc * sc)1013 rl_dma_free(struct rl_softc *sc)
1014 {
1015 int i;
1016
1017 /* Rx memory block. */
1018 if (sc->rl_cdata.rl_rx_tag != NULL) {
1019 if (sc->rl_cdata.rl_rx_buf_paddr != 0)
1020 bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
1021 sc->rl_cdata.rl_rx_dmamap);
1022 if (sc->rl_cdata.rl_rx_buf_ptr != NULL)
1023 bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
1024 sc->rl_cdata.rl_rx_buf_ptr,
1025 sc->rl_cdata.rl_rx_dmamap);
1026 sc->rl_cdata.rl_rx_buf_ptr = NULL;
1027 sc->rl_cdata.rl_rx_buf = NULL;
1028 sc->rl_cdata.rl_rx_buf_paddr = 0;
1029 bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
1030 sc->rl_cdata.rl_tx_tag = NULL;
1031 }
1032
1033 /* Tx buffers. */
1034 if (sc->rl_cdata.rl_tx_tag != NULL) {
1035 for (i = 0; i < RL_TX_LIST_CNT; i++) {
1036 if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
1037 bus_dmamap_destroy(
1038 sc->rl_cdata.rl_tx_tag,
1039 sc->rl_cdata.rl_tx_dmamap[i]);
1040 sc->rl_cdata.rl_tx_dmamap[i] = NULL;
1041 }
1042 }
1043 bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
1044 sc->rl_cdata.rl_tx_tag = NULL;
1045 }
1046
1047 if (sc->rl_parent_tag != NULL) {
1048 bus_dma_tag_destroy(sc->rl_parent_tag);
1049 sc->rl_parent_tag = NULL;
1050 }
1051 }
1052
1053 /*
1054 * Initialize the transmit descriptors.
1055 */
1056 static int
rl_list_tx_init(struct rl_softc * sc)1057 rl_list_tx_init(struct rl_softc *sc)
1058 {
1059 struct rl_chain_data *cd;
1060 int i;
1061
1062 RL_LOCK_ASSERT(sc);
1063
1064 cd = &sc->rl_cdata;
1065 for (i = 0; i < RL_TX_LIST_CNT; i++) {
1066 cd->rl_tx_chain[i] = NULL;
1067 CSR_WRITE_4(sc,
1068 RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
1069 }
1070
1071 sc->rl_cdata.cur_tx = 0;
1072 sc->rl_cdata.last_tx = 0;
1073
1074 return (0);
1075 }
1076
1077 static int
rl_list_rx_init(struct rl_softc * sc)1078 rl_list_rx_init(struct rl_softc *sc)
1079 {
1080
1081 RL_LOCK_ASSERT(sc);
1082
1083 bzero(sc->rl_cdata.rl_rx_buf_ptr,
1084 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
1085 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, sc->rl_cdata.rl_rx_dmamap,
1086 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1087
1088 return (0);
1089 }
1090
1091 /*
1092 * A frame has been uploaded: pass the resulting mbuf chain up to
1093 * the higher level protocols.
1094 *
1095 * You know there's something wrong with a PCI bus-master chip design
1096 * when you have to use m_devget().
1097 *
1098 * The receive operation is badly documented in the datasheet, so I'll
1099 * attempt to document it here. The driver provides a buffer area and
1100 * places its base address in the RX buffer start address register.
1101 * The chip then begins copying frames into the RX buffer. Each frame
1102 * is preceded by a 32-bit RX status word which specifies the length
1103 * of the frame and certain other status bits. Each frame (starting with
1104 * the status word) is also 32-bit aligned. The frame length is in the
1105 * first 16 bits of the status word; the lower 15 bits correspond with
1106 * the 'rx status register' mentioned in the datasheet.
1107 *
1108 * Note: to make the Alpha happy, the frame payload needs to be aligned
1109 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
1110 * as the offset argument to m_devget().
1111 */
static int
rl_rxeof(struct rl_softc *sc)
{
	struct mbuf *m;
	if_t ifp = sc->rl_ifp;
	uint8_t *rxbufpos;
	int total_len = 0;
	int wrap = 0;
	int rx_npkts = 0;		/* packets handed to the stack; returned */
	uint32_t rxstat;
	uint16_t cur_rx;		/* our read offset into the Rx ring */
	uint16_t limit;			/* chip's current write offset */
	uint16_t max_bytes, rx_bytes = 0;

	RL_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * RL_CURRXADDR is kept 16 bytes behind the true read offset
	 * (see the mirrored "- 16" when it is written back below).
	 */
	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	/* Bytes available between our offset and the chip's, ring-wrapped. */
	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	/* The EMPTY_RXBUF command bit clears while frames remain buffered. */
	while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		/* Each frame starts with a little-endian 32-bit status word. */
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(uint32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RL_RXSTAT_UNFINISHED)
			break;

		/*
		 * Any Rx error or implausible length means the ring is
		 * corrupt; count it and reinitialize the whole interface.
		 */
		if (!(rxstat & RL_RXSTAT_RXOK) ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			rl_init_locked(sc);
			return (rx_npkts);
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + 4;

		/*
		 * XXX The RealTek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		/* Payload starts right after the 32-bit status word. */
		rxbufpos = sc->rl_cdata.rl_rx_buf +
		    ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

		/* Bytes left before the end of the ring buffer. */
		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
		if (total_len > wrap) {
			/*
			 * Frame wraps: copy the tail from the start of
			 * the ring into the mbuf after the head portion.
			 */
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m != NULL)
				m_copyback(m, wrap, total_len - wrap,
				    sc->rl_cdata.rl_rx_buf);
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/* Round up to 32-bit boundary. */
		cur_rx = (cur_rx + 3) & ~3;
		/* Keep the hardware pointer 16 bytes behind (see above). */
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		if (m == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			continue;
		}

		/* Drop the lock while the stack processes the packet. */
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		RL_UNLOCK(sc);
		if_input(ifp, m);
		RL_LOCK(sc);
		rx_npkts++;
	}

	/* No need to sync Rx memory block as we didn't modify it. */
	return (rx_npkts);
}
1230
1231 /*
1232 * A frame was downloaded to the chip. It's safe for us to clean up
1233 * the list buffers.
1234 */
static void
rl_txeof(struct rl_softc *sc)
{
	if_t ifp = sc->rl_ifp;
	uint32_t txstat;

	RL_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		/* Empty slot: nothing pending at last_tx. */
		if (RL_LAST_TXMBUF(sc) == NULL)
			break;
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		/* Slot not yet completed (neither OK nor failed). */
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		/* Collision count lives in bits 27:24 of the Tx status. */
		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & RL_TXSTAT_COLLCNT) >> 24);

		/* Release the DMA mapping and the transmitted mbuf. */
		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
		m_freem(RL_LAST_TXMBUF(sc));
		RL_LAST_TXMBUF(sc) = NULL;
		/*
		 * If there was a transmit underrun, bump the TX threshold.
		 * Make sure not to overflow the 63 * 32byte we can address
		 * with the 6 available bit.
		 */
		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
		    (sc->rl_txthresh < 2016))
			sc->rl_txthresh += 32;
		if (txstat & RL_TXSTAT_TX_OK)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		else {
			int oldthresh;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			rl_init_locked(sc);
			/* restore original threshold */
			sc->rl_txthresh = oldthresh;
			return;
		}
		/* Advance to the next Tx slot and clear the busy mark. */
		RL_INC(sc->rl_cdata.last_tx);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	/* All slots drained: disarm the transmit watchdog. */
	if (RL_LAST_TXMBUF(sc) == NULL)
		sc->rl_watchdog_timer = 0;
}
1293
static void
rl_twister_update(struct rl_softc *sc)
{
	uint16_t linktest;
	/*
	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
	 * Linux driver. Values undocumented otherwise.
	 */
	static const uint32_t param[4][4] = {
		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
	};

	/*
	 * Tune the so-called twister registers of the RTL8139. These
	 * are used to compensate for impedance mismatches. The
	 * method for tuning these registers is undocumented and the
	 * following procedure is collected from public sources.
	 *
	 * This implements one step of a state machine driven from
	 * rl_tick(); sc->rl_twister holds the current state and
	 * advances toward DONE, which disables further tuning.
	 */
	switch (sc->rl_twister)
	{
	case CHK_LINK:
		/*
		 * If we have a sufficient link, then we can proceed in
		 * the state machine to the next stage. If not, then
		 * disable further tuning after writing sane defaults.
		 */
		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
			sc->rl_twister = FIND_ROW;
		} else {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
			sc->rl_twister = DONE;
		}
		break;
	case FIND_ROW:
		/*
		 * Read how long it took to see the echo to find the tuning
		 * row to use.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twist_row = 3;
		else if (linktest == RL_CSCFG_ROW2)
			sc->rl_twist_row = 2;
		else if (linktest == RL_CSCFG_ROW1)
			sc->rl_twist_row = 1;
		else
			sc->rl_twist_row = 0;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;
	case SET_PARAM:
		/* Write one table entry per tick, one column at a time. */
		if (sc->rl_twist_col == 0)
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		CSR_WRITE_4(sc, RL_PARA7C,
		    param[sc->rl_twist_row][sc->rl_twist_col]);
		if (++sc->rl_twist_col == 4) {
			/* Row 3 (longest cable) gets a re-check pass. */
			if (sc->rl_twist_row == 3)
				sc->rl_twister = RECHK_LONG;
			else
				sc->rl_twister = DONE;
		}
		break;
	case RECHK_LONG:
		/*
		 * For long cables, we have to double check to make sure we
		 * don't mistune.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twister = DONE;
		else {
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
			sc->rl_twister = RETUNE;
		}
		break;
	case RETUNE:
		/* Retune for a shorter cable (try column 2) */
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		sc->rl_twist_row--;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;

	case DONE:
		break;
	}

}
1392
1393 static void
rl_tick(void * xsc)1394 rl_tick(void *xsc)
1395 {
1396 struct rl_softc *sc = xsc;
1397 struct mii_data *mii;
1398 int ticks;
1399
1400 RL_LOCK_ASSERT(sc);
1401 /*
1402 * If we're doing the twister cable calibration, then we need to defer
1403 * watchdog timeouts. This is a no-op in normal operations, but
1404 * can falsely trigger when the cable calibration takes a while and
1405 * there was traffic ready to go when rl was started.
1406 *
1407 * We don't defer mii_tick since that updates the mii status, which
1408 * helps the twister process, at least according to similar patches
1409 * for the Linux driver I found online while doing the fixes. Worst
1410 * case is a few extra mii reads during calibration.
1411 */
1412 mii = device_get_softc(sc->rl_miibus);
1413 mii_tick(mii);
1414 if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1415 rl_miibus_statchg(sc->rl_dev);
1416 if (sc->rl_twister_enable) {
1417 if (sc->rl_twister == DONE)
1418 rl_watchdog(sc);
1419 else
1420 rl_twister_update(sc);
1421 if (sc->rl_twister == DONE)
1422 ticks = hz;
1423 else
1424 ticks = hz / 10;
1425 } else {
1426 rl_watchdog(sc);
1427 ticks = hz;
1428 }
1429
1430 callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
1431 }
1432
1433 #ifdef DEVICE_POLLING
1434 static int
rl_poll(if_t ifp,enum poll_cmd cmd,int count)1435 rl_poll(if_t ifp, enum poll_cmd cmd, int count)
1436 {
1437 struct rl_softc *sc = if_getsoftc(ifp);
1438 int rx_npkts = 0;
1439
1440 RL_LOCK(sc);
1441 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1442 rx_npkts = rl_poll_locked(ifp, cmd, count);
1443 RL_UNLOCK(sc);
1444 return (rx_npkts);
1445 }
1446
static int
rl_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	int rx_npkts;

	RL_LOCK_ASSERT(sc);

	/* Bound the Rx work done by rl_rxeof() to 'count' cycles. */
	sc->rxcycles = count;
	rx_npkts = rl_rxeof(sc);
	rl_txeof(sc);

	if (!if_sendq_empty(ifp))
		rl_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* We should also check the status register. */
		status = CSR_READ_2(sc, RL_ISR);
		/* 0xffff means the card has been removed. */
		if (status == 0xffff)
			return (rx_npkts);
		if (status != 0)
			CSR_WRITE_2(sc, RL_ISR, status);

		/* XXX We should check behaviour on receiver stalls. */

		if (status & RL_ISR_SYSTEM_ERR) {
			/* Fatal error: reinitialize the interface. */
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			rl_init_locked(sc);
		}
	}
	return (rx_npkts);
}
1481 #endif /* DEVICE_POLLING */
1482
static void
rl_intr(void *arg)
{
	struct rl_softc *sc = arg;
	if_t ifp = sc->rl_ifp;
	uint16_t status;
	int count;

	RL_LOCK(sc);

	/* Spurious interrupts can arrive while suspended; ignore them. */
	if (sc->suspended)
		goto done_locked;

#ifdef DEVICE_POLLING
	/* In polling mode, rl_poll() does all the work. */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		goto done_locked;
#endif

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		goto done_locked2;
	status = CSR_READ_2(sc, RL_ISR);
	/* 0xffff: card gone; no RL_INTRS bits: not our interrupt. */
	if (status == 0xffff || (status & RL_INTRS) == 0)
		goto done_locked;
	/*
	 * Ours, disable further interrupts.
	 */
	CSR_WRITE_2(sc, RL_IMR, 0);
	/* Service at most 16 rounds so we cannot livelock in the handler. */
	for (count = 16; count > 0; count--) {
		/* Ack the bits we are about to service. */
		CSR_WRITE_2(sc, RL_ISR, status);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
				rl_rxeof(sc);
			if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
				rl_txeof(sc);
			if (status & RL_ISR_SYSTEM_ERR) {
				/* Fatal: reinit; rl_init_locked re-arms IMR. */
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				rl_init_locked(sc);
				RL_UNLOCK(sc);
				return;
			}
		}
		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away, the read returns 0xffff. */
		if (status == 0xffff || (status & RL_INTRS) == 0)
			break;
	}

	if (!if_sendq_empty(ifp))
		rl_start_locked(ifp);

done_locked2:
	/* Re-enable interrupts (they were masked above). */
	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
done_locked:
	RL_UNLOCK(sc);
}
1539
1540 /*
1541 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1542 * pointers to the fragment pointers.
1543 */
static int
rl_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	bus_dma_segment_t txsegs[1];
	int error, nsegs, padlen;

	RL_LOCK_ASSERT(sc);

	m = *m_head;
	padlen = 0;
	/*
	 * Hardware doesn't auto-pad, so we have to make sure
	 * pad short frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
	/*
	 * The RealTek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet. We have to copy pretty much all the time.
	 */
	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
		/* Collapse to a single, aligned mbuf with room for padding. */
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			/* Defrag failed: caller's mbuf is consumed here. */
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
	}
	*m_head = m;

	if (padlen > 0) {
		/*
		 * Make security-conscious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	/* Map the (single-segment) frame for DMA. */
	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
	if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Record the mbuf in the current slot and hand the address to HW. */
	RL_CUR_TXMBUF(sc) = m;
	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
	    BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));

	return (0);
}
1606
1607 /*
1608 * Main transmit routine.
1609 */
1610 static void
rl_start(if_t ifp)1611 rl_start(if_t ifp)
1612 {
1613 struct rl_softc *sc = if_getsoftc(ifp);
1614
1615 RL_LOCK(sc);
1616 rl_start_locked(ifp);
1617 RL_UNLOCK(sc);
1618 }
1619
static void
rl_start_locked(if_t ifp)
{
	struct rl_softc *sc = if_getsoftc(ifp);
	struct mbuf *m_head = NULL;

	RL_LOCK_ASSERT(sc);

	/* Bail unless running, not already busy, and link is up. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	/* Fill Tx slots until the current one is occupied or queue empties. */
	while (RL_CUR_TXMBUF(sc) == NULL) {
		m_head = if_dequeue(ifp);

		if (m_head == NULL)
			break;

		if (rl_encap(sc, &m_head)) {
			/* m_head == NULL: encap consumed/freed the mbuf. */
			if (m_head == NULL)
				break;
			/* Transient failure: requeue and mark busy. */
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		/* Pass a copy of this mbuf chain to the bpf subsystem. */
		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));

		/* Transmit the frame. */
		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
		    RL_TXTHRESH(sc->rl_txthresh) |
		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);

		RL_INC(sc->rl_cdata.cur_tx);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->rl_watchdog_timer = 5;
	}

	/*
	 * We broke out of the loop because all our TX slots are
	 * full. Mark the NIC as busy until it drains some of the
	 * packets from the queue.
	 */
	if (RL_CUR_TXMBUF(sc) != NULL)
		if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
}
1668
static void
rl_init(void *xsc)
{
	struct rl_softc *sc;

	/* Locked wrapper around rl_init_locked(). */
	sc = xsc;
	RL_LOCK(sc);
	rl_init_locked(sc);
	RL_UNLOCK(sc);
}
1678
static void
rl_init_locked(struct rl_softc *sc)
{
	if_t ifp = sc->rl_ifp;
	struct mii_data *mii;
	uint32_t eaddr[2];

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	/* Nothing to do if the interface is already up and running. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);

	rl_reset(sc);
	if (sc->rl_twister_enable) {
		/*
		 * Reset twister register tuning state. The twister
		 * registers and their tuning are undocumented, but
		 * are necessary to cope with bad links. rl_twister =
		 * DONE here will disable this entirely.
		 */
		sc->rl_twister = CHK_LINK;
	}

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	bzero(eaddr, sizeof(eaddr));
	bcopy(if_getlladdr(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Init the RX memory block pointer register. */
	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
	    RL_RX_8139_BUF_RESERVE);
	/* Init TX descriptors. */
	rl_list_tx_init(sc);
	/* Init Rx memory block. */
	rl_list_rx_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set RX filter. */
	rl_rxfilter(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/* Force a fresh link-state evaluation via MII. */
	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(mii);

	CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	/* Kick off the periodic tick (watchdog / twister / MII). */
	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
}
1771
1772 /*
1773 * Set media options.
1774 */
1775 static int
rl_ifmedia_upd(if_t ifp)1776 rl_ifmedia_upd(if_t ifp)
1777 {
1778 struct rl_softc *sc = if_getsoftc(ifp);
1779 struct mii_data *mii;
1780
1781 mii = device_get_softc(sc->rl_miibus);
1782
1783 RL_LOCK(sc);
1784 mii_mediachg(mii);
1785 RL_UNLOCK(sc);
1786
1787 return (0);
1788 }
1789
1790 /*
1791 * Report current media status.
1792 */
1793 static void
rl_ifmedia_sts(if_t ifp,struct ifmediareq * ifmr)1794 rl_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
1795 {
1796 struct rl_softc *sc = if_getsoftc(ifp);
1797 struct mii_data *mii;
1798
1799 mii = device_get_softc(sc->rl_miibus);
1800
1801 RL_LOCK(sc);
1802 mii_pollstat(mii);
1803 ifmr->ifm_active = mii->mii_media_active;
1804 ifmr->ifm_status = mii->mii_media_status;
1805 RL_UNLOCK(sc);
1806 }
1807
static int
rl_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	struct rl_softc *sc = if_getsoftc(ifp);
	int error = 0, mask;

	switch (command) {
	case SIOCSIFFLAGS:
		RL_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * If only PROMISC/ALLMULTI changed while running,
			 * reprogram the Rx filter; otherwise (re)init.
			 */
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
			    ((if_getflags(ifp) ^ sc->rl_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)))
				rl_rxfilter(sc);
			else
				rl_init_locked(sc);
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			rl_stop(sc);
		/* Remember the flags for the next delta comparison. */
		sc->rl_if_flags = if_getflags(ifp);
		RL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast list changed: rebuild the hardware filter. */
		RL_LOCK(sc);
		rl_rxfilter(sc);
		RL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media handling is delegated to the MII layer. */
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* Bits the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		/* Switching interrupts -> polling. */
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
			error = ether_poll_register(rl_poll, ifp);
			if (error)
				return(error);
			RL_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, RL_IMR, 0x0000);
			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
			RL_UNLOCK(sc);
			return (error);

		}
		/* Switching polling -> interrupts. */
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    if_getcapenable(ifp) & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			RL_LOCK(sc);
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
			RL_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		/* Toggle the requested WOL sub-capabilities if supported. */
		if ((mask & IFCAP_WOL) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_UCAST);
			if ((mask & IFCAP_WOL_MCAST) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MCAST);
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		}
		break;
	default:
		/* Everything else is handled generically. */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1886
1887 static void
rl_watchdog(struct rl_softc * sc)1888 rl_watchdog(struct rl_softc *sc)
1889 {
1890
1891 RL_LOCK_ASSERT(sc);
1892
1893 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer >0)
1894 return;
1895
1896 device_printf(sc->rl_dev, "watchdog timeout\n");
1897 if_inc_counter(sc->rl_ifp, IFCOUNTER_OERRORS, 1);
1898
1899 rl_txeof(sc);
1900 rl_rxeof(sc);
1901 if_setdrvflagbits(sc->rl_ifp, 0, IFF_DRV_RUNNING);
1902 rl_init_locked(sc);
1903 }
1904
1905 /*
1906 * Stop the adapter and free any mbufs allocated to the
1907 * RX and TX lists.
1908 */
static void
rl_stop(struct rl_softc *sc)
{
	int i;
	if_t ifp = sc->rl_ifp;

	RL_LOCK_ASSERT(sc);

	/* Disarm the watchdog and stop the periodic tick. */
	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->rl_flags &= ~RL_FLAG_LINK;

	/* Halt the MAC and mask all interrupts. */
	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	/* Wait for Rx/Tx enable bits to clear. */
	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RL_COMMAND) &
		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i]);
			m_freem(sc->rl_cdata.rl_tx_chain[i]);
			sc->rl_cdata.rl_tx_chain[i] = NULL;
			/* Clear the slot's Tx start-address register too. */
			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
			    0x0000000);
		}
	}
}
1950
1951 /*
1952 * Device suspend routine. Stop the interface and save some PCI
1953 * settings in case the BIOS doesn't restore them properly on
1954 * resume.
1955 */
1956 static int
rl_suspend(device_t dev)1957 rl_suspend(device_t dev)
1958 {
1959 struct rl_softc *sc;
1960
1961 sc = device_get_softc(dev);
1962
1963 RL_LOCK(sc);
1964 rl_stop(sc);
1965 rl_setwol(sc);
1966 sc->suspended = 1;
1967 RL_UNLOCK(sc);
1968
1969 return (0);
1970 }
1971
1972 /*
1973 * Device resume routine. Restore some PCI settings in case the BIOS
1974 * doesn't, re-enable busmastering, and restart the interface if
1975 * appropriate.
1976 */
static int
rl_resume(device_t dev)
{
	struct rl_softc *sc;
	if_t ifp;
	int pmc;		/* offset of the PCI power-management capability */
	uint16_t pmstat;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	/* Only touch PM state if the device supports WOL and has a PM cap. */
	if ((if_getcapabilities(ifp) & IFCAP_WOL) != 0 &&
	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->rl_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->rl_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
		/*
		 * Clear WOL matching such that normal Rx filtering
		 * wouldn't interfere with WOL patterns.
		 */
		rl_clrwol(sc);
	}

	/* reinitialize interface if necessary */
	if (if_getflags(ifp) & IFF_UP)
		rl_init_locked(sc);

	sc->suspended = 0;

	RL_UNLOCK(sc);

	return (0);
}
2017
2018 /*
2019 * Stop all chip I/O so that the kernel's probe routines don't
2020 * get confused by errant DMAs when rebooting.
2021 */
2022 static int
rl_shutdown(device_t dev)2023 rl_shutdown(device_t dev)
2024 {
2025 struct rl_softc *sc;
2026
2027 sc = device_get_softc(dev);
2028
2029 RL_LOCK(sc);
2030 rl_stop(sc);
2031 /*
2032 * Mark interface as down since otherwise we will panic if
2033 * interrupt comes in later on, which can happen in some
2034 * cases.
2035 */
2036 if_setflagbits(sc->rl_ifp, 0, IFF_UP);
2037 rl_setwol(sc);
2038 RL_UNLOCK(sc);
2039
2040 return (0);
2041 }
2042
static void
rl_setwol(struct rl_softc *sc)
{
	if_t ifp;
	int pmc;		/* offset of the PCI power-management capability */
	uint16_t pmstat;
	uint8_t v;

	RL_LOCK_ASSERT(sc);

	/* Nothing to do without WOL support and a PCI PM capability. */
	ifp = sc->rl_ifp;
	if ((if_getcapabilities(ifp) & IFCAP_WOL) == 0)
		return;
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
	v = CSR_READ_1(sc, sc->rl_cfg1);
	v &= ~RL_CFG1_PME;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, v);

	/* Magic-packet wakeup lives in CFG3. */
	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Unicast/multicast/broadcast wakeup and LANWAKE live in CFG5. */
	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
2096
2097 static void
rl_clrwol(struct rl_softc * sc)2098 rl_clrwol(struct rl_softc *sc)
2099 {
2100 if_t ifp;
2101 uint8_t v;
2102
2103 ifp = sc->rl_ifp;
2104 if ((if_getcapabilities(ifp) & IFCAP_WOL) == 0)
2105 return;
2106
2107 /* Enable config register write. */
2108 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2109
2110 v = CSR_READ_1(sc, sc->rl_cfg3);
2111 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2112 CSR_WRITE_1(sc, sc->rl_cfg3, v);
2113
2114 /* Config register write done. */
2115 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2116
2117 v = CSR_READ_1(sc, sc->rl_cfg5);
2118 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2119 v &= ~RL_CFG5_WOL_LANWAKE;
2120 CSR_WRITE_1(sc, sc->rl_cfg5, v);
2121 }
2122