1 /* $OpenBSD: dc.c,v 1.159 2024/11/05 18:58:59 miod Exp $ */
2
3 /*
4 * Copyright (c) 1997, 1998, 1999
5 * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Bill Paul.
18 * 4. Neither the name of the author nor the names of any co-contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: src/sys/pci/if_dc.c,v 1.43 2001/01/19 23:55:07 wpaul Exp $
35 */
36
37 /*
38 * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143
39 * series chips and several workalikes including the following:
40 *
41 * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com)
42 * Macronix/Lite-On 82c115 PNIC II (www.macronix.com)
43 * Lite-On 82c168/82c169 PNIC (www.litecom.com)
44 * ASIX Electronics AX88140A (www.asix.com.tw)
45 * ASIX Electronics AX88141 (www.asix.com.tw)
46 * ADMtek AL981 (www.admtek.com.tw)
47 * ADMtek AN983 (www.admtek.com.tw)
48 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com)
49 * Accton EN1217, EN2242 (www.accton.com)
50 * Xircom X3201 (www.xircom.com)
51 *
52 * Datasheets for the 21143 are available at developer.intel.com.
53 * Datasheets for the clone parts can be found at their respective sites.
54 * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.)
55 * The PNIC II is essentially a Macronix 98715A chip; the only difference
56 * worth noting is that its multicast hash table is only 128 bits wide
57 * instead of 512.
58 *
59 * Written by Bill Paul <wpaul@ee.columbia.edu>
60 * Electrical Engineering Department
61 * Columbia University, New York City
62 */
63
64 /*
65 * The Intel 21143 is the successor to the DEC 21140. It is basically
66 * the same as the 21140 but with a few new features. The 21143 supports
 * four kinds of media attachments:
68 *
69 * o MII port, for 10Mbps and 100Mbps support and NWAY
70 * autonegotiation provided by an external PHY.
71 * o SYM port, for symbol mode 100Mbps support.
72 * o 10baseT port.
73 * o AUI/BNC port.
74 *
75 * The 100Mbps SYM port and 10baseT port can be used together in
76 * combination with the internal NWAY support to create a 10/100
77 * autosensing configuration.
78 *
79 * Note that not all tulip workalikes are handled in this driver: we only
80 * deal with those which are relatively well behaved. The Winbond is
81 * handled separately due to its different register offsets and the
82 * special handling needed for its various bugs. The PNIC is handled
83 * here, but I'm not thrilled about it.
84 *
85 * All of the workalike chips use some form of MII transceiver support
86 * with the exception of the Macronix chips, which also have a SYM port.
87 * The ASIX AX88140A is also documented to have a SYM port, but all
88 * the cards I've seen use an MII transceiver, probably because the
89 * AX88140A doesn't support internal NWAY.
90 */
91
92 #include "bpfilter.h"
93
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/mbuf.h>
97 #include <sys/socket.h>
98 #include <sys/ioctl.h>
99 #include <sys/errno.h>
100 #include <sys/malloc.h>
101 #include <sys/kernel.h>
102 #include <sys/device.h>
103 #include <sys/timeout.h>
104
105 #include <net/if.h>
106
107 #include <netinet/in.h>
108 #include <netinet/if_ether.h>
109
110 #include <net/if_media.h>
111
112 #if NBPFILTER > 0
113 #include <net/bpf.h>
114 #endif
115
116 #include <dev/mii/mii.h>
117 #include <dev/mii/miivar.h>
118
119 #include <machine/bus.h>
120 #include <dev/pci/pcidevs.h>
121
122 #include <dev/ic/dcreg.h>
123
124 /*
125 * The Davicom DM9102 has a broken DMA engine that reads beyond the
126 * end of the programmed transfer. Architectures with a proper IOMMU
127 * (such as sparc64) will trap on this access. To avoid having to
128 * copy each transmitted mbuf to guarantee enough trailing space,
129 * those architectures should implement BUS_DMA_OVERRUN that takes
130 * appropriate action to tolerate this behaviour.
131 */
132 #ifndef BUS_DMA_OVERRUN
133 #define BUS_DMA_OVERRUN 0
134 #endif
135
136 int dc_intr(void *);
137 int dc_newbuf(struct dc_softc *, int, struct mbuf *);
138 int dc_encap(struct dc_softc *, bus_dmamap_t, struct mbuf *, u_int32_t *);
139
140 void dc_pnic_rx_bug_war(struct dc_softc *, int);
141 int dc_rx_resync(struct dc_softc *);
142 int dc_rxeof(struct dc_softc *);
143 void dc_txeof(struct dc_softc *);
144 void dc_tick(void *);
145 void dc_tx_underrun(struct dc_softc *);
146 void dc_start(struct ifnet *);
147 int dc_ioctl(struct ifnet *, u_long, caddr_t);
148 void dc_watchdog(struct ifnet *);
149 int dc_ifmedia_upd(struct ifnet *);
150 void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *);
151
152 void dc_delay(struct dc_softc *);
153 void dc_eeprom_width(struct dc_softc *);
154 void dc_eeprom_idle(struct dc_softc *);
155 void dc_eeprom_putbyte(struct dc_softc *, int);
156 void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *);
157 void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *);
158 void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *);
159 void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int);
160
161 void dc_mii_writebit(struct dc_softc *, int);
162 int dc_mii_readbit(struct dc_softc *);
163 void dc_mii_sync(struct dc_softc *);
164 void dc_mii_send(struct dc_softc *, u_int32_t, int);
165 int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *);
166 int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *);
167 int dc_miibus_readreg(struct device *, int, int);
168 void dc_miibus_writereg(struct device *, int, int, int);
169 void dc_miibus_statchg(struct device *);
170
171 void dc_setcfg(struct dc_softc *, uint64_t);
172 u_int32_t dc_crc_le(struct dc_softc *, caddr_t);
173 u_int32_t dc_crc_be(caddr_t);
174 void dc_setfilt_21143(struct dc_softc *);
175 void dc_setfilt_asix(struct dc_softc *);
176 void dc_setfilt_admtek(struct dc_softc *);
177 void dc_setfilt_xircom(struct dc_softc *);
178
179 void dc_setfilt(struct dc_softc *);
180
181 void dc_reset(struct dc_softc *);
182 int dc_list_rx_init(struct dc_softc *);
183 int dc_list_tx_init(struct dc_softc *);
184
185 void dc_read_srom(struct dc_softc *, int);
186 void dc_parse_21143_srom(struct dc_softc *);
187 void dc_decode_leaf_sia(struct dc_softc *,
188 struct dc_eblock_sia *);
189 void dc_decode_leaf_mii(struct dc_softc *,
190 struct dc_eblock_mii *);
191 void dc_decode_leaf_sym(struct dc_softc *,
192 struct dc_eblock_sym *);
193 void dc_apply_fixup(struct dc_softc *, uint64_t);
194
195 #define DC_SETBIT(sc, reg, x) \
196 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
197
198 #define DC_CLRBIT(sc, reg, x) \
199 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))
200
201 #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x))
202 #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x))
203
204 void
dc_delay(struct dc_softc * sc)205 dc_delay(struct dc_softc *sc)
206 {
207 int idx;
208
209 for (idx = (300 / 33) + 1; idx > 0; idx--)
210 CSR_READ_4(sc, DC_BUSCTL);
211 }
212
/*
 * Probe the address width (in bits) of the serial EEPROM and store
 * it in sc->dc_romwidth for later use by dc_eeprom_putbyte().
 */
void
dc_eeprom_width(struct dc_softc *sc)
{
	int i;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Clock out the 3-bit READ opcode (the constant 6 = 110b), MSB first. */
	for (i = 3; i--;) {
		if (6 & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Clock in zero address bits until the EEPROM pulls its data
	 * output low, which marks the end of the address field; the
	 * number of clocks taken is the device's address width.
	 */
	for (i = 1; i <= 12; i++) {
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) {
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
			dc_delay(sc);
			break;
		}
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	/*
	 * Fall back to a 6-bit address width if the probe produced an
	 * implausible result (i == 13 means no low bit was ever seen).
	 */
	if (i < 4 || i > 12)
		sc->dc_romwidth = 6;
	else
		sc->dc_romwidth = i;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);
}
276
/*
 * Force the EEPROM into its idle state: select it in read mode,
 * issue 25 clock strobes, then deselect it and clear the SIO register.
 */
void
dc_eeprom_idle(struct dc_softc *sc)
{
	int i;

	/* Select the EEPROM and put the ROM interface in read mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/* Strobe the clock 25 times to flush any in-progress command. */
	for (i = 0; i < 25; i++) {
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Drop clock and chip select, then clear the whole register. */
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);
	CSR_WRITE_4(sc, DC_SIO, 0x00000000);
}
304
305 /*
 * Send a read command and address to the EEPROM.
307 */
void
dc_eeprom_putbyte(struct dc_softc *sc, int addr)
{
	int d, i;

	/* The 3-bit READ opcode lives in the top bits of DC_EECMD_READ. */
	d = DC_EECMD_READ >> 6;

	/* Clock out the opcode, MSB first. */
	for (i = 3; i--; ) {
		if (d & (1 << i))
			DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		else
			DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN);
		dc_delay(sc);
		DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
		DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/*
	 * Feed in each address bit and strobe the clock. The address
	 * width (sc->dc_romwidth) was probed by dc_eeprom_width().
	 */
	for (i = sc->dc_romwidth; i--;) {
		if (addr & (1 << i)) {
			SIO_SET(DC_SIO_EE_DATAIN);
		} else {
			SIO_CLR(DC_SIO_EE_DATAIN);
		}
		dc_delay(sc);
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}
}
343
344 /*
345 * Read a word of data stored in the EEPROM at address 'addr.'
346 * The PNIC 82c168/82c169 has its own non-standard way to read
347 * the EEPROM.
348 */
void
dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int32_t r;

	/* Post a read request to the PNIC's EEPROM control register. */
	CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr);

	/*
	 * Poll until the busy bit clears, then take the low 16 bits
	 * as the result. If the poll times out, *dest is left
	 * unmodified (callers pass in a zeroed word).
	 */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(1);
		r = CSR_READ_4(sc, DC_SIO);
		if (!(r & DC_PN_SIOCTL_BUSY)) {
			*dest = (u_int16_t)(r & 0xFFFF);
			return;
		}
	}
}
366
367 /*
368 * Read a word of data stored in the EEPROM at address 'addr.'
369 * The Xircom X3201 has its own non-standard way to read
370 * the EEPROM, too.
371 */
void
dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	/* Select the ROM and put it in read mode. */
	SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);

	/*
	 * The X3201's ROM is byte-addressed: fetch the low and high
	 * bytes of the word separately and combine them little-endian.
	 * NOTE(review): the 0x160 bits OR'ed into the ROM command are
	 * undocumented here - presumably a read opcode; confirm
	 * against the X3201 datasheet.
	 */
	addr *= 2;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff;
	addr += 1;
	CSR_WRITE_4(sc, DC_ROM, addr | 0x160);
	*dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8;

	/* Deselect the ROM again. */
	SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ);
}
386
387 /*
388 * Read a word of data stored in the EEPROM at address 'addr.'
389 */
void
dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	dc_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ);
	dc_delay(sc);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK);
	dc_delay(sc);
	DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS);
	dc_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Shift in the 16 data bits, MSB first, sampling the data
	 * output line after each rising clock edge.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;
}
432
433 /*
434 * Read a sequence of words from the EEPROM.
435 */
436 void
dc_read_eeprom(struct dc_softc * sc,caddr_t dest,int off,int cnt,int swap)437 dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt,
438 int swap)
439 {
440 int i;
441 u_int16_t word = 0, *ptr;
442
443 for (i = 0; i < cnt; i++) {
444 if (DC_IS_PNIC(sc))
445 dc_eeprom_getword_pnic(sc, off + i, &word);
446 else if (DC_IS_XIRCOM(sc))
447 dc_eeprom_getword_xircom(sc, off + i, &word);
448 else
449 dc_eeprom_getword(sc, off + i, &word);
450 ptr = (u_int16_t *)(dest + (i * 2));
451 if (swap)
452 *ptr = betoh16(word);
453 else
454 *ptr = letoh16(word);
455 }
456 }
457
458 /*
459 * The following two routines are taken from the Macronix 98713
460 * Application Notes pp.19-21.
461 */
462 /*
463 * Write a bit to the MII bus.
464 */
465 void
dc_mii_writebit(struct dc_softc * sc,int bit)466 dc_mii_writebit(struct dc_softc *sc, int bit)
467 {
468 if (bit)
469 CSR_WRITE_4(sc, DC_SIO,
470 DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
471 else
472 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
473
474 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
475 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
476 }
477
478 /*
479 * Read a bit from the MII bus.
480 */
481 int
dc_mii_readbit(struct dc_softc * sc)482 dc_mii_readbit(struct dc_softc *sc)
483 {
484 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
485 CSR_READ_4(sc, DC_SIO);
486 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
487 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
488 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
489 return (1);
490 return (0);
491 }
492
493 /*
494 * Sync the PHYs by setting data bit and strobing the clock 32 times.
495 */
496 void
dc_mii_sync(struct dc_softc * sc)497 dc_mii_sync(struct dc_softc *sc)
498 {
499 int i;
500
501 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);
502
503 for (i = 0; i < 32; i++)
504 dc_mii_writebit(sc, 1);
505 }
506
507 /*
508 * Clock a series of bits through the MII.
509 */
510 void
dc_mii_send(struct dc_softc * sc,u_int32_t bits,int cnt)511 dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
512 {
513 int i;
514
515 for (i = (0x1 << (cnt - 1)); i; i >>= 1)
516 dc_mii_writebit(sc, bits & i);
517 }
518
519 /*
 * Read a PHY register through the MII.
521 */
522 int
dc_mii_readreg(struct dc_softc * sc,struct dc_mii_frame * frame)523 dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
524 {
525 int i, ack, s;
526
527 s = splnet();
528
529 /*
530 * Set up frame for RX.
531 */
532 frame->mii_stdelim = DC_MII_STARTDELIM;
533 frame->mii_opcode = DC_MII_READOP;
534 frame->mii_turnaround = 0;
535 frame->mii_data = 0;
536
537 /*
538 * Sync the PHYs.
539 */
540 dc_mii_sync(sc);
541
542 /*
543 * Send command/address info.
544 */
545 dc_mii_send(sc, frame->mii_stdelim, 2);
546 dc_mii_send(sc, frame->mii_opcode, 2);
547 dc_mii_send(sc, frame->mii_phyaddr, 5);
548 dc_mii_send(sc, frame->mii_regaddr, 5);
549
550 #ifdef notdef
551 /* Idle bit */
552 dc_mii_writebit(sc, 1);
553 dc_mii_writebit(sc, 0);
554 #endif
555
556 /* Check for ack */
557 ack = dc_mii_readbit(sc);
558
559 /*
560 * Now try reading data bits. If the ack failed, we still
561 * need to clock through 16 cycles to keep the PHY(s) in sync.
562 */
563 if (ack) {
564 for(i = 0; i < 16; i++) {
565 dc_mii_readbit(sc);
566 }
567 goto fail;
568 }
569
570 for (i = 0x8000; i; i >>= 1) {
571 if (!ack) {
572 if (dc_mii_readbit(sc))
573 frame->mii_data |= i;
574 }
575 }
576
577 fail:
578
579 dc_mii_writebit(sc, 0);
580 dc_mii_writebit(sc, 0);
581
582 splx(s);
583
584 if (ack)
585 return (1);
586 return (0);
587 }
588
589 /*
590 * Write to a PHY register through the MII.
591 */
int
dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/* Clock out the whole write frame, MSB first. */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	splx(s);
	/* Writes always report success. */
	return (0);
}
625
/*
 * MII bus read entry point: return the 16-bit value of PHY register
 * 'reg' at address 'phy', dispatching on chip family.
 */
int
dc_miibus_readreg(struct device *self, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc = (struct dc_softc *)self;
	int i, rval, phy_reg;

	/*
	 * Note: both the AL981 and AN983 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN983 uses a serial MII interface.
	 * The AN983's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to the correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	/*
	 * Not in MII mode: synthesize a fake PHY at the last MII
	 * address so the media layer's probe finds something.
	 */
	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch(reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (PCI_VENDOR_LITEON);
				return (PCI_VENDOR_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (PCI_PRODUCT_LITEON_PNIC);
				return (PCI_PRODUCT_DEC_21142);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	/*
	 * The PNIC has a dedicated MII access register; post the read
	 * and poll for completion. An all-ones result means no PHY
	 * responded, so report 0 instead.
	 */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	/*
	 * Comet (AL981) exposes the PHY registers directly in PCI
	 * space; map the MII register number to the chip register.
	 */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_read: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		/* All-ones means no device; report 0. */
		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	/* Everything else: bit-bang a clause 22 read frame. */
	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	/*
	 * The 98713 needs PORTSEL cleared while bit-banging the MII;
	 * save NETCFG and restore it afterwards.
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}
746
/*
 * MII bus write entry point: write 'data' to PHY register 'reg' at
 * address 'phy', dispatching on chip family (see dc_miibus_readreg
 * for the per-chip quirks).
 */
void
dc_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct dc_softc *sc = (struct dc_softc *)self;
	struct dc_mii_frame frame;
	int i, phy_reg;

	bzero(&frame, sizeof(frame));

	/* ADMtek and Conexant only respond at one fixed PHY address. */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return;
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return;

	/* PNIC: post the write to its MII register and poll for idle. */
	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return;
	}

	/* Comet (AL981): PHY registers are directly writable. */
	if (DC_IS_COMET(sc)) {
		switch(reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("%s: phy_write: bad phy register %x\n",
			    sc->sc_dev.dv_xname, reg);
			return;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return;
	}

	/* Everything else: bit-bang a clause 22 write frame. */
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	/*
	 * The 98713 needs PORTSEL cleared while bit-banging the MII;
	 * save NETCFG and restore it afterwards.
	 */
	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);
}
816
817 void
dc_miibus_statchg(struct device * self)818 dc_miibus_statchg(struct device *self)
819 {
820 struct dc_softc *sc = (struct dc_softc *)self;
821 struct mii_data *mii;
822 struct ifmedia *ifm;
823
824 if (DC_IS_ADMTEK(sc))
825 return;
826
827 mii = &sc->sc_mii;
828 ifm = &mii->mii_media;
829 if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
830 dc_setcfg(sc, ifm->ifm_media);
831 sc->dc_if_media = ifm->ifm_media;
832 } else {
833 dc_setcfg(sc, mii->mii_media_active);
834 sc->dc_if_media = mii->mii_media_active;
835 }
836 }
837
838 #define DC_BITS_512 9
839 #define DC_BITS_128 7
840 #define DC_BITS_64 6
841
842 u_int32_t
dc_crc_le(struct dc_softc * sc,caddr_t addr)843 dc_crc_le(struct dc_softc *sc, caddr_t addr)
844 {
845 u_int32_t crc;
846
847 /* Compute CRC for the address value. */
848 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
849
850 /*
851 * The hash table on the PNIC II and the MX98715AEC-C/D/E
852 * chips is only 128 bits wide.
853 */
854 if (sc->dc_flags & DC_128BIT_HASH)
855 return (crc & ((1 << DC_BITS_128) - 1));
856
857 /* The hash table on the MX98715BEC is only 64 bits wide. */
858 if (sc->dc_flags & DC_64BIT_HASH)
859 return (crc & ((1 << DC_BITS_64) - 1));
860
861 /* Xircom's hash filtering table is different (read: weird) */
862 /* Xircom uses the LEAST significant bits */
863 if (DC_IS_XIRCOM(sc)) {
864 if ((crc & 0x180) == 0x180)
865 return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4);
866 else
867 return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
868 }
869
870 return (crc & ((1 << DC_BITS_512) - 1));
871 }
872
873 /*
874 * Calculate CRC of a multicast group address, return the lower 6 bits.
875 */
876 #define dc_crc_be(addr) ((ether_crc32_be(addr,ETHER_ADDR_LEN) >> 26) \
877 & 0x0000003F)
878
879 /*
880 * 21143-style RX filter setup routine. Filter programming is done by
881 * downloading a special setup frame into the TX engine. 21143, Macronix,
882 * PNIC, PNIC II and Davicom chips are programmed this way.
883 *
884 * We always program the chip using 'hash perfect' mode, i.e. one perfect
885 * address (our node address) and a 512-bit hash filter for multicast
886 * frames. We also sneak the broadcast address into the hash filter since
887 * we need that too.
888 */
void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero(sp, DC_SFRAME_LEN);

	/* Point the descriptor at the shared setup buffer. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/* Record the setup buffer in this slot's chain entry. */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	/* Start from a clean state: no allmulti, no promisc. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Multicast ranges can't be hashed; accept everything. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* Hash each multicast group into the setup buffer. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_le(sc, enm->enm_addrlo);

			sp[h >> 4] |= htole32(1 << (h & 0xF));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Always accept broadcast frames.
	 */
	h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
	sp[h >> 4] |= htole32(1 << (h & 0xF));

	/* Set our MAC address (the perfect-filter slot). */
	sp[39] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[40] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[41] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	/* Flush the setup buffer to memory before the chip reads it. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    sizeof(struct dc_list_data) -
	    offsetof(struct dc_list_data, dc_sbuf[0]),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Hand the descriptor to the chip, flush it, then kick TX. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list[i]),
	    sizeof(struct dc_desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;
}
970
/*
 * ADMtek-style RX filter setup. These chips expose the perfect
 * filter (PAR0/PAR1) and multicast hash (MAR0/MAR1) as directly
 * writable registers, so no setup frame is required.
 */
void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	int h = 0;

	/* Start clean: no allmulti, no promisc, empty hash table. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Multicast ranges can't be hashed; accept everything. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Centaur hashes the CRC little-endian, others big. */
			if (DC_IS_CENTAUR(sc))
				h = dc_crc_le(sc, enm->enm_addrlo);
			else
				h = dc_crc_be(enm->enm_addrlo);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AL_PAR0, ac->ac_enaddr[3] << 24 |
	    ac->ac_enaddr[2] << 16 | ac->ac_enaddr[1] << 8 | ac->ac_enaddr[0]);
	CSR_WRITE_4(sc, DC_AL_PAR1, ac->ac_enaddr[5] << 8 | ac->ac_enaddr[4]);

	/* Load the multicast hash table. */
	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}
1017
/*
 * ASIX-style RX filter setup. The AX88140A/AX88141 access the
 * filter through an indirect index/data register pair (FILTIDX
 * selects the slot, FILTDATA carries the value).
 */
void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	int h = 0;

	/* Start clean: no allmulti/broadcast/promisc, empty hash. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_AX_NETCFG_RX_BROAD |
	    DC_NETCFG_RX_PROMISC));
	bzero(hashes, sizeof(hashes));
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Multicast ranges can't be hashed; accept everything. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_be(enm->enm_addrlo);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Init our MAC address, 4 + 2 bytes at a time via the
	 * indirect filter registers.
	 * NOTE(review): loads ac_enaddr with direct u_int32_t reads -
	 * assumes the byte order the chip expects matches host memory
	 * layout; confirm against the AX88140A datasheet.
	 */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&sc->sc_arpcom.ac_enaddr[4]));

	/* Load the multicast hash table. */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}
1072
/*
 * Xircom-style RX filter setup. Like the 21143 this uses a setup
 * frame, but TX/RX are stopped around the update and the perfect
 * filter occupies the first three setup-buffer words instead of
 * slots 39-41.
 */
void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	/* Stop the transmitter and receiver while reprogramming. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

	/* Claim the next TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = &sc->dc_ldata->dc_sbuf[0];
	bzero(sp, DC_SFRAME_LEN);

	/* Point the descriptor at the shared setup buffer. */
	sframe->dc_data = htole32(sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_sbuf));
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/* Record the setup buffer in this slot's chain entry. */
	sc->dc_cdata.dc_tx_chain[i].sd_mbuf =
	    (struct mbuf *)&sc->dc_ldata->dc_sbuf[0];

	/* Start from a clean state: no allmulti, no promisc. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ALLMULTI | DC_NETCFG_RX_PROMISC));
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Multicast ranges can't be hashed; accept everything. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
		else
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	} else {
		/* now program new ones */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			h = dc_crc_le(sc, enm->enm_addrlo);

			sp[h >> 4] |= htole32(1 << (h & 0xF));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/*
	 * Always accept broadcast frames.
	 */
	h = dc_crc_le(sc, (caddr_t)&etherbroadcastaddr);
	sp[h >> 4] |= htole32(1 << (h & 0xF));

	/* Set our MAC address (first three setup-buffer words). */
	sp[0] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 0);
	sp[1] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 1);
	sp[2] = DC_SP_FIELD(sc->sc_arpcom.ac_enaddr, 2);

	/* Restart TX/RX, hand the descriptor to the chip, kick TX. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;
}
1146
/*
 * Dispatch to the chip-specific receive filter routine.  The DC_IS_*
 * predicates test the (single-valued) chip type, so exactly one of the
 * branches below applies to any given adapter.
 */
void
dc_setfilt(struct dc_softc *sc)
{
	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);
	else if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);
	else if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);
	else if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}
1163
/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 */
void
dc_setcfg(struct dc_softc *sc, uint64_t media)
{
	int i, restart = 0;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/*
	 * If TX or RX is running, stop both and poll the ISR until the
	 * DMA engines report idle, so the mode bits can be changed safely.
	 */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/* Some chips never report idle; don't cry wolf. */
			if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}
	}

	/* 100baseTX setup. */
	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			/* External MII: PCS/scrambler off, port select on. */
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Symbol (PCS) mode. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	/* 10baseT setup. */
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int watchdogreg;

			if (DC_IS_INTEL(sc)) {
				/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reconfigure the SIA for 10baseT: pulse the
				 * SIA reset around the CSR14 reprogramming and
				 * disable autonegotiation (forced media).
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	/* Finally, set or clear the full-duplex bit. */
	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart TX/RX if we stopped them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);
}
1322
/*
 * Perform a software reset of the chip: assert the reset bit, wait for
 * it to self-clear, then zero the interrupt mask, bus and operating
 * mode registers so the chip is in a known state.
 */
void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	/* Poll until the chip deasserts the reset bit. */
	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * These chips never clear the reset bit on their own: clear it
	 * manually, and zero i so the timeout warning below is skipped.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc) || DC_IS_CONEXANT(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("%s: reset never completed!\n", sc->sc_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Mask all interrupts and clear bus/operating configuration. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/* The 21145 defaults to 10baseT after reset. */
	if (sc->dc_type == DC_TYPE_21145)
		dc_setcfg(sc, IFM_10_T);
}
1368
1369 void
dc_apply_fixup(struct dc_softc * sc,uint64_t media)1370 dc_apply_fixup(struct dc_softc *sc, uint64_t media)
1371 {
1372 struct dc_mediainfo *m;
1373 u_int8_t *p;
1374 int i;
1375 u_int32_t reg;
1376
1377 m = sc->dc_mi;
1378
1379 while (m != NULL) {
1380 if (m->dc_media == media)
1381 break;
1382 m = m->dc_next;
1383 }
1384
1385 if (m == NULL)
1386 return;
1387
1388 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
1389 reg = (p[0] | (p[1] << 8)) << 16;
1390 CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1391 }
1392
1393 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
1394 reg = (p[0] | (p[1] << 8)) << 16;
1395 CSR_WRITE_4(sc, DC_WATCHDOG, reg);
1396 }
1397 }
1398
1399 void
dc_decode_leaf_sia(struct dc_softc * sc,struct dc_eblock_sia * l)1400 dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
1401 {
1402 struct dc_mediainfo *m;
1403
1404 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1405 if (m == NULL)
1406 return;
1407 switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
1408 case DC_SIA_CODE_10BT:
1409 m->dc_media = IFM_10_T;
1410 break;
1411 case DC_SIA_CODE_10BT_FDX:
1412 m->dc_media = IFM_10_T|IFM_FDX;
1413 break;
1414 case DC_SIA_CODE_10B2:
1415 m->dc_media = IFM_10_2;
1416 break;
1417 case DC_SIA_CODE_10B5:
1418 m->dc_media = IFM_10_5;
1419 break;
1420 default:
1421 break;
1422 }
1423
1424 /*
1425 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
1426 * Things apparently already work for cards that do
1427 * supply Media Specific Data.
1428 */
1429 if (l->dc_sia_code & DC_SIA_CODE_EXT) {
1430 m->dc_gp_len = 2;
1431 m->dc_gp_ptr =
1432 (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
1433 } else {
1434 m->dc_gp_len = 2;
1435 m->dc_gp_ptr =
1436 (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
1437 }
1438
1439 m->dc_next = sc->dc_mi;
1440 sc->dc_mi = m;
1441
1442 sc->dc_pmode = DC_PMODE_SIA;
1443 }
1444
1445 void
dc_decode_leaf_sym(struct dc_softc * sc,struct dc_eblock_sym * l)1446 dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
1447 {
1448 struct dc_mediainfo *m;
1449
1450 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1451 if (m == NULL)
1452 return;
1453 if (l->dc_sym_code == DC_SYM_CODE_100BT)
1454 m->dc_media = IFM_100_TX;
1455
1456 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
1457 m->dc_media = IFM_100_TX|IFM_FDX;
1458
1459 m->dc_gp_len = 2;
1460 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;
1461
1462 m->dc_next = sc->dc_mi;
1463 sc->dc_mi = m;
1464
1465 sc->dc_pmode = DC_PMODE_SYM;
1466 }
1467
1468 void
dc_decode_leaf_mii(struct dc_softc * sc,struct dc_eblock_mii * l)1469 dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
1470 {
1471 u_int8_t *p;
1472 struct dc_mediainfo *m;
1473
1474 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
1475 if (m == NULL)
1476 return;
1477 /* We abuse IFM_AUTO to represent MII. */
1478 m->dc_media = IFM_AUTO;
1479 m->dc_gp_len = l->dc_gpr_len;
1480
1481 p = (u_int8_t *)l;
1482 p += sizeof(struct dc_eblock_mii);
1483 m->dc_gp_ptr = p;
1484 p += 2 * l->dc_gpr_len;
1485 m->dc_reset_len = *p;
1486 p++;
1487 m->dc_reset_ptr = p;
1488
1489 m->dc_next = sc->dc_mi;
1490 sc->dc_mi = m;
1491 }
1492
1493 void
dc_read_srom(struct dc_softc * sc,int bits)1494 dc_read_srom(struct dc_softc *sc, int bits)
1495 {
1496 sc->dc_sromsize = 2 << bits;
1497 sc->dc_srom = malloc(sc->dc_sromsize, M_DEVBUF, M_NOWAIT);
1498 if (sc->dc_srom == NULL)
1499 return;
1500 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (sc->dc_sromsize / 2), 0);
1501 }
1502
1503 void
dc_parse_21143_srom(struct dc_softc * sc)1504 dc_parse_21143_srom(struct dc_softc *sc)
1505 {
1506 struct dc_leaf_hdr *lhdr;
1507 struct dc_eblock_hdr *hdr;
1508 int have_mii, i, loff;
1509 char *ptr;
1510
1511 have_mii = 0;
1512 loff = sc->dc_srom[27];
1513 lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);
1514
1515 ptr = (char *)lhdr;
1516 ptr += sizeof(struct dc_leaf_hdr) - 1;
1517 /*
1518 * Look if we got a MII media block.
1519 */
1520 for (i = 0; i < lhdr->dc_mcnt; i++) {
1521 hdr = (struct dc_eblock_hdr *)ptr;
1522 if (hdr->dc_type == DC_EBLOCK_MII)
1523 have_mii++;
1524
1525 ptr += (hdr->dc_len & 0x7F);
1526 ptr++;
1527 }
1528
1529 /*
1530 * Do the same thing again. Only use SIA and SYM media
1531 * blocks if no MII media block is available.
1532 */
1533 ptr = (char *)lhdr;
1534 ptr += sizeof(struct dc_leaf_hdr) - 1;
1535 for (i = 0; i < lhdr->dc_mcnt; i++) {
1536 hdr = (struct dc_eblock_hdr *)ptr;
1537 switch(hdr->dc_type) {
1538 case DC_EBLOCK_MII:
1539 dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
1540 break;
1541 case DC_EBLOCK_SIA:
1542 if (! have_mii)
1543 dc_decode_leaf_sia(sc,
1544 (struct dc_eblock_sia *)hdr);
1545 break;
1546 case DC_EBLOCK_SYM:
1547 if (! have_mii)
1548 dc_decode_leaf_sym(sc,
1549 (struct dc_eblock_sym *)hdr);
1550 break;
1551 default:
1552 /* Don't care. Yet. */
1553 break;
1554 }
1555 ptr += (hdr->dc_len & 0x7F);
1556 ptr++;
1557 }
1558 }
1559
1560 /*
1561 * Attach the interface. Allocate softc structures, do ifmedia
1562 * setup and ethernet/BPF attach.
1563 */
1564 void
dc_attach(struct dc_softc * sc)1565 dc_attach(struct dc_softc *sc)
1566 {
1567 struct ifnet *ifp;
1568 int mac_offset, tmp, i;
1569 u_int32_t reg;
1570
1571 /*
1572 * Get station address from the EEPROM.
1573 */
1574 if (sc->sc_hasmac)
1575 goto hasmac;
1576
1577 switch(sc->dc_type) {
1578 case DC_TYPE_98713:
1579 case DC_TYPE_98713A:
1580 case DC_TYPE_987x5:
1581 case DC_TYPE_PNICII:
1582 dc_read_eeprom(sc, (caddr_t)&mac_offset,
1583 (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
1584 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1585 (mac_offset / 2), 3, 0);
1586 break;
1587 case DC_TYPE_PNIC:
1588 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 0, 3, 1);
1589 break;
1590 case DC_TYPE_DM9102:
1591 case DC_TYPE_21143:
1592 case DC_TYPE_21145:
1593 case DC_TYPE_ASIX:
1594 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1595 DC_EE_NODEADDR, 3, 0);
1596 break;
1597 case DC_TYPE_AL981:
1598 case DC_TYPE_AN983:
1599 reg = CSR_READ_4(sc, DC_AL_PAR0);
1600 sc->sc_arpcom.ac_enaddr[0] = (reg & 0xff);
1601 sc->sc_arpcom.ac_enaddr[1] = (reg >> 8) & 0xff;
1602 sc->sc_arpcom.ac_enaddr[2] = (reg >> 16) & 0xff;
1603 sc->sc_arpcom.ac_enaddr[3] = (reg >> 24) & 0xff;
1604 reg = CSR_READ_4(sc, DC_AL_PAR1);
1605 sc->sc_arpcom.ac_enaddr[4] = (reg & 0xff);
1606 sc->sc_arpcom.ac_enaddr[5] = (reg >> 8) & 0xff;
1607 break;
1608 case DC_TYPE_CONEXANT:
1609 bcopy(&sc->dc_srom + DC_CONEXANT_EE_NODEADDR,
1610 &sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1611 break;
1612 case DC_TYPE_XIRCOM:
1613 /* Some newer units have the MAC at offset 8 */
1614 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 8, 3, 0);
1615
1616 if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1617 sc->sc_arpcom.ac_enaddr[1] == 0x10 &&
1618 sc->sc_arpcom.ac_enaddr[2] == 0xa4)
1619 break;
1620 if (sc->sc_arpcom.ac_enaddr[0] == 0x00 &&
1621 sc->sc_arpcom.ac_enaddr[1] == 0x80 &&
1622 sc->sc_arpcom.ac_enaddr[2] == 0xc7)
1623 break;
1624 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr, 3, 3, 0);
1625 break;
1626 default:
1627 dc_read_eeprom(sc, (caddr_t)&sc->sc_arpcom.ac_enaddr,
1628 DC_EE_NODEADDR, 3, 0);
1629 break;
1630 }
1631 hasmac:
1632
1633 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct dc_list_data),
1634 PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
1635 BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) {
1636 printf(": can't alloc list mem\n");
1637 goto fail;
1638 }
1639 if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
1640 sizeof(struct dc_list_data), &sc->sc_listkva,
1641 BUS_DMA_NOWAIT) != 0) {
1642 printf(": can't map list mem\n");
1643 goto fail;
1644 }
1645 if (bus_dmamap_create(sc->sc_dmat, sizeof(struct dc_list_data), 1,
1646 sizeof(struct dc_list_data), 0, BUS_DMA_NOWAIT,
1647 &sc->sc_listmap) != 0) {
1648 printf(": can't alloc list map\n");
1649 goto fail;
1650 }
1651 if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
1652 sizeof(struct dc_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
1653 printf(": can't load list map\n");
1654 goto fail;
1655 }
1656 sc->dc_ldata = (struct dc_list_data *)sc->sc_listkva;
1657
1658 for (i = 0; i < DC_RX_LIST_CNT; i++) {
1659 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
1660 0, BUS_DMA_NOWAIT,
1661 &sc->dc_cdata.dc_rx_chain[i].sd_map) != 0) {
1662 printf(": can't create rx map\n");
1663 return;
1664 }
1665 }
1666 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1667 BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
1668 printf(": can't create rx spare map\n");
1669 return;
1670 }
1671
1672 for (i = 0; i < DC_TX_LIST_CNT; i++) {
1673 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1674 (sc->dc_flags & DC_TX_COALESCE) ? 1 : DC_TX_LIST_CNT - 5,
1675 MCLBYTES, 0, BUS_DMA_NOWAIT,
1676 &sc->dc_cdata.dc_tx_chain[i].sd_map) != 0) {
1677 printf(": can't create tx map\n");
1678 return;
1679 }
1680 }
1681 if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1682 (sc->dc_flags & DC_TX_COALESCE) ? 1 : DC_TX_LIST_CNT - 5,
1683 MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
1684 printf(": can't create tx spare map\n");
1685 return;
1686 }
1687
1688 /*
1689 * A 21143 or clone chip was detected. Inform the world.
1690 */
1691 printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1692
1693 ifp = &sc->sc_arpcom.ac_if;
1694 ifp->if_softc = sc;
1695 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1696 ifp->if_ioctl = dc_ioctl;
1697 ifp->if_start = dc_start;
1698 ifp->if_watchdog = dc_watchdog;
1699 ifq_init_maxlen(&ifp->if_snd, DC_TX_LIST_CNT - 1);
1700 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1701
1702 ifp->if_capabilities = IFCAP_VLAN_MTU;
1703
1704 /* Do MII setup. If this is a 21143, check for a PHY on the
1705 * MII bus after applying any necessary fixups to twiddle the
1706 * GPIO bits. If we don't end up finding a PHY, restore the
1707 * old selection (SIA only or SIA/SYM) and attach the dcphy
1708 * driver instead.
1709 */
1710 if (DC_IS_INTEL(sc)) {
1711 dc_apply_fixup(sc, IFM_AUTO);
1712 tmp = sc->dc_pmode;
1713 sc->dc_pmode = DC_PMODE_MII;
1714 }
1715
1716 /*
1717 * Setup General Purpose port mode and data so the tulip can talk
1718 * to the MII. This needs to be done before mii_attach so that
1719 * we can actually see them.
1720 */
1721 if (DC_IS_XIRCOM(sc)) {
1722 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
1723 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1724 DELAY(10);
1725 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
1726 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
1727 DELAY(10);
1728 }
1729
1730 sc->sc_mii.mii_ifp = ifp;
1731 sc->sc_mii.mii_readreg = dc_miibus_readreg;
1732 sc->sc_mii.mii_writereg = dc_miibus_writereg;
1733 sc->sc_mii.mii_statchg = dc_miibus_statchg;
1734 ifmedia_init(&sc->sc_mii.mii_media, 0, dc_ifmedia_upd, dc_ifmedia_sts);
1735 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1736 MII_OFFSET_ANY, 0);
1737
1738 if (DC_IS_INTEL(sc)) {
1739 if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1740 sc->dc_pmode = tmp;
1741 if (sc->dc_pmode != DC_PMODE_SIA)
1742 sc->dc_pmode = DC_PMODE_SYM;
1743 sc->dc_flags |= DC_21143_NWAY;
1744 if (sc->dc_flags & DC_MOMENCO_BOTCH)
1745 sc->dc_pmode = DC_PMODE_MII;
1746 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff,
1747 MII_PHY_ANY, MII_OFFSET_ANY, 0);
1748 } else {
1749 /* we have a PHY, so we must clear this bit */
1750 sc->dc_flags &= ~DC_TULIP_LEDS;
1751 }
1752 }
1753
1754 if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1755 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1756 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1757 printf("%s: MII without any PHY!\n", sc->sc_dev.dv_xname);
1758 } else if (sc->dc_type == DC_TYPE_21145) {
1759 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
1760 } else
1761 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1762
1763 if (DC_IS_DAVICOM(sc) && sc->dc_revision >= DC_REVISION_DM9102A)
1764 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_HPNA_1,0,NULL);
1765
1766 if (DC_IS_ADMTEK(sc)) {
1767 /*
1768 * Set automatic TX underrun recovery for the ADMtek chips
1769 */
1770 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR);
1771 }
1772
1773 /*
1774 * Call MI attach routines.
1775 */
1776 if_attach(ifp);
1777 ether_ifattach(ifp);
1778
1779 fail:
1780 return;
1781 }
1782
1783 /*
1784 * Initialize the transmit descriptors.
1785 */
1786 int
dc_list_tx_init(struct dc_softc * sc)1787 dc_list_tx_init(struct dc_softc *sc)
1788 {
1789 struct dc_chain_data *cd;
1790 struct dc_list_data *ld;
1791 int i;
1792 bus_addr_t next;
1793
1794 cd = &sc->dc_cdata;
1795 ld = sc->dc_ldata;
1796 for (i = 0; i < DC_TX_LIST_CNT; i++) {
1797 next = sc->sc_listmap->dm_segs[0].ds_addr;
1798 if (i == (DC_TX_LIST_CNT - 1))
1799 next +=
1800 offsetof(struct dc_list_data, dc_tx_list[0]);
1801 else
1802 next +=
1803 offsetof(struct dc_list_data, dc_tx_list[i + 1]);
1804 cd->dc_tx_chain[i].sd_mbuf = NULL;
1805 ld->dc_tx_list[i].dc_data = htole32(0);
1806 ld->dc_tx_list[i].dc_ctl = htole32(0);
1807 ld->dc_tx_list[i].dc_next = htole32(next);
1808 }
1809
1810 cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
1811
1812 return (0);
1813 }
1814
1815
1816 /*
1817 * Initialize the RX descriptors and allocate mbufs for them. Note that
1818 * we arrange the descriptors in a closed ring, so that the last descriptor
1819 * points back to the first.
1820 */
1821 int
dc_list_rx_init(struct dc_softc * sc)1822 dc_list_rx_init(struct dc_softc *sc)
1823 {
1824 struct dc_chain_data *cd;
1825 struct dc_list_data *ld;
1826 int i;
1827 bus_addr_t next;
1828
1829 cd = &sc->dc_cdata;
1830 ld = sc->dc_ldata;
1831
1832 for (i = 0; i < DC_RX_LIST_CNT; i++) {
1833 if (dc_newbuf(sc, i, NULL) == ENOBUFS)
1834 return (ENOBUFS);
1835 next = sc->sc_listmap->dm_segs[0].ds_addr;
1836 if (i == (DC_RX_LIST_CNT - 1))
1837 next +=
1838 offsetof(struct dc_list_data, dc_rx_list[0]);
1839 else
1840 next +=
1841 offsetof(struct dc_list_data, dc_rx_list[i + 1]);
1842 ld->dc_rx_list[i].dc_next = htole32(next);
1843 }
1844
1845 cd->dc_rx_prod = 0;
1846
1847 return (0);
1848 }
1849
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * If `m' is NULL a fresh cluster is allocated and loaded via the spare
 * DMA map (which is then swapped with the slot's map); otherwise the
 * caller's mbuf is recycled in place.  Returns 0 or ENOBUFS.
 */
int
dc_newbuf(struct dc_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct dc_desc *c;
	bus_dmamap_t map;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		/*
		 * Load into the spare map first so the slot's current
		 * map stays intact if the load fails.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rx_sparemap,
		    m_new, BUS_DMA_NOWAIT) != 0) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		/* Swap the freshly loaded spare map into the slot. */
		map = sc->dc_cdata.dc_rx_chain[i].sd_map;
		sc->dc_cdata.dc_rx_chain[i].sd_map = sc->sc_rx_sparemap;
		sc->sc_rx_sparemap = map;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Reserve 8 bytes at the front (kept out of the DMA address below). */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m_new, char *), m_new->m_len);

	bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map, 0,
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->dc_cdata.dc_rx_chain[i].sd_mbuf = m_new;
	c->dc_data = htole32(
	    sc->dc_cdata.dc_rx_chain[i].sd_map->dm_segs[0].ds_addr +
	    sizeof(u_int64_t));
	c->dc_ctl = htole32(DC_RXCTL_RLINK | ETHER_MAX_DIX_LEN);
	/* Hand the descriptor back to the chip. */
	c->dc_status = htole32(DC_RXSTAT_OWN);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_rx_list[i]),
	    sizeof(struct dc_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1920
1921 /*
1922 * Grrrrr.
1923 * The PNIC chip has a terrible bug in it that manifests itself during
1924 * periods of heavy activity. The exact mode of failure if difficult to
1925 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
1926 * will happen on slow machines. The bug is that sometimes instead of
1927 * uploading one complete frame during reception, it uploads what looks
1928 * like the entire contents of its FIFO memory. The frame we want is at
1929 * the end of the whole mess, but we never know exactly how much data has
1930 * been uploaded, so salvaging the frame is hard.
1931 *
1932 * There is only one way to do it reliably, and it's disgusting.
1933 * Here's what we know:
1934 *
1935 * - We know there will always be somewhere between one and three extra
1936 * descriptors uploaded.
1937 *
1938 * - We know the desired received frame will always be at the end of the
1939 * total data upload.
1940 *
1941 * - We know the size of the desired received frame because it will be
1942 * provided in the length field of the status word in the last descriptor.
1943 *
1944 * Here's what we do:
1945 *
1946 * - When we allocate buffers for the receive ring, we bzero() them.
1947 * This means that we know that the buffer contents should be all
1948 * zeros, except for data uploaded by the chip.
1949 *
1950 * - We also force the PNIC chip to upload frames that include the
1951 * ethernet CRC at the end.
1952 *
1953 * - We gather all of the bogus frame data into a single buffer.
1954 *
1955 * - We then position a pointer at the end of this buffer and scan
1956 * backwards until we encounter the first non-zero byte of data.
1957 * This is the end of the received frame. We know we will encounter
1958 * some data at the end of the frame because the CRC will always be
1959 * there, so even if the sender transmits a packet of all zeros,
1960 * we won't be fooled.
1961 *
1962 * - We know the size of the actual received frame, so we subtract
1963 * that value from the current pointer location. This brings us
1964 * to the start of the actual received packet.
1965 *
1966 * - We copy this into an mbuf and pass it on, along with the actual
1967 * frame length.
1968 *
1969 * The performance hit is tremendous, but it beats dropping frames all
1970 * the time.
1971 */
1972
#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG)
/*
 * Salvage a frame that the buggy PNIC chip smeared across several RX
 * descriptors (see the long comment above for the full story).  `idx'
 * is the descriptor holding the end of the upload; the scan starts at
 * the first-fragment slot saved in sc->dc_pnic_rx_bug_save.
 */
void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	u_int32_t rxstat = 0;

	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	/* Zero the staging buffer so the backward zero-scan works. */
	bzero(ptr, ETHER_MAX_DIX_LEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		bcopy(mtod(m, char *), ptr, ETHER_MAX_DIX_LEN);
		ptr += ETHER_MAX_DIX_LEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle the drained mbuf and move to the next slot. */
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/*
	 * Scan backwards until we hit a non-zero byte.  The frame's CRC
	 * guarantees at least one non-zero byte at the end, so this
	 * cannot be fooled by an all-zero payload.
	 */
	while(*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((unsigned long)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, m);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}
2028
2029 /*
2030 * This routine searches the RX ring for dirty descriptors in the
2031 * event that the rxeof routine falls out of sync with the chip's
2032 * current descriptor pointer. This may happen sometimes as a result
2033 * of a "no RX buffer available" condition that happens when the chip
2034 * consumes all of the RX buffers before the driver has a chance to
2035 * process the RX ring. This routine may need to be called more than
2036 * once to bring the driver back in sync with the chip, however we
2037 * should still be getting RX DONE interrupts to drive the search
2038 * for new packets in the RX ring, so we should catch up eventually.
2039 */
2040 int
dc_rx_resync(struct dc_softc * sc)2041 dc_rx_resync(struct dc_softc *sc)
2042 {
2043 u_int32_t stat;
2044 int i, pos, offset;
2045
2046 pos = sc->dc_cdata.dc_rx_prod;
2047
2048 for (i = 0; i < DC_RX_LIST_CNT; i++) {
2049
2050 offset = offsetof(struct dc_list_data, dc_rx_list[pos]);
2051 bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
2052 offset, sizeof(struct dc_desc),
2053 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2054
2055 stat = sc->dc_ldata->dc_rx_list[pos].dc_status;
2056 if (!(stat & htole32(DC_RXSTAT_OWN)))
2057 break;
2058 DC_INC(pos, DC_RX_LIST_CNT);
2059 }
2060
2061 /* If the ring really is empty, then just return. */
2062 if (i == DC_RX_LIST_CNT)
2063 return (0);
2064
2065 /* We've fallen behind the chip: catch it. */
2066 sc->dc_cdata.dc_rx_prod = pos;
2067
2068 return (EAGAIN);
2069 }
2070
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.  Returns the number of frames consumed.
 */
int
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int i, offset, total_len = 0, consumed = 0;
	u_int32_t rxstat;

	ifp = &sc->sc_arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	/* Walk the ring until we hit a descriptor the chip still owns. */
	for(;;) {
		struct mbuf *m0 = NULL;

		offset = offsetof(struct dc_list_data, dc_rx_list[i]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = letoh32(cur_rx->dc_status);
		if (rxstat & DC_RXSTAT_OWN)
			break;

		m = sc->dc_cdata.dc_rx_chain[i].sd_mbuf;
		total_len = DC_RXBYTES(rxstat);

		bus_dmamap_sync(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map,
		    0, sc->dc_cdata.dc_rx_chain[i].sd_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);

		/*
		 * On buggy PNIC chips, a frame may be smeared over
		 * several descriptors; remember where it started and
		 * run the salvage routine once the last fragment shows
		 * up (see dc_pnic_rx_bug_war above).
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				/* Re-read the (faked-up) status word. */
				rxstat = letoh32(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be VLANs.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
			    DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
			    DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, m);
				if (rxstat & DC_RXSTAT_CRCERR) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/* Serious error: reinit the chip. */
					dc_init(sc);
					break;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		/* Copy into a fresh chain and recycle the ring mbuf. */
		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		consumed++;
		ml_enqueue(&ml, m);
	}

	sc->dc_cdata.dc_rx_prod = i;

	if_input(ifp, &ml);

	return (consumed);
}
2172
2173 /*
2174 * A frame was downloaded to the chip. It's safe for us to clean up
2175 * the list buffers.
2176 */
2177
void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx, offset;

	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t txstat;

		/* Sync the descriptor before reading its status. */
		offset = offsetof(struct dc_list_data, dc_tx_list[idx]);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = letoh32(cur_tx->dc_status);

		/* Chip hasn't finished with this descriptor yet. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/*
		 * Intermediate fragments and setup frames carry no mbuf
		 * to free; only bookkeeping is needed before moving on.
		 */
		if (!(cur_tx->dc_ctl & htole32(DC_TXCTL_LASTFRAG)) ||
		    cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
			if (cur_tx->dc_ctl & htole32(DC_TXCTL_SETUP)) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
			}
			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			/* Underruns are handled elsewhere (dc_tx_underrun);
			 * any other error forces a chip reset. */
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				dc_init(sc);
				return;
			}
		}

		/* Hardware collision counter lives in bits 3.. of txstat. */
		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		/* Tear down the DMA mapping and release the mbuf. */
		if (sc->dc_cdata.dc_tx_chain[idx].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[idx].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[idx].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_tx_chain[idx].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[idx].sd_mbuf = NULL;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    offset, sizeof(struct dc_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}
	sc->dc_cdata.dc_tx_cons = idx;

	/* Re-open the send queue once enough descriptors are free. */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > 5)
		ifq_clr_oactive(&ifp->if_snd);
	/* Nothing in flight: cancel the watchdog. */
	if (sc->dc_cdata.dc_tx_cnt == 0)
		ifp->if_timer = 0;
}
2291
/*
 * Periodic timer: poll the PHY/link state, kick pending transmits once
 * the link comes up, and reschedule itself.  Runs at splnet.
 */
void
dc_tick(void *xsc)
{
	struct dc_softc *sc = (struct dc_softc *)xsc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;
	u_int32_t r;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;
	mii = &sc->sc_mii;

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: read link status from the SIA
			 * registers and force a media change if the
			 * current selection lost its link.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * For NICs which never report DC_RXSTATE_WAIT, we
			 * have to bite the bullet...
			 */
			if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc,
			    DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) &&
			    sc->dc_cdata.dc_tx_cnt == 0 && !DC_IS_ASIX(sc)) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (ifq_empty(&ifp->if_snd) == 0)
			dc_start(ifp);
	}

	/* Poll fast (100ms) while NWAY negotiation has no link yet,
	 * otherwise once a second. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		timeout_add_msec(&sc->dc_tick_tmo, 100);
	else
		timeout_add_sec(&sc->dc_tick_tmo, 1);

	splx(s);
}
2370
2371 /* A transmit underrun has occurred. Back off the transmit threshold,
2372 * or switch to store and forward mode if we have to.
2373 */
2374 void
dc_tx_underrun(struct dc_softc * sc)2375 dc_tx_underrun(struct dc_softc *sc)
2376 {
2377 u_int32_t isr;
2378 int i;
2379
2380 if (DC_IS_DAVICOM(sc))
2381 dc_init(sc);
2382
2383 if (DC_IS_INTEL(sc)) {
2384 /*
2385 * The real 21143 requires that the transmitter be idle
2386 * in order to change the transmit threshold or store
2387 * and forward state.
2388 */
2389 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2390
2391 for (i = 0; i < DC_TIMEOUT; i++) {
2392 isr = CSR_READ_4(sc, DC_ISR);
2393 if (isr & DC_ISR_TX_IDLE)
2394 break;
2395 DELAY(10);
2396 }
2397 if (i == DC_TIMEOUT) {
2398 printf("%s: failed to force tx to idle state\n",
2399 sc->sc_dev.dv_xname);
2400 dc_init(sc);
2401 }
2402 }
2403
2404 sc->dc_txthresh += DC_TXTHRESH_INC;
2405 if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
2406 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
2407 } else {
2408 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
2409 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
2410 }
2411
2412 if (DC_IS_INTEL(sc))
2413 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
2414
2415 return;
2416 }
2417
/*
 * Interrupt handler.  Returns non-zero when the interrupt was ours.
 */
int
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status, ints;
	int claimed = 0;

	sc = arg;

	ifp = &sc->sc_arpcom.ac_if;

	ints = CSR_READ_4(sc, DC_ISR);
	/* Not our interrupt. */
	if ((ints & DC_INTRS) == 0)
		return (claimed);
	/* All-ones read: card likely gone (e.g. hot-unplugged). */
	if (ints == 0xffffffff)
		return (0);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc, 0);
		return (claimed);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/* Loop until the chip has no more pending causes; each pass
	 * acknowledges the causes it is about to service. */
	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF &&
	    (ifp->if_flags & IFF_RUNNING)) {

		claimed = 1;
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			/* If nothing was consumed the ring may be out of
			 * step with the chip; resync and retry. */
			if (dc_rxeof(sc) == 0) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Descriptors still queued: restart the
			 * transmitter and poke the TX poll demand. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			if (dc_rxeof(sc) == 0) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		/* Bus error: reset the whole chip. */
		if (status & DC_ISR_BUS_ERR)
			dc_init(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	/* TX completion may have freed descriptors: push more packets. */
	if (ifq_empty(&ifp->if_snd) == 0)
		dc_start(ifp);

	return (claimed);
}
2494
2495 /*
2496 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2497 * pointers to the fragment pointers.
2498 */
int
dc_encap(struct dc_softc *sc, bus_dmamap_t map, struct mbuf *m, u_int32_t *idx)
{
	struct dc_desc *f = NULL;
	int frag, cur, cnt = 0, i;

	/* *idx is the first descriptor of this packet; cur tracks the
	 * last one filled so far. */
	cur = frag = *idx;

	/* One descriptor per DMA segment of the already-loaded map. */
	for (i = 0; i < map->dm_nsegs; i++) {
		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | map->dm_segs[i].ds_len);
		if (cnt == 0) {
			/* First descriptor: not handed to the chip yet;
			 * its OWN bit is set last, below. */
			f->dc_status = htole32(0);
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(map->dm_segs[i].ds_addr);
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
		cnt++;
	}

	sc->dc_cdata.dc_tx_cnt += cnt;
	/* The mbuf and its map are tracked on the LAST descriptor;
	 * the slot's old map is recycled as the spare for the next load. */
	sc->dc_cdata.dc_tx_chain[cur].sd_mbuf = m;
	sc->sc_tx_sparemap = sc->dc_cdata.dc_tx_chain[cur].sd_map;
	sc->dc_cdata.dc_tx_chain[cur].sd_map = map;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	/* Request a TX completion interrupt per the chip's quirks:
	 * on the first fragment, on every packet, or rate-limited. */
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[*idx].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Hand the whole chain to the chip only now, by setting OWN on
	 * the first descriptor after everything else is in place. */
	sc->dc_ldata->dc_tx_list[*idx].dc_status = htole32(DC_TXSTAT_OWN);

	/* Advance the caller's producer index past this packet. */
	*idx = frag;

	return (0);
}
2544
2545 /*
2546 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2547 * to the mbuf data regions directly in the transmit lists. We also save a
2548 * copy of the pointers since the transmit list fragment pointers are
2549 * physical addresses.
2550 */
2551
2552 static inline int
dc_fits(struct dc_softc * sc,int idx,bus_dmamap_t map)2553 dc_fits(struct dc_softc *sc, int idx, bus_dmamap_t map)
2554 {
2555 if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
2556 if (sc->dc_cdata.dc_tx_prod != idx &&
2557 idx + map->dm_nsegs >= DC_TX_LIST_CNT)
2558 return (0);
2559 }
2560
2561 if (sc->dc_cdata.dc_tx_cnt + map->dm_nsegs + 5 > DC_TX_LIST_CNT)
2562 return (0);
2563
2564 return (1);
2565 }
2566
void
dc_start(struct ifnet *ifp)
{
	struct dc_softc *sc = ifp->if_softc;
	bus_dmamap_t map;
	struct mbuf *m;
	int idx;

	/* Without link, hold packets back (see dc_tick) unless the
	 * queue is getting long. */
	if (!sc->dc_link && ifq_len(&ifp->if_snd) < 10)
		return;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	idx = sc->dc_cdata.dc_tx_prod;

	/* Sync the whole TX descriptor region before we modify it. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list),
	    sizeof(struct dc_desc) * DC_TX_LIST_CNT,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		/* Peek at the next packet; commit/rollback below decides
		 * whether it actually leaves the queue. */
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		map = sc->sc_tx_sparemap;
		switch (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_OVERRUN)) {
		case 0:
			break;
		case EFBIG:
			/* Too many segments: compact the chain and retry. */
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
			    BUS_DMA_NOWAIT | BUS_DMA_OVERRUN) == 0)
				break;

			/* FALLTHROUGH */
		default:
			/* Unmappable packet: drop it and keep going. */
			ifq_deq_commit(&ifp->if_snd, m);
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/* Ring full: put the packet back and stall the queue. */
		if (!dc_fits(sc, idx, map)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m);

		if (dc_encap(sc, map, m, &idx) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* Chips limited to one in-flight packet at a time. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
	}

	/* Sync the updated descriptors back for the chip. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    offsetof(struct dc_list_data, dc_tx_list),
	    sizeof(struct dc_desc) * DC_TX_LIST_CNT,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Nothing was queued. */
	if (idx == sc->dc_cdata.dc_tx_prod)
		return;

	/* Transmit */
	sc->dc_cdata.dc_tx_prod = idx;
	if (!(sc->dc_flags & DC_TX_POLL))
		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2661
/*
 * Stop, reset and (re)initialize the chip: program bus/cache settings,
 * rebuild the RX/TX rings, load the filter and bring the interface up.
 * Runs at splnet.
 */
void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii;
	int s;

	s = splnet();

	mii = &sc->sc_mii;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc, 0);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	/* Cache alignment in 32-bit longwords, probed at attach time. */
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/* Configure TX FIFO behavior: store-and-forward or threshold. */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	/* Xircom parts need their GPIO pins set up via the SIA. */
	if (DC_IS_XIRCOM(sc)) {
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	/* Start out at the minimum TX threshold; dc_tx_underrun()
	 * will raise it as needed. */
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		dc_stop(sc, 0);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Sync down both lists initialized.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_rx_list[0]));
	CSR_WRITE_4(sc, DC_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct dc_list_data, dc_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	/* Arm the link-poll timer (dc_tick).  HomePNA has no PHY link
	 * indication, so treat it as always up. */
	timeout_set(&sc->dc_tick_tmo, dc_tick, sc);

	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		if (sc->dc_flags & DC_21143_NWAY)
			timeout_add_msec(&sc->dc_tick_tmo, 100);
		else
			timeout_add_sec(&sc->dc_tick_tmo, 1);
	}

#ifdef SRM_MEDIA
	/* Apply media settings inherited from the SRM console (alpha). */
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
}
2847
2848 /*
2849 * Set media options.
2850 */
2851 int
dc_ifmedia_upd(struct ifnet * ifp)2852 dc_ifmedia_upd(struct ifnet *ifp)
2853 {
2854 struct dc_softc *sc;
2855 struct mii_data *mii;
2856 struct ifmedia *ifm;
2857
2858 sc = ifp->if_softc;
2859 mii = &sc->sc_mii;
2860 mii_mediachg(mii);
2861
2862 ifm = &mii->mii_media;
2863
2864 if (DC_IS_DAVICOM(sc) &&
2865 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
2866 dc_setcfg(sc, ifm->ifm_media);
2867 else
2868 sc->dc_link = 0;
2869
2870 return (0);
2871 }
2872
2873 /*
2874 * Report current media status.
2875 */
2876 void
dc_ifmedia_sts(struct ifnet * ifp,struct ifmediareq * ifmr)2877 dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2878 {
2879 struct dc_softc *sc;
2880 struct mii_data *mii;
2881 struct ifmedia *ifm;
2882
2883 sc = ifp->if_softc;
2884 mii = &sc->sc_mii;
2885 mii_pollstat(mii);
2886 ifm = &mii->mii_media;
2887 if (DC_IS_DAVICOM(sc)) {
2888 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
2889 ifmr->ifm_active = ifm->ifm_media;
2890 ifmr->ifm_status = 0;
2891 return;
2892 }
2893 }
2894 ifmr->ifm_active = mii->mii_media_active;
2895 ifmr->ifm_status = mii->mii_media_status;
2896 }
2897
2898 int
dc_ioctl(struct ifnet * ifp,u_long command,caddr_t data)2899 dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2900 {
2901 struct dc_softc *sc = ifp->if_softc;
2902 struct ifreq *ifr = (struct ifreq *) data;
2903 int s, error = 0;
2904
2905 s = splnet();
2906
2907 switch(command) {
2908 case SIOCSIFADDR:
2909 ifp->if_flags |= IFF_UP;
2910 if (!(ifp->if_flags & IFF_RUNNING))
2911 dc_init(sc);
2912 break;
2913 case SIOCSIFFLAGS:
2914 if (ifp->if_flags & IFF_UP) {
2915 if (ifp->if_flags & IFF_RUNNING)
2916 error = ENETRESET;
2917 else {
2918 sc->dc_txthresh = 0;
2919 dc_init(sc);
2920 }
2921 } else {
2922 if (ifp->if_flags & IFF_RUNNING)
2923 dc_stop(sc, 0);
2924 }
2925 break;
2926 case SIOCGIFMEDIA:
2927 case SIOCSIFMEDIA:
2928 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
2929 #ifdef SRM_MEDIA
2930 if (sc->dc_srm_media)
2931 sc->dc_srm_media = 0;
2932 #endif
2933 break;
2934 default:
2935 error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
2936 }
2937
2938 if (error == ENETRESET) {
2939 if (ifp->if_flags & IFF_RUNNING)
2940 dc_setfilt(sc);
2941 error = 0;
2942 }
2943
2944 splx(s);
2945 return (error);
2946 }
2947
2948 void
dc_watchdog(struct ifnet * ifp)2949 dc_watchdog(struct ifnet *ifp)
2950 {
2951 struct dc_softc *sc;
2952
2953 sc = ifp->if_softc;
2954
2955 ifp->if_oerrors++;
2956 printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2957
2958 dc_init(sc);
2959
2960 if (ifq_empty(&ifp->if_snd) == 0)
2961 dc_start(ifp);
2962 }
2963
2964 /*
2965 * Stop the adapter and free any mbufs allocated to the
2966 * RX and TX lists.
2967 */
void
dc_stop(struct dc_softc *sc, int softonly)
{
	struct ifnet *ifp;
	u_int32_t isr;
	int i;

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_timer = 0;

	timeout_del(&sc->dc_tick_tmo);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* With softonly set, skip touching the hardware (used when the
	 * device is already gone, e.g. from detach). */
	if (!softonly) {
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON));

		/* Wait for both DMA engines to quiesce. */
		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if ((isr & DC_ISR_TX_IDLE ||
			    (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT) {
			/* Some chips are known never to reach the idle
			 * states; only complain about the others. */
			if (!((isr & DC_ISR_TX_IDLE) ||
			    (isr & DC_ISR_TX_STATE) == DC_TXSTATE_RESET) &&
			    !DC_IS_ASIX(sc) && !DC_IS_DAVICOM(sc))
				printf("%s: failed to force tx to idle state\n",
				    sc->sc_dev.dv_xname);
			if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED) &&
			    !DC_HAS_BROKEN_RXSTATE(sc))
				printf("%s: failed to force rx to idle state\n",
				    sc->sc_dev.dv_xname);
		}

		/* Mask interrupts and clear the list base addresses. */
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
		CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
		CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
		sc->dc_link = 0;
	}

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_rx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_rx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_rx_chain[i].sd_mbuf != NULL) {
			m_freem(sc->dc_cdata.dc_rx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_rx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (sc->dc_cdata.dc_tx_chain[i].sd_map->dm_nsegs != 0) {
			bus_dmamap_t map = sc->dc_cdata.dc_tx_chain[i].sd_map;

			bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, map);
		}
		if (sc->dc_cdata.dc_tx_chain[i].sd_mbuf != NULL) {
			/* Setup-frame slots share the setup buffer, not a
			 * real mbuf: just clear the pointer. */
			if (sc->dc_ldata->dc_tx_list[i].dc_ctl &
			    htole32(DC_TXCTL_SETUP)) {
				sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
				continue;
			}
			m_freem(sc->dc_cdata.dc_tx_chain[i].sd_mbuf);
			sc->dc_cdata.dc_tx_chain[i].sd_mbuf = NULL;
		}
	}
	bzero(&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list));

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
3058
3059 int
dc_activate(struct device * self,int act)3060 dc_activate(struct device *self, int act)
3061 {
3062 struct dc_softc *sc = (struct dc_softc *)self;
3063 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3064
3065 switch (act) {
3066 case DVACT_SUSPEND:
3067 if (ifp->if_flags & IFF_RUNNING)
3068 dc_stop(sc, 0);
3069 break;
3070 case DVACT_RESUME:
3071 if (ifp->if_flags & IFF_UP)
3072 dc_init(sc);
3073 break;
3074 }
3075 return (0);
3076 }
3077
/*
 * Device detach: stop the (possibly already absent) hardware and
 * release PHYs, SROM copy, DMA maps and descriptor memory.
 */
int
dc_detach(struct dc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	/* Soft stop only: the hardware may already be gone. */
	dc_stop(sc, 1);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	if (sc->dc_srom)
		free(sc->dc_srom, M_DEVBUF, sc->dc_sromsize);

	/* Destroy per-slot DMA maps and the spares. */
	for (i = 0; i < DC_RX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_rx_chain[i].sd_map);
	if (sc->sc_rx_sparemap)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_sparemap);
	for (i = 0; i < DC_TX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->dc_cdata.dc_tx_chain[i].sd_map);
	if (sc->sc_tx_sparemap)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_sparemap);

	/// XXX bus_dmamap_sync
	bus_dmamap_unload(sc->sc_dmat, sc->sc_listmap);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_listkva, sc->sc_listnseg);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg);

	ether_ifdetach(ifp);
	if_detach(ifp);
	return (0);
}
3111
/* Autoconf glue: driver class descriptor for dc(4) network interfaces. */
struct cfdriver dc_cd = {
	NULL, "dc", DV_IFNET
};
3115