1 /*
2 * Copyright (c) 2002 Myson Technology Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * Written by: yen_cw@myson.com.tw available at: http://www.myson.com.tw/
27 *
28 * $FreeBSD: src/sys/dev/my/if_my.c,v 1.2.2.4 2002/04/17 02:05:27 julian Exp $
29 *
30 * Myson fast ethernet PCI NIC driver
31 *
32 * $Id: if_my.c,v 1.40 2001/11/30 03:55:00 <yen_cw@myson.com.tw> wpaul Exp $
33 */
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/sockio.h>
37 #include <sys/mbuf.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
40 #include <sys/interrupt.h>
41 #include <sys/socket.h>
42 #include <sys/queue.h>
43 #include <sys/bus.h>
44 #include <sys/module.h>
45 #include <sys/serialize.h>
46 #include <sys/rman.h>
47
48 #include <sys/thread2.h>
49
50 #include <net/if.h>
51 #include <net/ifq_var.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_media.h>
55 #include <net/if_dl.h>
56 #include <net/bpf.h>
57
58 #include <vm/vm.h> /* for vtophys */
59 #include <vm/pmap.h> /* for vtophys */
60 #include <machine/clock.h> /* for DELAY */
61
62 #include <bus/pci/pcireg.h>
63 #include <bus/pci/pcivar.h>
64
65 /*
66 * #define MY_USEIOSPACE
67 */
68
69 static int MY_USEIOSPACE = 1;
70
71 #ifdef MY_USEIOSPACE
72 #define MY_RES SYS_RES_IOPORT
73 #define MY_RID MY_PCI_LOIO
74 #else
75 #define MY_RES SYS_RES_MEMORY
76 #define MY_RID MY_PCI_LOMEM
77 #endif
78
79
80 #include "if_myreg.h"
81
82 /*
83 * Various supported device vendors/types and their names.
84 */
85 static struct my_type my_devs[] = {
86 {MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"},
87 {MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"},
88 {MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"},
89 {0, 0, NULL}
90 };
91
92 /*
93 * Various supported PHY vendors/types and their names. Note that this driver
94 * will work with pretty much any MII-compliant PHY, so failure to positively
95 * identify the chip is not a fatal error.
96 */
97 static struct my_type my_phys[] = {
98 {MysonPHYID0, MysonPHYID0, "<MYSON MTD981>"},
99 {SeeqPHYID0, SeeqPHYID0, "<SEEQ 80225>"},
100 {AhdocPHYID0, AhdocPHYID0, "<AHDOC 101>"},
101 {MarvellPHYID0, MarvellPHYID0, "<MARVELL 88E1000>"},
102 {LevelOnePHYID0, LevelOnePHYID0, "<LevelOne LXT1000>"},
103 {0, 0, "<MII-compliant physical interface>"}
104 };
105
106 static int my_probe(device_t);
107 static int my_attach(device_t);
108 static int my_detach(device_t);
109 static int my_newbuf(struct my_softc *, struct my_chain_onefrag *);
110 static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *);
111 static void my_rxeof(struct my_softc *);
112 static void my_txeof(struct my_softc *);
113 static void my_txeoc(struct my_softc *);
114 static void my_intr(void *);
115 static void my_start(struct ifnet *, struct ifaltq_subque *);
116 static int my_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
117 static void my_init(void *);
118 static void my_stop(struct my_softc *);
119 static void my_watchdog(struct ifnet *);
120 static void my_shutdown(device_t);
121 static int my_ifmedia_upd(struct ifnet *);
122 static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *);
123 static u_int16_t my_phy_readreg(struct my_softc *, int);
124 static void my_phy_writereg(struct my_softc *, int, int);
125 static void my_autoneg_xmit(struct my_softc *);
126 static void my_autoneg_mii(struct my_softc *, int, int);
127 static void my_setmode_mii(struct my_softc *, int);
128 static void my_getmode_mii(struct my_softc *);
129 static void my_setcfg(struct my_softc *, int);
130 static u_int8_t my_calchash(caddr_t);
131 static void my_setmulti(struct my_softc *);
132 static void my_reset(struct my_softc *);
133 static int my_list_rx_init(struct my_softc *);
134 static int my_list_tx_init(struct my_softc *);
135 static long my_send_cmd_to_phy(struct my_softc *, int, int);
136
137 #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
138 #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
139
140 static device_method_t my_methods[] = {
141 /* Device interface */
142 DEVMETHOD(device_probe, my_probe),
143 DEVMETHOD(device_attach, my_attach),
144 DEVMETHOD(device_detach, my_detach),
145 DEVMETHOD(device_shutdown, my_shutdown),
146
147 DEVMETHOD_END
148 };
149
150 static driver_t my_driver = {
151 "my",
152 my_methods,
153 sizeof(struct my_softc)
154 };
155
156 static devclass_t my_devclass;
157
158 DECLARE_DUMMY_MODULE(if_my);
159 DRIVER_MODULE(if_my, pci, my_driver, my_devclass, NULL, NULL);
160
161 static long
162 my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad)
163 {
164 long miir;
165 int i;
166 int mask, data;
167
168 /* enable MII output */
169 miir = CSR_READ_4(sc, MY_MANAGEMENT);
170 miir &= 0xfffffff0;
171
172 miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO;
173
174 /* send 32 1's preamble */
175 for (i = 0; i < 32; i++) {
176 /* low MDC; MDO is already high (miir) */
177 miir &= ~MY_MASK_MIIR_MII_MDC;
178 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
179
180 /* high MDC */
181 miir |= MY_MASK_MIIR_MII_MDC;
182 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
183 }
184
185 /* calculate ST+OP+PHYAD+REGAD+TA */
186 data = opcode | (sc->my_phy_addr << 7) | (regad << 2);
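/*
 * A sketch of the 16-bit clause-22 management frame shifted out MSB
 * first by the loop below: bits 15-12 carry ST and OP (already folded
 * into the 'opcode' constant by the caller), bits 11-7 the PHY address,
 * bits 6-2 the register address, and bits 1-0 the turnaround.  For a
 * read, MDO is released two bit times before the end (mask == 0x2) so
 * the PHY can drive the turnaround.
 */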
187
188 /* send it out */
189 mask = 0x8000;
190 while (mask) {
191 /* low MDC, prepare MDO */
192 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
193 if (mask & data)
194 miir |= MY_MASK_MIIR_MII_MDO;
195
196 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
197 /* high MDC */
198 miir |= MY_MASK_MIIR_MII_MDC;
199 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
200 DELAY(30);
201
202 /* next */
203 mask >>= 1;
204 if (mask == 0x2 && opcode == MY_OP_READ)
205 miir &= ~MY_MASK_MIIR_MII_WRITE;
206 }
207
208 return miir;
209 }
210
211
212 static u_int16_t
213 my_phy_readreg(struct my_softc * sc, int reg)
214 {
215 long miir;
216 int mask, data;
217
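/*
 * The MTD803 maps its internal PHY registers directly into the chip's
 * register space at MY_PHYBASE, so they can be read with a plain CSR
 * access; every other supported chip is reached by bit-banging the MII
 * management interface via my_send_cmd_to_phy().
 */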
218 if (sc->my_info->my_did == MTD803ID)
219 data = CSR_READ_2(sc, MY_PHYBASE + reg * 2);
220 else {
221 miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg);
222
223 /* read data */
224 mask = 0x8000;
225 data = 0;
226 while (mask) {
227 /* low MDC */
228 miir &= ~MY_MASK_MIIR_MII_MDC;
229 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
230
231 /* read MDI */
232 miir = CSR_READ_4(sc, MY_MANAGEMENT);
233 if (miir & MY_MASK_MIIR_MII_MDI)
234 data |= mask;
235
236 /* high MDC, and wait */
237 miir |= MY_MASK_MIIR_MII_MDC;
238 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
239 DELAY(30);
240
241 /* next */
242 mask >>= 1;
243 }
244
245 /* low MDC */
246 miir &= ~MY_MASK_MIIR_MII_MDC;
247 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
248 }
249
250 return (u_int16_t) data;
251 }
252
253
254 static void
255 my_phy_writereg(struct my_softc * sc, int reg, int data)
256 {
257 long miir;
258 int mask;
259
260 if (sc->my_info->my_did == MTD803ID)
261 CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data);
262 else {
263 miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg);
264
265 /* write data */
266 mask = 0x8000;
267 while (mask) {
268 /* low MDC, prepare MDO */
269 miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO);
270 if (mask & data)
271 miir |= MY_MASK_MIIR_MII_MDO;
272 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
273 DELAY(1);
274
275 /* high MDC */
276 miir |= MY_MASK_MIIR_MII_MDC;
277 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
278 DELAY(1);
279
280 /* next */
281 mask >>= 1;
282 }
283
284 /* low MDC */
285 miir &= ~MY_MASK_MIIR_MII_MDC;
286 CSR_WRITE_4(sc, MY_MANAGEMENT, miir);
287 }
288 }
289
290 static u_int8_t
291 my_calchash(caddr_t addr)
292 {
293 u_int32_t crc, carry;
294 int i, j;
295 u_int8_t c;
296
297 /* Compute CRC for the address value. */
298 crc = 0xFFFFFFFF; /* initial value */
299
300 for (i = 0; i < 6; i++) {
301 c = *(addr + i);
302 for (j = 0; j < 8; j++) {
303 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
304 crc <<= 1;
305 c >>= 1;
306 if (carry)
307 crc = (crc ^ 0x04c11db6) | carry;
308 }
309 }
310
311 /*
312  * Return the filter bit position. Note: I arrived at the following
313 * nonsense through experimentation. It's not the usual way to
314 * generate the bit position but it's the only thing I could come up
315 * with that works.
316 */
317 return (~(crc >> 26) & 0x0000003F);
318 }
319
320
321 /*
322 * Program the 64-bit multicast hash filter.
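 * The 6-bit value returned by my_calchash() selects one bit out of the
 * 64-bit filter: values 0-31 set the corresponding bit in MY_MAR0 and
 * values 32-63 set bit (h - 32) in MY_MAR1; e.g. a hash of 35 sets bit 3
 * of MY_MAR1.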
323 */
324 static void
325 my_setmulti(struct my_softc * sc)
326 {
327 struct ifnet *ifp = &sc->arpcom.ac_if;
328 int h = 0;
329 u_int32_t hashes[2] = {0, 0};
330 struct ifmultiaddr *ifma;
331 u_int32_t rxfilt;
332 int mcnt = 0;
333
334 rxfilt = CSR_READ_4(sc, MY_TCRRCR);
335
336 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
337 rxfilt |= MY_AM;
338 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
339 CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF);
340 CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF);
341
342 return;
343 }
344 /* first, zot all the existing hash bits */
345 CSR_WRITE_4(sc, MY_MAR0, 0);
346 CSR_WRITE_4(sc, MY_MAR1, 0);
347
348 /* now program new ones */
349 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
350 if (ifma->ifma_addr->sa_family != AF_LINK)
351 continue;
352 h = my_calchash(LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
353 if (h < 32)
354 hashes[0] |= (1 << h);
355 else
356 hashes[1] |= (1 << (h - 32));
357 mcnt++;
358 }
359
360 if (mcnt)
361 rxfilt |= MY_AM;
362 else
363 rxfilt &= ~MY_AM;
364 CSR_WRITE_4(sc, MY_MAR0, hashes[0]);
365 CSR_WRITE_4(sc, MY_MAR1, hashes[1]);
366 CSR_WRITE_4(sc, MY_TCRRCR, rxfilt);
367 }
368
369 /*
370 * Initiate an autonegotiation session.
371 */
372 static void
373 my_autoneg_xmit(struct my_softc * sc)
374 {
375 u_int16_t phy_sts = 0;
376
377 my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
378 DELAY(500);
379 while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET);
380
381 phy_sts = my_phy_readreg(sc, PHY_BMCR);
382 phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR;
383 my_phy_writereg(sc, PHY_BMCR, phy_sts);
384 }
385
386
387 /*
388 * Invoke autonegotiation on a PHY.
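 * The 'flag' argument selects how completion is awaited:
 * MY_FLAG_FORCEDELAY busy-waits for five seconds (probe time only),
 * MY_FLAG_SCHEDDELAY starts the session and arms the watchdog timer,
 * and MY_FLAG_DELAYTIMEO is the watchdog path that harvests the result.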
389 */
390 static void
391 my_autoneg_mii(struct my_softc * sc, int flag, int verbose)
392 {
393 u_int16_t phy_sts = 0, media, advert, ability;
394 u_int16_t ability2 = 0;
395 struct ifnet *ifp = &sc->arpcom.ac_if;
396 struct ifmedia *ifm = &sc->ifmedia;
397
398 ifm->ifm_media = IFM_ETHER | IFM_AUTO;
399
400 #ifndef FORCE_AUTONEG_TFOUR
401 /*
402 * First, see if autoneg is supported. If not, there's no point in
403 * continuing.
404 */
405 phy_sts = my_phy_readreg(sc, PHY_BMSR);
406 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) {
407 if (verbose)
408 kprintf("my%d: autonegotiation not supported\n",
409 sc->my_unit);
410 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
411 return;
412 }
413 #endif
414 switch (flag) {
415 case MY_FLAG_FORCEDELAY:
416 /*
417 * XXX Never use this option anywhere but in the probe
418 * routine: making the kernel stop dead in its tracks for
419  * five whole seconds after we've gone multi-user is really
420 * bad manners.
421 */
422 my_autoneg_xmit(sc);
423 DELAY(5000000);
424 break;
425 case MY_FLAG_SCHEDDELAY:
426 /*
427 * Wait for the transmitter to go idle before starting an
428 * autoneg session, otherwise my_start() may clobber our
429 * timeout, and we don't want to allow transmission during an
430 * autoneg session since that can screw it up.
431 */
432 if (sc->my_cdata.my_tx_head != NULL) {
433 sc->my_want_auto = 1;
434 return;
435 }
436 my_autoneg_xmit(sc);
437 ifp->if_timer = 5;
438 sc->my_autoneg = 1;
439 sc->my_want_auto = 0;
440 return;
441 case MY_FLAG_DELAYTIMEO:
442 ifp->if_timer = 0;
443 sc->my_autoneg = 0;
444 break;
445 default:
446 kprintf("my%d: invalid autoneg flag: %d\n", sc->my_unit, flag);
447 return;
448 }
449
450 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) {
451 if (verbose)
452 kprintf("my%d: autoneg complete, ", sc->my_unit);
453 phy_sts = my_phy_readreg(sc, PHY_BMSR);
454 } else {
455 if (verbose)
456 kprintf("my%d: autoneg not complete, ", sc->my_unit);
457 }
458
459 media = my_phy_readreg(sc, PHY_BMCR);
460
461 /* Link is good. Report modes and set duplex mode. */
462 if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) {
463 if (verbose)
464 kprintf("my%d: link status good. ", sc->my_unit);
465 advert = my_phy_readreg(sc, PHY_ANAR);
466 ability = my_phy_readreg(sc, PHY_LPAR);
467 if ((sc->my_pinfo->my_vid == MarvellPHYID0) ||
468 (sc->my_pinfo->my_vid == LevelOnePHYID0)) {
469 ability2 = my_phy_readreg(sc, PHY_1000SR);
470 if (ability2 & PHY_1000SR_1000BTXFULL) {
471 advert = 0;
472 ability = 0;
473 /*
474 * this version did not support 1000M,
475 * ifm->ifm_media =
476 * IFM_ETHER | IFM_1000_T | IFM_FDX;
477 */
478 ifm->ifm_media =
479 IFM_ETHER | IFM_100_TX | IFM_FDX;
480 media &= ~PHY_BMCR_SPEEDSEL;
481 media |= PHY_BMCR_1000;
482 media |= PHY_BMCR_DUPLEX;
483 kprintf("(full-duplex, 1000Mbps)\n");
484 } else if (ability2 & PHY_1000SR_1000BTXHALF) {
485 advert = 0;
486 ability = 0;
487 /*
488 * this version did not support 1000M,
489 * ifm->ifm_media = IFM_ETHER | IFM_1000_T;
490 */
491 ifm->ifm_media = IFM_ETHER | IFM_100_TX;
492 media &= ~PHY_BMCR_SPEEDSEL;
493 media &= ~PHY_BMCR_DUPLEX;
494 media |= PHY_BMCR_1000;
495 kprintf("(half-duplex, 1000Mbps)\n");
496 }
497 }
498 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) {
499 ifm->ifm_media = IFM_ETHER | IFM_100_T4;
500 media |= PHY_BMCR_SPEEDSEL;
501 media &= ~PHY_BMCR_DUPLEX;
502 kprintf("(100baseT4)\n");
503 } else if (advert & PHY_ANAR_100BTXFULL &&
504 ability & PHY_ANAR_100BTXFULL) {
505 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
506 media |= PHY_BMCR_SPEEDSEL;
507 media |= PHY_BMCR_DUPLEX;
508 kprintf("(full-duplex, 100Mbps)\n");
509 } else if (advert & PHY_ANAR_100BTXHALF &&
510 ability & PHY_ANAR_100BTXHALF) {
511 ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
512 media |= PHY_BMCR_SPEEDSEL;
513 media &= ~PHY_BMCR_DUPLEX;
514 kprintf("(half-duplex, 100Mbps)\n");
515 } else if (advert & PHY_ANAR_10BTFULL &&
516 ability & PHY_ANAR_10BTFULL) {
517 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
518 media &= ~PHY_BMCR_SPEEDSEL;
519 media |= PHY_BMCR_DUPLEX;
520 kprintf("(full-duplex, 10Mbps)\n");
521 } else if (advert) {
522 ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
523 media &= ~PHY_BMCR_SPEEDSEL;
524 media &= ~PHY_BMCR_DUPLEX;
525 kprintf("(half-duplex, 10Mbps)\n");
526 }
527 media &= ~PHY_BMCR_AUTONEGENBL;
528
529 /* Set ASIC's duplex mode to match the PHY. */
530 my_phy_writereg(sc, PHY_BMCR, media);
531 my_setcfg(sc, media);
532 } else {
533 if (verbose)
534 kprintf("my%d: no carrier\n", sc->my_unit);
535 }
536
537 my_init(sc);
538 if (sc->my_tx_pend) {
539 sc->my_autoneg = 0;
540 sc->my_tx_pend = 0;
541 if_devstart(ifp);
542 }
543 }
544
545 /*
546  * Read the PHY's capability bits and register the supported media types.
547 */
548 static void
549 my_getmode_mii(struct my_softc * sc)
550 {
551 struct ifnet *ifp = &sc->arpcom.ac_if;
552 u_int16_t bmsr;
553
554 bmsr = my_phy_readreg(sc, PHY_BMSR);
555 if (bootverbose)
556 kprintf("my%d: PHY status word: %x\n", sc->my_unit, bmsr);
557
558 /* fallback */
559 sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX;
560
561 if (bmsr & PHY_BMSR_10BTHALF) {
562 if (bootverbose)
563 kprintf("my%d: 10Mbps half-duplex mode supported\n",
564 sc->my_unit);
565 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX,
566 0, NULL);
567 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
568 }
569 if (bmsr & PHY_BMSR_10BTFULL) {
570 if (bootverbose)
571 kprintf("my%d: 10Mbps full-duplex mode supported\n",
572 sc->my_unit);
573
574 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
575 0, NULL);
576 sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX;
577 }
578 if (bmsr & PHY_BMSR_100BTXHALF) {
579 if (bootverbose)
580 kprintf("my%d: 100Mbps half-duplex mode supported\n",
581 sc->my_unit);
582 ifp->if_baudrate = 100000000;
583 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
584 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX,
585 0, NULL);
586 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX;
587 }
588 if (bmsr & PHY_BMSR_100BTXFULL) {
589 if (bootverbose)
590 kprintf("my%d: 100Mbps full-duplex mode supported\n",
591 sc->my_unit);
592 ifp->if_baudrate = 100000000;
593 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
594 0, NULL);
595 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX;
596 }
597 /* Some also support 100BaseT4. */
598 if (bmsr & PHY_BMSR_100BT4) {
599 if (bootverbose)
600 kprintf("my%d: 100baseT4 mode supported\n", sc->my_unit);
601 ifp->if_baudrate = 100000000;
602 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL);
603 sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4;
604 #ifdef FORCE_AUTONEG_TFOUR
605 if (bootverbose)
606 kprintf("my%d: forcing on autoneg support for BT4\n",
607 sc->my_unit);
608 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
609 sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
610 #endif
611 }
612 #if 0 /* this version did not support 1000M, */
613 if (sc->my_pinfo->my_vid == MarvellPHYID0) {
614 if (bootverbose)
615 kprintf("my%d: 1000Mbps half-duplex mode supported\n",
616 sc->my_unit);
617
618 ifp->if_baudrate = 1000000000;
619 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
620 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX,
621 0, NULL);
622 if (bootverbose)
623 kprintf("my%d: 1000Mbps full-duplex mode supported\n",
624 sc->my_unit);
625 ifp->if_baudrate = 1000000000;
626 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
627 0, NULL);
628 sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX;
629 }
630 #endif
631 if (bmsr & PHY_BMSR_CANAUTONEG) {
632 if (bootverbose)
633 kprintf("my%d: autoneg supported\n", sc->my_unit);
634 ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
635 sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO;
636 }
637 }
638
639 /*
640 * Set speed and duplex mode.
641 */
642 static void
643 my_setmode_mii(struct my_softc * sc, int media)
644 {
645 struct ifnet *ifp = &sc->arpcom.ac_if;
646 u_int16_t bmcr;
647
648 /*
649 * If an autoneg session is in progress, stop it.
650 */
651 if (sc->my_autoneg) {
652 kprintf("my%d: canceling autoneg session\n", sc->my_unit);
653 ifp->if_timer = sc->my_autoneg = sc->my_want_auto = 0;
654 bmcr = my_phy_readreg(sc, PHY_BMCR);
655 bmcr &= ~PHY_BMCR_AUTONEGENBL;
656 my_phy_writereg(sc, PHY_BMCR, bmcr);
657 }
658 kprintf("my%d: selecting MII, ", sc->my_unit);
659 bmcr = my_phy_readreg(sc, PHY_BMCR);
660 bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 |
661 PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK);
662
663 #if 0 /* this version did not support 1000M, */
664 if (IFM_SUBTYPE(media) == IFM_1000_T) {
665 kprintf("1000Mbps/T4, half-duplex\n");
666 bmcr &= ~PHY_BMCR_SPEEDSEL;
667 bmcr &= ~PHY_BMCR_DUPLEX;
668 bmcr |= PHY_BMCR_1000;
669 }
670 #endif
671 if (IFM_SUBTYPE(media) == IFM_100_T4) {
672 kprintf("100Mbps/T4, half-duplex\n");
673 bmcr |= PHY_BMCR_SPEEDSEL;
674 bmcr &= ~PHY_BMCR_DUPLEX;
675 }
676 if (IFM_SUBTYPE(media) == IFM_100_TX) {
677 kprintf("100Mbps, ");
678 bmcr |= PHY_BMCR_SPEEDSEL;
679 }
680 if (IFM_SUBTYPE(media) == IFM_10_T) {
681 kprintf("10Mbps, ");
682 bmcr &= ~PHY_BMCR_SPEEDSEL;
683 }
684 if ((media & IFM_GMASK) == IFM_FDX) {
685 kprintf("full duplex\n");
686 bmcr |= PHY_BMCR_DUPLEX;
687 } else {
688 kprintf("half duplex\n");
689 bmcr &= ~PHY_BMCR_DUPLEX;
690 }
691 my_phy_writereg(sc, PHY_BMCR, bmcr);
692 my_setcfg(sc, bmcr);
693 }
694
695 /*
696 * The Myson manual states that in order to fiddle with the 'full-duplex' and
697 * '100Mbps' bits in the netconfig register, we first have to put the
698 * transmit and/or receive logic in the idle state.
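 * The sequence below follows that rule: clear MY_TE/MY_RE, poll until
 * MY_TXRUN/MY_RXRUN drop (up to MY_TIMEOUT * 10us), rewrite the speed
 * and duplex bits, then re-enable the transmitter and receiver if they
 * were running.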
699 */
700 static void
701 my_setcfg(struct my_softc * sc, int bmcr)
702 {
703 int i, restart = 0;
704
705 if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) {
706 restart = 1;
707 MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE));
708 for (i = 0; i < MY_TIMEOUT; i++) {
709 DELAY(10);
710 if (!(CSR_READ_4(sc, MY_TCRRCR) &
711 (MY_TXRUN | MY_RXRUN)))
712 break;
713 }
714 if (i == MY_TIMEOUT)
715 kprintf("my%d: failed to force tx and rx to idle\n",
716 sc->my_unit);
717 }
718 MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000);
719 MY_CLRBIT(sc, MY_TCRRCR, MY_PS10);
720 if (bmcr & PHY_BMCR_1000)
721 MY_SETBIT(sc, MY_TCRRCR, MY_PS1000);
722 else if (!(bmcr & PHY_BMCR_SPEEDSEL))
723 MY_SETBIT(sc, MY_TCRRCR, MY_PS10);
724 if (bmcr & PHY_BMCR_DUPLEX)
725 MY_SETBIT(sc, MY_TCRRCR, MY_FD);
726 else
727 MY_CLRBIT(sc, MY_TCRRCR, MY_FD);
728 if (restart)
729 MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE);
730 }
731
732 static void
733 my_reset(struct my_softc * sc)
734 {
735 int i;
736
737 MY_SETBIT(sc, MY_BCR, MY_SWR);
738 for (i = 0; i < MY_TIMEOUT; i++) {
739 DELAY(10);
740 if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR))
741 break;
742 }
743 if (i == MY_TIMEOUT)
744 kprintf("my%d: reset never completed!\n", sc->my_unit);
745
746 /* Wait a little while for the chip to get its brains in order. */
747 DELAY(1000);
748 }
749
750 /*
751 * Probe for a Myson chip. Check the PCI vendor and device IDs against our
752 * list and return a device name if we find a match.
753 */
754 static int
755 my_probe(device_t dev)
756 {
757 struct my_type *t;
758 uint16_t vendor, product;
759
760 vendor = pci_get_vendor(dev);
761 product = pci_get_device(dev);
762
763 for (t = my_devs; t->my_name != NULL; t++) {
764 if (vendor == t->my_vid && product == t->my_did) {
765 device_set_desc(dev, t->my_name);
766 return (0);
767 }
768 }
769
770 return (ENXIO);
771 }
772
773 /*
774 * Attach the interface. Allocate softc structures, do ifmedia setup and
775 * ethernet/BPF attach.
776 */
777 static int
778 my_attach(device_t dev)
779 {
780 int i;
781 u_char eaddr[ETHER_ADDR_LEN];
782 u_int32_t command, iobase;
783 struct my_softc *sc;
784 struct ifnet *ifp;
785 int media = IFM_ETHER | IFM_100_TX | IFM_FDX;
786 unsigned int round;
787 caddr_t roundptr;
788 struct my_type *p;
789 u_int16_t phy_vid, phy_did, phy_sts = 0;
790 int rid, unit, error = 0;
791 struct my_type *t;
792 uint16_t vendor, product;
793
794 vendor = pci_get_vendor(dev);
795 product = pci_get_device(dev);
796
797 for (t = my_devs; t->my_name != NULL; t++) {
798 if (vendor == t->my_vid && product == t->my_did)
799 break;
800 }
801
802 if (t->my_name == NULL)
803 return(ENXIO);
804
805 sc = device_get_softc(dev);
806 unit = device_get_unit(dev);
807
808 /*
809 * Map control/status registers.
810 */
811 command = pci_read_config(dev, PCIR_COMMAND, 4);
812 command |= (PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
813 pci_write_config(dev, PCIR_COMMAND, command & 0x000000ff, 4);
814 command = pci_read_config(dev, PCIR_COMMAND, 4);
815
816 if (t->my_did == MTD800ID) {
817 iobase = pci_read_config(dev, MY_PCI_LOIO, 4);
818 if (iobase & 0x300)
819 MY_USEIOSPACE = 0;
820 }
821 if (MY_USEIOSPACE) {
822 if (!(command & PCIM_CMD_PORTEN)) {
823 kprintf("my%d: failed to enable I/O ports!\n", unit);
824 error = ENXIO;
825 return(error);
826 }
827 } else {
828 if (!(command & PCIM_CMD_MEMEN)) {
829 kprintf("my%d: failed to enable memory mapping!\n",
830 unit);
831 error = ENXIO;
832 return(error);
833 }
834 }
835
836 rid = MY_RID;
837 sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE);
838
839 if (sc->my_res == NULL) {
840 kprintf("my%d: couldn't map ports/memory\n", unit);
841 error = ENXIO;
842 goto fail;
843 }
844 sc->my_btag = rman_get_bustag(sc->my_res);
845 sc->my_bhandle = rman_get_bushandle(sc->my_res);
846
847 rid = 0;
848 sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
849 RF_SHAREABLE | RF_ACTIVE);
850
851 if (sc->my_irq == NULL) {
852 kprintf("my%d: couldn't map interrupt\n", unit);
853 error = ENXIO;
854 goto fail;
855 }
856
857 sc->my_info = t;
858
859 /* Reset the adapter. */
860 my_reset(sc);
861
862 /*
863 * Get station address
864 */
865 for (i = 0; i < ETHER_ADDR_LEN; ++i)
866 eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i);
867
868 sc->my_unit = unit;
869
870 sc->my_ldata_ptr = kmalloc(sizeof(struct my_list_data) + 8,
871 M_DEVBUF, M_WAITOK);
872 sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr;
873 round = (uintptr_t)sc->my_ldata_ptr & 0xF;
874 roundptr = sc->my_ldata_ptr;
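/*
 * Align the descriptor list: the allocation above includes 8 spare
 * bytes, and the loop below advances the pointer until it sits on an
 * 8-byte boundary (the alignment the hardware presumably expects).
 */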
875 for (i = 0; i < 8; i++) {
876 if (round % 8) {
877 round++;
878 roundptr++;
879 } else
880 break;
881 }
882 sc->my_ldata = (struct my_list_data *) roundptr;
883 bzero(sc->my_ldata, sizeof(struct my_list_data));
884
885 ifp = &sc->arpcom.ac_if;
886 ifp->if_softc = sc;
887 if_initname(ifp, "my", unit);
888 ifp->if_mtu = ETHERMTU;
889 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
890 ifp->if_ioctl = my_ioctl;
891 ifp->if_start = my_start;
892 ifp->if_watchdog = my_watchdog;
893 ifp->if_init = my_init;
894 ifp->if_baudrate = 10000000;
895 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
896 ifq_set_ready(&ifp->if_snd);
897
898 if (sc->my_info->my_did == MTD803ID)
899 sc->my_pinfo = my_phys;
900 else {
901 if (bootverbose)
902 kprintf("my%d: probing for a PHY\n", sc->my_unit);
903 for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) {
904 if (bootverbose)
905 kprintf("my%d: checking address: %d\n",
906 sc->my_unit, i);
907 sc->my_phy_addr = i;
908 phy_sts = my_phy_readreg(sc, PHY_BMSR);
909 if ((phy_sts != 0) && (phy_sts != 0xffff))
910 break;
911 else
912 phy_sts = 0;
913 }
914 if (phy_sts) {
915 phy_vid = my_phy_readreg(sc, PHY_VENID);
916 phy_did = my_phy_readreg(sc, PHY_DEVID);
917 if (bootverbose) {
918 kprintf("my%d: found PHY at address %d, ",
919 sc->my_unit, sc->my_phy_addr);
920 kprintf("vendor id: %x device id: %x\n",
921 phy_vid, phy_did);
922 }
923 p = my_phys;
924 while (p->my_vid) {
925 if (phy_vid == p->my_vid) {
926 sc->my_pinfo = p;
927 break;
928 }
929 p++;
930 }
931 if (sc->my_pinfo == NULL)
932 sc->my_pinfo = &my_phys[PHY_UNKNOWN];
933 if (bootverbose)
934 kprintf("my%d: PHY type: %s\n",
935 sc->my_unit, sc->my_pinfo->my_name);
936 } else {
937 kprintf("my%d: MII without any phy!\n", sc->my_unit);
938 error = ENXIO;
939 goto fail;
940 }
941 }
942
943 /* Do ifmedia setup. */
944 ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts);
945 my_getmode_mii(sc);
946 my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1);
947 media = sc->ifmedia.ifm_media;
948 my_stop(sc);
949 ifmedia_set(&sc->ifmedia, media);
950
951 ether_ifattach(ifp, eaddr, NULL);
952
953 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->my_irq));
954
955 error = bus_setup_intr(dev, sc->my_irq, INTR_MPSAFE,
956 my_intr, sc, &sc->my_intrhand,
957 ifp->if_serializer);
958 if (error) {
959 ether_ifdetach(ifp);
960 kprintf("my%d: couldn't set up irq\n", unit);
961 goto fail;
962 }
963
964 return (0);
965
966 fail:
967 my_detach(dev);
968 return (error);
969 }
970
971 static int
972 my_detach(device_t dev)
973 {
974 struct my_softc *sc = device_get_softc(dev);
975 struct ifnet *ifp = &sc->arpcom.ac_if;
976
977 if (device_is_attached(dev)) {
978 lwkt_serialize_enter(ifp->if_serializer);
979 my_stop(sc);
980 bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand);
981 lwkt_serialize_exit(ifp->if_serializer);
982
983 ether_ifdetach(ifp);
984 }
985
986 if (sc->my_irq)
987 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
988 if (sc->my_res)
989 bus_release_resource(dev, MY_RES, MY_RID, sc->my_res);
990
991 return (0);
992 }
993
994
995 /*
996 * Initialize the transmit descriptors.
997 */
998 static int
999 my_list_tx_init(struct my_softc * sc)
1000 {
1001 struct my_chain_data *cd;
1002 struct my_list_data *ld;
1003 int i;
1004
1005 cd = &sc->my_cdata;
1006 ld = sc->my_ldata;
1007 for (i = 0; i < MY_TX_LIST_CNT; i++) {
1008 cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i];
1009 if (i == (MY_TX_LIST_CNT - 1))
1010 cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0];
1011 else
1012 cd->my_tx_chain[i].my_nextdesc =
1013 &cd->my_tx_chain[i + 1];
1014 }
1015 cd->my_tx_free = &cd->my_tx_chain[0];
1016 cd->my_tx_tail = cd->my_tx_head = NULL;
1017 return (0);
1018 }
1019
1020 /*
1021 * Initialize the RX descriptors and allocate mbufs for them. Note that we
1022 * arrange the descriptors in a closed ring, so that the last descriptor
1023 * points back to the first.
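 * The my_next links hold physical addresses obtained with vtophys(),
 * while the my_nextdesc pointers keep the matching virtual-address ring
 * for the driver's own bookkeeping.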
1024 */
1025 static int
1026 my_list_rx_init(struct my_softc * sc)
1027 {
1028 struct my_chain_data *cd;
1029 struct my_list_data *ld;
1030 int i;
1031
1032 cd = &sc->my_cdata;
1033 ld = sc->my_ldata;
1034 for (i = 0; i < MY_RX_LIST_CNT; i++) {
1035 cd->my_rx_chain[i].my_ptr =
1036 (struct my_desc *) & ld->my_rx_list[i];
1037 if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS)
1038 return (ENOBUFS);
1039 if (i == (MY_RX_LIST_CNT - 1)) {
1040 cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0];
1041 ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]);
1042 } else {
1043 cd->my_rx_chain[i].my_nextdesc =
1044 &cd->my_rx_chain[i + 1];
1045 ld->my_rx_list[i].my_next =
1046 vtophys(&ld->my_rx_list[i + 1]);
1047 }
1048 }
1049 cd->my_rx_head = &cd->my_rx_chain[0];
1050 return (0);
1051 }
1052
1053 /*
1054 * Initialize an RX descriptor and attach an MBUF cluster.
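 * The descriptor's control word encodes the usable buffer length and the
 * status word is set to MY_OWNByNIC, handing the buffer to the chip.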
1055 */
1056 static int
1057 my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
1058 {
1059 struct mbuf *m_new = NULL;
1060
1061 MGETHDR(m_new, M_NOWAIT, MT_DATA);
1062 if (m_new == NULL) {
1063 kprintf("my%d: no memory for rx list -- packet dropped!\n",
1064 sc->my_unit);
1065 return (ENOBUFS);
1066 }
1067 MCLGET(m_new, M_NOWAIT);
1068 if (!(m_new->m_flags & M_EXT)) {
1069 kprintf("my%d: no memory for rx list -- packet dropped!\n",
1070 sc->my_unit);
1071 m_freem(m_new);
1072 return (ENOBUFS);
1073 }
1074 c->my_mbuf = m_new;
1075 c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t));
1076 c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift;
1077 c->my_ptr->my_status = MY_OWNByNIC;
1078 return (0);
1079 }
1080
1081 /*
1082 * A frame has been uploaded: pass the resulting mbuf chain up to the higher
1083 * level protocols.
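 * Descriptors are walked until one is found that the NIC still owns.
 * Short frames (< MINCLSIZE) are copied out with m_devget() so the
 * existing cluster can be recycled in place; larger frames hand the
 * cluster up the stack and a fresh one is allocated via my_newbuf().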
1084 */
1085 static void
1086 my_rxeof(struct my_softc * sc)
1087 {
1088 struct mbuf *m;
1089 struct ifnet *ifp = &sc->arpcom.ac_if;
1090 struct my_chain_onefrag *cur_rx;
1091 int total_len = 0;
1092 u_int32_t rxstat;
1093
1094 while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status)
1095 & MY_OWNByNIC)) {
1096 cur_rx = sc->my_cdata.my_rx_head;
1097 sc->my_cdata.my_rx_head = cur_rx->my_nextdesc;
1098
1099 if (rxstat & MY_ES) { /* error summary: give up this rx pkt */
1100 IFNET_STAT_INC(ifp, ierrors, 1);
1101 cur_rx->my_ptr->my_status = MY_OWNByNIC;
1102 continue;
1103 }
1104 /* No errors; receive the packet. */
1105 total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift;
1106 total_len -= ETHER_CRC_LEN;
1107
1108 if (total_len < MINCLSIZE) {
1109 m = m_devget(mtod(cur_rx->my_mbuf, void *),
1110 total_len, 0, ifp);
1111 cur_rx->my_ptr->my_status = MY_OWNByNIC;
1112 if (m == NULL) {
1113 IFNET_STAT_INC(ifp, ierrors, 1);
1114 continue;
1115 }
1116 } else {
1117 m = cur_rx->my_mbuf;
1118 /*
1119 * Try to conjure up a new mbuf cluster. If that
1120 * fails, it means we have an out of memory condition
1121 * and should leave the buffer in place and continue.
1122 * This will result in a lost packet, but there's
1123 * little else we can do in this situation.
1124 */
1125 if (my_newbuf(sc, cur_rx) == ENOBUFS) {
1126 IFNET_STAT_INC(ifp, ierrors, 1);
1127 cur_rx->my_ptr->my_status = MY_OWNByNIC;
1128 continue;
1129 }
1130 m->m_pkthdr.rcvif = ifp;
1131 m->m_pkthdr.len = m->m_len = total_len;
1132 }
1133 IFNET_STAT_INC(ifp, ipackets, 1);
1134 ifp->if_input(ifp, m, NULL, -1);
1135 }
1136 }
1137
1138
1139 /*
1140 * A frame was downloaded to the chip. It's safe for us to clean up the list
1141 * buffers.
1142 */
1143 static void
1144 my_txeof(struct my_softc * sc)
1145 {
1146 struct ifnet *ifp = &sc->arpcom.ac_if;
1147 struct my_chain *cur_tx;
1148
1149 /* Clear the timeout timer. */
1150 ifp->if_timer = 0;
1151 if (sc->my_cdata.my_tx_head == NULL)
1152 return;
1153 /*
1154 * Go through our tx list and free mbufs for those frames that have
1155 * been transmitted.
1156 */
1157 while (sc->my_cdata.my_tx_head->my_mbuf != NULL) {
1158 u_int32_t txstat;
1159
1160 cur_tx = sc->my_cdata.my_tx_head;
1161 txstat = MY_TXSTATUS(cur_tx);
1162 if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT)
1163 break;
1164 if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) {
1165 if (txstat & MY_TXERR) {
1166 IFNET_STAT_INC(ifp, oerrors, 1);
1167 if (txstat & MY_EC) /* excessive collision */
1168 IFNET_STAT_INC(ifp, collisions, 1);
1169 if (txstat & MY_LC) /* late collision */
1170 IFNET_STAT_INC(ifp, collisions, 1);
1171 }
1172 IFNET_STAT_INC(ifp, collisions,
1173 (txstat & MY_NCRMASK) >> MY_NCRShift);
1174 }
1175 IFNET_STAT_INC(ifp, opackets, 1);
1176 m_freem(cur_tx->my_mbuf);
1177 cur_tx->my_mbuf = NULL;
1178 if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) {
1179 sc->my_cdata.my_tx_head = NULL;
1180 sc->my_cdata.my_tx_tail = NULL;
1181 break;
1182 }
1183 sc->my_cdata.my_tx_head = cur_tx->my_nextdesc;
1184 }
1185 if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) {
1186 IFNET_STAT_INC(ifp, collisions,
1187 (CSR_READ_4(sc, MY_TSR) & MY_NCRMask));
1188 }
1189 }
1190
1191 /*
1192 * TX 'end of channel' interrupt handler.
1193 */
1194 static void
1195 my_txeoc(struct my_softc * sc)
1196 {
1197 struct ifnet *ifp = &sc->arpcom.ac_if;
1198
1199 ifp->if_timer = 0;
1200 if (sc->my_cdata.my_tx_head == NULL) {
1201 ifq_clr_oactive(&ifp->if_snd);
1202 sc->my_cdata.my_tx_tail = NULL;
1203 if (sc->my_want_auto)
1204 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1205 } else {
1206 if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) {
1207 MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC;
1208 ifp->if_timer = 5;
1209 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF);
1210 }
1211 }
1212 }
1213
1214 static void
1215 my_intr(void *arg)
1216 {
1217 struct my_softc *sc = arg;
1218 struct ifnet *ifp = &sc->arpcom.ac_if;
1219 u_int32_t status;
1220
1221 if (!(ifp->if_flags & IFF_UP))
1222 return;
1223
1224 /* Disable interrupts. */
1225 CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1226
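/*
 * Service events until the status register reads back clear.  Writing
 * the asserted bits back to MY_ISR acknowledges them (write-one-to-clear
 * behaviour, inferred from the loop below).
 */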
1227 for (;;) {
1228 status = CSR_READ_4(sc, MY_ISR);
1229 status &= MY_INTRS;
1230 if (status)
1231 CSR_WRITE_4(sc, MY_ISR, status);
1232 else
1233 break;
1234
1235 if (status & MY_RI) /* receive interrupt */
1236 my_rxeof(sc);
1237
1238 if ((status & MY_RBU) || (status & MY_RxErr)) {
1239 /* rx buffer unavailable or rx error */
1240 IFNET_STAT_INC(ifp, ierrors, 1);
1241 #ifdef foo
1242 my_stop(sc);
1243 my_reset(sc);
1244 my_init(sc);
1245 #endif
1246 }
1247 if (status & MY_TI) /* tx interrupt */
1248 my_txeof(sc);
1249 if (status & MY_ETI) /* tx early interrupt */
1250 my_txeof(sc);
1251 if (status & MY_TBU) /* tx buffer unavailable */
1252 my_txeoc(sc);
1253
1254 #if 0 /* 90/1/18 delete */
1255 if (status & MY_FBE) {
1256 my_reset(sc);
1257 my_init(sc);
1258 }
1259 #endif
1260
1261 }
1262
1263 /* Re-enable interrupts. */
1264 CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1265 if (!ifq_is_empty(&ifp->if_snd))
1266 if_devstart(ifp);
1267 }
1268
1269 /*
1270  * Encapsulate an outgoing packet in a descriptor. The mbuf chain is coalesced
1271  * into one newly allocated mbuf (a cluster if it exceeds MHLEN), so the chip sees a single contiguous fragment.
1272 */
1273 static int
1274 my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
1275 {
1276 struct my_desc *f = NULL;
1277 int total_len;
1278 struct mbuf *m, *m_new = NULL;
1279
1280 /* calculate the total tx pkt length */
1281 total_len = 0;
1282 for (m = m_head; m != NULL; m = m->m_next)
1283 total_len += m->m_len;
1284 /*
1285 * Start packing the mbufs in this chain into the fragment pointers.
1286 * Stop when we run out of fragments or hit the end of the mbuf
1287 * chain.
1288 */
1289 m = m_head;
1290 MGETHDR(m_new, M_NOWAIT, MT_DATA);
1291 if (m_new == NULL) {
1292 kprintf("my%d: no memory for tx list", sc->my_unit);
1293 return (1);
1294 }
1295 if (m_head->m_pkthdr.len > MHLEN) {
1296 MCLGET(m_new, M_NOWAIT);
1297 if (!(m_new->m_flags & M_EXT)) {
1298 m_freem(m_new);
1299 kprintf("my%d: no memory for tx list", sc->my_unit);
1300 return (1);
1301 }
1302 }
1303 m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, void *));
1304 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1305 m_freem(m_head);
1306 m_head = m_new;
1307 f = &c->my_ptr->my_frag[0];
1308 f->my_status = 0;
1309 f->my_data = vtophys(mtod(m_new, caddr_t));
1310 total_len = m_new->m_len;
1311 f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable;
1312 f->my_ctl |= total_len << MY_PKTShift; /* pkt size */
1313 f->my_ctl |= total_len; /* buffer size */
1314 /* 89/12/29 add, for mtd891 *//* [ 89? ] */
1315 if (sc->my_info->my_did == MTD891ID)
1316 f->my_ctl |= MY_ETIControl | MY_RetryTxLC;
1317 c->my_mbuf = m_head;
1318 c->my_lastdesc = 0;
1319 MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]);
1320 return (0);
1321 }
1322
1323 /*
1324  * Main transmit routine. Packets are dequeued from the send queue, copied
1325  * into driver-owned buffers by my_encap(), and chained onto the transmit
1326  * list. A pointer to each copy is kept in the software chain, since the
1327  * descriptor fragment pointers hold physical addresses.
1328 */
1329 static void
1330 my_start(struct ifnet * ifp, struct ifaltq_subque *ifsq)
1331 {
1332 struct my_softc *sc = ifp->if_softc;
1333 struct mbuf *m_head = NULL;
1334 struct my_chain *cur_tx = NULL, *start_tx;
1335
1336 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1337 crit_enter();
1338
1339 if (sc->my_autoneg) {
1340 ifq_purge(&ifp->if_snd);
1341 sc->my_tx_pend = 1;
1342 crit_exit();
1343 return;
1344 }
1345 /*
1346 * Check for an available queue slot. If there are none, punt.
1347 */
1348 if (sc->my_cdata.my_tx_free->my_mbuf != NULL) {
1349 ifq_set_oactive(&ifp->if_snd);
1350 crit_exit();
1351 return;
1352 }
1353
1354 start_tx = sc->my_cdata.my_tx_free;
1355 while (sc->my_cdata.my_tx_free->my_mbuf == NULL) {
1356 m_head = ifq_dequeue(&ifp->if_snd);
1357 if (m_head == NULL)
1358 break;
1359
1360 /* Pick a descriptor off the free list. */
1361 cur_tx = sc->my_cdata.my_tx_free;
1362 sc->my_cdata.my_tx_free = cur_tx->my_nextdesc;
1363
1364 /* Pack the data into the descriptor. */
1365 my_encap(sc, cur_tx, m_head);
1366
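/*
 * Every descriptor except the first in this burst is handed to the
 * NIC immediately; ownership of the first one is deferred until the
 * whole burst has been queued (see MY_TXOWN(start_tx) below) so the
 * chip cannot start on a half-built chain.
 */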
1367 if (cur_tx != start_tx)
1368 MY_TXOWN(cur_tx) = MY_OWNByNIC;
1369 BPF_MTAP(ifp, cur_tx->my_mbuf);
1370 }
1371 /*
1372 * If there are no packets queued, bail.
1373 */
1374 if (cur_tx == NULL) {
1375 crit_exit();
1376 return;
1377 }
1378 /*
1379 * Place the request for the upload interrupt in the last descriptor
1380 * in the chain. This way, if we're chaining several packets at once,
1381  * we'll only get an interrupt once for the whole chain rather than
1382 * once for each packet.
1383 */
1384 MY_TXCTL(cur_tx) |= MY_TXIC;
1385 cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC;
1386 sc->my_cdata.my_tx_tail = cur_tx;
1387 if (sc->my_cdata.my_tx_head == NULL)
1388 sc->my_cdata.my_tx_head = start_tx;
1389 MY_TXOWN(start_tx) = MY_OWNByNIC;
1390 CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); /* tx polling demand */
1391
1392 /*
1393 * Set a timeout in case the chip goes out to lunch.
1394 */
1395 ifp->if_timer = 5;
1396
1397 crit_exit();
1398 }
1399
1400 static void
1401 my_init(void *xsc)
1402 {
1403 struct my_softc *sc = xsc;
1404 struct ifnet *ifp = &sc->arpcom.ac_if;
1405 u_int16_t phy_bmcr = 0;
1406
1407 crit_enter();
1408 if (sc->my_autoneg) {
1409 crit_exit();
1410 return;
1411 }
1412 if (sc->my_pinfo != NULL)
1413 phy_bmcr = my_phy_readreg(sc, PHY_BMCR);
1414 /*
1415 * Cancel pending I/O and free all RX/TX buffers.
1416 */
1417 my_stop(sc);
1418 my_reset(sc);
1419
1420 /*
1421 * Set cache alignment and burst length.
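 * MY_PBL8 sets the DMA burst length; MY_TFTSF, MY_RBLEN and MY_RPBLE512
 * pick the transmit/receive FIFO thresholds and receive burst size.  The
 * individual bit meanings are inferred from the names in if_myreg.h and
 * should be treated as assumptions.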
1422 */
1423 #if 0 /* 89/9/1 modify, */
1424 CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512);
1425 CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF);
1426 #endif
1427 CSR_WRITE_4(sc, MY_BCR, MY_PBL8);
1428 CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512);
1429 /*
1430 * 89/12/29 add, for mtd891,
1431 */
1432 if (sc->my_info->my_did == MTD891ID) {
1433 MY_SETBIT(sc, MY_BCR, MY_PROG);
1434 MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced);
1435 }
1436 my_setcfg(sc, phy_bmcr);
1437 /* Init circular RX list. */
1438 if (my_list_rx_init(sc) == ENOBUFS) {
1439 kprintf("my%d: init failed: no memory for rx buffers\n",
1440 sc->my_unit);
1441 my_stop(sc);
1442 crit_exit();
1443 return;
1444 }
1445 /* Init TX descriptors. */
1446 my_list_tx_init(sc);
1447
1448 /* If we want promiscuous mode, set the allframes bit. */
1449 if (ifp->if_flags & IFF_PROMISC)
1450 MY_SETBIT(sc, MY_TCRRCR, MY_PROM);
1451 else
1452 MY_CLRBIT(sc, MY_TCRRCR, MY_PROM);
1453
1454 /*
1455 * Set capture broadcast bit to capture broadcast frames.
1456 */
1457 if (ifp->if_flags & IFF_BROADCAST)
1458 MY_SETBIT(sc, MY_TCRRCR, MY_AB);
1459 else
1460 MY_CLRBIT(sc, MY_TCRRCR, MY_AB);
1461
1462 /*
1463 * Program the multicast filter, if necessary.
1464 */
1465 my_setmulti(sc);
1466
1467 /*
1468 * Load the address of the RX list.
1469 */
1470 MY_CLRBIT(sc, MY_TCRRCR, MY_RE);
1471 CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0]));
1472
1473 /*
1474 * Enable interrupts.
1475 */
1476 CSR_WRITE_4(sc, MY_IMR, MY_INTRS);
1477 CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF);
1478
1479 /* Enable receiver and transmitter. */
1480 MY_SETBIT(sc, MY_TCRRCR, MY_RE);
1481 MY_CLRBIT(sc, MY_TCRRCR, MY_TE);
1482 CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0]));
1483 MY_SETBIT(sc, MY_TCRRCR, MY_TE);
1484
1485 /* Restore state of BMCR */
1486 if (sc->my_pinfo != NULL)
1487 my_phy_writereg(sc, PHY_BMCR, phy_bmcr);
1488 ifp->if_flags |= IFF_RUNNING;
1489 ifq_clr_oactive(&ifp->if_snd);
1490 crit_exit();
1491 }
1492
1493 /*
1494 * Set media options.
1495 */
1496
1497 static int
1498 my_ifmedia_upd(struct ifnet * ifp)
1499 {
1500 struct my_softc *sc = ifp->if_softc;
1501 struct ifmedia *ifm = &sc->ifmedia;
1502
1503 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1504 return (EINVAL);
1505
1506 crit_enter();
1507
1508 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
1509 my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1);
1510 else
1511 my_setmode_mii(sc, ifm->ifm_media);
1512
1513 crit_exit();
1514
1515 return (0);
1516 }
1517
1518 /*
1519 * Report current media status.
1520 */
1521
1522 static void
1523 my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr)
1524 {
1525 struct my_softc *sc = ifp->if_softc;
1526 u_int16_t advert = 0, ability = 0;
1527
1528 crit_enter();
1529
1530 ifmr->ifm_active = IFM_ETHER;
1531 if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
1532 #if 0 /* this version did not support 1000M, */
1533 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000)
1534 ifmr->ifm_active = IFM_ETHER | IFM_1000TX;
1535 #endif
1536 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
1537 ifmr->ifm_active = IFM_ETHER | IFM_100_TX;
1538 else
1539 ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1540 if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
1541 ifmr->ifm_active |= IFM_FDX;
1542 else
1543 ifmr->ifm_active |= IFM_HDX;
1544
1545 crit_exit();
1546
1547 return;
1548 }
1549 ability = my_phy_readreg(sc, PHY_LPAR);
1550 advert = my_phy_readreg(sc, PHY_ANAR);
1551
1552 #if 0 /* this version did not support 1000M, */
1553 if (sc->my_pinfo->my_vid == MarvellPHYID0) {
1554 ability2 = my_phy_readreg(sc, PHY_1000SR);
1555 if (ability2 & PHY_1000SR_1000BTXFULL) {
1556 advert = 0;
1557 ability = 0;
1558 ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
1559 } else if (ability & PHY_1000SR_1000BTXHALF) {
1560 advert = 0;
1561 ability = 0;
1562 ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_HDX;
1563 }
1564 }
1565 #endif
1566 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4)
1567 ifmr->ifm_active = IFM_ETHER | IFM_100_T4;
1568 else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL)
1569 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1570 else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF)
1571 ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX;
1572 else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL)
1573 ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX;
1574 else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF)
1575 ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX;
1576
1577 crit_exit();
1578 }
1579
1580 static int
1581 my_ioctl(struct ifnet * ifp, u_long command, caddr_t data, struct ucred *cr)
1582 {
1583 struct my_softc *sc = ifp->if_softc;
1584 struct ifreq *ifr = (struct ifreq *) data;
1585 int error = 0;
1586
1587 crit_enter();
1588 switch (command) {
1589 case SIOCSIFFLAGS:
1590 if (ifp->if_flags & IFF_UP)
1591 my_init(sc);
1592 else if (ifp->if_flags & IFF_RUNNING)
1593 my_stop(sc);
1594 error = 0;
1595 break;
1596 case SIOCADDMULTI:
1597 case SIOCDELMULTI:
1598 my_setmulti(sc);
1599 error = 0;
1600 break;
1601 case SIOCGIFMEDIA:
1602 case SIOCSIFMEDIA:
1603 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
1604 break;
1605 default:
1606 error = ether_ioctl(ifp, command, data);
1607 break;
1608 }
1609
1610 crit_exit();
1611 return (error);
1612 }
1613
1614 static void
1615 my_watchdog(struct ifnet * ifp)
1616 {
1617 struct my_softc *sc = ifp->if_softc;
1618
1619 crit_enter();
1620
1621 if (sc->my_autoneg) {
1622 my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1);
1623 crit_exit();
1624 return;
1625 }
1626 IFNET_STAT_INC(ifp, oerrors, 1);
1627 kprintf("my%d: watchdog timeout\n", sc->my_unit);
1628 if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
1629 kprintf("my%d: no carrier - transceiver cable problem?\n",
1630 sc->my_unit);
1631 my_stop(sc);
1632 my_reset(sc);
1633 my_init(sc);
1634 if (!ifq_is_empty(&ifp->if_snd))
1635 if_devstart(ifp);
1636 crit_exit();
1637 }
1638
1639
1640 /*
1641 * Stop the adapter and free any mbufs allocated to the RX and TX lists.
1642 */
1643 static void
1644 my_stop(struct my_softc * sc)
1645 {
1646 struct ifnet *ifp = &sc->arpcom.ac_if;
1647 int i;
1648
1649 ifp->if_timer = 0;
1650
1651 MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE));
1652 CSR_WRITE_4(sc, MY_IMR, 0x00000000);
1653 CSR_WRITE_4(sc, MY_TXLBA, 0x00000000);
1654 CSR_WRITE_4(sc, MY_RXLBA, 0x00000000);
1655
1656 /*
1657 * Free data in the RX lists.
1658 */
1659 for (i = 0; i < MY_RX_LIST_CNT; i++) {
1660 if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) {
1661 m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf);
1662 sc->my_cdata.my_rx_chain[i].my_mbuf = NULL;
1663 }
1664 }
1665 bzero((char *)&sc->my_ldata->my_rx_list,
1666 sizeof(sc->my_ldata->my_rx_list));
1667 /*
1668 * Free the TX list buffers.
1669 */
1670 for (i = 0; i < MY_TX_LIST_CNT; i++) {
1671 if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) {
1672 m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf);
1673 sc->my_cdata.my_tx_chain[i].my_mbuf = NULL;
1674 }
1675 }
1676 bzero((char *)&sc->my_ldata->my_tx_list,
1677 sizeof(sc->my_ldata->my_tx_list));
1678 ifp->if_flags &= ~IFF_RUNNING;
1679 ifq_clr_oactive(&ifp->if_snd);
1680 }
1681
1682 /*
1683 * Stop all chip I/O so that the kernel's probe routines don't get confused
1684 * by errant DMAs when rebooting.
1685 */
1686 static void
1687 my_shutdown(device_t dev)
1688 {
1689 struct my_softc *sc;
1690
1691 sc = device_get_softc(dev);
1692 my_stop(sc);
1693 return;
1694 }
1695