xref: /dragonfly/sys/dev/netif/tx/if_tx.c (revision 5de36205)
1 /*-
2  * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org)
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/tx/if_tx.c,v 1.61.2.1 2002/10/29 01:43:49 semenu Exp $
27  * $DragonFly: src/sys/dev/netif/tx/if_tx.c,v 1.26 2005/06/14 14:19:22 joerg Exp $
28  */
29 
30 /*
31  * EtherPower II 10/100 Fast Ethernet (SMC 9432 series)
32  *
33  * These cards are based on the SMC83c17x (EPIC) chip and one of several
34  * PHYs (QS6612, AC101 and LXT970 have been seen). The media support depends
35  * on the card model. All cards support 10baseT/UTP and 100baseTX half- and
36  * full-duplex (SMC9432TX). The SMC9432BTX also supports 10base2/BNC, and the
37  * SMC9432FTX also supports fibre optics.
38  *
39  * Thanks go to Steve Bauer and Jason Wright.
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sockio.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/socket.h>
49 #include <sys/queue.h>
50 #include <sys/thread2.h>
51 
52 #include <net/if.h>
53 #include <net/ifq_var.h>
54 #include <net/if_arp.h>
55 #include <net/ethernet.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 
59 #include <net/bpf.h>
60 
61 #include <net/vlan/if_vlan_var.h>
62 
63 #include <vm/vm.h>		/* for vtophys */
64 #include <vm/pmap.h>		/* for vtophys */
65 #include <machine/bus_memio.h>
66 #include <machine/bus_pio.h>
67 #include <machine/bus.h>
68 #include <machine/resource.h>
69 #include <sys/bus.h>
70 #include <sys/rman.h>
71 
72 #include <bus/pci/pcireg.h>
73 #include <bus/pci/pcivar.h>
74 
75 #include "../mii_layer/mii.h"
76 #include "../mii_layer/miivar.h"
77 #include "../mii_layer/miidevs.h"
78 #include "../mii_layer/lxtphyreg.h"
79 
80 #include "miibus_if.h"
81 
82 #include "if_txreg.h"
83 #include "if_txvar.h"
84 
85 static int epic_ifioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
86 static void epic_intr(void *);
87 static void epic_tx_underrun(epic_softc_t *);
88 static int epic_common_attach(epic_softc_t *);
89 static void epic_ifstart(struct ifnet *);
90 static void epic_ifwatchdog(struct ifnet *);
91 static void epic_stats_update(void *);
92 static int epic_init(epic_softc_t *);
93 static void epic_stop(epic_softc_t *);
94 static void epic_rx_done(epic_softc_t *);
95 static void epic_tx_done(epic_softc_t *);
96 static int epic_init_rings(epic_softc_t *);
97 static void epic_free_rings(epic_softc_t *);
98 static void epic_stop_activity(epic_softc_t *);
99 static int epic_queue_last_packet(epic_softc_t *);
100 static void epic_start_activity(epic_softc_t *);
101 static void epic_set_rx_mode(epic_softc_t *);
102 static void epic_set_tx_mode(epic_softc_t *);
103 static void epic_set_mc_table(epic_softc_t *);
104 static int epic_read_eeprom(epic_softc_t *,u_int16_t);
105 static void epic_output_eepromw(epic_softc_t *, u_int16_t);
106 static u_int16_t epic_input_eepromw(epic_softc_t *);
107 static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t);
108 static void epic_write_eepromreg(epic_softc_t *,u_int8_t);
109 static u_int8_t epic_read_eepromreg(epic_softc_t *);
110 
111 static int epic_read_phy_reg(epic_softc_t *, int, int);
112 static void epic_write_phy_reg(epic_softc_t *, int, int, int);
113 
114 static int epic_miibus_readreg(device_t, int, int);
115 static int epic_miibus_writereg(device_t, int, int, int);
116 static void epic_miibus_statchg(device_t);
117 static void epic_miibus_mediainit(device_t);
118 
119 static int epic_ifmedia_upd(struct ifnet *);
120 static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);
121 
122 static int epic_probe(device_t);
123 static int epic_attach(device_t);
124 static void epic_shutdown(device_t);
125 static int epic_detach(device_t);
126 static struct epic_type *epic_devtype(device_t);
127 
128 static device_method_t epic_methods[] = {
129 	/* Device interface */
130 	DEVMETHOD(device_probe,		epic_probe),
131 	DEVMETHOD(device_attach,	epic_attach),
132 	DEVMETHOD(device_detach,	epic_detach),
133 	DEVMETHOD(device_shutdown,	epic_shutdown),
134 
135 	/* MII interface */
136 	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
137 	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
138 	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
139 	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),
140 
141 	{ 0, 0 }
142 };
143 
144 static driver_t epic_driver = {
145 	"tx",
146 	epic_methods,
147 	sizeof(epic_softc_t)
148 };
149 
150 static devclass_t epic_devclass;
151 
152 DECLARE_DUMMY_MODULE(if_tx);
153 MODULE_DEPEND(if_tx, miibus, 1, 1, 1);
154 DRIVER_MODULE(if_tx, pci, epic_driver, epic_devclass, 0, 0);
155 DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);
156 
157 static struct epic_type epic_devs[] = {
158 	{ SMC_VENDORID, SMC_DEVICEID_83C170,
159 		"SMC EtherPower II 10/100" },
160 	{ 0, 0, NULL }
161 };
162 
163 static int
164 epic_probe(dev)
165 	device_t dev;
166 {
167 	struct epic_type *t;
168 
169 	t = epic_devtype(dev);
170 
171 	if (t != NULL) {
172 		device_set_desc(dev, t->name);
173 		return(0);
174 	}
175 
176 	return(ENXIO);
177 }
178 
179 static struct epic_type *
180 epic_devtype(dev)
181 	device_t dev;
182 {
183 	struct epic_type *t;
184 
185 	t = epic_devs;
186 
187 	while(t->name != NULL) {
188 		if ((pci_get_vendor(dev) == t->ven_id) &&
189 		    (pci_get_device(dev) == t->dev_id)) {
190 			return(t);
191 		}
192 		t++;
193 	}
194 	return (NULL);
195 }
196 
197 #if defined(EPIC_USEIOSPACE)
198 #define	EPIC_RES	SYS_RES_IOPORT
199 #define	EPIC_RID	PCIR_BASEIO
200 #else
201 #define	EPIC_RES	SYS_RES_MEMORY
202 #define	EPIC_RID	PCIR_BASEMEM
203 #endif
204 
205 /*
206  * Attach routine: map registers, allocate softc, rings and descriptors.
207  * Reset to known state.
208  */
209 static int
210 epic_attach(dev)
211 	device_t dev;
212 {
213 	struct ifnet *ifp;
214 	epic_softc_t *sc;
215 	int error;
216 	int i, rid, tmp;
217 
218 	sc = device_get_softc(dev);
219 
220 	/* Preinitialize softc structure */
221 	bzero(sc, sizeof(epic_softc_t));
222 	sc->dev = dev;
223 	callout_init(&sc->tx_stat_timer);
224 
225 	/* Fill ifnet structure */
226 	ifp = &sc->sc_if;
227 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
228 	ifp->if_softc = sc;
229 	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST;
230 	ifp->if_ioctl = epic_ifioctl;
231 	ifp->if_start = epic_ifstart;
232 	ifp->if_watchdog = epic_ifwatchdog;
233 	ifp->if_init = (if_init_f_t*)epic_init;
234 	ifp->if_timer = 0;
235 	ifp->if_baudrate = 10000000;
236 	ifq_set_maxlen(&ifp->if_snd, TX_RING_SIZE - 1);
237 	ifq_set_ready(&ifp->if_snd);
238 
239 	pci_enable_busmaster(dev);
240 
241 	rid = EPIC_RID;
242 	sc->res = bus_alloc_resource_any(dev, EPIC_RES, &rid, RF_ACTIVE);
243 
244 	if (sc->res == NULL) {
245 		device_printf(dev, "couldn't map ports/memory\n");
246 		error = ENXIO;
247 		goto fail;
248 	}
249 
250 	sc->sc_st = rman_get_bustag(sc->res);
251 	sc->sc_sh = rman_get_bushandle(sc->res);
252 
253 	/* Allocate interrupt */
254 	rid = 0;
255 	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
256 	    RF_SHAREABLE | RF_ACTIVE);
257 
258 	if (sc->irq == NULL) {
259 		device_printf(dev, "couldn't map interrupt\n");
260 		error = ENXIO;
261 		goto fail;
262 	}
263 
264 	/* Do OS independent part, including chip wakeup and reset */
265 	error = epic_common_attach(sc);
266 	if (error) {
267 		error = ENXIO;
268 		goto fail;
269 	}
270 
271 	/* Do ifmedia setup */
272 	if (mii_phy_probe(dev, &sc->miibus,
273 	    epic_ifmedia_upd, epic_ifmedia_sts)) {
274 		device_printf(dev, "ERROR! MII without any PHY!?\n");
275 		error = ENXIO;
276 		goto fail;
277 	}
278 
279 	/* Print the board type string from EEPROM (two ASCII chars per word) */
280 	printf(" type ");
281 	for(i=0x2c;i<0x32;i++) {
282 		tmp = epic_read_eeprom(sc, i);
283 		if (' ' == (u_int8_t)tmp) break;
284 		printf("%c", (u_int8_t)tmp);
285 		tmp >>= 8;
286 		if (' ' == (u_int8_t)tmp) break;
287 		printf("%c", (u_int8_t)tmp);
288 	}
289 	printf("\n");
290 
291 	/* Attach to OS's managers */
292 	ether_ifattach(ifp, sc->sc_macaddr);
293 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
294 
295 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
296 			       epic_intr, sc, &sc->sc_ih, NULL);
297 
298 	if (error) {
299 		device_printf(dev, "couldn't set up irq\n");
300 		ether_ifdetach(ifp);
301 		goto fail;
302 	}
303 
304 	return(0);
305 
306 fail:
307 	epic_detach(dev);
308 	return(error);
309 }
310 
311 /*
312  * Detach driver and free resources
313  */
314 static int
315 epic_detach(dev)
316 	device_t dev;
317 {
318 	struct ifnet *ifp;
319 	epic_softc_t *sc;
320 
321 	sc = device_get_softc(dev);
322 	ifp = &sc->arpcom.ac_if;
323 
324 	crit_enter();
325 
326 	if (device_is_attached(dev)) {
327 		ether_ifdetach(ifp);
328 		epic_stop(sc);
329 	}
330 
331 	if (sc->miibus)
332 		device_delete_child(dev, sc->miibus);
333 	bus_generic_detach(dev);
334 
335 	if (sc->sc_ih)
336 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
337 
338 	crit_exit();
339 
340 	if (sc->irq)
341 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
342 	if (sc->res)
343 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
344 
345 	if (sc->tx_flist)
346 		free(sc->tx_flist, M_DEVBUF);
347 	if (sc->tx_desc)
348 		free(sc->tx_desc, M_DEVBUF);
349 	if (sc->rx_desc)
350 		free(sc->rx_desc, M_DEVBUF);
351 
352 	return(0);
353 }
354 
355 #undef	EPIC_RES
356 #undef	EPIC_RID
357 
358 /*
359  * Stop all chip I/O so that the kernel's probe routines don't
360  * get confused by errant DMAs when rebooting.
361  */
362 static void
363 epic_shutdown(dev)
364 	device_t dev;
365 {
366 	epic_softc_t *sc;
367 
368 	sc = device_get_softc(dev);
369 
370 	epic_stop(sc);
371 
372 	return;
373 }
374 
375 /*
376  * This is if_ioctl handler.
377  */
378 static int
379 epic_ifioctl(ifp, command, data, cr)
380 	struct ifnet *ifp;
381 	u_long command;
382 	caddr_t data;
383 	struct ucred *cr;
384 {
385 	epic_softc_t *sc = ifp->if_softc;
386 	struct mii_data	*mii;
387 	struct ifreq *ifr = (struct ifreq *) data;
388 	int error = 0;
389 
390 	crit_enter();
391 
392 	switch (command) {
393 	case SIOCSIFMTU:
394 		if (ifp->if_mtu == ifr->ifr_mtu)
395 			break;
396 
397 		/* XXX Though the datasheet doesn't imply any
398 		 * limitations on RX and TX sizes besides the 64Kb max
399 		 * DMA transfer, it seems we can't send more than 1600
400 		 * data bytes per ethernet packet. (The transmitter hangs
401 		 * if more data is sent.)
402 		 */
403 		if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
404 			ifp->if_mtu = ifr->ifr_mtu;
405 			epic_stop(sc);
406 			epic_init(sc);
407 		} else
408 			error = EINVAL;
409 		break;
410 
411 	case SIOCSIFFLAGS:
412 		/*
413 		 * If the interface is marked up and stopped, then start it.
414 		 * If it is marked down and running, then stop it.
415 		 */
416 		if (ifp->if_flags & IFF_UP) {
417 			if ((ifp->if_flags & IFF_RUNNING) == 0) {
418 				epic_init(sc);
419 				break;
420 			}
421 		} else {
422 			if (ifp->if_flags & IFF_RUNNING) {
423 				epic_stop(sc);
424 				break;
425 			}
426 		}
427 
428 		/* Handle IFF_PROMISC and IFF_ALLMULTI flags */
429 		epic_stop_activity(sc);
430 		epic_set_mc_table(sc);
431 		epic_set_rx_mode(sc);
432 		epic_start_activity(sc);
433 		break;
434 
435 	case SIOCADDMULTI:
436 	case SIOCDELMULTI:
437 		epic_set_mc_table(sc);
438 		error = 0;
439 		break;
440 
441 	case SIOCSIFMEDIA:
442 	case SIOCGIFMEDIA:
443 		mii = device_get_softc(sc->miibus);
444 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
445 		break;
446 
447 	default:
448 		error = ether_ioctl(ifp, command, data);
449 		break;
450 	}
451 	crit_exit();
452 
453 	return error;
454 }
455 
456 /*
457  * OS-independent part of the attach process: allocate memory for descriptors
458  * and frag lists, wake up the chip, and read the MAC address from EEPROM.
459  * Returns 0 on success.
460  */
461 static int
462 epic_common_attach(sc)
463 	epic_softc_t *sc;
464 {
465 	int i;
466 
467 	sc->tx_flist = malloc(sizeof(struct epic_frag_list)*TX_RING_SIZE,
468 	    M_DEVBUF, M_WAITOK | M_ZERO);
469 	sc->tx_desc = malloc(sizeof(struct epic_tx_desc)*TX_RING_SIZE,
470 	    M_DEVBUF, M_WAITOK | M_ZERO);
471 	sc->rx_desc = malloc(sizeof(struct epic_rx_desc)*RX_RING_SIZE,
472 	    M_DEVBUF, M_WAITOK | M_ZERO);
473 
474 	/* Bring the chip out of low-power mode. */
475 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
476 	DELAY(500);
477 
478 	/* Workaround for Application Note 7-15 */
479 	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);
480 
481 	/* Read mac address from EEPROM */
482 	for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
483 		((u_int16_t *)sc->sc_macaddr)[i] = epic_read_eeprom(sc,i);
484 
485 	/* Set Non-Volatile Control Register from EEPROM */
486 	CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);
487 
488 	/* Set defaults */
489 	sc->tx_threshold = TRANSMIT_THRESHOLD;
490 	sc->txcon = TXCON_DEFAULT;
491 	sc->miicfg = MIICFG_SMI_ENABLE;
492 	sc->phyid = EPIC_UNKN_PHY;
493 	sc->serinst = -1;
494 
495 	/* Fetch card id */
496 	sc->cardvend = pci_get_subvendor(sc->dev);
497 	sc->cardid = pci_get_subdevice(sc->dev);
498 
499 	if (sc->cardvend != SMC_VENDORID)
500 		device_printf(sc->dev, "unknown card vendor %04xh\n", sc->cardvend);
501 
502 	return 0;
503 }
504 
505 /*
506  * This is the if_start handler. It takes mbufs from the if_snd queue
507  * and queues them for transmit, one by one, until the TX ring becomes
508  * full or the queue becomes empty.
509  */
510 static void
511 epic_ifstart(ifp)
512 	struct ifnet * ifp;
513 {
514 	epic_softc_t *sc = ifp->if_softc;
515 	struct epic_tx_buffer *buf;
516 	struct epic_tx_desc *desc;
517 	struct epic_frag_list *flist;
518 	struct mbuf *m0;
519 	struct mbuf *m;
520 	int i;
521 
522 	while (sc->pending_txs < TX_RING_SIZE) {
523 		buf = sc->tx_buffer + sc->cur_tx;
524 		desc = sc->tx_desc + sc->cur_tx;
525 		flist = sc->tx_flist + sc->cur_tx;
526 
527 		/* Get next packet to send */
528 		m0 = ifq_dequeue(&ifp->if_snd);
529 
530 		/* If nothing to send, return */
531 		if (m0 == NULL)
532 			return;
533 
534 		/* Fill fragments list */
535 		for (m = m0, i = 0;
536 		    (NULL != m) && (i < EPIC_MAX_FRAGS);
537 		    m = m->m_next, i++) {
538 			flist->frag[i].fraglen = m->m_len;
539 			flist->frag[i].fragaddr = vtophys(mtod(m, caddr_t));
540 		}
541 		flist->numfrags = i;
542 
543 		/* If the packet had more than EPIC_MAX_FRAGS fragments, */
544 		/* copy it into a newly allocated mbuf cluster */
545 		if (NULL != m) {
546 			EPIC_MGETCLUSTER(m);
547 			if (NULL == m) {
548 				m_freem(m0);
549 				ifp->if_oerrors++;
550 				continue;
551 			}
552 
553 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
554 			flist->frag[0].fraglen =
555 			     m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
556 			m->m_pkthdr.rcvif = ifp;
557 
558 			flist->numfrags = 1;
559 			flist->frag[0].fragaddr = vtophys(mtod(m, caddr_t));
560 			m_freem(m0);
561 			m0 = m;
562 		}
563 
564 		buf->mbuf = m0;
565 		sc->pending_txs++;
566 		sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
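		/*
		 * Fill in the descriptor: txlength is padded up to the minimum
		 * Ethernet frame length (CRC excluded), and setting the 0x8000
		 * status bit hands ownership of the descriptor to the chip
		 * (cf. the owner bit handling in epic_init_rings()).
		 */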
567 		desc->control = 0x01;
568 		desc->txlength =
569 		    max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
570 		desc->status = 0x8000;
571 		CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);
572 
573 		/* Set watchdog timer */
574 		ifp->if_timer = 8;
575 
576 		BPF_MTAP(ifp, m0);
577 	}
578 
579 	ifp->if_flags |= IFF_OACTIVE;
580 
581 	return;
582 
583 }
584 
585 /*
586  * Synopsis: Finish all received frames.
587  */
588 static void
589 epic_rx_done(sc)
590 	epic_softc_t *sc;
591 {
592 	u_int16_t len;
593 	struct ifnet *ifp = &sc->sc_if;
594 	struct epic_rx_buffer *buf;
595 	struct epic_rx_desc *desc;
596 	struct mbuf *m;
597 
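	/*
	 * Walk the RX ring: the chip clears the 0x8000 owner bit in the
	 * descriptor status once it has filled the buffer, so process
	 * descriptors until we reach one still owned by the chip.
	 */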
598 	while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
599 		buf = sc->rx_buffer + sc->cur_rx;
600 		desc = sc->rx_desc + sc->cur_rx;
601 
602 		/* Switch to next descriptor */
603 		sc->cur_rx = (sc->cur_rx+1) & RX_RING_MASK;
604 
605 		/*
606 		 * Check for RX errors. This should only happen if
607 		 * SAVE_ERRORED_PACKETS is set. RX errors generate
608 		 * RXE interrupt usually.
609 		 */
610 		if ((desc->status & 1) == 0) {
611 			sc->sc_if.if_ierrors++;
612 			desc->status = 0x8000;
613 			continue;
614 		}
615 
616 		/* Save packet length and mbuf contained packet */
617 		len = desc->rxlength - ETHER_CRC_LEN;
618 		m = buf->mbuf;
619 
620 		/* Try to get mbuf cluster */
621 		EPIC_MGETCLUSTER(buf->mbuf);
622 		if (NULL == buf->mbuf) {
623 			buf->mbuf = m;
624 			desc->status = 0x8000;
625 			ifp->if_ierrors++;
626 			continue;
627 		}
628 
629 		/* Point to new mbuf, and give descriptor to chip */
630 		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
631 		desc->status = 0x8000;
632 
633 		/* First mbuf in packet holds the ethernet and packet headers */
634 		m->m_pkthdr.rcvif = ifp;
635 		m->m_pkthdr.len = m->m_len = len;
636 
637 		/* Give mbuf to OS */
638 		(*ifp->if_input)(ifp, m);
639 
640 		/* Successfully received frame */
641 		ifp->if_ipackets++;
642 	}
643 
644 	return;
645 }
646 
647 /*
648  * Synopsis: Do the last phase of transmission. That is, if a descriptor
649  * has been transmitted, decrement the pending_txs counter, free the mbuf
650  * holding the packet, switch to the next descriptor, and repeat until no
651  * packets are pending or a descriptor has not been transmitted yet.
652  */
653 static void
654 epic_tx_done(sc)
655 	epic_softc_t *sc;
656 {
657 	struct epic_tx_buffer *buf;
658 	struct epic_tx_desc *desc;
659 	u_int16_t status;
660 
661 	while (sc->pending_txs > 0) {
662 		buf = sc->tx_buffer + sc->dirty_tx;
663 		desc = sc->tx_desc + sc->dirty_tx;
664 		status = desc->status;
665 
666 		/* If this packet has not been transmitted, the following */
667 		/* packets have not been transmitted either */
668 		if (status & 0x8000) break;
669 
670 		/* Packet is transmitted. Switch to next and */
671 		/* free mbuf */
672 		sc->pending_txs--;
673 		sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
674 		m_freem(buf->mbuf);
675 		buf->mbuf = NULL;
676 
677 		/* Check for errors and collisions */
678 		if (status & 0x0001) sc->sc_if.if_opackets++;
679 		else sc->sc_if.if_oerrors++;
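		/* The per-frame collision count lives in bits 8..12 of the TX status */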
680 		sc->sc_if.if_collisions += (status >> 8) & 0x1F;
681 #if defined(EPIC_DIAG)
682 		if ((status & 0x1001) == 0x1001) {
683 			if_printf(&sc->sc_if,
684 				  "Tx ERROR: excessive coll. number\n");
685 		}
686 #endif
687 	}
688 
689 	if (sc->pending_txs < TX_RING_SIZE)
690 		sc->sc_if.if_flags &= ~IFF_OACTIVE;
691 }
692 
693 /*
694  * Interrupt function
695  */
696 static void
697 epic_intr(arg)
698     void *arg;
699 {
700     epic_softc_t * sc = (epic_softc_t *) arg;
701     int status, i = 4;
702 
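    /*
     * Service at most four passes over the interrupt causes per invocation,
     * acknowledging each INTSTAT snapshot before handling it, so the time
     * spent in the handler stays bounded.
     */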
703     while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
704 	CSR_WRITE_4(sc, INTSTAT, status);
705 
706 	if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
707 	    epic_rx_done(sc);
708 	    if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
709 #if defined(EPIC_DIAG)
710 		if (status & INTSTAT_OVW)
711 		    if_printf(&sc->sc_if, "RX buffer overflow\n");
712 		if (status & INTSTAT_RQE)
713 		    if_printf(&sc->sc_if, "RX FIFO overflow\n");
714 #endif
715 		if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0)
716 		    CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED);
717 		sc->sc_if.if_ierrors++;
718 	    }
719 	}
720 
721 	if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
722 	    epic_tx_done(sc);
723 	    if (!ifq_is_empty(&sc->sc_if.if_snd))
724 		    epic_ifstart(&sc->sc_if);
725 	}
726 
727 	/* Check for rare errors */
728 	if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
729 		      INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
730     	    if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
731 			  INTSTAT_APE|INTSTAT_DPE)) {
732 		if_printf(&sc->sc_if, "PCI fatal errors occurred: %s%s%s%s\n",
733 		    (status&INTSTAT_PMA)?"PMA ":"",
734 		    (status&INTSTAT_PTA)?"PTA ":"",
735 		    (status&INTSTAT_APE)?"APE ":"",
736 		    (status&INTSTAT_DPE)?"DPE":""
737 		);
738 
739 		epic_stop(sc);
740 		epic_init(sc);
741 
742 	    	break;
743 	    }
744 
745 	    if (status & INTSTAT_RXE) {
746 #if defined(EPIC_DIAG)
747 		if_printf(&sc->sc_if, "CRC/Alignment error\n");
748 #endif
749 		sc->sc_if.if_ierrors++;
750 	    }
751 
752 	    if (status & INTSTAT_TXU) {
753 		epic_tx_underrun(sc);
754 		sc->sc_if.if_oerrors++;
755 	    }
756 	}
757     }
758 
759     /* If no packets are pending, then no timeouts */
760     if (sc->pending_txs == 0) sc->sc_if.if_timer = 0;
761 
762     return;
763 }
764 
765 /*
766  * Handle the TX underrun error: increase the TX threshold
767  * and restart the transmitter.
768  */
769 static void
770 epic_tx_underrun(sc)
771 	epic_softc_t *sc;
772 {
773 	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
774 		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
775 #if defined(EPIC_DIAG)
776 		if_printf(&sc->sc_if, "Tx UNDERRUN: early TX disabled\n");
777 #endif
778 	} else {
779 		sc->tx_threshold += 0x40;
780 #if defined(EPIC_DIAG)
781 		if_printf(&sc->sc_if, "Tx UNDERRUN: "
782 			  "TX threshold increased to %d\n", sc->tx_threshold);
783 #endif
784 	}
785 
786 	/* We must set TXUGO to reset the stuck transmitter */
787 	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);
788 
789 	/* Update the TX threshold */
790 	epic_stop_activity(sc);
791 	epic_set_tx_mode(sc);
792 	epic_start_activity(sc);
793 
794 	return;
795 }
796 
797 /*
798  * Synopsis: This one is called if packets weren't transmitted within the
799  * timeout. Try to reclaim transmitted packets, and if that succeeds,
800  * continue to work.
801  */
802 static void
803 epic_ifwatchdog(ifp)
804 	struct ifnet *ifp;
805 {
806 	epic_softc_t *sc = ifp->if_softc;
807 
808 	crit_enter();
809 
810 	if_printf(ifp, "device timeout %d packets\n", sc->pending_txs);
811 
812 	/* Try to finish queued packets */
813 	epic_tx_done(sc);
814 
815 	/* If not successful */
816 	if (sc->pending_txs > 0) {
817 
818 		ifp->if_oerrors+=sc->pending_txs;
819 
820 		/* Reinitialize board */
821 		if_printf(ifp, "reinitialization\n");
822 		epic_stop(sc);
823 		epic_init(sc);
824 
825 	} else
826 		if_printf(ifp, "seems we can continue normally\n");
827 
828 	/* Start output */
829 	if (!ifq_is_empty(&ifp->if_snd))
830 		epic_ifstart(ifp);
831 
832 	crit_exit();
833 }
834 
835 /*
836  * Despite the name of this function, it doesn't update statistics; it only
837  * drives the periodic MII tick used for autonegotiation.
838  */
839 static void
840 epic_stats_update(void *xsc)
841 {
842 	epic_softc_t *sc = xsc;
843 	struct mii_data * mii;
844 
845 	crit_enter();
846 
847 	mii = device_get_softc(sc->miibus);
848 	mii_tick(mii);
849 
850 	callout_reset(&sc->tx_stat_timer, hz, epic_stats_update, sc);
851 
852 	crit_exit();
853 }
854 
855 /*
856  * Set media options.
857  */
858 static int
859 epic_ifmedia_upd(ifp)
860 	struct ifnet *ifp;
861 {
862 	epic_softc_t *sc;
863 	struct mii_data *mii;
864 	struct ifmedia *ifm;
865 	struct mii_softc *miisc;
866 	int cfg, media;
867 
868 	sc = ifp->if_softc;
869 	mii = device_get_softc(sc->miibus);
870 	ifm = &mii->mii_media;
871 	media = ifm->ifm_cur->ifm_media;
872 
873 	/* Do not do anything if interface is not up */
874 	if ((ifp->if_flags & IFF_UP) == 0)
875 		return (0);
876 
877 	/*
878 	 * Lookup current selected PHY
879 	 */
880 	if (IFM_INST(media) == sc->serinst) {
881 		sc->phyid = EPIC_SERIAL;
882 		sc->physc = NULL;
883 	} else {
884 		/* If we're not selecting serial interface, select MII mode */
885 		sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
886 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
887 
888 		/* Default to unknown PHY */
889 		sc->phyid = EPIC_UNKN_PHY;
890 
891 		/* Lookup selected PHY */
892 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
893 		     miisc = LIST_NEXT(miisc, mii_list)) {
894 			if (IFM_INST(media) == miisc->mii_inst) {
895 				sc->physc = miisc;
896 				break;
897 			}
898 		}
899 
900 		/* Identify selected PHY */
901 		if (sc->physc) {
902 			int id1, id2, model, oui;
903 
904 			id1 = PHY_READ(sc->physc, MII_PHYIDR1);
905 			id2 = PHY_READ(sc->physc, MII_PHYIDR2);
906 
907 			oui = MII_OUI(id1, id2);
908 			model = MII_MODEL(id2);
909 			switch (oui) {
910 			case MII_OUI_QUALSEMI:
911 				if (model == MII_MODEL_QUALSEMI_QS6612)
912 					sc->phyid = EPIC_QS6612_PHY;
913 				break;
914 			case MII_OUI_xxALTIMA:
915 				if (model == MII_MODEL_xxALTIMA_AC101)
916 					sc->phyid = EPIC_AC101_PHY;
917 				break;
918 			case MII_OUI_xxLEVEL1:
919 				if (model == MII_MODEL_xxLEVEL1_LXT970)
920 					sc->phyid = EPIC_LXT970_PHY;
921 				break;
922 			}
923 		}
924 	}
925 
926 	/*
927 	 * Do PHY specific card setup
928 	 */
929 
930 	/* Call this to isolate all non-selected PHYs and
931 	 * set up the selected one
932 	 */
933 	mii_mediachg(mii);
934 
935 	/* Do our own setup */
936 	switch (sc->phyid) {
937 	case EPIC_QS6612_PHY:
938 		break;
939 	case EPIC_AC101_PHY:
940 		/* We have to power up the fiber transceivers */
941 		if (IFM_SUBTYPE(media) == IFM_100_FX)
942 			sc->miicfg |= MIICFG_694_ENABLE;
943 		else
944 			sc->miicfg &= ~MIICFG_694_ENABLE;
945 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
946 
947 		break;
948 	case EPIC_LXT970_PHY:
949 		/* We have to power up the fiber transceivers */
950 		cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
951 		if (IFM_SUBTYPE(media) == IFM_100_FX)
952 			cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
953 		else
954 			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
955 		PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);
956 
957 		break;
958 	case EPIC_SERIAL:
959 		/* Select the serial PHY (usually 10base2/BNC) */
960 		sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
961 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
962 
963 		/* There is no PHY driver to fill these, so do it ourselves */
964 		mii->mii_media_active = media;
965 		mii->mii_media_status = 0;
966 
967 		/* We need to call this manually as it wasn't called
968 		 * in mii_mediachg()
969 		 */
970 		epic_miibus_statchg(sc->dev);
971 
972 		break;
973 	default:
974 		if_printf(ifp, "ERROR! Unknown PHY selected\n");
975 		return (EINVAL);
976 	}
977 
978 	return(0);
979 }
980 
981 /*
982  * Report current media status.
983  */
984 static void
985 epic_ifmedia_sts(ifp, ifmr)
986 	struct ifnet *ifp;
987 	struct ifmediareq *ifmr;
988 {
989 	epic_softc_t *sc;
990 	struct mii_data *mii;
991 	struct ifmedia *ifm;
992 
993 	sc = ifp->if_softc;
994 	mii = device_get_softc(sc->miibus);
995 	ifm = &mii->mii_media;
996 
997 	/* Nothing should be selected if interface is down */
998 	if ((ifp->if_flags & IFF_UP) == 0) {
999 		ifmr->ifm_active = IFM_NONE;
1000 		ifmr->ifm_status = 0;
1001 
1002 		return;
1003 	}
1004 
1005 	/* Call underlying pollstat, if not serial PHY */
1006 	if (sc->phyid != EPIC_SERIAL)
1007 		mii_pollstat(mii);
1008 
1009 	/* Simply copy media info */
1010 	ifmr->ifm_active = mii->mii_media_active;
1011 	ifmr->ifm_status = mii->mii_media_status;
1012 
1013 	return;
1014 }
1015 
1016 /*
1017  * Callback routine, called on media change.
1018  */
1019 static void
1020 epic_miibus_statchg(dev)
1021 	device_t dev;
1022 {
1023 	epic_softc_t *sc;
1024 	struct mii_data *mii;
1025 	int media;
1026 
1027 	sc = device_get_softc(dev);
1028 	mii = device_get_softc(sc->miibus);
1029 	media = mii->mii_media_active;
1030 
1031 	sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);
1032 
1033 	/* If we are in full-duplex mode or loopback operation,
1034 	 * we need to decouple receiver and transmitter.
1035 	 */
1036 	if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
1037  		sc->txcon |= TXCON_FULL_DUPLEX;
1038 
1039 	/* On some cards we need to manually set the full-duplex LED */
1040 	if (sc->cardid == SMC9432FTX ||
1041 	    sc->cardid == SMC9432FTX_SC) {
1042 		if (IFM_OPTIONS(media) & IFM_FDX)
1043 			sc->miicfg |= MIICFG_694_ENABLE;
1044 		else
1045 			sc->miicfg &= ~MIICFG_694_ENABLE;
1046 
1047 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
1048 	}
1049 
1050 	/* Update baudrate */
1051 	if (IFM_SUBTYPE(media) == IFM_100_TX ||
1052 	    IFM_SUBTYPE(media) == IFM_100_FX)
1053 		sc->sc_if.if_baudrate = 100000000;
1054 	else
1055 		sc->sc_if.if_baudrate = 10000000;
1056 
1057 	epic_stop_activity(sc);
1058 	epic_set_tx_mode(sc);
1059 	epic_start_activity(sc);
1060 
1061 	return;
1062 }
1063 
1064 static void
1065 epic_miibus_mediainit(dev)
1066 	device_t dev;
1067 {
1068 	epic_softc_t *sc;
1069 	struct mii_data *mii;
1070 	struct ifmedia *ifm;
1071 	int media;
1072 
1073 	sc = device_get_softc(dev);
1074 	mii = device_get_softc(sc->miibus);
1075 	ifm = &mii->mii_media;
1076 
1077 	/* Add the Serial Media Interface if present; this applies to
1078 	 * the SMC9432BTX series
1079 	 */
1080 	if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
1081 		/* Store its instance */
1082 		sc->serinst = mii->mii_instance++;
1083 
1084 		/* Add as 10base2/BNC media */
1085 		media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
1086 		ifmedia_add(ifm, media, 0, NULL);
1087 
1088 		/* Report to user */
1089 		if_printf(&sc->sc_if, "serial PHY detected (10Base2/BNC)\n");
1090 	}
1091 
1092 	return;
1093 }
1094 
1095 /*
1096  * Reset chip, allocate rings, and update media.
1097  */
1098 static int
1099 epic_init(sc)
1100 	epic_softc_t *sc;
1101 {
1102 	struct ifnet *ifp = &sc->sc_if;
1103 	int	i;
1104 
1105 	crit_enter();
1106 
1107 	/* If interface is already running, then we need not do anything */
1108 	if (ifp->if_flags & IFF_RUNNING) {
1109 		crit_exit();
1110 		return 0;
1111 	}
1112 
1113 	/* Soft reset the chip (we have to power up the card first) */
1114 	CSR_WRITE_4(sc, GENCTL, 0);
1115 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
1116 
1117 	/*
1118 	 * Reset takes 15 PCI ticks, which depends on the PCI bus speed.
1119 	 * Assuming it is >= 33000000 Hz, we have to wait at least 495e-6 sec.
1120 	 */
1121 	DELAY(500);
1122 
1123 	/* Wake up */
1124 	CSR_WRITE_4(sc, GENCTL, 0);
1125 
1126 	/* Workaround for Application Note 7-15 */
1127 	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);
1128 
1129 	/* Initialize rings */
1130 	if (epic_init_rings(sc)) {
1131 		if_printf(ifp, "failed to init rings\n");
1132 		crit_exit();
1133 		return -1;
1134 	}
1135 
1136 	/* Give rings to EPIC */
1137 	CSR_WRITE_4(sc, PRCDAR, vtophys(sc->rx_desc));
1138 	CSR_WRITE_4(sc, PTCDAR, vtophys(sc->tx_desc));
1139 
1140 	/* Put node address to EPIC */
1141 	CSR_WRITE_4(sc, LAN0, ((u_int16_t *)sc->sc_macaddr)[0]);
1142 	CSR_WRITE_4(sc, LAN1, ((u_int16_t *)sc->sc_macaddr)[1]);
1143 	CSR_WRITE_4(sc, LAN2, ((u_int16_t *)sc->sc_macaddr)[2]);
1144 
1145 	/* Set tx mode, including the transmit threshold */
1146 	epic_set_tx_mode(sc);
1147 
1148 	/* Compute and set RXCON. */
1149 	epic_set_rx_mode(sc);
1150 
1151 	/* Set multicast table */
1152 	epic_set_mc_table(sc);
1153 
1154 	/* Enable interrupts by setting the interrupt mask. */
1155 	CSR_WRITE_4(sc, INTMASK,
1156 		INTSTAT_RCC  | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
1157 		/* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
1158 		INTSTAT_FATAL);
1159 
1160 	/* Acknowledge all pending interrupts */
1161 	CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));
1162 
1163 	/* Enable interrupts, set PCI read multiple, etc. */
1164 	CSR_WRITE_4(sc, GENCTL,
1165 		GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
1166 		GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);
1167 
1168 	/* Mark interface running ... */
1169 	if (ifp->if_flags & IFF_UP) ifp->if_flags |= IFF_RUNNING;
1170 	else ifp->if_flags &= ~IFF_RUNNING;
1171 
1172 	/* ... and free */
1173 	ifp->if_flags &= ~IFF_OACTIVE;
1174 
1175 	/* Start Rx process */
1176 	epic_start_activity(sc);
1177 
1178 	/* Set appropriate media */
1179 	epic_ifmedia_upd(ifp);
1180 
1181 	callout_reset(&sc->tx_stat_timer, hz, epic_stats_update, sc);
1182 
1183 	crit_exit();
1184 
1185 	return 0;
1186 }
1187 
1188 /*
1189  * Synopsis: calculate and set Rx mode. Chip must be in idle state to
1190  * access RXCON.
1191  */
1192 static void
1193 epic_set_rx_mode(sc)
1194 	epic_softc_t *sc;
1195 {
1196 	u_int32_t 		flags = sc->sc_if.if_flags;
1197 	u_int32_t 		rxcon = RXCON_DEFAULT;
1198 
1199 #if defined(EPIC_EARLY_RX)
1200 	rxcon |= RXCON_EARLY_RX;
1201 #endif
1202 
1203 	rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;
1204 
1205 	CSR_WRITE_4(sc, RXCON, rxcon);
1206 
1207 	return;
1208 }
1209 
1210 /*
1211  * Synopsis: Set transmit control register. Chip must be in idle state to
1212  * access TXCON.
1213  */
1214 static void
1215 epic_set_tx_mode(sc)
1216 	epic_softc_t *sc;
1217 {
1218 	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
1219 		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);
1220 
1221 	CSR_WRITE_4(sc, TXCON, sc->txcon);
1222 }
1223 
1224 /*
1225  * Synopsis: Program the multicast filter, honoring the IFF_ALLMULTI and
1226  * IFF_PROMISC flags. (Note that setting the PROMISC bit in EPIC's RXCON only
1227  * affects individually addressed frames; the multicast filter must still be programmed.)
1228  *
1229  * Note: EPIC must be in idle state.
1230  */
1231 static void
1232 epic_set_mc_table(sc)
1233 	epic_softc_t *sc;
1234 {
1235 	struct ifnet *ifp = &sc->sc_if;
1236 	struct ifmultiaddr *ifma;
1237 	u_int16_t filter[4];
1238 	u_int8_t h;
1239 
1240 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1241 		CSR_WRITE_4(sc, MC0, 0xFFFF);
1242 		CSR_WRITE_4(sc, MC1, 0xFFFF);
1243 		CSR_WRITE_4(sc, MC2, 0xFFFF);
1244 		CSR_WRITE_4(sc, MC3, 0xFFFF);
1245 
1246 		return;
1247 	}
1248 
1249 	filter[0] = 0;
1250 	filter[1] = 0;
1251 	filter[2] = 0;
1252 	filter[3] = 0;
1253 
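	/*
	 * Hash each multicast address: the upper 6 bits of the big-endian
	 * CRC of the address select one of 64 filter bits, spread across
	 * the four 16-bit MC0..MC3 registers.
	 */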
1254 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1255 		if (ifma->ifma_addr->sa_family != AF_LINK)
1256 			continue;
1257 		h = (ether_crc32_be(
1258 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1259 			ETHER_ADDR_LEN) >> 26) & 0x3f;
1260 		filter[h >> 4] |= 1 << (h & 0xF);
1261 	}
1262 
1263 	CSR_WRITE_4(sc, MC0, filter[0]);
1264 	CSR_WRITE_4(sc, MC1, filter[1]);
1265 	CSR_WRITE_4(sc, MC2, filter[2]);
1266 	CSR_WRITE_4(sc, MC3, filter[3]);
1267 
1268 	return;
1269 }
1270 
1271 /*
1272  * Synopsis: Start the receive process, and the transmit process if packets are pending.
1273  */
1274 static void
1275 epic_start_activity(sc)
1276 	epic_softc_t *sc;
1277 {
1278 	/* Start rx process */
1279 	CSR_WRITE_4(sc, COMMAND,
1280 		COMMAND_RXQUEUED | COMMAND_START_RX |
1281 		(sc->pending_txs?COMMAND_TXQUEUED:0));
1282 }
1283 
1284 /*
1285  * Synopsis: Completely stop the Rx and Tx processes. If TQE is set, an
1286  * additional packet needs to be queued to stop Tx DMA.
1287  */
1288 static void
1289 epic_stop_activity(sc)
1290 	epic_softc_t *sc;
1291 {
1292 	int status, i;
1293 
1294 	/* Stop Tx and Rx DMA */
1295 	CSR_WRITE_4(sc, COMMAND,
1296 	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);
1297 
1298 	/* Wait for Rx and Tx DMA to stop (why 1 ms ??? XXX) */
1299 	for (i=0; i<0x1000; i++) {
1300 		status = CSR_READ_4(sc, INTSTAT) & (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
1301 		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
1302 			break;
1303 		DELAY(1);
1304 	}
1305 
1306 	/* Catch all finished packets */
1307 	epic_rx_done(sc);
1308 	epic_tx_done(sc);
1309 
1310 	status = CSR_READ_4(sc, INTSTAT);
1311 
1312 	if ((status & INTSTAT_RXIDLE) == 0)
1313 		if_printf(&sc->sc_if, "ERROR! Can't stop Rx DMA\n");
1314 
1315 	if ((status & INTSTAT_TXIDLE) == 0)
1316 		if_printf(&sc->sc_if, "ERROR! Can't stop Tx DMA\n");
1317 
1318 	/*
1319 	 * May need to queue one more packet if TQE is set; this is a rare
1320 	 * but real case.
1321 	 */
1322 	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
1323 		(void) epic_queue_last_packet(sc);
1324 
1325 }
1326 
1327 /*
1328  * The EPIC transmitter may get stuck in the TQE state. It will not go IDLE
1329  * until a packet from the current descriptor has been copied to internal
1330  * RAM. We compose a dummy packet here and queue it for transmission.
1331  *
1332  * XXX the packet will then actually be sent over the network...
1333  */
1334 static int
1335 epic_queue_last_packet(sc)
1336 	epic_softc_t *sc;
1337 {
1338 	struct epic_tx_desc *desc;
1339 	struct epic_frag_list *flist;
1340 	struct epic_tx_buffer *buf;
1341 	struct mbuf *m0;
1342 	int i;
1343 
1344 	if_printf(&sc->sc_if, "queue last packet\n");
1345 
1346 	desc = sc->tx_desc + sc->cur_tx;
1347 	flist = sc->tx_flist + sc->cur_tx;
1348 	buf = sc->tx_buffer + sc->cur_tx;
1349 
1350 	if ((desc->status & 0x8000) || (buf->mbuf != NULL))
1351 		return (EBUSY);
1352 
1353 	MGETHDR(m0, MB_DONTWAIT, MT_DATA);
1354 	if (NULL == m0)
1355 		return (ENOBUFS);
1356 
1357 	/* Prepare mbuf */
1358 	m0->m_len = min(MHLEN, ETHER_MIN_LEN-ETHER_CRC_LEN);
1359 	flist->frag[0].fraglen = m0->m_len;
1360 	m0->m_pkthdr.len = m0->m_len;
1361 	m0->m_pkthdr.rcvif = &sc->sc_if;
1362 	bzero(mtod(m0,caddr_t), m0->m_len);
1363 
1364 	/* Fill fragments list */
1365 	flist->frag[0].fraglen = m0->m_len;
1366 	flist->frag[0].fragaddr = vtophys(mtod(m0, caddr_t));
1367 	flist->numfrags = 1;
1368 
1369 	/* Fill in descriptor */
1370 	buf->mbuf = m0;
1371 	sc->pending_txs++;
1372 	sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
1373 	desc->control = 0x01;
1374 	desc->txlength = max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
1375 	desc->status = 0x8000;
1376 
1377 	/* Launch transmission */
1378 	CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED);
1379 
1380 	/* Wait for Tx DMA to stop (for how long??? XXX) */
1381 	for (i=0; i<1000; i++) {
1382 		if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE)
1383 			break;
1384 		DELAY(1);
1385 	}
1386 
1387 	if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0)
1388 		if_printf(&sc->sc_if, "ERROR! can't stop Tx DMA (2)\n");
1389 	else
1390 		epic_tx_done(sc);
1391 
1392 	return 0;
1393 }
1394 
1395 /*
1396  * Synopsis: Shut down the board and deallocate the rings.
1397  */
1398 static void
1399 epic_stop(sc)
1400 	epic_softc_t *sc;
1401 {
1402 
1403 	crit_enter();
1404 
1405 	sc->sc_if.if_timer = 0;
1406 
1407 	callout_stop(&sc->tx_stat_timer);
1408 
1409 	/* Disable interrupts */
1410 	CSR_WRITE_4(sc, INTMASK, 0);
1411 	CSR_WRITE_4(sc, GENCTL, 0);
1412 
1413 	/* Try to stop Rx and TX processes */
1414 	epic_stop_activity(sc);
1415 
1416 	/* Reset chip */
1417 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
1418 	DELAY(1000);
1419 
1420 	/* Make chip go to bed */
1421 	CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN);
1422 
1423 	/* Free memory allocated for rings */
1424 	epic_free_rings(sc);
1425 
1426 	/* Mark as stopped */
1427 	sc->sc_if.if_flags &= ~IFF_RUNNING;
1428 
1429 	crit_exit();
1430 	return;
1431 }
1432 
1433 /*
1434  * Synopsis: Free all memory allocated for the rings.
1435  */
1436 static void
1437 epic_free_rings(sc)
1438 	epic_softc_t *sc;
1439 {
1440 	int i;
1441 
1442 	for (i=0; i<RX_RING_SIZE; i++) {
1443 		struct epic_rx_buffer *buf = sc->rx_buffer + i;
1444 		struct epic_rx_desc *desc = sc->rx_desc + i;
1445 
1446 		desc->status = 0;
1447 		desc->buflength = 0;
1448 		desc->bufaddr = 0;
1449 
1450 		if (buf->mbuf) m_freem(buf->mbuf);
1451 		buf->mbuf = NULL;
1452 	}
1453 
1454 	for (i=0; i<TX_RING_SIZE; i++) {
1455 		struct epic_tx_buffer *buf = sc->tx_buffer + i;
1456 		struct epic_tx_desc *desc = sc->tx_desc + i;
1457 
1458 		desc->status = 0;
1459 		desc->buflength = 0;
1460 		desc->bufaddr = 0;
1461 
1462 		if (buf->mbuf) m_freem(buf->mbuf);
1463 		buf->mbuf = NULL;
1464 	}
1465 }
1466 
1467 /*
1468  * Synopsis: Allocate mbufs for the Rx ring and point the Rx descriptors
1469  * at them. Point the Tx descriptors at the fragment lists. Check that all
1470  * descriptors and fraglists are page-bounded and aligned properly.
1471  */
1472 static int
1473 epic_init_rings(sc)
1474 	epic_softc_t *sc;
1475 {
1476 	int i;
1477 
1478 	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;
1479 
1480 	for (i = 0; i < RX_RING_SIZE; i++) {
1481 		struct epic_rx_buffer *buf = sc->rx_buffer + i;
1482 		struct epic_rx_desc *desc = sc->rx_desc + i;
1483 
1484 		desc->status = 0;		/* Owned by driver */
1485 		desc->next = vtophys(sc->rx_desc + ((i+1) & RX_RING_MASK));
1486 
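		/*
		 * The chip is handed physical addresses via vtophys(), so a
		 * descriptor must be 32-bit aligned and must not straddle a
		 * page boundary; otherwise bail out.
		 */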
1487 		if ((desc->next & 3) ||
1488 		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
1489 			epic_free_rings(sc);
1490 			return EFAULT;
1491 		}
1492 
1493 		EPIC_MGETCLUSTER(buf->mbuf);
1494 		if (NULL == buf->mbuf) {
1495 			epic_free_rings(sc);
1496 			return ENOBUFS;
1497 		}
1498 		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
1499 
1500 		desc->buflength = MCLBYTES;	/* Max RX buffer length */
1501 		desc->status = 0x8000;		/* Set owner bit to NIC */
1502 	}
1503 
1504 	for (i = 0; i < TX_RING_SIZE; i++) {
1505 		struct epic_tx_buffer *buf = sc->tx_buffer + i;
1506 		struct epic_tx_desc *desc = sc->tx_desc + i;
1507 
1508 		desc->status = 0;
1509 		desc->next = vtophys(sc->tx_desc + ((i+1) & TX_RING_MASK));
1510 
1511 		if ((desc->next & 3) ||
1512 		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
1513 			epic_free_rings(sc);
1514 			return EFAULT;
1515 		}
1516 
1517 		buf->mbuf = NULL;
1518 		desc->bufaddr = vtophys(sc->tx_flist + i);
1519 
1520 		if ((desc->bufaddr & 3) ||
1521 		    ((desc->bufaddr & PAGE_MASK) + sizeof(struct epic_frag_list)) > PAGE_SIZE) {
1522 			epic_free_rings(sc);
1523 			return EFAULT;
1524 		}
1525 	}
1526 
1527 	return 0;
1528 }
1529 
1530 /*
1531  * EEPROM operation functions
1532  */
1533 static void
1534 epic_write_eepromreg(sc, val)
1535 	epic_softc_t *sc;
1536 	u_int8_t val;
1537 {
1538 	u_int16_t i;
1539 
1540 	CSR_WRITE_1(sc, EECTL, val);
1541 
1542 	for (i=0; i<0xFF; i++)
1543 		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0) break;
1544 
1545 	return;
1546 }
1547 
1548 static u_int8_t
1549 epic_read_eepromreg(sc)
1550 	epic_softc_t *sc;
1551 {
1552 	return CSR_READ_1(sc, EECTL);
1553 }
1554 
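/*
 * Bit-bang the serial EEPROM: epic_eeprom_clock() pulses the 0x04 (clock)
 * bit in EECTL around the given value, epic_output_eepromw() shifts a
 * 16-bit word out MSB first (0x0B vs. 0x03 differ only in the 0x08 data-out
 * bit), and epic_input_eepromw() samples the 0x10 data-in bit on each clock.
 */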
1555 static u_int8_t
1556 epic_eeprom_clock(sc, val)
1557 	epic_softc_t *sc;
1558 	u_int8_t val;
1559 {
1560 	epic_write_eepromreg(sc, val);
1561 	epic_write_eepromreg(sc, (val | 0x4));
1562 	epic_write_eepromreg(sc, val);
1563 
1564 	return epic_read_eepromreg(sc);
1565 }
1566 
1567 static void
1568 epic_output_eepromw(sc, val)
1569 	epic_softc_t *sc;
1570 	u_int16_t val;
1571 {
1572 	int i;
1573 
1574 	for (i = 0xF; i >= 0; i--) {
1575 		if (val & (1 << i))
1576 			epic_eeprom_clock(sc, 0x0B);
1577 		else
1578 			epic_eeprom_clock(sc, 0x03);
1579 	}
1580 }
1581 
1582 static u_int16_t
1583 epic_input_eepromw(sc)
1584 	epic_softc_t *sc;
1585 {
1586 	u_int16_t retval = 0;
1587 	int i;
1588 
1589 	for (i = 0xF; i >= 0; i--) {
1590 		if (epic_eeprom_clock(sc, 0x3) & 0x10)
1591 			retval |= (1 << i);
1592 	}
1593 
1594 	return retval;
1595 }
1596 
1597 static int
1598 epic_read_eeprom(sc, loc)
1599 	epic_softc_t *sc;
1600 	u_int16_t loc;
1601 {
1602 	u_int16_t dataval;
1603 	u_int16_t read_cmd;
1604 
1605 	epic_write_eepromreg(sc, 3);
1606 
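	/*
	 * Build the serial READ command: if bit 0x40 is set in EECTL
	 * (presumably the EEPROM size indication), frame the command with a
	 * 6-bit word address, otherwise with an 8-bit word address.
	 */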
1607 	if (epic_read_eepromreg(sc) & 0x40)
1608 		read_cmd = (loc & 0x3F) | 0x180;
1609 	else
1610 		read_cmd = (loc & 0xFF) | 0x600;
1611 
1612 	epic_output_eepromw(sc, read_cmd);
1613 
1614 	dataval = epic_input_eepromw(sc);
1615 
1616 	epic_write_eepromreg(sc, 1);
1617 
1618 	return dataval;
1619 }
1620 
1621 /*
1622  * Here go the MII read/write routines
1623  */
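/*
 * MIICTL as used below: the register number is shifted into bit 4 and the
 * PHY address into bit 9; bit 0 starts a read cycle and bit 1 starts a write
 * cycle. The chip clears the start bit when the cycle completes, so we poll it.
 */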
1624 static int
1625 epic_read_phy_reg(sc, phy, reg)
1626 	epic_softc_t *sc;
1627 	int phy, reg;
1628 {
1629 	int i;
1630 
1631 	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));
1632 
1633 	for (i = 0; i < 0x100; i++) {
1634 		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0) break;
1635 		DELAY(1);
1636 	}
1637 
1638 	return (CSR_READ_4(sc, MIIDATA));
1639 }
1640 
1641 static void
1642 epic_write_phy_reg(sc, phy, reg, val)
1643 	epic_softc_t *sc;
1644 	int phy, reg, val;
1645 {
1646 	int i;
1647 
1648 	CSR_WRITE_4(sc, MIIDATA, val);
1649 	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));
1650 
1651 	for(i=0;i<0x100;i++) {
1652 		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0) break;
1653 		DELAY(1);
1654 	}
1655 
1656 	return;
1657 }
1658 
1659 static int
1660 epic_miibus_readreg(dev, phy, reg)
1661 	device_t dev;
1662 	int phy, reg;
1663 {
1664 	epic_softc_t *sc;
1665 
1666 	sc = device_get_softc(dev);
1667 
1668 	return (PHY_READ_2(sc, phy, reg));
1669 }
1670 
1671 static int
1672 epic_miibus_writereg(dev, phy, reg, data)
1673 	device_t dev;
1674 	int phy, reg, data;
1675 {
1676 	epic_softc_t *sc;
1677 
1678 	sc = device_get_softc(dev);
1679 
1680 	PHY_WRITE_2(sc, phy, reg, data);
1681 
1682 	return (0);
1683 }
1684