xref: /dragonfly/sys/dev/netif/tx/if_tx.c (revision 4caa7869)
1 /*-
2  * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org)
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/tx/if_tx.c,v 1.61.2.1 2002/10/29 01:43:49 semenu Exp $
27  * $DragonFly: src/sys/dev/netif/tx/if_tx.c,v 1.6 2004/01/06 01:40:49 dillon Exp $
28  */
29 
30 /*
31  * EtherPower II 10/100 Fast Ethernet (SMC 9432 serie)
32  *
33  * These cards are based on the SMC83c17x (EPIC) chip and one of several
34  * PHYs (QS6612, AC101 and LXT970 have been seen). Media support depends on
35  * the card model. All cards support 10baseT/UTP and 100baseTX half- and
36  * full-duplex (SMC9432TX). The SMC9432BTX also supports 10base2/BNC, and
37  * the SMC9432FTX also supports fibre optics.
38  *
39  * Thanks go to Steve Bauer and Jason Wright.
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sockio.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/socket.h>
49 #include <sys/queue.h>
50 
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 
57 #include <net/bpf.h>
58 
59 #include <net/vlan/if_vlan_var.h>
60 
61 #include <vm/vm.h>		/* for vtophys */
62 #include <vm/pmap.h>		/* for vtophys */
63 #include <machine/bus_memio.h>
64 #include <machine/bus_pio.h>
65 #include <machine/bus.h>
66 #include <machine/resource.h>
67 #include <machine/clock.h>	/* for DELAY */
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70 
71 #include <bus/pci/pcireg.h>
72 #include <bus/pci/pcivar.h>
73 
74 #include "../mii_layer/mii.h"
75 #include "../mii_layer/miivar.h"
76 #include "../mii_layer/miidevs.h"
77 #include "../mii_layer/lxtphyreg.h"
78 
79 #include "miibus_if.h"
80 
81 #include "if_txreg.h"
82 #include "if_txvar.h"
83 
84 static int epic_ifioctl(struct ifnet *, u_long, caddr_t);
85 static void epic_intr(void *);
86 static void epic_tx_underrun(epic_softc_t *);
87 static int epic_common_attach(epic_softc_t *);
88 static void epic_ifstart(struct ifnet *);
89 static void epic_ifwatchdog(struct ifnet *);
90 static void epic_stats_update(epic_softc_t *);
91 static int epic_init(epic_softc_t *);
92 static void epic_stop(epic_softc_t *);
93 static void epic_rx_done(epic_softc_t *);
94 static void epic_tx_done(epic_softc_t *);
95 static int epic_init_rings(epic_softc_t *);
96 static void epic_free_rings(epic_softc_t *);
97 static void epic_stop_activity(epic_softc_t *);
98 static int epic_queue_last_packet(epic_softc_t *);
99 static void epic_start_activity(epic_softc_t *);
100 static void epic_set_rx_mode(epic_softc_t *);
101 static void epic_set_tx_mode(epic_softc_t *);
102 static void epic_set_mc_table(epic_softc_t *);
103 static u_int8_t epic_calchash(caddr_t);
104 static int epic_read_eeprom(epic_softc_t *,u_int16_t);
105 static void epic_output_eepromw(epic_softc_t *, u_int16_t);
106 static u_int16_t epic_input_eepromw(epic_softc_t *);
107 static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t);
108 static void epic_write_eepromreg(epic_softc_t *,u_int8_t);
109 static u_int8_t epic_read_eepromreg(epic_softc_t *);
110 
111 static int epic_read_phy_reg(epic_softc_t *, int, int);
112 static void epic_write_phy_reg(epic_softc_t *, int, int, int);
113 
114 static int epic_miibus_readreg(device_t, int, int);
115 static int epic_miibus_writereg(device_t, int, int, int);
116 static void epic_miibus_statchg(device_t);
117 static void epic_miibus_mediainit(device_t);
118 
119 static int epic_ifmedia_upd(struct ifnet *);
120 static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);
121 
122 static int epic_probe(device_t);
123 static int epic_attach(device_t);
124 static void epic_shutdown(device_t);
125 static int epic_detach(device_t);
126 static struct epic_type *epic_devtype(device_t);
127 
128 static device_method_t epic_methods[] = {
129 	/* Device interface */
130 	DEVMETHOD(device_probe,		epic_probe),
131 	DEVMETHOD(device_attach,	epic_attach),
132 	DEVMETHOD(device_detach,	epic_detach),
133 	DEVMETHOD(device_shutdown,	epic_shutdown),
134 
135 	/* MII interface */
136 	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
137 	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
138 	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
139 	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),
140 
141 	{ 0, 0 }
142 };
143 
144 static driver_t epic_driver = {
145 	"tx",
146 	epic_methods,
147 	sizeof(epic_softc_t)
148 };
149 
150 static devclass_t epic_devclass;
151 
152 DECLARE_DUMMY_MODULE(if_tx);
153 MODULE_DEPEND(if_tx, miibus, 1, 1, 1);
154 DRIVER_MODULE(if_tx, pci, epic_driver, epic_devclass, 0, 0);
155 DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);
156 
157 static struct epic_type epic_devs[] = {
158 	{ SMC_VENDORID, SMC_DEVICEID_83C170,
159 		"SMC EtherPower II 10/100" },
160 	{ 0, 0, NULL }
161 };
162 
163 static int
164 epic_probe(dev)
165 	device_t dev;
166 {
167 	struct epic_type *t;
168 
169 	t = epic_devtype(dev);
170 
171 	if (t != NULL) {
172 		device_set_desc(dev, t->name);
173 		return(0);
174 	}
175 
176 	return(ENXIO);
177 }
178 
179 static struct epic_type *
180 epic_devtype(dev)
181 	device_t dev;
182 {
183 	struct epic_type *t;
184 
185 	t = epic_devs;
186 
187 	while(t->name != NULL) {
188 		if ((pci_get_vendor(dev) == t->ven_id) &&
189 		    (pci_get_device(dev) == t->dev_id)) {
190 			return(t);
191 		}
192 		t++;
193 	}
194 	return (NULL);
195 }
196 
197 #if defined(EPIC_USEIOSPACE)
198 #define	EPIC_RES	SYS_RES_IOPORT
199 #define	EPIC_RID	PCIR_BASEIO
200 #else
201 #define	EPIC_RES	SYS_RES_MEMORY
202 #define	EPIC_RID	PCIR_BASEMEM
203 #endif
204 
205 /*
206  * Attach routine: map registers, allocate softc, rings and descriptors.
207  * Reset to known state.
208  */
209 static int
210 epic_attach(dev)
211 	device_t dev;
212 {
213 	struct ifnet *ifp;
214 	epic_softc_t *sc;
215 	u_int32_t command;
216 	int unit, error;
217 	int i, s, rid, tmp;
218 
219 	s = splimp ();
220 
221 	sc = device_get_softc(dev);
222 	unit = device_get_unit(dev);
223 
224 	/* Preinitialize softc structure */
225 	bzero(sc, sizeof(epic_softc_t));
226 	sc->unit = unit;
227 	sc->dev = dev;
228 
229 	/* Fill ifnet structure */
230 	ifp = &sc->sc_if;
231 	if_initname(ifp, "tx", unit);
232 	ifp->if_softc = sc;
233 	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST;
234 	ifp->if_ioctl = epic_ifioctl;
235 	ifp->if_output = ether_output;
236 	ifp->if_start = epic_ifstart;
237 	ifp->if_watchdog = epic_ifwatchdog;
238 	ifp->if_init = (if_init_f_t*)epic_init;
239 	ifp->if_timer = 0;
240 	ifp->if_baudrate = 10000000;
241 	ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;
242 
243 	/* Enable ports, memory and busmastering */
244 	command = pci_read_config(dev, PCIR_COMMAND, 4);
245 	command |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
246 	pci_write_config(dev, PCIR_COMMAND, command, 4);
247 	command = pci_read_config(dev, PCIR_COMMAND, 4);
248 
249 #if defined(EPIC_USEIOSPACE)
250 	if ((command & PCIM_CMD_PORTEN) == 0) {
251 		device_printf(dev, "failed to enable I/O mapping!\n");
252 		error = ENXIO;
253 		goto fail;
254 	}
255 #else
256 	if ((command & PCIM_CMD_MEMEN) == 0) {
257 		device_printf(dev, "failed to enable memory mapping!\n");
258 		error = ENXIO;
259 		goto fail;
260 	}
261 #endif
262 
263 	rid = EPIC_RID;
264 	sc->res = bus_alloc_resource(dev, EPIC_RES, &rid, 0, ~0, 1,
265 	    RF_ACTIVE);
266 
267 	if (sc->res == NULL) {
268 		device_printf(dev, "couldn't map ports/memory\n");
269 		error = ENXIO;
270 		goto fail;
271 	}
272 
273 	sc->sc_st = rman_get_bustag(sc->res);
274 	sc->sc_sh = rman_get_bushandle(sc->res);
275 
276 	/* Allocate interrupt */
277 	rid = 0;
278 	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
279 	    RF_SHAREABLE | RF_ACTIVE);
280 
281 	if (sc->irq == NULL) {
282 		device_printf(dev, "couldn't map interrupt\n");
283 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
284 		error = ENXIO;
285 		goto fail;
286 	}
287 
288 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
289 	    epic_intr, sc, &sc->sc_ih);
290 
291 	if (error) {
292 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
293 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
294 		device_printf(dev, "couldn't set up irq\n");
295 		goto fail;
296 	}
297 
298 	/* Do OS independent part, including chip wakeup and reset */
299 	error = epic_common_attach(sc);
300 	if (error) {
301 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
302 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
303 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
304 		error = ENXIO;
305 		goto fail;
306 	}
307 
308 	/* Do ifmedia setup */
309 	if (mii_phy_probe(dev, &sc->miibus,
310 	    epic_ifmedia_upd, epic_ifmedia_sts)) {
311 		device_printf(dev, "ERROR! MII without any PHY!?\n");
312 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
313 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
314 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
315 		error = ENXIO;
316 		goto fail;
317 	}
318 
319 	/* Display ethernet address, ... */
320 	device_printf(dev, "address %6D,", sc->sc_macaddr, ":");
321 
322 	/* board type and ... */
323 	printf(" type ");
324 	for(i=0x2c;i<0x32;i++) {
325 		tmp = epic_read_eeprom(sc, i);
326 		if (' ' == (u_int8_t)tmp) break;
327 		printf("%c", (u_int8_t)tmp);
328 		tmp >>= 8;
329 		if (' ' == (u_int8_t)tmp) break;
330 		printf("%c", (u_int8_t)tmp);
331 	}
332 	printf("\n");
333 
334 	/* Attach to OS's managers */
335 	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
336 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
337 	callout_handle_init(&sc->stat_ch);
338 
339 fail:
340 	splx(s);
341 
342 	return(error);
343 }
344 
345 /*
346  * Detach driver and free resources
347  */
348 static int
349 epic_detach(dev)
350 	device_t dev;
351 {
352 	struct ifnet *ifp;
353 	epic_softc_t *sc;
354 	int s;
355 
356 	s = splimp();
357 
358 	sc = device_get_softc(dev);
359 	ifp = &sc->arpcom.ac_if;
360 
361 	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
362 
363 	epic_stop(sc);
364 
365 	bus_generic_detach(dev);
366 	device_delete_child(dev, sc->miibus);
367 
368 	bus_teardown_intr(dev, sc->irq, sc->sc_ih);
369 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
370 	bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
371 
372 	free(sc->tx_flist, M_DEVBUF);
373 	free(sc->tx_desc, M_DEVBUF);
374 	free(sc->rx_desc, M_DEVBUF);
375 
376 	splx(s);
377 
378 	return(0);
379 }
380 
381 #undef	EPIC_RES
382 #undef	EPIC_RID
383 
384 /*
385  * Stop all chip I/O so that the kernel's probe routines don't
386  * get confused by errant DMAs when rebooting.
387  */
388 static void
389 epic_shutdown(dev)
390 	device_t dev;
391 {
392 	epic_softc_t *sc;
393 
394 	sc = device_get_softc(dev);
395 
396 	epic_stop(sc);
397 
398 	return;
399 }
400 
401 /*
402  * This is the if_ioctl handler.
403  */
404 static int
405 epic_ifioctl(ifp, command, data)
406 	struct ifnet *ifp;
407 	u_long command;
408 	caddr_t data;
409 {
410 	epic_softc_t *sc = ifp->if_softc;
411 	struct mii_data	*mii;
412 	struct ifreq *ifr = (struct ifreq *) data;
413 	int x, error = 0;
414 
415 	x = splimp();
416 
417 	switch (command) {
418 	case SIOCSIFADDR:
419 	case SIOCGIFADDR:
420 		error = ether_ioctl(ifp, command, data);
421 		break;
422 	case SIOCSIFMTU:
423 		if (ifp->if_mtu == ifr->ifr_mtu)
424 			break;
425 
426 		/* XXX Though the datasheet doesn't imply any
427 		 * limitation on RX and TX sizes besides the 64Kb max
428 		 * DMA transfer, it seems we can't send more than 1600
429 		 * data bytes per ethernet packet. (The transmitter hangs
430 		 * if more data is sent.)
431 		 */
432 		if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
433 			ifp->if_mtu = ifr->ifr_mtu;
434 			epic_stop(sc);
435 			epic_init(sc);
436 		} else
437 			error = EINVAL;
438 		break;
439 
440 	case SIOCSIFFLAGS:
441 		/*
442 		 * If the interface is marked up and stopped, then start it.
443 		 * If it is marked down and running, then stop it.
444 		 */
445 		if (ifp->if_flags & IFF_UP) {
446 			if ((ifp->if_flags & IFF_RUNNING) == 0) {
447 				epic_init(sc);
448 				break;
449 			}
450 		} else {
451 			if (ifp->if_flags & IFF_RUNNING) {
452 				epic_stop(sc);
453 				break;
454 			}
455 		}
456 
457 		/* Handle IFF_PROMISC and IFF_ALLMULTI flags */
458 		epic_stop_activity(sc);
459 		epic_set_mc_table(sc);
460 		epic_set_rx_mode(sc);
461 		epic_start_activity(sc);
462 		break;
463 
464 	case SIOCADDMULTI:
465 	case SIOCDELMULTI:
466 		epic_set_mc_table(sc);
467 		error = 0;
468 		break;
469 
470 	case SIOCSIFMEDIA:
471 	case SIOCGIFMEDIA:
472 		mii = device_get_softc(sc->miibus);
473 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
474 		break;
475 
476 	default:
477 		error = EINVAL;
478 	}
479 	splx(x);
480 
481 	return error;
482 }
483 
484 /*
485  * OS-independent part of the attach process: allocate memory for descriptors
486  * and frag lists, wake up the chip, and read the MAC address and PHY identifier.
487  * Returns ENOMEM on failure.
488  */
489 static int
490 epic_common_attach(sc)
491 	epic_softc_t *sc;
492 {
493 	int i;
494 
495 	sc->tx_flist = malloc(sizeof(struct epic_frag_list)*TX_RING_SIZE,
496 	    M_DEVBUF, M_NOWAIT | M_ZERO);
497 	sc->tx_desc = malloc(sizeof(struct epic_tx_desc)*TX_RING_SIZE,
498 	    M_DEVBUF, M_NOWAIT | M_ZERO);
499 	sc->rx_desc = malloc(sizeof(struct epic_rx_desc)*RX_RING_SIZE,
500 	    M_DEVBUF, M_NOWAIT | M_ZERO);
501 
502 	if (sc->tx_flist == NULL || sc->tx_desc == NULL || sc->rx_desc == NULL){
503 		device_printf(sc->dev, "failed to malloc memory\n");
504 		if (sc->tx_flist) free(sc->tx_flist, M_DEVBUF);
505 		if (sc->tx_desc) free(sc->tx_desc, M_DEVBUF);
506 		if (sc->rx_desc) free(sc->rx_desc, M_DEVBUF);
507 		return (ENOMEM);
508 	}
509 
510 	/* Bring the chip out of low-power mode. */
511 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
512 	DELAY(500);
513 
514 	/* Workaround for Application Note 7-15 */
515 	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);
516 
517 	/* Read mac address from EEPROM */
518 	for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
519 		((u_int16_t *)sc->sc_macaddr)[i] = epic_read_eeprom(sc,i);
520 
521 	/* Set Non-Volatile Control Register from EEPROM */
522 	CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);
523 
524 	/* Set defaults */
525 	sc->tx_threshold = TRANSMIT_THRESHOLD;
526 	sc->txcon = TXCON_DEFAULT;
527 	sc->miicfg = MIICFG_SMI_ENABLE;
528 	sc->phyid = EPIC_UNKN_PHY;
529 	sc->serinst = -1;
530 
531 	/* Fetch card id */
532 	sc->cardvend = pci_read_config(sc->dev, PCIR_SUBVEND_0, 2);
533 	sc->cardid = pci_read_config(sc->dev, PCIR_SUBDEV_0, 2);
534 
535 	if (sc->cardvend != SMC_VENDORID)
536 		device_printf(sc->dev, "unknown card vendor %04xh\n", sc->cardvend);
537 
538 	return 0;
539 }
540 
541 /*
542  * This is the if_start handler. It takes mbufs from the if_snd queue
543  * and queues them for transmission, one by one, until the TX ring becomes
544  * full or the queue becomes empty.
545  */
546 static void
547 epic_ifstart(ifp)
548 	struct ifnet * ifp;
549 {
550 	epic_softc_t *sc = ifp->if_softc;
551 	struct epic_tx_buffer *buf;
552 	struct epic_tx_desc *desc;
553 	struct epic_frag_list *flist;
554 	struct mbuf *m0;
555 	struct mbuf *m;
556 	int i;
557 
558 	while (sc->pending_txs < TX_RING_SIZE) {
559 		buf = sc->tx_buffer + sc->cur_tx;
560 		desc = sc->tx_desc + sc->cur_tx;
561 		flist = sc->tx_flist + sc->cur_tx;
562 
563 		/* Get next packet to send */
564 		IF_DEQUEUE(&ifp->if_snd, m0);
565 
566 		/* If nothing to send, return */
567 		if (NULL == m0) return;
568 
569 		/* Fill fragments list */
570 		for (m = m0, i = 0;
571 		    (NULL != m) && (i < EPIC_MAX_FRAGS);
572 		    m = m->m_next, i++) {
573 			flist->frag[i].fraglen = m->m_len;
574 			flist->frag[i].fragaddr = vtophys(mtod(m, caddr_t));
575 		}
576 		flist->numfrags = i;
577 
578 		/* If the packet had more than EPIC_MAX_FRAGS fragments, */
579 		/* copy it into a newly allocated mbuf cluster */
580 		if (NULL != m) {
581 			EPIC_MGETCLUSTER(m);
582 			if (NULL == m) {
583 				m_freem(m0);
584 				ifp->if_oerrors++;
585 				continue;
586 			}
587 
588 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
589 			flist->frag[0].fraglen =
590 			     m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
591 			m->m_pkthdr.rcvif = ifp;
592 
593 			flist->numfrags = 1;
594 			flist->frag[0].fragaddr = vtophys(mtod(m, caddr_t));
595 			m_freem(m0);
596 			m0 = m;
597 		}
598 
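		/*
		 * Hand the descriptor to the chip: record the mbuf, pad short
		 * frames to the Ethernet minimum, set the owner bit (0x8000)
		 * so the NIC picks the descriptor up, and kick the transmit
		 * queue.
		 */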
599 		buf->mbuf = m0;
600 		sc->pending_txs++;
601 		sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
602 		desc->control = 0x01;
603 		desc->txlength =
604 		    max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
605 		desc->status = 0x8000;
606 		CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);
607 
608 		/* Set watchdog timer */
609 		ifp->if_timer = 8;
610 
611 		if (ifp->if_bpf)
612 			bpf_mtap(ifp, m0);
613 	}
614 
615 	ifp->if_flags |= IFF_OACTIVE;
616 
617 	return;
618 
619 }
620 
621 /*
622  * Synopsis: Finish all received frames.
623  */
624 static void
625 epic_rx_done(sc)
626 	epic_softc_t *sc;
627 {
628 	u_int16_t len;
629 	struct epic_rx_buffer *buf;
630 	struct epic_rx_desc *desc;
631 	struct mbuf *m;
632 	struct ether_header *eh;
633 
634 	while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
635 		buf = sc->rx_buffer + sc->cur_rx;
636 		desc = sc->rx_desc + sc->cur_rx;
637 
638 		/* Switch to next descriptor */
639 		sc->cur_rx = (sc->cur_rx+1) & RX_RING_MASK;
640 
641 		/*
642 		 * Check for RX errors. This should only happen if
643 		 * SAVE_ERRORED_PACKETS is set. RX errors usually
644 		 * generate an RXE interrupt.
645 		 */
646 		if ((desc->status & 1) == 0) {
647 			sc->sc_if.if_ierrors++;
648 			desc->status = 0x8000;
649 			continue;
650 		}
651 
652 		/* Save packet length and the mbuf containing the packet */
653 		len = desc->rxlength - ETHER_CRC_LEN;
654 		m = buf->mbuf;
655 
656 		/* Try to get mbuf cluster */
657 		EPIC_MGETCLUSTER(buf->mbuf);
658 		if (NULL == buf->mbuf) {
659 			buf->mbuf = m;
660 			desc->status = 0x8000;
661 			sc->sc_if.if_ierrors++;
662 			continue;
663 		}
664 
665 		/* Point to new mbuf, and give descriptor to chip */
666 		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
667 		desc->status = 0x8000;
668 
669 		/* Point eh at the ethernet header and set the full frame length */
670 		eh = mtod(m, struct ether_header *);
671 		m->m_pkthdr.rcvif = &(sc->sc_if);
672 		m->m_pkthdr.len = m->m_len = len;
673 
674 		/* Strip the ethernet header; ether_input() gets it separately via eh */
675 		m->m_pkthdr.len = m->m_len = len - sizeof(struct ether_header);
676 		m->m_data += sizeof(struct ether_header);
677 
678 		/* Give mbuf to OS */
679 		ether_input(&sc->sc_if, eh, m);
680 
681 		/* Successfully received frame */
682 		sc->sc_if.if_ipackets++;
683 	}
684 
685 	return;
686 }
687 
688 /*
689  * Synopsis: Do the last phase of transmission, i.e. while descriptors have
690  * been transmitted, decrement the pending_txs counter, free the mbuf holding
691  * the packet and switch to the next descriptor, repeating until no packets
692  * are pending or a descriptor has not been transmitted yet.
693  */
694 static void
695 epic_tx_done(sc)
696 	epic_softc_t *sc;
697 {
698 	struct epic_tx_buffer *buf;
699 	struct epic_tx_desc *desc;
700 	u_int16_t status;
701 
702 	while (sc->pending_txs > 0) {
703 		buf = sc->tx_buffer + sc->dirty_tx;
704 		desc = sc->tx_desc + sc->dirty_tx;
705 		status = desc->status;
706 
707 		/* If this packet is not transmitted yet, then the */
708 		/* following packets are not transmitted either */
709 		if (status & 0x8000) break;
710 
711 		/* Packet is transmitted. Switch to the next descriptor */
712 		/* and free the mbuf */
713 		sc->pending_txs--;
714 		sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
715 		m_freem(buf->mbuf);
716 		buf->mbuf = NULL;
717 
718 		/* Check for errors and collisions */
719 		if (status & 0x0001) sc->sc_if.if_opackets++;
720 		else sc->sc_if.if_oerrors++;
721 		sc->sc_if.if_collisions += (status >> 8) & 0x1F;
722 #if defined(EPIC_DIAG)
723 		if ((status & 0x1001) == 0x1001)
724 			device_printf(sc->dev,  "Tx ERROR: excessive coll. number\n");
725 #endif
726 	}
727 
728 	if (sc->pending_txs < TX_RING_SIZE)
729 		sc->sc_if.if_flags &= ~IFF_OACTIVE;
730 }
731 
732 /*
733  * Interrupt function
734  */
735 static void
736 epic_intr(arg)
737     void *arg;
738 {
739     epic_softc_t * sc = (epic_softc_t *) arg;
740     int status, i = 4;
741 
742     while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
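	/* Acknowledge the interrupt causes we latched by writing them back */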
743 	CSR_WRITE_4(sc, INTSTAT, status);
744 
745 	if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
746 	    epic_rx_done(sc);
747 	    if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
748 #if defined(EPIC_DIAG)
749 		if (status & INTSTAT_OVW)
750 		    device_printf(sc->dev, "RX buffer overflow\n");
751 		if (status & INTSTAT_RQE)
752 		    device_printf(sc->dev, "RX FIFO overflow\n");
753 #endif
754 		if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0)
755 		    CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED);
756 		sc->sc_if.if_ierrors++;
757 	    }
758 	}
759 
760 	if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
761 	    epic_tx_done(sc);
762 	    if (sc->sc_if.if_snd.ifq_head != NULL)
763 		    epic_ifstart(&sc->sc_if);
764 	}
765 
766 	/* Check for rare errors */
767 	if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
768 		      INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
769     	    if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
770 			  INTSTAT_APE|INTSTAT_DPE)) {
771 		device_printf(sc->dev, "PCI fatal errors occurred: %s%s%s%s\n",
772 		    (status&INTSTAT_PMA)?"PMA ":"",
773 		    (status&INTSTAT_PTA)?"PTA ":"",
774 		    (status&INTSTAT_APE)?"APE ":"",
775 		    (status&INTSTAT_DPE)?"DPE":""
776 		);
777 
778 		epic_stop(sc);
779 		epic_init(sc);
780 
781 	    	break;
782 	    }
783 
784 	    if (status & INTSTAT_RXE) {
785 #if defined(EPIC_DIAG)
786 		device_printf(sc->dev, "CRC/Alignment error\n");
787 #endif
788 		sc->sc_if.if_ierrors++;
789 	    }
790 
791 	    if (status & INTSTAT_TXU) {
792 		epic_tx_underrun(sc);
793 		sc->sc_if.if_oerrors++;
794 	    }
795 	}
796     }
797 
798     /* If no packets are pending, then no timeouts */
799     if (sc->pending_txs == 0) sc->sc_if.if_timer = 0;
800 
801     return;
802 }
803 
804 /*
805  * Handle the TX underrun error: increase the TX threshold
806  * and restart the transmitter.
807  */
808 static void
809 epic_tx_underrun(sc)
810 	epic_softc_t *sc;
811 {
812 	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
813 		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
814 #if defined(EPIC_DIAG)
815 		device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
816 #endif
817 	} else {
818 		sc->tx_threshold += 0x40;
819 #if defined(EPIC_DIAG)
820 		device_printf(sc->dev, "Tx UNDERRUN: TX threshold increased to %d\n",
821 		    sc->tx_threshold);
822 #endif
823 	}
824 
825 	/* We must set TXUGO to reset the stuck transmitter */
826 	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);
827 
828 	/* Update the TX threshold */
829 	epic_stop_activity(sc);
830 	epic_set_tx_mode(sc);
831 	epic_start_activity(sc);
832 
833 	return;
834 }
835 
836 /*
837  * Synopsis: This one is called if packets weren't transmitted
838  * within the timeout. Try to deallocate transmitted packets, and
839  * if that succeeds, continue to work.
840  */
841 static void
842 epic_ifwatchdog(ifp)
843 	struct ifnet *ifp;
844 {
845 	epic_softc_t *sc = ifp->if_softc;
846 	int x;
847 
848 	x = splimp();
849 
850 	device_printf(sc->dev, "device timeout %d packets\n", sc->pending_txs);
851 
852 	/* Try to finish queued packets */
853 	epic_tx_done(sc);
854 
855 	/* If not successful */
856 	if (sc->pending_txs > 0) {
857 
858 		ifp->if_oerrors+=sc->pending_txs;
859 
860 		/* Reinitialize board */
861 		device_printf(sc->dev, "reinitialization\n");
862 		epic_stop(sc);
863 		epic_init(sc);
864 
865 	} else
866 		device_printf(sc->dev, "seems we can continue normally\n");
867 
868 	/* Start output */
869 	if (ifp->if_snd.ifq_head) epic_ifstart(ifp);
870 
871 	splx(x);
872 }
873 
874 /*
875  * Despite the name of this function, it doesn't update statistics; it only
876  * drives the autonegotiation process via mii_tick().
877  */
878 static void
879 epic_stats_update(epic_softc_t * sc)
880 {
881 	struct mii_data * mii;
882 	int s;
883 
884 	s = splimp();
885 
886 	mii = device_get_softc(sc->miibus);
887 	mii_tick(mii);
888 
889 	sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);
890 
891 	splx(s);
892 }
893 
894 /*
895  * Set media options.
896  */
897 static int
898 epic_ifmedia_upd(ifp)
899 	struct ifnet *ifp;
900 {
901 	epic_softc_t *sc;
902 	struct mii_data *mii;
903 	struct ifmedia *ifm;
904 	struct mii_softc *miisc;
905 	int cfg, media;
906 
907 	sc = ifp->if_softc;
908 	mii = device_get_softc(sc->miibus);
909 	ifm = &mii->mii_media;
910 	media = ifm->ifm_cur->ifm_media;
911 
912 	/* Do not do anything if interface is not up */
913 	if ((ifp->if_flags & IFF_UP) == 0)
914 		return (0);
915 
916 	/*
917 	 * Lookup current selected PHY
918 	 */
919 	if (IFM_INST(media) == sc->serinst) {
920 		sc->phyid = EPIC_SERIAL;
921 		sc->physc = NULL;
922 	} else {
923 		/* If we're not selecting serial interface, select MII mode */
924 		sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
925 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
926 
927 		/* Default to unknown PHY */
928 		sc->phyid = EPIC_UNKN_PHY;
929 
930 		/* Lookup selected PHY */
931 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
932 		     miisc = LIST_NEXT(miisc, mii_list)) {
933 			if (IFM_INST(media) == miisc->mii_inst) {
934 				sc->physc = miisc;
935 				break;
936 			}
937 		}
938 
939 		/* Identify selected PHY */
940 		if (sc->physc) {
941 			int id1, id2, model, oui;
942 
943 			id1 = PHY_READ(sc->physc, MII_PHYIDR1);
944 			id2 = PHY_READ(sc->physc, MII_PHYIDR2);
945 
946 			oui = MII_OUI(id1, id2);
947 			model = MII_MODEL(id2);
948 			switch (oui) {
949 			case MII_OUI_QUALSEMI:
950 				if (model == MII_MODEL_QUALSEMI_QS6612)
951 					sc->phyid = EPIC_QS6612_PHY;
952 				break;
953 			case MII_OUI_xxALTIMA:
954 				if (model == MII_MODEL_xxALTIMA_AC101)
955 					sc->phyid = EPIC_AC101_PHY;
956 				break;
957 			case MII_OUI_xxLEVEL1:
958 				if (model == MII_MODEL_xxLEVEL1_LXT970)
959 					sc->phyid = EPIC_LXT970_PHY;
960 				break;
961 			}
962 		}
963 	}
964 
965 	/*
966 	 * Do PHY specific card setup
967 	 */
968 
969 	/* Call this to isolate all non-selected PHYs and
970 	 * set up the selected one
971 	 */
972 	mii_mediachg(mii);
973 
974 	/* Do our own setup */
975 	switch (sc->phyid) {
976 	case EPIC_QS6612_PHY:
977 		break;
978 	case EPIC_AC101_PHY:
979 		/* We have to power up the fiber transceivers */
980 		if (IFM_SUBTYPE(media) == IFM_100_FX)
981 			sc->miicfg |= MIICFG_694_ENABLE;
982 		else
983 			sc->miicfg &= ~MIICFG_694_ENABLE;
984 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
985 
986 		break;
987 	case EPIC_LXT970_PHY:
988 		/* We have to power up the fiber transceivers */
989 		cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
990 		if (IFM_SUBTYPE(media) == IFM_100_FX)
991 			cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
992 		else
993 			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
994 		PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);
995 
996 		break;
997 	case EPIC_SERIAL:
998 		/* Select the serial PHY (usually 10base2/BNC) */
999 		sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
1000 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
1001 
1002 		/* There is no PHY driver to fill these in */
1003 		mii->mii_media_active = media;
1004 		mii->mii_media_status = 0;
1005 
1006 		/* We need to call this manually as it wasn't called
1007 		 * in mii_mediachg()
1008 		 */
1009 		epic_miibus_statchg(sc->dev);
1010 
1011 		break;
1012 	default:
1013 		device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
1014 		return (EINVAL);
1015 	}
1016 
1017 	return(0);
1018 }
1019 
1020 /*
1021  * Report current media status.
1022  */
1023 static void
1024 epic_ifmedia_sts(ifp, ifmr)
1025 	struct ifnet *ifp;
1026 	struct ifmediareq *ifmr;
1027 {
1028 	epic_softc_t *sc;
1029 	struct mii_data *mii;
1030 	struct ifmedia *ifm;
1031 
1032 	sc = ifp->if_softc;
1033 	mii = device_get_softc(sc->miibus);
1034 	ifm = &mii->mii_media;
1035 
1036 	/* Nothing should be selected if interface is down */
1037 	if ((ifp->if_flags & IFF_UP) == 0) {
1038 		ifmr->ifm_active = IFM_NONE;
1039 		ifmr->ifm_status = 0;
1040 
1041 		return;
1042 	}
1043 
1044 	/* Call underlying pollstat, if not serial PHY */
1045 	if (sc->phyid != EPIC_SERIAL)
1046 		mii_pollstat(mii);
1047 
1048 	/* Simply copy media info */
1049 	ifmr->ifm_active = mii->mii_media_active;
1050 	ifmr->ifm_status = mii->mii_media_status;
1051 
1052 	return;
1053 }
1054 
1055 /*
1056  * Callback routine, called on media change.
1057  */
1058 static void
1059 epic_miibus_statchg(dev)
1060 	device_t dev;
1061 {
1062 	epic_softc_t *sc;
1063 	struct mii_data *mii;
1064 	int media;
1065 
1066 	sc = device_get_softc(dev);
1067 	mii = device_get_softc(sc->miibus);
1068 	media = mii->mii_media_active;
1069 
1070 	sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);
1071 
1072 	/* If we are in full-duplex mode or loopback operation,
1073 	 * we need to decouple receiver and transmitter.
1074 	 */
1075 	if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
1076  		sc->txcon |= TXCON_FULL_DUPLEX;
1077 
1078 	/* On some cards we need to manually set the full-duplex LED */
1079 	if (sc->cardid == SMC9432FTX ||
1080 	    sc->cardid == SMC9432FTX_SC) {
1081 		if (IFM_OPTIONS(media) & IFM_FDX)
1082 			sc->miicfg |= MIICFG_694_ENABLE;
1083 		else
1084 			sc->miicfg &= ~MIICFG_694_ENABLE;
1085 
1086 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
1087 	}
1088 
1089 	/* Update baudrate */
1090 	if (IFM_SUBTYPE(media) == IFM_100_TX ||
1091 	    IFM_SUBTYPE(media) == IFM_100_FX)
1092 		sc->sc_if.if_baudrate = 100000000;
1093 	else
1094 		sc->sc_if.if_baudrate = 10000000;
1095 
1096 	epic_stop_activity(sc);
1097 	epic_set_tx_mode(sc);
1098 	epic_start_activity(sc);
1099 
1100 	return;
1101 }
1102 
1103 static void
1104 epic_miibus_mediainit(dev)
1105 	device_t dev;
1106 {
1107 	epic_softc_t *sc;
1108 	struct mii_data *mii;
1109 	struct ifmedia *ifm;
1110 	int media;
1111 
1112 	sc = device_get_softc(dev);
1113 	mii = device_get_softc(sc->miibus);
1114 	ifm = &mii->mii_media;
1115 
1116 	/* Add the Serial Media Interface if present; this applies to
1117 	 * the SMC9432BTX series
1118 	 */
1119 	if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
1120 		/* Store its instance */
1121 		sc->serinst = mii->mii_instance++;
1122 
1123 		/* Add as 10base2/BNC media */
1124 		media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
1125 		ifmedia_add(ifm, media, 0, NULL);
1126 
1127 		/* Report to user */
1128 		device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
1129 	}
1130 
1131 	return;
1132 }
1133 
1134 /*
1135  * Reset chip, allocate rings, and update media.
1136  */
1137 static int
1138 epic_init(sc)
1139 	epic_softc_t *sc;
1140 {
1141 	struct ifnet *ifp = &sc->sc_if;
1142 	int s,i;
1143 
1144 	s = splimp();
1145 
1146 	/* If interface is already running, then we need not do anything */
1147 	if (ifp->if_flags & IFF_RUNNING) {
1148 		splx(s);
1149 		return 0;
1150 	}
1151 
1152 	/* Soft reset the chip (we have to power up the card first) */
1153 	CSR_WRITE_4(sc, GENCTL, 0);
1154 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
1155 
1156 	/*
1157 	 * Reset takes 15 PCI clock ticks, which depends on the PCI bus speed.
1158 	 * Assuming it is >= 33000000 Hz, we have to wait at least 495e-6 sec.
1159 	 */
1160 	DELAY(500);
1161 
1162 	/* Wake up */
1163 	CSR_WRITE_4(sc, GENCTL, 0);
1164 
1165 	/* Workaround for Application Note 7-15 */
1166 	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);
1167 
1168 	/* Initialize rings */
1169 	if (epic_init_rings(sc)) {
1170 		device_printf(sc->dev, "failed to init rings\n");
1171 		splx(s);
1172 		return -1;
1173 	}
1174 
1175 	/* Give rings to EPIC */
1176 	CSR_WRITE_4(sc, PRCDAR, vtophys(sc->rx_desc));
1177 	CSR_WRITE_4(sc, PTCDAR, vtophys(sc->tx_desc));
1178 
1179 	/* Put node address to EPIC */
1180 	CSR_WRITE_4(sc, LAN0, ((u_int16_t *)sc->sc_macaddr)[0]);
1181 	CSR_WRITE_4(sc, LAN1, ((u_int16_t *)sc->sc_macaddr)[1]);
1182 	CSR_WRITE_4(sc, LAN2, ((u_int16_t *)sc->sc_macaddr)[2]);
1183 
1184 	/* Set tx mode, including the transmit threshold */
1185 	epic_set_tx_mode(sc);
1186 
1187 	/* Compute and set RXCON. */
1188 	epic_set_rx_mode(sc);
1189 
1190 	/* Set multicast table */
1191 	epic_set_mc_table(sc);
1192 
1193 	/* Enable interrupts by setting the interrupt mask. */
1194 	CSR_WRITE_4(sc, INTMASK,
1195 		INTSTAT_RCC  | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
1196 		/* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
1197 		INTSTAT_FATAL);
1198 
1199 	/* Acknowledge all pending interrupts */
1200 	CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));
1201 
1202 	/* Enable interrupts, set PCI read multiple, etc. */
1203 	CSR_WRITE_4(sc, GENCTL,
1204 		GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
1205 		GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);
1206 
1207 	/* Mark interface running ... */
1208 	if (ifp->if_flags & IFF_UP) ifp->if_flags |= IFF_RUNNING;
1209 	else ifp->if_flags &= ~IFF_RUNNING;
1210 
1211 	/* ... and free */
1212 	ifp->if_flags &= ~IFF_OACTIVE;
1213 
1214 	/* Start Rx process */
1215 	epic_start_activity(sc);
1216 
1217 	/* Set appropriate media */
1218 	epic_ifmedia_upd(ifp);
1219 
1220 	sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);
1221 
1222 	splx(s);
1223 
1224 	return 0;
1225 }
1226 
1227 /*
1228  * Synopsis: calculate and set Rx mode. Chip must be in idle state to
1229  * access RXCON.
1230  */
1231 static void
1232 epic_set_rx_mode(sc)
1233 	epic_softc_t *sc;
1234 {
1235 	u_int32_t 		flags = sc->sc_if.if_flags;
1236 	u_int32_t 		rxcon = RXCON_DEFAULT;
1237 
1238 #if defined(EPIC_EARLY_RX)
1239 	rxcon |= RXCON_EARLY_RX;
1240 #endif
1241 
1242 	rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;
1243 
1244 	CSR_WRITE_4(sc, RXCON, rxcon);
1245 
1246 	return;
1247 }
1248 
1249 /*
1250  * Synopsis: Set transmit control register. Chip must be in idle state to
1251  * access TXCON.
1252  */
1253 static void
1254 epic_set_tx_mode(sc)
1255 	epic_softc_t *sc;
1256 {
1257 	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
1258 		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);
1259 
1260 	CSR_WRITE_4(sc, TXCON, sc->txcon);
1261 }
1262 
1263 /*
1264  * Synopsis: Program multicast filter honoring IFF_ALLMULTI and IFF_PROMISC
1265  * flags. (Note that setting the PROMISC bit in EPIC's RXCON only affects
1266  * individual frames; the multicast filter must be programmed manually.)
1267  *
1268  * Note: EPIC must be in idle state.
1269  */
1270 static void
1271 epic_set_mc_table(sc)
1272 	epic_softc_t *sc;
1273 {
1274 	struct ifnet *ifp = &sc->sc_if;
1275 	struct ifmultiaddr *ifma;
1276 	u_int16_t filter[4];
1277 	u_int8_t h;
1278 
1279 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1280 		CSR_WRITE_4(sc, MC0, 0xFFFF);
1281 		CSR_WRITE_4(sc, MC1, 0xFFFF);
1282 		CSR_WRITE_4(sc, MC2, 0xFFFF);
1283 		CSR_WRITE_4(sc, MC3, 0xFFFF);
1284 
1285 		return;
1286 	}
1287 
1288 	filter[0] = 0;
1289 	filter[1] = 0;
1290 	filter[2] = 0;
1291 	filter[3] = 0;
1292 
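	/*
	 * The 6-bit hash indexes a 64-bit filter kept as four 16-bit words:
	 * the upper two bits of the hash select the MC register and the
	 * lower four bits select the bit within it.
	 */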
1293 #if __FreeBSD_version < 500000
1294 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1295 #else
1296 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1297 #endif
1298 		if (ifma->ifma_addr->sa_family != AF_LINK)
1299 			continue;
1300 		h = epic_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1301 		filter[h >> 4] |= 1 << (h & 0xF);
1302 	}
1303 
1304 	CSR_WRITE_4(sc, MC0, filter[0]);
1305 	CSR_WRITE_4(sc, MC1, filter[1]);
1306 	CSR_WRITE_4(sc, MC2, filter[2]);
1307 	CSR_WRITE_4(sc, MC3, filter[3]);
1308 
1309 	return;
1310 }
1311 
1312 /*
1313  * Synopsis: calculate EPIC's hash of a multicast address.
1314  */
1315 static u_int8_t
1316 epic_calchash(addr)
1317 	caddr_t addr;
1318 {
1319 	u_int32_t crc, carry;
1320 	int i, j;
1321 	u_int8_t c;
1322 
1323 	/* Compute CRC for the address value. */
1324 	crc = 0xFFFFFFFF; /* initial value */
1325 
1326 	for (i = 0; i < 6; i++) {
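	/*
	 * Bit-serial CRC-32: after the left shift bit 0 of crc is clear, so
	 * XORing with 0x04c11db6 and then OR-ing in carry (always 1 here) is
	 * equivalent to XORing with the standard polynomial 0x04c11db7.
	 */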
1327 		c = *(addr + i);
1328 		for (j = 0; j < 8; j++) {
1329 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
1330 			crc <<= 1;
1331 			c >>= 1;
1332 			if (carry)
1333 				crc = (crc ^ 0x04c11db6) | carry;
1334 		}
1335 	}
1336 
1337 	return ((crc >> 26) & 0x3F);
1338 }
1339 
1340 
1341 /*
1342  * Synopsis: Start the receive process, and the transmit process if packets are pending.
1343  */
1344 static void
1345 epic_start_activity(sc)
1346 	epic_softc_t *sc;
1347 {
1348 	/* Start rx process */
1349 	CSR_WRITE_4(sc, COMMAND,
1350 		COMMAND_RXQUEUED | COMMAND_START_RX |
1351 		(sc->pending_txs?COMMAND_TXQUEUED:0));
1352 }
1353 
1354 /*
1355  * Synopsis: Completely stop the Rx and Tx processes. If TQE is set, an
1356  * additional packet needs to be queued to stop Tx DMA.
1357  */
1358 static void
1359 epic_stop_activity(sc)
1360 	epic_softc_t *sc;
1361 {
1362 	int status, i;
1363 
1364 	/* Stop Tx and Rx DMA */
1365 	CSR_WRITE_4(sc, COMMAND,
1366 	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);
1367 
1368 	/* Wait for Rx and Tx DMA to stop (why 1 ms ??? XXX) */
1369 	for (i=0; i<0x1000; i++) {
1370 		status = CSR_READ_4(sc, INTSTAT) & (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
1371 		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
1372 			break;
1373 		DELAY(1);
1374 	}
1375 
1376 	/* Catch all finished packets */
1377 	epic_rx_done(sc);
1378 	epic_tx_done(sc);
1379 
1380 	status = CSR_READ_4(sc, INTSTAT);
1381 
1382 	if ((status & INTSTAT_RXIDLE) == 0)
1383 		device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");
1384 
1385 	if ((status & INTSTAT_TXIDLE) == 0)
1386 		device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");
1387 
1388 	/*
1389 	 * We may need to queue one more packet if TQE is set; this is a rare
1390 	 * but real case.
1391 	 */
1392 	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
1393 		(void) epic_queue_last_packet(sc);
1394 
1395 }
1396 
1397 /*
1398  * The EPIC transmitter may get stuck in the TQE state. It will not go IDLE
1399  * until a packet from the current descriptor has been copied to internal
1400  * RAM. We compose a dummy packet here and queue it for transmission.
1401  *
1402  * XXX the packet will then actually be sent over the network...
1403  */
1404 static int
1405 epic_queue_last_packet(sc)
1406 	epic_softc_t *sc;
1407 {
1408 	struct epic_tx_desc *desc;
1409 	struct epic_frag_list *flist;
1410 	struct epic_tx_buffer *buf;
1411 	struct mbuf *m0;
1412 	int i;
1413 
1414 	device_printf(sc->dev, "queue last packet\n");
1415 
1416 	desc = sc->tx_desc + sc->cur_tx;
1417 	flist = sc->tx_flist + sc->cur_tx;
1418 	buf = sc->tx_buffer + sc->cur_tx;
1419 
1420 	if ((desc->status & 0x8000) || (buf->mbuf != NULL))
1421 		return (EBUSY);
1422 
1423 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
1424 	if (NULL == m0)
1425 		return (ENOBUFS);
1426 
1427 	/* Prepare mbuf */
1428 	m0->m_len = min(MHLEN, ETHER_MIN_LEN-ETHER_CRC_LEN);
1429 	flist->frag[0].fraglen = m0->m_len;
1430 	m0->m_pkthdr.len = m0->m_len;
1431 	m0->m_pkthdr.rcvif = &sc->sc_if;
1432 	bzero(mtod(m0,caddr_t), m0->m_len);
1433 
1434 	/* Fill fragments list */
1435 	flist->frag[0].fraglen = m0->m_len;
1436 	flist->frag[0].fragaddr = vtophys(mtod(m0, caddr_t));
1437 	flist->numfrags = 1;
1438 
1439 	/* Fill in descriptor */
1440 	buf->mbuf = m0;
1441 	sc->pending_txs++;
1442 	sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
1443 	desc->control = 0x01;
1444 	desc->txlength = max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
1445 	desc->status = 0x8000;
1446 
1447 	/* Launch transmission */
1448 	CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED);
1449 
1450 	/* Wait for Tx DMA to stop (for how long??? XXX) */
1451 	for (i=0; i<1000; i++) {
1452 		if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE)
1453 			break;
1454 		DELAY(1);
1455 	}
1456 
1457 	if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0)
1458 		device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n");
1459 	else
1460 		epic_tx_done(sc);
1461 
1462 	return 0;
1463 }
1464 
1465 /*
1466  * Synopsis: Shut down the board and deallocate the rings.
1467  */
1468 static void
1469 epic_stop(sc)
1470 	epic_softc_t *sc;
1471 {
1472 	int s;
1473 
1474 	s = splimp();
1475 
1476 	sc->sc_if.if_timer = 0;
1477 
1478 	untimeout((timeout_t *)epic_stats_update, sc, sc->stat_ch);
1479 
1480 	/* Disable interrupts */
1481 	CSR_WRITE_4(sc, INTMASK, 0);
1482 	CSR_WRITE_4(sc, GENCTL, 0);
1483 
1484 	/* Try to stop Rx and TX processes */
1485 	epic_stop_activity(sc);
1486 
1487 	/* Reset chip */
1488 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
1489 	DELAY(1000);
1490 
1491 	/* Make chip go to bed */
1492 	CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN);
1493 
1494 	/* Free memory allocated for rings */
1495 	epic_free_rings(sc);
1496 
1497 	/* Mark as stopped */
1498 	sc->sc_if.if_flags &= ~IFF_RUNNING;
1499 
1500 	splx(s);
1501 	return;
1502 }
1503 
1504 /*
1505  * Synopsis: This function should free all memory allocated for rings.
1506  */
1507 static void
1508 epic_free_rings(sc)
1509 	epic_softc_t *sc;
1510 {
1511 	int i;
1512 
1513 	for (i=0; i<RX_RING_SIZE; i++) {
1514 		struct epic_rx_buffer *buf = sc->rx_buffer + i;
1515 		struct epic_rx_desc *desc = sc->rx_desc + i;
1516 
1517 		desc->status = 0;
1518 		desc->buflength = 0;
1519 		desc->bufaddr = 0;
1520 
1521 		if (buf->mbuf) m_freem(buf->mbuf);
1522 		buf->mbuf = NULL;
1523 	}
1524 
1525 	for (i=0; i<TX_RING_SIZE; i++) {
1526 		struct epic_tx_buffer *buf = sc->tx_buffer + i;
1527 		struct epic_tx_desc *desc = sc->tx_desc + i;
1528 
1529 		desc->status = 0;
1530 		desc->buflength = 0;
1531 		desc->bufaddr = 0;
1532 
1533 		if (buf->mbuf) m_freem(buf->mbuf);
1534 		buf->mbuf = NULL;
1535 	}
1536 }
1537 
1538 /*
1539  * Synopsis: Allocate mbufs for the Rx ring and point the Rx descriptors at
1540  * them. Point the Tx descriptors at the fragment lists. Check that all
1541  * descriptors and fraglists are bounded and aligned properly.
1542  */
1543 static int
1544 epic_init_rings(sc)
1545 	epic_softc_t *sc;
1546 {
1547 	int i;
1548 
1549 	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;
1550 
1551 	for (i = 0; i < RX_RING_SIZE; i++) {
1552 		struct epic_rx_buffer *buf = sc->rx_buffer + i;
1553 		struct epic_rx_desc *desc = sc->rx_desc + i;
1554 
1555 		desc->status = 0;		/* Owned by driver */
1556 		desc->next = vtophys(sc->rx_desc + ((i+1) & RX_RING_MASK));
1557 
1558 		if ((desc->next & 3) ||
1559 		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
1560 			epic_free_rings(sc);
1561 			return EFAULT;
1562 		}
1563 
1564 		EPIC_MGETCLUSTER(buf->mbuf);
1565 		if (NULL == buf->mbuf) {
1566 			epic_free_rings(sc);
1567 			return ENOBUFS;
1568 		}
1569 		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
1570 
1571 		desc->buflength = MCLBYTES;	/* Max RX buffer length */
1572 		desc->status = 0x8000;		/* Set owner bit to NIC */
1573 	}
1574 
1575 	for (i = 0; i < TX_RING_SIZE; i++) {
1576 		struct epic_tx_buffer *buf = sc->tx_buffer + i;
1577 		struct epic_tx_desc *desc = sc->tx_desc + i;
1578 
1579 		desc->status = 0;
1580 		desc->next = vtophys(sc->tx_desc + ((i+1) & TX_RING_MASK));
1581 
1582 		if ((desc->next & 3) ||
1583 		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
1584 			epic_free_rings(sc);
1585 			return EFAULT;
1586 		}
1587 
1588 		buf->mbuf = NULL;
1589 		desc->bufaddr = vtophys(sc->tx_flist + i);
1590 
1591 		if ((desc->bufaddr & 3) ||
1592 		    ((desc->bufaddr & PAGE_MASK) + sizeof(struct epic_frag_list)) > PAGE_SIZE) {
1593 			epic_free_rings(sc);
1594 			return EFAULT;
1595 		}
1596 	}
1597 
1598 	return 0;
1599 }
1600 
1601 /*
1602  * EEPROM operation functions
1603  */
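/*
 * The serial EEPROM is bit-banged through EECTL: epic_eeprom_clock() pulses
 * the clock bit (0x04) while the remaining control/data bits are held in
 * `val', and the data bit coming back from the EEPROM is read as 0x10.
 * Words are shifted out MSB first, writing 0x0B for a `1' data bit and
 * 0x03 for a `0' data bit.
 */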
1604 static void
1605 epic_write_eepromreg(sc, val)
1606 	epic_softc_t *sc;
1607 	u_int8_t val;
1608 {
1609 	u_int16_t i;
1610 
1611 	CSR_WRITE_1(sc, EECTL, val);
1612 
1613 	for (i=0; i<0xFF; i++)
1614 		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0) break;
1615 
1616 	return;
1617 }
1618 
1619 static u_int8_t
1620 epic_read_eepromreg(sc)
1621 	epic_softc_t *sc;
1622 {
1623 	return CSR_READ_1(sc, EECTL);
1624 }
1625 
1626 static u_int8_t
1627 epic_eeprom_clock(sc, val)
1628 	epic_softc_t *sc;
1629 	u_int8_t val;
1630 {
1631 	epic_write_eepromreg(sc, val);
1632 	epic_write_eepromreg(sc, (val | 0x4));
1633 	epic_write_eepromreg(sc, val);
1634 
1635 	return epic_read_eepromreg(sc);
1636 }
1637 
1638 static void
1639 epic_output_eepromw(sc, val)
1640 	epic_softc_t *sc;
1641 	u_int16_t val;
1642 {
1643 	int i;
1644 
1645 	for (i = 0xF; i >= 0; i--) {
1646 		if (val & (1 << i))
1647 			epic_eeprom_clock(sc, 0x0B);
1648 		else
1649 			epic_eeprom_clock(sc, 0x03);
1650 	}
1651 }
1652 
1653 static u_int16_t
1654 epic_input_eepromw(sc)
1655 	epic_softc_t *sc;
1656 {
1657 	u_int16_t retval = 0;
1658 	int i;
1659 
1660 	for (i = 0xF; i >= 0; i--) {
1661 		if (epic_eeprom_clock(sc, 0x3) & 0x10)
1662 			retval |= (1 << i);
1663 	}
1664 
1665 	return retval;
1666 }
1667 
1668 static int
1669 epic_read_eeprom(sc, loc)
1670 	epic_softc_t *sc;
1671 	u_int16_t loc;
1672 {
1673 	u_int16_t dataval;
1674 	u_int16_t read_cmd;
1675 
1676 	epic_write_eepromreg(sc, 3);
1677 
1678 	if (epic_read_eepromreg(sc) & 0x40)
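	/*
	 * Bit 0x40 in EECTL appears to report the EEPROM size: build a READ
	 * command (opcode binary 110) above a 6-bit address for the small
	 * part, or above an 8-bit address for the larger one.
	 */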
1679 		read_cmd = (loc & 0x3F) | 0x180;
1680 	else
1681 		read_cmd = (loc & 0xFF) | 0x600;
1682 
1683 	epic_output_eepromw(sc, read_cmd);
1684 
1685 	dataval = epic_input_eepromw(sc);
1686 
1687 	epic_write_eepromreg(sc, 1);
1688 
1689 	return dataval;
1690 }
1691 
1692 /*
1693  * Here go the MII read/write routines
1694  */
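/*
 * MIICTL layout as used here: the PHY address goes in bits 9-13, the
 * register number in bits 4-8, bit 0 requests a read and bit 1 a write.
 * The routines poll until the chip clears the request bit; data is
 * exchanged through MIIDATA.
 */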
1695 static int
1696 epic_read_phy_reg(sc, phy, reg)
1697 	epic_softc_t *sc;
1698 	int phy, reg;
1699 {
1700 	int i;
1701 
1702 	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));
1703 
1704 	for (i = 0; i < 0x100; i++) {
1705 		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0) break;
1706 		DELAY(1);
1707 	}
1708 
1709 	return (CSR_READ_4(sc, MIIDATA));
1710 }
1711 
1712 static void
1713 epic_write_phy_reg(sc, phy, reg, val)
1714 	epic_softc_t *sc;
1715 	int phy, reg, val;
1716 {
1717 	int i;
1718 
1719 	CSR_WRITE_4(sc, MIIDATA, val);
1720 	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));
1721 
1722 	for(i=0;i<0x100;i++) {
1723 		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0) break;
1724 		DELAY(1);
1725 	}
1726 
1727 	return;
1728 }
1729 
1730 static int
1731 epic_miibus_readreg(dev, phy, reg)
1732 	device_t dev;
1733 	int phy, reg;
1734 {
1735 	epic_softc_t *sc;
1736 
1737 	sc = device_get_softc(dev);
1738 
1739 	return (PHY_READ_2(sc, phy, reg));
1740 }
1741 
1742 static int
1743 epic_miibus_writereg(dev, phy, reg, data)
1744 	device_t dev;
1745 	int phy, reg, data;
1746 {
1747 	epic_softc_t *sc;
1748 
1749 	sc = device_get_softc(dev);
1750 
1751 	PHY_WRITE_2(sc, phy, reg, data);
1752 
1753 	return (0);
1754 }
1755