1 /*-
2  * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org)
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/tx/if_tx.c,v 1.61.2.1 2002/10/29 01:43:49 semenu Exp $
27  * $DragonFly: src/sys/dev/netif/tx/if_tx.c,v 1.2 2003/06/17 04:28:32 dillon Exp $
28  */
29 
30 /*
31  * EtherPower II 10/100 Fast Ethernet (SMC 9432 serie)
32  *
33  * These cards are based on SMC83c17x (EPIC) chip and one of the various
34  * PHYs (QS6612, AC101 and LXT970 were seen). The media support depends on
35  * card model. All cards support 10baseT/UTP and 100baseTX half- and full-
36  * duplex (SMB9432TX). SMC9432BTX also supports 10baseT/BNC. SMC9432FTX also
37  * supports fibre optics.
38  *
39  * Thanks are going to Steve Bauer and Jason Wright.
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sockio.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/socket.h>
49 #include <sys/queue.h>
50 
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 
57 #include <net/bpf.h>
58 
59 #include <net/if_vlan_var.h>
60 
61 #include <vm/vm.h>		/* for vtophys */
62 #include <vm/pmap.h>		/* for vtophys */
63 #include <machine/bus_memio.h>
64 #include <machine/bus_pio.h>
65 #include <machine/bus.h>
66 #include <machine/resource.h>
67 #include <machine/clock.h>	/* for DELAY */
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70 
71 #include <pci/pcireg.h>
72 #include <pci/pcivar.h>
73 
74 #include <dev/mii/mii.h>
75 #include <dev/mii/miivar.h>
76 #include <dev/mii/miidevs.h>
77 
78 #include <dev/mii/lxtphyreg.h>
79 
80 #include "miibus_if.h"
81 
82 #include <dev/tx/if_txreg.h>
83 #include <dev/tx/if_txvar.h>
84 
85 MODULE_DEPEND(tx, miibus, 1, 1, 1);
86 
87 static int epic_ifioctl(register struct ifnet *, u_long, caddr_t);
88 static void epic_intr(void *);
89 static void epic_tx_underrun(epic_softc_t *);
90 static int epic_common_attach(epic_softc_t *);
91 static void epic_ifstart(struct ifnet *);
92 static void epic_ifwatchdog(struct ifnet *);
93 static void epic_stats_update(epic_softc_t *);
94 static int epic_init(epic_softc_t *);
95 static void epic_stop(epic_softc_t *);
96 static void epic_rx_done(epic_softc_t *);
97 static void epic_tx_done(epic_softc_t *);
98 static int epic_init_rings(epic_softc_t *);
99 static void epic_free_rings(epic_softc_t *);
100 static void epic_stop_activity(epic_softc_t *);
101 static int epic_queue_last_packet(epic_softc_t *);
102 static void epic_start_activity(epic_softc_t *);
103 static void epic_set_rx_mode(epic_softc_t *);
104 static void epic_set_tx_mode(epic_softc_t *);
105 static void epic_set_mc_table(epic_softc_t *);
106 static u_int8_t epic_calchash(caddr_t);
107 static int epic_read_eeprom(epic_softc_t *,u_int16_t);
108 static void epic_output_eepromw(epic_softc_t *, u_int16_t);
109 static u_int16_t epic_input_eepromw(epic_softc_t *);
110 static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t);
111 static void epic_write_eepromreg(epic_softc_t *,u_int8_t);
112 static u_int8_t epic_read_eepromreg(epic_softc_t *);
113 
114 static int epic_read_phy_reg(epic_softc_t *, int, int);
115 static void epic_write_phy_reg(epic_softc_t *, int, int, int);
116 
117 static int epic_miibus_readreg(device_t, int, int);
118 static int epic_miibus_writereg(device_t, int, int, int);
119 static void epic_miibus_statchg(device_t);
120 static void epic_miibus_mediainit(device_t);
121 
122 static int epic_ifmedia_upd(struct ifnet *);
123 static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);
124 
125 static int epic_probe(device_t);
126 static int epic_attach(device_t);
127 static void epic_shutdown(device_t);
128 static int epic_detach(device_t);
129 static struct epic_type *epic_devtype(device_t);
130 
131 static device_method_t epic_methods[] = {
132 	/* Device interface */
133 	DEVMETHOD(device_probe,		epic_probe),
134 	DEVMETHOD(device_attach,	epic_attach),
135 	DEVMETHOD(device_detach,	epic_detach),
136 	DEVMETHOD(device_shutdown,	epic_shutdown),
137 
138 	/* MII interface */
139 	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
140 	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
141 	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
142 	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),
143 
144 	{ 0, 0 }
145 };
146 
147 static driver_t epic_driver = {
148 	"tx",
149 	epic_methods,
150 	sizeof(epic_softc_t)
151 };
152 
153 static devclass_t epic_devclass;
154 
155 DRIVER_MODULE(if_tx, pci, epic_driver, epic_devclass, 0, 0);
156 DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);
157 
158 static struct epic_type epic_devs[] = {
159 	{ SMC_VENDORID, SMC_DEVICEID_83C170,
160 		"SMC EtherPower II 10/100" },
161 	{ 0, 0, NULL }
162 };
163 
164 static int
165 epic_probe(dev)
166 	device_t dev;
167 {
168 	struct epic_type *t;
169 
170 	t = epic_devtype(dev);
171 
172 	if (t != NULL) {
173 		device_set_desc(dev, t->name);
174 		return(0);
175 	}
176 
177 	return(ENXIO);
178 }
179 
180 static struct epic_type *
181 epic_devtype(dev)
182 	device_t dev;
183 {
184 	struct epic_type *t;
185 
186 	t = epic_devs;
187 
188 	while(t->name != NULL) {
189 		if ((pci_get_vendor(dev) == t->ven_id) &&
190 		    (pci_get_device(dev) == t->dev_id)) {
191 			return(t);
192 		}
193 		t++;
194 	}
195 	return (NULL);
196 }
197 
198 #if defined(EPIC_USEIOSPACE)
199 #define	EPIC_RES	SYS_RES_IOPORT
200 #define	EPIC_RID	PCIR_BASEIO
201 #else
202 #define	EPIC_RES	SYS_RES_MEMORY
203 #define	EPIC_RID	PCIR_BASEMEM
204 #endif
205 
206 /*
207  * Attach routine: map registers, allocate softc, rings and descriptors.
208  * Reset to known state.
209  */
210 static int
211 epic_attach(dev)
212 	device_t dev;
213 {
214 	struct ifnet *ifp;
215 	epic_softc_t *sc;
216 	u_int32_t command;
217 	int unit, error;
218 	int i, s, rid, tmp;
219 
220 	s = splimp ();
221 
222 	sc = device_get_softc(dev);
223 	unit = device_get_unit(dev);
224 
225 	/* Preinitialize softc structure */
226 	bzero(sc, sizeof(epic_softc_t));
227 	sc->unit = unit;
228 	sc->dev = dev;
229 
230 	/* Fill ifnet structure */
231 	ifp = &sc->sc_if;
232 	ifp->if_unit = unit;
233 	ifp->if_name = "tx";
234 	ifp->if_softc = sc;
235 	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST;
236 	ifp->if_ioctl = epic_ifioctl;
237 	ifp->if_output = ether_output;
238 	ifp->if_start = epic_ifstart;
239 	ifp->if_watchdog = epic_ifwatchdog;
240 	ifp->if_init = (if_init_f_t*)epic_init;
241 	ifp->if_timer = 0;
242 	ifp->if_baudrate = 10000000;
243 	ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;
244 
245 	/* Enable ports, memory and busmastering */
246 	command = pci_read_config(dev, PCIR_COMMAND, 4);
247 	command |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
248 	pci_write_config(dev, PCIR_COMMAND, command, 4);
249 	command = pci_read_config(dev, PCIR_COMMAND, 4);
250 
251 #if defined(EPIC_USEIOSPACE)
252 	if ((command & PCIM_CMD_PORTEN) == 0) {
253 		device_printf(dev, "failed to enable I/O mapping!\n");
254 		error = ENXIO;
255 		goto fail;
256 	}
257 #else
258 	if ((command & PCIM_CMD_MEMEN) == 0) {
259 		device_printf(dev, "failed to enable memory mapping!\n");
260 		error = ENXIO;
261 		goto fail;
262 	}
263 #endif
264 
265 	rid = EPIC_RID;
266 	sc->res = bus_alloc_resource(dev, EPIC_RES, &rid, 0, ~0, 1,
267 	    RF_ACTIVE);
268 
269 	if (sc->res == NULL) {
270 		device_printf(dev, "couldn't map ports/memory\n");
271 		error = ENXIO;
272 		goto fail;
273 	}
274 
275 	sc->sc_st = rman_get_bustag(sc->res);
276 	sc->sc_sh = rman_get_bushandle(sc->res);
277 
278 	/* Allocate interrupt */
279 	rid = 0;
280 	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
281 	    RF_SHAREABLE | RF_ACTIVE);
282 
283 	if (sc->irq == NULL) {
284 		device_printf(dev, "couldn't map interrupt\n");
285 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
286 		error = ENXIO;
287 		goto fail;
288 	}
289 
290 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
291 	    epic_intr, sc, &sc->sc_ih);
292 
293 	if (error) {
294 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
295 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
296 		device_printf(dev, "couldn't set up irq\n");
297 		goto fail;
298 	}
299 
300 	/* Do OS independent part, including chip wakeup and reset */
301 	error = epic_common_attach(sc);
302 	if (error) {
303 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
304 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
305 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
306 		error = ENXIO;
307 		goto fail;
308 	}
309 
310 	/* Do ifmedia setup */
311 	if (mii_phy_probe(dev, &sc->miibus,
312 	    epic_ifmedia_upd, epic_ifmedia_sts)) {
313 		device_printf(dev, "ERROR! MII without any PHY!?\n");
314 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
315 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
316 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
317 		error = ENXIO;
318 		goto fail;
319 	}
320 
321 	/* Display the ethernet address, ... */
322 	device_printf(dev, "address %6D,", sc->sc_macaddr, ":");
323 
324 	/* board type and ... */
325 	printf(" type ");
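	/*
	 * The board type string is stored two ASCII characters per word in
	 * EEPROM words 0x2c-0x31; a space character terminates it.
	 */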
326 	for(i=0x2c;i<0x32;i++) {
327 		tmp = epic_read_eeprom(sc, i);
328 		if (' ' == (u_int8_t)tmp) break;
329 		printf("%c", (u_int8_t)tmp);
330 		tmp >>= 8;
331 		if (' ' == (u_int8_t)tmp) break;
332 		printf("%c", (u_int8_t)tmp);
333 	}
334 	printf("\n");
335 
336 	/* Attach to OS's managers */
337 	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
338 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
339 	callout_handle_init(&sc->stat_ch);
340 
341 fail:
342 	splx(s);
343 
344 	return(error);
345 }
346 
347 /*
348  * Detach driver and free resources
349  */
350 static int
351 epic_detach(dev)
352 	device_t dev;
353 {
354 	struct ifnet *ifp;
355 	epic_softc_t *sc;
356 	int s;
357 
358 	s = splimp();
359 
360 	sc = device_get_softc(dev);
361 	ifp = &sc->arpcom.ac_if;
362 
363 	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
364 
365 	epic_stop(sc);
366 
367 	bus_generic_detach(dev);
368 	device_delete_child(dev, sc->miibus);
369 
370 	bus_teardown_intr(dev, sc->irq, sc->sc_ih);
371 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
372 	bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
373 
374 	free(sc->tx_flist, M_DEVBUF);
375 	free(sc->tx_desc, M_DEVBUF);
376 	free(sc->rx_desc, M_DEVBUF);
377 
378 	splx(s);
379 
380 	return(0);
381 }
382 
383 #undef	EPIC_RES
384 #undef	EPIC_RID
385 
386 /*
387  * Stop all chip I/O so that the kernel's probe routines don't
388  * get confused by errant DMAs when rebooting.
389  */
390 static void
391 epic_shutdown(dev)
392 	device_t dev;
393 {
394 	epic_softc_t *sc;
395 
396 	sc = device_get_softc(dev);
397 
398 	epic_stop(sc);
399 
400 	return;
401 }
402 
403 /*
404  * This is the if_ioctl handler.
405  */
406 static int
407 epic_ifioctl(ifp, command, data)
408 	struct ifnet *ifp;
409 	u_long command;
410 	caddr_t data;
411 {
412 	epic_softc_t *sc = ifp->if_softc;
413 	struct mii_data	*mii;
414 	struct ifreq *ifr = (struct ifreq *) data;
415 	int x, error = 0;
416 
417 	x = splimp();
418 
419 	switch (command) {
420 	case SIOCSIFADDR:
421 	case SIOCGIFADDR:
422 		error = ether_ioctl(ifp, command, data);
423 		break;
424 	case SIOCSIFMTU:
425 		if (ifp->if_mtu == ifr->ifr_mtu)
426 			break;
427 
428 		/* XXX Though the datasheet doesn't imply any
429 		 * limitations on RX and TX sizes besides the max 64Kb
430 		 * DMA transfer, it seems we can't send more than 1600
431 		 * data bytes per ethernet packet. (The transmitter hangs
432 		 * if more data is sent.)
433 		 */
434 		if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
435 			ifp->if_mtu = ifr->ifr_mtu;
436 			epic_stop(sc);
437 			epic_init(sc);
438 		} else
439 			error = EINVAL;
440 		break;
441 
442 	case SIOCSIFFLAGS:
443 		/*
444 		 * If the interface is marked up and stopped, then start it.
445 		 * If it is marked down and running, then stop it.
446 		 */
447 		if (ifp->if_flags & IFF_UP) {
448 			if ((ifp->if_flags & IFF_RUNNING) == 0) {
449 				epic_init(sc);
450 				break;
451 			}
452 		} else {
453 			if (ifp->if_flags & IFF_RUNNING) {
454 				epic_stop(sc);
455 				break;
456 			}
457 		}
458 
459 		/* Handle IFF_PROMISC and IFF_ALLMULTI flags */
460 		epic_stop_activity(sc);
461 		epic_set_mc_table(sc);
462 		epic_set_rx_mode(sc);
463 		epic_start_activity(sc);
464 		break;
465 
466 	case SIOCADDMULTI:
467 	case SIOCDELMULTI:
468 		epic_set_mc_table(sc);
469 		error = 0;
470 		break;
471 
472 	case SIOCSIFMEDIA:
473 	case SIOCGIFMEDIA:
474 		mii = device_get_softc(sc->miibus);
475 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
476 		break;
477 
478 	default:
479 		error = EINVAL;
480 	}
481 	splx(x);
482 
483 	return error;
484 }
485 
486 /*
487  * OS-independent part of the attach process: allocate memory for descriptors
488  * and frag lists, wake up the chip, read the MAC address and PHY identifier.
489  * Return ENOMEM on failure.
490  */
491 static int
492 epic_common_attach(sc)
493 	epic_softc_t *sc;
494 {
495 	int i;
496 
497 	sc->tx_flist = malloc(sizeof(struct epic_frag_list)*TX_RING_SIZE,
498 	    M_DEVBUF, M_NOWAIT | M_ZERO);
499 	sc->tx_desc = malloc(sizeof(struct epic_tx_desc)*TX_RING_SIZE,
500 	    M_DEVBUF, M_NOWAIT | M_ZERO);
501 	sc->rx_desc = malloc(sizeof(struct epic_rx_desc)*RX_RING_SIZE,
502 	    M_DEVBUF, M_NOWAIT | M_ZERO);
503 
504 	if (sc->tx_flist == NULL || sc->tx_desc == NULL || sc->rx_desc == NULL){
505 		device_printf(sc->dev, "failed to malloc memory\n");
506 		if (sc->tx_flist) free(sc->tx_flist, M_DEVBUF);
507 		if (sc->tx_desc) free(sc->tx_desc, M_DEVBUF);
508 		if (sc->rx_desc) free(sc->rx_desc, M_DEVBUF);
509 		return (ENOMEM);
510 	}
511 
512 	/* Bring the chip out of low-power mode. */
513 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
514 	DELAY(500);
515 
516 	/* Workaround for Application Note 7-15 */
517 	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);
518 
519 	/* Read mac address from EEPROM */
520 	for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
521 		((u_int16_t *)sc->sc_macaddr)[i] = epic_read_eeprom(sc,i);
522 
523 	/* Set Non-Volatile Control Register from EEPROM */
524 	CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);
525 
526 	/* Set defaults */
527 	sc->tx_threshold = TRANSMIT_THRESHOLD;
528 	sc->txcon = TXCON_DEFAULT;
529 	sc->miicfg = MIICFG_SMI_ENABLE;
530 	sc->phyid = EPIC_UNKN_PHY;
531 	sc->serinst = -1;
532 
533 	/* Fetch card id */
534 	sc->cardvend = pci_read_config(sc->dev, PCIR_SUBVEND_0, 2);
535 	sc->cardid = pci_read_config(sc->dev, PCIR_SUBDEV_0, 2);
536 
537 	if (sc->cardvend != SMC_VENDORID)
538 		device_printf(sc->dev, "unknown card vendor %04xh\n", sc->cardvend);
539 
540 	return 0;
541 }
542 
543 /*
544  * This is the if_start handler. It takes mbufs from the if_snd queue
545  * and queues them for transmit, one by one, until the TX ring becomes
546  * full or the queue becomes empty.
547  */
548 static void
549 epic_ifstart(ifp)
550 	struct ifnet * ifp;
551 {
552 	epic_softc_t *sc = ifp->if_softc;
553 	struct epic_tx_buffer *buf;
554 	struct epic_tx_desc *desc;
555 	struct epic_frag_list *flist;
556 	struct mbuf *m0;
557 	register struct mbuf *m;
558 	register int i;
559 
560 	while (sc->pending_txs < TX_RING_SIZE) {
561 		buf = sc->tx_buffer + sc->cur_tx;
562 		desc = sc->tx_desc + sc->cur_tx;
563 		flist = sc->tx_flist + sc->cur_tx;
564 
565 		/* Get next packet to send */
566 		IF_DEQUEUE(&ifp->if_snd, m0);
567 
568 		/* If nothing to send, return */
569 		if (NULL == m0) return;
570 
571 		/* Fill fragments list */
572 		for (m = m0, i = 0;
573 		    (NULL != m) && (i < EPIC_MAX_FRAGS);
574 		    m = m->m_next, i++) {
575 			flist->frag[i].fraglen = m->m_len;
576 			flist->frag[i].fragaddr = vtophys(mtod(m, caddr_t));
577 		}
578 		flist->numfrags = i;
579 
580 		/* If the packet had more than EPIC_MAX_FRAGS parts, */
581 		/* copy it into a newly allocated mbuf cluster */
582 		if (NULL != m) {
583 			EPIC_MGETCLUSTER(m);
584 			if (NULL == m) {
585 				m_freem(m0);
586 				ifp->if_oerrors++;
587 				continue;
588 			}
589 
590 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
591 			flist->frag[0].fraglen =
592 			     m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
593 			m->m_pkthdr.rcvif = ifp;
594 
595 			flist->numfrags = 1;
596 			flist->frag[0].fragaddr = vtophys(mtod(m, caddr_t));
597 			m_freem(m0);
598 			m0 = m;
599 		}
600 
601 		buf->mbuf = m0;
602 		sc->pending_txs++;
603 		sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
604 		desc->control = 0x01;
605 		desc->txlength =
606 		    max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
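		/* Setting the 0x8000 owner bit hands the descriptor to the
		 * NIC; the TXQUEUED command below (re)starts the queue. */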
607 		desc->status = 0x8000;
608 		CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);
609 
610 		/* Set watchdog timer */
611 		ifp->if_timer = 8;
612 
613 		if (ifp->if_bpf)
614 			bpf_mtap(ifp, m0);
615 	}
616 
617 	ifp->if_flags |= IFF_OACTIVE;
618 
619 	return;
620 
621 }
622 
623 /*
624  * Synopsis: Finish all received frames.
625  */
626 static void
627 epic_rx_done(sc)
628 	epic_softc_t *sc;
629 {
630 	u_int16_t len;
631 	struct epic_rx_buffer *buf;
632 	struct epic_rx_desc *desc;
633 	struct mbuf *m;
634 	struct ether_header *eh;
635 
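	/* A cleared 0x8000 owner bit means the chip has handed the
	 * descriptor (and the received frame) back to the driver. */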
636 	while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
637 		buf = sc->rx_buffer + sc->cur_rx;
638 		desc = sc->rx_desc + sc->cur_rx;
639 
640 		/* Switch to next descriptor */
641 		sc->cur_rx = (sc->cur_rx+1) & RX_RING_MASK;
642 
643 		/*
644 		 * Check for RX errors. This should only happen if
645 		 * SAVE_ERRORED_PACKETS is set. RX errors usually
646 		 * generate an RXE interrupt.
647 		 */
648 		if ((desc->status & 1) == 0) {
649 			sc->sc_if.if_ierrors++;
650 			desc->status = 0x8000;
651 			continue;
652 		}
653 
654 		/* Save the packet length and the mbuf containing the packet */
655 		len = desc->rxlength - ETHER_CRC_LEN;
656 		m = buf->mbuf;
657 
658 		/* Try to get mbuf cluster */
659 		EPIC_MGETCLUSTER(buf->mbuf);
660 		if (NULL == buf->mbuf) {
661 			buf->mbuf = m;
662 			desc->status = 0x8000;
663 			sc->sc_if.if_ierrors++;
664 			continue;
665 		}
666 
667 		/* Point to new mbuf, and give descriptor to chip */
668 		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
669 		desc->status = 0x8000;
670 
671 		/* First mbuf in packet holds the ethernet and packet headers */
672 		eh = mtod(m, struct ether_header *);
673 		m->m_pkthdr.rcvif = &(sc->sc_if);
674 		m->m_pkthdr.len = m->m_len = len;
675 
676 		/* Skip the ethernet header; the mbuf now holds the packet itself */
677 		m->m_pkthdr.len = m->m_len = len - sizeof(struct ether_header);
678 		m->m_data += sizeof(struct ether_header);
679 
680 		/* Give mbuf to OS */
681 		ether_input(&sc->sc_if, eh, m);
682 
683 		/* Successfully received frame */
684 		sc->sc_if.if_ipackets++;
685 	}
686 
687 	return;
688 }
689 
690 /*
691  * Synopsis: Do the last phase of transmission, i.e. if a descriptor has
692  * been transmitted, decrease the pending_txs counter, free the mbuf
693  * containing the packet, switch to the next descriptor, and repeat until
694  * no packets are pending or a descriptor has not been transmitted yet.
695  */
696 static void
697 epic_tx_done(sc)
698 	epic_softc_t *sc;
699 {
700 	struct epic_tx_buffer *buf;
701 	struct epic_tx_desc *desc;
702 	u_int16_t status;
703 
704 	while (sc->pending_txs > 0) {
705 		buf = sc->tx_buffer + sc->dirty_tx;
706 		desc = sc->tx_desc + sc->dirty_tx;
707 		status = desc->status;
708 
709 		/* If this packet is not transmitted yet, the following */
710 		/* packets are not transmitted either */
711 		if (status & 0x8000) break;
712 
713 		/* Packet is transmitted. Switch to the next */
714 		/* descriptor and free the mbuf */
715 		sc->pending_txs--;
716 		sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
717 		m_freem(buf->mbuf);
718 		buf->mbuf = NULL;
719 
720 		/* Check for errors and collisions */
721 		if (status & 0x0001) sc->sc_if.if_opackets++;
722 		else sc->sc_if.if_oerrors++;
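		/* The driver takes bits 8..12 of the TX status as this
		 * frame's collision count. */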
723 		sc->sc_if.if_collisions += (status >> 8) & 0x1F;
724 #if defined(EPIC_DIAG)
725 		if ((status & 0x1001) == 0x1001)
726 			device_printf(sc->dev,  "Tx ERROR: excessive coll. number\n");
727 #endif
728 	}
729 
730 	if (sc->pending_txs < TX_RING_SIZE)
731 		sc->sc_if.if_flags &= ~IFF_OACTIVE;
732 }
733 
734 /*
735  * Interrupt function
736  */
737 static void
738 epic_intr(arg)
739     void *arg;
740 {
741     epic_softc_t * sc = (epic_softc_t *) arg;
742     int status, i = 4;
743 
744     while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
745 	CSR_WRITE_4(sc, INTSTAT, status);
746 
747 	if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
748 	    epic_rx_done(sc);
749 	    if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
750 #if defined(EPIC_DIAG)
751 		if (status & INTSTAT_OVW)
752 		    device_printf(sc->dev, "RX buffer overflow\n");
753 		if (status & INTSTAT_RQE)
754 		    device_printf(sc->dev, "RX FIFO overflow\n");
755 #endif
756 		if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0)
757 		    CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED);
758 		sc->sc_if.if_ierrors++;
759 	    }
760 	}
761 
762 	if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
763 	    epic_tx_done(sc);
764 	    if (sc->sc_if.if_snd.ifq_head != NULL)
765 		    epic_ifstart(&sc->sc_if);
766 	}
767 
768 	/* Check for rare errors */
769 	if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
770 		      INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
771     	    if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
772 			  INTSTAT_APE|INTSTAT_DPE)) {
773 		device_printf(sc->dev, "PCI fatal errors occurred: %s%s%s%s\n",
774 		    (status&INTSTAT_PMA)?"PMA ":"",
775 		    (status&INTSTAT_PTA)?"PTA ":"",
776 		    (status&INTSTAT_APE)?"APE ":"",
777 		    (status&INTSTAT_DPE)?"DPE":""
778 		);
779 
780 		epic_stop(sc);
781 		epic_init(sc);
782 
783 	    	break;
784 	    }
785 
786 	    if (status & INTSTAT_RXE) {
787 #if defined(EPIC_DIAG)
788 		device_printf(sc->dev, "CRC/Alignment error\n");
789 #endif
790 		sc->sc_if.if_ierrors++;
791 	    }
792 
793 	    if (status & INTSTAT_TXU) {
794 		epic_tx_underrun(sc);
795 		sc->sc_if.if_oerrors++;
796 	    }
797 	}
798     }
799 
800     /* If no packets are pending, then no timeouts */
801     if (sc->pending_txs == 0) sc->sc_if.if_timer = 0;
802 
803     return;
804 }
805 
806 /*
807  * Handle the TX underrun error: increase the TX threshold
808  * and restart the transmitter.
809  */
810 static void
811 epic_tx_underrun(sc)
812 	epic_softc_t *sc;
813 {
814 	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
815 		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
816 #if defined(EPIC_DIAG)
817 		device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
818 #endif
819 	} else {
820 		sc->tx_threshold += 0x40;
821 #if defined(EPIC_DIAG)
822 		device_printf(sc->dev, "Tx UNDERRUN: TX threshold increased to %d\n",
823 		    sc->tx_threshold);
824 #endif
825 	}
826 
827 	/* We must set TXUGO to reset the stuck transmitter */
828 	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);
829 
830 	/* Update the TX threshold */
831 	epic_stop_activity(sc);
832 	epic_set_tx_mode(sc);
833 	epic_start_activity(sc);
834 
835 	return;
836 }
837 
838 /*
839  * Synopsis: This one is called if packets weren't transmitted
840  * within the timeout. Try to reclaim transmitted packets, and
841  * on success continue working.
842  */
843 static void
844 epic_ifwatchdog(ifp)
845 	struct ifnet *ifp;
846 {
847 	epic_softc_t *sc = ifp->if_softc;
848 	int x;
849 
850 	x = splimp();
851 
852 	device_printf(sc->dev, "device timeout %d packets\n", sc->pending_txs);
853 
854 	/* Try to finish queued packets */
855 	epic_tx_done(sc);
856 
857 	/* If not successful */
858 	if (sc->pending_txs > 0) {
859 
860 		ifp->if_oerrors+=sc->pending_txs;
861 
862 		/* Reinitialize board */
863 		device_printf(sc->dev, "reinitialization\n");
864 		epic_stop(sc);
865 		epic_init(sc);
866 
867 	} else
868 		device_printf(sc->dev, "seems we can continue normally\n");
869 
870 	/* Start output */
871 	if (ifp->if_snd.ifq_head) epic_ifstart(ifp);
872 
873 	splx(x);
874 }
875 
876 /*
877  * Despite the name of this function, it doesn't update statistics, it only
878  * helps in the autonegotiation process.
879  */
880 static void
881 epic_stats_update(epic_softc_t * sc)
882 {
883 	struct mii_data * mii;
884 	int s;
885 
886 	s = splimp();
887 
888 	mii = device_get_softc(sc->miibus);
889 	mii_tick(mii);
890 
891 	sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);
892 
893 	splx(s);
894 }
895 
896 /*
897  * Set media options.
898  */
899 static int
900 epic_ifmedia_upd(ifp)
901 	struct ifnet *ifp;
902 {
903 	epic_softc_t *sc;
904 	struct mii_data *mii;
905 	struct ifmedia *ifm;
906 	struct mii_softc *miisc;
907 	int cfg, media;
908 
909 	sc = ifp->if_softc;
910 	mii = device_get_softc(sc->miibus);
911 	ifm = &mii->mii_media;
912 	media = ifm->ifm_cur->ifm_media;
913 
914 	/* Do not do anything if interface is not up */
915 	if ((ifp->if_flags & IFF_UP) == 0)
916 		return (0);
917 
918 	/*
919 	 * Look up the currently selected PHY
920 	 */
921 	if (IFM_INST(media) == sc->serinst) {
922 		sc->phyid = EPIC_SERIAL;
923 		sc->physc = NULL;
924 	} else {
925 		/* If we're not selecting serial interface, select MII mode */
926 		sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
927 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
928 
929 		/* Default to unknown PHY */
930 		sc->phyid = EPIC_UNKN_PHY;
931 
932 		/* Lookup selected PHY */
933 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
934 		     miisc = LIST_NEXT(miisc, mii_list)) {
935 			if (IFM_INST(media) == miisc->mii_inst) {
936 				sc->physc = miisc;
937 				break;
938 			}
939 		}
940 
941 		/* Identify selected PHY */
942 		if (sc->physc) {
943 			int id1, id2, model, oui;
944 
945 			id1 = PHY_READ(sc->physc, MII_PHYIDR1);
946 			id2 = PHY_READ(sc->physc, MII_PHYIDR2);
947 
948 			oui = MII_OUI(id1, id2);
949 			model = MII_MODEL(id2);
950 			switch (oui) {
951 			case MII_OUI_QUALSEMI:
952 				if (model == MII_MODEL_QUALSEMI_QS6612)
953 					sc->phyid = EPIC_QS6612_PHY;
954 				break;
955 			case MII_OUI_xxALTIMA:
956 				if (model == MII_MODEL_xxALTIMA_AC101)
957 					sc->phyid = EPIC_AC101_PHY;
958 				break;
959 			case MII_OUI_xxLEVEL1:
960 				if (model == MII_MODEL_xxLEVEL1_LXT970)
961 					sc->phyid = EPIC_LXT970_PHY;
962 				break;
963 			}
964 		}
965 	}
966 
967 	/*
968 	 * Do PHY specific card setup
969 	 */
970 
971 	/* Call this to isolate all non-selected PHYs and
972 	 * set up the selected one
973 	 */
974 	mii_mediachg(mii);
975 
976 	/* Do our own setup */
977 	switch (sc->phyid) {
978 	case EPIC_QS6612_PHY:
979 		break;
980 	case EPIC_AC101_PHY:
981 		/* We have to power up the fiber transceivers */
982 		if (IFM_SUBTYPE(media) == IFM_100_FX)
983 			sc->miicfg |= MIICFG_694_ENABLE;
984 		else
985 			sc->miicfg &= ~MIICFG_694_ENABLE;
986 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
987 
988 		break;
989 	case EPIC_LXT970_PHY:
990 		/* We have to power up the fiber transceivers */
991 		cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
992 		if (IFM_SUBTYPE(media) == IFM_100_FX)
993 			cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
994 		else
995 			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
996 		PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);
997 
998 		break;
999 	case EPIC_SERIAL:
1000 		/* Select the serial PHY (usually 10Base2/BNC) */
1001 		sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
1002 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
1003 
1004 		/* There is no driver to fill this */
1005 		mii->mii_media_active = media;
1006 		mii->mii_media_status = 0;
1007 
1008 		/* We need to call this manually as it isn't called
1009 		 * by mii_mediachg()
1010 		 */
1011 		epic_miibus_statchg(sc->dev);
1012 
1013 		break;
1014 	default:
1015 		device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
1016 		return (EINVAL);
1017 	}
1018 
1019 	return(0);
1020 }
1021 
1022 /*
1023  * Report current media status.
1024  */
1025 static void
1026 epic_ifmedia_sts(ifp, ifmr)
1027 	struct ifnet *ifp;
1028 	struct ifmediareq *ifmr;
1029 {
1030 	epic_softc_t *sc;
1031 	struct mii_data *mii;
1032 	struct ifmedia *ifm;
1033 
1034 	sc = ifp->if_softc;
1035 	mii = device_get_softc(sc->miibus);
1036 	ifm = &mii->mii_media;
1037 
1038 	/* Nothing should be selected if interface is down */
1039 	if ((ifp->if_flags & IFF_UP) == 0) {
1040 		ifmr->ifm_active = IFM_NONE;
1041 		ifmr->ifm_status = 0;
1042 
1043 		return;
1044 	}
1045 
1046 	/* Call underlying pollstat, if not serial PHY */
1047 	if (sc->phyid != EPIC_SERIAL)
1048 		mii_pollstat(mii);
1049 
1050 	/* Simply copy media info */
1051 	ifmr->ifm_active = mii->mii_media_active;
1052 	ifmr->ifm_status = mii->mii_media_status;
1053 
1054 	return;
1055 }
1056 
1057 /*
1058  * Callback routine, called on media change.
1059  */
1060 static void
1061 epic_miibus_statchg(dev)
1062 	device_t dev;
1063 {
1064 	epic_softc_t *sc;
1065 	struct mii_data *mii;
1066 	int media;
1067 
1068 	sc = device_get_softc(dev);
1069 	mii = device_get_softc(sc->miibus);
1070 	media = mii->mii_media_active;
1071 
1072 	sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);
1073 
1074 	/* If we are in full-duplex mode or loopback operation,
1075 	 * we need to decouple receiver and transmitter.
1076 	 */
1077 	if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
1078  		sc->txcon |= TXCON_FULL_DUPLEX;
1079 
1080 	/* On some cards we need to set the full-duplex LED manually */
1081 	if (sc->cardid == SMC9432FTX ||
1082 	    sc->cardid == SMC9432FTX_SC) {
1083 		if (IFM_OPTIONS(media) & IFM_FDX)
1084 			sc->miicfg |= MIICFG_694_ENABLE;
1085 		else
1086 			sc->miicfg &= ~MIICFG_694_ENABLE;
1087 
1088 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
1089 	}
1090 
1091 	/* Update baudrate */
1092 	if (IFM_SUBTYPE(media) == IFM_100_TX ||
1093 	    IFM_SUBTYPE(media) == IFM_100_FX)
1094 		sc->sc_if.if_baudrate = 100000000;
1095 	else
1096 		sc->sc_if.if_baudrate = 10000000;
1097 
1098 	epic_stop_activity(sc);
1099 	epic_set_tx_mode(sc);
1100 	epic_start_activity(sc);
1101 
1102 	return;
1103 }
1104 
1105 static void
1106 epic_miibus_mediainit(dev)
1107 	device_t dev;
1108 {
1109 	epic_softc_t *sc;
1110 	struct mii_data *mii;
1111 	struct ifmedia *ifm;
1112 	int media;
1113 
1114 	sc = device_get_softc(dev);
1115 	mii = device_get_softc(sc->miibus);
1116 	ifm = &mii->mii_media;
1117 
1118 	/* Add the Serial Media Interface if present; this applies to
1119 	 * the SMC9432BTX series
1120 	 */
1121 	if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
1122 		/* Store its instance */
1123 		sc->serinst = mii->mii_instance++;
1124 
1125 		/* Add as 10base2/BNC media */
1126 		media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
1127 		ifmedia_add(ifm, media, 0, NULL);
1128 
1129 		/* Report to user */
1130 		device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
1131 	}
1132 
1133 	return;
1134 }
1135 
1136 /*
1137  * Reset chip, allocate rings, and update media.
1138  */
1139 static int
1140 epic_init(sc)
1141 	epic_softc_t *sc;
1142 {
1143 	struct ifnet *ifp = &sc->sc_if;
1144 	int s,i;
1145 
1146 	s = splimp();
1147 
1148 	/* If interface is already running, then we need not do anything */
1149 	if (ifp->if_flags & IFF_RUNNING) {
1150 		splx(s);
1151 		return 0;
1152 	}
1153 
1154 	/* Soft reset the chip (we have to power up the card first) */
1155 	CSR_WRITE_4(sc, GENCTL, 0);
1156 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
1157 
1158 	/*
1159 	 * Reset takes 15 PCI ticks, which depends on the PCI bus speed.
1160 	 * Assuming it is >= 33000000 Hz, we have to wait at least 495e-6 sec.
1161 	 */
1162 	DELAY(500);
1163 
1164 	/* Wake up */
1165 	CSR_WRITE_4(sc, GENCTL, 0);
1166 
1167 	/* Workaround for Application Note 7-15 */
1168 	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);
1169 
1170 	/* Initialize rings */
1171 	if (epic_init_rings(sc)) {
1172 		device_printf(sc->dev, "failed to init rings\n");
1173 		splx(s);
1174 		return -1;
1175 	}
1176 
1177 	/* Give rings to EPIC */
1178 	CSR_WRITE_4(sc, PRCDAR, vtophys(sc->rx_desc));
1179 	CSR_WRITE_4(sc, PTCDAR, vtophys(sc->tx_desc));
1180 
1181 	/* Put node address to EPIC */
1182 	CSR_WRITE_4(sc, LAN0, ((u_int16_t *)sc->sc_macaddr)[0]);
1183 	CSR_WRITE_4(sc, LAN1, ((u_int16_t *)sc->sc_macaddr)[1]);
1184 	CSR_WRITE_4(sc, LAN2, ((u_int16_t *)sc->sc_macaddr)[2]);
1185 
1186 	/* Set tx mode, including the transmit threshold */
1187 	epic_set_tx_mode(sc);
1188 
1189 	/* Compute and set RXCON. */
1190 	epic_set_rx_mode(sc);
1191 
1192 	/* Set multicast table */
1193 	epic_set_mc_table(sc);
1194 
1195 	/* Enable interrupts by setting the interrupt mask. */
1196 	CSR_WRITE_4(sc, INTMASK,
1197 		INTSTAT_RCC  | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
1198 		/* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
1199 		INTSTAT_FATAL);
1200 
1201 	/* Acknowledge all pending interrupts */
1202 	CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));
1203 
1204 	/* Enable interrupts, set PCI read multiple, etc. */
1205 	CSR_WRITE_4(sc, GENCTL,
1206 		GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
1207 		GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);
1208 
1209 	/* Mark interface running ... */
1210 	if (ifp->if_flags & IFF_UP) ifp->if_flags |= IFF_RUNNING;
1211 	else ifp->if_flags &= ~IFF_RUNNING;
1212 
1213 	/* ... and free */
1214 	ifp->if_flags &= ~IFF_OACTIVE;
1215 
1216 	/* Start Rx process */
1217 	epic_start_activity(sc);
1218 
1219 	/* Set appropriate media */
1220 	epic_ifmedia_upd(ifp);
1221 
1222 	sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);
1223 
1224 	splx(s);
1225 
1226 	return 0;
1227 }
1228 
1229 /*
1230  * Synopsis: calculate and set Rx mode. Chip must be in idle state to
1231  * access RXCON.
1232  */
1233 static void
1234 epic_set_rx_mode(sc)
1235 	epic_softc_t *sc;
1236 {
1237 	u_int32_t 		flags = sc->sc_if.if_flags;
1238 	u_int32_t 		rxcon = RXCON_DEFAULT;
1239 
1240 #if defined(EPIC_EARLY_RX)
1241 	rxcon |= RXCON_EARLY_RX;
1242 #endif
1243 
1244 	rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;
1245 
1246 	CSR_WRITE_4(sc, RXCON, rxcon);
1247 
1248 	return;
1249 }
1250 
1251 /*
1252  * Synopsis: Set transmit control register. Chip must be in idle state to
1253  * access TXCON.
1254  */
1255 static void
1256 epic_set_tx_mode(sc)
1257 	epic_softc_t *sc;
1258 {
1259 	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
1260 		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);
1261 
1262 	CSR_WRITE_4(sc, TXCON, sc->txcon);
1263 }
1264 
1265 /*
1266  * Synopsis: Program the multicast filter honoring the IFF_ALLMULTI and
1267  * IFF_PROMISC flags. (Note that setting the PROMISC bit in EPIC's RXCON only
1268  * affects individual frames; the multicast filter must be programmed separately.)
1269  *
1270  * Note: EPIC must be in idle state.
1271  */
1272 static void
1273 epic_set_mc_table(sc)
1274 	epic_softc_t *sc;
1275 {
1276 	struct ifnet *ifp = &sc->sc_if;
1277 	struct ifmultiaddr *ifma;
1278 	u_int16_t filter[4];
1279 	u_int8_t h;
1280 
1281 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1282 		CSR_WRITE_4(sc, MC0, 0xFFFF);
1283 		CSR_WRITE_4(sc, MC1, 0xFFFF);
1284 		CSR_WRITE_4(sc, MC2, 0xFFFF);
1285 		CSR_WRITE_4(sc, MC3, 0xFFFF);
1286 
1287 		return;
1288 	}
1289 
1290 	filter[0] = 0;
1291 	filter[1] = 0;
1292 	filter[2] = 0;
1293 	filter[3] = 0;
1294 
1295 #if __FreeBSD_version < 500000
1296 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1297 #else
1298 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1299 #endif
1300 		if (ifma->ifma_addr->sa_family != AF_LINK)
1301 			continue;
1302 		h = epic_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1303 		filter[h >> 4] |= 1 << (h & 0xF);
1304 	}
1305 
1306 	CSR_WRITE_4(sc, MC0, filter[0]);
1307 	CSR_WRITE_4(sc, MC1, filter[1]);
1308 	CSR_WRITE_4(sc, MC2, filter[2]);
1309 	CSR_WRITE_4(sc, MC3, filter[3]);
1310 
1311 	return;
1312 }
1313 
1314 /*
1315  * Synopsis: calculate EPIC's hash of multicast address.
1316  */
1317 static u_int8_t
1318 epic_calchash(addr)
1319 	caddr_t addr;
1320 {
1321 	u_int32_t crc, carry;
1322 	int i, j;
1323 	u_int8_t c;
1324 
1325 	/* Compute CRC for the address value. */
1326 	crc = 0xFFFFFFFF; /* initial value */
1327 
1328 	for (i = 0; i < 6; i++) {
1329 		c = *(addr + i);
1330 		for (j = 0; j < 8; j++) {
1331 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
1332 			crc <<= 1;
1333 			c >>= 1;
1334 			if (carry)
1335 				crc = (crc ^ 0x04c11db6) | carry;
1336 		}
1337 	}
1338 
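	/* Use the upper 6 bits of the CRC as an index into the 64-bit
	 * multicast hash filter (four 16-bit MC registers). */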
1339 	return ((crc >> 26) & 0x3F);
1340 }
1341 
1342 
1343 /*
1344  * Synopsis: Start the receive process, and the transmit process if needed.
1345  */
1346 static void
1347 epic_start_activity(sc)
1348 	epic_softc_t *sc;
1349 {
1350 	/* Start rx process */
1351 	CSR_WRITE_4(sc, COMMAND,
1352 		COMMAND_RXQUEUED | COMMAND_START_RX |
1353 		(sc->pending_txs?COMMAND_TXQUEUED:0));
1354 }
1355 
1356 /*
1357  * Synopsis: Completely stop the Rx and Tx processes. If TQE is set, an
1358  * additional packet needs to be queued to stop Tx DMA.
1359  */
1360 static void
1361 epic_stop_activity(sc)
1362 	epic_softc_t *sc;
1363 {
1364 	int status, i;
1365 
1366 	/* Stop Tx and Rx DMA */
1367 	CSR_WRITE_4(sc, COMMAND,
1368 	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);
1369 
1370 	/* Wait for Rx and Tx DMA to stop (why 1 ms ??? XXX) */
1371 	for (i=0; i<0x1000; i++) {
1372 		status = CSR_READ_4(sc, INTSTAT) & (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
1373 		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
1374 			break;
1375 		DELAY(1);
1376 	}
1377 
1378 	/* Catch all finished packets */
1379 	epic_rx_done(sc);
1380 	epic_tx_done(sc);
1381 
1382 	status = CSR_READ_4(sc, INTSTAT);
1383 
1384 	if ((status & INTSTAT_RXIDLE) == 0)
1385 		device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");
1386 
1387 	if ((status & INTSTAT_TXIDLE) == 0)
1388 		device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");
1389 
1390 	/*
1391 	 * We may need to queue one more packet if TQE is set; this is a
1392 	 * rare but real case.
1393 	 */
1394 	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
1395 		(void) epic_queue_last_packet(sc);
1396 
1397 }
1398 
1399 /*
1400  * The EPIC transmitter may get stuck in the TQE state. It will not go IDLE
1401  * until a packet from the current descriptor has been copied to internal RAM.
1402  * We compose a dummy packet here and queue it for transmission.
1403  *
1404  * XXX the packet will then actually be sent over the network...
1405  */
1406 static int
1407 epic_queue_last_packet(sc)
1408 	epic_softc_t *sc;
1409 {
1410 	struct epic_tx_desc *desc;
1411 	struct epic_frag_list *flist;
1412 	struct epic_tx_buffer *buf;
1413 	struct mbuf *m0;
1414 	int i;
1415 
1416 	device_printf(sc->dev, "queue last packet\n");
1417 
1418 	desc = sc->tx_desc + sc->cur_tx;
1419 	flist = sc->tx_flist + sc->cur_tx;
1420 	buf = sc->tx_buffer + sc->cur_tx;
1421 
1422 	if ((desc->status & 0x8000) || (buf->mbuf != NULL))
1423 		return (EBUSY);
1424 
1425 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
1426 	if (NULL == m0)
1427 		return (ENOBUFS);
1428 
1429 	/* Prepare mbuf */
1430 	m0->m_len = min(MHLEN, ETHER_MIN_LEN-ETHER_CRC_LEN);
1431 	flist->frag[0].fraglen = m0->m_len;
1432 	m0->m_pkthdr.len = m0->m_len;
1433 	m0->m_pkthdr.rcvif = &sc->sc_if;
1434 	bzero(mtod(m0,caddr_t), m0->m_len);
1435 
1436 	/* Fill fragments list */
1437 	flist->frag[0].fraglen = m0->m_len;
1438 	flist->frag[0].fragaddr = vtophys(mtod(m0, caddr_t));
1439 	flist->numfrags = 1;
1440 
1441 	/* Fill in descriptor */
1442 	buf->mbuf = m0;
1443 	sc->pending_txs++;
1444 	sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
1445 	desc->control = 0x01;
1446 	desc->txlength = max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
1447 	desc->status = 0x8000;
1448 
1449 	/* Launch transmission */
1450 	CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED);
1451 
1452 	/* Wait for Tx DMA to stop (for how long??? XXX) */
1453 	for (i=0; i<1000; i++) {
1454 		if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE)
1455 			break;
1456 		DELAY(1);
1457 	}
1458 
1459 	if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0)
1460 		device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n");
1461 	else
1462 		epic_tx_done(sc);
1463 
1464 	return 0;
1465 }
1466 
1467 /*
1468  * Synopsis: Shut down the board and deallocate the rings.
1469  */
1470 static void
1471 epic_stop(sc)
1472 	epic_softc_t *sc;
1473 {
1474 	int s;
1475 
1476 	s = splimp();
1477 
1478 	sc->sc_if.if_timer = 0;
1479 
1480 	untimeout((timeout_t *)epic_stats_update, sc, sc->stat_ch);
1481 
1482 	/* Disable interrupts */
1483 	CSR_WRITE_4(sc, INTMASK, 0);
1484 	CSR_WRITE_4(sc, GENCTL, 0);
1485 
1486 	/* Try to stop Rx and TX processes */
1487 	epic_stop_activity(sc);
1488 
1489 	/* Reset chip */
1490 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
1491 	DELAY(1000);
1492 
1493 	/* Make chip go to bed */
1494 	CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN);
1495 
1496 	/* Free memory allocated for rings */
1497 	epic_free_rings(sc);
1498 
1499 	/* Mark as stopped */
1500 	sc->sc_if.if_flags &= ~IFF_RUNNING;
1501 
1502 	splx(s);
1503 	return;
1504 }
1505 
1506 /*
1507  * Synopsis: This function should free all memory allocated for rings.
1508  */
1509 static void
1510 epic_free_rings(sc)
1511 	epic_softc_t *sc;
1512 {
1513 	int i;
1514 
1515 	for (i=0; i<RX_RING_SIZE; i++) {
1516 		struct epic_rx_buffer *buf = sc->rx_buffer + i;
1517 		struct epic_rx_desc *desc = sc->rx_desc + i;
1518 
1519 		desc->status = 0;
1520 		desc->buflength = 0;
1521 		desc->bufaddr = 0;
1522 
1523 		if (buf->mbuf) m_freem(buf->mbuf);
1524 		buf->mbuf = NULL;
1525 	}
1526 
1527 	for (i=0; i<TX_RING_SIZE; i++) {
1528 		struct epic_tx_buffer *buf = sc->tx_buffer + i;
1529 		struct epic_tx_desc *desc = sc->tx_desc + i;
1530 
1531 		desc->status = 0;
1532 		desc->buflength = 0;
1533 		desc->bufaddr = 0;
1534 
1535 		if (buf->mbuf) m_freem(buf->mbuf);
1536 		buf->mbuf = NULL;
1537 	}
1538 }
1539 
1540 /*
1541  * Synopsis: Allocate mbufs for the Rx ring and point the Rx descriptors
1542  * at them. Point the Tx descriptors at the fragment lists. Check that all
1543  * descriptors and fraglists are bounded and aligned properly.
1544  */
1545 static int
1546 epic_init_rings(sc)
1547 	epic_softc_t *sc;
1548 {
1549 	int i;
1550 
1551 	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;
1552 
1553 	for (i = 0; i < RX_RING_SIZE; i++) {
1554 		struct epic_rx_buffer *buf = sc->rx_buffer + i;
1555 		struct epic_rx_desc *desc = sc->rx_desc + i;
1556 
1557 		desc->status = 0;		/* Owned by driver */
1558 		desc->next = vtophys(sc->rx_desc + ((i+1) & RX_RING_MASK));
1559 
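		/* Descriptors are handed to the chip by physical address, so
		 * each one must be dword-aligned and must not cross a page
		 * boundary. */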
1560 		if ((desc->next & 3) ||
1561 		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
1562 			epic_free_rings(sc);
1563 			return EFAULT;
1564 		}
1565 
1566 		EPIC_MGETCLUSTER(buf->mbuf);
1567 		if (NULL == buf->mbuf) {
1568 			epic_free_rings(sc);
1569 			return ENOBUFS;
1570 		}
1571 		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
1572 
1573 		desc->buflength = MCLBYTES;	/* Max RX buffer length */
1574 		desc->status = 0x8000;		/* Set owner bit to NIC */
1575 	}
1576 
1577 	for (i = 0; i < TX_RING_SIZE; i++) {
1578 		struct epic_tx_buffer *buf = sc->tx_buffer + i;
1579 		struct epic_tx_desc *desc = sc->tx_desc + i;
1580 
1581 		desc->status = 0;
1582 		desc->next = vtophys(sc->tx_desc + ((i+1) & TX_RING_MASK));
1583 
1584 		if ((desc->next & 3) ||
1585 		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
1586 			epic_free_rings(sc);
1587 			return EFAULT;
1588 		}
1589 
1590 		buf->mbuf = NULL;
1591 		desc->bufaddr = vtophys(sc->tx_flist + i);
1592 
1593 		if ((desc->bufaddr & 3) ||
1594 		    ((desc->bufaddr & PAGE_MASK) + sizeof(struct epic_frag_list)) > PAGE_SIZE) {
1595 			epic_free_rings(sc);
1596 			return EFAULT;
1597 		}
1598 	}
1599 
1600 	return 0;
1601 }
1602 
1603 /*
1604  * EEPROM operation functions
1605  */
1606 static void
1607 epic_write_eepromreg(sc, val)
1608 	epic_softc_t *sc;
1609 	u_int8_t val;
1610 {
1611 	u_int16_t i;
1612 
1613 	CSR_WRITE_1(sc, EECTL, val);
1614 
1615 	for (i=0; i<0xFF; i++)
1616 		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0) break;
1617 
1618 	return;
1619 }
1620 
1621 static u_int8_t
1622 epic_read_eepromreg(sc)
1623 	epic_softc_t *sc;
1624 {
1625 	return CSR_READ_1(sc, EECTL);
1626 }
1627 
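/*
 * Pulse the EEPROM serial clock bit (0x04) around the given EECTL value and
 * return the resulting register contents so the caller can sample the
 * serial data-in bit (0x10).
 */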
1628 static u_int8_t
1629 epic_eeprom_clock(sc, val)
1630 	epic_softc_t *sc;
1631 	u_int8_t val;
1632 {
1633 	epic_write_eepromreg(sc, val);
1634 	epic_write_eepromreg(sc, (val | 0x4));
1635 	epic_write_eepromreg(sc, val);
1636 
1637 	return epic_read_eepromreg(sc);
1638 }
1639 
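/*
 * Shift a 16-bit command/value out to the EEPROM, most significant bit
 * first. The two patterns clocked out differ only in bit 0x08, the serial
 * data-out line.
 */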
1640 static void
1641 epic_output_eepromw(sc, val)
1642 	epic_softc_t *sc;
1643 	u_int16_t val;
1644 {
1645 	int i;
1646 
1647 	for (i = 0xF; i >= 0; i--) {
1648 		if (val & (1 << i))
1649 			epic_eeprom_clock(sc, 0x0B);
1650 		else
1651 			epic_eeprom_clock(sc, 0x03);
1652 	}
1653 }
1654 
1655 static u_int16_t
1656 epic_input_eepromw(sc)
1657 	epic_softc_t *sc;
1658 {
1659 	u_int16_t retval = 0;
1660 	int i;
1661 
1662 	for (i = 0xF; i >= 0; i--) {
1663 		if (epic_eeprom_clock(sc, 0x3) & 0x10)
1664 			retval |= (1 << i);
1665 	}
1666 
1667 	return retval;
1668 }
1669 
1670 static int
1671 epic_read_eeprom(sc, loc)
1672 	epic_softc_t *sc;
1673 	u_int16_t loc;
1674 {
1675 	u_int16_t dataval;
1676 	u_int16_t read_cmd;
1677 
1678 	epic_write_eepromreg(sc, 3);
1679 
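	/*
	 * Bit 0x40 of EECTL selects the command format: a 6-bit address for
	 * small EEPROMs, an 8-bit address for large ones.
	 */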
1680 	if (epic_read_eepromreg(sc) & 0x40)
1681 		read_cmd = (loc & 0x3F) | 0x180;
1682 	else
1683 		read_cmd = (loc & 0xFF) | 0x600;
1684 
1685 	epic_output_eepromw(sc, read_cmd);
1686 
1687 	dataval = epic_input_eepromw(sc);
1688 
1689 	epic_write_eepromreg(sc, 1);
1690 
1691 	return dataval;
1692 }
1693 
1694 /*
1695  * Here go the MII read/write routines
1696  */
1697 static int
1698 epic_read_phy_reg(sc, phy, reg)
1699 	epic_softc_t *sc;
1700 	int phy, reg;
1701 {
1702 	int i;
1703 
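	/*
	 * MIICTL encodes the PHY register number starting at bit 4 and the
	 * PHY address starting at bit 9; bit 0 starts a read, bit 1 a write,
	 * and the chip clears the command bit when the transaction completes.
	 */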
1704 	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));
1705 
1706 	for (i = 0; i < 0x100; i++) {
1707 		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0) break;
1708 		DELAY(1);
1709 	}
1710 
1711 	return (CSR_READ_4(sc, MIIDATA));
1712 }
1713 
1714 static void
1715 epic_write_phy_reg(sc, phy, reg, val)
1716 	epic_softc_t *sc;
1717 	int phy, reg, val;
1718 {
1719 	int i;
1720 
1721 	CSR_WRITE_4(sc, MIIDATA, val);
1722 	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));
1723 
1724 	for(i=0;i<0x100;i++) {
1725 		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0) break;
1726 		DELAY(1);
1727 	}
1728 
1729 	return;
1730 }
1731 
1732 static int
1733 epic_miibus_readreg(dev, phy, reg)
1734 	device_t dev;
1735 	int phy, reg;
1736 {
1737 	epic_softc_t *sc;
1738 
1739 	sc = device_get_softc(dev);
1740 
1741 	return (PHY_READ_2(sc, phy, reg));
1742 }
1743 
1744 static int
1745 epic_miibus_writereg(dev, phy, reg, data)
1746 	device_t dev;
1747 	int phy, reg, data;
1748 {
1749 	epic_softc_t *sc;
1750 
1751 	sc = device_get_softc(dev);
1752 
1753 	PHY_WRITE_2(sc, phy, reg, data);
1754 
1755 	return (0);
1756 }
1757