xref: /dragonfly/sys/dev/netif/tx/if_tx.c (revision 984263bc)
1 /*-
2  * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org)
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD: src/sys/dev/tx/if_tx.c,v 1.61.2.1 2002/10/29 01:43:49 semenu Exp $
27  */
28 
29 /*
30  * EtherPower II 10/100 Fast Ethernet (SMC 9432 series)
31  *
32  * These cards are based on the SMC83c17x (EPIC) chip and one of several
33  * PHYs (QS6612, AC101 and LXT970 have been seen). The media support depends
34  * on the card model. All cards support 10baseT/UTP and 100baseTX half- and
35  * full-duplex (SMC9432TX). SMC9432BTX also supports 10baseT/BNC. SMC9432FTX also
36  * supports fibre optics.
37  *
38  * Thanks go to Steve Bauer and Jason Wright.
39  */
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/socket.h>
48 #include <sys/queue.h>
49 
50 #include <net/if.h>
51 #include <net/if_arp.h>
52 #include <net/ethernet.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 
56 #include <net/bpf.h>
57 
58 #include <net/if_vlan_var.h>
59 
60 #include <vm/vm.h>		/* for vtophys */
61 #include <vm/pmap.h>		/* for vtophys */
62 #include <machine/bus_memio.h>
63 #include <machine/bus_pio.h>
64 #include <machine/bus.h>
65 #include <machine/resource.h>
66 #include <machine/clock.h>	/* for DELAY */
67 #include <sys/bus.h>
68 #include <sys/rman.h>
69 
70 #include <pci/pcireg.h>
71 #include <pci/pcivar.h>
72 
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75 #include <dev/mii/miidevs.h>
76 
77 #include <dev/mii/lxtphyreg.h>
78 
79 #include "miibus_if.h"
80 
81 #include <dev/tx/if_txreg.h>
82 #include <dev/tx/if_txvar.h>
83 
84 MODULE_DEPEND(tx, miibus, 1, 1, 1);
85 
86 static int epic_ifioctl(register struct ifnet *, u_long, caddr_t);
87 static void epic_intr(void *);
88 static void epic_tx_underrun(epic_softc_t *);
89 static int epic_common_attach(epic_softc_t *);
90 static void epic_ifstart(struct ifnet *);
91 static void epic_ifwatchdog(struct ifnet *);
92 static void epic_stats_update(epic_softc_t *);
93 static int epic_init(epic_softc_t *);
94 static void epic_stop(epic_softc_t *);
95 static void epic_rx_done(epic_softc_t *);
96 static void epic_tx_done(epic_softc_t *);
97 static int epic_init_rings(epic_softc_t *);
98 static void epic_free_rings(epic_softc_t *);
99 static void epic_stop_activity(epic_softc_t *);
100 static int epic_queue_last_packet(epic_softc_t *);
101 static void epic_start_activity(epic_softc_t *);
102 static void epic_set_rx_mode(epic_softc_t *);
103 static void epic_set_tx_mode(epic_softc_t *);
104 static void epic_set_mc_table(epic_softc_t *);
105 static u_int8_t epic_calchash(caddr_t);
106 static int epic_read_eeprom(epic_softc_t *,u_int16_t);
107 static void epic_output_eepromw(epic_softc_t *, u_int16_t);
108 static u_int16_t epic_input_eepromw(epic_softc_t *);
109 static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t);
110 static void epic_write_eepromreg(epic_softc_t *,u_int8_t);
111 static u_int8_t epic_read_eepromreg(epic_softc_t *);
112 
113 static int epic_read_phy_reg(epic_softc_t *, int, int);
114 static void epic_write_phy_reg(epic_softc_t *, int, int, int);
115 
116 static int epic_miibus_readreg(device_t, int, int);
117 static int epic_miibus_writereg(device_t, int, int, int);
118 static void epic_miibus_statchg(device_t);
119 static void epic_miibus_mediainit(device_t);
120 
121 static int epic_ifmedia_upd(struct ifnet *);
122 static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *);
123 
124 static int epic_probe(device_t);
125 static int epic_attach(device_t);
126 static void epic_shutdown(device_t);
127 static int epic_detach(device_t);
128 static struct epic_type *epic_devtype(device_t);
129 
130 static device_method_t epic_methods[] = {
131 	/* Device interface */
132 	DEVMETHOD(device_probe,		epic_probe),
133 	DEVMETHOD(device_attach,	epic_attach),
134 	DEVMETHOD(device_detach,	epic_detach),
135 	DEVMETHOD(device_shutdown,	epic_shutdown),
136 
137 	/* MII interface */
138 	DEVMETHOD(miibus_readreg,	epic_miibus_readreg),
139 	DEVMETHOD(miibus_writereg,	epic_miibus_writereg),
140 	DEVMETHOD(miibus_statchg,	epic_miibus_statchg),
141 	DEVMETHOD(miibus_mediainit,	epic_miibus_mediainit),
142 
143 	{ 0, 0 }
144 };
145 
146 static driver_t epic_driver = {
147 	"tx",
148 	epic_methods,
149 	sizeof(epic_softc_t)
150 };
151 
152 static devclass_t epic_devclass;
153 
154 DRIVER_MODULE(if_tx, pci, epic_driver, epic_devclass, 0, 0);
155 DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0);
156 
157 static struct epic_type epic_devs[] = {
158 	{ SMC_VENDORID, SMC_DEVICEID_83C170,
159 		"SMC EtherPower II 10/100" },
160 	{ 0, 0, NULL }
161 };
162 
163 static int
164 epic_probe(dev)
165 	device_t dev;
166 {
167 	struct epic_type *t;
168 
169 	t = epic_devtype(dev);
170 
171 	if (t != NULL) {
172 		device_set_desc(dev, t->name);
173 		return(0);
174 	}
175 
176 	return(ENXIO);
177 }
178 
179 static struct epic_type *
180 epic_devtype(dev)
181 	device_t dev;
182 {
183 	struct epic_type *t;
184 
185 	t = epic_devs;
186 
187 	while(t->name != NULL) {
188 		if ((pci_get_vendor(dev) == t->ven_id) &&
189 		    (pci_get_device(dev) == t->dev_id)) {
190 			return(t);
191 		}
192 		t++;
193 	}
194 	return (NULL);
195 }
196 
197 #if defined(EPIC_USEIOSPACE)
198 #define	EPIC_RES	SYS_RES_IOPORT
199 #define	EPIC_RID	PCIR_BASEIO
200 #else
201 #define	EPIC_RES	SYS_RES_MEMORY
202 #define	EPIC_RID	PCIR_BASEMEM
203 #endif
204 
205 /*
206  * Attach routine: map registers, allocate softc, rings and descriptors.
207  * Reset to known state.
208  */
209 static int
210 epic_attach(dev)
211 	device_t dev;
212 {
213 	struct ifnet *ifp;
214 	epic_softc_t *sc;
215 	u_int32_t command;
216 	int unit, error;
217 	int i, s, rid, tmp;
218 
219 	s = splimp ();
220 
221 	sc = device_get_softc(dev);
222 	unit = device_get_unit(dev);
223 
224 	/* Preinitialize softc structure */
225 	bzero(sc, sizeof(epic_softc_t));
226 	sc->unit = unit;
227 	sc->dev = dev;
228 
229 	/* Fill ifnet structure */
230 	ifp = &sc->sc_if;
231 	ifp->if_unit = unit;
232 	ifp->if_name = "tx";
233 	ifp->if_softc = sc;
234 	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST;
235 	ifp->if_ioctl = epic_ifioctl;
236 	ifp->if_output = ether_output;
237 	ifp->if_start = epic_ifstart;
238 	ifp->if_watchdog = epic_ifwatchdog;
239 	ifp->if_init = (if_init_f_t*)epic_init;
240 	ifp->if_timer = 0;
241 	ifp->if_baudrate = 10000000;
242 	ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;
243 
244 	/* Enable ports, memory and busmastering */
245 	command = pci_read_config(dev, PCIR_COMMAND, 4);
246 	command |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
247 	pci_write_config(dev, PCIR_COMMAND, command, 4);
248 	command = pci_read_config(dev, PCIR_COMMAND, 4);
249 
250 #if defined(EPIC_USEIOSPACE)
251 	if ((command & PCIM_CMD_PORTEN) == 0) {
252 		device_printf(dev, "failed to enable I/O mapping!\n");
253 		error = ENXIO;
254 		goto fail;
255 	}
256 #else
257 	if ((command & PCIM_CMD_MEMEN) == 0) {
258 		device_printf(dev, "failed to enable memory mapping!\n");
259 		error = ENXIO;
260 		goto fail;
261 	}
262 #endif
263 
264 	rid = EPIC_RID;
265 	sc->res = bus_alloc_resource(dev, EPIC_RES, &rid, 0, ~0, 1,
266 	    RF_ACTIVE);
267 
268 	if (sc->res == NULL) {
269 		device_printf(dev, "couldn't map ports/memory\n");
270 		error = ENXIO;
271 		goto fail;
272 	}
273 
274 	sc->sc_st = rman_get_bustag(sc->res);
275 	sc->sc_sh = rman_get_bushandle(sc->res);
276 
277 	/* Allocate interrupt */
278 	rid = 0;
279 	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
280 	    RF_SHAREABLE | RF_ACTIVE);
281 
282 	if (sc->irq == NULL) {
283 		device_printf(dev, "couldn't map interrupt\n");
284 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
285 		error = ENXIO;
286 		goto fail;
287 	}
288 
289 	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
290 	    epic_intr, sc, &sc->sc_ih);
291 
292 	if (error) {
293 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
294 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
295 		device_printf(dev, "couldn't set up irq\n");
296 		goto fail;
297 	}
298 
299 	/* Do the OS-independent part, including chip wakeup and reset */
300 	error = epic_common_attach(sc);
301 	if (error) {
302 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
303 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
304 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
305 		error = ENXIO;
306 		goto fail;
307 	}
308 
309 	/* Do ifmedia setup */
310 	if (mii_phy_probe(dev, &sc->miibus,
311 	    epic_ifmedia_upd, epic_ifmedia_sts)) {
312 		device_printf(dev, "ERROR! MII without any PHY!?\n");
313 		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
314 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
315 		bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
316 		error = ENXIO;
317 		goto fail;
318 	}
319 
320 	/* Display ethernet address, ... */
321 	device_printf(dev, "address %6D,", sc->sc_macaddr, ":");
322 
323 	/* board type and ... */
324 	printf(" type ");
325 	for(i=0x2c;i<0x32;i++) {
326 		tmp = epic_read_eeprom(sc, i);
327 		if (' ' == (u_int8_t)tmp) break;
328 		printf("%c", (u_int8_t)tmp);
329 		tmp >>= 8;
330 		if (' ' == (u_int8_t)tmp) break;
331 		printf("%c", (u_int8_t)tmp);
332 	}
333 	printf("\n");
334 
335 	/* Attach to OS's managers */
336 	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
337 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
338 	callout_handle_init(&sc->stat_ch);
339 
340 fail:
341 	splx(s);
342 
343 	return(error);
344 }
345 
346 /*
347  * Detach driver and free resources
348  */
349 static int
350 epic_detach(dev)
351 	device_t dev;
352 {
353 	struct ifnet *ifp;
354 	epic_softc_t *sc;
355 	int s;
356 
357 	s = splimp();
358 
359 	sc = device_get_softc(dev);
360 	ifp = &sc->arpcom.ac_if;
361 
362 	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
363 
364 	epic_stop(sc);
365 
366 	bus_generic_detach(dev);
367 	device_delete_child(dev, sc->miibus);
368 
369 	bus_teardown_intr(dev, sc->irq, sc->sc_ih);
370 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
371 	bus_release_resource(dev, EPIC_RES, EPIC_RID, sc->res);
372 
373 	free(sc->tx_flist, M_DEVBUF);
374 	free(sc->tx_desc, M_DEVBUF);
375 	free(sc->rx_desc, M_DEVBUF);
376 
377 	splx(s);
378 
379 	return(0);
380 }
381 
382 #undef	EPIC_RES
383 #undef	EPIC_RID
384 
385 /*
386  * Stop all chip I/O so that the kernel's probe routines don't
387  * get confused by errant DMAs when rebooting.
388  */
389 static void
390 epic_shutdown(dev)
391 	device_t dev;
392 {
393 	epic_softc_t *sc;
394 
395 	sc = device_get_softc(dev);
396 
397 	epic_stop(sc);
398 
399 	return;
400 }
401 
402 /*
403  * This is the if_ioctl handler.
404  */
405 static int
406 epic_ifioctl(ifp, command, data)
407 	struct ifnet *ifp;
408 	u_long command;
409 	caddr_t data;
410 {
411 	epic_softc_t *sc = ifp->if_softc;
412 	struct mii_data	*mii;
413 	struct ifreq *ifr = (struct ifreq *) data;
414 	int x, error = 0;
415 
416 	x = splimp();
417 
418 	switch (command) {
419 	case SIOCSIFADDR:
420 	case SIOCGIFADDR:
421 		error = ether_ioctl(ifp, command, data);
422 		break;
423 	case SIOCSIFMTU:
424 		if (ifp->if_mtu == ifr->ifr_mtu)
425 			break;
426 
427 		/* XXX Though the datasheet doesn't imply any
428 		 * limitations on RX and TX sizes besides the max 64Kb
429 		 * DMA transfer, it seems we can't send more than 1600
430 		 * data bytes per ethernet packet. (The transmitter hangs
431 		 * up if more data is sent.)
432 		 */
433 		if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
434 			ifp->if_mtu = ifr->ifr_mtu;
435 			epic_stop(sc);
436 			epic_init(sc);
437 		} else
438 			error = EINVAL;
439 		break;
440 
441 	case SIOCSIFFLAGS:
442 		/*
443 		 * If the interface is marked up and stopped, then start it.
444 		 * If it is marked down and running, then stop it.
445 		 */
446 		if (ifp->if_flags & IFF_UP) {
447 			if ((ifp->if_flags & IFF_RUNNING) == 0) {
448 				epic_init(sc);
449 				break;
450 			}
451 		} else {
452 			if (ifp->if_flags & IFF_RUNNING) {
453 				epic_stop(sc);
454 				break;
455 			}
456 		}
457 
458 		/* Handle IFF_PROMISC and IFF_ALLMULTI flags */
459 		epic_stop_activity(sc);
460 		epic_set_mc_table(sc);
461 		epic_set_rx_mode(sc);
462 		epic_start_activity(sc);
463 		break;
464 
465 	case SIOCADDMULTI:
466 	case SIOCDELMULTI:
467 		epic_set_mc_table(sc);
468 		error = 0;
469 		break;
470 
471 	case SIOCSIFMEDIA:
472 	case SIOCGIFMEDIA:
473 		mii = device_get_softc(sc->miibus);
474 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
475 		break;
476 
477 	default:
478 		error = EINVAL;
479 	}
480 	splx(x);
481 
482 	return error;
483 }
484 
485 /*
486  * OS-independent part of the attach process: allocate memory for descriptors
487  * and frag lists, wake up the chip, read the MAC address and PHY identifier.
488  * Return ENOMEM on failure.
489  */
490 static int
491 epic_common_attach(sc)
492 	epic_softc_t *sc;
493 {
494 	int i;
495 
496 	sc->tx_flist = malloc(sizeof(struct epic_frag_list)*TX_RING_SIZE,
497 	    M_DEVBUF, M_NOWAIT | M_ZERO);
498 	sc->tx_desc = malloc(sizeof(struct epic_tx_desc)*TX_RING_SIZE,
499 	    M_DEVBUF, M_NOWAIT | M_ZERO);
500 	sc->rx_desc = malloc(sizeof(struct epic_rx_desc)*RX_RING_SIZE,
501 	    M_DEVBUF, M_NOWAIT | M_ZERO);
502 
503 	if (sc->tx_flist == NULL || sc->tx_desc == NULL || sc->rx_desc == NULL){
504 		device_printf(sc->dev, "failed to malloc memory\n");
505 		if (sc->tx_flist) free(sc->tx_flist, M_DEVBUF);
506 		if (sc->tx_desc) free(sc->tx_desc, M_DEVBUF);
507 		if (sc->rx_desc) free(sc->rx_desc, M_DEVBUF);
508 		return (ENOMEM);
509 	}
510 
511 	/* Bring the chip out of low-power mode. */
512 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
513 	DELAY(500);
514 
515 	/* Workaround for Application Note 7-15 */
516 	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);
517 
518 	/* Read mac address from EEPROM */
519 	for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
520 		((u_int16_t *)sc->sc_macaddr)[i] = epic_read_eeprom(sc,i);
521 
522 	/* Set Non-Volatile Control Register from EEPROM */
523 	CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);
524 
525 	/* Set defaults */
526 	sc->tx_threshold = TRANSMIT_THRESHOLD;
527 	sc->txcon = TXCON_DEFAULT;
528 	sc->miicfg = MIICFG_SMI_ENABLE;
529 	sc->phyid = EPIC_UNKN_PHY;
530 	sc->serinst = -1;
531 
532 	/* Fetch card id */
533 	sc->cardvend = pci_read_config(sc->dev, PCIR_SUBVEND_0, 2);
534 	sc->cardid = pci_read_config(sc->dev, PCIR_SUBDEV_0, 2);
535 
536 	if (sc->cardvend != SMC_VENDORID)
537 		device_printf(sc->dev, "unknown card vendor %04xh\n", sc->cardvend);
538 
539 	return 0;
540 }
541 
542 /*
543  * This is the if_start handler. It takes mbufs from the if_snd queue
544  * and queues them for transmit, one by one, until the TX ring becomes full
545  * or the queue becomes empty.
546  */
547 static void
548 epic_ifstart(ifp)
549 	struct ifnet * ifp;
550 {
551 	epic_softc_t *sc = ifp->if_softc;
552 	struct epic_tx_buffer *buf;
553 	struct epic_tx_desc *desc;
554 	struct epic_frag_list *flist;
555 	struct mbuf *m0;
556 	register struct mbuf *m;
557 	register int i;
558 
559 	while (sc->pending_txs < TX_RING_SIZE) {
560 		buf = sc->tx_buffer + sc->cur_tx;
561 		desc = sc->tx_desc + sc->cur_tx;
562 		flist = sc->tx_flist + sc->cur_tx;
563 
564 		/* Get next packet to send */
565 		IF_DEQUEUE(&ifp->if_snd, m0);
566 
567 		/* If nothing to send, return */
568 		if (NULL == m0) return;
569 
570 		/* Fill fragments list */
571 		for (m = m0, i = 0;
572 		    (NULL != m) && (i < EPIC_MAX_FRAGS);
573 		    m = m->m_next, i++) {
574 			flist->frag[i].fraglen = m->m_len;
575 			flist->frag[i].fragaddr = vtophys(mtod(m, caddr_t));
576 		}
577 		flist->numfrags = i;
578 
579 		/* If the packet had more than EPIC_MAX_FRAGS parts, */
580 		/* copy it into a newly allocated mbuf cluster */
581 		if (NULL != m) {
582 			EPIC_MGETCLUSTER(m);
583 			if (NULL == m) {
584 				m_freem(m0);
585 				ifp->if_oerrors++;
586 				continue;
587 			}
588 
589 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
590 			flist->frag[0].fraglen =
591 			     m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
592 			m->m_pkthdr.rcvif = ifp;
593 
594 			flist->numfrags = 1;
595 			flist->frag[0].fragaddr = vtophys(mtod(m, caddr_t));
596 			m_freem(m0);
597 			m0 = m;
598 		}
599 
600 		buf->mbuf = m0;
601 		sc->pending_txs++;
602 		sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
603 		desc->control = 0x01;
604 		desc->txlength =
605 		    max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
606 		desc->status = 0x8000;
607 		CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);
608 
609 		/* Set watchdog timer */
610 		ifp->if_timer = 8;
611 
612 		if (ifp->if_bpf)
613 			bpf_mtap(ifp, m0);
614 	}
615 
616 	ifp->if_flags |= IFF_OACTIVE;
617 
618 	return;
619 
620 }
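/*
 * Note (added explanatory comment, not in the original driver): the handoff
 * above follows a simple ownership protocol.  The driver fills the fragment
 * list, passes the descriptor to the chip by setting the owner bit
 * (desc->status = 0x8000) and issues COMMAND_TXQUEUED; the chip clears the
 * owner bit once the packet has been transmitted, and epic_tx_done() later
 * reaps such descriptors and frees their mbufs.
 */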
621 
622 /*
623  * Synopsis: Finish all received frames.
624  */
625 static void
626 epic_rx_done(sc)
627 	epic_softc_t *sc;
628 {
629 	u_int16_t len;
630 	struct epic_rx_buffer *buf;
631 	struct epic_rx_desc *desc;
632 	struct mbuf *m;
633 	struct ether_header *eh;
634 
635 	while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
636 		buf = sc->rx_buffer + sc->cur_rx;
637 		desc = sc->rx_desc + sc->cur_rx;
638 
639 		/* Switch to next descriptor */
640 		sc->cur_rx = (sc->cur_rx+1) & RX_RING_MASK;
641 
642 		/*
643 		 * Check for RX errors. This should only happen if
644 		 * SAVE_ERRORED_PACKETS is set. RX errors generate
645 		 * RXE interrupt usually.
646 		 */
647 		if ((desc->status & 1) == 0) {
648 			sc->sc_if.if_ierrors++;
649 			desc->status = 0x8000;
650 			continue;
651 		}
652 
653 		/* Save packet length and the mbuf containing the packet */
654 		len = desc->rxlength - ETHER_CRC_LEN;
655 		m = buf->mbuf;
656 
657 		/* Try to get mbuf cluster */
658 		EPIC_MGETCLUSTER(buf->mbuf);
659 		if (NULL == buf->mbuf) {
660 			buf->mbuf = m;
661 			desc->status = 0x8000;
662 			sc->sc_if.if_ierrors++;
663 			continue;
664 		}
665 
666 		/* Point to new mbuf, and give descriptor to chip */
667 		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
668 		desc->status = 0x8000;
669 
670 		/* First mbuf in packet holds the ethernet and packet headers */
671 		eh = mtod(m, struct ether_header *);
672 		m->m_pkthdr.rcvif = &(sc->sc_if);
673 		m->m_pkthdr.len = m->m_len = len;
674 
675 		/* The rest of the mbuf holds the packet itself */
676 		m->m_pkthdr.len = m->m_len = len - sizeof(struct ether_header);
677 		m->m_data += sizeof(struct ether_header);
678 
679 		/* Give mbuf to OS */
680 		ether_input(&sc->sc_if, eh, m);
681 
682 		/* Successfully received frame */
683 		sc->sc_if.if_ipackets++;
684 	}
685 
686 	return;
687 }
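/*
 * Note (added explanatory comment, not in the original driver): each Rx
 * descriptor owns one mbuf cluster.  When a frame completes, the filled mbuf
 * is handed to the stack via ether_input() and immediately replaced with a
 * freshly allocated cluster; the descriptor is returned to the chip by
 * setting the owner bit (desc->status = 0x8000).  If no replacement cluster
 * is available, the frame is dropped and the old mbuf is reused, so the ring
 * never runs dry.
 */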
688 
689 /*
690  * Synopsis: Do the last phase of transmission, i.e. if a descriptor has
691  * been transmitted, decrease the pending_txs counter, free the mbuf that
692  * contained the packet, switch to the next descriptor and repeat until no
693  * packets are pending or a descriptor has not been transmitted yet.
694  */
695 static void
696 epic_tx_done(sc)
697 	epic_softc_t *sc;
698 {
699 	struct epic_tx_buffer *buf;
700 	struct epic_tx_desc *desc;
701 	u_int16_t status;
702 
703 	while (sc->pending_txs > 0) {
704 		buf = sc->tx_buffer + sc->dirty_tx;
705 		desc = sc->tx_desc + sc->dirty_tx;
706 		status = desc->status;
707 
708 		/* If this packet is not transmitted, the following */
709 		/* packets are not transmitted either */
710 		if (status & 0x8000) break;
711 
712 		/* Packet is transmitted. Switch to the next one and */
713 		/* free the mbuf */
714 		sc->pending_txs--;
715 		sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
716 		m_freem(buf->mbuf);
717 		buf->mbuf = NULL;
718 
719 		/* Check for errors and collisions */
720 		if (status & 0x0001) sc->sc_if.if_opackets++;
721 		else sc->sc_if.if_oerrors++;
722 		sc->sc_if.if_collisions += (status >> 8) & 0x1F;
723 #if defined(EPIC_DIAG)
724 		if ((status & 0x1001) == 0x1001)
725 			device_printf(sc->dev,  "Tx ERROR: excessive coll. number\n");
726 #endif
727 	}
728 
729 	if (sc->pending_txs < TX_RING_SIZE)
730 		sc->sc_if.if_flags &= ~IFF_OACTIVE;
731 }
732 
733 /*
734  * Interrupt function
735  */
736 static void
737 epic_intr(arg)
738     void *arg;
739 {
740     epic_softc_t * sc = (epic_softc_t *) arg;
741     int status, i = 4;
742 
743     while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
744 	CSR_WRITE_4(sc, INTSTAT, status);
745 
746 	if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
747 	    epic_rx_done(sc);
748 	    if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
749 #if defined(EPIC_DIAG)
750 		if (status & INTSTAT_OVW)
751 		    device_printf(sc->dev, "RX buffer overflow\n");
752 		if (status & INTSTAT_RQE)
753 		    device_printf(sc->dev, "RX FIFO overflow\n");
754 #endif
755 		if ((CSR_READ_4(sc, COMMAND) & COMMAND_RXQUEUED) == 0)
756 		    CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED);
757 		sc->sc_if.if_ierrors++;
758 	    }
759 	}
760 
761 	if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
762 	    epic_tx_done(sc);
763 	    if (sc->sc_if.if_snd.ifq_head != NULL)
764 		    epic_ifstart(&sc->sc_if);
765 	}
766 
767 	/* Check for rare errors */
768 	if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
769 		      INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
770     	    if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
771 			  INTSTAT_APE|INTSTAT_DPE)) {
772 		device_printf(sc->dev, "PCI fatal errors occurred: %s%s%s%s\n",
773 		    (status&INTSTAT_PMA)?"PMA ":"",
774 		    (status&INTSTAT_PTA)?"PTA ":"",
775 		    (status&INTSTAT_APE)?"APE ":"",
776 		    (status&INTSTAT_DPE)?"DPE":""
777 		);
778 
779 		epic_stop(sc);
780 		epic_init(sc);
781 
782 	    	break;
783 	    }
784 
785 	    if (status & INTSTAT_RXE) {
786 #if defined(EPIC_DIAG)
787 		device_printf(sc->dev, "CRC/Alignment error\n");
788 #endif
789 		sc->sc_if.if_ierrors++;
790 	    }
791 
792 	    if (status & INTSTAT_TXU) {
793 		epic_tx_underrun(sc);
794 		sc->sc_if.if_oerrors++;
795 	    }
796 	}
797     }
798 
799     /* If no packets are pending, then no timeouts */
800     if (sc->pending_txs == 0) sc->sc_if.if_timer = 0;
801 
802     return;
803 }
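/*
 * Note (added explanatory comment, not in the original driver): the handler
 * above polls INTSTAT at most four times per invocation and acknowledges the
 * events it has seen by writing the status value back to INTSTAT before
 * servicing them.  Rx/Tx completions are handled inline; fatal PCI errors
 * trigger a full epic_stop()/epic_init() reinitialization.
 */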
804 
805 /*
806  * Handle the TX underrun error: increase the TX threshold
807  * and restart the transmitter.
808  */
809 static void
810 epic_tx_underrun(sc)
811 	epic_softc_t *sc;
812 {
813 	if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
814 		sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
815 #if defined(EPIC_DIAG)
816 		device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
817 #endif
818 	} else {
819 		sc->tx_threshold += 0x40;
820 #if defined(EPIC_DIAG)
821 		device_printf(sc->dev, "Tx UNDERRUN: TX threshold increased to %d\n",
822 		    sc->tx_threshold);
823 #endif
824 	}
825 
826 	/* We must set TXUGO to reset the stuck transmitter */
827 	CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);
828 
829 	/* Update the TX threshold */
830 	epic_stop_activity(sc);
831 	epic_set_tx_mode(sc);
832 	epic_start_activity(sc);
833 
834 	return;
835 }
836 
837 /*
838  * Synopsis: This one is called if packets were not transmitted
839  * within the timeout. Try to reclaim transmitted packets, and
840  * if successful, continue to work.
841  */
842 static void
843 epic_ifwatchdog(ifp)
844 	struct ifnet *ifp;
845 {
846 	epic_softc_t *sc = ifp->if_softc;
847 	int x;
848 
849 	x = splimp();
850 
851 	device_printf(sc->dev, "device timeout %d packets\n", sc->pending_txs);
852 
853 	/* Try to finish queued packets */
854 	epic_tx_done(sc);
855 
856 	/* If not successful */
857 	if (sc->pending_txs > 0) {
858 
859 		ifp->if_oerrors+=sc->pending_txs;
860 
861 		/* Reinitialize board */
862 		device_printf(sc->dev, "reinitialization\n");
863 		epic_stop(sc);
864 		epic_init(sc);
865 
866 	} else
867 		device_printf(sc->dev, "seems we can continue normally\n");
868 
869 	/* Start output */
870 	if (ifp->if_snd.ifq_head) epic_ifstart(ifp);
871 
872 	splx(x);
873 }
874 
875 /*
876  * Despite the name of this function, it doesn't update statistics; it only
877  * helps the autonegotiation process along (via periodic mii_tick() calls).
878  */
879 static void
880 epic_stats_update(epic_softc_t * sc)
881 {
882 	struct mii_data * mii;
883 	int s;
884 
885 	s = splimp();
886 
887 	mii = device_get_softc(sc->miibus);
888 	mii_tick(mii);
889 
890 	sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);
891 
892 	splx(s);
893 }
894 
895 /*
896  * Set media options.
897  */
898 static int
899 epic_ifmedia_upd(ifp)
900 	struct ifnet *ifp;
901 {
902 	epic_softc_t *sc;
903 	struct mii_data *mii;
904 	struct ifmedia *ifm;
905 	struct mii_softc *miisc;
906 	int cfg, media;
907 
908 	sc = ifp->if_softc;
909 	mii = device_get_softc(sc->miibus);
910 	ifm = &mii->mii_media;
911 	media = ifm->ifm_cur->ifm_media;
912 
913 	/* Do not do anything if interface is not up */
914 	if ((ifp->if_flags & IFF_UP) == 0)
915 		return (0);
916 
917 	/*
918 	 * Lookup current selected PHY
919 	 */
920 	if (IFM_INST(media) == sc->serinst) {
921 		sc->phyid = EPIC_SERIAL;
922 		sc->physc = NULL;
923 	} else {
924 		/* If we're not selecting serial interface, select MII mode */
925 		sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
926 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
927 
928 		/* Default to unknown PHY */
929 		sc->phyid = EPIC_UNKN_PHY;
930 
931 		/* Lookup selected PHY */
932 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
933 		     miisc = LIST_NEXT(miisc, mii_list)) {
934 			if (IFM_INST(media) == miisc->mii_inst) {
935 				sc->physc = miisc;
936 				break;
937 			}
938 		}
939 
940 		/* Identify selected PHY */
941 		if (sc->physc) {
942 			int id1, id2, model, oui;
943 
944 			id1 = PHY_READ(sc->physc, MII_PHYIDR1);
945 			id2 = PHY_READ(sc->physc, MII_PHYIDR2);
946 
947 			oui = MII_OUI(id1, id2);
948 			model = MII_MODEL(id2);
949 			switch (oui) {
950 			case MII_OUI_QUALSEMI:
951 				if (model == MII_MODEL_QUALSEMI_QS6612)
952 					sc->phyid = EPIC_QS6612_PHY;
953 				break;
954 			case MII_OUI_xxALTIMA:
955 				if (model == MII_MODEL_xxALTIMA_AC101)
956 					sc->phyid = EPIC_AC101_PHY;
957 				break;
958 			case MII_OUI_xxLEVEL1:
959 				if (model == MII_MODEL_xxLEVEL1_LXT970)
960 					sc->phyid = EPIC_LXT970_PHY;
961 				break;
962 			}
963 		}
964 	}
965 
966 	/*
967 	 * Do PHY specific card setup
968 	 */
969 
970 	/* Call this to isolate all non-selected PHYs and
971 	 * set up the selected one
972 	 */
973 	mii_mediachg(mii);
974 
975 	/* Do our own setup */
976 	switch (sc->phyid) {
977 	case EPIC_QS6612_PHY:
978 		break;
979 	case EPIC_AC101_PHY:
980 		/* We have to power up the fiber transceivers */
981 		if (IFM_SUBTYPE(media) == IFM_100_FX)
982 			sc->miicfg |= MIICFG_694_ENABLE;
983 		else
984 			sc->miicfg &= ~MIICFG_694_ENABLE;
985 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
986 
987 		break;
988 	case EPIC_LXT970_PHY:
989 		/* We have to power up the fiber transceivers */
990 		cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
991 		if (IFM_SUBTYPE(media) == IFM_100_FX)
992 			cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
993 		else
994 			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
995 		PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);
996 
997 		break;
998 	case EPIC_SERIAL:
999 		/* Select serial PHY (usually 10base2/BNC) */
1000 		sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
1001 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
1002 
1003 		/* There is no driver to fill this */
1004 		mii->mii_media_active = media;
1005 		mii->mii_media_status = 0;
1006 
1007 		/* We need to call this manually as it isn't called
1008 		 * from mii_mediachg()
1009 		 */
1010 		epic_miibus_statchg(sc->dev);
1011 
1012 		break;
1013 	default:
1014 		device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
1015 		return (EINVAL);
1016 	}
1017 
1018 	return(0);
1019 }
1020 
1021 /*
1022  * Report current media status.
1023  */
1024 static void
1025 epic_ifmedia_sts(ifp, ifmr)
1026 	struct ifnet *ifp;
1027 	struct ifmediareq *ifmr;
1028 {
1029 	epic_softc_t *sc;
1030 	struct mii_data *mii;
1031 	struct ifmedia *ifm;
1032 
1033 	sc = ifp->if_softc;
1034 	mii = device_get_softc(sc->miibus);
1035 	ifm = &mii->mii_media;
1036 
1037 	/* Nothing should be selected if interface is down */
1038 	if ((ifp->if_flags & IFF_UP) == 0) {
1039 		ifmr->ifm_active = IFM_NONE;
1040 		ifmr->ifm_status = 0;
1041 
1042 		return;
1043 	}
1044 
1045 	/* Call underlying pollstat, if not serial PHY */
1046 	if (sc->phyid != EPIC_SERIAL)
1047 		mii_pollstat(mii);
1048 
1049 	/* Simply copy media info */
1050 	ifmr->ifm_active = mii->mii_media_active;
1051 	ifmr->ifm_status = mii->mii_media_status;
1052 
1053 	return;
1054 }
1055 
1056 /*
1057  * Callback routine, called on media change.
1058  */
1059 static void
1060 epic_miibus_statchg(dev)
1061 	device_t dev;
1062 {
1063 	epic_softc_t *sc;
1064 	struct mii_data *mii;
1065 	int media;
1066 
1067 	sc = device_get_softc(dev);
1068 	mii = device_get_softc(sc->miibus);
1069 	media = mii->mii_media_active;
1070 
1071 	sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);
1072 
1073 	/* If we are in full-duplex mode or loopback operation,
1074 	 * we need to decouple receiver and transmitter.
1075 	 */
1076 	if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
1077  		sc->txcon |= TXCON_FULL_DUPLEX;
1078 
1079 	/* On some cards we need to manually set the full-duplex LED */
1080 	if (sc->cardid == SMC9432FTX ||
1081 	    sc->cardid == SMC9432FTX_SC) {
1082 		if (IFM_OPTIONS(media) & IFM_FDX)
1083 			sc->miicfg |= MIICFG_694_ENABLE;
1084 		else
1085 			sc->miicfg &= ~MIICFG_694_ENABLE;
1086 
1087 		CSR_WRITE_4(sc, MIICFG, sc->miicfg);
1088 	}
1089 
1090 	/* Update baudrate */
1091 	if (IFM_SUBTYPE(media) == IFM_100_TX ||
1092 	    IFM_SUBTYPE(media) == IFM_100_FX)
1093 		sc->sc_if.if_baudrate = 100000000;
1094 	else
1095 		sc->sc_if.if_baudrate = 10000000;
1096 
1097 	epic_stop_activity(sc);
1098 	epic_set_tx_mode(sc);
1099 	epic_start_activity(sc);
1100 
1101 	return;
1102 }
1103 
1104 static void
1105 epic_miibus_mediainit(dev)
1106 	device_t dev;
1107 {
1108 	epic_softc_t *sc;
1109 	struct mii_data *mii;
1110 	struct ifmedia *ifm;
1111 	int media;
1112 
1113 	sc = device_get_softc(dev);
1114 	mii = device_get_softc(sc->miibus);
1115 	ifm = &mii->mii_media;
1116 
1117 	/* Add the Serial Media Interface if present; this applies to
1118 	 * the SMC9432BTX series
1119 	 */
1120 	if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
1121 		/* Store its instance */
1122 		sc->serinst = mii->mii_instance++;
1123 
1124 		/* Add as 10base2/BNC media */
1125 		media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
1126 		ifmedia_add(ifm, media, 0, NULL);
1127 
1128 		/* Report to user */
1129 		device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
1130 	}
1131 
1132 	return;
1133 }
1134 
1135 /*
1136  * Reset chip, allocate rings, and update media.
1137  */
1138 static int
1139 epic_init(sc)
1140 	epic_softc_t *sc;
1141 {
1142 	struct ifnet *ifp = &sc->sc_if;
1143 	int s,i;
1144 
1145 	s = splimp();
1146 
1147 	/* If interface is already running, then we need not do anything */
1148 	if (ifp->if_flags & IFF_RUNNING) {
1149 		splx(s);
1150 		return 0;
1151 	}
1152 
1153 	/* Soft reset the chip (we have to power up the card first) */
1154 	CSR_WRITE_4(sc, GENCTL, 0);
1155 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
1156 
1157 	/*
1158 	 * Reset takes 15 PCI ticks, which depends on the PCI bus speed.
1159 	 * Assuming it is >= 33000000 Hz, we have to wait at least 495e-6 sec.
1160 	 */
1161 	DELAY(500);
1162 
1163 	/* Wake up */
1164 	CSR_WRITE_4(sc, GENCTL, 0);
1165 
1166 	/* Workaround for Application Note 7-15 */
1167 	for (i=0; i<16; i++) CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);
1168 
1169 	/* Initialize rings */
1170 	if (epic_init_rings(sc)) {
1171 		device_printf(sc->dev, "failed to init rings\n");
1172 		splx(s);
1173 		return -1;
1174 	}
1175 
1176 	/* Give rings to EPIC */
1177 	CSR_WRITE_4(sc, PRCDAR, vtophys(sc->rx_desc));
1178 	CSR_WRITE_4(sc, PTCDAR, vtophys(sc->tx_desc));
1179 
1180 	/* Put node address to EPIC */
1181 	CSR_WRITE_4(sc, LAN0, ((u_int16_t *)sc->sc_macaddr)[0]);
1182 	CSR_WRITE_4(sc, LAN1, ((u_int16_t *)sc->sc_macaddr)[1]);
1183 	CSR_WRITE_4(sc, LAN2, ((u_int16_t *)sc->sc_macaddr)[2]);
1184 
1185 	/* Set tx mode, including the transmit threshold */
1186 	epic_set_tx_mode(sc);
1187 
1188 	/* Compute and set RXCON. */
1189 	epic_set_rx_mode(sc);
1190 
1191 	/* Set multicast table */
1192 	epic_set_mc_table(sc);
1193 
1194 	/* Enable interrupts by setting the interrupt mask. */
1195 	CSR_WRITE_4(sc, INTMASK,
1196 		INTSTAT_RCC  | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
1197 		/* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
1198 		INTSTAT_FATAL);
1199 
1200 	/* Acknowledge all pending interrupts */
1201 	CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));
1202 
1203 	/* Enable interrupts, set PCI read multiple, etc. */
1204 	CSR_WRITE_4(sc, GENCTL,
1205 		GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
1206 		GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);
1207 
1208 	/* Mark interface running ... */
1209 	if (ifp->if_flags & IFF_UP) ifp->if_flags |= IFF_RUNNING;
1210 	else ifp->if_flags &= ~IFF_RUNNING;
1211 
1212 	/* ... and free */
1213 	ifp->if_flags &= ~IFF_OACTIVE;
1214 
1215 	/* Start Rx process */
1216 	epic_start_activity(sc);
1217 
1218 	/* Set appropriate media */
1219 	epic_ifmedia_upd(ifp);
1220 
1221 	sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);
1222 
1223 	splx(s);
1224 
1225 	return 0;
1226 }
1227 
1228 /*
1229  * Synopsis: calculate and set Rx mode. Chip must be in idle state to
1230  * access RXCON.
1231  */
1232 static void
1233 epic_set_rx_mode(sc)
1234 	epic_softc_t *sc;
1235 {
1236 	u_int32_t 		flags = sc->sc_if.if_flags;
1237 	u_int32_t 		rxcon = RXCON_DEFAULT;
1238 
1239 #if defined(EPIC_EARLY_RX)
1240 	rxcon |= RXCON_EARLY_RX;
1241 #endif
1242 
1243 	rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;
1244 
1245 	CSR_WRITE_4(sc, RXCON, rxcon);
1246 
1247 	return;
1248 }
1249 
1250 /*
1251  * Synopsis: Set transmit control register. Chip must be in idle state to
1252  * access TXCON.
1253  */
1254 static void
1255 epic_set_tx_mode(sc)
1256 	epic_softc_t *sc;
1257 {
1258 	if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
1259 		CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);
1260 
1261 	CSR_WRITE_4(sc, TXCON, sc->txcon);
1262 }
1263 
1264 /*
1265  * Synopsis: Program multicast filter honoring IFF_ALLMULTI and IFF_PROMISC
1266  * flags. (Note that setting the PROMISC bit in EPIC's RXCON only affects
1267  * individual frames; the multicast filter must be programmed manually.)
1268  *
1269  * Note: EPIC must be in idle state.
1270  */
1271 static void
1272 epic_set_mc_table(sc)
1273 	epic_softc_t *sc;
1274 {
1275 	struct ifnet *ifp = &sc->sc_if;
1276 	struct ifmultiaddr *ifma;
1277 	u_int16_t filter[4];
1278 	u_int8_t h;
1279 
1280 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1281 		CSR_WRITE_4(sc, MC0, 0xFFFF);
1282 		CSR_WRITE_4(sc, MC1, 0xFFFF);
1283 		CSR_WRITE_4(sc, MC2, 0xFFFF);
1284 		CSR_WRITE_4(sc, MC3, 0xFFFF);
1285 
1286 		return;
1287 	}
1288 
1289 	filter[0] = 0;
1290 	filter[1] = 0;
1291 	filter[2] = 0;
1292 	filter[3] = 0;
1293 
1294 #if __FreeBSD_version < 500000
1295 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1296 #else
1297 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1298 #endif
1299 		if (ifma->ifma_addr->sa_family != AF_LINK)
1300 			continue;
1301 		h = epic_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
1302 		filter[h >> 4] |= 1 << (h & 0xF);
1303 	}
1304 
1305 	CSR_WRITE_4(sc, MC0, filter[0]);
1306 	CSR_WRITE_4(sc, MC1, filter[1]);
1307 	CSR_WRITE_4(sc, MC2, filter[2]);
1308 	CSR_WRITE_4(sc, MC3, filter[3]);
1309 
1310 	return;
1311 }
1312 
1313 /*
1314  * Synopsis: calculate EPIC's hash of multicast address.
1315  */
1316 static u_int8_t
1317 epic_calchash(addr)
1318 	caddr_t addr;
1319 {
1320 	u_int32_t crc, carry;
1321 	int i, j;
1322 	u_int8_t c;
1323 
1324 	/* Compute CRC for the address value. */
1325 	crc = 0xFFFFFFFF; /* initial value */
1326 
1327 	for (i = 0; i < 6; i++) {
1328 		c = *(addr + i);
1329 		for (j = 0; j < 8; j++) {
1330 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
1331 			crc <<= 1;
1332 			c >>= 1;
1333 			if (carry)
1334 				crc = (crc ^ 0x04c11db6) | carry;
1335 		}
1336 	}
1337 
1338 	return ((crc >> 26) & 0x3F);
1339 }
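/*
 * Note (added example, not in the original driver): epic_calchash() runs a
 * CRC-32 over the six address bytes (least significant bit of each byte
 * first) and returns the top six bits of the result.  epic_set_mc_table()
 * then uses that 6-bit value to select one of the 64 filter bits spread
 * over MC0..MC3, for example (hypothetical value):
 *
 *	h = epic_calchash(addr);		  say h == 0x2A
 *	filter[h >> 4] |= 1 << (h & 0xF);	  sets bit 10 of filter[2] (MC2)
 */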
1340 
1341 
1342 /*
1343  * Synopsis: Start the receive process, and the transmit process if needed.
1344  */
1345 static void
1346 epic_start_activity(sc)
1347 	epic_softc_t *sc;
1348 {
1349 	/* Start rx process */
1350 	CSR_WRITE_4(sc, COMMAND,
1351 		COMMAND_RXQUEUED | COMMAND_START_RX |
1352 		(sc->pending_txs?COMMAND_TXQUEUED:0));
1353 }
1354 
1355 /*
1356  * Synopsis: Completely stop the Rx and Tx processes. If TQE is set, an
1357  * additional packet needs to be queued to stop Tx DMA.
1358  */
1359 static void
1360 epic_stop_activity(sc)
1361 	epic_softc_t *sc;
1362 {
1363 	int status, i;
1364 
1365 	/* Stop Tx and Rx DMA */
1366 	CSR_WRITE_4(sc, COMMAND,
1367 	    COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);
1368 
1369 	/* Wait for Rx and Tx DMA to stop (why 1 ms ??? XXX) */
1370 	for (i=0; i<0x1000; i++) {
1371 		status = CSR_READ_4(sc, INTSTAT) & (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
1372 		if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
1373 			break;
1374 		DELAY(1);
1375 	}
1376 
1377 	/* Catch all finished packets */
1378 	epic_rx_done(sc);
1379 	epic_tx_done(sc);
1380 
1381 	status = CSR_READ_4(sc, INTSTAT);
1382 
1383 	if ((status & INTSTAT_RXIDLE) == 0)
1384 		device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");
1385 
1386 	if ((status & INTSTAT_TXIDLE) == 0)
1387 		device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");
1388 
1389 	/*
1390 	 * May need to queue one more packet if TQE is set; this is a rare
1391 	 * but possible case.
1392 	 */
1393 	if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
1394 		(void) epic_queue_last_packet(sc);
1395 
1396 }
1397 
1398 /*
1399  * The EPIC transmitter may get stuck in the TQE state. It will not go IDLE
1400  * until a packet from the current descriptor has been copied to internal RAM.
1401  * We compose a dummy packet here and queue it for transmission.
1402  *
1403  * XXX the packet will then actually be sent over the network...
1404  */
1405 static int
1406 epic_queue_last_packet(sc)
1407 	epic_softc_t *sc;
1408 {
1409 	struct epic_tx_desc *desc;
1410 	struct epic_frag_list *flist;
1411 	struct epic_tx_buffer *buf;
1412 	struct mbuf *m0;
1413 	int i;
1414 
1415 	device_printf(sc->dev, "queue last packet\n");
1416 
1417 	desc = sc->tx_desc + sc->cur_tx;
1418 	flist = sc->tx_flist + sc->cur_tx;
1419 	buf = sc->tx_buffer + sc->cur_tx;
1420 
1421 	if ((desc->status & 0x8000) || (buf->mbuf != NULL))
1422 		return (EBUSY);
1423 
1424 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
1425 	if (NULL == m0)
1426 		return (ENOBUFS);
1427 
1428 	/* Prepare mbuf */
1429 	m0->m_len = min(MHLEN, ETHER_MIN_LEN-ETHER_CRC_LEN);
1430 	flist->frag[0].fraglen = m0->m_len;
1431 	m0->m_pkthdr.len = m0->m_len;
1432 	m0->m_pkthdr.rcvif = &sc->sc_if;
1433 	bzero(mtod(m0,caddr_t), m0->m_len);
1434 
1435 	/* Fill fragments list */
1436 	flist->frag[0].fraglen = m0->m_len;
1437 	flist->frag[0].fragaddr = vtophys(mtod(m0, caddr_t));
1438 	flist->numfrags = 1;
1439 
1440 	/* Fill in descriptor */
1441 	buf->mbuf = m0;
1442 	sc->pending_txs++;
1443 	sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
1444 	desc->control = 0x01;
1445 	desc->txlength = max(m0->m_pkthdr.len,ETHER_MIN_LEN-ETHER_CRC_LEN);
1446 	desc->status = 0x8000;
1447 
1448 	/* Launch transmission */
1449 	CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED);
1450 
1451 	/* Wait for Tx DMA to stop (for how long??? XXX) */
1452 	for (i=0; i<1000; i++) {
1453 		if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE)
1454 			break;
1455 		DELAY(1);
1456 	}
1457 
1458 	if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0)
1459 		device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n");
1460 	else
1461 		epic_tx_done(sc);
1462 
1463 	return 0;
1464 }
1465 
1466 /*
1467  * Synopsis: Shut down the board and deallocate the rings.
1468  */
1469 static void
1470 epic_stop(sc)
1471 	epic_softc_t *sc;
1472 {
1473 	int s;
1474 
1475 	s = splimp();
1476 
1477 	sc->sc_if.if_timer = 0;
1478 
1479 	untimeout((timeout_t *)epic_stats_update, sc, sc->stat_ch);
1480 
1481 	/* Disable interrupts */
1482 	CSR_WRITE_4(sc, INTMASK, 0);
1483 	CSR_WRITE_4(sc, GENCTL, 0);
1484 
1485 	/* Try to stop Rx and TX processes */
1486 	epic_stop_activity(sc);
1487 
1488 	/* Reset chip */
1489 	CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
1490 	DELAY(1000);
1491 
1492 	/* Make chip go to bed */
1493 	CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN);
1494 
1495 	/* Free memory allocated for rings */
1496 	epic_free_rings(sc);
1497 
1498 	/* Mark as stopped */
1499 	sc->sc_if.if_flags &= ~IFF_RUNNING;
1500 
1501 	splx(s);
1502 	return;
1503 }
1504 
1505 /*
1506  * Synopsis: Free all memory allocated for the rings.
1507  */
1508 static void
1509 epic_free_rings(sc)
1510 	epic_softc_t *sc;
1511 {
1512 	int i;
1513 
1514 	for (i=0; i<RX_RING_SIZE; i++) {
1515 		struct epic_rx_buffer *buf = sc->rx_buffer + i;
1516 		struct epic_rx_desc *desc = sc->rx_desc + i;
1517 
1518 		desc->status = 0;
1519 		desc->buflength = 0;
1520 		desc->bufaddr = 0;
1521 
1522 		if (buf->mbuf) m_freem(buf->mbuf);
1523 		buf->mbuf = NULL;
1524 	}
1525 
1526 	for (i=0; i<TX_RING_SIZE; i++) {
1527 		struct epic_tx_buffer *buf = sc->tx_buffer + i;
1528 		struct epic_tx_desc *desc = sc->tx_desc + i;
1529 
1530 		desc->status = 0;
1531 		desc->buflength = 0;
1532 		desc->bufaddr = 0;
1533 
1534 		if (buf->mbuf) m_freem(buf->mbuf);
1535 		buf->mbuf = NULL;
1536 	}
1537 }
1538 
1539 /*
1540  * Synopsis: Allocate mbufs for the Rx ring and point the Rx descs to them.
1541  * Point the Tx descs to the fragment lists. Check that all descs and
1542  * fraglists are properly bounded and aligned.
1543  */
1544 static int
1545 epic_init_rings(sc)
1546 	epic_softc_t *sc;
1547 {
1548 	int i;
1549 
1550 	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;
1551 
1552 	for (i = 0; i < RX_RING_SIZE; i++) {
1553 		struct epic_rx_buffer *buf = sc->rx_buffer + i;
1554 		struct epic_rx_desc *desc = sc->rx_desc + i;
1555 
1556 		desc->status = 0;		/* Owned by driver */
1557 		desc->next = vtophys(sc->rx_desc + ((i+1) & RX_RING_MASK));
1558 
1559 		if ((desc->next & 3) ||
1560 		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
1561 			epic_free_rings(sc);
1562 			return EFAULT;
1563 		}
1564 
1565 		EPIC_MGETCLUSTER(buf->mbuf);
1566 		if (NULL == buf->mbuf) {
1567 			epic_free_rings(sc);
1568 			return ENOBUFS;
1569 		}
1570 		desc->bufaddr = vtophys(mtod(buf->mbuf, caddr_t));
1571 
1572 		desc->buflength = MCLBYTES;	/* Max RX buffer length */
1573 		desc->status = 0x8000;		/* Set owner bit to NIC */
1574 	}
1575 
1576 	for (i = 0; i < TX_RING_SIZE; i++) {
1577 		struct epic_tx_buffer *buf = sc->tx_buffer + i;
1578 		struct epic_tx_desc *desc = sc->tx_desc + i;
1579 
1580 		desc->status = 0;
1581 		desc->next = vtophys(sc->tx_desc + ((i+1) & TX_RING_MASK));
1582 
1583 		if ((desc->next & 3) ||
1584 		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
1585 			epic_free_rings(sc);
1586 			return EFAULT;
1587 		}
1588 
1589 		buf->mbuf = NULL;
1590 		desc->bufaddr = vtophys(sc->tx_flist + i);
1591 
1592 		if ((desc->bufaddr & 3) ||
1593 		    ((desc->bufaddr & PAGE_MASK) + sizeof(struct epic_frag_list)) > PAGE_SIZE) {
1594 			epic_free_rings(sc);
1595 			return EFAULT;
1596 		}
1597 	}
1598 
1599 	return 0;
1600 }
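/*
 * Note (added explanatory comment, not in the original driver): descriptors
 * and fragment lists are handed to the chip as physical addresses obtained
 * with vtophys(), so each structure must be 4-byte aligned and must not
 * cross a page boundary (physical contiguity is only assured within a page).
 * The (addr & 3) and ((addr & PAGE_MASK) + size) > PAGE_SIZE checks above
 * enforce this and make epic_init_rings() fail with EFAULT if malloc() ever
 * returns a layout that violates it.
 */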
1601 
1602 /*
1603  * EEPROM operation functions
1604  */
1605 static void
1606 epic_write_eepromreg(sc, val)
1607 	epic_softc_t *sc;
1608 	u_int8_t val;
1609 {
1610 	u_int16_t i;
1611 
1612 	CSR_WRITE_1(sc, EECTL, val);
1613 
1614 	for (i=0; i<0xFF; i++)
1615 		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0) break;
1616 
1617 	return;
1618 }
1619 
1620 static u_int8_t
1621 epic_read_eepromreg(sc)
1622 	epic_softc_t *sc;
1623 {
1624 	return CSR_READ_1(sc, EECTL);
1625 }
1626 
1627 static u_int8_t
1628 epic_eeprom_clock(sc, val)
1629 	epic_softc_t *sc;
1630 	u_int8_t val;
1631 {
1632 	epic_write_eepromreg(sc, val);
1633 	epic_write_eepromreg(sc, (val | 0x4));
1634 	epic_write_eepromreg(sc, val);
1635 
1636 	return epic_read_eepromreg(sc);
1637 }
1638 
1639 static void
1640 epic_output_eepromw(sc, val)
1641 	epic_softc_t *sc;
1642 	u_int16_t val;
1643 {
1644 	int i;
1645 
1646 	for (i = 0xF; i >= 0; i--) {
1647 		if (val & (1 << i))
1648 			epic_eeprom_clock(sc, 0x0B);
1649 		else
1650 			epic_eeprom_clock(sc, 0x03);
1651 	}
1652 }
1653 
1654 static u_int16_t
1655 epic_input_eepromw(sc)
1656 	epic_softc_t *sc;
1657 {
1658 	u_int16_t retval = 0;
1659 	int i;
1660 
1661 	for (i = 0xF; i >= 0; i--) {
1662 		if (epic_eeprom_clock(sc, 0x3) & 0x10)
1663 			retval |= (1 << i);
1664 	}
1665 
1666 	return retval;
1667 }
1668 
1669 static int
1670 epic_read_eeprom(sc, loc)
1671 	epic_softc_t *sc;
1672 	u_int16_t loc;
1673 {
1674 	u_int16_t dataval;
1675 	u_int16_t read_cmd;
1676 
1677 	epic_write_eepromreg(sc, 3);
1678 
1679 	if (epic_read_eepromreg(sc) & 0x40)
1680 		read_cmd = (loc & 0x3F) | 0x180;
1681 	else
1682 		read_cmd = (loc & 0xFF) | 0x600;
1683 
1684 	epic_output_eepromw(sc, read_cmd);
1685 
1686 	dataval = epic_input_eepromw(sc);
1687 
1688 	epic_write_eepromreg(sc, 1);
1689 
1690 	return dataval;
1691 }
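/*
 * Note (added explanatory comment, not in the original driver): the routines
 * above bit-bang the serial EEPROM through the EECTL register.  As used
 * here, bit 0x08 is the data-out line (0x0B shifts out a one, 0x03 a zero),
 * bit 0x04 is the clock toggled by epic_eeprom_clock(), bit 0x10 is the
 * data-in line sampled by epic_input_eepromw(), and epic_write_eepromreg()
 * waits for bit 0x20 to clear before proceeding.  epic_read_eeprom() picks
 * the read-command format (0x180-based with a 6-bit address vs. 0x600-based
 * with an 8-bit address) based on bit 0x40, which apparently reflects the
 * EEPROM size.
 */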
1692 
1693 /*
1694  * Here go the MII read/write routines
1695  */
1696 static int
1697 epic_read_phy_reg(sc, phy, reg)
1698 	epic_softc_t *sc;
1699 	int phy, reg;
1700 {
1701 	int i;
1702 
1703 	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));
1704 
1705 	for (i = 0; i < 0x100; i++) {
1706 		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0) break;
1707 		DELAY(1);
1708 	}
1709 
1710 	return (CSR_READ_4(sc, MIIDATA));
1711 }
1712 
1713 static void
1714 epic_write_phy_reg(sc, phy, reg, val)
1715 	epic_softc_t *sc;
1716 	int phy, reg, val;
1717 {
1718 	int i;
1719 
1720 	CSR_WRITE_4(sc, MIIDATA, val);
1721 	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));
1722 
1723 	for(i=0;i<0x100;i++) {
1724 		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0) break;
1725 		DELAY(1);
1726 	}
1727 
1728 	return;
1729 }
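/*
 * Note (added example, not in the original driver): PHY registers are
 * accessed through MIICTL/MIIDATA.  The control word packs the register
 * number at bit 4 and the PHY address at bit 9; bit 0x01 starts a read and
 * bit 0x02 a write, and both routines above poll until the chip clears that
 * strobe bit.  Reading, say, register 1 of PHY 3 (hypothetical values) looks
 * like:
 *
 *	CSR_WRITE_4(sc, MIICTL, (1 << 4) | (3 << 9) | 0x01);
 *	... poll until (CSR_READ_4(sc, MIICTL) & 0x01) == 0 ...
 *	val = CSR_READ_4(sc, MIIDATA);
 */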
1730 
1731 static int
1732 epic_miibus_readreg(dev, phy, reg)
1733 	device_t dev;
1734 	int phy, reg;
1735 {
1736 	epic_softc_t *sc;
1737 
1738 	sc = device_get_softc(dev);
1739 
1740 	return (PHY_READ_2(sc, phy, reg));
1741 }
1742 
1743 static int
1744 epic_miibus_writereg(dev, phy, reg, data)
1745 	device_t dev;
1746 	int phy, reg, data;
1747 {
1748 	epic_softc_t *sc;
1749 
1750 	sc = device_get_softc(dev);
1751 
1752 	PHY_WRITE_2(sc, phy, reg, data);
1753 
1754 	return (0);
1755 }
1756