xref: /dragonfly/sys/dev/netif/sf/if_sf.c (revision 685c703c)
1 /*
2  * Copyright (c) 1997, 1998, 1999
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD: src/sys/pci/if_sf.c,v 1.18.2.8 2001/12/16 15:46:07 luigi Exp $
33  * $DragonFly: src/sys/dev/netif/sf/if_sf.c,v 1.28 2006/08/01 18:08:24 swildner Exp $
34  */
35 
36 /*
37  * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD.
38  * Programming manual is available from:
39  * ftp.adaptec.com:/pub/BBS/userguides/aic6915_pg.pdf.
40  *
41  * Written by Bill Paul <wpaul@ctr.columbia.edu>
42  * Department of Electrical Engineering
43  * Columbia University, New York City
44  */
45 
46 /*
47  * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet
48  * controller designed with flexibility and reduced CPU load in mind.
49  * The Starfire offers high and low priority buffer queues, a
50  * producer/consumer index mechanism and several different buffer
51  * queue and completion queue descriptor types. Any one of a number
52  * of different driver designs can be used, depending on system and
53  * OS requirements. This driver makes use of type0 transmit frame
54  * descriptors (since BSD fragments packets across an mbuf chain)
55  * and two RX buffer queues prioritized on size (one queue for small
56  * frames that will fit into a single mbuf, another with full size
57  * mbuf clusters for everything else). The producer/consumer indexes
58  * and completion queues are also used.
59  *
60  * One downside to the Starfire has to do with alignment: buffer
61  * queues must be aligned on 256-byte boundaries, and receive buffers
62  * must be aligned on longword boundaries. The receive buffer alignment
63  * causes problems on the Alpha platform, where the packet payload
64  * should be longword aligned. There is no simple way around this.
65  *
66  * For receive filtering, the Starfire offers 16 perfect filter slots
67  * and a 512-bit hash table.
68  *
69  * The Starfire has no internal transceiver, relying instead on an
70  * external MII-based transceiver. Accessing registers on external
71  * PHYs is done through a special register map rather than with the
72  * usual bitbang MDIO method.
73  *
74  * Accessing the registers on the Starfire is a little tricky. The
75  * Starfire has a 512K internal register space. When programmed for
76  * PCI memory mapped mode, the entire register space can be accessed
77  * directly. However in I/O space mode, only 256 bytes are directly
78  * mapped into PCI I/O space. The other registers can be accessed
79  * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA
80  * registers inside the 256-byte I/O window.
81  */
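
/*
 * For example, reading a register such as SF_MACCFG_1 through the
 * 256-byte I/O window comes down to the following two accesses, which
 * is exactly what the csr_read_4() helper below does when
 * SF_USEIOSPACE is defined:
 *
 *	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR,
 *	    SF_MACCFG_1 + SF_RMAP_INTREG_BASE);
 *	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
 */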
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/sockio.h>
86 #include <sys/mbuf.h>
87 #include <sys/malloc.h>
88 #include <sys/kernel.h>
89 #include <sys/socket.h>
90 #include <sys/serialize.h>
91 
92 #include <sys/thread2.h>
93 
94 #include <net/if.h>
95 #include <net/ifq_var.h>
96 #include <net/if_arp.h>
97 #include <net/ethernet.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 
101 #include <net/bpf.h>
102 
103 #include <vm/vm.h>              /* for vtophys */
104 #include <vm/pmap.h>            /* for vtophys */
105 #include <machine/clock.h>      /* for DELAY */
106 #include <machine/bus_pio.h>
107 #include <machine/bus_memio.h>
108 #include <machine/bus.h>
109 #include <machine/resource.h>
110 #include <sys/bus.h>
111 #include <sys/rman.h>
112 
113 #include "../mii_layer/mii.h"
114 #include "../mii_layer/miivar.h"
115 
116 /* "controller miibus0" required.  See GENERIC if you get errors here. */
117 #include "miibus_if.h"
118 
119 #include <bus/pci/pcidevs.h>
120 #include <bus/pci/pcireg.h>
121 #include <bus/pci/pcivar.h>
122 
123 #define SF_USEIOSPACE
124 
125 #include "if_sfreg.h"
126 
127 static struct sf_type sf_devs[] = {
128 	{ PCI_VENDOR_ADP, PCI_PRODUCT_ADP_AIC6915,
129 		"Adaptec AIC-6915 10/100BaseTX" },
130 	{ 0, 0, NULL }
131 };
132 
133 static int sf_probe		(device_t);
134 static int sf_attach		(device_t);
135 static int sf_detach		(device_t);
136 static void sf_intr		(void *);
137 static void sf_stats_update	(void *);
138 static void sf_rxeof		(struct sf_softc *);
139 static void sf_txeof		(struct sf_softc *);
140 static int sf_encap		(struct sf_softc *,
141 					struct sf_tx_bufdesc_type0 *,
142 					struct mbuf *);
143 static void sf_start		(struct ifnet *);
144 static int sf_ioctl		(struct ifnet *, u_long, caddr_t,
145 					struct ucred *);
146 static void sf_init		(void *);
147 static void sf_stop		(struct sf_softc *);
148 static void sf_watchdog		(struct ifnet *);
149 static void sf_shutdown		(device_t);
150 static int sf_ifmedia_upd	(struct ifnet *);
151 static void sf_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
152 static void sf_reset		(struct sf_softc *);
153 static int sf_init_rx_ring	(struct sf_softc *);
154 static void sf_init_tx_ring	(struct sf_softc *);
155 static int sf_newbuf		(struct sf_softc *,
156 					struct sf_rx_bufdesc_type0 *,
157 					struct mbuf *);
158 static void sf_setmulti		(struct sf_softc *);
159 static int sf_setperf		(struct sf_softc *, int, caddr_t);
160 static int sf_sethash		(struct sf_softc *, caddr_t, int);
161 #ifdef notdef
162 static int sf_setvlan		(struct sf_softc *, int, u_int32_t);
163 #endif
164 
165 static u_int8_t sf_read_eeprom	(struct sf_softc *, int);
166 static u_int32_t sf_calchash	(caddr_t);
167 
168 static int sf_miibus_readreg	(device_t, int, int);
169 static int sf_miibus_writereg	(device_t, int, int, int);
170 static void sf_miibus_statchg	(device_t);
171 
172 static u_int32_t csr_read_4	(struct sf_softc *, int);
173 static void csr_write_4		(struct sf_softc *, int, u_int32_t);
174 static void sf_txthresh_adjust	(struct sf_softc *);
175 
176 #ifdef SF_USEIOSPACE
177 #define SF_RES			SYS_RES_IOPORT
178 #define SF_RID			SF_PCI_LOIO
179 #else
180 #define SF_RES			SYS_RES_MEMORY
181 #define SF_RID			SF_PCI_LOMEM
182 #endif
183 
184 static device_method_t sf_methods[] = {
185 	/* Device interface */
186 	DEVMETHOD(device_probe,		sf_probe),
187 	DEVMETHOD(device_attach,	sf_attach),
188 	DEVMETHOD(device_detach,	sf_detach),
189 	DEVMETHOD(device_shutdown,	sf_shutdown),
190 
191 	/* bus interface */
192 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
193 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
194 
195 	/* MII interface */
196 	DEVMETHOD(miibus_readreg,	sf_miibus_readreg),
197 	DEVMETHOD(miibus_writereg,	sf_miibus_writereg),
198 	DEVMETHOD(miibus_statchg,	sf_miibus_statchg),
199 
200 	{ 0, 0 }
201 };
202 
203 static driver_t sf_driver = {
204 	"sf",
205 	sf_methods,
206 	sizeof(struct sf_softc),
207 };
208 
209 static devclass_t sf_devclass;
210 
211 DECLARE_DUMMY_MODULE(if_sf);
212 DRIVER_MODULE(if_sf, pci, sf_driver, sf_devclass, 0, 0);
213 DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0);
214 
215 #define SF_SETBIT(sc, reg, x)	\
216 	csr_write_4(sc, reg, csr_read_4(sc, reg) | x)
217 
218 #define SF_CLRBIT(sc, reg, x)				\
219 	csr_write_4(sc, reg, csr_read_4(sc, reg) & ~x)
220 
221 static u_int32_t
222 csr_read_4(struct sf_softc *sc, int reg)
223 {
224 	u_int32_t		val;
225 
226 #ifdef SF_USEIOSPACE
227 	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
228 	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);
229 #else
230 	val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE));
231 #endif
232 
233 	return(val);
234 }
235 
236 static u_int8_t
237 sf_read_eeprom(struct sf_softc *sc, int reg)
238 {
239 	u_int8_t		val;
240 
241 	val = (csr_read_4(sc, SF_EEADDR_BASE +
242 	    (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF;
243 
244 	return(val);
245 }
246 
247 static void
248 csr_write_4(struct sf_softc *sc, int reg, u_int32_t val)
249 {
250 #ifdef SF_USEIOSPACE
251 	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
252 	CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val);
253 #else
254 	CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val);
255 #endif
256 	return;
257 }
258 
259 static u_int32_t
260 sf_calchash(caddr_t addr)
261 {
262 	u_int32_t		crc, carry;
263 	int			i, j;
264 	u_int8_t		c;
265 
266 	/* Compute CRC for the address value. */
267 	crc = 0xFFFFFFFF; /* initial value */
268 
269 	for (i = 0; i < 6; i++) {
270 		c = *(addr + i);
271 		for (j = 0; j < 8; j++) {
272 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
273 			crc <<= 1;
274 			c >>= 1;
275 			if (carry)
276 				crc = (crc ^ 0x04c11db6) | carry;
277 		}
278 	}
279 
280 	/* return the filter bit position */
281 	return(crc >> 23 & 0x1FF);
282 }
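
/*
 * The 9-bit value returned above indexes the 512-bit hash table:
 * sf_sethash() below uses the upper bits (h >> 4) to pick one of the
 * 32 hash filter words and (1 << (h & 0xF)) to pick the bit within
 * that word.
 */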
283 
284 /*
285  * Copy the address 'mac' into the perfect RX filter entry at
286  * offset 'idx.' The perfect filter only has 16 entries so do
287  * some sanity tests.
288  */
289 static int
290 sf_setperf(struct sf_softc *sc, int idx, caddr_t mac)
291 {
292 	u_int16_t		*p;
293 
294 	if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT)
295 		return(EINVAL);
296 
297 	if (mac == NULL)
298 		return(EINVAL);
299 
300 	p = (u_int16_t *)mac;
301 
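	/*
	 * The three 16-bit words of the address go into the filter
	 * slot in reverse order, each passed through htons() on its
	 * way to the chip.
	 */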
302 	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
303 	    (idx * SF_RXFILT_PERFECT_SKIP), htons(p[2]));
304 	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
305 	    (idx * SF_RXFILT_PERFECT_SKIP) + 4, htons(p[1]));
306 	csr_write_4(sc, SF_RXFILT_PERFECT_BASE +
307 	    (idx * SF_RXFILT_PERFECT_SKIP) + 8, htons(p[0]));
308 
309 	return(0);
310 }
311 
312 /*
313  * Set the bit in the 512-bit hash table that corresponds to the
314  * specified mac address 'mac.' If 'prio' is nonzero, update the
315  * priority hash table instead of the filter hash table.
316  */
317 static int
318 sf_sethash(struct sf_softc *sc, caddr_t mac, int prio)
319 {
320 	u_int32_t		h = 0;
321 
322 	if (mac == NULL)
323 		return(EINVAL);
324 
325 	h = sf_calchash(mac);
326 
327 	if (prio) {
328 		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF +
329 		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
330 	} else {
331 		SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF +
332 		    (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF)));
333 	}
334 
335 	return(0);
336 }
337 
338 #ifdef notdef
339 /*
340  * Set a VLAN tag in the receive filter.
341  */
342 static int
343 sf_setvlan(struct sf_softc *sc, int idx, u_int32_t vlan)
344 {
345 	if (idx < 0 || idx >= SF_RXFILT_HASH_CNT)
346 		return(EINVAL);
347 
348 	csr_write_4(sc, SF_RXFILT_HASH_BASE +
349 	    (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan);
350 
351 	return(0);
352 }
353 #endif
354 
355 static int
356 sf_miibus_readreg(device_t dev, int phy, int reg)
357 {
358 	struct sf_softc		*sc;
359 	int			i;
360 	u_int32_t		val = 0;
361 
362 	sc = device_get_softc(dev);
363 
364 	for (i = 0; i < SF_TIMEOUT; i++) {
365 		val = csr_read_4(sc, SF_PHY_REG(phy, reg));
366 		if (val & SF_MII_DATAVALID)
367 			break;
368 	}
369 
370 	if (i == SF_TIMEOUT)
371 		return(0);
372 
373 	if ((val & 0x0000FFFF) == 0xFFFF)
374 		return(0);
375 
376 	return(val & 0x0000FFFF);
377 }
378 
379 static int
380 sf_miibus_writereg(device_t dev, int phy, int reg, int val)
381 {
382 	struct sf_softc		*sc;
383 	int			i;
384 	int			busy;
385 
386 	sc = device_get_softc(dev);
387 
388 	csr_write_4(sc, SF_PHY_REG(phy, reg), val);
389 
390 	for (i = 0; i < SF_TIMEOUT; i++) {
391 		busy = csr_read_4(sc, SF_PHY_REG(phy, reg));
392 		if (!(busy & SF_MII_BUSY))
393 			break;
394 	}
395 
396 	return(0);
397 }
398 
399 static void
400 sf_miibus_statchg(device_t dev)
401 {
402 	struct sf_softc		*sc;
403 	struct mii_data		*mii;
404 
405 	sc = device_get_softc(dev);
406 	mii = device_get_softc(sc->sf_miibus);
407 
408 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
409 		SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX);
410 		csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX);
411 	} else {
412 		SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX);
413 		csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX);
414 	}
415 
416 	return;
417 }
418 
419 static void
420 sf_setmulti(struct sf_softc *sc)
421 {
422 	struct ifnet		*ifp;
423 	int			i;
424 	struct ifmultiaddr	*ifma;
425 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
426 
427 	ifp = &sc->arpcom.ac_if;
428 
429 	/* First zot all the existing filters. */
430 	for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++)
431 		sf_setperf(sc, i, (char *)&dummy);
432 	for (i = SF_RXFILT_HASH_BASE;
433 	    i < (SF_RXFILT_HASH_MAX + 1); i += 4)
434 		csr_write_4(sc, i, 0);
435 	SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI);
436 
437 	/* Now program new ones. */
438 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
439 		SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI);
440 	} else {
441 		i = 1;
442 		/* First find the tail of the list. */
443 		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
444 					ifma = ifma->ifma_link.le_next) {
445 			if (ifma->ifma_link.le_next == NULL)
446 				break;
447 		}
448 		/* Now traverse the list backwards. */
449 		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
450 			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
451 			if (ifma->ifma_addr->sa_family != AF_LINK)
452 				continue;
453 			/*
454 			 * Program the first 15 multicast groups
455 			 * into the perfect filter. For all others,
456 			 * use the hash table.
457 			 */
458 			if (i < SF_RXFILT_PERFECT_CNT) {
459 				sf_setperf(sc, i,
460 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
461 				i++;
462 				continue;
463 			}
464 
465 			sf_sethash(sc,
466 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0);
467 		}
468 	}
469 
470 	return;
471 }
472 
473 /*
474  * Set media options.
475  */
476 static int
477 sf_ifmedia_upd(struct ifnet *ifp)
478 {
479 	struct sf_softc		*sc;
480 	struct mii_data		*mii;
481 
482 	sc = ifp->if_softc;
483 	mii = device_get_softc(sc->sf_miibus);
484 	sc->sf_link = 0;
485 	if (mii->mii_instance) {
486 		struct mii_softc        *miisc;
487 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
488 		    miisc = LIST_NEXT(miisc, mii_list))
489 			mii_phy_reset(miisc);
490 	}
491 	mii_mediachg(mii);
492 
493 	return(0);
494 }
495 
496 /*
497  * Report current media status.
498  */
499 static void
500 sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
501 {
502 	struct sf_softc		*sc;
503 	struct mii_data		*mii;
504 
505 	sc = ifp->if_softc;
506 	mii = device_get_softc(sc->sf_miibus);
507 
508 	mii_pollstat(mii);
509 	ifmr->ifm_active = mii->mii_media_active;
510 	ifmr->ifm_status = mii->mii_media_status;
511 
512 	return;
513 }
514 
515 static int
516 sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
517 {
518 	struct sf_softc		*sc = ifp->if_softc;
519 	struct ifreq		*ifr = (struct ifreq *) data;
520 	struct mii_data		*mii;
521 	int error = 0;
522 
523 	switch(command) {
524 	case SIOCSIFFLAGS:
525 		if (ifp->if_flags & IFF_UP) {
526 			if (ifp->if_flags & IFF_RUNNING &&
527 			    ifp->if_flags & IFF_PROMISC &&
528 			    !(sc->sf_if_flags & IFF_PROMISC)) {
529 				SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
530 			} else if (ifp->if_flags & IFF_RUNNING &&
531 			    !(ifp->if_flags & IFF_PROMISC) &&
532 			    sc->sf_if_flags & IFF_PROMISC) {
533 				SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
534 			} else if (!(ifp->if_flags & IFF_RUNNING))
535 				sf_init(sc);
536 		} else {
537 			if (ifp->if_flags & IFF_RUNNING)
538 				sf_stop(sc);
539 		}
540 		sc->sf_if_flags = ifp->if_flags;
541 		error = 0;
542 		break;
543 	case SIOCADDMULTI:
544 	case SIOCDELMULTI:
545 		sf_setmulti(sc);
546 		error = 0;
547 		break;
548 	case SIOCGIFMEDIA:
549 	case SIOCSIFMEDIA:
550 		mii = device_get_softc(sc->sf_miibus);
551 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
552 		break;
553 	default:
554 		error = ether_ioctl(ifp, command, data);
555 		break;
556 	}
557 
558 	return(error);
559 }
560 
561 static void
562 sf_reset(struct sf_softc *sc)
563 {
564 	int		i;
565 
566 	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
567 	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
568 	DELAY(1000);
569 	SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
570 
571 	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);
572 
573 	for (i = 0; i < SF_TIMEOUT; i++) {
574 		DELAY(10);
575 		if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
576 			break;
577 	}
578 
579 	if (i == SF_TIMEOUT)
580 		printf("sf%d: reset never completed!\n", sc->sf_unit);
581 
582 	/* Wait a little while for the chip to get its brains in order. */
583 	DELAY(1000);
584 	return;
585 }
586 
587 /*
588  * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device
589  * IDs against our list and return a device name if we find a match.
590  * We also check the subsystem ID so that we can identify exactly which
591  * NIC has been found, if possible.
592  */
593 static int
594 sf_probe(device_t dev)
595 {
596 	struct sf_type		*t;
597 
598 	t = sf_devs;
599 
600 	while(t->sf_name != NULL) {
601 		if ((pci_get_vendor(dev) == t->sf_vid) &&
602 		    (pci_get_device(dev) == t->sf_did)) {
603 			switch((pci_read_config(dev,
604 			    SF_PCI_SUBVEN_ID, 4) >> 16) & 0xFFFF) {
605 			case AD_SUBSYSID_62011_REV0:
606 			case AD_SUBSYSID_62011_REV1:
607 				device_set_desc(dev,
608 				    "Adaptec ANA-62011 10/100BaseTX");
609 				return(0);
610 				break;
611 			case AD_SUBSYSID_62022:
612 				device_set_desc(dev,
613 				    "Adaptec ANA-62022 10/100BaseTX");
614 				return(0);
615 				break;
616 			case AD_SUBSYSID_62044_REV0:
617 			case AD_SUBSYSID_62044_REV1:
618 				device_set_desc(dev,
619 				    "Adaptec ANA-62044 10/100BaseTX");
620 				return(0);
621 				break;
622 			case AD_SUBSYSID_62020:
623 				device_set_desc(dev,
624 				    "Adaptec ANA-62020 10/100BaseFX");
625 				return(0);
626 				break;
627 			case AD_SUBSYSID_69011:
628 				device_set_desc(dev,
629 				    "Adaptec ANA-69011 10/100BaseTX");
630 				return(0);
631 				break;
632 			default:
633 				device_set_desc(dev, t->sf_name);
634 				return(0);
635 				break;
636 			}
637 		}
638 		t++;
639 	}
640 
641 	return(ENXIO);
642 }
643 
644 /*
645  * Attach the interface. Allocate softc structures, do ifmedia
646  * setup and ethernet/BPF attach.
647  */
648 static int
649 sf_attach(device_t dev)
650 {
651 	int			i;
652 	u_int32_t		command;
653 	struct sf_softc		*sc;
654 	struct ifnet		*ifp;
655 	int			unit, rid, error = 0;
656 
657 	sc = device_get_softc(dev);
658 	unit = device_get_unit(dev);
659 
660 	/*
661 	 * Handle power management nonsense.
662 	 */
663 	command = pci_read_config(dev, SF_PCI_CAPID, 4) & 0x000000FF;
664 	if (command == 0x01) {
665 
666 		command = pci_read_config(dev, SF_PCI_PWRMGMTCTRL, 4);
667 		if (command & SF_PSTATE_MASK) {
668 			u_int32_t		iobase, membase, irq;
669 
670 			/* Save important PCI config data. */
671 			iobase = pci_read_config(dev, SF_PCI_LOIO, 4);
672 			membase = pci_read_config(dev, SF_PCI_LOMEM, 4);
673 			irq = pci_read_config(dev, SF_PCI_INTLINE, 4);
674 
675 			/* Reset the power state. */
676 			printf("sf%d: chip is in D%d power mode "
677 			"-- setting to D0\n", unit, command & SF_PSTATE_MASK);
678 			command &= 0xFFFFFFFC;
679 			pci_write_config(dev, SF_PCI_PWRMGMTCTRL, command, 4);
680 
681 			/* Restore PCI config data. */
682 			pci_write_config(dev, SF_PCI_LOIO, iobase, 4);
683 			pci_write_config(dev, SF_PCI_LOMEM, membase, 4);
684 			pci_write_config(dev, SF_PCI_INTLINE, irq, 4);
685 		}
686 	}
687 
688 	/*
689 	 * Map control/status registers.
690 	 */
691 	command = pci_read_config(dev, PCIR_COMMAND, 4);
692 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
693 	pci_write_config(dev, PCIR_COMMAND, command, 4);
694 	command = pci_read_config(dev, PCIR_COMMAND, 4);
695 
696 #ifdef SF_USEIOSPACE
697 	if (!(command & PCIM_CMD_PORTEN)) {
698 		printf("sf%d: failed to enable I/O ports!\n", unit);
699 		error = ENXIO;
700 		return(error);
701 	}
702 #else
703 	if (!(command & PCIM_CMD_MEMEN)) {
704 		printf("sf%d: failed to enable memory mapping!\n", unit);
705 		error = ENXIO;
706 		return(error);
707 	}
708 #endif
709 
710 	rid = SF_RID;
711 	sc->sf_res = bus_alloc_resource_any(dev, SF_RES, &rid, RF_ACTIVE);
712 
713 	if (sc->sf_res == NULL) {
714 		printf ("sf%d: couldn't map ports\n", unit);
715 		error = ENXIO;
716 		return(error);
717 	}
718 
719 	sc->sf_btag = rman_get_bustag(sc->sf_res);
720 	sc->sf_bhandle = rman_get_bushandle(sc->sf_res);
721 
722 	/* Allocate interrupt */
723 	rid = 0;
724 	sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
725 	    RF_SHAREABLE | RF_ACTIVE);
726 
727 	if (sc->sf_irq == NULL) {
728 		printf("sf%d: couldn't map interrupt\n", unit);
729 		error = ENXIO;
730 		goto fail;
731 	}
732 
733 	callout_init(&sc->sf_stat_timer);
734 
735 	/* Reset the adapter. */
736 	sf_reset(sc);
737 
738 	/*
739 	 * Get station address from the EEPROM.
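	 * (It is stored in reverse byte order, which is why the loop
	 * below reads it back to front.)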
740 	 */
741 	for (i = 0; i < ETHER_ADDR_LEN; i++)
742 		sc->arpcom.ac_enaddr[i] =
743 		    sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i);
744 
745 	sc->sf_unit = unit;
746 
747 	/* Allocate the descriptor queues. */
748 	sc->sf_ldata = contigmalloc(sizeof(struct sf_list_data), M_DEVBUF,
749 	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
750 
751 	if (sc->sf_ldata == NULL) {
752 		printf("sf%d: no memory for list buffers!\n", unit);
753 		error = ENXIO;
754 		goto fail;
755 	}
756 
757 	bzero(sc->sf_ldata, sizeof(struct sf_list_data));
758 
759 	/* Do MII setup. */
760 	if (mii_phy_probe(dev, &sc->sf_miibus,
761 	    sf_ifmedia_upd, sf_ifmedia_sts)) {
762 		printf("sf%d: MII without any phy!\n", sc->sf_unit);
763 		error = ENXIO;
764 		goto fail;
765 	}
766 
767 	ifp = &sc->arpcom.ac_if;
768 	ifp->if_softc = sc;
769 	if_initname(ifp, "sf", unit);
770 	ifp->if_mtu = ETHERMTU;
771 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
772 	ifp->if_ioctl = sf_ioctl;
773 	ifp->if_start = sf_start;
774 	ifp->if_watchdog = sf_watchdog;
775 	ifp->if_init = sf_init;
776 	ifp->if_baudrate = 10000000;
777 	ifq_set_maxlen(&ifp->if_snd, SF_TX_DLIST_CNT - 1);
778 	ifq_set_ready(&ifp->if_snd);
779 
780 	/*
781 	 * Call MI attach routine.
782 	 */
783 	ether_ifattach(ifp, sc->arpcom.ac_enaddr, NULL);
784 
785 	error = bus_setup_intr(dev, sc->sf_irq, INTR_NETSAFE,
786 			       sf_intr, sc, &sc->sf_intrhand,
787 			       ifp->if_serializer);
788 
789 	if (error) {
790 		ether_ifdetach(ifp);
791 		device_printf(dev, "couldn't set up irq\n");
792 		goto fail;
793 	}
794 
795 	return(0);
796 
797 fail:
798 	sf_detach(dev);
799 	return(error);
800 }
801 
802 static int
803 sf_detach(device_t dev)
804 {
805 	struct sf_softc *sc = device_get_softc(dev);
806 	struct ifnet *ifp = &sc->arpcom.ac_if;
807 
808 	if (device_is_attached(dev)) {
809 		lwkt_serialize_enter(ifp->if_serializer);
810 		sf_stop(sc);
811 		bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand);
812 		lwkt_serialize_exit(ifp->if_serializer);
813 
814 		ether_ifdetach(ifp);
815 	}
816 
817 	if (sc->sf_miibus)
818 		device_delete_child(dev, sc->sf_miibus);
819 	bus_generic_detach(dev);
820 
821 	if (sc->sf_irq)
822 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq);
823 	if(sc->sf_res)
824 		bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res);
825 
826 	if (sc->sf_ldata) {
827 		contigfree(sc->sf_ldata, sizeof(struct sf_list_data),
828 			   M_DEVBUF);
829 	}
830 
831 	return(0);
832 }
833 
834 static int
835 sf_init_rx_ring(struct sf_softc *sc)
836 {
837 	struct sf_list_data	*ld;
838 	int			i;
839 
840 	ld = sc->sf_ldata;
841 
842 	bzero((char *)ld->sf_rx_dlist_big,
843 	    sizeof(struct sf_rx_bufdesc_type0) * SF_RX_DLIST_CNT);
844 	bzero((char *)ld->sf_rx_clist,
845 	    sizeof(struct sf_rx_cmpdesc_type3) * SF_RX_CLIST_CNT);
846 
847 	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
848 		if (sf_newbuf(sc, &ld->sf_rx_dlist_big[i], NULL) == ENOBUFS)
849 			return(ENOBUFS);
850 	}
851 
852 	return(0);
853 }
854 
855 static void
856 sf_init_tx_ring(struct sf_softc *sc)
857 {
858 	struct sf_list_data	*ld;
859 	int			i;
860 
861 	ld = sc->sf_ldata;
862 
863 	bzero((char *)ld->sf_tx_dlist,
864 	    sizeof(struct sf_tx_bufdesc_type0) * SF_TX_DLIST_CNT);
865 	bzero((char *)ld->sf_tx_clist,
866 	    sizeof(struct sf_tx_cmpdesc_type0) * SF_TX_CLIST_CNT);
867 
868 	for (i = 0; i < SF_TX_DLIST_CNT; i++)
869 		ld->sf_tx_dlist[i].sf_id = SF_TX_BUFDESC_ID;
870 	for (i = 0; i < SF_TX_CLIST_CNT; i++)
871 		ld->sf_tx_clist[i].sf_type = SF_TXCMPTYPE_TX;
872 
873 	ld->sf_tx_dlist[SF_TX_DLIST_CNT - 1].sf_end = 1;
874 	sc->sf_tx_cnt = 0;
875 
876 	return;
877 }
878 
879 static int
880 sf_newbuf(struct sf_softc *sc, struct sf_rx_bufdesc_type0 *c,
881 	  struct mbuf *m)
882 {
883 	struct mbuf		*m_new = NULL;
884 
885 	if (m == NULL) {
886 		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
887 		if (m_new == NULL)
888 			return(ENOBUFS);
889 
890 		MCLGET(m_new, MB_DONTWAIT);
891 		if (!(m_new->m_flags & M_EXT)) {
892 			m_freem(m_new);
893 			return(ENOBUFS);
894 		}
895 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
896 	} else {
897 		m_new = m;
898 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
899 		m_new->m_data = m_new->m_ext.ext_buf;
900 	}
901 
902 	m_adj(m_new, sizeof(u_int64_t));
903 
904 	c->sf_mbuf = m_new;
905 	c->sf_addrlo = SF_RX_HOSTADDR(vtophys(mtod(m_new, caddr_t)));
906 	c->sf_valid = 1;
907 
908 	return(0);
909 }
910 
911 /*
912  * The Starfire is programmed to use 'normal' mode for packet reception,
913  * which means we use the consumer/producer model for both the buffer
914  * descriptor queue and the completion descriptor queue. The only problem
915  * with this is that it involves a lot of register accesses: we have to
916  * read the RX completion consumer and producer indexes and the RX buffer
917  * producer index, and then write the updated RX completion consumer and
918  * RX buffer producer indexes back to the chip.
919  * put each index in a separate register, especially given that the damn
920  * NIC has a 512K register space.
921  *
922  * In spite of all the lovely features that Adaptec crammed into the 6915,
923  * it is marred by one truly stupid design flaw, which is that receive
924  * buffer addresses must be aligned on a longword boundary. This forces
925  * the packet payload to be unaligned, which is suboptimal on the x86 and
926  * completely unusable on the Alpha. Our only recourse is to copy received
927  * packets into properly aligned buffers before handing them off.
928  */
929 
930 static void
931 sf_rxeof(struct sf_softc *sc)
932 {
933 	struct mbuf		*m;
934 	struct ifnet		*ifp;
935 	struct sf_rx_bufdesc_type0	*desc;
936 	struct sf_rx_cmpdesc_type3	*cur_rx;
937 	u_int32_t		rxcons, rxprod;
938 	int			cmpprodidx, cmpconsidx, bufprodidx;
939 
940 	ifp = &sc->arpcom.ac_if;
941 
942 	rxcons = csr_read_4(sc, SF_CQ_CONSIDX);
943 	rxprod = csr_read_4(sc, SF_RXDQ_PTR_Q1);
944 	cmpprodidx = SF_IDX_LO(csr_read_4(sc, SF_CQ_PRODIDX));
945 	cmpconsidx = SF_IDX_LO(rxcons);
946 	bufprodidx = SF_IDX_LO(rxprod);
947 
948 	while (cmpconsidx != cmpprodidx) {
949 		struct mbuf		*m0;
950 
951 		cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx];
952 		desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx];
953 		m = desc->sf_mbuf;
954 		SF_INC(cmpconsidx, SF_RX_CLIST_CNT);
955 		SF_INC(bufprodidx, SF_RX_DLIST_CNT);
956 
957 		if (!(cur_rx->sf_status1 & SF_RXSTAT1_OK)) {
958 			ifp->if_ierrors++;
959 			sf_newbuf(sc, desc, m);
960 			continue;
961 		}
962 
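		/*
		 * Copy the frame into a fresh mbuf chain: grabbing
		 * ETHER_ALIGN extra bytes up front and trimming them
		 * off again below leaves the packet payload longword
		 * aligned in the new buffer, as the comment above
		 * explains.
		 */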
963 		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
964 		    cur_rx->sf_len + ETHER_ALIGN, 0, ifp, NULL);
965 		sf_newbuf(sc, desc, m);
966 		if (m0 == NULL) {
967 			ifp->if_ierrors++;
968 			continue;
969 		}
970 		m_adj(m0, ETHER_ALIGN);
971 		m = m0;
972 
973 		ifp->if_ipackets++;
974 
975 		ifp->if_input(ifp, m);
976 	}
977 
978 	csr_write_4(sc, SF_CQ_CONSIDX,
979 	    (rxcons & ~SF_CQ_CONSIDX_RXQ1) | cmpconsidx);
980 	csr_write_4(sc, SF_RXDQ_PTR_Q1,
981 	    (rxprod & ~SF_RXDQ_PRODIDX) | bufprodidx);
982 
983 	return;
984 }
985 
986 /*
987  * Read the transmit status from the completion queue and release
988  * mbufs. Note that the buffer descriptor index in the completion
989  * descriptor is an offset from the start of the transmit buffer
990  * descriptor list in bytes. This is important because the manual
991  * gives the impression that it should match the producer/consumer
992  * index, which is the offset in 8 byte blocks.
993  */
994 static void
995 sf_txeof(struct sf_softc *sc)
996 {
997 	int			txcons, cmpprodidx, cmpconsidx;
998 	struct sf_tx_cmpdesc_type1 *cur_cmp;
999 	struct sf_tx_bufdesc_type0 *cur_tx;
1000 	struct ifnet		*ifp;
1001 
1002 	ifp = &sc->arpcom.ac_if;
1003 
1004 	txcons = csr_read_4(sc, SF_CQ_CONSIDX);
1005 	cmpprodidx = SF_IDX_HI(csr_read_4(sc, SF_CQ_PRODIDX));
1006 	cmpconsidx = SF_IDX_HI(txcons);
1007 
1008 	while (cmpconsidx != cmpprodidx) {
1009 		cur_cmp = &sc->sf_ldata->sf_tx_clist[cmpconsidx];
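		/*
		 * sf_index is a byte offset into the TX buffer
		 * descriptor ring; each type 0 descriptor occupies
		 * 128 bytes, hence the shift by 7 to recover the
		 * ring index.
		 */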
1010 		cur_tx = &sc->sf_ldata->sf_tx_dlist[cur_cmp->sf_index >> 7];
1011 
1012 		if (cur_cmp->sf_txstat & SF_TXSTAT_TX_OK)
1013 			ifp->if_opackets++;
1014 		else {
1015 			if (cur_cmp->sf_txstat & SF_TXSTAT_TX_UNDERRUN)
1016 				sf_txthresh_adjust(sc);
1017 			ifp->if_oerrors++;
1018 		}
1019 
1020 		sc->sf_tx_cnt--;
1021 		if (cur_tx->sf_mbuf != NULL) {
1022 			m_freem(cur_tx->sf_mbuf);
1023 			cur_tx->sf_mbuf = NULL;
1024 		} else
1025 			break;
1026 		SF_INC(cmpconsidx, SF_TX_CLIST_CNT);
1027 	}
1028 
1029 	ifp->if_timer = 0;
1030 	ifp->if_flags &= ~IFF_OACTIVE;
1031 
1032 	csr_write_4(sc, SF_CQ_CONSIDX,
1033 	    (txcons & ~SF_CQ_CONSIDX_TXQ) |
1034 	    ((cmpconsidx << 16) & 0xFFFF0000));
1035 
1036 	return;
1037 }
1038 
1039 static void
1040 sf_txthresh_adjust(struct sf_softc *sc)
1041 {
1042 	u_int32_t		txfctl;
1043 	u_int8_t		txthresh;
1044 
1045 	txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
1046 	txthresh = txfctl & SF_TXFRMCTL_TXTHRESH;
1047 	if (txthresh < 0xFF) {
1048 		txthresh++;
1049 		txfctl &= ~SF_TXFRMCTL_TXTHRESH;
1050 		txfctl |= txthresh;
1051 #ifdef DIAGNOSTIC
1052 		printf("sf%d: tx underrun, increasing "
1053 		    "tx threshold to %d bytes\n",
1054 		    sc->sf_unit, txthresh * 4);
1055 #endif
1056 		csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
1057 	}
1058 
1059 	return;
1060 }
1061 
1062 static void
1063 sf_intr(void *arg)
1064 {
1065 	struct sf_softc		*sc;
1066 	struct ifnet		*ifp;
1067 	u_int32_t		status;
1068 
1069 	sc = arg;
1070 	ifp = &sc->arpcom.ac_if;
1071 
1072 	if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED))
1073 		return;
1074 
1075 	/* Disable interrupts. */
1076 	csr_write_4(sc, SF_IMR, 0x00000000);
1077 
1078 	for (;;) {
1079 		status = csr_read_4(sc, SF_ISR);
1080 		if (status)
1081 			csr_write_4(sc, SF_ISR, status);
1082 
1083 		if (!(status & SF_INTRS))
1084 			break;
1085 
1086 		if (status & SF_ISR_RXDQ1_DMADONE)
1087 			sf_rxeof(sc);
1088 
1089 		if (status & SF_ISR_TX_TXDONE ||
1090 		    status & SF_ISR_TX_DMADONE ||
1091 		    status & SF_ISR_TX_QUEUEDONE)
1092 			sf_txeof(sc);
1093 
1094 		if (status & SF_ISR_TX_LOFIFO)
1095 			sf_txthresh_adjust(sc);
1096 
1097 		if (status & SF_ISR_ABNORMALINTR) {
1098 			if (status & SF_ISR_STATSOFLOW) {
1099 				callout_stop(&sc->sf_stat_timer);
1100 				sf_stats_update(sc);
1101 			} else
1102 				sf_init(sc);
1103 		}
1104 	}
1105 
1106 	/* Re-enable interrupts. */
1107 	csr_write_4(sc, SF_IMR, SF_INTRS);
1108 
1109 	if (!ifq_is_empty(&ifp->if_snd))
1110 		sf_start(ifp);
1111 
1112 	return;
1113 }
1114 
1115 static void
1116 sf_init(void *xsc)
1117 {
1118 	struct sf_softc *sc = xsc;
1119 	struct ifnet *ifp = &sc->arpcom.ac_if;
1120 	int i;
1121 
1122 	sf_stop(sc);
1123 	sf_reset(sc);
1124 
1125 	/* Init all the receive filter registers */
1126 	for (i = SF_RXFILT_PERFECT_BASE;
1127 	    i < (SF_RXFILT_HASH_MAX + 1); i += 4)
1128 		csr_write_4(sc, i, 0);
1129 
1130 	/* Empty stats counter registers. */
1131 	for (i = 0; i < sizeof(struct sf_stats)/sizeof(u_int32_t); i++)
1132 		csr_write_4(sc, SF_STATS_BASE +
1133 		    (i + sizeof(u_int32_t)), 0);
1134 
1135 	/* Init our MAC address */
1136 	csr_write_4(sc, SF_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
1137 	csr_write_4(sc, SF_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
1138 	sf_setperf(sc, 0, (caddr_t)&sc->arpcom.ac_enaddr);
1139 
1140 	if (sf_init_rx_ring(sc) == ENOBUFS) {
1141 		printf("sf%d: initialization failed: no "
1142 		    "memory for rx buffers\n", sc->sf_unit);
1143 		return;
1144 	}
1145 
1146 	sf_init_tx_ring(sc);
1147 
1148 	csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL|SF_HASHMODE_WITHVLAN);
1149 
1150 	/* If we want promiscuous mode, set the allframes bit. */
1151 	if (ifp->if_flags & IFF_PROMISC) {
1152 		SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
1153 	} else {
1154 		SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
1155 	}
1156 
1157 	if (ifp->if_flags & IFF_BROADCAST) {
1158 		SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_BROAD);
1159 	} else {
1160 		SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_BROAD);
1161 	}
1162 
1163 	/*
1164 	 * Load the multicast filter.
1165 	 */
1166 	sf_setmulti(sc);
1167 
1168 	/* Init the completion queue indexes */
1169 	csr_write_4(sc, SF_CQ_CONSIDX, 0);
1170 	csr_write_4(sc, SF_CQ_PRODIDX, 0);
1171 
1172 	/* Init the RX completion queue */
1173 	csr_write_4(sc, SF_RXCQ_CTL_1,
1174 	    vtophys(sc->sf_ldata->sf_rx_clist) & SF_RXCQ_ADDR);
1175 	SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_3);
1176 
1177 	/* Init RX DMA control. */
1178 	SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS);
1179 
1180 	/* Init the RX buffer descriptor queue. */
1181 	csr_write_4(sc, SF_RXDQ_ADDR_Q1,
1182 	    vtophys(sc->sf_ldata->sf_rx_dlist_big));
1183 	csr_write_4(sc, SF_RXDQ_CTL_1, (MCLBYTES << 16) | SF_DESCSPACE_16BYTES);
1184 	csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1);
1185 
1186 	/* Init the TX completion queue */
1187 	csr_write_4(sc, SF_TXCQ_CTL,
1188 	    vtophys(sc->sf_ldata->sf_tx_clist) & SF_RXCQ_ADDR);
1189 
1190 	/* Init the TX buffer descriptor queue. */
1191 	csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO,
1192 		vtophys(sc->sf_ldata->sf_tx_dlist));
1193 	SF_SETBIT(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX);
1194 	csr_write_4(sc, SF_TXDQ_CTL,
1195 	    SF_TXBUFDESC_TYPE0|SF_TXMINSPACE_128BYTES|SF_TXSKIPLEN_8BYTES);
1196 	SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_NODMACMP);
1197 
1198 	/* Enable autopadding of short TX frames. */
1199 	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD);
1200 
1201 	/* Enable interrupts. */
1202 	csr_write_4(sc, SF_IMR, SF_INTRS);
1203 	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB);
1204 
1205 	/* Enable the RX and TX engines. */
1206 	SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB|SF_ETHCTL_RXDMA_ENB);
1207 	SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TX_ENB|SF_ETHCTL_TXDMA_ENB);
1208 
1209 	/*mii_mediachg(mii);*/
1210 	sf_ifmedia_upd(ifp);
1211 
1212 	ifp->if_flags |= IFF_RUNNING;
1213 	ifp->if_flags &= ~IFF_OACTIVE;
1214 
1215 	callout_reset(&sc->sf_stat_timer, hz, sf_stats_update, sc);
1216 }
1217 
1218 static int
1219 sf_encap(struct sf_softc *sc, struct sf_tx_bufdesc_type0 *c,
1220 	 struct mbuf *m_head)
1221 {
1222 	int			frag = 0;
1223 	struct sf_frag		*f = NULL;
1224 	struct mbuf		*m;
1225 
1226 	m = m_head;
1227 
1228 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1229 		if (m->m_len != 0) {
1230 			if (frag == SF_MAXFRAGS)
1231 				break;
1232 			f = &c->sf_frags[frag];
1233 			if (frag == 0)
1234 				f->sf_pktlen = m_head->m_pkthdr.len;
1235 			f->sf_fraglen = m->m_len;
1236 			f->sf_addr = vtophys(mtod(m, vm_offset_t));
1237 			frag++;
1238 		}
1239 	}
1240 
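	/*
	 * If the chain had more fragments than SF_MAXFRAGS, m is still
	 * non-NULL here; coalesce the whole packet into a single mbuf
	 * (or cluster) so that it fits in one descriptor.
	 */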
1241 	if (m != NULL) {
1242 		struct mbuf		*m_new = NULL;
1243 
1244 		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
1245 		if (m_new == NULL) {
1246 			printf("sf%d: no memory for tx list\n", sc->sf_unit);
1247 			return(1);
1248 		}
1249 
1250 		if (m_head->m_pkthdr.len > MHLEN) {
1251 			MCLGET(m_new, MB_DONTWAIT);
1252 			if (!(m_new->m_flags & M_EXT)) {
1253 				m_freem(m_new);
1254 				printf("sf%d: no memory for tx list\n",
1255 				    sc->sf_unit);
1256 				return(1);
1257 			}
1258 		}
1259 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1260 		    mtod(m_new, caddr_t));
1261 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1262 		m_freem(m_head);
1263 		m_head = m_new;
1264 		f = &c->sf_frags[0];
1265 		f->sf_fraglen = f->sf_pktlen = m_head->m_pkthdr.len;
1266 		f->sf_addr = vtophys(mtod(m_head, caddr_t));
1267 		frag = 1;
1268 	}
1269 
1270 	c->sf_mbuf = m_head;
1271 	c->sf_id = SF_TX_BUFDESC_ID;
1272 	c->sf_fragcnt = frag;
1273 	c->sf_intr = 1;
1274 	c->sf_caltcp = 0;
1275 	c->sf_crcen = 1;
1276 
1277 	return(0);
1278 }
1279 
1280 static void
1281 sf_start(struct ifnet *ifp)
1282 {
1283 	struct sf_softc		*sc;
1284 	struct sf_tx_bufdesc_type0 *cur_tx = NULL;
1285 	struct mbuf		*m_head = NULL;
1286 	int			i, txprod;
1287 
1288 	sc = ifp->if_softc;
1289 
1290 	if (!sc->sf_link)
1291 		return;
1292 
1293 	if (ifp->if_flags & IFF_OACTIVE)
1294 		return;
1295 
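	/*
	 * The chip keeps the TX producer index in the high word of
	 * SF_TXDQ_PRODIDX in units of 8-byte blocks; with the 128-byte
	 * type 0 descriptors used here that is 16 blocks per slot, so
	 * shift right by 4 to get a ring index (and back left again
	 * when the new index is written out below).
	 */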
1296 	txprod = csr_read_4(sc, SF_TXDQ_PRODIDX);
1297 	i = SF_IDX_HI(txprod) >> 4;
1298 
1299 	if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) {
1300 		printf("sf%d: TX ring full, resetting\n", sc->sf_unit);
1301 		sf_init(sc);
1302 		txprod = csr_read_4(sc, SF_TXDQ_PRODIDX);
1303 		i = SF_IDX_HI(txprod) >> 4;
1304 	}
1305 
1306 	while(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf == NULL) {
1307 		if (sc->sf_tx_cnt >= (SF_TX_DLIST_CNT - 5)) {
1308 			ifp->if_flags |= IFF_OACTIVE;
1309 			cur_tx = NULL;
1310 			break;
1311 		}
1312 		m_head = ifq_poll(&ifp->if_snd);
1313 		if (m_head == NULL)
1314 			break;
1315 
1316 		cur_tx = &sc->sf_ldata->sf_tx_dlist[i];
1317 		if (sf_encap(sc, cur_tx, m_head)) {
1318 			ifp->if_flags |= IFF_OACTIVE;
1319 			cur_tx = NULL;
1320 			break;
1321 		}
1322 		ifq_dequeue(&ifp->if_snd, m_head);
1323 		BPF_MTAP(ifp, cur_tx->sf_mbuf);
1324 
1325 		SF_INC(i, SF_TX_DLIST_CNT);
1326 		sc->sf_tx_cnt++;
1327 		/*
1328 		 * Don't let the TX DMA queue get too full.
1329 		 */
1330 		if (sc->sf_tx_cnt > 64)
1331 			break;
1332 	}
1333 
1334 	if (cur_tx == NULL)
1335 		return;
1336 
1337 	/* Transmit */
1338 	csr_write_4(sc, SF_TXDQ_PRODIDX,
1339 	    (txprod & ~SF_TXDQ_PRODIDX_HIPRIO) |
1340 	    ((i << 20) & 0xFFFF0000));
1341 
1342 	ifp->if_timer = 5;
1343 
1344 	return;
1345 }
1346 
1347 static void
1348 sf_stop(struct sf_softc *sc)
1349 {
1350 	int			i;
1351 	struct ifnet		*ifp;
1352 
1353 	ifp = &sc->arpcom.ac_if;
1354 
1355 	callout_stop(&sc->sf_stat_timer);
1356 
1357 	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
1358 	csr_write_4(sc, SF_CQ_CONSIDX, 0);
1359 	csr_write_4(sc, SF_CQ_PRODIDX, 0);
1360 	csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0);
1361 	csr_write_4(sc, SF_RXDQ_CTL_1, 0);
1362 	csr_write_4(sc, SF_RXDQ_PTR_Q1, 0);
1363 	csr_write_4(sc, SF_TXCQ_CTL, 0);
1364 	csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0);
1365 	csr_write_4(sc, SF_TXDQ_CTL, 0);
1366 	sf_reset(sc);
1367 
1368 	sc->sf_link = 0;
1369 
1370 	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
1371 		if (sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf != NULL) {
1372 			m_freem(sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf);
1373 			sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf = NULL;
1374 		}
1375 	}
1376 
1377 	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
1378 		if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) {
1379 			m_freem(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf);
1380 			sc->sf_ldata->sf_tx_dlist[i].sf_mbuf = NULL;
1381 		}
1382 	}
1383 
1384 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1385 
1386 	return;
1387 }
1388 
1389 /*
1390  * Note: it is important that this function not be interrupted. We
1391  * use a two-stage register access scheme: if we are interrupted in
1392  * between setting the indirect address register and reading from the
1393  * indirect data register, the contents of the address register could
1394  * be changed out from under us.
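 * In this driver the routine runs with the interface serializer held
 * (entered below), which keeps it from racing the interrupt handler
 * registered on the same serializer.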
1395  */
1396 static void
1397 sf_stats_update(void *xsc)
1398 {
1399 	struct sf_softc *sc = xsc;
1400 	struct ifnet *ifp = &sc->arpcom.ac_if;
1401 	struct mii_data *mii = device_get_softc(sc->sf_miibus);
1402 	struct sf_stats		stats;
1403 	u_int32_t		*ptr;
1404 	int			i;
1405 
1406 	lwkt_serialize_enter(ifp->if_serializer);
1407 
1408 	ptr = (u_int32_t *)&stats;
1409 	for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++)
1410 		ptr[i] = csr_read_4(sc, SF_STATS_BASE +
1411 		    (i + sizeof(u_int32_t)));
1412 
1413 	for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++)
1414 		csr_write_4(sc, SF_STATS_BASE +
1415 		    (i + sizeof(u_int32_t)), 0);
1416 
1417 	ifp->if_collisions += stats.sf_tx_single_colls +
1418 	    stats.sf_tx_multi_colls + stats.sf_tx_excess_colls;
1419 
1420 	mii_tick(mii);
1421 	if (!sc->sf_link) {
1422 		mii_pollstat(mii);
1423 		if (mii->mii_media_status & IFM_ACTIVE &&
1424 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1425 			sc->sf_link++;
1426 			if (!ifq_is_empty(&ifp->if_snd))
1427 				sf_start(ifp);
		}
1428 	}
1429 
1430 	callout_reset(&sc->sf_stat_timer, hz, sf_stats_update, sc);
1431 
1432 	lwkt_serialize_exit(ifp->if_serializer);
1433 }
1434 
1435 static void
1436 sf_watchdog(struct ifnet *ifp)
1437 {
1438 	struct sf_softc		*sc;
1439 
1440 	sc = ifp->if_softc;
1441 
1442 	ifp->if_oerrors++;
1443 	printf("sf%d: watchdog timeout\n", sc->sf_unit);
1444 
1445 	sf_stop(sc);
1446 	sf_reset(sc);
1447 	sf_init(sc);
1448 
1449 	if (!ifq_is_empty(&ifp->if_snd))
1450 		sf_start(ifp);
1451 
1452 	return;
1453 }
1454 
1455 static void
1456 sf_shutdown(device_t dev)
1457 {
1458 	struct sf_softc	*sc;
1459 	struct ifnet *ifp;
1460 
1461 	sc = device_get_softc(dev);
1462 	ifp = &sc->arpcom.ac_if;
1463 	lwkt_serialize_enter(ifp->if_serializer);
1464 	sf_stop(sc);
1465 	lwkt_serialize_exit(ifp->if_serializer);
1466 
1467 	return;
1468 }
1469