xref: /dragonfly/sys/dev/netif/sk/if_sk.c (revision c9f721c2)
1 /*	$OpenBSD: if_sk.c,v 1.33 2003/08/12 05:23:06 nate Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999, 2000
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_sk.c,v 1.19.2.9 2003/03/05 18:42:34 njl Exp $
35  * $DragonFly: src/sys/dev/netif/sk/if_sk.c,v 1.20 2004/09/23 23:18:01 dillon Exp $
38  */
39 
40 /*
41  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
42  *
43  * Permission to use, copy, modify, and distribute this software for any
44  * purpose with or without fee is hereby granted, provided that the above
45  * copyright notice and this permission notice appear in all copies.
46  *
47  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
48  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
49  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
50  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
51  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
52  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
53  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
54  */
55 
56 /*
57  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
58  * the SK-984x series adapters, both single port and dual port.
59  * References:
60  * 	The XaQti XMAC II datasheet,
61  *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
62  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
63  *
64  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
65  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
66  * convenience to others until Vitesse corrects this problem:
67  *
68  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
69  *
70  * Written by Bill Paul <wpaul@ee.columbia.edu>
71  * Department of Electrical Engineering
72  * Columbia University, New York City
73  */
74 
75 /*
76  * The SysKonnect gigabit ethernet adapters consist of two main
77  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
78  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
79  * components and a PHY while the GEnesis controller provides a PCI
80  * interface with DMA support. Each card may have between 512K and
81  * 2MB of SRAM on board depending on the configuration.
82  *
83  * The SysKonnect GEnesis controller can have either one or two XMAC
84  * chips connected to it, allowing single or dual port NIC configurations.
85  * SysKonnect has the distinction of being the only vendor on the market
86  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
87  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
88  * XMAC registers. This driver takes advantage of these features to allow
89  * both XMACs to operate as independent interfaces.
90  */
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/sockio.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kernel.h>
98 #include <sys/socket.h>
99 #include <sys/queue.h>
100 
101 #include <net/if.h>
102 #include <net/if_arp.h>
103 #include <net/ethernet.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 
107 #include <net/bpf.h>
108 
109 #include <vm/vm.h>              /* for vtophys */
110 #include <vm/pmap.h>            /* for vtophys */
111 #include <machine/clock.h>      /* for DELAY */
112 #include <machine/bus_pio.h>
113 #include <machine/bus_memio.h>
114 #include <machine/bus.h>
115 #include <machine/resource.h>
116 #include <sys/bus.h>
117 #include <sys/rman.h>
118 
119 #include "../mii_layer/mii.h"
120 #include "../mii_layer/miivar.h"
121 #include "../mii_layer/brgphyreg.h"
122 
123 #include <bus/pci/pcireg.h>
124 #include <bus/pci/pcivar.h>
125 
126 #if 0
127 #define SK_USEIOSPACE
128 #endif
129 
130 #include "if_skreg.h"
131 #include "xmaciireg.h"
132 #include "yukonreg.h"
133 
134 /* "controller miibus0" required.  See GENERIC if you get errors here. */
135 #include "miibus_if.h"
136 
137 static struct sk_type sk_devs[] = {
138 	{
139 		VENDORID_SK,
140 		DEVICEID_SK_V1,
141 		"SysKonnect Gigabit Ethernet (V1.0)"
142 	},
143 	{
144 		VENDORID_SK,
145 		DEVICEID_SK_V2,
146 		"SysKonnect Gigabit Ethernet (V2.0)"
147 	},
148 	{
149 		VENDORID_MARVELL,
150 		DEVICEID_SK_V2,
151 		"Marvell Gigabit Ethernet"
152 	},
153 	{
154 		VENDORID_3COM,
155 		DEVICEID_3COM_3C940,
156 		"3Com 3C940 Gigabit Ethernet"
157 	},
158 	{
159 		VENDORID_LINKSYS,
160 		DEVICEID_LINKSYS_EG1032,
161 		"Linksys EG1032 Gigabit Ethernet"
162 	},
163 	{
164 		VENDORID_DLINK,
165 		DEVICEID_DLINK_DGE530T,
166 		"D-Link DGE-530T Gigabit Ethernet"
167 	},
168 	{ 0, 0, NULL }
169 };
170 
171 static int skc_probe		(device_t);
172 static int skc_attach		(device_t);
173 static int skc_detach		(device_t);
174 static void skc_shutdown	(device_t);
175 static int sk_probe		(device_t);
176 static int sk_attach		(device_t);
177 static int sk_detach		(device_t);
178 static void sk_tick		(void *);
179 static void sk_intr		(void *);
180 static void sk_intr_bcom	(struct sk_if_softc *);
181 static void sk_intr_xmac	(struct sk_if_softc *);
182 static void sk_intr_yukon	(struct sk_if_softc *);
183 static void sk_rxeof		(struct sk_if_softc *);
184 static void sk_txeof		(struct sk_if_softc *);
185 static int sk_encap		(struct sk_if_softc *, struct mbuf *,
186 					u_int32_t *);
187 static void sk_start		(struct ifnet *);
188 static int sk_ioctl		(struct ifnet *, u_long, caddr_t,
189 					struct ucred *);
190 static void sk_init		(void *);
191 static void sk_init_xmac	(struct sk_if_softc *);
192 static void sk_init_yukon	(struct sk_if_softc *);
193 static void sk_stop		(struct sk_if_softc *);
194 static void sk_watchdog		(struct ifnet *);
195 static int sk_ifmedia_upd	(struct ifnet *);
196 static void sk_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
197 static void sk_reset		(struct sk_softc *);
198 static int sk_newbuf		(struct sk_if_softc *,
199 					struct sk_chain *, struct mbuf *);
200 static int sk_alloc_jumbo_mem	(struct sk_if_softc *);
201 static void *sk_jalloc		(struct sk_if_softc *);
202 static void sk_jfree		(caddr_t, u_int);
203 static void sk_jref		(caddr_t, u_int);
204 static int sk_init_rx_ring	(struct sk_if_softc *);
205 static void sk_init_tx_ring	(struct sk_if_softc *);
206 static u_int32_t sk_win_read_4	(struct sk_softc *, int);
207 static u_int16_t sk_win_read_2	(struct sk_softc *, int);
208 static u_int8_t sk_win_read_1	(struct sk_softc *, int);
209 static void sk_win_write_4	(struct sk_softc *, int, u_int32_t);
210 static void sk_win_write_2	(struct sk_softc *, int, u_int32_t);
211 static void sk_win_write_1	(struct sk_softc *, int, u_int32_t);
212 static u_int8_t sk_vpd_readbyte	(struct sk_softc *, int);
213 static void sk_vpd_read_res	(struct sk_softc *,
214 					struct vpd_res *, int);
215 static void sk_vpd_read		(struct sk_softc *);
216 
217 static int sk_miibus_readreg	(device_t, int, int);
218 static int sk_miibus_writereg	(device_t, int, int, int);
219 static void sk_miibus_statchg	(device_t);
220 
221 static int sk_xmac_miibus_readreg     (struct sk_if_softc *, int, int);
222 static int sk_xmac_miibus_writereg    (struct sk_if_softc *, int, int, int);
223 static void sk_xmac_miibus_statchg    (struct sk_if_softc *);
224 
225 static int sk_marv_miibus_readreg     (struct sk_if_softc *, int, int);
226 static int sk_marv_miibus_writereg    (struct sk_if_softc *, int, int, int);
227 static void sk_marv_miibus_statchg    (struct sk_if_softc *);
228 
229 static u_int32_t xmac_calchash	(caddr_t);
230 static u_int32_t gmac_calchash	(caddr_t);
231 static void sk_setfilt		(struct sk_if_softc *, caddr_t, int);
232 static void sk_setmulti		(struct sk_if_softc *);
233 static void sk_setpromisc	(struct sk_if_softc *);
234 
235 #ifdef SK_USEIOSPACE
236 #define SK_RES		SYS_RES_IOPORT
237 #define SK_RID		SK_PCI_LOIO
238 #else
239 #define SK_RES		SYS_RES_MEMORY
240 #define SK_RID		SK_PCI_LOMEM
241 #endif
242 
243 /*
244  * Note that we have newbus methods for both the GEnesis controller
245  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
246  * the miibus code is a child of the XMACs. We need to do it this way
247  * so that the miibus drivers can access the PHY registers on the
248  * right PHY. It's not quite what I had in mind, but it's the only
249  * design that achieves the desired effect.
250  */
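/*
 * A rough sketch of the resulting device hierarchy on a dual port card
 * (instance numbers are illustrative only):
 *
 *	pci0
 *	  skc0			(GEnesis controller, skc_driver)
 *	    sk0			(XMAC/port A, sk_driver)
 *	      miibus0		(PHY for port A)
 *	    sk1			(XMAC/port B, sk_driver)
 *	      miibus1		(PHY for port B)
 */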
251 static device_method_t skc_methods[] = {
252 	/* Device interface */
253 	DEVMETHOD(device_probe,		skc_probe),
254 	DEVMETHOD(device_attach,	skc_attach),
255 	DEVMETHOD(device_detach,	skc_detach),
256 	DEVMETHOD(device_shutdown,	skc_shutdown),
257 
258 	/* bus interface */
259 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
260 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
261 
262 	{ 0, 0 }
263 };
264 
265 static driver_t skc_driver = {
266 	"skc",
267 	skc_methods,
268 	sizeof(struct sk_softc)
269 };
270 
271 static devclass_t skc_devclass;
272 
273 static device_method_t sk_methods[] = {
274 	/* Device interface */
275 	DEVMETHOD(device_probe,		sk_probe),
276 	DEVMETHOD(device_attach,	sk_attach),
277 	DEVMETHOD(device_detach,	sk_detach),
278 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
279 
280 	/* bus interface */
281 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
282 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
283 
284 	/* MII interface */
285 	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
286 	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
287 	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
288 
289 	{ 0, 0 }
290 };
291 
292 static driver_t sk_driver = {
293 	"sk",
294 	sk_methods,
295 	sizeof(struct sk_if_softc)
296 };
297 
298 static devclass_t sk_devclass;
299 
300 DECLARE_DUMMY_MODULE(if_sk);
301 DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
302 DRIVER_MODULE(if_sk, skc, sk_driver, sk_devclass, 0, 0);
303 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
304 
305 #define SK_SETBIT(sc, reg, x)		\
306 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
307 
308 #define SK_CLRBIT(sc, reg, x)		\
309 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
310 
311 #define SK_WIN_SETBIT_4(sc, reg, x)	\
312 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
313 
314 #define SK_WIN_CLRBIT_4(sc, reg, x)	\
315 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
316 
317 #define SK_WIN_SETBIT_2(sc, reg, x)	\
318 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
319 
320 #define SK_WIN_CLRBIT_2(sc, reg, x)	\
321 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
322 
323 static u_int32_t sk_win_read_4(sc, reg)
324 	struct sk_softc		*sc;
325 	int			reg;
326 {
327 #ifdef SK_USEIOSPACE
328 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
329 	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
330 #else
331 	return(CSR_READ_4(sc, reg));
332 #endif
333 }
334 
335 static u_int16_t sk_win_read_2(sc, reg)
336 	struct sk_softc		*sc;
337 	int			reg;
338 {
339 #ifdef SK_USEIOSPACE
340 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
341 	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
342 #else
343 	return(CSR_READ_2(sc, reg));
344 #endif
345 }
346 
347 static u_int8_t sk_win_read_1(sc, reg)
348 	struct sk_softc		*sc;
349 	int			reg;
350 {
351 #ifdef SK_USEIOSPACE
352 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
353 	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
354 #else
355 	return(CSR_READ_1(sc, reg));
356 #endif
357 }
358 
359 static void sk_win_write_4(sc, reg, val)
360 	struct sk_softc		*sc;
361 	int			reg;
362 	u_int32_t		val;
363 {
364 #ifdef SK_USEIOSPACE
365 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
366 	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
367 #else
368 	CSR_WRITE_4(sc, reg, val);
369 #endif
370 	return;
371 }
372 
373 static void sk_win_write_2(sc, reg, val)
374 	struct sk_softc		*sc;
375 	int			reg;
376 	u_int32_t		val;
377 {
378 #ifdef SK_USEIOSPACE
379 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
380 	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
381 #else
382 	CSR_WRITE_2(sc, reg, val);
383 #endif
384 	return;
385 }
386 
387 static void sk_win_write_1(sc, reg, val)
388 	struct sk_softc		*sc;
389 	int			reg;
390 	u_int32_t		val;
391 {
392 #ifdef SK_USEIOSPACE
393 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
394 	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
395 #else
396 	CSR_WRITE_1(sc, reg, val);
397 #endif
398 	return;
399 }
400 
401 /*
402  * The VPD EEPROM contains Vital Product Data, as suggested in
403  * the PCI 2.1 specification. The VPD data is separated into areas
404  * denoted by resource IDs. The SysKonnect VPD contains an ID string
405  * resource (the name of the adapter), a read-only area resource
406  * containing various key/data fields and a read/write area which
407  * can be used to store asset management information or log messages.
408  * We read the ID string and read-only area into buffers attached to
409  * the controller softc structure for later use. At the moment,
410  * we only use the ID string during sk_attach().
411  */
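/*
 * Note that sk_vpd_readbyte() below returns 0 if the VPD flag never
 * asserts within SK_TIMEOUT polls, so a wedged VPD interface shows up
 * as an all-zero (empty) product string rather than as an error.
 */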
412 static u_int8_t sk_vpd_readbyte(sc, addr)
413 	struct sk_softc		*sc;
414 	int			addr;
415 {
416 	int			i;
417 
418 	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
419 	for (i = 0; i < SK_TIMEOUT; i++) {
420 		DELAY(1);
421 		if (sk_win_read_2(sc,
422 		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
423 			break;
424 	}
425 
426 	if (i == SK_TIMEOUT)
427 		return(0);
428 
429 	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
430 }
431 
432 static void sk_vpd_read_res(sc, res, addr)
433 	struct sk_softc		*sc;
434 	struct vpd_res		*res;
435 	int			addr;
436 {
437 	int			i;
438 	u_int8_t		*ptr;
439 
440 	ptr = (u_int8_t *)res;
441 	for (i = 0; i < sizeof(struct vpd_res); i++)
442 		ptr[i] = sk_vpd_readbyte(sc, i + addr);
443 
444 	return;
445 }
446 
447 static void sk_vpd_read(sc)
448 	struct sk_softc		*sc;
449 {
450 	int			pos = 0, i;
451 	struct vpd_res		res;
452 
453 	if (sc->sk_vpd_prodname != NULL)
454 		free(sc->sk_vpd_prodname, M_DEVBUF);
455 	if (sc->sk_vpd_readonly != NULL)
456 		free(sc->sk_vpd_readonly, M_DEVBUF);
457 	sc->sk_vpd_prodname = NULL;
458 	sc->sk_vpd_readonly = NULL;
459 
460 	sk_vpd_read_res(sc, &res, pos);
461 
462 	if (res.vr_id != VPD_RES_ID) {
463 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
464 		    sc->sk_unit, VPD_RES_ID, res.vr_id);
465 		return;
466 	}
467 
468 	pos += sizeof(res);
469 	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
470 	for (i = 0; i < res.vr_len; i++)
471 		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
472 	sc->sk_vpd_prodname[i] = '\0';
473 	pos += i;
474 
475 	sk_vpd_read_res(sc, &res, pos);
476 
477 	if (res.vr_id != VPD_RES_READ) {
478 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
479 		    sc->sk_unit, VPD_RES_READ, res.vr_id);
480 		return;
481 	}
482 
483 	pos += sizeof(res);
484 	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_INTWAIT);
485 	for (i = 0; i < res.vr_len; i++)
486 		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
487 
488 	return;
489 }
490 
491 static int sk_miibus_readreg(dev, phy, reg)
492 	device_t		dev;
493 	int			phy, reg;
494 {
495 	struct sk_if_softc	*sc_if;
496 
497 	sc_if = device_get_softc(dev);
498 
499 	switch(sc_if->sk_softc->sk_type) {
500 	case SK_GENESIS:
501 		return(sk_xmac_miibus_readreg(sc_if, phy, reg));
502 	case SK_YUKON:
503 		return(sk_marv_miibus_readreg(sc_if, phy, reg));
504 	}
505 
506 	return(0);
507 }
508 
509 static int sk_miibus_writereg(dev, phy, reg, val)
510 	device_t		dev;
511 	int			phy, reg, val;
512 {
513 	struct sk_if_softc	*sc_if;
514 
515 	sc_if = device_get_softc(dev);
516 
517 	switch(sc_if->sk_softc->sk_type) {
518 	case SK_GENESIS:
519 		return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
520 	case SK_YUKON:
521 		return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
522 	}
523 
524 	return(0);
525 }
526 
527 static void sk_miibus_statchg(dev)
528 	device_t		dev;
529 {
530 	struct sk_if_softc	*sc_if;
531 
532 	sc_if = device_get_softc(dev);
533 
534 	switch(sc_if->sk_softc->sk_type) {
535 	case SK_GENESIS:
536 		sk_xmac_miibus_statchg(sc_if);
537 		break;
538 	case SK_YUKON:
539 		sk_marv_miibus_statchg(sc_if);
540 		break;
541 	}
542 
543 	return;
544 }
545 
546 static int sk_xmac_miibus_readreg(sc_if, phy, reg)
547 	struct sk_if_softc	*sc_if;
548 	int			phy, reg;
549 {
550 	int			i;
551 
552 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
553 		return(0);
554 
555 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
556 	SK_XM_READ_2(sc_if, XM_PHY_DATA);
557 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
558 		for (i = 0; i < SK_TIMEOUT; i++) {
559 			DELAY(1);
560 			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
561 			    XM_MMUCMD_PHYDATARDY)
562 				break;
563 		}
564 
565 		if (i == SK_TIMEOUT) {
566 			printf("sk%d: phy failed to come ready\n",
567 			    sc_if->sk_unit);
568 			return(0);
569 		}
570 	}
571 	DELAY(1);
572 	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
573 }
574 
575 static int sk_xmac_miibus_writereg(sc_if, phy, reg, val)
576 	struct sk_if_softc	*sc_if;
577 	int			phy, reg, val;
578 {
579 	int			i;
580 
581 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
582 	for (i = 0; i < SK_TIMEOUT; i++) {
583 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
584 			break;
585 	}
586 
587 	if (i == SK_TIMEOUT) {
588 		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
589 		return(ETIMEDOUT);
590 	}
591 
592 	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
593 	for (i = 0; i < SK_TIMEOUT; i++) {
594 		DELAY(1);
595 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
596 			break;
597 	}
598 
599 	if (i == SK_TIMEOUT)
600 		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
601 
602 	return(0);
603 }
604 
605 static void sk_xmac_miibus_statchg(sc_if)
606 	struct sk_if_softc	*sc_if;
607 {
608 	struct mii_data		*mii;
609 
610 	mii = device_get_softc(sc_if->sk_miibus);
611 
612 	/*
613 	 * If this is a GMII PHY, manually set the XMAC's
614 	 * duplex mode accordingly.
615 	 */
616 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
617 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
618 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
619 		} else {
620 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
621 		}
622 	}
623 
624 	return;
625 }
626 
627 static int sk_marv_miibus_readreg(sc_if, phy, reg)
628 	struct sk_if_softc	*sc_if;
629 	int			phy, reg;
630 {
631 	u_int16_t		val;
632 	int			i;
633 
634 	if (phy != 0 ||
635 	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
636 	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
637 		return(0);
638 	}
639 
640         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
641 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
642 
643 	for (i = 0; i < SK_TIMEOUT; i++) {
644 		DELAY(1);
645 		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
646 		if (val & YU_SMICR_READ_VALID)
647 			break;
648 	}
649 
650 	if (i == SK_TIMEOUT) {
651 		printf("sk%d: phy failed to come ready\n",
652 		    sc_if->sk_unit);
653 		return(0);
654 	}
655 
656 	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
657 
658 	return(val);
659 }
660 
661 static int sk_marv_miibus_writereg(sc_if, phy, reg, val)
662 	struct sk_if_softc	*sc_if;
663 	int			phy, reg, val;
664 {
665 	int			i;
666 
667 	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
668 	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
669 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
670 
671 	for (i = 0; i < SK_TIMEOUT; i++) {
672 		DELAY(1);
673 		if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
674 			break;
675 	}
676 
677 	return(0);
678 }
679 
680 static void sk_marv_miibus_statchg(sc_if)
681 	struct sk_if_softc	*sc_if;
682 {
683 	return;
684 }
685 
686 #define XMAC_POLY		0xEDB88320
687 #define GMAC_POLY               0x04C11DB7L
688 #define HASH_BITS		6
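/*
 * Both hash routines below reduce a 6-byte multicast address to a
 * HASH_BITS-wide (6-bit) value in the range 0..63.  sk_setmulti() uses
 * that value to set one bit in a 64-bit filter kept as two 32-bit
 * words, roughly:
 *
 *	if (h < 32)
 *		hashes[0] |= (1 << h);
 *	else
 *		hashes[1] |= (1 << (h - 32));
 *
 * The XMAC variant is a reflected CRC-32 (polynomial 0xEDB88320) whose
 * complemented low 6 bits are used; the GMAC variant is the
 * non-reflected form (polynomial 0x04C11DB7) applied to bit-reversed
 * input bytes.
 */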
689 
690 static u_int32_t xmac_calchash(addr)
691 	caddr_t			addr;
692 {
693 	u_int32_t		idx, bit, data, crc;
694 
695 	/* Compute CRC for the address value. */
696 	crc = 0xFFFFFFFF; /* initial value */
697 
698 	for (idx = 0; idx < 6; idx++) {
699 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
700 			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? XMAC_POLY : 0);
701 	}
702 
703 	return (~crc & ((1 << HASH_BITS) - 1));
704 }
705 
706 static u_int32_t gmac_calchash(addr)
707     caddr_t			addr;
708 {
709     u_int32_t               idx, bit, crc, tmpData, data;
710 
711     /* Compute CRC for the address value. */
712     crc = 0xFFFFFFFF; /* initial value */
713 
714     for (idx = 0; idx < 6; idx++) {
715         data = *addr++;
716 
717         /* Change bit order in byte. */
718         tmpData = data;
719         for (bit = 0; bit < 8; bit++) {
720             if (tmpData & 1) {
721                 data |=  1 << (7 - bit);
722             }
723             else {
724                 data &= ~(1 << (7 - bit));
725             }
726 
727             tmpData >>= 1;
728         }
729 
730         crc ^= (data << 24);
731         for (bit = 0; bit < 8; bit++) {
732             if (crc & 0x80000000) {
733                 crc = (crc << 1) ^ GMAC_POLY;
734             } else {
735                 crc <<= 1;
736             }
737         }
738     }
739 
740     return (crc & ((1 << HASH_BITS) - 1));
741 }
742 
743 static void sk_setfilt(sc_if, addr, slot)
744 	struct sk_if_softc	*sc_if;
745 	caddr_t			addr;
746 	int			slot;
747 {
748 	int			base;
749 
750 	base = XM_RXFILT_ENTRY(slot);
751 
752 	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
753 	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
754 	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
755 
756 	return;
757 }
758 
759 static void sk_setmulti(sc_if)
760 	struct sk_if_softc	*sc_if;
761 {
762 	struct sk_softc		*sc = sc_if->sk_softc;
763 	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
764 	u_int32_t		hashes[2] = { 0, 0 };
765 	int			h, i;
766 	struct ifmultiaddr	*ifma;
767 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };
768 
769 
770 	/* First, zot all the existing filters. */
771 	switch(sc->sk_type) {
772 	case SK_GENESIS:
773 		for (i = 1; i < XM_RXFILT_MAX; i++)
774 			sk_setfilt(sc_if, (caddr_t)&dummy, i);
775 
776 		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
777 		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
778 		break;
779 	case SK_YUKON:
780 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
781 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
782 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
783 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
784 		break;
785 	}
786 
787 	/* Now program new ones. */
788 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
789 		hashes[0] = 0xFFFFFFFF;
790 		hashes[1] = 0xFFFFFFFF;
791 	} else {
792 		i = 1;
793 		/* First find the tail of the list. */
794 		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
795 					ifma = ifma->ifma_link.le_next) {
796 			if (ifma->ifma_link.le_next == NULL)
797 				break;
798 		}
799 		/* Now traverse the list backwards. */
800 		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
801 			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
802 			if (ifma->ifma_addr->sa_family != AF_LINK)
803 				continue;
804 			/*
805 			 * Program the first XM_RXFILT_MAX multicast groups
806 			 * into the perfect filter. For all others,
807 			 * use the hash table.
808 			 */
809 			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
810 				sk_setfilt(sc_if,
811 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
812 				i++;
813 				continue;
814 			}
815 
816                         switch(sc->sk_type) {
817                         case SK_GENESIS:
818                             h = xmac_calchash(
819                                 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
820                             if (h < 32)
821                                 hashes[0] |= (1 << h);
822                             else
823                                 hashes[1] |= (1 << (h - 32));
824                             break;
825 
826                         case SK_YUKON:
827                             h = gmac_calchash(
828                                 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
829                             if (h < 32)
830                                 hashes[0] |= (1 << h);
831                             else
832                                 hashes[1] |= (1 << (h - 32));
833                             break;
834                         }
835 		}
836 	}
837 
838 	switch(sc->sk_type) {
839 	case SK_GENESIS:
840 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
841 			       XM_MODE_RX_USE_PERFECT);
842 		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
843 		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
844 		break;
845 	case SK_YUKON:
846 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
847 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
848 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
849 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
850 		break;
851 	}
852 
853 	return;
854 }
855 
856 static void sk_setpromisc(sc_if)
857 	struct sk_if_softc	*sc_if;
858 {
859 	struct sk_softc		*sc = sc_if->sk_softc;
860 	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
861 
862 	switch(sc->sk_type) {
863 	case SK_GENESIS:
864 		if (ifp->if_flags & IFF_PROMISC) {
865 			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
866 		} else {
867 			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
868 		}
869 		break;
870 	case SK_YUKON:
871 		if (ifp->if_flags & IFF_PROMISC) {
872 			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
873 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
874 		} else {
875 			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
876 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
877 		}
878 		break;
879 	}
880 
881 	return;
882 }
883 
884 static int sk_init_rx_ring(sc_if)
885 	struct sk_if_softc	*sc_if;
886 {
887 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
888 	struct sk_ring_data	*rd = sc_if->sk_rdata;
889 	int			i;
890 
891 	bzero((char *)rd->sk_rx_ring,
892 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
893 
894 	for (i = 0; i < SK_RX_RING_CNT; i++) {
895 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
896 		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
897 			return(ENOBUFS);
898 		if (i == (SK_RX_RING_CNT - 1)) {
899 			cd->sk_rx_chain[i].sk_next =
900 			    &cd->sk_rx_chain[0];
901 			rd->sk_rx_ring[i].sk_next =
902 			    vtophys(&rd->sk_rx_ring[0]);
903 		} else {
904 			cd->sk_rx_chain[i].sk_next =
905 			    &cd->sk_rx_chain[i + 1];
906 			rd->sk_rx_ring[i].sk_next =
907 			    vtophys(&rd->sk_rx_ring[i + 1]);
908 		}
909 	}
910 
911 	sc_if->sk_cdata.sk_rx_prod = 0;
912 	sc_if->sk_cdata.sk_rx_cons = 0;
913 
914 	return(0);
915 }
916 
917 static void sk_init_tx_ring(sc_if)
918 	struct sk_if_softc	*sc_if;
919 {
920 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
921 	struct sk_ring_data	*rd = sc_if->sk_rdata;
922 	int			i;
923 
924 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
925 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
926 
927 	for (i = 0; i < SK_TX_RING_CNT; i++) {
928 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
929 		if (i == (SK_TX_RING_CNT - 1)) {
930 			cd->sk_tx_chain[i].sk_next =
931 			    &cd->sk_tx_chain[0];
932 			rd->sk_tx_ring[i].sk_next =
933 			    vtophys(&rd->sk_tx_ring[0]);
934 		} else {
935 			cd->sk_tx_chain[i].sk_next =
936 			    &cd->sk_tx_chain[i + 1];
937 			rd->sk_tx_ring[i].sk_next =
938 			    vtophys(&rd->sk_tx_ring[i + 1]);
939 		}
940 	}
941 
942 	sc_if->sk_cdata.sk_tx_prod = 0;
943 	sc_if->sk_cdata.sk_tx_cons = 0;
944 	sc_if->sk_cdata.sk_tx_cnt = 0;
945 
946 	return;
947 }
948 
949 static int sk_newbuf(sc_if, c, m)
950 	struct sk_if_softc	*sc_if;
951 	struct sk_chain		*c;
952 	struct mbuf		*m;
953 {
954 	struct mbuf		*m_new = NULL;
955 	struct sk_rx_desc	*r;
956 
957 	if (m == NULL) {
958 		caddr_t			*buf = NULL;
959 
960 		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
961 		if (m_new == NULL)
962 			return(ENOBUFS);
963 
964 		/* Allocate the jumbo buffer */
965 		buf = sk_jalloc(sc_if);
966 		if (buf == NULL) {
967 			m_freem(m_new);
968 #ifdef SK_VERBOSE
969 			printf("sk%d: jumbo allocation failed "
970 			    "-- packet dropped!\n", sc_if->sk_unit);
971 #endif
972 			return(ENOBUFS);
973 		}
974 
975 		/* Attach the buffer to the mbuf */
976 		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
977 		m_new->m_flags |= M_EXT | M_EXT_OLD;
978 		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
979 		    m_new->m_len = SK_MCLBYTES;
980 		m_new->m_ext.ext_nfree.old = sk_jfree;
981 		m_new->m_ext.ext_nref.old = sk_jref;
982 	} else {
983 		/*
984 	 	 * We're re-using a previously allocated mbuf;
985 		 * be sure to re-init pointers and lengths to
986 		 * default values.
987 		 */
988 		m_new = m;
989 		m_new->m_len = m_new->m_pkthdr.len = SK_MCLBYTES;
990 		m_new->m_data = m_new->m_ext.ext_buf;
991 	}
992 
993 	/*
994 	 * Adjust alignment so packet payload begins on a
995 	 * longword boundary. Mandatory for Alpha, useful on
996 	 * x86 too.
997 	 */
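	/*
	 * ETHER_ALIGN is 2 bytes, so after this adjustment the 14-byte
	 * Ethernet header ends on a 4-byte boundary and the IP header
	 * that follows it is longword aligned.
	 */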
998 	m_adj(m_new, ETHER_ALIGN);
999 
1000 	r = c->sk_desc;
1001 	c->sk_mbuf = m_new;
1002 	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
1003 	r->sk_ctl = m_new->m_len | SK_RXSTAT;
1004 
1005 	return(0);
1006 }
1007 
1008 /*
1009  * Allocate jumbo buffer storage. The SysKonnect adapters support
1010  * "jumbograms" (9K frames), although SysKonnect doesn't currently
1011  * use them in their drivers. In order for us to use them, we need
1012  * large 9K receive buffers, however standard mbuf clusters are only
1013  * 2048 bytes in size. Consequently, we need to allocate and manage
1014  * our own jumbo buffer pool. Fortunately, this does not require an
1015  * excessive amount of additional code.
1016  */
1017 static int sk_alloc_jumbo_mem(sc_if)
1018 	struct sk_if_softc	*sc_if;
1019 {
1020 	caddr_t			ptr;
1021 	int		i;
1022 	struct sk_jpool_entry   *entry;
1023 
1024 	/* Grab a big chunk o' storage. */
1025 	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
1026 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1027 
1028 	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
1029 		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
1030 		return(ENOBUFS);
1031 	}
1032 
1033 	SLIST_INIT(&sc_if->sk_jfree_listhead);
1034 	SLIST_INIT(&sc_if->sk_jinuse_listhead);
1035 
1036 	/*
1037 	 * Now divide it up into 9K pieces and save the addresses
1038 	 * in an array. Note that we play an evil trick here by using
1039 	 * the first few bytes in the buffer to hold the address
1040 	 * of the softc structure for this interface. This is because
1041 	 * sk_jfree() needs it, but it is called by the mbuf management
1042 	 * code which will not pass it to us explicitly.
1043 	 */
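	/*
	 * Schematically, each slot ends up laid out as
	 *
	 *	[ softc pointer (sizeof(u_int64_t)) ][ buffer (SK_MCLBYTES) ]
	 *
	 * which lets sk_jref()/sk_jfree() step back sizeof(u_int64_t) bytes
	 * from a buffer address to recover the softc, and divide the offset
	 * into the pool by the per-slot stride (SK_JLEN) to find the slot.
	 */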
1044 	ptr = sc_if->sk_cdata.sk_jumbo_buf;
1045 	for (i = 0; i < SK_JSLOTS; i++) {
1046 		u_int64_t		**aptr;
1047 		aptr = (u_int64_t **)ptr;
1048 		aptr[0] = (u_int64_t *)sc_if;
1049 		ptr += sizeof(u_int64_t);
1050 		sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
1051 		sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
1052 		ptr += SK_MCLBYTES;
1053 		entry = malloc(sizeof(struct sk_jpool_entry),
1054 		    M_DEVBUF, M_WAITOK);
1055 		if (entry == NULL) {
1056 			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
1057 			sc_if->sk_cdata.sk_jumbo_buf = NULL;
1058 			printf("sk%d: no memory for jumbo "
1059 			    "buffer queue!\n", sc_if->sk_unit);
1060 			return(ENOBUFS);
1061 		}
1062 		entry->slot = i;
1063 		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1064 		    entry, jpool_entries);
1065 	}
1066 
1067 	return(0);
1068 }
1069 
1070 /*
1071  * Allocate a jumbo buffer.
1072  */
1073 static void *sk_jalloc(sc_if)
1074 	struct sk_if_softc	*sc_if;
1075 {
1076 	struct sk_jpool_entry   *entry;
1077 
1078 	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1079 
1080 	if (entry == NULL) {
1081 #ifdef SK_VERBOSE
1082 		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1083 #endif
1084 		return(NULL);
1085 	}
1086 
1087 	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1088 	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1089 	sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
1090 	return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
1091 }
1092 
1093 /*
1094  * Adjust usage count on a jumbo buffer. In general this doesn't
1095  * get used much because our jumbo buffers don't get passed around
1096  * a lot, but it's implemented for correctness.
1097  */
1098 static void sk_jref(buf, size)
1099 	caddr_t			buf;
1100 	u_int			size;
1101 {
1102 	struct sk_if_softc	*sc_if;
1103 	u_int64_t		**aptr;
1104 	int		i;
1105 
1106 	/* Extract the softc struct pointer. */
1107 	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
1108 	sc_if = (struct sk_if_softc *)(aptr[0]);
1109 
1110 	if (sc_if == NULL)
1111 		panic("sk_jref: can't find softc pointer!");
1112 
1113 	if (size != SK_MCLBYTES)
1114 		panic("sk_jref: adjusting refcount of buf of wrong size!");
1115 
1116 	/* calculate the slot this buffer belongs to */
1117 
1118 	i = ((vm_offset_t)aptr
1119 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1120 
1121 	if ((i < 0) || (i >= SK_JSLOTS))
1122 		panic("sk_jref: asked to reference buffer "
1123 		    "that we don't manage!");
1124 	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
1125 		panic("sk_jref: buffer already free!");
1126 	else
1127 		sc_if->sk_cdata.sk_jslots[i].sk_inuse++;
1128 
1129 	return;
1130 }
1131 
1132 /*
1133  * Release a jumbo buffer.
1134  */
1135 static void sk_jfree(buf, size)
1136 	caddr_t			buf;
1137 	u_int			size;
1138 {
1139 	struct sk_if_softc	*sc_if;
1140 	u_int64_t		**aptr;
1141 	int		        i;
1142 	struct sk_jpool_entry   *entry;
1143 
1144 	/* Extract the softc struct pointer. */
1145 	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
1146 	sc_if = (struct sk_if_softc *)(aptr[0]);
1147 
1148 	if (sc_if == NULL)
1149 		panic("sk_jfree: can't find softc pointer!");
1150 
1151 	if (size != SK_MCLBYTES)
1152 		panic("sk_jfree: freeing buffer of wrong size!");
1153 
1154 	/* calculate the slot this buffer belongs to */
1155 
1156 	i = ((vm_offset_t)aptr
1157 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1158 
1159 	if ((i < 0) || (i >= SK_JSLOTS))
1160 		panic("sk_jfree: asked to free buffer that we don't manage!");
1161 	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
1162 		panic("sk_jfree: buffer already free!");
1163 	else {
1164 		sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
1165 		if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
1166 			entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
1167 			if (entry == NULL)
1168 				panic("sk_jfree: buffer not in use!");
1169 			entry->slot = i;
1170 			SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
1171 					  jpool_entries);
1172 			SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1173 					  entry, jpool_entries);
1174 		}
1175 	}
1176 
1177 	return;
1178 }
1179 
1180 /*
1181  * Set media options.
1182  */
1183 static int sk_ifmedia_upd(ifp)
1184 	struct ifnet		*ifp;
1185 {
1186 	struct sk_if_softc	*sc_if = ifp->if_softc;
1187 	struct mii_data		*mii;
1188 
1189 	mii = device_get_softc(sc_if->sk_miibus);
1190 	sk_init(sc_if);
1191 	mii_mediachg(mii);
1192 
1193 	return(0);
1194 }
1195 
1196 /*
1197  * Report current media status.
1198  */
1199 static void sk_ifmedia_sts(ifp, ifmr)
1200 	struct ifnet		*ifp;
1201 	struct ifmediareq	*ifmr;
1202 {
1203 	struct sk_if_softc	*sc_if;
1204 	struct mii_data		*mii;
1205 
1206 	sc_if = ifp->if_softc;
1207 	mii = device_get_softc(sc_if->sk_miibus);
1208 
1209 	mii_pollstat(mii);
1210 	ifmr->ifm_active = mii->mii_media_active;
1211 	ifmr->ifm_status = mii->mii_media_status;
1212 
1213 	return;
1214 }
1215 
1216 static int sk_ioctl(ifp, command, data, cr)
1217 	struct ifnet		*ifp;
1218 	u_long			command;
1219 	caddr_t			data;
1220 	struct ucred		*cr;
1221 {
1222 	struct sk_if_softc	*sc_if = ifp->if_softc;
1223 	struct ifreq		*ifr = (struct ifreq *) data;
1224 	int			s, error = 0;
1225 	struct mii_data		*mii;
1226 
1227 	s = splimp();
1228 
1229 	switch(command) {
1230 	case SIOCSIFADDR:
1231 	case SIOCGIFADDR:
1232 		error = ether_ioctl(ifp, command, data);
1233 		break;
1234 	case SIOCSIFMTU:
1235 		if (ifr->ifr_mtu > SK_JUMBO_MTU)
1236 			error = EINVAL;
1237 		else {
1238 			ifp->if_mtu = ifr->ifr_mtu;
1239 			sk_init(sc_if);
1240 		}
1241 		break;
1242 	case SIOCSIFFLAGS:
1243 		if (ifp->if_flags & IFF_UP) {
1244 			if (ifp->if_flags & IFF_RUNNING) {
1245 				if ((ifp->if_flags ^ sc_if->sk_if_flags)
1246 				    & IFF_PROMISC) {
1247 					sk_setpromisc(sc_if);
1248 					sk_setmulti(sc_if);
1249 				}
1250 			} else
1251 				sk_init(sc_if);
1252 		} else {
1253 			if (ifp->if_flags & IFF_RUNNING)
1254 				sk_stop(sc_if);
1255 		}
1256 		sc_if->sk_if_flags = ifp->if_flags;
1257 		error = 0;
1258 		break;
1259 	case SIOCADDMULTI:
1260 	case SIOCDELMULTI:
1261 		sk_setmulti(sc_if);
1262 		error = 0;
1263 		break;
1264 	case SIOCGIFMEDIA:
1265 	case SIOCSIFMEDIA:
1266 		mii = device_get_softc(sc_if->sk_miibus);
1267 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1268 		break;
1269 	default:
1270 		error = EINVAL;
1271 		break;
1272 	}
1273 
1274 	(void)splx(s);
1275 
1276 	return(error);
1277 }
1278 
1279 /*
1280  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1281  * IDs against our list and return a device name if we find a match.
1282  */
1283 static int skc_probe(dev)
1284 	device_t		dev;
1285 {
1286 	struct sk_softc		*sc;
1287 	struct sk_type		*t = sk_devs;
1288 
1289 	sc = device_get_softc(dev);
1290 
1291 	while(t->sk_name != NULL) {
1292 		if ((pci_get_vendor(dev) == t->sk_vid) &&
1293 		    (pci_get_device(dev) == t->sk_did)) {
1294 			device_set_desc(dev, t->sk_name);
1295 			return(0);
1296 		}
1297 		t++;
1298 	}
1299 
1300 	return(ENXIO);
1301 }
1302 
1303 /*
1304  * Force the GEnesis into reset, then bring it out of reset.
1305  */
1306 static void sk_reset(sc)
1307 	struct sk_softc		*sc;
1308 {
1309 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1310 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1311 	if (sc->sk_type == SK_YUKON)
1312 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1313 
1314 	DELAY(1000);
1315 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1316 	DELAY(2);
1317 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1318 	if (sc->sk_type == SK_YUKON)
1319 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1320 
1321 	if (sc->sk_type == SK_GENESIS) {
1322 		/* Configure packet arbiter */
1323 		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1324 		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1325 		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1326 		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1327 		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1328 	}
1329 
1330 	/* Enable RAM interface */
1331 	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1332 
1333 	/*
1334          * Configure interrupt moderation. The moderation timer
1335 	 * defers interrupts specified in the interrupt moderation
1336 	 * timer mask based on the timeout specified in the interrupt
1337 	 * moderation timer init register. Each bit in the timer
1338 	 * register represents 18.825ns, so to specify a timeout in
1339 	 * microseconds, we have to multiply by 54.
1340 	 */
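	/*
	 * For example, the 200us value below presumably works out to
	 * roughly 200 * 54 = 10800 timer ticks inside the SK_IM_USECS()
	 * macro, assuming that macro performs the multiply-by-54
	 * conversion described above.
	 */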
1341         sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
1342         sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1343 	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1344         sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1345 
1346 	return;
1347 }
1348 
1349 static int sk_probe(dev)
1350 	device_t		dev;
1351 {
1352 	struct sk_softc		*sc;
1353 
1354 	sc = device_get_softc(device_get_parent(dev));
1355 
1356 	/*
1357 	 * Not much to do here. We always know there will be
1358 	 * at least one XMAC present, and if there are two,
1359 	 * skc_attach() will create a second device instance
1360 	 * for us.
1361 	 */
1362 	switch (sc->sk_type) {
1363 	case SK_GENESIS:
1364 		device_set_desc(dev, "XaQti Corp. XMAC II");
1365 		break;
1366 	case SK_YUKON:
1367 		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1368 		break;
1369 	}
1370 
1371 	return(0);
1372 }
1373 
1374 /*
1375  * Each XMAC chip is attached as a separate logical IP interface.
1376  * Single port cards will have only one logical interface of course.
1377  */
1378 static int sk_attach(dev)
1379 	device_t		dev;
1380 {
1381 	struct sk_softc		*sc;
1382 	struct sk_if_softc	*sc_if;
1383 	struct ifnet		*ifp;
1384 	int			i, port;
1385 
1386 	if (dev == NULL)
1387 		return(EINVAL);
1388 
1389 	sc_if = device_get_softc(dev);
1390 	sc = device_get_softc(device_get_parent(dev));
1391 	port = *(int *)device_get_ivars(dev);
1392 	free(device_get_ivars(dev), M_DEVBUF);
1393 	device_set_ivars(dev, NULL);
1394 	sc_if->sk_dev = dev;
1395 	callout_init(&sc_if->sk_tick_timer);
1396 
1397 	bzero((char *)sc_if, sizeof(struct sk_if_softc));
1398 
1399 	sc_if->sk_dev = dev;
1400 	sc_if->sk_unit = device_get_unit(dev);
1401 	sc_if->sk_port = port;
1402 	sc_if->sk_softc = sc;
1403 	sc->sk_if[port] = sc_if;
1404 	if (port == SK_PORT_A)
1405 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1406 	if (port == SK_PORT_B)
1407 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1408 
1409 	/*
1410 	 * Get station address for this interface. Note that
1411 	 * dual port cards actually come with three station
1412 	 * addresses: one for each port, plus an extra. The
1413 	 * extra one is used by the SysKonnect driver software
1414 	 * as a 'virtual' station address for when both ports
1415 	 * are operating in failover mode. Currently we don't
1416 	 * use this extra address.
1417 	 */
1418 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1419 		sc_if->arpcom.ac_enaddr[i] =
1420 		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1421 
1422 	/*
1423 	 * Set up RAM buffer addresses. The NIC will have a certain
1424 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1425 	 * need to divide this up a) between the transmitter and
1426  	 * receiver and b) between the two XMACs, if this is a
1427 	 * dual port NIC. Our algorithm is to divide up the memory
1428 	 * evenly so that everyone gets a fair share.
1429 	 */
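	/*
	 * For instance, a single-MAC board with 512K of SRAM is split into
	 * a 256K receive chunk followed by a 256K transmit chunk, with the
	 * start/end values below expressed in 8-byte (u_int64_t) units
	 * rather than in bytes.
	 */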
1430 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1431 		u_int32_t		chunk, val;
1432 
1433 		chunk = sc->sk_ramsize / 2;
1434 		val = sc->sk_rboff / sizeof(u_int64_t);
1435 		sc_if->sk_rx_ramstart = val;
1436 		val += (chunk / sizeof(u_int64_t));
1437 		sc_if->sk_rx_ramend = val - 1;
1438 		sc_if->sk_tx_ramstart = val;
1439 		val += (chunk / sizeof(u_int64_t));
1440 		sc_if->sk_tx_ramend = val - 1;
1441 	} else {
1442 		u_int32_t		chunk, val;
1443 
1444 		chunk = sc->sk_ramsize / 4;
1445 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1446 		    sizeof(u_int64_t);
1447 		sc_if->sk_rx_ramstart = val;
1448 		val += (chunk / sizeof(u_int64_t));
1449 		sc_if->sk_rx_ramend = val - 1;
1450 		sc_if->sk_tx_ramstart = val;
1451 		val += (chunk / sizeof(u_int64_t));
1452 		sc_if->sk_tx_ramend = val - 1;
1453 	}
1454 
1455 	/* Read and save PHY type and set PHY address */
1456 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1457 	switch(sc_if->sk_phytype) {
1458 	case SK_PHYTYPE_XMAC:
1459 		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1460 		break;
1461 	case SK_PHYTYPE_BCOM:
1462 		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1463 		break;
1464 	case SK_PHYTYPE_MARV_COPPER:
1465 		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1466 		break;
1467 	default:
1468 		printf("skc%d: unsupported PHY type: %d\n",
1469 		    sc->sk_unit, sc_if->sk_phytype);
1470 		return(ENODEV);
1471 	}
1472 
1473 	/* Allocate the descriptor queues. */
1474 	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1475 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1476 
1477 	if (sc_if->sk_rdata == NULL) {
1478 		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1479 		sc->sk_if[port] = NULL;
1480 		return(ENOMEM);
1481 	}
1482 
1483 	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1484 
1485 	/* Try to allocate memory for jumbo buffers. */
1486 	if (sk_alloc_jumbo_mem(sc_if)) {
1487 		printf("sk%d: jumbo buffer allocation failed\n",
1488 		    sc_if->sk_unit);
1489 		contigfree(sc_if->sk_rdata,
1490 		    sizeof(struct sk_ring_data), M_DEVBUF);
1491 		sc->sk_if[port] = NULL;
1492 		return(ENOMEM);
1493 	}
1494 
1495 	ifp = &sc_if->arpcom.ac_if;
1496 	ifp->if_softc = sc_if;
1497 	if_initname(ifp, "sk", sc_if->sk_unit);
1498 	ifp->if_mtu = ETHERMTU;
1499 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1500 	ifp->if_ioctl = sk_ioctl;
1501 	ifp->if_start = sk_start;
1502 	ifp->if_watchdog = sk_watchdog;
1503 	ifp->if_init = sk_init;
1504 	ifp->if_baudrate = 1000000000;
1505 	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1506 
1507 	/*
1508 	 * Do miibus setup.
1509 	 */
1510 	switch (sc->sk_type) {
1511 	case SK_GENESIS:
1512 		sk_init_xmac(sc_if);
1513 		break;
1514 	case SK_YUKON:
1515 		sk_init_yukon(sc_if);
1516 		break;
1517 	}
1518 
1519 	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1520 	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1521 		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1522 		contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM,
1523 		    M_DEVBUF);
1524 		contigfree(sc_if->sk_rdata,
1525 		    sizeof(struct sk_ring_data), M_DEVBUF);
1526 		return(ENXIO);
1527 	}
1528 
1529 	/*
1530 	 * Call MI attach routine.
1531 	 */
1532 	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
1533 	callout_init(&sc_if->sk_tick_timer);
1534 
1535 	return(0);
1536 }
1537 
1538 /*
1539  * Attach the interface. Allocate softc structures, do ifmedia
1540  * setup and ethernet/BPF attach.
1541  */
1542 static int skc_attach(dev)
1543 	device_t		dev;
1544 {
1545 	int			s;
1546 	u_int32_t		command;
1547 	struct sk_softc		*sc;
1548 	int			unit, error = 0, rid, *port;
1549 
1550 	s = splimp();
1551 
1552 	sc = device_get_softc(dev);
1553 	unit = device_get_unit(dev);
1554 	bzero(sc, sizeof(struct sk_softc));
1555 	switch (pci_get_device(dev)) {
1556 	case DEVICEID_SK_V1:
1557 		sc->sk_type = SK_GENESIS;
1558 		break;
1559 	case DEVICEID_SK_V2:
1560 	case DEVICEID_3COM_3C940:
1561 	case DEVICEID_LINKSYS_EG1032:
1562 	case DEVICEID_DLINK_DGE530T:
1563 		sc->sk_type = SK_YUKON;
1564 		break;
1565 	}
1566 
1567 	/*
1568 	 * Handle power management nonsense.
1569 	 */
1570 	command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
1571 	if (command == 0x01) {
1572 		command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
1573 		if (command & SK_PSTATE_MASK) {
1574 			u_int32_t		iobase, membase, irq;
1575 
1576 			/* Save important PCI config data. */
1577 			iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1578 			membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1579 			irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1580 
1581 			/* Reset the power state. */
1582 			printf("skc%d: chip is in D%d power mode "
1583 			"-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1584 			command &= 0xFFFFFFFC;
1585 			pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);
1586 
1587 			/* Restore PCI config data. */
1588 			pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1589 			pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1590 			pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1591 		}
1592 	}
1593 
1594 	/*
1595 	 * Map control/status registers.
1596 	 */
1597 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1598 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1599 	pci_write_config(dev, PCIR_COMMAND, command, 4);
1600 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1601 
1602 #ifdef SK_USEIOSPACE
1603 	if (!(command & PCIM_CMD_PORTEN)) {
1604 		printf("skc%d: failed to enable I/O ports!\n", unit);
1605 		error = ENXIO;
1606 		goto fail;
1607 	}
1608 #else
1609 	if (!(command & PCIM_CMD_MEMEN)) {
1610 		printf("skc%d: failed to enable memory mapping!\n", unit);
1611 		error = ENXIO;
1612 		goto fail;
1613 	}
1614 #endif
1615 
1616 	rid = SK_RID;
1617 	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1618 	    0, ~0, 1, RF_ACTIVE);
1619 
1620 	if (sc->sk_res == NULL) {
1621 		printf("skc%d: couldn't map ports/memory\n", unit);
1622 		error = ENXIO;
1623 		goto fail;
1624 	}
1625 
1626 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1627 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1628 
1629 	/* Allocate interrupt */
1630 	rid = 0;
1631 	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1632 	    RF_SHAREABLE | RF_ACTIVE);
1633 
1634 	if (sc->sk_irq == NULL) {
1635 		printf("skc%d: couldn't map interrupt\n", unit);
1636 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1637 		error = ENXIO;
1638 		goto fail;
1639 	}
1640 
1641 	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1642 	    sk_intr, sc, &sc->sk_intrhand);
1643 
1644 	if (error) {
1645 		printf("skc%d: couldn't set up irq\n", unit);
1646 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1647 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1648 		goto fail;
1649 	}
1650 
1651 	/* Reset the adapter. */
1652 	sk_reset(sc);
1653 
1654 	sc->sk_unit = unit;
1655 
1656 	/* Read and save vital product data from EEPROM. */
1657 	sk_vpd_read(sc);
1658 
1659 	if (sc->sk_type == SK_GENESIS) {
1660 		/* Read and save RAM size and RAMbuffer offset */
1661 		switch(sk_win_read_1(sc, SK_EPROM0)) {
1662 		case SK_RAMSIZE_512K_64:
1663 			sc->sk_ramsize = 0x80000;
1664 			sc->sk_rboff = SK_RBOFF_0;
1665 			break;
1666 		case SK_RAMSIZE_1024K_64:
1667 			sc->sk_ramsize = 0x100000;
1668 			sc->sk_rboff = SK_RBOFF_80000;
1669 			break;
1670 		case SK_RAMSIZE_1024K_128:
1671 			sc->sk_ramsize = 0x100000;
1672 			sc->sk_rboff = SK_RBOFF_0;
1673 			break;
1674 		case SK_RAMSIZE_2048K_128:
1675 			sc->sk_ramsize = 0x200000;
1676 			sc->sk_rboff = SK_RBOFF_0;
1677 			break;
1678 		default:
1679 			printf("skc%d: unknown ram size: %d\n",
1680 			    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1681 			bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1682 			bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1683 			bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1684 			error = ENXIO;
1685 			goto fail;
1686 			break;
1687 		}
1688 	} else {
1689 		sc->sk_ramsize = 0x20000;
1690 		sc->sk_rboff = SK_RBOFF_0;
1691 	}
1692 
1693 	/* Read and save physical media type */
1694 	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1695 	case SK_PMD_1000BASESX:
1696 		sc->sk_pmd = IFM_1000_SX;
1697 		break;
1698 	case SK_PMD_1000BASELX:
1699 		sc->sk_pmd = IFM_1000_LX;
1700 		break;
1701 	case SK_PMD_1000BASECX:
1702 		sc->sk_pmd = IFM_1000_CX;
1703 		break;
1704 	case SK_PMD_1000BASETX:
1705 		sc->sk_pmd = IFM_1000_TX;
1706 		break;
1707 	default:
1708 		printf("skc%d: unknown media type: 0x%x\n",
1709 		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1710 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1711 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1712 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1713 		error = ENXIO;
1714 		goto fail;
1715 	}
1716 
1717 	/* Announce the product name. */
1718 	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1719 	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1720 	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1721 	*port = SK_PORT_A;
1722 	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1723 
1724 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1725 		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1726 		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1727 		*port = SK_PORT_B;
1728 		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1729 	}
1730 
1731 	/* Turn on the 'driver is loaded' LED. */
1732 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1733 
1734 	bus_generic_attach(dev);
1735 
1736 fail:
1737 	splx(s);
1738 	return(error);
1739 }
1740 
1741 static int sk_detach(dev)
1742 	device_t		dev;
1743 {
1744 	struct sk_softc		*sc;
1745 	struct sk_if_softc	*sc_if;
1746 	struct ifnet		*ifp;
1747 	int			s;
1748 
1749 	s = splimp();
1750 
1751 	sc = device_get_softc(device_get_parent(dev));
1752 	sc_if = device_get_softc(dev);
1753 	ifp = &sc_if->arpcom.ac_if;
1754 	sk_stop(sc_if);
1755 	ether_ifdetach(ifp);
1756 	bus_generic_detach(dev);
1757 	if (sc_if->sk_miibus != NULL)
1758 		device_delete_child(dev, sc_if->sk_miibus);
1759 	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1760 	contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF);
1761 
	splx(s);
1762 	return(0);
1763 }
1764 
1765 static int skc_detach(dev)
1766 	device_t		dev;
1767 {
1768 	struct sk_softc		*sc;
1769 	int			s;
1770 
1771 	s = splimp();
1772 
1773 	sc = device_get_softc(dev);
1774 
1775 	bus_generic_detach(dev);
1776 	if (sc->sk_devs[SK_PORT_A] != NULL)
1777 		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1778 	if (sc->sk_devs[SK_PORT_B] != NULL)
1779 		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1780 
1781 	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1782 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1783 	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1784 
1785 	splx(s);
1786 
1787 	return(0);
1788 }
1789 
1790 static int sk_encap(sc_if, m_head, txidx)
1791         struct sk_if_softc	*sc_if;
1792         struct mbuf		*m_head;
1793         u_int32_t		*txidx;
1794 {
1795 	struct sk_tx_desc	*f = NULL;
1796 	struct mbuf		*m;
1797 	u_int32_t		frag, cur, cnt = 0;
1798 
1799 	m = m_head;
1800 	cur = frag = *txidx;
1801 
1802 	/*
1803 	 * Start packing the mbufs in this chain into
1804 	 * the fragment pointers. Stop when we run out
1805 	 * of fragments or hit the end of the mbuf chain.
1806 	 */
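	/*
	 * Note that the OWN bit on the first descriptor of the chain is
	 * deliberately set last, after the loop below, so the NIC never
	 * sees a partially constructed chain of fragments.
	 */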
1807 	for (m = m_head; m != NULL; m = m->m_next) {
1808 		if (m->m_len != 0) {
1809 			if ((SK_TX_RING_CNT -
1810 			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1811 				return(ENOBUFS);
1812 			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1813 			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1814 			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1815 			if (cnt == 0)
1816 				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1817 			else
1818 				f->sk_ctl |= SK_TXCTL_OWN;
1819 			cur = frag;
1820 			SK_INC(frag, SK_TX_RING_CNT);
1821 			cnt++;
1822 		}
1823 	}
1824 
1825 	if (m != NULL)
1826 		return(ENOBUFS);
1827 
1828 	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1829 		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1830 	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1831 	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1832 	sc_if->sk_cdata.sk_tx_cnt += cnt;
1833 
1834 	*txidx = frag;
1835 
1836 	return(0);
1837 }
1838 
1839 static void sk_start(ifp)
1840 	struct ifnet		*ifp;
1841 {
1842         struct sk_softc		*sc;
1843         struct sk_if_softc	*sc_if;
1844         struct mbuf		*m_head = NULL;
1845         u_int32_t		idx;
1846 
1847 	sc_if = ifp->if_softc;
1848 	sc = sc_if->sk_softc;
1849 
1850 	idx = sc_if->sk_cdata.sk_tx_prod;
1851 
1852 	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1853 		IF_DEQUEUE(&ifp->if_snd, m_head);
1854 		if (m_head == NULL)
1855 			break;
1856 
1857 		/*
1858 		 * Pack the data into the transmit ring. If we
1859 		 * don't have room, set the OACTIVE flag and wait
1860 		 * for the NIC to drain the ring.
1861 		 */
1862 		if (sk_encap(sc_if, m_head, &idx)) {
1863 			IF_PREPEND(&ifp->if_snd, m_head);
1864 			ifp->if_flags |= IFF_OACTIVE;
1865 			break;
1866 		}
1867 
1868 		/*
1869 		 * If there's a BPF listener, bounce a copy of this frame
1870 		 * to him.
1871 		 */
1872 		if (ifp->if_bpf)
1873 			bpf_mtap(ifp, m_head);
1874 	}
1875 
1876 	/* Transmit */
1877 	sc_if->sk_cdata.sk_tx_prod = idx;
1878 	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1879 
1880 	/* Set a timeout in case the chip goes out to lunch. */
1881 	ifp->if_timer = 5;
1882 
1883 	return;
1884 }
1885 
1886 
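/*
 * The transmit watchdog fired: a frame handed to the chip was never
 * reported as sent by sk_txeof(), so reinitialize the port to
 * recover.
 */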
1887 static void sk_watchdog(ifp)
1888 	struct ifnet		*ifp;
1889 {
1890 	struct sk_if_softc	*sc_if;
1891 
1892 	sc_if = ifp->if_softc;
1893 
1894 	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1895 	sk_init(sc_if);
1896 
1897 	return;
1898 }
1899 
1900 static void skc_shutdown(dev)
1901 	device_t		dev;
1902 {
1903 	struct sk_softc		*sc;
1904 
1905 	sc = device_get_softc(dev);
1906 
1907 	/* Turn off the 'driver is loaded' LED. */
1908 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1909 
1910 	/*
1911 	 * Reset the GEnesis controller. Doing this should also
1912 	 * assert the resets on the attached XMAC(s).
1913 	 */
1914 	sk_reset(sc);
1915 
1916 	return;
1917 }
1918 
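/*
 * RX completion handler: walk the receive ring from the current
 * index and pass each completed frame up the stack, stopping at the
 * first descriptor that is still owned by the chip.
 */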
1919 static void sk_rxeof(sc_if)
1920 	struct sk_if_softc	*sc_if;
1921 {
1922 	struct mbuf		*m;
1923 	struct ifnet		*ifp;
1924 	struct sk_chain		*cur_rx;
1925 	int			total_len = 0;
1926 	int			i;
1927 	u_int32_t		rxstat;
1928 
1929 	ifp = &sc_if->arpcom.ac_if;
1930 	i = sc_if->sk_cdata.sk_rx_prod;
1931 	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1932 
1933 	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
1934 
1935 		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1936 		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
1937 		m = cur_rx->sk_mbuf;
1938 		cur_rx->sk_mbuf = NULL;
1939 		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1940 		SK_INC(i, SK_RX_RING_CNT);
1941 
1942 		if (rxstat & XM_RXSTAT_ERRFRAME) {
1943 			ifp->if_ierrors++;
1944 			sk_newbuf(sc_if, cur_rx, m);
1945 			continue;
1946 		}
1947 
1948 		/*
1949 		 * Try to allocate a new jumbo buffer. If that
1950 		 * fails, copy the packet to mbufs and put the
1951 		 * jumbo buffer back in the ring so it can be
1952 		 * re-used. If allocating mbufs fails, then we
1953 		 * have to drop the packet.
1954 		 */
1955 		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
1956 			struct mbuf		*m0;
1957 			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1958 			    total_len + ETHER_ALIGN, 0, ifp, NULL);
1959 			sk_newbuf(sc_if, cur_rx, m);
1960 			if (m0 == NULL) {
1961 				printf("sk%d: no receive buffers "
1962 				    "available -- packet dropped!\n",
1963 				    sc_if->sk_unit);
1964 				ifp->if_ierrors++;
1965 				continue;
1966 			}
1967 			m_adj(m0, ETHER_ALIGN);
1968 			m = m0;
1969 		} else {
1970 			m->m_pkthdr.rcvif = ifp;
1971 			m->m_pkthdr.len = m->m_len = total_len;
1972 		}
1973 
1974 		ifp->if_ipackets++;
1975 		(*ifp->if_input)(ifp, m);
1976 	}
1977 
1978 	sc_if->sk_cdata.sk_rx_prod = i;
1979 
1980 	return;
1981 }
1982 
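/*
 * TX completion handler: reclaim descriptors the chip has given
 * back (OWN cleared), free the transmitted mbufs, cancel the
 * watchdog and clear OACTIVE so sk_start() can queue more frames.
 */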
1983 static void sk_txeof(sc_if)
1984 	struct sk_if_softc	*sc_if;
1985 {
1986 	struct sk_tx_desc	*cur_tx = NULL;
1987 	struct ifnet		*ifp;
1988 	u_int32_t		idx;
1989 
1990 	ifp = &sc_if->arpcom.ac_if;
1991 
1992 	/*
1993 	 * Go through our tx ring and free mbufs for those
1994 	 * frames that have been sent.
1995 	 */
1996 	idx = sc_if->sk_cdata.sk_tx_cons;
1997 	while(idx != sc_if->sk_cdata.sk_tx_prod) {
1998 		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1999 		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
2000 			break;
2001 		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
2002 			ifp->if_opackets++;
2003 		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
2004 			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
2005 			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
2006 		}
2007 		sc_if->sk_cdata.sk_tx_cnt--;
2008 		SK_INC(idx, SK_TX_RING_CNT);
2009 		ifp->if_timer = 0;
2010 	}
2011 
2012 	sc_if->sk_cdata.sk_tx_cons = idx;
2013 
2014 	if (cur_tx != NULL)
2015 		ifp->if_flags &= ~IFF_OACTIVE;
2016 
2017 	return;
2018 }
2019 
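/*
 * Periodic timer used while an XMAC link is down: poll the GPIO
 * link_sync signal and, once the link has been stable, turn the GP0
 * interrupt back on and let the MII layer update its state.  For
 * Broadcom PHYs the work is handed off to sk_intr_bcom() instead.
 */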
2020 static void sk_tick(xsc_if)
2021 	void			*xsc_if;
2022 {
2023 	struct sk_if_softc	*sc_if;
2024 	struct mii_data		*mii;
2025 	struct ifnet		*ifp;
2026 	int			i;
2027 
2028 	sc_if = xsc_if;
2029 	ifp = &sc_if->arpcom.ac_if;
2030 	mii = device_get_softc(sc_if->sk_miibus);
2031 
2032 	if (!(ifp->if_flags & IFF_UP))
2033 		return;
2034 
2035 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2036 		sk_intr_bcom(sc_if);
2037 		return;
2038 	}
2039 
2040 	/*
2041 	 * According to SysKonnect, the correct way to verify that
2042 	 * the link has come back up is to poll bit 0 of the GPIO
2043 	 * register three times. This pin has the signal from the
2044 	 * link_sync pin connected to it; if we read the same link
2045 	 * state 3 times in a row, we know the link is up.
2046 	 */
2047 	for (i = 0; i < 3; i++) {
2048 		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2049 			break;
2050 	}
2051 
2052 	if (i != 3) {
2053 		callout_reset(&sc_if->sk_tick_timer, hz, sk_tick, sc_if);
2054 		return;
2055 	}
2056 
2057 	/* Turn the GP0 interrupt back on. */
2058 	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2059 	SK_XM_READ_2(sc_if, XM_ISR);
2060 	mii_tick(mii);
2061 	mii_pollstat(mii);
2062 	callout_stop(&sc_if->sk_tick_timer);
2063 
2064 	return;
2065 }
2066 
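/*
 * Broadcom PHY interrupt service: RX and TX are paused while the
 * PHY interrupt and auxiliary status registers are read and the
 * link state and link LED are updated to match; RX and TX are
 * re-enabled on the way out.
 */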
2067 static void sk_intr_bcom(sc_if)
2068 	struct sk_if_softc	*sc_if;
2069 {
2070 	struct sk_softc		*sc;
2071 	struct mii_data		*mii;
2072 	struct ifnet		*ifp;
2073 	int			status;
2074 
2075 	sc = sc_if->sk_softc;
2076 	mii = device_get_softc(sc_if->sk_miibus);
2077 	ifp = &sc_if->arpcom.ac_if;
2078 
2079 	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2080 
2081 	/*
2082 	 * Read the PHY interrupt register to make sure
2083 	 * we clear any pending interrupts.
2084 	 */
2085 	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2086 
2087 	if (!(ifp->if_flags & IFF_RUNNING)) {
2088 		sk_init_xmac(sc_if);
2089 		return;
2090 	}
2091 
2092 	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2093 		int			lstat;
2094 		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2095 		    BRGPHY_MII_AUXSTS);
2096 
2097 		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2098 			mii_mediachg(mii);
2099 			/* Turn off the link LED. */
2100 			SK_IF_WRITE_1(sc_if, 0,
2101 			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
2102 			sc_if->sk_link = 0;
2103 		} else if (status & BRGPHY_ISR_LNK_CHG) {
2104 			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2105 			    BRGPHY_MII_IMR, 0xFF00);
2106 			mii_tick(mii);
2107 			sc_if->sk_link = 1;
2108 			/* Turn on the link LED. */
2109 			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2110 			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2111 			    SK_LINKLED_BLINK_OFF);
2112 			mii_pollstat(mii);
2113 		} else {
2114 			mii_tick(mii);
2115 			callout_reset(&sc_if->sk_tick_timer, hz,
2116 				      sk_tick, sc_if);
2117 		}
2118 	}
2119 
2120 	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2121 
2122 	return;
2123 }
2124 
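/*
 * XMAC interrupt service: link loss (GP0) and autonegotiation
 * events arm the MII tick timer to watch for link resync, and TX
 * underruns / RX overruns are cleared by flushing the corresponding
 * FIFO.
 */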
2125 static void sk_intr_xmac(sc_if)
2126 	struct sk_if_softc	*sc_if;
2127 {
2128 	struct sk_softc		*sc;
2129 	u_int16_t		status;
2130 	struct mii_data		*mii;
2131 
2132 	sc = sc_if->sk_softc;
2133 	mii = device_get_softc(sc_if->sk_miibus);
2134 	status = SK_XM_READ_2(sc_if, XM_ISR);
2135 
2136 	/*
2137 	 * Link has gone down. Start MII tick timeout to
2138 	 * watch for link resync.
2139 	 */
2140 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2141 		if (status & XM_ISR_GP0_SET) {
2142 			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2143 			callout_reset(&sc_if->sk_tick_timer, hz,
2144 				      sk_tick, sc_if);
2145 		}
2146 
2147 		if (status & XM_ISR_AUTONEG_DONE) {
2148 			callout_reset(&sc_if->sk_tick_timer, hz,
2149 				      sk_tick, sc_if);
2150 		}
2151 	}
2152 
2153 	if (status & XM_IMR_TX_UNDERRUN)
2154 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2155 
2156 	if (status & XM_IMR_RX_OVERRUN)
2157 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2158 
2159 	status = SK_XM_READ_2(sc_if, XM_ISR);
2160 
2161 	return;
2162 }
2163 
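/*
 * Yukon MAC interrupt service: the GMAC interrupt source register
 * is read and the status is otherwise ignored; no further handling
 * is done here.
 */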
2164 static void sk_intr_yukon(sc_if)
2165 	struct sk_if_softc	*sc_if;
2166 {
2167 	int status;
2168 
2169 	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2170 
2171 	return;
2172 }
2173 
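/*
 * Interrupt handler shared by both ports.  The interrupt source
 * register is polled until no enabled sources remain, dispatching
 * RX and TX completions and MAC/PHY events to the owning port;
 * the interrupt mask is then rewritten and any queued output is
 * restarted.
 */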
2174 static void sk_intr(xsc)
2175 	void			*xsc;
2176 {
2177 	struct sk_softc		*sc = xsc;
2178 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
2179 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2180 	u_int32_t		status;
2181 
2182 	sc_if0 = sc->sk_if[SK_PORT_A];
2183 	sc_if1 = sc->sk_if[SK_PORT_B];
2184 
2185 	if (sc_if0 != NULL)
2186 		ifp0 = &sc_if0->arpcom.ac_if;
2187 	if (sc_if1 != NULL)
2188 		ifp1 = &sc_if1->arpcom.ac_if;
2189 
2190 	for (;;) {
2191 		status = CSR_READ_4(sc, SK_ISSR);
2192 		if (!(status & sc->sk_intrmask))
2193 			break;
2194 
2195 		/* Handle receive interrupts first. */
2196 		if (status & SK_ISR_RX1_EOF) {
2197 			sk_rxeof(sc_if0);
2198 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2199 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2200 		}
2201 		if (status & SK_ISR_RX2_EOF) {
2202 			sk_rxeof(sc_if1);
2203 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2204 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2205 		}
2206 
2207 		/* Then transmit interrupts. */
2208 		if (status & SK_ISR_TX1_S_EOF) {
2209 			sk_txeof(sc_if0);
2210 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2211 			    SK_TXBMU_CLR_IRQ_EOF);
2212 		}
2213 		if (status & SK_ISR_TX2_S_EOF) {
2214 			sk_txeof(sc_if1);
2215 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2216 			    SK_TXBMU_CLR_IRQ_EOF);
2217 		}
2218 
2219 		/* Then MAC interrupts. */
2220 		if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
2221 			if (sc->sk_type == SK_GENESIS)
2222 				sk_intr_xmac(sc_if0);
2223 			else
2224 				sk_intr_yukon(sc_if0);
2225 		}
2226 
2227 		if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
2228 			if (sc->sk_type == SK_GENESIS)
2229 				sk_intr_xmac(sc_if1);
2230 			else
2231 				sk_intr_yukon(sc_if1);
2232 		}
2233 
2234 		if (status & SK_ISR_EXTERNAL_REG) {
2235 			if (ifp0 != NULL &&
2236 			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2237 				sk_intr_bcom(sc_if0);
2238 			if (ifp1 != NULL &&
2239 			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2240 				sk_intr_bcom(sc_if1);
2241 		}
2242 	}
2243 
2244 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2245 
2246 	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
2247 		sk_start(ifp0);
2248 	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
2249 		sk_start(ifp1);
2250 
2251 	return;
2252 }
2253 
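/*
 * Bring the XMAC for this port out of reset and program it: station
 * address, acceptance of error frames (see the FIFO note below),
 * transmit threshold, promiscuous and multicast filters, interrupt
 * masks and the MAC arbiter parameters for this XMAC revision.
 */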
2254 static void sk_init_xmac(sc_if)
2255 	struct sk_if_softc	*sc_if;
2256 {
2257 	struct sk_softc		*sc;
2258 	struct ifnet		*ifp;
2259 	struct sk_bcom_hack	bhack[] = {
2260 	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2261 	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2262 	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2263 	{ 0, 0 } };
2264 
2265 	sc = sc_if->sk_softc;
2266 	ifp = &sc_if->arpcom.ac_if;
2267 
2268 	/* Unreset the XMAC. */
2269 	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2270 	DELAY(1000);
2271 
2272 	/* Reset the XMAC's internal state. */
2273 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2274 
2275 	/* Save the XMAC II revision */
2276 	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2277 
2278 	/*
2279 	 * Perform additional initialization for external PHYs,
2280 	 * namely for the 1000baseTX cards that use the XMAC's
2281 	 * GMII mode.
2282 	 */
2283 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2284 		int			i = 0;
2285 		u_int32_t		val;
2286 
2287 		/* Take PHY out of reset. */
2288 		val = sk_win_read_4(sc, SK_GPIO);
2289 		if (sc_if->sk_port == SK_PORT_A)
2290 			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2291 		else
2292 			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2293 		sk_win_write_4(sc, SK_GPIO, val);
2294 
2295 		/* Enable GMII mode on the XMAC. */
2296 		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2297 
2298 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2299 		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2300 		DELAY(10000);
2301 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2302 		    BRGPHY_MII_IMR, 0xFFF0);
2303 
2304 		/*
2305 		 * Early versions of the BCM5400 apparently have
2306 		 * a bug that requires them to have their reserved
2307 		 * registers initialized to some magic values. I don't
2308 		 * know what the numbers do, I'm just the messenger.
2309 		 */
2310 		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2311 		    == 0x6041) {
2312 			while(bhack[i].reg) {
2313 				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2314 				    bhack[i].reg, bhack[i].val);
2315 				i++;
2316 			}
2317 		}
2318 	}
2319 
2320 	/* Set station address */
2321 	SK_XM_WRITE_2(sc_if, XM_PAR0,
2322 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
2323 	SK_XM_WRITE_2(sc_if, XM_PAR1,
2324 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
2325 	SK_XM_WRITE_2(sc_if, XM_PAR2,
2326 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
2327 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2328 
2329 	if (ifp->if_flags & IFF_BROADCAST) {
2330 		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2331 	} else {
2332 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2333 	}
2334 
2335 	/* We don't need the FCS appended to the packet. */
2336 	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2337 
2338 	/* We want short frames padded to 60 bytes. */
2339 	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2340 
2341 	/*
2342 	 * Enable the reception of all error frames. This is
2343 	 * a necessary evil due to the design of the XMAC. The
2344 	 * XMAC's receive FIFO is only 8K in size, however jumbo
2345 	 * frames can be up to 9000 bytes in length. When bad
2346 	 * frame filtering is enabled, the XMAC's RX FIFO operates
2347 	 * in 'store and forward' mode. For this to work, the
2348 	 * entire frame has to fit into the FIFO, but that means
2349 	 * that jumbo frames larger than 8192 bytes will be
2350 	 * truncated. Disabling all bad frame filtering causes
2351 	 * the RX FIFO to operate in streaming mode, in which
2352 	 * case the XMAC will start transferring frames out of the
2353 	 * RX FIFO as soon as the FIFO threshold is reached.
2354 	 */
2355 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2356 	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2357 	    XM_MODE_RX_INRANGELEN);
2358 
2359 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2360 		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2361 	else
2362 		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2363 
2364 	/*
2365 	 * Bump up the transmit threshold. This helps hold off transmit
2366 	 * underruns when we're blasting traffic from both ports at once.
2367 	 */
2368 	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2369 
2370 	/* Set promiscuous mode */
2371 	sk_setpromisc(sc_if);
2372 
2373 	/* Set multicast filter */
2374 	sk_setmulti(sc_if);
2375 
2376 	/* Clear and enable interrupts */
2377 	SK_XM_READ_2(sc_if, XM_ISR);
2378 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2379 		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2380 	else
2381 		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2382 
2383 	/* Configure MAC arbiter */
2384 	switch(sc_if->sk_xmac_rev) {
2385 	case XM_XMAC_REV_B2:
2386 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2387 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2388 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2389 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2390 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2391 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2392 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2393 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2394 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2395 		break;
2396 	case XM_XMAC_REV_C1:
2397 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2398 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2399 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2400 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2401 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2402 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2403 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2404 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2405 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2406 		break;
2407 	default:
2408 		break;
2409 	}
2410 	sk_win_write_2(sc, SK_MACARB_CTL,
2411 	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2412 
2413 	sc_if->sk_link = 1;
2414 
2415 	return;
2416 }
2417 
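/*
 * Equivalent initialization for Yukon-based boards: reset the GMAC
 * and GPHY, select copper or fiber operation from the PMD type,
 * program the jam/IPG timing, serial mode and station address, and
 * enable the RX and TX MAC FIFOs.
 */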
2418 static void sk_init_yukon(sc_if)
2419 	struct sk_if_softc	*sc_if;
2420 {
2421 	u_int32_t		phy;
2422 	u_int16_t		reg;
2423 	int			i;
2424 
2425 	/* GMAC and GPHY Reset */
2426 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2427 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2428 	DELAY(1000);
2429 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2430 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2431 	DELAY(1000);
2432 
2433 	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2434 		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2435 
2436 	switch(sc_if->sk_softc->sk_pmd) {
2437 	case IFM_1000_SX:
2438 	case IFM_1000_LX:
2439 		phy |= SK_GPHY_FIBER;
2440 		break;
2441 
2442 	case IFM_1000_CX:
2443 	case IFM_1000_TX:
2444 		phy |= SK_GPHY_COPPER;
2445 		break;
2446 	}
2447 
2448 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2449 	DELAY(1000);
2450 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2451 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2452 		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2453 
2454 	/* unused read of the interrupt source register */
2455 	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2456 
2457 	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2458 
2459 	/* MIB Counter Clear Mode set */
2460 	reg |= YU_PAR_MIB_CLR;
2461 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2462 
2463 	/* MIB Counter Clear Mode clear */
2464 	reg &= ~YU_PAR_MIB_CLR;
2465 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2466 
2467 	/* receive control reg */
2468 	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2469 
2470 	/* transmit parameter register */
2471 	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2472 		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2473 
2474 	/* serial mode register */
2475 	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
2476 		      YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e));
2477 
2478 	/* Setup Yukon's address */
2479 	for (i = 0; i < 3; i++) {
2480 		/* Write Source Address 1 (unicast filter) */
2481 		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2482 			      sc_if->arpcom.ac_enaddr[i * 2] |
2483 			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
2484 	}
2485 
2486 	for (i = 0; i < 3; i++) {
2487 		reg = sk_win_read_2(sc_if->sk_softc,
2488 				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2489 		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2490 	}
2491 
2492 	/* Set promiscuous mode */
2493 	sk_setpromisc(sc_if);
2494 
2495 	/* Set multicast filter */
2496 	sk_setmulti(sc_if);
2497 
2498 	/* enable interrupt mask for counter overflows */
2499 	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2500 	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2501 	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2502 
2503 	/* Configure RX MAC FIFO */
2504 	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2505 	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2506 
2507 	/* Configure TX MAC FIFO */
2508 	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2509 	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2510 }
2511 
2512 /*
2513  * Note that to properly initialize any part of the GEnesis chip,
2514  * you first have to take it out of reset mode.
2515  */
2516 static void sk_init(xsc)
2517 	void			*xsc;
2518 {
2519 	struct sk_if_softc	*sc_if = xsc;
2520 	struct sk_softc		*sc;
2521 	struct ifnet		*ifp;
2522 	struct mii_data		*mii;
2523 	int			s;
2524 	u_int16_t		reg;
2525 
2526 	s = splimp();
2527 
2528 	ifp = &sc_if->arpcom.ac_if;
2529 	sc = sc_if->sk_softc;
2530 	mii = device_get_softc(sc_if->sk_miibus);
2531 
2532 	/* Cancel pending I/O and free all RX/TX buffers. */
2533 	sk_stop(sc_if);
2534 
2535 	if (sc->sk_type == SK_GENESIS) {
2536 		/* Configure LINK_SYNC LED */
2537 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2538 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2539 			SK_LINKLED_LINKSYNC_ON);
2540 
2541 		/* Configure RX LED */
2542 		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2543 			SK_RXLEDCTL_COUNTER_START);
2544 
2545 		/* Configure TX LED */
2546 		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2547 			SK_TXLEDCTL_COUNTER_START);
2548 	}
2549 
2550 	/* Configure I2C registers */
2551 
2552 	/* Configure XMAC(s) */
2553 	switch (sc->sk_type) {
2554 	case SK_GENESIS:
2555 		sk_init_xmac(sc_if);
2556 		break;
2557 	case SK_YUKON:
2558 		sk_init_yukon(sc_if);
2559 		break;
2560 	}
2561 	mii_mediachg(mii);
2562 
2563 	if (sc->sk_type == SK_GENESIS) {
2564 		/* Configure MAC FIFOs */
2565 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2566 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2567 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2568 
2569 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2570 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2571 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2572 	}
2573 
2574 	/* Configure transmit arbiter(s) */
2575 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2576 	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2577 
2578 	/* Configure RAMbuffers */
2579 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2580 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2581 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2582 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2583 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2584 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2585 
2586 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2587 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2588 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2589 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2590 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2591 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2592 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2593 
2594 	/* Configure BMUs */
2595 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2596 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2597 	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2598 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2599 
2600 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2601 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2602 	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2603 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2604 
2605 	/* Init descriptors */
2606 	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2607 		printf("sk%d: initialization failed: no "
2608 		    "memory for rx buffers\n", sc_if->sk_unit);
2609 		sk_stop(sc_if);
2610 		(void)splx(s);
2611 		return;
2612 	}
2613 	sk_init_tx_ring(sc_if);
2614 
2615 	/* Configure interrupt handling */
2616 	CSR_READ_4(sc, SK_ISSR);
2617 	if (sc_if->sk_port == SK_PORT_A)
2618 		sc->sk_intrmask |= SK_INTRS1;
2619 	else
2620 		sc->sk_intrmask |= SK_INTRS2;
2621 
2622 	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2623 
2624 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2625 
2626 	/* Start BMUs. */
2627 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2628 
2629 	switch(sc->sk_type) {
2630 	case SK_GENESIS:
2631 		/* Enable XMACs TX and RX state machines */
2632 		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2633 		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2634 		break;
2635 	case SK_YUKON:
2636 		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2637 		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2638 		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
2639 		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2640 	}
2641 
2642 	ifp->if_flags |= IFF_RUNNING;
2643 	ifp->if_flags &= ~IFF_OACTIVE;
2644 
2645 	splx(s);
2646 
2647 	return;
2648 }
2649 
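/*
 * Stop the interface: put the PHY and MAC back into reset, take the
 * RAM buffers and BMUs offline, mask this port's interrupts and
 * reclaim any mbufs still sitting in the RX and TX rings.
 */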
2650 static void sk_stop(sc_if)
2651 	struct sk_if_softc	*sc_if;
2652 {
2653 	int			i;
2654 	struct sk_softc		*sc;
2655 	struct ifnet		*ifp;
2656 
2657 	sc = sc_if->sk_softc;
2658 	ifp = &sc_if->arpcom.ac_if;
2659 
2660 	callout_stop(&sc_if->sk_tick_timer);
2661 
2662 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2663 		u_int32_t		val;
2664 
2665 		/* Put PHY back into reset. */
2666 		val = sk_win_read_4(sc, SK_GPIO);
2667 		if (sc_if->sk_port == SK_PORT_A) {
2668 			val |= SK_GPIO_DIR0;
2669 			val &= ~SK_GPIO_DAT0;
2670 		} else {
2671 			val |= SK_GPIO_DIR2;
2672 			val &= ~SK_GPIO_DAT2;
2673 		}
2674 		sk_win_write_4(sc, SK_GPIO, val);
2675 	}
2676 
2677 	/* Turn off various components of this interface. */
2678 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2679 	switch (sc->sk_type) {
2680 	case SK_GENESIS:
2681 		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2682 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2683 		break;
2684 	case SK_YUKON:
2685 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2686 		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2687 		break;
2688 	}
2689 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2690 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2691 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2692 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2693 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2694 	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2695 	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2696 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2697 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2698 
2699 	/* Disable interrupts */
2700 	if (sc_if->sk_port == SK_PORT_A)
2701 		sc->sk_intrmask &= ~SK_INTRS1;
2702 	else
2703 		sc->sk_intrmask &= ~SK_INTRS2;
2704 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2705 
2706 	SK_XM_READ_2(sc_if, XM_ISR);
2707 	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2708 
2709 	/* Free RX and TX mbufs still in the queues. */
2710 	for (i = 0; i < SK_RX_RING_CNT; i++) {
2711 		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2712 			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2713 			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2714 		}
2715 	}
2716 
2717 	for (i = 0; i < SK_TX_RING_CNT; i++) {
2718 		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2719 			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2720 			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2721 		}
2722 	}
2723 
2724 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2725 
2726 	return;
2727 }
2728