xref: /dragonfly/sys/dev/netif/sk/if_sk.c (revision 606a6e92)
1 /*	$OpenBSD: if_sk.c,v 1.33 2003/08/12 05:23:06 nate Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999, 2000
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_sk.c,v 1.19.2.9 2003/03/05 18:42:34 njl Exp $
35  * $DragonFly: src/sys/dev/netif/sk/if_sk.c,v 1.22 2004/12/26 06:12:04 dillon Exp $
38  */
39 
40 /*
41  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
42  *
43  * Permission to use, copy, modify, and distribute this software for any
44  * purpose with or without fee is hereby granted, provided that the above
45  * copyright notice and this permission notice appear in all copies.
46  *
47  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
48  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
49  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
50  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
51  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
52  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
53  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
54  */
55 
56 /*
57  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
58  * the SK-984x series adapters, both single port and dual port.
59  * References:
60  * 	The XaQti XMAC II datasheet,
61  *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
62  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
63  *
64  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
65  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
66  * convenience to others until Vitesse corrects this problem:
67  *
68  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
69  *
70  * Written by Bill Paul <wpaul@ee.columbia.edu>
71  * Department of Electrical Engineering
72  * Columbia University, New York City
73  */
74 
75 /*
76  * The SysKonnect gigabit ethernet adapters consist of two main
77  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
78  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
79  * components and a PHY while the GEnesis controller provides a PCI
80  * interface with DMA support. Each card may have between 512K and
81  * 2MB of SRAM on board depending on the configuration.
82  *
83  * The SysKonnect GEnesis controller can have either one or two XMAC
84  * chips connected to it, allowing single or dual port NIC configurations.
85  * SysKonnect has the distinction of being the only vendor on the market
86  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
87  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
88  * XMAC registers. This driver takes advantage of these features to allow
89  * both XMACs to operate as independent interfaces.
90  */
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/sockio.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kernel.h>
98 #include <sys/socket.h>
99 #include <sys/queue.h>
100 
101 #include <net/if.h>
102 #include <net/if_arp.h>
103 #include <net/ethernet.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 
107 #include <net/bpf.h>
108 
109 #include <vm/vm.h>              /* for vtophys */
110 #include <vm/pmap.h>            /* for vtophys */
111 #include <machine/clock.h>      /* for DELAY */
112 #include <machine/bus_pio.h>
113 #include <machine/bus_memio.h>
114 #include <machine/bus.h>
115 #include <machine/resource.h>
116 #include <sys/bus.h>
117 #include <sys/rman.h>
118 
119 #include "../mii_layer/mii.h"
120 #include "../mii_layer/miivar.h"
121 #include "../mii_layer/brgphyreg.h"
122 
123 #include <bus/pci/pcireg.h>
124 #include <bus/pci/pcivar.h>
125 
126 #if 0
127 #define SK_USEIOSPACE
128 #endif
129 
130 #include "if_skreg.h"
131 #include "xmaciireg.h"
132 #include "yukonreg.h"
133 
134 /* "controller miibus0" required.  See GENERIC if you get errors here. */
135 #include "miibus_if.h"
136 
137 static struct sk_type sk_devs[] = {
138 	{
139 		VENDORID_SK,
140 		DEVICEID_SK_V1,
141 		"SysKonnect Gigabit Ethernet (V1.0)"
142 	},
143 	{
144 		VENDORID_SK,
145 		DEVICEID_SK_V2,
146 		"SysKonnect Gigabit Ethernet (V2.0)"
147 	},
148 	{
149 		VENDORID_MARVELL,
150 		DEVICEID_SK_V2,
151 		"Marvell Gigabit Ethernet"
152 	},
153 	{
154 		VENDORID_3COM,
155 		DEVICEID_3COM_3C940,
156 		"3Com 3C940 Gigabit Ethernet"
157 	},
158 	{
159 		VENDORID_LINKSYS,
160 		DEVICEID_LINKSYS_EG1032,
161 		"Linksys EG1032 Gigabit Ethernet"
162 	},
163 	{
164 		VENDORID_DLINK,
165 		DEVICEID_DLINK_DGE530T,
166 		"D-Link DGE-530T Gigabit Ethernet"
167 	},
168 	{ 0, 0, NULL }
169 };
170 
171 static int skc_probe		(device_t);
172 static int skc_attach		(device_t);
173 static int skc_detach		(device_t);
174 static void skc_shutdown	(device_t);
175 static int sk_probe		(device_t);
176 static int sk_attach		(device_t);
177 static int sk_detach		(device_t);
178 static void sk_tick		(void *);
179 static void sk_intr		(void *);
180 static void sk_intr_bcom	(struct sk_if_softc *);
181 static void sk_intr_xmac	(struct sk_if_softc *);
182 static void sk_intr_yukon	(struct sk_if_softc *);
183 static void sk_rxeof		(struct sk_if_softc *);
184 static void sk_txeof		(struct sk_if_softc *);
185 static int sk_encap		(struct sk_if_softc *, struct mbuf *,
186 					u_int32_t *);
187 static void sk_start		(struct ifnet *);
188 static int sk_ioctl		(struct ifnet *, u_long, caddr_t,
189 					struct ucred *);
190 static void sk_init		(void *);
191 static void sk_init_xmac	(struct sk_if_softc *);
192 static void sk_init_yukon	(struct sk_if_softc *);
193 static void sk_stop		(struct sk_if_softc *);
194 static void sk_watchdog		(struct ifnet *);
195 static int sk_ifmedia_upd	(struct ifnet *);
196 static void sk_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
197 static void sk_reset		(struct sk_softc *);
198 static int sk_newbuf		(struct sk_if_softc *,
199 					struct sk_chain *, struct mbuf *);
200 static int sk_alloc_jumbo_mem	(struct sk_if_softc *);
201 static void *sk_jalloc		(struct sk_if_softc *);
202 static void sk_jfree		(caddr_t, u_int);
203 static void sk_jref		(caddr_t, u_int);
204 static int sk_init_rx_ring	(struct sk_if_softc *);
205 static void sk_init_tx_ring	(struct sk_if_softc *);
206 static u_int32_t sk_win_read_4	(struct sk_softc *, int);
207 static u_int16_t sk_win_read_2	(struct sk_softc *, int);
208 static u_int8_t sk_win_read_1	(struct sk_softc *, int);
209 static void sk_win_write_4	(struct sk_softc *, int, u_int32_t);
210 static void sk_win_write_2	(struct sk_softc *, int, u_int32_t);
211 static void sk_win_write_1	(struct sk_softc *, int, u_int32_t);
212 static u_int8_t sk_vpd_readbyte	(struct sk_softc *, int);
213 static void sk_vpd_read_res	(struct sk_softc *,
214 					struct vpd_res *, int);
215 static void sk_vpd_read		(struct sk_softc *);
216 
217 static int sk_miibus_readreg	(device_t, int, int);
218 static int sk_miibus_writereg	(device_t, int, int, int);
219 static void sk_miibus_statchg	(device_t);
220 
221 static int sk_xmac_miibus_readreg     (struct sk_if_softc *, int, int);
222 static int sk_xmac_miibus_writereg    (struct sk_if_softc *, int, int, int);
223 static void sk_xmac_miibus_statchg    (struct sk_if_softc *);
224 
225 static int sk_marv_miibus_readreg     (struct sk_if_softc *, int, int);
226 static int sk_marv_miibus_writereg    (struct sk_if_softc *, int, int, int);
227 static void sk_marv_miibus_statchg    (struct sk_if_softc *);
228 
229 static u_int32_t xmac_calchash	(caddr_t);
230 static u_int32_t gmac_calchash	(caddr_t);
231 static void sk_setfilt		(struct sk_if_softc *, caddr_t, int);
232 static void sk_setmulti		(struct sk_if_softc *);
233 static void sk_setpromisc	(struct sk_if_softc *);
234 
235 #ifdef SK_USEIOSPACE
236 #define SK_RES		SYS_RES_IOPORT
237 #define SK_RID		SK_PCI_LOIO
238 #else
239 #define SK_RES		SYS_RES_MEMORY
240 #define SK_RID		SK_PCI_LOMEM
241 #endif
242 
243 /*
244  * Note that we have newbus methods for both the GEnesis controller
245  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
246  * the miibus code is a child of the XMACs. We need to do it this way
247  * so that the miibus drivers can access the PHY registers on the
248  * right PHY. It's not quite what I had in mind, but it's the only
249  * design that achieves the desired effect.
250  */
251 static device_method_t skc_methods[] = {
252 	/* Device interface */
253 	DEVMETHOD(device_probe,		skc_probe),
254 	DEVMETHOD(device_attach,	skc_attach),
255 	DEVMETHOD(device_detach,	skc_detach),
256 	DEVMETHOD(device_shutdown,	skc_shutdown),
257 
258 	/* bus interface */
259 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
260 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
261 
262 	{ 0, 0 }
263 };
264 
265 static driver_t skc_driver = {
266 	"skc",
267 	skc_methods,
268 	sizeof(struct sk_softc)
269 };
270 
271 static devclass_t skc_devclass;
272 
273 static device_method_t sk_methods[] = {
274 	/* Device interface */
275 	DEVMETHOD(device_probe,		sk_probe),
276 	DEVMETHOD(device_attach,	sk_attach),
277 	DEVMETHOD(device_detach,	sk_detach),
278 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
279 
280 	/* bus interface */
281 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
282 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
283 
284 	/* MII interface */
285 	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
286 	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
287 	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
288 
289 	{ 0, 0 }
290 };
291 
292 static driver_t sk_driver = {
293 	"sk",
294 	sk_methods,
295 	sizeof(struct sk_if_softc)
296 };
297 
298 static devclass_t sk_devclass;
299 
300 DECLARE_DUMMY_MODULE(if_sk);
301 DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
302 DRIVER_MODULE(if_sk, skc, sk_driver, sk_devclass, 0, 0);
303 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
304 
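/*
 * Convenience macros for read-modify-write access to controller
 * registers: SK_SETBIT/SK_CLRBIT go through the CSR macros directly,
 * while the SK_WIN_* variants use the sk_win_*() helpers below.
 */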
305 #define SK_SETBIT(sc, reg, x)		\
306 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
307 
308 #define SK_CLRBIT(sc, reg, x)		\
309 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
310 
311 #define SK_WIN_SETBIT_4(sc, reg, x)	\
312 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
313 
314 #define SK_WIN_CLRBIT_4(sc, reg, x)	\
315 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
316 
317 #define SK_WIN_SETBIT_2(sc, reg, x)	\
318 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
319 
320 #define SK_WIN_CLRBIT_2(sc, reg, x)	\
321 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
322 
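/*
 * Register access helpers. With SK_USEIOSPACE defined, registers are
 * reached through a small I/O window: the RAP register selects the
 * window (SK_WIN(reg)) and SK_REG(reg) gives the offset within it.
 * With memory mapped access (the default in this driver) the whole
 * register space is visible and the offset is used directly.
 */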
323 static u_int32_t sk_win_read_4(sc, reg)
324 	struct sk_softc		*sc;
325 	int			reg;
326 {
327 #ifdef SK_USEIOSPACE
328 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
329 	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
330 #else
331 	return(CSR_READ_4(sc, reg));
332 #endif
333 }
334 
335 static u_int16_t sk_win_read_2(sc, reg)
336 	struct sk_softc		*sc;
337 	int			reg;
338 {
339 #ifdef SK_USEIOSPACE
340 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
341 	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
342 #else
343 	return(CSR_READ_2(sc, reg));
344 #endif
345 }
346 
347 static u_int8_t sk_win_read_1(sc, reg)
348 	struct sk_softc		*sc;
349 	int			reg;
350 {
351 #ifdef SK_USEIOSPACE
352 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
353 	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
354 #else
355 	return(CSR_READ_1(sc, reg));
356 #endif
357 }
358 
359 static void sk_win_write_4(sc, reg, val)
360 	struct sk_softc		*sc;
361 	int			reg;
362 	u_int32_t		val;
363 {
364 #ifdef SK_USEIOSPACE
365 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
366 	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
367 #else
368 	CSR_WRITE_4(sc, reg, val);
369 #endif
370 	return;
371 }
372 
373 static void sk_win_write_2(sc, reg, val)
374 	struct sk_softc		*sc;
375 	int			reg;
376 	u_int32_t		val;
377 {
378 #ifdef SK_USEIOSPACE
379 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
380 	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
381 #else
382 	CSR_WRITE_2(sc, reg, val);
383 #endif
384 	return;
385 }
386 
387 static void sk_win_write_1(sc, reg, val)
388 	struct sk_softc		*sc;
389 	int			reg;
390 	u_int32_t		val;
391 {
392 #ifdef SK_USEIOSPACE
393 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
394 	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
395 #else
396 	CSR_WRITE_1(sc, reg, val);
397 #endif
398 	return;
399 }
400 
401 /*
402  * The VPD EEPROM contains Vital Product Data, as suggested in
403  * the PCI 2.1 specification. The VPD data is separated into areas
404  * denoted by resource IDs. The SysKonnect VPD contains an ID string
405  * resource (the name of the adapter), a read-only area resource
406  * containing various key/data fields and a read/write area which
407  * can be used to store asset management information or log messages.
408  * We read the ID string and read-only area into buffers attached to
409  * the controller softc structure for later use. At the moment,
410  * we only use the ID string during sk_attach().
411  */
412 static u_int8_t sk_vpd_readbyte(sc, addr)
413 	struct sk_softc		*sc;
414 	int			addr;
415 {
416 	int			i;
417 
418 	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
419 	for (i = 0; i < SK_TIMEOUT; i++) {
420 		DELAY(1);
421 		if (sk_win_read_2(sc,
422 		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
423 			break;
424 	}
425 
426 	if (i == SK_TIMEOUT)
427 		return(0);
428 
429 	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
430 }
431 
432 static void sk_vpd_read_res(sc, res, addr)
433 	struct sk_softc		*sc;
434 	struct vpd_res		*res;
435 	int			addr;
436 {
437 	int			i;
438 	u_int8_t		*ptr;
439 
440 	ptr = (u_int8_t *)res;
441 	for (i = 0; i < sizeof(struct vpd_res); i++)
442 		ptr[i] = sk_vpd_readbyte(sc, i + addr);
443 
444 	return;
445 }
446 
447 static void sk_vpd_read(sc)
448 	struct sk_softc		*sc;
449 {
450 	int			pos = 0, i;
451 	struct vpd_res		res;
452 
453 	if (sc->sk_vpd_prodname != NULL)
454 		free(sc->sk_vpd_prodname, M_DEVBUF);
455 	if (sc->sk_vpd_readonly != NULL)
456 		free(sc->sk_vpd_readonly, M_DEVBUF);
457 	sc->sk_vpd_prodname = NULL;
458 	sc->sk_vpd_readonly = NULL;
459 
460 	sk_vpd_read_res(sc, &res, pos);
461 
462 	if (res.vr_id != VPD_RES_ID) {
463 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
464 		    sc->sk_unit, VPD_RES_ID, res.vr_id);
465 		return;
466 	}
467 
468 	pos += sizeof(res);
469 	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
470 	for (i = 0; i < res.vr_len; i++)
471 		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
472 	sc->sk_vpd_prodname[i] = '\0';
473 	pos += i;
474 
475 	sk_vpd_read_res(sc, &res, pos);
476 
477 	if (res.vr_id != VPD_RES_READ) {
478 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
479 		    sc->sk_unit, VPD_RES_READ, res.vr_id);
480 		return;
481 	}
482 
483 	pos += sizeof(res);
484 	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_INTWAIT);
485 	for (i = 0; i < res.vr_len; i++)
486 		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
487 
488 	return;
489 }
490 
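/*
 * MII glue: dispatch PHY register accesses and link state changes to
 * the XMAC (GEnesis) or Marvell GMAC (Yukon) specific routines below.
 */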
491 static int sk_miibus_readreg(dev, phy, reg)
492 	device_t		dev;
493 	int			phy, reg;
494 {
495 	struct sk_if_softc	*sc_if;
496 
497 	sc_if = device_get_softc(dev);
498 
499 	switch(sc_if->sk_softc->sk_type) {
500 	case SK_GENESIS:
501 		return(sk_xmac_miibus_readreg(sc_if, phy, reg));
502 	case SK_YUKON:
503 		return(sk_marv_miibus_readreg(sc_if, phy, reg));
504 	}
505 
506 	return(0);
507 }
508 
509 static int sk_miibus_writereg(dev, phy, reg, val)
510 	device_t		dev;
511 	int			phy, reg, val;
512 {
513 	struct sk_if_softc	*sc_if;
514 
515 	sc_if = device_get_softc(dev);
516 
517 	switch(sc_if->sk_softc->sk_type) {
518 	case SK_GENESIS:
519 		return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
520 	case SK_YUKON:
521 		return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
522 	}
523 
524 	return(0);
525 }
526 
527 static void sk_miibus_statchg(dev)
528 	device_t		dev;
529 {
530 	struct sk_if_softc	*sc_if;
531 
532 	sc_if = device_get_softc(dev);
533 
534 	switch(sc_if->sk_softc->sk_type) {
535 	case SK_GENESIS:
536 		sk_xmac_miibus_statchg(sc_if);
537 		break;
538 	case SK_YUKON:
539 		sk_marv_miibus_statchg(sc_if);
540 		break;
541 	}
542 
543 	return;
544 }
545 
546 static int sk_xmac_miibus_readreg(sc_if, phy, reg)
547 	struct sk_if_softc	*sc_if;
548 	int			phy, reg;
549 {
550 	int			i;
551 
552 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
553 		return(0);
554 
555 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
556 	SK_XM_READ_2(sc_if, XM_PHY_DATA);
557 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
558 		for (i = 0; i < SK_TIMEOUT; i++) {
559 			DELAY(1);
560 			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
561 			    XM_MMUCMD_PHYDATARDY)
562 				break;
563 		}
564 
565 		if (i == SK_TIMEOUT) {
566 			printf("sk%d: phy failed to come ready\n",
567 			    sc_if->sk_unit);
568 			return(0);
569 		}
570 	}
571 	DELAY(1);
572 	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
573 }
574 
575 static int sk_xmac_miibus_writereg(sc_if, phy, reg, val)
576 	struct sk_if_softc	*sc_if;
577 	int			phy, reg, val;
578 {
579 	int			i;
580 
581 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
582 	for (i = 0; i < SK_TIMEOUT; i++) {
583 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
584 			break;
585 	}
586 
587 	if (i == SK_TIMEOUT) {
588 		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
589 		return(ETIMEDOUT);
590 	}
591 
592 	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
593 	for (i = 0; i < SK_TIMEOUT; i++) {
594 		DELAY(1);
595 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
596 			break;
597 	}
598 
599 	if (i == SK_TIMEOUT)
600 		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
601 
602 	return(0);
603 }
604 
605 static void sk_xmac_miibus_statchg(sc_if)
606 	struct sk_if_softc	*sc_if;
607 {
608 	struct mii_data		*mii;
609 
610 	mii = device_get_softc(sc_if->sk_miibus);
611 
612 	/*
613 	 * If this is a GMII PHY, manually set the XMAC's
614 	 * duplex mode accordingly.
615 	 */
616 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
617 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
618 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
619 		} else {
620 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
621 		}
622 	}
623 
624 	return;
625 }
626 
627 static int sk_marv_miibus_readreg(sc_if, phy, reg)
628 	struct sk_if_softc	*sc_if;
629 	int			phy, reg;
630 {
631 	u_int16_t		val;
632 	int			i;
633 
634 	if (phy != 0 ||
635 	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
636 	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
637 		return(0);
638 	}
639 
640         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
641 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
642 
643 	for (i = 0; i < SK_TIMEOUT; i++) {
644 		DELAY(1);
645 		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
646 		if (val & YU_SMICR_READ_VALID)
647 			break;
648 	}
649 
650 	if (i == SK_TIMEOUT) {
651 		printf("sk%d: phy failed to come ready\n",
652 		    sc_if->sk_unit);
653 		return(0);
654 	}
655 
656 	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
657 
658 	return(val);
659 }
660 
661 static int sk_marv_miibus_writereg(sc_if, phy, reg, val)
662 	struct sk_if_softc	*sc_if;
663 	int			phy, reg, val;
664 {
665 	int			i;
666 
667 	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
668 	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
669 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
670 
671 	for (i = 0; i < SK_TIMEOUT; i++) {
672 		DELAY(1);
673 		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
674 			break;
675 	}
676 
677 	return(0);
678 }
679 
680 static void sk_marv_miibus_statchg(sc_if)
681 	struct sk_if_softc	*sc_if;
682 {
683 	return;
684 }
685 
686 #define XMAC_POLY		0xEDB88320
687 #define GMAC_POLY               0x04C11DB7L
688 #define HASH_BITS		6
689 
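/*
 * Multicast hash for the XMAC: a reflected CRC-32 (polynomial
 * 0xEDB88320) over the 6-byte address; the low HASH_BITS bits of the
 * complemented CRC select one of the 64 hash buckets.
 */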
690 static u_int32_t xmac_calchash(addr)
691 	caddr_t			addr;
692 {
693 	u_int32_t		idx, bit, data, crc;
694 
695 	/* Compute CRC for the address value. */
696 	crc = 0xFFFFFFFF; /* initial value */
697 
698 	for (idx = 0; idx < 6; idx++) {
699 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
700 			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? XMAC_POLY : 0);
701 	}
702 
703 	return (~crc & ((1 << HASH_BITS) - 1));
704 }
705 
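/*
 * Multicast hash for the Yukon GMAC: each address byte is bit-reversed
 * and fed into a non-reflected CRC-32 (polynomial 0x04C11DB7); the low
 * HASH_BITS bits of the CRC select the bucket.
 */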
706 static u_int32_t gmac_calchash(addr)
707     caddr_t			addr;
708 {
709     u_int32_t               idx, bit, crc, tmpData, data;
710 
711     /* Compute CRC for the address value. */
712     crc = 0xFFFFFFFF; /* initial value */
713 
714     for (idx = 0; idx < 6; idx++) {
715         data = *addr++;
716 
717         /* Change bit order in byte. */
718         tmpData = data;
719         for (bit = 0; bit < 8; bit++) {
720             if (tmpData & 1) {
721                 data |=  1 << (7 - bit);
722             }
723             else {
724                 data &= ~(1 << (7 - bit));
725             }
726 
727             tmpData >>= 1;
728         }
729 
730         crc ^= (data << 24);
731         for (bit = 0; bit < 8; bit++) {
732             if (crc & 0x80000000) {
733                 crc = (crc << 1) ^ GMAC_POLY;
734             } else {
735                 crc <<= 1;
736             }
737         }
738     }
739 
740     return (crc & ((1 << HASH_BITS) - 1));
741 }
742 
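/*
 * Program one entry of the XMAC's perfect (exact match) receive
 * filter with the given station address.
 */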
743 static void sk_setfilt(sc_if, addr, slot)
744 	struct sk_if_softc	*sc_if;
745 	caddr_t			addr;
746 	int			slot;
747 {
748 	int			base;
749 
750 	base = XM_RXFILT_ENTRY(slot);
751 
752 	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
753 	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
754 	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
755 
756 	return;
757 }
758 
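/*
 * Program the multicast filter. The existing perfect filter entries
 * and hash bits are cleared first; IFF_ALLMULTI/IFF_PROMISC simply set
 * every hash bit. Otherwise the GEnesis gets the first groups in its
 * perfect filter and the remainder (and all Yukon groups) go into the
 * 64-bit hash table.
 */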
759 static void sk_setmulti(sc_if)
760 	struct sk_if_softc	*sc_if;
761 {
762 	struct sk_softc		*sc = sc_if->sk_softc;
763 	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
764 	u_int32_t		hashes[2] = { 0, 0 };
765 	int			h, i;
766 	struct ifmultiaddr	*ifma;
767 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };
768 
769 
770 	/* First, zot all the existing filters. */
771 	switch(sc->sk_type) {
772 	case SK_GENESIS:
773 		for (i = 1; i < XM_RXFILT_MAX; i++)
774 			sk_setfilt(sc_if, (caddr_t)&dummy, i);
775 
776 		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
777 		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
778 		break;
779 	case SK_YUKON:
780 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
781 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
782 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
783 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
784 		break;
785 	}
786 
787 	/* Now program new ones. */
788 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
789 		hashes[0] = 0xFFFFFFFF;
790 		hashes[1] = 0xFFFFFFFF;
791 	} else {
792 		i = 1;
793 		/* First find the tail of the list. */
794 		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
795 					ifma = ifma->ifma_link.le_next) {
796 			if (ifma->ifma_link.le_next == NULL)
797 				break;
798 		}
799 		/* Now traverse the list backwards. */
800 		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
801 			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
802 			if (ifma->ifma_addr->sa_family != AF_LINK)
803 				continue;
804 			/*
805 			 * Program the first XM_RXFILT_MAX multicast groups
806 			 * into the perfect filter. For all others,
807 			 * use the hash table.
808 			 */
809 			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
810 				sk_setfilt(sc_if,
811 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
812 				i++;
813 				continue;
814 			}
815 
816                         switch(sc->sk_type) {
817                         case SK_GENESIS:
818                             h = xmac_calchash(
819                                 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
820                             if (h < 32)
821                                 hashes[0] |= (1 << h);
822                             else
823                                 hashes[1] |= (1 << (h - 32));
824                             break;
825 
826                         case SK_YUKON:
827                             h = gmac_calchash(
828                                 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
829                             if (h < 32)
830                                 hashes[0] |= (1 << h);
831                             else
832                                 hashes[1] |= (1 << (h - 32));
833                             break;
834                         }
835 		}
836 	}
837 
838 	switch(sc->sk_type) {
839 	case SK_GENESIS:
840 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
841 			       XM_MODE_RX_USE_PERFECT);
842 		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
843 		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
844 		break;
845 	case SK_YUKON:
846 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
847 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
848 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
849 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
850 		break;
851 	}
852 
853 	return;
854 }
855 
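/*
 * Set or clear promiscuous mode: the XMAC has a dedicated RX
 * promiscuous bit, while on the Yukon the same effect is obtained by
 * disabling the unicast and multicast address filters.
 */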
856 static void sk_setpromisc(sc_if)
857 	struct sk_if_softc	*sc_if;
858 {
859 	struct sk_softc		*sc = sc_if->sk_softc;
860 	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
861 
862 	switch(sc->sk_type) {
863 	case SK_GENESIS:
864 		if (ifp->if_flags & IFF_PROMISC) {
865 			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
866 		} else {
867 			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
868 		}
869 		break;
870 	case SK_YUKON:
871 		if (ifp->if_flags & IFF_PROMISC) {
872 			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
873 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
874 		} else {
875 			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
876 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
877 		}
878 		break;
879 	}
880 
881 	return;
882 }
883 
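/*
 * Initialize the RX descriptor ring: attach a fresh jumbo buffer to
 * each descriptor and link the descriptors into a circular list.
 */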
884 static int sk_init_rx_ring(sc_if)
885 	struct sk_if_softc	*sc_if;
886 {
887 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
888 	struct sk_ring_data	*rd = sc_if->sk_rdata;
889 	int			i;
890 
891 	bzero((char *)rd->sk_rx_ring,
892 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
893 
894 	for (i = 0; i < SK_RX_RING_CNT; i++) {
895 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
896 		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
897 			return(ENOBUFS);
898 		if (i == (SK_RX_RING_CNT - 1)) {
899 			cd->sk_rx_chain[i].sk_next =
900 			    &cd->sk_rx_chain[0];
901 			rd->sk_rx_ring[i].sk_next =
902 			    vtophys(&rd->sk_rx_ring[0]);
903 		} else {
904 			cd->sk_rx_chain[i].sk_next =
905 			    &cd->sk_rx_chain[i + 1];
906 			rd->sk_rx_ring[i].sk_next =
907 			    vtophys(&rd->sk_rx_ring[i + 1]);
908 		}
909 	}
910 
911 	sc_if->sk_cdata.sk_rx_prod = 0;
912 	sc_if->sk_cdata.sk_rx_cons = 0;
913 
914 	return(0);
915 }
916 
917 static void sk_init_tx_ring(sc_if)
918 	struct sk_if_softc	*sc_if;
919 {
920 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
921 	struct sk_ring_data	*rd = sc_if->sk_rdata;
922 	int			i;
923 
924 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
925 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
926 
927 	for (i = 0; i < SK_TX_RING_CNT; i++) {
928 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
929 		if (i == (SK_TX_RING_CNT - 1)) {
930 			cd->sk_tx_chain[i].sk_next =
931 			    &cd->sk_tx_chain[0];
932 			rd->sk_tx_ring[i].sk_next =
933 			    vtophys(&rd->sk_tx_ring[0]);
934 		} else {
935 			cd->sk_tx_chain[i].sk_next =
936 			    &cd->sk_tx_chain[i + 1];
937 			rd->sk_tx_ring[i].sk_next =
938 			    vtophys(&rd->sk_tx_ring[i + 1]);
939 		}
940 	}
941 
942 	sc_if->sk_cdata.sk_tx_prod = 0;
943 	sc_if->sk_cdata.sk_tx_cons = 0;
944 	sc_if->sk_cdata.sk_tx_cnt = 0;
945 
946 	return;
947 }
948 
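/*
 * Attach a receive buffer to an RX descriptor. If no mbuf is passed
 * in, allocate a new mbuf header and back it with a buffer from the
 * private jumbo pool; otherwise recycle the caller's mbuf.
 */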
949 static int sk_newbuf(sc_if, c, m)
950 	struct sk_if_softc	*sc_if;
951 	struct sk_chain		*c;
952 	struct mbuf		*m;
953 {
954 	struct mbuf		*m_new = NULL;
955 	struct sk_rx_desc	*r;
956 
957 	if (m == NULL) {
958 		caddr_t			*buf = NULL;
959 
960 		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
961 		if (m_new == NULL)
962 			return(ENOBUFS);
963 
964 		/* Allocate the jumbo buffer */
965 		buf = sk_jalloc(sc_if);
966 		if (buf == NULL) {
967 			m_freem(m_new);
968 #ifdef SK_VERBOSE
969 			printf("sk%d: jumbo allocation failed "
970 			    "-- packet dropped!\n", sc_if->sk_unit);
971 #endif
972 			return(ENOBUFS);
973 		}
974 
975 		/* Attach the buffer to the mbuf */
976 		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
977 		m_new->m_flags |= M_EXT | M_EXT_OLD;
978 		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
979 		    m_new->m_len = SK_MCLBYTES;
980 		m_new->m_ext.ext_nfree.old = sk_jfree;
981 		m_new->m_ext.ext_nref.old = sk_jref;
982 	} else {
983 		/*
984 	 	 * We're re-using a previously allocated mbuf;
985 		 * be sure to re-init pointers and lengths to
986 		 * default values.
987 		 */
988 		m_new = m;
989 		m_new->m_len = m_new->m_pkthdr.len = SK_MCLBYTES;
990 		m_new->m_data = m_new->m_ext.ext_buf;
991 	}
992 
993 	/*
994 	 * Adjust alignment so packet payload begins on a
995 	 * longword boundary. Mandatory for Alpha, useful on
996 	 * x86 too.
997 	 */
998 	m_adj(m_new, ETHER_ALIGN);
999 
1000 	r = c->sk_desc;
1001 	c->sk_mbuf = m_new;
1002 	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
1003 	r->sk_ctl = m_new->m_len | SK_RXSTAT;
1004 
1005 	return(0);
1006 }
1007 
1008 /*
1009  * Allocate jumbo buffer storage. The SysKonnect adapters support
1010  * "jumbograms" (9K frames), although SysKonnect doesn't currently
1011  * use them in their drivers. In order for us to use them, we need
1012  * large 9K receive buffers, however standard mbuf clusters are only
1013  * 2048 bytes in size. Consequently, we need to allocate and manage
1014  * our own jumbo buffer pool. Fortunately, this does not require an
1015  * excessive amount of additional code.
1016  */
1017 static int sk_alloc_jumbo_mem(sc_if)
1018 	struct sk_if_softc	*sc_if;
1019 {
1020 	caddr_t			ptr;
1021 	int		i;
1022 	struct sk_jpool_entry   *entry;
1023 
1024 	/* Grab a big chunk o' storage. */
1025 	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
1026 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1027 
1028 	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
1029 		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
1030 		return(ENOBUFS);
1031 	}
1032 
1033 	SLIST_INIT(&sc_if->sk_jfree_listhead);
1034 	SLIST_INIT(&sc_if->sk_jinuse_listhead);
1035 
1036 	/*
1037 	 * Now divide it up into 9K pieces and save the addresses
1038 	 * in an array. Note that we play an evil trick here by using
1039 	 * the first few bytes in the buffer to hold the address
1040 	 * of the softc structure for this interface. This is because
1041 	 * sk_jfree() needs it, but it is called by the mbuf management
1042 	 * code which will not pass it to us explicitly.
1043 	 */
1044 	ptr = sc_if->sk_cdata.sk_jumbo_buf;
1045 	for (i = 0; i < SK_JSLOTS; i++) {
1046 		u_int64_t		**aptr;
1047 		aptr = (u_int64_t **)ptr;
1048 		aptr[0] = (u_int64_t *)sc_if;
1049 		ptr += sizeof(u_int64_t);
1050 		sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
1051 		sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
1052 		ptr += SK_MCLBYTES;
1053 		entry = malloc(sizeof(struct sk_jpool_entry),
1054 		    M_DEVBUF, M_WAITOK);
1055 		if (entry == NULL) {
1056 			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
1057 			sc_if->sk_cdata.sk_jumbo_buf = NULL;
1058 			printf("sk%d: no memory for jumbo "
1059 			    "buffer queue!\n", sc_if->sk_unit);
1060 			return(ENOBUFS);
1061 		}
1062 		entry->slot = i;
1063 		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1064 		    entry, jpool_entries);
1065 	}
1066 
1067 	return(0);
1068 }
1069 
1070 /*
1071  * Allocate a jumbo buffer.
1072  */
1073 static void *sk_jalloc(sc_if)
1074 	struct sk_if_softc	*sc_if;
1075 {
1076 	struct sk_jpool_entry   *entry;
1077 
1078 	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1079 
1080 	if (entry == NULL) {
1081 #ifdef SK_VERBOSE
1082 		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1083 #endif
1084 		return(NULL);
1085 	}
1086 
1087 	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1088 	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1089 	sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
1090 	return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
1091 }
1092 
1093 /*
1094  * Adjust usage count on a jumbo buffer. In general this doesn't
1095  * get used much because our jumbo buffers don't get passed around
1096  * a lot, but it's implemented for correctness.
1097  */
1098 static void sk_jref(buf, size)
1099 	caddr_t			buf;
1100 	u_int			size;
1101 {
1102 	struct sk_if_softc	*sc_if;
1103 	u_int64_t		**aptr;
1104 	int		i;
1105 
1106 	/* Extract the softc struct pointer. */
1107 	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
1108 	sc_if = (struct sk_if_softc *)(aptr[0]);
1109 
1110 	if (sc_if == NULL)
1111 		panic("sk_jref: can't find softc pointer!");
1112 
1113 	if (size != SK_MCLBYTES)
1114 		panic("sk_jref: adjusting refcount of buf of wrong size!");
1115 
1116 	/* calculate the slot this buffer belongs to */
1117 
1118 	i = ((vm_offset_t)aptr
1119 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1120 
1121 	if ((i < 0) || (i >= SK_JSLOTS))
1122 		panic("sk_jref: asked to reference buffer "
1123 		    "that we don't manage!");
1124 	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
1125 		panic("sk_jref: buffer already free!");
1126 	else
1127 		sc_if->sk_cdata.sk_jslots[i].sk_inuse++;
1128 
1129 	return;
1130 }
1131 
1132 /*
1133  * Release a jumbo buffer.
1134  */
1135 static void sk_jfree(buf, size)
1136 	caddr_t			buf;
1137 	u_int			size;
1138 {
1139 	struct sk_if_softc	*sc_if;
1140 	u_int64_t		**aptr;
1141 	int		        i;
1142 	struct sk_jpool_entry   *entry;
1143 
1144 	/* Extract the softc struct pointer. */
1145 	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
1146 	sc_if = (struct sk_if_softc *)(aptr[0]);
1147 
1148 	if (sc_if == NULL)
1149 		panic("sk_jfree: can't find softc pointer!");
1150 
1151 	if (size != SK_MCLBYTES)
1152 		panic("sk_jfree: freeing buffer of wrong size!");
1153 
1154 	/* calculate the slot this buffer belongs to */
1155 
1156 	i = ((vm_offset_t)aptr
1157 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1158 
1159 	if ((i < 0) || (i >= SK_JSLOTS))
1160 		panic("sk_jfree: asked to free buffer that we don't manage!");
1161 	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
1162 		panic("sk_jfree: buffer already free!");
1163 	else {
1164 		sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
1165 		if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
1166 			entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
1167 			if (entry == NULL)
1168 				panic("sk_jfree: buffer not in use!");
1169 			entry->slot = i;
1170 			SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
1171 					  jpool_entries);
1172 			SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1173 					  entry, jpool_entries);
1174 		}
1175 	}
1176 
1177 	return;
1178 }
1179 
1180 /*
1181  * Set media options.
1182  */
1183 static int sk_ifmedia_upd(ifp)
1184 	struct ifnet		*ifp;
1185 {
1186 	struct sk_if_softc	*sc_if = ifp->if_softc;
1187 	struct mii_data		*mii;
1188 
1189 	mii = device_get_softc(sc_if->sk_miibus);
1190 	sk_init(sc_if);
1191 	mii_mediachg(mii);
1192 
1193 	return(0);
1194 }
1195 
1196 /*
1197  * Report current media status.
1198  */
1199 static void sk_ifmedia_sts(ifp, ifmr)
1200 	struct ifnet		*ifp;
1201 	struct ifmediareq	*ifmr;
1202 {
1203 	struct sk_if_softc	*sc_if;
1204 	struct mii_data		*mii;
1205 
1206 	sc_if = ifp->if_softc;
1207 	mii = device_get_softc(sc_if->sk_miibus);
1208 
1209 	mii_pollstat(mii);
1210 	ifmr->ifm_active = mii->mii_media_active;
1211 	ifmr->ifm_status = mii->mii_media_status;
1212 
1213 	return;
1214 }
1215 
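/*
 * Handle socket ioctls: address, MTU and flags changes, multicast
 * list updates and media selection, reinitializing the interface
 * where required.
 */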
1216 static int sk_ioctl(ifp, command, data, cr)
1217 	struct ifnet		*ifp;
1218 	u_long			command;
1219 	caddr_t			data;
1220 	struct ucred		*cr;
1221 {
1222 	struct sk_if_softc	*sc_if = ifp->if_softc;
1223 	struct ifreq		*ifr = (struct ifreq *) data;
1224 	int			s, error = 0;
1225 	struct mii_data		*mii;
1226 
1227 	s = splimp();
1228 
1229 	switch(command) {
1230 	case SIOCSIFADDR:
1231 	case SIOCGIFADDR:
1232 		error = ether_ioctl(ifp, command, data);
1233 		break;
1234 	case SIOCSIFMTU:
1235 		if (ifr->ifr_mtu > SK_JUMBO_MTU)
1236 			error = EINVAL;
1237 		else {
1238 			ifp->if_mtu = ifr->ifr_mtu;
1239 			sk_init(sc_if);
1240 		}
1241 		break;
1242 	case SIOCSIFFLAGS:
1243 		if (ifp->if_flags & IFF_UP) {
1244 			if (ifp->if_flags & IFF_RUNNING) {
1245 				if ((ifp->if_flags ^ sc_if->sk_if_flags)
1246 				    & IFF_PROMISC) {
1247 					sk_setpromisc(sc_if);
1248 					sk_setmulti(sc_if);
1249 				}
1250 			} else
1251 				sk_init(sc_if);
1252 		} else {
1253 			if (ifp->if_flags & IFF_RUNNING)
1254 				sk_stop(sc_if);
1255 		}
1256 		sc_if->sk_if_flags = ifp->if_flags;
1257 		error = 0;
1258 		break;
1259 	case SIOCADDMULTI:
1260 	case SIOCDELMULTI:
1261 		sk_setmulti(sc_if);
1262 		error = 0;
1263 		break;
1264 	case SIOCGIFMEDIA:
1265 	case SIOCSIFMEDIA:
1266 		mii = device_get_softc(sc_if->sk_miibus);
1267 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1268 		break;
1269 	default:
1270 		error = EINVAL;
1271 		break;
1272 	}
1273 
1274 	(void)splx(s);
1275 
1276 	return(error);
1277 }
1278 
1279 /*
1280  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1281  * IDs against our list and return a device name if we find a match.
1282  */
1283 static int skc_probe(dev)
1284 	device_t		dev;
1285 {
1286 	struct sk_softc		*sc;
1287 	struct sk_type		*t = sk_devs;
1288 
1289 	sc = device_get_softc(dev);
1290 
1291 	while(t->sk_name != NULL) {
1292 		if ((pci_get_vendor(dev) == t->sk_vid) &&
1293 		    (pci_get_device(dev) == t->sk_did)) {
1294 			device_set_desc(dev, t->sk_name);
1295 			return(0);
1296 		}
1297 		t++;
1298 	}
1299 
1300 	return(ENXIO);
1301 }
1302 
1303 /*
1304  * Force the GEnesis into reset, then bring it out of reset.
1305  */
1306 static void sk_reset(sc)
1307 	struct sk_softc		*sc;
1308 {
1309 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1310 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1311 	if (sc->sk_type == SK_YUKON)
1312 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1313 
1314 	DELAY(1000);
1315 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1316 	DELAY(2);
1317 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1318 	if (sc->sk_type == SK_YUKON)
1319 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1320 
1321 	if (sc->sk_type == SK_GENESIS) {
1322 		/* Configure packet arbiter */
1323 		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1324 		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1325 		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1326 		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1327 		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1328 	}
1329 
1330 	/* Enable RAM interface */
1331 	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1332 
1333 	/*
1334          * Configure interrupt moderation. The moderation timer
1335 	 * defers interrupts specified in the interrupt moderation
1336 	 * timer mask based on the timeout specified in the interrupt
1337 	 * moderation timer init register. Each bit in the timer
1338 	 * register represents 18.825ns, so to specify a timeout in
1339 	 * microseconds, we have to multiply by 54.
1340 	 */
1341         sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
1342         sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1343 	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1344         sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1345 
1346 	return;
1347 }
1348 
1349 static int sk_probe(dev)
1350 	device_t		dev;
1351 {
1352 	struct sk_softc		*sc;
1353 
1354 	sc = device_get_softc(device_get_parent(dev));
1355 
1356 	/*
1357 	 * Not much to do here. We always know there will be
1358 	 * at least one XMAC present, and if there are two,
1359 	 * skc_attach() will create a second device instance
1360 	 * for us.
1361 	 */
1362 	switch (sc->sk_type) {
1363 	case SK_GENESIS:
1364 		device_set_desc(dev, "XaQti Corp. XMAC II");
1365 		break;
1366 	case SK_YUKON:
1367 		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1368 		break;
1369 	}
1370 
1371 	return(0);
1372 }
1373 
1374 /*
1375  * Each XMAC chip is attached as a separate logical IP interface.
1376  * Single port cards will have only one logical interface of course.
1377  */
1378 static int sk_attach(dev)
1379 	device_t		dev;
1380 {
1381 	struct sk_softc		*sc;
1382 	struct sk_if_softc	*sc_if;
1383 	struct ifnet		*ifp;
1384 	int			i, port;
1385 
1386 	if (dev == NULL)
1387 		return(EINVAL);
1388 
1389 	sc_if = device_get_softc(dev);
1390 	sc = device_get_softc(device_get_parent(dev));
1391 	port = *(int *)device_get_ivars(dev);
1392 	free(device_get_ivars(dev), M_DEVBUF);
1393 	device_set_ivars(dev, NULL);
1396 
1397 	bzero((char *)sc_if, sizeof(struct sk_if_softc));
1398 
1399 	sc_if->sk_dev = dev;
1400 	sc_if->sk_unit = device_get_unit(dev);
1401 	sc_if->sk_port = port;
1402 	sc_if->sk_softc = sc;
1403 	sc->sk_if[port] = sc_if;
1404 	if (port == SK_PORT_A)
1405 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1406 	if (port == SK_PORT_B)
1407 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1408 
1409 	/*
1410 	 * Get station address for this interface. Note that
1411 	 * dual port cards actually come with three station
1412 	 * addresses: one for each port, plus an extra. The
1413 	 * extra one is used by the SysKonnect driver software
1414 	 * as a 'virtual' station address for when both ports
1415 	 * are operating in failover mode. Currently we don't
1416 	 * use this extra address.
1417 	 */
1418 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1419 		sc_if->arpcom.ac_enaddr[i] =
1420 		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1421 
1422 	/*
1423 	 * Set up RAM buffer addresses. The NIC will have a certain
1424 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1425 	 * need to divide this up a) between the transmitter and
1426  	 * receiver and b) between the two XMACs, if this is a
1427 	 * dual port NIC. Our algorithm is to divide up the memory
1428 	 * evenly so that everyone gets a fair share.
1429 	 */
1430 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1431 		u_int32_t		chunk, val;
1432 
1433 		chunk = sc->sk_ramsize / 2;
1434 		val = sc->sk_rboff / sizeof(u_int64_t);
1435 		sc_if->sk_rx_ramstart = val;
1436 		val += (chunk / sizeof(u_int64_t));
1437 		sc_if->sk_rx_ramend = val - 1;
1438 		sc_if->sk_tx_ramstart = val;
1439 		val += (chunk / sizeof(u_int64_t));
1440 		sc_if->sk_tx_ramend = val - 1;
1441 	} else {
1442 		u_int32_t		chunk, val;
1443 
1444 		chunk = sc->sk_ramsize / 4;
1445 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1446 		    sizeof(u_int64_t);
1447 		sc_if->sk_rx_ramstart = val;
1448 		val += (chunk / sizeof(u_int64_t));
1449 		sc_if->sk_rx_ramend = val - 1;
1450 		sc_if->sk_tx_ramstart = val;
1451 		val += (chunk / sizeof(u_int64_t));
1452 		sc_if->sk_tx_ramend = val - 1;
1453 	}
1454 
1455 	/* Read and save PHY type and set PHY address */
1456 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1457 	switch(sc_if->sk_phytype) {
1458 	case SK_PHYTYPE_XMAC:
1459 		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1460 		break;
1461 	case SK_PHYTYPE_BCOM:
1462 		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1463 		break;
1464 	case SK_PHYTYPE_MARV_COPPER:
1465 		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1466 		break;
1467 	default:
1468 		printf("skc%d: unsupported PHY type: %d\n",
1469 		    sc->sk_unit, sc_if->sk_phytype);
1470 		return(ENODEV);
1471 	}
1472 
1473 	/* Allocate the descriptor queues. */
1474 	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1475 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1476 
1477 	if (sc_if->sk_rdata == NULL) {
1478 		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1479 		sc->sk_if[port] = NULL;
1480 		return(ENOMEM);
1481 	}
1482 
1483 	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1484 
1485 	/* Try to allocate memory for jumbo buffers. */
1486 	if (sk_alloc_jumbo_mem(sc_if)) {
1487 		printf("sk%d: jumbo buffer allocation failed\n",
1488 		    sc_if->sk_unit);
1489 		contigfree(sc_if->sk_rdata,
1490 		    sizeof(struct sk_ring_data), M_DEVBUF);
1491 		sc->sk_if[port] = NULL;
1492 		return(ENOMEM);
1493 	}
1494 
1495 	ifp = &sc_if->arpcom.ac_if;
1496 	ifp->if_softc = sc_if;
1497 	if_initname(ifp, "sk", sc_if->sk_unit);
1498 	ifp->if_mtu = ETHERMTU;
1499 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1500 	ifp->if_ioctl = sk_ioctl;
1501 	ifp->if_start = sk_start;
1502 	ifp->if_watchdog = sk_watchdog;
1503 	ifp->if_init = sk_init;
1504 	ifp->if_baudrate = 1000000000;
1505 	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1506 
1507 	/*
1508 	 * Do miibus setup.
1509 	 */
1510 	switch (sc->sk_type) {
1511 	case SK_GENESIS:
1512 		sk_init_xmac(sc_if);
1513 		break;
1514 	case SK_YUKON:
1515 		sk_init_yukon(sc_if);
1516 		break;
1517 	}
1518 
1519 	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1520 	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1521 		printf("sk%d: no PHY found!\n", sc_if->sk_unit);
1522 		contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM,
1523 		    M_DEVBUF);
1524 		contigfree(sc_if->sk_rdata,
1525 		    sizeof(struct sk_ring_data), M_DEVBUF);
1526 		return(ENXIO);
1527 	}
1528 
1529 	/*
1530 	 * Call MI attach routine.
1531 	 */
1532 	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
1533 	callout_init(&sc_if->sk_tick_timer);
1534 
1535 	return(0);
1536 }
1537 
1538 /*
1539  * Attach the interface. Allocate softc structures, do ifmedia
1540  * setup and ethernet/BPF attach.
1541  */
1542 static int skc_attach(dev)
1543 	device_t		dev;
1544 {
1545 	int			s;
1546 	u_int32_t		command;
1547 	struct sk_softc		*sc;
1548 	int			unit, error = 0, rid, *port;
1549 	uint8_t			skrs;
1550 
1551 	s = splimp();
1552 
1553 	sc = device_get_softc(dev);
1554 	unit = device_get_unit(dev);
1555 	bzero(sc, sizeof(struct sk_softc));
1556 	switch (pci_get_device(dev)) {
1557 	case DEVICEID_SK_V1:
1558 		sc->sk_type = SK_GENESIS;
1559 		break;
1560 	case DEVICEID_SK_V2:
1561 	case DEVICEID_3COM_3C940:
1562 	case DEVICEID_LINKSYS_EG1032:
1563 	case DEVICEID_DLINK_DGE530T:
1564 		sc->sk_type = SK_YUKON;
1565 		break;
1566 	}
1567 
1568 	/*
1569 	 * Handle power management nonsense.
1570 	 */
1571 	command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
1572 	if (command == 0x01) {
1573 		command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
1574 		if (command & SK_PSTATE_MASK) {
1575 			u_int32_t		iobase, membase, irq;
1576 
1577 			/* Save important PCI config data. */
1578 			iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1579 			membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1580 			irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1581 
1582 			/* Reset the power state. */
1583 			printf("skc%d: chip is in D%d power mode "
1584 			"-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1585 			command &= 0xFFFFFFFC;
1586 			pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);
1587 
1588 			/* Restore PCI config data. */
1589 			pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1590 			pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1591 			pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1592 		}
1593 	}
1594 
1595 	/*
1596 	 * Map control/status registers.
1597 	 */
1598 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1599 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1600 	pci_write_config(dev, PCIR_COMMAND, command, 4);
1601 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1602 
1603 #ifdef SK_USEIOSPACE
1604 	if (!(command & PCIM_CMD_PORTEN)) {
1605 		printf("skc%d: failed to enable I/O ports!\n", unit);
1606 		error = ENXIO;
1607 		goto fail;
1608 	}
1609 #else
1610 	if (!(command & PCIM_CMD_MEMEN)) {
1611 		printf("skc%d: failed to enable memory mapping!\n", unit);
1612 		error = ENXIO;
1613 		goto fail;
1614 	}
1615 #endif
1616 
1617 	rid = SK_RID;
1618 	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1619 	    0, ~0, 1, RF_ACTIVE);
1620 
1621 	if (sc->sk_res == NULL) {
1622 		printf("skc%d: couldn't map ports/memory\n", unit);
1623 		error = ENXIO;
1624 		goto fail;
1625 	}
1626 
1627 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1628 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1629 
1630 	/* Allocate interrupt */
1631 	rid = 0;
1632 	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1633 	    RF_SHAREABLE | RF_ACTIVE);
1634 
1635 	if (sc->sk_irq == NULL) {
1636 		printf("skc%d: couldn't map interrupt\n", unit);
1637 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1638 		error = ENXIO;
1639 		goto fail;
1640 	}
1641 
1642 	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1643 	    sk_intr, sc, &sc->sk_intrhand);
1644 
1645 	if (error) {
1646 		printf("skc%d: couldn't set up irq\n", unit);
1647 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1648 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1649 		goto fail;
1650 	}
1651 
1652 	/* Reset the adapter. */
1653 	sk_reset(sc);
1654 
1655 	sc->sk_unit = unit;
1656 
1657 	/* Read and save vital product data from EEPROM. */
1658 	sk_vpd_read(sc);
1659 
1660 	skrs = sk_win_read_1(sc, SK_EPROM0);
1661 	if (sc->sk_type == SK_GENESIS) {
1662 		/* Read and save RAM size and RAMbuffer offset */
1663 		switch(skrs) {
1664 		case SK_RAMSIZE_512K_64:
1665 			sc->sk_ramsize = 0x80000;
1666 			sc->sk_rboff = SK_RBOFF_0;
1667 			break;
1668 		case SK_RAMSIZE_1024K_64:
1669 			sc->sk_ramsize = 0x100000;
1670 			sc->sk_rboff = SK_RBOFF_80000;
1671 			break;
1672 		case SK_RAMSIZE_1024K_128:
1673 			sc->sk_ramsize = 0x100000;
1674 			sc->sk_rboff = SK_RBOFF_0;
1675 			break;
1676 		case SK_RAMSIZE_2048K_128:
1677 			sc->sk_ramsize = 0x200000;
1678 			sc->sk_rboff = SK_RBOFF_0;
1679 			break;
1680 		default:
1681 			printf("skc%d: unknown ram size: %d\n",
1682 			    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1683 			bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1684 			bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1685 			bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1686 			error = ENXIO;
1687 			goto fail;
1688 			break;
1689 		}
1690 	} else { /* SK_YUKON */
1691 		if (skrs == 0x00) {
1692 			sc->sk_ramsize = 0x20000;
1693 		} else {
1694 			sc->sk_ramsize = skrs * (1<<12);
1695 		}
1696 		sc->sk_rboff = SK_RBOFF_0;
1697 	}
1698 
1699 	/* Read and save physical media type */
1700 	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1701 	case SK_PMD_1000BASESX:
1702 		sc->sk_pmd = IFM_1000_SX;
1703 		break;
1704 	case SK_PMD_1000BASELX:
1705 		sc->sk_pmd = IFM_1000_LX;
1706 		break;
1707 	case SK_PMD_1000BASECX:
1708 		sc->sk_pmd = IFM_1000_CX;
1709 		break;
1710 	case SK_PMD_1000BASETX:
1711 		sc->sk_pmd = IFM_1000_TX;
1712 		break;
1713 	default:
1714 		printf("skc%d: unknown media type: 0x%x\n",
1715 		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1716 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1717 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1718 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1719 		error = ENXIO;
1720 		goto fail;
1721 	}
1722 
1723 	/* Announce the product name. */
1724 	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1725 	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1726 	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1727 	*port = SK_PORT_A;
1728 	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1729 
1730 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1731 		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1732 		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
1733 		*port = SK_PORT_B;
1734 		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1735 	}
1736 
1737 	/* Turn on the 'driver is loaded' LED. */
1738 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1739 
1740 	bus_generic_attach(dev);
1741 
1742 fail:
1743 	splx(s);
1744 	return(error);
1745 }
1746 
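/*
 * Detach a logical interface: stop it, detach it from the network
 * stack, delete the miibus child and free the descriptor ring and
 * jumbo buffer memory.
 */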
1747 static int sk_detach(dev)
1748 	device_t		dev;
1749 {
1750 	struct sk_softc		*sc;
1751 	struct sk_if_softc	*sc_if;
1752 	struct ifnet		*ifp;
1753 	int			s;
1754 
1755 	s = splimp();
1756 
1757 	sc = device_get_softc(device_get_parent(dev));
1758 	sc_if = device_get_softc(dev);
1759 	ifp = &sc_if->arpcom.ac_if;
1760 	sk_stop(sc_if);
1761 	ether_ifdetach(ifp);
1762 	bus_generic_detach(dev);
1763 	if (sc_if->sk_miibus != NULL)
1764 		device_delete_child(dev, sc_if->sk_miibus);
1765 	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1766 	contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF);
1767 
1768 	return(0);
1769 }
1770 
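/*
 * Detach the controller: delete the per-port child devices and
 * release the interrupt handler, IRQ and register window resources.
 */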
1771 static int skc_detach(dev)
1772 	device_t		dev;
1773 {
1774 	struct sk_softc		*sc;
1775 	int			s;
1776 
1777 	s = splimp();
1778 
1779 	sc = device_get_softc(dev);
1780 
1781 	bus_generic_detach(dev);
1782 	if (sc->sk_devs[SK_PORT_A] != NULL)
1783 		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1784 	if (sc->sk_devs[SK_PORT_B] != NULL)
1785 		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1786 
1787 	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1788 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1789 	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1790 
1791 	splx(s);
1792 
1793 	return(0);
1794 }
1795 
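/*
 * Map an outgoing mbuf chain onto TX descriptors starting at *txidx.
 * The first fragment is marked FIRSTFRAG, the last gets LASTFRAG plus
 * an EOF interrupt request, and ownership of the first descriptor is
 * handed to the NIC only after the whole chain has been set up.
 */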
1796 static int sk_encap(sc_if, m_head, txidx)
1797         struct sk_if_softc	*sc_if;
1798         struct mbuf		*m_head;
1799         u_int32_t		*txidx;
1800 {
1801 	struct sk_tx_desc	*f = NULL;
1802 	struct mbuf		*m;
1803 	u_int32_t		frag, cur, cnt = 0;
1804 
1805 	m = m_head;
1806 	cur = frag = *txidx;
1807 
1808 	/*
1809 	 * Start packing the mbufs in this chain into
1810 	 * the fragment pointers. Stop when we run out
1811 	 * of fragments or hit the end of the mbuf chain.
1812 	 */
1813 	for (m = m_head; m != NULL; m = m->m_next) {
1814 		if (m->m_len != 0) {
1815 			if ((SK_TX_RING_CNT -
1816 			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1817 				return(ENOBUFS);
1818 			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1819 			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1820 			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1821 			if (cnt == 0)
1822 				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1823 			else
1824 				f->sk_ctl |= SK_TXCTL_OWN;
1825 			cur = frag;
1826 			SK_INC(frag, SK_TX_RING_CNT);
1827 			cnt++;
1828 		}
1829 	}
1830 
1831 	if (m != NULL)
1832 		return(ENOBUFS);
1833 
1834 	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1835 		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1836 	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1837 	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1838 	sc_if->sk_cdata.sk_tx_cnt += cnt;
1839 
1840 	*txidx = frag;
1841 
1842 	return(0);
1843 }
1844 
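/*
 * Transmit start routine: dequeue packets from the interface send
 * queue, encapsulate them into the TX ring and kick the transmit BMU.
 * If the ring fills up, requeue the packet and set IFF_OACTIVE.
 */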
1845 static void sk_start(ifp)
1846 	struct ifnet		*ifp;
1847 {
1848         struct sk_softc		*sc;
1849         struct sk_if_softc	*sc_if;
1850         struct mbuf		*m_head = NULL;
1851         u_int32_t		idx;
1852 
1853 	sc_if = ifp->if_softc;
1854 	sc = sc_if->sk_softc;
1855 
1856 	idx = sc_if->sk_cdata.sk_tx_prod;
1857 
1858 	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1859 		IF_DEQUEUE(&ifp->if_snd, m_head);
1860 		if (m_head == NULL)
1861 			break;
1862 
1863 		/*
1864 		 * Pack the data into the transmit ring. If we
1865 		 * don't have room, set the OACTIVE flag and wait
1866 		 * for the NIC to drain the ring.
1867 		 */
1868 		if (sk_encap(sc_if, m_head, &idx)) {
1869 			IF_PREPEND(&ifp->if_snd, m_head);
1870 			ifp->if_flags |= IFF_OACTIVE;
1871 			break;
1872 		}
1873 
1874 		/*
1875 		 * If there's a BPF listener, bounce a copy of this frame
1876 		 * to him.
1877 		 */
1878 		if (ifp->if_bpf)
1879 			bpf_mtap(ifp, m_head);
1880 	}
1881 
1882 	/* Transmit */
1883 	sc_if->sk_cdata.sk_tx_prod = idx;
1884 	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1885 
1886 	/* Set a timeout in case the chip goes out to lunch. */
1887 	ifp->if_timer = 5;
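	/*
	 * The stack decrements if_timer once a second; if it reaches zero
	 * before sk_txeof() clears it, sk_watchdog() runs and reinitializes
	 * the chip.
	 */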
1888 
1889 	return;
1890 }
1891 
1892 
1893 static void sk_watchdog(ifp)
1894 	struct ifnet		*ifp;
1895 {
1896 	struct sk_if_softc	*sc_if;
1897 
1898 	sc_if = ifp->if_softc;
1899 
1900 	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1901 	sk_init(sc_if);
1902 
1903 	return;
1904 }
1905 
1906 static void skc_shutdown(dev)
1907 	device_t		dev;
1908 {
1909 	struct sk_softc		*sc;
1910 
1911 	sc = device_get_softc(dev);
1912 
1913 	/* Turn off the 'driver is loaded' LED. */
1914 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1915 
1916 	/*
1917 	 * Reset the GEnesis controller. Doing this should also
1918 	 * assert the resets on the attached XMAC(s).
1919 	 */
1920 	sk_reset(sc);
1921 
1922 	return;
1923 }
1924 
1925 static void sk_rxeof(sc_if)
1926 	struct sk_if_softc	*sc_if;
1927 {
1928 	struct mbuf		*m;
1929 	struct ifnet		*ifp;
1930 	struct sk_chain		*cur_rx;
1931 	int			total_len = 0;
1932 	int			i;
1933 	u_int32_t		rxstat;
1934 
1935 	ifp = &sc_if->arpcom.ac_if;
1936 	i = sc_if->sk_cdata.sk_rx_prod;
1937 	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1938 
1939 	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
1940 
1941 		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1942 		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
1943 		m = cur_rx->sk_mbuf;
1944 		cur_rx->sk_mbuf = NULL;
1945 		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1946 		SK_INC(i, SK_RX_RING_CNT);
1947 
1948 		if (rxstat & XM_RXSTAT_ERRFRAME) {
1949 			ifp->if_ierrors++;
1950 			sk_newbuf(sc_if, cur_rx, m);
1951 			continue;
1952 		}
1953 
1954 		/*
1955 		 * Try to allocate a new jumbo buffer. If that
1956 		 * fails, copy the packet to mbufs and put the
1957 		 * jumbo buffer back in the ring so it can be
1958 		 * re-used. If allocating mbufs fails, then we
1959 		 * have to drop the packet.
1960 		 */
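		/*
		 * The copy path below grabs ETHER_ALIGN extra bytes in front
		 * of the frame and trims them off again with m_adj(), which
		 * keeps the IP header longword aligned in the new mbufs.
		 */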
1961 		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
1962 			struct mbuf		*m0;
1963 			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1964 			    total_len + ETHER_ALIGN, 0, ifp, NULL);
1965 			sk_newbuf(sc_if, cur_rx, m);
1966 			if (m0 == NULL) {
1967 				printf("sk%d: no receive buffers "
1968 				    "available -- packet dropped!\n",
1969 				    sc_if->sk_unit);
1970 				ifp->if_ierrors++;
1971 				continue;
1972 			}
1973 			m_adj(m0, ETHER_ALIGN);
1974 			m = m0;
1975 		} else {
1976 			m->m_pkthdr.rcvif = ifp;
1977 			m->m_pkthdr.len = m->m_len = total_len;
1978 		}
1979 
1980 		ifp->if_ipackets++;
1981 		(*ifp->if_input)(ifp, m);
1982 	}
1983 
1984 	sc_if->sk_cdata.sk_rx_prod = i;
1985 
1986 	return;
1987 }
1988 
1989 static void sk_txeof(sc_if)
1990 	struct sk_if_softc	*sc_if;
1991 {
1992 	struct sk_tx_desc	*cur_tx = NULL;
1993 	struct ifnet		*ifp;
1994 	u_int32_t		idx;
1995 
1996 	ifp = &sc_if->arpcom.ac_if;
1997 
1998 	/*
1999 	 * Go through our tx ring and free mbufs for those
2000 	 * frames that have been sent.
2001 	 */
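	/*
	 * A descriptor whose OWN bit is still set is one the NIC has not
	 * finished with yet, so it marks the end of the completed region
	 * and we stop there.
	 */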
2002 	idx = sc_if->sk_cdata.sk_tx_cons;
2003 	while(idx != sc_if->sk_cdata.sk_tx_prod) {
2004 		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
2005 		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
2006 			break;
2007 		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
2008 			ifp->if_opackets++;
2009 		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
2010 			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
2011 			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
2012 		}
2013 		sc_if->sk_cdata.sk_tx_cnt--;
2014 		SK_INC(idx, SK_TX_RING_CNT);
2015 		ifp->if_timer = 0;
2016 	}
2017 
2018 	sc_if->sk_cdata.sk_tx_cons = idx;
2019 
2020 	if (cur_tx != NULL)
2021 		ifp->if_flags &= ~IFF_OACTIVE;
2022 
2023 	return;
2024 }
2025 
2026 static void sk_tick(xsc_if)
2027 	void			*xsc_if;
2028 {
2029 	struct sk_if_softc	*sc_if;
2030 	struct mii_data		*mii;
2031 	struct ifnet		*ifp;
2032 	int			i;
2033 
2034 	sc_if = xsc_if;
2035 	ifp = &sc_if->arpcom.ac_if;
2036 	mii = device_get_softc(sc_if->sk_miibus);
2037 
2038 	if (!(ifp->if_flags & IFF_UP))
2039 		return;
2040 
2041 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2042 		sk_intr_bcom(sc_if);
2043 		return;
2044 	}
2045 
2046 	/*
2047 	 * According to SysKonnect, the correct way to verify that
2048 	 * the link has come back up is to poll bit 0 of the GPIO
2049 	 * register three times. This pin has the signal from the
2050 	 * link_sync pin connected to it; if we read the same link
2051 	 * state 3 times in a row, we know the link is up.
2052 	 */
2053 	for (i = 0; i < 3; i++) {
2054 		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2055 			break;
2056 	}
2057 
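	/*
	 * Three consecutive reads with GP0 clear are taken to mean the link
	 * is stable again; reading the bit set on any poll just re-arms the
	 * one-second timer below.
	 */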
2058 	if (i != 3) {
2059 		callout_reset(&sc_if->sk_tick_timer, hz, sk_tick, sc_if);
2060 		return;
2061 	}
2062 
2063 	/* Turn the GP0 interrupt back on. */
2064 	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2065 	SK_XM_READ_2(sc_if, XM_ISR);
2066 	mii_tick(mii);
2067 	mii_pollstat(mii);
2068 	callout_stop(&sc_if->sk_tick_timer);
2069 
2070 	return;
2071 }
2072 
2073 static void sk_intr_bcom(sc_if)
2074 	struct sk_if_softc	*sc_if;
2075 {
2076 	struct sk_softc		*sc;
2077 	struct mii_data		*mii;
2078 	struct ifnet		*ifp;
2079 	int			status;
2080 
2081 	sc = sc_if->sk_softc;
2082 	mii = device_get_softc(sc_if->sk_miibus);
2083 	ifp = &sc_if->arpcom.ac_if;
2084 
2085 	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
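	/*
	 * The MAC's receiver and transmitter stay disabled (bits cleared
	 * above) while the PHY is serviced; they are turned back on at the
	 * end of this routine (the not-RUNNING path reinitializes the XMAC
	 * instead).
	 */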
2086 
2087 	/*
2088 	 * Read the PHY interrupt register to make sure
2089 	 * we clear any pending interrupts.
2090 	 */
2091 	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2092 
2093 	if (!(ifp->if_flags & IFF_RUNNING)) {
2094 		sk_init_xmac(sc_if);
2095 		return;
2096 	}
2097 
2098 	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2099 		int			lstat;
2100 		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2101 		    BRGPHY_MII_AUXSTS);
2102 
2103 		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2104 			mii_mediachg(mii);
2105 			/* Turn off the link LED. */
2106 			SK_IF_WRITE_1(sc_if, 0,
2107 			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
2108 			sc_if->sk_link = 0;
2109 		} else if (status & BRGPHY_ISR_LNK_CHG) {
2110 			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2111 			    BRGPHY_MII_IMR, 0xFF00);
2112 			mii_tick(mii);
2113 			sc_if->sk_link = 1;
2114 			/* Turn on the link LED. */
2115 			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2116 			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2117 			    SK_LINKLED_BLINK_OFF);
2118 			mii_pollstat(mii);
2119 		} else {
2120 			mii_tick(mii);
2121 			callout_reset(&sc_if->sk_tick_timer, hz,
2122 				      sk_tick, sc_if);
2123 		}
2124 	}
2125 
2126 	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2127 
2128 	return;
2129 }
2130 
2131 static void sk_intr_xmac(sc_if)
2132 	struct sk_if_softc	*sc_if;
2133 {
2134 	struct sk_softc		*sc;
2135 	u_int16_t		status;
2136 	struct mii_data		*mii;
2137 
2138 	sc = sc_if->sk_softc;
2139 	mii = device_get_softc(sc_if->sk_miibus);
2140 	status = SK_XM_READ_2(sc_if, XM_ISR);
2141 
2142 	/*
2143 	 * Link has gone down. Start MII tick timeout to
2144 	 * watch for link resync.
2145 	 */
2146 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2147 		if (status & XM_ISR_GP0_SET) {
2148 			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2149 			callout_reset(&sc_if->sk_tick_timer, hz,
2150 				      sk_tick, sc_if);
2151 		}
2152 
2153 		if (status & XM_ISR_AUTONEG_DONE) {
2154 			callout_reset(&sc_if->sk_tick_timer, hz,
2155 				      sk_tick, sc_if);
2156 		}
2157 	}
2158 
2159 	if (status & XM_IMR_TX_UNDERRUN)
2160 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2161 
2162 	if (status & XM_IMR_RX_OVERRUN)
2163 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2164 
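	/*
	 * One more read of the ISR acknowledges anything latched while we
	 * were in here; the XM_ISR bits appear to be clear-on-read.
	 */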
2165 	status = SK_XM_READ_2(sc_if, XM_ISR);
2166 
2167 	return;
2168 }
2169 
2170 static void sk_intr_yukon(sc_if)
2171 	struct sk_if_softc	*sc_if;
2172 {
2173 	int status;
2174 
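	/*
	 * The value read is simply discarded; reading SK_GMAC_ISR is
	 * presumably enough to acknowledge the MAC interrupt on Yukon parts.
	 */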
2175 	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2176 
2177 	return;
2178 }
2179 
2180 static void sk_intr(xsc)
2181 	void			*xsc;
2182 {
2183 	struct sk_softc		*sc = xsc;
2184 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
2185 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2186 	u_int32_t		status;
2187 
2188 	sc_if0 = sc->sk_if[SK_PORT_A];
2189 	sc_if1 = sc->sk_if[SK_PORT_B];
2190 
2191 	if (sc_if0 != NULL)
2192 		ifp0 = &sc_if0->arpcom.ac_if;
2193 	if (sc_if1 != NULL)
2194 		ifp1 = &sc_if1->arpcom.ac_if;
2195 
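	/*
	 * Service the interrupt source register until it shows nothing we
	 * are interested in: receive EOF, transmit EOF and MAC events are
	 * each dispatched to the appropriate per-port handler.
	 */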
2196 	for (;;) {
2197 		status = CSR_READ_4(sc, SK_ISSR);
2198 		if (!(status & sc->sk_intrmask))
2199 			break;
2200 
2201 		/* Handle receive interrupts first. */
2202 		if (status & SK_ISR_RX1_EOF) {
2203 			sk_rxeof(sc_if0);
2204 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2205 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2206 		}
2207 		if (status & SK_ISR_RX2_EOF) {
2208 			sk_rxeof(sc_if1);
2209 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2210 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2211 		}
2212 
2213 		/* Then transmit interrupts. */
2214 		if (status & SK_ISR_TX1_S_EOF) {
2215 			sk_txeof(sc_if0);
2216 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2217 			    SK_TXBMU_CLR_IRQ_EOF);
2218 		}
2219 		if (status & SK_ISR_TX2_S_EOF) {
2220 			sk_txeof(sc_if1);
2221 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2222 			    SK_TXBMU_CLR_IRQ_EOF);
2223 		}
2224 
2225 		/* Then MAC interrupts. */
2226 		if ((status & SK_ISR_MAC1) && (ifp0->if_flags & IFF_RUNNING)) {
2227 			if (sc->sk_type == SK_GENESIS)
2228 				sk_intr_xmac(sc_if0);
2229 			else
2230 				sk_intr_yukon(sc_if0);
2231 		}
2232 
2233 		if ((status & SK_ISR_MAC2) && (ifp1->if_flags & IFF_RUNNING)) {
2234 			if (sc->sk_type == SK_GENESIS)
2235 				sk_intr_xmac(sc_if1);
2236 			else
2237 				sk_intr_yukon(sc_if1);
2238 		}
2239 
2240 		if (status & SK_ISR_EXTERNAL_REG) {
2241 			if (ifp0 != NULL &&
2242 			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2243 				sk_intr_bcom(sc_if0);
2244 			if (ifp1 != NULL &&
2245 			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2246 				sk_intr_bcom(sc_if1);
2247 		}
2248 	}
2249 
2250 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2251 
2252 	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
2253 		sk_start(ifp0);
2254 	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
2255 		sk_start(ifp1);
2256 
2257 	return;
2258 }
2259 
2260 static void sk_init_xmac(sc_if)
2261 	struct sk_if_softc	*sc_if;
2262 {
2263 	struct sk_softc		*sc;
2264 	struct ifnet		*ifp;
2265 	struct sk_bcom_hack	bhack[] = {
2266 	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2267 	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2268 	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2269 	{ 0, 0 } };
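	/*
	 * bhack[] above is a zero-terminated list of PHY register/value
	 * pairs that is replayed into early BCM5400 PHYs further down.
	 */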
2270 
2271 	sc = sc_if->sk_softc;
2272 	ifp = &sc_if->arpcom.ac_if;
2273 
2274 	/* Unreset the XMAC. */
2275 	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2276 	DELAY(1000);
2277 
2278 	/* Reset the XMAC's internal state. */
2279 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2280 
2281 	/* Save the XMAC II revision */
2282 	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2283 
2284 	/*
2285 	 * Perform additional initialization for external PHYs,
2286 	 * namely for the 1000baseTX cards that use the XMAC's
2287 	 * GMII mode.
2288 	 */
2289 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2290 		int			i = 0;
2291 		u_int32_t		val;
2292 
2293 		/* Take PHY out of reset. */
2294 		val = sk_win_read_4(sc, SK_GPIO);
2295 		if (sc_if->sk_port == SK_PORT_A)
2296 			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2297 		else
2298 			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2299 		sk_win_write_4(sc, SK_GPIO, val);
2300 
2301 		/* Enable GMII mode on the XMAC. */
2302 		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2303 
2304 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2305 		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2306 		DELAY(10000);
2307 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2308 		    BRGPHY_MII_IMR, 0xFFF0);
2309 
2310 		/*
2311 		 * Early versions of the BCM5400 apparently have
2312 		 * a bug that requires them to have their reserved
2313 		 * registers initialized to some magic values. I don't
2314 		 * know what the numbers do, I'm just the messenger.
2315 		 */
2316 		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2317 		    == 0x6041) {
2318 			while(bhack[i].reg) {
2319 				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2320 				    bhack[i].reg, bhack[i].val);
2321 				i++;
2322 			}
2323 		}
2324 	}
2325 
2326 	/* Set station address */
2327 	SK_XM_WRITE_2(sc_if, XM_PAR0,
2328 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
2329 	SK_XM_WRITE_2(sc_if, XM_PAR1,
2330 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
2331 	SK_XM_WRITE_2(sc_if, XM_PAR2,
2332 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
2333 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2334 
2335 	if (ifp->if_flags & IFF_BROADCAST) {
2336 		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2337 	} else {
2338 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2339 	}
2340 
2341 	/* We don't need the FCS appended to the packet. */
2342 	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2343 
2344 	/* We want short frames padded to 60 bytes. */
2345 	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2346 
2347 	/*
2348 	 * Enable the reception of all error frames. This is
2349 	 * a necessary evil due to the design of the XMAC. The
2350 	 * XMAC's receive FIFO is only 8K in size, however jumbo
2351 	 * frames can be up to 9000 bytes in length. When bad
2352 	 * frame filtering is enabled, the XMAC's RX FIFO operates
2353 	 * in 'store and forward' mode. For this to work, the
2354 	 * entire frame has to fit into the FIFO, but that means
2355 	 * that jumbo frames larger than 8192 bytes will be
2356 	 * truncated. Disabling all bad frame filtering causes
2357 	 * the RX FIFO to operate in streaming mode, in which
2358 	 * case the XMAC will start transferring frames out of the
2359 	 * RX FIFO as soon as the FIFO threshold is reached.
2360 	 */
2361 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2362 	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2363 	    XM_MODE_RX_INRANGELEN);
2364 
2365 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2366 		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2367 	else
2368 		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2369 
2370 	/*
2371 	 * Bump up the transmit threshold. This helps hold off transmit
2372 	 * underruns when we're blasting traffic from both ports at once.
2373 	 */
2374 	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2375 
2376 	/* Set promiscuous mode */
2377 	sk_setpromisc(sc_if);
2378 
2379 	/* Set multicast filter */
2380 	sk_setmulti(sc_if);
2381 
2382 	/* Clear and enable interrupts */
2383 	SK_XM_READ_2(sc_if, XM_ISR);
2384 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2385 		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2386 	else
2387 		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2388 
2389 	/* Configure MAC arbiter */
2390 	switch(sc_if->sk_xmac_rev) {
2391 	case XM_XMAC_REV_B2:
2392 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2393 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2394 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2395 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2396 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2397 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2398 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2399 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2400 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2401 		break;
2402 	case XM_XMAC_REV_C1:
2403 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2404 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2405 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2406 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2407 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2408 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2409 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2410 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2411 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2412 		break;
2413 	default:
2414 		break;
2415 	}
2416 	sk_win_write_2(sc, SK_MACARB_CTL,
2417 	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2418 
2419 	sc_if->sk_link = 1;
2420 
2421 	return;
2422 }
2423 
2424 static void sk_init_yukon(sc_if)
2425 	struct sk_if_softc	*sc_if;
2426 {
2427 	u_int32_t		phy;
2428 	u_int16_t		reg;
2429 	struct sk_softc		*sc;
2430 	struct ifnet		*ifp;
2431 	int			i;
2432 
2433 	sc = sc_if->sk_softc;
2434 	ifp = &sc_if->arpcom.ac_if;
2435 
2436 	/* GMAC and GPHY Reset */
2437 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2438 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2439 	DELAY(1000);
2440 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2441 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2442 	DELAY(1000);
2443 
2444 	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2445 		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2446 
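	/*
	 * Select fibre or copper GPHY mode based on the PMD (media) type
	 * that was probed at attach time.
	 */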
2447 	switch(sc_if->sk_softc->sk_pmd) {
2448 	case IFM_1000_SX:
2449 	case IFM_1000_LX:
2450 		phy |= SK_GPHY_FIBER;
2451 		break;
2452 
2453 	case IFM_1000_CX:
2454 	case IFM_1000_TX:
2455 		phy |= SK_GPHY_COPPER;
2456 		break;
2457 	}
2458 
2459 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2460 	DELAY(1000);
2461 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2462 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2463 		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2464 
2465 	/* unused read of the interrupt source register */
2466 	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2467 
2468 	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2469 
2470 	/* MIB Counter Clear Mode set */
2471 	reg |= YU_PAR_MIB_CLR;
2472 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2473 
2474 	/* MIB Counter Clear Mode clear */
2475 	reg &= ~YU_PAR_MIB_CLR;
2476 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2477 
2478 	/* receive control reg */
2479 	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2480 
2481 	/* transmit parameter register */
2482 	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2483 		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2484 
2485 	/* serial mode register */
2486 	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
2487 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2488 		reg |= YU_SMR_MFL_JUMBO;
2489 	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
2490 
2491 	/* Setup Yukon's address */
2492 	for (i = 0; i < 3; i++) {
2493 		/* Write Source Address 1 (unicast filter) */
2494 		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2495 			      sc_if->arpcom.ac_enaddr[i * 2] |
2496 			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
2497 	}
2498 
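	/*
	 * The second station address is loaded from the controller's
	 * per-port MAC address registers (SK_MAC1_0 and friends).
	 */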
2499 	for (i = 0; i < 3; i++) {
2500 		reg = sk_win_read_2(sc_if->sk_softc,
2501 				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2502 		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2503 	}
2504 
2505 	/* Set promiscuous mode */
2506 	sk_setpromisc(sc_if);
2507 
2508 	/* Set multicast filter */
2509 	sk_setmulti(sc_if);
2510 
2511 	/* Clear the interrupt masks for counter overflows */
2512 	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2513 	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2514 	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2515 
2516 	/* Configure RX MAC FIFO */
2517 	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2518 	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2519 
2520 	/* Configure TX MAC FIFO */
2521 	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2522 	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2523 }
2524 
2525 /*
2526  * Note that to properly initialize any part of the GEnesis chip,
2527  * you first have to take it out of reset mode.
2528  */
2529 static void sk_init(xsc)
2530 	void			*xsc;
2531 {
2532 	struct sk_if_softc	*sc_if = xsc;
2533 	struct sk_softc		*sc;
2534 	struct ifnet		*ifp;
2535 	struct mii_data		*mii;
2536 	int			s;
2537 	u_int16_t		reg;
2538 
2539 	s = splimp();
2540 
2541 	ifp = &sc_if->arpcom.ac_if;
2542 	sc = sc_if->sk_softc;
2543 	mii = device_get_softc(sc_if->sk_miibus);
2544 
2545 	/* Cancel pending I/O and free all RX/TX buffers. */
2546 	sk_stop(sc_if);
2547 
2548 	if (sc->sk_type == SK_GENESIS) {
2549 		/* Configure LINK_SYNC LED */
2550 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2551 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2552 			SK_LINKLED_LINKSYNC_ON);
2553 
2554 		/* Configure RX LED */
2555 		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2556 			SK_RXLEDCTL_COUNTER_START);
2557 
2558 		/* Configure TX LED */
2559 		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2560 			SK_TXLEDCTL_COUNTER_START);
2561 	}
2562 
2563 	/* Configure I2C registers */
2564 
2565 	/* Configure XMAC(s) */
2566 	switch (sc->sk_type) {
2567 	case SK_GENESIS:
2568 		sk_init_xmac(sc_if);
2569 		break;
2570 	case SK_YUKON:
2571 		sk_init_yukon(sc_if);
2572 		break;
2573 	}
2574 	mii_mediachg(mii);
2575 
2576 	if (sc->sk_type == SK_GENESIS) {
2577 		/* Configure MAC FIFOs */
2578 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2579 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2580 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2581 
2582 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2583 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2584 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2585 	}
2586 
2587 	/* Configure transmit arbiter(s) */
2588 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2589 	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2590 
2591 	/* Configure RAMbuffers */
2592 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2593 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2594 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2595 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2596 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2597 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2598 
2599 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2600 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2601 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2602 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2603 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2604 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2605 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2606 
2607 	/* Configure BMUs */
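	/*
	 * The BMUs are pointed at the physical (bus) address of the first
	 * descriptor of each ring via vtophys(); the rings are assumed to
	 * live below 4GB, so the high address words are simply zeroed.
	 */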
2608 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2609 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2610 	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2611 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2612 
2613 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2614 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2615 	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2616 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2617 
2618 	/* Init descriptors */
2619 	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2620 		printf("sk%d: initialization failed: no "
2621 		    "memory for rx buffers\n", sc_if->sk_unit);
2622 		sk_stop(sc_if);
2623 		(void)splx(s);
2624 		return;
2625 	}
2626 	sk_init_tx_ring(sc_if);
2627 
2628 	/* Configure interrupt handling */
2629 	CSR_READ_4(sc, SK_ISSR);
2630 	if (sc_if->sk_port == SK_PORT_A)
2631 		sc->sk_intrmask |= SK_INTRS1;
2632 	else
2633 		sc->sk_intrmask |= SK_INTRS2;
2634 
2635 	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2636 
2637 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2638 
2639 	/* Start BMUs. */
2640 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2641 
2642 	switch(sc->sk_type) {
2643 	case SK_GENESIS:
2644 		/* Enable XMACs TX and RX state machines */
2645 		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2646 		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2647 		break;
2648 	case SK_YUKON:
2649 		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2650 		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2651 		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
2652 		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2653 	}
2654 
2655 	ifp->if_flags |= IFF_RUNNING;
2656 	ifp->if_flags &= ~IFF_OACTIVE;
2657 
2658 	splx(s);
2659 
2660 	return;
2661 }
2662 
2663 static void sk_stop(sc_if)
2664 	struct sk_if_softc	*sc_if;
2665 {
2666 	int			i;
2667 	struct sk_softc		*sc;
2668 	struct ifnet		*ifp;
2669 
2670 	sc = sc_if->sk_softc;
2671 	ifp = &sc_if->arpcom.ac_if;
2672 
2673 	callout_stop(&sc_if->sk_tick_timer);
2674 
2675 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2676 		u_int32_t		val;
2677 
2678 		/* Put PHY back into reset. */
2679 		val = sk_win_read_4(sc, SK_GPIO);
2680 		if (sc_if->sk_port == SK_PORT_A) {
2681 			val |= SK_GPIO_DIR0;
2682 			val &= ~SK_GPIO_DAT0;
2683 		} else {
2684 			val |= SK_GPIO_DIR2;
2685 			val &= ~SK_GPIO_DAT2;
2686 		}
2687 		sk_win_write_4(sc, SK_GPIO, val);
2688 	}
2689 
2690 	/* Turn off various components of this interface. */
2691 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2692 	switch (sc->sk_type) {
2693 	case SK_GENESIS:
2694 		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2695 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2696 		break;
2697 	case SK_YUKON:
2698 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2699 		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2700 		break;
2701 	}
2702 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2703 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2704 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2705 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2706 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2707 	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2708 	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2709 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2710 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2711 
2712 	/* Disable interrupts */
2713 	if (sc_if->sk_port == SK_PORT_A)
2714 		sc->sk_intrmask &= ~SK_INTRS1;
2715 	else
2716 		sc->sk_intrmask &= ~SK_INTRS2;
2717 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2718 
2719 	SK_XM_READ_2(sc_if, XM_ISR);
2720 	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2721 
2722 	/* Free RX and TX mbufs still in the queues. */
2723 	for (i = 0; i < SK_RX_RING_CNT; i++) {
2724 		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2725 			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2726 			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2727 		}
2728 	}
2729 
2730 	for (i = 0; i < SK_TX_RING_CNT; i++) {
2731 		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2732 			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2733 			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2734 		}
2735 	}
2736 
2737 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2738 
2739 	return;
2740 }
2741