xref: /openbsd/sys/dev/pci/if_sk.c (revision 905646f0)
1 /*	$OpenBSD: if_sk.c,v 1.191 2020/07/10 13:26:38 patrick Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999, 2000
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
35  */
36 
37 /*
38  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39  *
40  * Permission to use, copy, modify, and distribute this software for any
41  * purpose with or without fee is hereby granted, provided that the above
42  * copyright notice and this permission notice appear in all copies.
43  *
44  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51  */
52 
53 /*
54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55  * the SK-984x series adapters, both single port and dual port.
56  * References:
57  * 	The XaQti XMAC II datasheet,
58  * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60  *
61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63  * convenience to others until Vitesse corrects this problem:
64  *
65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66  *
67  * Written by Bill Paul <wpaul@ee.columbia.edu>
68  * Department of Electrical Engineering
69  * Columbia University, New York City
70  */
71 
72 /*
73  * The SysKonnect gigabit ethernet adapters consist of two main
74  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76  * components and a PHY while the GEnesis controller provides a PCI
77  * interface with DMA support. Each card may have between 512K and
78  * 2MB of SRAM on board depending on the configuration.
79  *
80  * The SysKonnect GEnesis controller can have either one or two XMAC
81  * chips connected to it, allowing single or dual port NIC configurations.
82  * SysKonnect has the distinction of being the only vendor on the market
83  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85  * XMAC registers. This driver takes advantage of these features to allow
86  * both XMACs to operate as independent interfaces.
87  */
88 
89 #include "bpfilter.h"
90 
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/sockio.h>
94 #include <sys/mbuf.h>
95 #include <sys/malloc.h>
96 #include <sys/kernel.h>
97 #include <sys/socket.h>
98 #include <sys/timeout.h>
99 #include <sys/device.h>
100 #include <sys/queue.h>
101 
102 #include <net/if.h>
103 
104 #include <netinet/in.h>
105 #include <netinet/if_ether.h>
106 
107 #include <net/if_media.h>
108 
109 #if NBPFILTER > 0
110 #include <net/bpf.h>
111 #endif
112 
113 #include <dev/mii/mii.h>
114 #include <dev/mii/miivar.h>
115 #include <dev/mii/brgphyreg.h>
116 
117 #include <dev/pci/pcireg.h>
118 #include <dev/pci/pcivar.h>
119 #include <dev/pci/pcidevs.h>
120 
121 #include <dev/pci/if_skreg.h>
122 #include <dev/pci/if_skvar.h>
123 
124 int skc_probe(struct device *, void *, void *);
125 void skc_attach(struct device *, struct device *self, void *aux);
126 int skc_detach(struct device *, int);
127 int skc_activate(struct device *, int);
128 int sk_probe(struct device *, void *, void *);
129 void sk_attach(struct device *, struct device *self, void *aux);
130 int sk_detach(struct device *, int);
131 int sk_activate(struct device *, int);
132 int skcprint(void *, const char *);
133 int sk_intr(void *);
134 void sk_intr_bcom(struct sk_if_softc *);
135 void sk_intr_xmac(struct sk_if_softc *);
136 void sk_intr_yukon(struct sk_if_softc *);
137 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
138 void sk_rxeof(struct sk_if_softc *);
139 void sk_txeof(struct sk_if_softc *);
140 int sk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
141 void sk_start(struct ifnet *);
142 int sk_ioctl(struct ifnet *, u_long, caddr_t);
143 void sk_init(void *);
144 void sk_init_xmac(struct sk_if_softc *);
145 void sk_init_yukon(struct sk_if_softc *);
146 void sk_stop(struct sk_if_softc *, int softonly);
147 void sk_watchdog(struct ifnet *);
148 int sk_ifmedia_upd(struct ifnet *);
149 void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
150 void skc_reset(struct sk_softc *);
151 int sk_newbuf(struct sk_if_softc *);
152 int sk_reset(struct sk_if_softc *);
153 int sk_init_rx_ring(struct sk_if_softc *);
154 void sk_fill_rx_ring(struct sk_if_softc *);
155 int sk_init_tx_ring(struct sk_if_softc *);
156 
157 int sk_xmac_miibus_readreg(struct device *, int, int);
158 void sk_xmac_miibus_writereg(struct device *, int, int, int);
159 void sk_xmac_miibus_statchg(struct device *);
160 
161 int sk_marv_miibus_readreg(struct device *, int, int);
162 void sk_marv_miibus_writereg(struct device *, int, int, int);
163 void sk_marv_miibus_statchg(struct device *);
164 
165 void sk_setfilt(struct sk_if_softc *, caddr_t, int);
166 void sk_iff(struct sk_if_softc *);
167 void sk_iff_xmac(struct sk_if_softc *);
168 void sk_iff_yukon(struct sk_if_softc *);
169 
170 void sk_tick(void *);
171 void sk_yukon_tick(void *);
172 
173 #ifdef SK_DEBUG
174 #define DPRINTF(x)	if (skdebug) printf x
175 #define DPRINTFN(n,x)	if (skdebug >= (n)) printf x
176 int	skdebug = 0;
177 
178 void sk_dump_txdesc(struct sk_tx_desc *, int);
179 void sk_dump_mbuf(struct mbuf *);
180 void sk_dump_bytes(const char *, int);
181 #else
182 #define DPRINTF(x)
183 #define DPRINTFN(n,x)
184 #endif
185 
/*
 * Supported device vendor/product IDs, matched by pci_matchbyid() in
 * skc_probe().  The Linksys EG1032 is not listed here because it is
 * identified by its PCI subsystem ID (SK_LINKSYS_EG1032_SUBID) in
 * skc_probe() instead.
 */
const struct pci_matchid skc_devices[] = {
	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940 },
	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940B },
	{ PCI_VENDOR_CNET,		PCI_PRODUCT_CNET_GIGACARD },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T_A1 },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T_B1 },
	{ PCI_VENDOR_LINKSYS,		PCI_PRODUCT_LINKSYS_EG1064 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_BELKIN },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK98XX },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK98XX2 },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9821 },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9843 }
};
201 
202 #define SK_LINKSYS_EG1032_SUBID 0x00151737
203 
/* Read a 32-bit controller window register (thin CSR_READ_4 wrapper). */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}
209 
/* Read a 16-bit controller window register (thin CSR_READ_2 wrapper). */
static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}
215 
/* Read an 8-bit controller window register (thin CSR_READ_1 wrapper). */
static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}
221 
/* Write a 32-bit controller window register (thin CSR_WRITE_4 wrapper). */
static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}
227 
/* Write a 16-bit controller window register (thin CSR_WRITE_2 wrapper). */
static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}
233 
/* Write an 8-bit controller window register (thin CSR_WRITE_1 wrapper). */
static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}
239 
/*
 * MII read callback for XMAC-based (GEnesis) ports.  Returns the 16-bit
 * register value, or 0 on an invalid PHY address or timeout.
 */
int
sk_xmac_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

	/* The internal XMAC PHY only answers at address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return (0);

	/*
	 * Select the target register; the throwaway read of XM_PHY_DATA
	 * presumably kicks off the MII read cycle — the real result is
	 * fetched again at the bottom.
	 */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHY: poll until the read data becomes valid. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("%s: phy failed to come ready\n",
			    sc_if->sk_dev.dv_xname);
			return (0);
		}
	}
	DELAY(1);
	return (SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
270 
/*
 * MII write callback for XMAC-based (GEnesis) ports.  Waits for the
 * PHY interface to go idle, writes the value, then waits for the write
 * to complete; timeouts are reported but otherwise ignored.
 */
void
sk_xmac_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));

	/* Select the target register, then wait for the PHY to go idle. */
	/* NOTE(review): unlike the loops below/in readreg, this poll has
	 * no DELAY(1) per iteration — presumably intentional; confirm. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return;
	}

	/* Issue the write and poll for completion. */
	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}
301 
/*
 * MII status-change callback for XMAC-based ports: mirror the PHY's
 * negotiated duplex into the XMAC MMU command register.
 */
void
sk_xmac_miibus_statchg(struct device *dev)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	struct mii_data *mii = &sc_if->sk_mii;

	DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));

	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		else
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
	}
}
321 
/*
 * MII read callback for Marvell (Yukon) ports.  Issues a read command
 * through the SMI control register and polls for READ_VALID.  Returns
 * the register value, or 0 on an unsupported PHY/address or timeout.
 */
int
sk_marv_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	/* Only PHY address 0 of the Marvell copper/fiber PHY is valid. */
	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
		    phy, reg));
		return (0);
	}

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the SMI read completes. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return (0);
	}

 	DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}
363 
/*
 * MII write callback for Marvell (Yukon) ports.  Loads the data
 * register, issues a write command via SMI, and polls until BUSY
 * clears; a timeout is reported but otherwise ignored.
 */
void
sk_marv_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	/* Data must be loaded before the write command is issued. */
	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}
386 
/*
 * MII status-change callback for Marvell (Yukon) ports.  Performs no
 * register updates; under SK_DEBUG it merely dumps the GPCR register.
 */
void
sk_marv_miibus_statchg(struct device *dev)
{
	DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}
393 
/*
 * Program one XMAC perfect-filter slot with a 6-byte MAC address,
 * written as three little-endian 16-bit halves.
 * NOTE(review): the u_int16_t loads assume addr is 2-byte aligned —
 * callers pass arrays, but confirm on strict-alignment platforms.
 */
void
sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
{
	int base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, letoh16(*(u_int16_t *)(&addr[0])));
	SK_XM_WRITE_2(sc_if, base + 2, letoh16(*(u_int16_t *)(&addr[2])));
	SK_XM_WRITE_2(sc_if, base + 4, letoh16(*(u_int16_t *)(&addr[4])));
}
403 
404 void
405 sk_iff(struct sk_if_softc *sc_if)
406 {
407 	struct sk_softc *sc = sc_if->sk_softc;
408 
409 	if (SK_IS_GENESIS(sc))
410 		sk_iff_xmac(sc_if);
411 	else
412 		sk_iff_yukon(sc_if);
413 }
414 
/*
 * Program the XMAC (GEnesis) receive filter from the interface flags
 * and multicast list: station-address matching is always on, the
 * perfect-filter slots are cleared, and either promiscuous mode, an
 * all-ones hash (ALLMULTI), or a computed multicast hash is set up.
 */
void
sk_iff_xmac(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct arpcom *ac = &sc_if->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t reg, hashes[2];
	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };	/* all-zero MAC used to clear filter slots */
	int h, i;

	/* Start from the current mode with all filter bits cleared. */
	reg = SK_XM_READ_4(sc_if, XM_MODE);
	reg &= ~(XM_MODE_RX_NOBROAD | XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH |
	    XM_MODE_RX_USE_PERFECT | XM_MODE_RX_USE_STATION);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 */
	reg |= XM_MODE_RX_USE_STATION;

	/* don't use perfect filter. */
	/* Slot 0 is skipped; slots 1..XM_RXFILT_MAX-1 are zeroed. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, (caddr_t)&dummy, i);

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Multicast ranges can't be hashed; fall back to ALLMULTI. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			reg |= XM_MODE_RX_PROMISC;
		else
			reg |= XM_MODE_RX_USE_HASH;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		reg |= XM_MODE_RX_USE_HASH;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Hash index: low SK_HASH_BITS bits of the LE CRC32. */
			h = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & ((1 << SK_HASH_BITS) - 1);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
	SK_XM_WRITE_4(sc_if, XM_MODE, reg);
}
470 
/*
 * Program the Yukon GMAC receive filter from the interface flags and
 * multicast list.  Unicast (station) filtering is always enabled;
 * promiscuous mode disables it, ALLMULTI fills the hash with ones,
 * otherwise a multicast hash is computed (big-endian CRC, unlike the
 * little-endian CRC used for the XMAC).
 */
void
sk_iff_yukon(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct arpcom *ac = &sc_if->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	u_int16_t rcr;
	int h;

	rcr = SK_YU_READ_2(sc_if, YUKON_RCR);
	rcr &= ~(YU_RCR_MUFLEN | YU_RCR_UFLEN);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 */
	rcr |= YU_RCR_UFLEN;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Multicast ranges can't be hashed; fall back to ALLMULTI. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rcr &= ~YU_RCR_UFLEN;	/* promisc: disable unicast filter too */
		else
			rcr |= YU_RCR_MUFLEN;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rcr |= YU_RCR_MUFLEN;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Hash index: low SK_HASH_BITS bits of the BE CRC32. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & ((1 << SK_HASH_BITS) - 1);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* The 64-bit hash is spread across four 16-bit registers. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, rcr);
}
523 
/*
 * Initialize the receive descriptor ring: zero it, link each software
 * chain entry and hardware descriptor to its successor (wrapping at
 * the end), reset producer/consumer indices, and populate the ring
 * with receive buffers.  Always returns 0.
 */
int
sk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	int			i, nexti;

	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
		if (i == (SK_RX_RING_CNT - 1))
			nexti = 0;	/* last descriptor wraps to the first */
		else
			nexti = i + 1;
		cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
		/* Hardware next-pointer is a little-endian DMA address. */
		htolem32(&rd->sk_rx_ring[i].sk_next,
		    SK_RX_RING_ADDR(sc_if, nexti));
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	/* Accounting for ring slots: low watermark 2, high SK_RX_RING_CNT. */
	if_rxr_init(&sc_if->sk_cdata.sk_rx_ring, 2, SK_RX_RING_CNT);

	sk_fill_rx_ring(sc_if);

	return (0);
}
553 
554 void
555 sk_fill_rx_ring(struct sk_if_softc *sc_if)
556 {
557 	struct if_rxring *rxr = &sc_if->sk_cdata.sk_rx_ring;
558 	u_int slots;
559 
560 	for (slots = if_rxr_get(rxr, SK_RX_RING_CNT); slots > 0; slots--) {
561 		if (sk_newbuf(sc_if) == ENOBUFS)
562 			break;
563 	}
564 	if_rxr_put(rxr, slots);
565 }
566 
/*
 * Initialize the transmit descriptor ring: zero it, link descriptors
 * circularly, and create one DMA map per descriptor, queued on
 * sk_txmap_head for later use by sk_encap().  Returns 0 on success or
 * ENOBUFS if a DMA map or map-entry allocation fails (maps created so
 * far remain queued; they are reclaimed when the interface is stopped).
 */
int
sk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	bus_dmamap_t		dmamap;
	struct sk_txmap_entry	*entry;
	int			i, nexti;

	bzero(sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);

	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
		if (i == (SK_TX_RING_CNT - 1))
			nexti = 0;	/* last descriptor wraps to the first */
		else
			nexti = i + 1;
		cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
		/* Hardware next-pointer is a little-endian DMA address. */
		htolem32(&rd->sk_tx_ring[i].sk_next,
		    SK_TX_RING_ADDR(sc_if, nexti));

		/* One map per descriptor, up to SK_NTXSEG segments each. */
		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
		   SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	/* Flush the zeroed/linked descriptors out to the device. */
	SK_CDTXSYNC(sc_if, 0, SK_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (0);
}
613 
/*
 * Attach a fresh receive buffer at the current producer slot: allocate
 * a jumbo-sized cluster mbuf, DMA-load it, fill in the hardware
 * descriptor (64-bit address split into lo/hi words) and hand it to
 * the device.  Returns 0 on success, ENOBUFS on allocation or DMA-load
 * failure.
 */
int
sk_newbuf(struct sk_if_softc *sc_if)
{
	struct mbuf		*m;
	struct sk_chain		*c;
	struct sk_rx_desc	*r;
	bus_dmamap_t		dmamap;
	u_int			prod;
	int			error;
	uint64_t		dva;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, SK_JLEN);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = SK_JLEN;
	/* Shift the payload so the IP header ends up aligned. */
	m_adj(m, ETHER_ALIGN);

	prod = sc_if->sk_cdata.sk_rx_prod;
	dmamap = sc_if->sk_cdata.sk_rx_map[prod];

	error = bus_dmamap_load_mbuf(sc_if->sk_softc->sc_dmatag, dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
	    dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	c = &sc_if->sk_cdata.sk_rx_chain[prod];
	c->sk_mbuf = m;

	/* Descriptor fields are little-endian; address is split 32/32. */
	r = c->sk_desc;
	dva = dmamap->dm_segs[0].ds_addr;
	htolem32(&r->sk_data_lo, dva);
	htolem32(&r->sk_data_hi, dva >> 32);
	/* SK_RXSTAT includes the ownership bit handing the slot to hw. */
	htolem32(&r->sk_ctl, dmamap->dm_segs[0].ds_len | SK_RXSTAT);

	SK_CDRXSYNC(sc_if, prod, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	SK_INC(prod, SK_RX_RING_CNT);
	sc_if->sk_cdata.sk_rx_prod = prod;

	return (0);
}
661 
662 /*
663  * Set media options.
664  */
665 int
666 sk_ifmedia_upd(struct ifnet *ifp)
667 {
668 	struct sk_if_softc *sc_if = ifp->if_softc;
669 
670 	mii_mediachg(&sc_if->sk_mii);
671 	return (0);
672 }
673 
674 /*
675  * Report current media status.
676  */
677 void
678 sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
679 {
680 	struct sk_if_softc *sc_if = ifp->if_softc;
681 
682 	mii_pollstat(&sc_if->sk_mii);
683 	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
684 	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
685 }
686 
/*
 * Interface ioctl handler.  Runs at splnet.  ENETRESET from any case
 * (or from ether_ioctl) is translated into a receive-filter reload if
 * the interface is running.
 */
int
sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Bring the interface up if an address is assigned. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sk_init(sc_if);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;	/* already up: just reload filters */
			else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if, 0);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCGIFRXR:
		/* Export receive-ring occupancy to userland (systat etc.). */
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, SK_JLEN, &sc_if->sk_cdata.sk_rx_ring);

		break;

	default:
		error = ether_ioctl(ifp, &sc_if->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			sk_iff(sc_if);
		error = 0;
	}

	splx(s);
	return (error);
}
741 
/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
skc_probe(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t subid;

	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	/*
	 * The Linksys EG1032 shares its device ID with an unsupported
	 * card, so it is matched by its PCI subsystem ID rather than
	 * through the skc_devices table.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LINKSYS &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LINKSYS_EG1032 &&
	    subid == SK_LINKSYS_EG1032_SUBID)
		return (1);

	return (pci_matchbyid((struct pci_attach_args *)aux, skc_devices,
	    nitems(skc_devices)));
}
763 
/*
 * Force the GEnesis into reset, then bring it out of reset.
 * Afterwards re-enable the packet arbiter (GEnesis only) and the RAM
 * interface, and set up interrupt moderation.
 */
void
skc_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks;

	DPRINTFN(2, ("skc_reset\n"));

	/* Assert software/master reset (and link reset on Yukon)... */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_IS_YUKON(sc))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* ...then release it in the reverse order. */
	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_IS_YUKON(sc))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
	DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	if (SK_IS_GENESIS(sc)) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
		break;
	}
	/*
	 * NOTE(review): imtimer_ticks looks unused, but SK_IM_USECS()
	 * presumably expands to reference it — confirm in if_skreg.h
	 * before removing.
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
}
824 
825 int
826 sk_probe(struct device *parent, void *match, void *aux)
827 {
828 	struct skc_attach_args *sa = aux;
829 
830 	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
831 		return (0);
832 
833 	switch (sa->skc_type) {
834 	case SK_GENESIS:
835 	case SK_YUKON:
836 	case SK_YUKON_LITE:
837 	case SK_YUKON_LP:
838 		return (1);
839 	}
840 
841 	return (0);
842 }
843 
844 /*
845  * Each XMAC chip is attached as a separate logical IP interface.
846  * Single port cards will have only one logical interface of course.
847  */
848 void
849 sk_attach(struct device *parent, struct device *self, void *aux)
850 {
851 	struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
852 	struct sk_softc *sc = (struct sk_softc *)parent;
853 	struct skc_attach_args *sa = aux;
854 	struct ifnet *ifp;
855 	caddr_t kva;
856 	int i, error;
857 
858 	sc_if->sk_port = sa->skc_port;
859 	sc_if->sk_softc = sc;
860 	sc->sk_if[sa->skc_port] = sc_if;
861 
862 	if (sa->skc_port == SK_PORT_A)
863 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
864 	if (sa->skc_port == SK_PORT_B)
865 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
866 
867 	DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));
868 
869 	/*
870 	 * Get station address for this interface. Note that
871 	 * dual port cards actually come with three station
872 	 * addresses: one for each port, plus an extra. The
873 	 * extra one is used by the SysKonnect driver software
874 	 * as a 'virtual' station address for when both ports
875 	 * are operating in failover mode. Currently we don't
876 	 * use this extra address.
877 	 */
878 	for (i = 0; i < ETHER_ADDR_LEN; i++)
879 		sc_if->arpcom.ac_enaddr[i] =
880 		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);
881 
882 	printf(": address %s\n",
883 	    ether_sprintf(sc_if->arpcom.ac_enaddr));
884 
885 	/*
886 	 * Set up RAM buffer addresses. The NIC will have a certain
887 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
888 	 * need to divide this up a) between the transmitter and
889  	 * receiver and b) between the two XMACs, if this is a
890 	 * dual port NIC. Our algorithm is to divide up the memory
891 	 * evenly so that everyone gets a fair share.
892 	 */
893 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
894 		u_int32_t		chunk, val;
895 
896 		chunk = sc->sk_ramsize / 2;
897 		val = sc->sk_rboff / sizeof(u_int64_t);
898 		sc_if->sk_rx_ramstart = val;
899 		val += (chunk / sizeof(u_int64_t));
900 		sc_if->sk_rx_ramend = val - 1;
901 		sc_if->sk_tx_ramstart = val;
902 		val += (chunk / sizeof(u_int64_t));
903 		sc_if->sk_tx_ramend = val - 1;
904 	} else {
905 		u_int32_t		chunk, val;
906 
907 		chunk = sc->sk_ramsize / 4;
908 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
909 		    sizeof(u_int64_t);
910 		sc_if->sk_rx_ramstart = val;
911 		val += (chunk / sizeof(u_int64_t));
912 		sc_if->sk_rx_ramend = val - 1;
913 		sc_if->sk_tx_ramstart = val;
914 		val += (chunk / sizeof(u_int64_t));
915 		sc_if->sk_tx_ramend = val - 1;
916 	}
917 
918 	DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
919 	    "           tx_ramstart=%#x tx_ramend=%#x\n",
920 	    sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
921 	    sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
922 
923 	/* Read and save PHY type */
924 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
925 
926 	/* Set PHY address */
927 	if (SK_IS_GENESIS(sc)) {
928 		switch (sc_if->sk_phytype) {
929 		case SK_PHYTYPE_XMAC:
930 			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
931 			break;
932 		case SK_PHYTYPE_BCOM:
933 			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
934 			break;
935 		default:
936 			printf("%s: unsupported PHY type: %d\n",
937 			    sc->sk_dev.dv_xname, sc_if->sk_phytype);
938 			return;
939 		}
940 	}
941 
942 	if (SK_IS_YUKON(sc)) {
943 		if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
944 		    sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
945 			/* not initialized, punt */
946 			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
947 
948 			sc->sk_coppertype = 1;
949 		}
950 
951 		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
952 
953 		if (!(sc->sk_coppertype))
954 			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
955 	}
956 
957 	/* Allocate the descriptor queues. */
958 	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
959 	    PAGE_SIZE, 0, &sc_if->sk_ring_seg, 1, &sc_if->sk_ring_nseg,
960 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
961 		printf(": can't alloc rx buffers\n");
962 		goto fail;
963 	}
964 	if (bus_dmamem_map(sc->sc_dmatag, &sc_if->sk_ring_seg,
965 	    sc_if->sk_ring_nseg, sizeof(struct sk_ring_data),
966 	    &kva, BUS_DMA_NOWAIT)) {
967 		printf(": can't map dma buffers (%lu bytes)\n",
968 		    (ulong)sizeof(struct sk_ring_data));
969 		goto fail_1;
970 	}
971 	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
972 	    sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT,
973 	    &sc_if->sk_ring_map)) {
974 		printf(": can't create dma map\n");
975 		goto fail_2;
976 	}
977 	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
978 	    sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
979 		printf(": can't load dma map\n");
980 		goto fail_3;
981 	}
982 	sc_if->sk_rdata = (struct sk_ring_data *)kva;
983 
984 	for (i = 0; i < SK_RX_RING_CNT; i++) {
985 		error = bus_dmamap_create(sc->sc_dmatag, SK_JLEN, 1,
986 		    SK_JLEN, 0, 0, &sc_if->sk_cdata.sk_rx_map[i]);
987 		if (error != 0) {
988 			printf(": unable to create rx DMA map %d, "
989 			    "error = %d\n", i, error);
990 			goto fail_4;
991 		}
992 	}
993 
994 	ifp = &sc_if->arpcom.ac_if;
995 	ifp->if_softc = sc_if;
996 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
997 	ifp->if_ioctl = sk_ioctl;
998 	ifp->if_start = sk_start;
999 	ifp->if_watchdog = sk_watchdog;
1000 	ifp->if_hardmtu = SK_JUMBO_MTU;
1001 	ifq_set_maxlen(&ifp->if_snd, SK_TX_RING_CNT - 1);
1002 	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1003 
1004 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1005 
1006 	if (sk_reset(sc_if) == -1) {
1007 		printf(": unknown device type %d\n", sc_if->sk_softc->sk_type);
1008 		/* dealloc jumbo on error */
1009 		goto fail_3;
1010 	}
1011 
1012  	DPRINTFN(2, ("sk_attach: 1\n"));
1013 
1014 	sc_if->sk_mii.mii_ifp = ifp;
1015 	if (SK_IS_GENESIS(sc)) {
1016 		sc_if->sk_mii.mii_readreg = sk_xmac_miibus_readreg;
1017 		sc_if->sk_mii.mii_writereg = sk_xmac_miibus_writereg;
1018 		sc_if->sk_mii.mii_statchg = sk_xmac_miibus_statchg;
1019 	} else {
1020 		sc_if->sk_mii.mii_readreg = sk_marv_miibus_readreg;
1021 		sc_if->sk_mii.mii_writereg = sk_marv_miibus_writereg;
1022 		sc_if->sk_mii.mii_statchg = sk_marv_miibus_statchg;
1023 	}
1024 
1025 	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
1026 	    sk_ifmedia_upd, sk_ifmedia_sts);
1027 	if (SK_IS_GENESIS(sc)) {
1028 		mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
1029 		    MII_OFFSET_ANY, 0);
1030 	} else {
1031 		mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
1032 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
1033 	}
1034 	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
1035 		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
1036 		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
1037 			    0, NULL);
1038 		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1039 	} else
1040 		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);
1041 
1042 	if (SK_IS_GENESIS(sc)) {
1043 		timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if);
1044 		timeout_add_sec(&sc_if->sk_tick_ch, 1);
1045 	} else
1046 		timeout_set(&sc_if->sk_tick_ch, sk_yukon_tick, sc_if);
1047 
1048 	/*
1049 	 * Call MI attach routines.
1050 	 */
1051 	if_attach(ifp);
1052 	ether_ifattach(ifp);
1053 
1054 	DPRINTFN(2, ("sk_attach: end\n"));
1055 	return;
1056 fail_4:
1057 	for (i = 0; i < SK_RX_RING_CNT; i++) {
1058 		if (sc_if->sk_cdata.sk_rx_map[i] == NULL)
1059 			continue;
1060 
1061 		bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_map[i]);
1062 	}
1063 fail_3:
1064 	bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct sk_ring_data));
1065 fail_2:
1066 	bus_dmamem_free(sc->sc_dmatag, &sc_if->sk_ring_seg, sc_if->sk_ring_nseg);
1067 fail_1:
1068 	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
1069 fail:
1070 	sc->sk_if[sa->skc_port] = NULL;
1071 }
1072 
1073 int
1074 sk_reset(struct sk_if_softc *sc_if)
1075 {
1076 	/*
1077 	 * Do miibus setup.
1078 	 */
1079 	switch (sc_if->sk_softc->sk_type) {
1080 	case SK_GENESIS:
1081 		sk_init_xmac(sc_if);
1082 		break;
1083 	case SK_YUKON:
1084 	case SK_YUKON_LITE:
1085 	case SK_YUKON_LP:
1086 		sk_init_yukon(sc_if);
1087 		break;
1088 	default:
1089 		return (-1);
1090 	}
1091 	return (0);
1092 }
1093 
/*
 * Detach one MAC port: stop the chip, tear down the PHY/media layer,
 * detach the network interface, and release the descriptor-ring DMA
 * resources that were allocated at attach time.
 */
int
sk_detach(struct device *self, int flags)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp= &sc_if->arpcom.ac_if;

	/* Nothing to do if this port never finished attaching. */
	if (sc->sk_if[sc_if->sk_port] == NULL)
		return (0);

	sk_stop(sc_if, 1);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) != NULL)
		mii_detach(&sc_if->sk_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc_if->sk_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/*
	 * Release ring DMA resources in reverse order of allocation:
	 * unmap the kva, free the segments, then destroy the map.
	 */
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc_if->sk_rdata,
	    sizeof(struct sk_ring_data));
	bus_dmamem_free(sc->sc_dmatag,
	    &sc_if->sk_ring_seg, sc_if->sk_ring_nseg);
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
	sc->sk_if[sc_if->sk_port] = NULL;

	return (0);
}
1125 
1126 int
1127 sk_activate(struct device *self, int act)
1128 {
1129 	struct sk_if_softc *sc_if = (void *)self;
1130 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
1131 	int rv = 0;
1132 
1133 	switch (act) {
1134 	case DVACT_RESUME:
1135 		sk_reset(sc_if);
1136 		if (ifp->if_flags & IFF_RUNNING)
1137 			sk_init(sc_if);
1138 		break;
1139 	default:
1140 		rv = config_activate_children(self, act);
1141 		break;
1142 	}
1143 	return (rv);
1144 }
1145 
1146 int
1147 skcprint(void *aux, const char *pnp)
1148 {
1149 	struct skc_attach_args *sa = aux;
1150 
1151 	if (pnp)
1152 		printf("sk port %c at %s",
1153 		    (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1154 	else
1155 		printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1156 	return (UNCONF);
1157 }
1158 
1159 /*
1160  * Attach the interface. Allocate softc structures, do ifmedia
1161  * setup and ethernet/BPF attach.
1162  */
void
skc_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_softc *sc = (struct sk_softc *)self;
	struct pci_attach_args *pa = aux;
	struct skc_attach_args skca;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	u_int8_t skrs;
	char *revstr = NULL;

	DPRINTFN(2, ("begin skc_attach\n"));

	/* Make sure the chip is powered up before we touch registers. */
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
	if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag,
	    &sc->sk_bhandle, NULL, &sc->sk_bsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	sc->sc_dmatag = pa->pa_dmat;

	/* Identify the chip generation and revision. */
	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
	sc->sk_pc = pc;

	/* bail out here if chip is not recognized */
	if (! SK_IS_GENESIS(sc) && ! SK_IS_YUKON(sc)) {
		printf(": unknown chip type: %d\n", sc->sk_type);
		goto fail_1;
	}
	DPRINTFN(2, ("skc_attach: allocate interrupt\n"));

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc,
	    self->dv_xname);
	if (sc->sk_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	/* Reset the adapter. */
	skc_reset(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (SK_IS_GENESIS(sc)) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf(": unknown ram size: %d\n", skrs);
			goto fail_2;
			break;
		}
	} else {
		/* Yukon: EPROM0 encodes RAM size in 4K units; 0 means 128K. */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
	    sc->sk_ramsize, sc->sk_ramsize / 1024, sc->sk_rboff));

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	/* 'T' and '1' denote twisted-pair (copper) media. */
	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;

	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_name = "GEnesis";
		break;
	case SK_YUKON:
		sc->sk_name = "Yukon";
		break;
	case SK_YUKON_LITE:
		sc->sk_name = "Yukon Lite";
		break;
	case SK_YUKON_LP:
		sc->sk_name = "Yukon LP";
		break;
	default:
		sc->sk_name = "Yukon (Unknown)";
	}

	/* Yukon Lite Rev A0 needs special test, from sk98lin driver */
	if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
		u_int32_t flashaddr;
		u_int8_t testbyte;

		flashaddr = sk_win_read_4(sc, SK_EP_ADDR);

		/* test Flash-Address Register */
		sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
		testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);

		if (testbyte != 0) {
			/* This is a Yukon Lite Rev A0 */
			sc->sk_type = SK_YUKON_LITE;
			sc->sk_rev = SK_YUKON_LITE_REV_A0;
			/* restore Flash-Address Register */
			sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
		}
	}

	/* Pick a printable revision string for Yukon Lite chips. */
	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	/* Announce the product name. */
	printf(", %s", sc->sk_name);
	if (revstr != NULL)
		printf(" rev. %s", revstr);
	printf(" (0x%x): %s\n", sc->sk_rev, intrstr);

	sc->sk_macs = 1;

	/* Dual-MAC boards clear the SINGLEMAC bit in SK_CONFIG. */
	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
		sc->sk_macs++;

	/* Attach a child sk(4) device for each MAC port present. */
	skca.skc_port = SK_PORT_A;
	skca.skc_type = sc->sk_type;
	skca.skc_rev = sc->sk_rev;
	(void)config_found(&sc->sk_dev, &skca, skcprint);

	if (sc->sk_macs > 1) {
		skca.skc_port = SK_PORT_B;
		skca.skc_type = sc->sk_type;
		skca.skc_rev = sc->sk_rev;
		(void)config_found(&sc->sk_dev, &skca, skcprint);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	return;

fail_2:
	pci_intr_disestablish(pc, sc->sk_intrhand);
fail_1:
	bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize);
}
1353 
1354 int
1355 skc_detach(struct device *self, int flags)
1356 {
1357 	struct sk_softc *sc = (struct sk_softc *)self;
1358 	int rv;
1359 
1360 	if (sc->sk_intrhand)
1361 		pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand);
1362 
1363 	rv = config_detach_children(self, flags);
1364 	if (rv != 0)
1365 		return (rv);
1366 
1367 	if (sc->sk_bsize > 0)
1368 		bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize);
1369 
1370 	return(0);
1371 }
1372 
1373 int
1374 skc_activate(struct device *self, int act)
1375 {
1376 	struct sk_softc *sc = (void *)self;
1377 	int rv = 0;
1378 
1379 	switch (act) {
1380 	case DVACT_RESUME:
1381 		skc_reset(sc);
1382 		rv = config_activate_children(self, act);
1383 		break;
1384 	default:
1385 		rv = config_activate_children(self, act);
1386 		break;
1387 	}
1388 	return (rv);
1389 }
1390 
/*
 * Encapsulate one mbuf chain into the transmit ring starting at
 * *txidx.  On success, *txidx is advanced past the descriptors used
 * and 0 is returned; returns ENOBUFS when no DMA map is free, or 1
 * when the chain cannot be loaded.  The OWN bit of the first
 * descriptor is set last so the chip never sees a partial frame.
 */
int
sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_tx_desc	*f = NULL;
	u_int32_t		frag, cur, sk_ctl;
	int			i;
	struct sk_txmap_entry	*entry;
	bus_dmamap_t		txmap;
	uint64_t		dva;

	DPRINTFN(2, ("sk_encap\n"));

	/* Grab a free DMA map from the per-port free list. */
	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("sk_encap: no txmap available\n"));
		return (ENOBUFS);
	}
	txmap = entry->dmamap;

	cur = frag = *txidx;

	switch (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		/* Compact the chain and retry the load once. */
		if (m_defrag(m_head, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (1);
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Fill one descriptor per DMA segment.  OWN is set on every
	 * descriptor except the first, which is handed over below
	 * after the rest of the frame is in place.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc_if->sk_rdata->sk_tx_ring[frag];
		dva = txmap->dm_segs[i].ds_addr;
		htolem32(&f->sk_data_lo, dva);
		htolem32(&f->sk_data_hi, dva >> 32);
		sk_ctl = txmap->dm_segs[i].ds_len | SK_OPCODE_DEFAULT;
		if (i == 0)
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		else
			sk_ctl |= SK_TXCTL_OWN;
		htolem32(&f->sk_ctl, sk_ctl);
		cur = frag;
		SK_INC(frag, SK_TX_RING_CNT);
	}

	/* Remember the mbuf and map on the last descriptor used. */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);

	sc_if->sk_cdata.sk_tx_map[cur] = entry;
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
		htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR);

	/* Sync descriptors before handing to chip */
	SK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Now give the whole frame to the chip in one step. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |=
		htole32(SK_TXCTL_OWN);

	/* Sync first descriptor to hand it off */
	SK_CDTXSYNC(sc_if, *txidx, 1, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;

#ifdef SK_DEBUG
	if (skdebug >= 2) {
		struct sk_tx_desc *desc;
		u_int32_t idx;
		for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
			desc = &sc_if->sk_rdata->sk_tx_ring[idx];
			sk_dump_txdesc(desc, idx);
		}
	}
#endif

	*txidx = frag;

	DPRINTFN(2, ("sk_encap: completed successfully\n"));

	return (0);
}
1482 
/*
 * Transmit start routine.  Dequeue packets from the interface send
 * queue and place them on the transmit ring until the ring cannot
 * hold another maximally-fragmented packet, then kick the BMU once.
 */
void
sk_start(struct ifnet *ifp)
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx = sc_if->sk_cdata.sk_tx_prod;
	int			post = 0;

	DPRINTFN(2, ("sk_start\n"));

	for (;;) {
		/*
		 * Stop if a worst-case packet (SK_NTXSEG segments)
		 * might not fit in the remaining ring space.
		 */
		if (sc_if->sk_cdata.sk_tx_cnt + SK_NTXSEG + 1 >
		    SK_TX_RING_CNT) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			m_freem(m_head);
			continue;
		}

		/* now we are committed to transmit the packet */

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

		post = 1;
	}
	/* Nothing was queued: no need to poke the chip. */
	if (post == 0)
		return;

	/* Transmit */
	sc_if->sk_cdata.sk_tx_prod = idx;
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = SK_TX_TIMEOUT;
}
1538 
1539 
1540 void
1541 sk_watchdog(struct ifnet *ifp)
1542 {
1543 	struct sk_if_softc *sc_if = ifp->if_softc;
1544 
1545 	/*
1546 	 * Reclaim first as there is a possibility of losing Tx completion
1547 	 * interrupts.
1548 	 */
1549 	sk_txeof(sc_if);
1550 	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
1551 		printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
1552 
1553 		ifp->if_oerrors++;
1554 
1555 		sk_init(sc_if);
1556 	}
1557 }
1558 
1559 static __inline int
1560 sk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
1561 {
1562 	if (sc->sk_type == SK_GENESIS) {
1563 		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
1564 		    XM_RXSTAT_BYTES(stat) != len)
1565 			return (0);
1566 	} else {
1567 		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1568 		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1569 		    YU_RXSTAT_JABBER)) != 0 ||
1570 		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1571 		    YU_RXSTAT_BYTES(stat) != len)
1572 			return (0);
1573 	}
1574 
1575 	return (1);
1576 }
1577 
/*
 * Receive completion handler.  Walk the receive ring from the
 * consumer index, hand completed frames to the network stack, and
 * refill the ring.  Stops at the first descriptor still owned by
 * the chip or not yet filled.
 */
void
sk_rxeof(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct if_rxring	*rxr = &sc_if->sk_cdata.sk_rx_ring;
	struct mbuf		*m;
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct sk_chain		*cur_rx;
	struct sk_rx_desc	*cur_desc;
	int			cur, total_len = 0;
	u_int32_t		rxstat, sk_ctl;
	bus_dmamap_t		dmamap;

	DPRINTFN(2, ("sk_rxeof\n"));

	cur = sc_if->sk_cdata.sk_rx_cons;
	while (if_rxr_inuse(rxr) > 0) {
		/* Sync the descriptor */
		SK_CDRXSYNC(sc_if, cur,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
		if (cur_rx->sk_mbuf == NULL)
			break;

		/* Stop once the chip still owns the descriptor. */
		cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur];
		sk_ctl = lemtoh32(&cur_desc->sk_ctl);
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;

		dmamap = sc_if->sk_cdata.sk_rx_map[cur];

		bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
		    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, dmamap);

		/* Take the mbuf off the ring before validation. */
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		if_rxr_put(rxr, 1);
		SK_INC(cur, SK_RX_RING_CNT);

		total_len = SK_RXBYTES(sk_ctl);
		rxstat = lemtoh32(&cur_desc->sk_xmac_rxstat);

		/*
		 * Drop frames that span descriptors, have no valid
		 * status, are out of size range, or fail the MAC's
		 * status check.
		 */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    total_len < SK_MIN_FRAMELEN ||
		    total_len > SK_JUMBO_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, total_len) == 0) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		m->m_pkthdr.len = m->m_len = total_len;

		ml_enqueue(&ml, m);
	}
	sc_if->sk_cdata.sk_rx_cons = cur;

	/* Back off ring refills if the stack reports a livelock. */
	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(rxr);

	sk_fill_rx_ring(sc_if);

}
1646 
/*
 * Transmit completion handler.  Walk the transmit ring from the
 * consumer index, free mbufs and DMA maps of frames the chip has
 * finished with, and clear OACTIVE when ring space opens up.
 */
void
sk_txeof(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	u_int32_t		idx, sk_ctl;
	struct sk_txmap_entry	*entry;

	DPRINTFN(2, ("sk_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while (idx != sc_if->sk_cdata.sk_tx_prod) {
		SK_CDTXSYNC(sc_if, idx, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		sk_ctl = lemtoh32(&cur_tx->sk_ctl);
#ifdef SK_DEBUG
		if (skdebug >= 2)
			sk_dump_txdesc(cur_tx, idx);
#endif
		/* Chip still owns this descriptor: stop here. */
		if (sk_ctl & SK_TXCTL_OWN) {
			SK_CDTXSYNC(sc_if, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		/* mbuf/map are stored only on a frame's last descriptor. */
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[idx];

			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			/* Return the DMA map to the free list. */
			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
					  link);
			sc_if->sk_cdata.sk_tx_map[idx] = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}
	/* Cancel the watchdog when the ring drains completely. */
	ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? SK_TX_TIMEOUT : 0;

	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifq_clr_oactive(&ifp->if_snd);

	sc_if->sk_cdata.sk_tx_cons = idx;
}
1701 
/*
 * Periodic link poller for GEnesis boards.  Re-arms itself once a
 * second until the GPIO link-sync pin reads stable, then re-enables
 * the GP0 interrupt and stops polling.
 */
void
sk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i;

	DPRINTFN(2, ("sk_tick\n"));

	if (!(ifp->if_flags & IFF_UP))
		return;

	/* Broadcom PHYs use the dedicated interrupt handler instead. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not stable yet: poll again in a second. */
	if (i != 3) {
		timeout_add_sec(&sc_if->sk_tick_ch, 1);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	timeout_del(&sc_if->sk_tick_ch);
}
1743 
1744 void
1745 sk_yukon_tick(void *xsc_if)
1746 {
1747 	struct sk_if_softc *sc_if = xsc_if;
1748 	struct mii_data *mii = &sc_if->sk_mii;
1749 	int s;
1750 
1751 	s = splnet();
1752 	mii_tick(mii);
1753 	splx(s);
1754 	timeout_add_sec(&sc_if->sk_tick_ch, 1);
1755 }
1756 
/*
 * Service an interrupt from a Broadcom PHY attached to the XMAC.
 * The MAC's Tx/Rx paths are disabled while the PHY registers are
 * accessed and re-enabled on the way out.
 */
void
sk_intr_bcom(struct sk_if_softc *sc_if)
{
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int status;

	DPRINTFN(2, ("sk_intr_bcom\n"));

	/* Pause the MAC while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg((struct device *)sc_if,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface went down: reinitialize the MAC and bail. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;
		lstat = sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link came up: mask PHY interrupts again. */
			sk_xmac_miibus_writereg((struct device *)sc_if,
			    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* Autoneg still in progress: check again later. */
			mii_tick(mii);
			timeout_add_sec(&sc_if->sk_tick_ch, 1);
		}
	}

	/* Resume MAC Tx/Rx. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
}
1808 
1809 void
1810 sk_intr_xmac(struct sk_if_softc	*sc_if)
1811 {
1812 	u_int16_t status = SK_XM_READ_2(sc_if, XM_ISR);
1813 
1814 	DPRINTFN(2, ("sk_intr_xmac\n"));
1815 
1816 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
1817 		if (status & XM_ISR_GP0_SET) {
1818 			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
1819 			timeout_add_sec(&sc_if->sk_tick_ch, 1);
1820 		}
1821 
1822 		if (status & XM_ISR_AUTONEG_DONE) {
1823 			timeout_add_sec(&sc_if->sk_tick_ch, 1);
1824 		}
1825 	}
1826 
1827 	if (status & XM_IMR_TX_UNDERRUN)
1828 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
1829 
1830 	if (status & XM_IMR_RX_OVERRUN)
1831 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
1832 }
1833 
1834 void
1835 sk_intr_yukon(struct sk_if_softc *sc_if)
1836 {
1837 	u_int8_t status;
1838 
1839 	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
1840 	/* RX overrun */
1841 	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
1842 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
1843 		    SK_RFCTL_RX_FIFO_OVER);
1844 	}
1845 	/* TX underrun */
1846 	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
1847 		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
1848 		    SK_TFCTL_TX_FIFO_UNDER);
1849 	}
1850 
1851 	DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status));
1852 }
1853 
/*
 * Main interrupt handler, shared by both MAC ports.  Loops reading
 * the interrupt source register and dispatching Rx, Tx, MAC, and
 * external-register events until no enabled sources remain, then
 * restarts transmission on any port with queued packets.
 */
int
sk_intr(void *xsc)
{
	struct sk_softc		*sc = xsc;
	struct sk_if_softc	*sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc	*sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
	u_int32_t		status;
	int			claimed = 0;

	/* 0xffffffff means the device is gone (e.g. hot-unplugged). */
	status = CSR_READ_4(sc, SK_ISSR);
	if (status == 0 || status == 0xffffffff)
		return (0);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	/* Only service sources we have actually enabled. */
	for (; (status &= sc->sk_intrmask) != 0;) {
		claimed = 1;

		/* Handle receive interrupts first. */
		if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
			sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}
		if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
			sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
			    SK_TXBMU_CLR_IRQ_EOF);
		}
		if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
			    SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts. */
		if (sc_if0 && (status & SK_ISR_MAC1) &&
		    (ifp0->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (sc_if1 && (status & SK_ISR_MAC2) &&
		    (ifp1->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);

		}

		/* External-register interrupts come from Broadcom PHYs. */
		if (status & SK_ISR_EXTERNAL_REG) {
			if (sc_if0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);

			if (sc_if1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
		/* Re-read in case new events arrived while servicing. */
		status = CSR_READ_4(sc, SK_ISSR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Kick transmission on any port with work still queued. */
	if (ifp0 != NULL && !ifq_empty(&ifp0->if_snd))
		sk_start(ifp0);
	if (ifp1 != NULL && !ifq_empty(&ifp1->if_snd))
		sk_start(ifp1);

	return (claimed);
}
1939 
/*
 * Initialize the XMAC for a GEnesis port: take it out of reset,
 * configure the (possibly external Broadcom) PHY, program the
 * station address and receive mode, and set up the MAC arbiter
 * for the chip revision.  Register write order follows the
 * SysKonnect init sequence and should not be rearranged.
 */
void
sk_init_xmac(struct sk_if_softc	*sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	/* Magic register writes for early BCM5400 PHYs; see below. */
	struct sk_bcom_hack	bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	DPRINTFN(2, ("sk_init_xmac\n"));

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then mask most of its interrupts. */
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, MII_BMCR, BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg((struct device *)sc_if,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0])));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2])));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4])));

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Program promiscuous mode and multicast filters. */
	sk_iff(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;
}
2087 
/*
 * Initialize a Yukon-family (GMAC) port: apply the Yukon-Lite rev A3+
 * COMA-mode GPIO workaround, reset the GPHY and GMAC, program the
 * station address, and bring the RX/TX MAC FIFOs into operation.
 * Called from sk_init() with the port quiesced; register write order
 * follows the chip's documented reset sequence, so do not reorder.
 */
void sk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t		phy, v;
	u_int16_t		reg;
	struct sk_softc		*sc;
	int			i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
	    CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, set PHY reset.
		 * Otherwise it will not correctly take chip out of
		 * powerdown (coma)
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	DPRINTFN(6, ("sk_init_yukon: 1\n"));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	DPRINTFN(6, ("sk_init_yukon: 2\n"));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, clear PHY reset
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9;
		v &= ~SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/*
	 * Build the GPHY control word; the copper/fiber bit is chosen
	 * from the media type probed at attach time.
	 */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));

	/* Pulse the GPHY reset with the new control word, then release. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
	    SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
	    SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("sk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("sk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("sk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("sk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("sk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("sk_init_yukon: 9\n"));
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
	    YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO | YU_SMR_IPG_DATA(0x1e));

	DPRINTFN(6, ("sk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->arpcom.ac_enaddr[i * 2] |
		    sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Source Address 2 is taken from the per-port MAC in the EEPROM window. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Program promiscuous mode and multicast filters. */
	DPRINTFN(6, ("sk_init_yukon: 11\n"));
	sk_iff(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("sk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

	DPRINTFN(6, ("sk_init_yukon: end\n"));
}
2236 
2237 /*
2238  * Note that to properly initialize any part of the GEnesis chip,
2239  * you first have to take it out of reset mode.
2240  */
/*
 * sk_init: (re)initialize one port and mark the interface running.
 * Cancels pending I/O, programs the MAC (XMAC on GEnesis, GMAC on
 * Yukon), RAM buffers and BMUs, sets up the RX/TX descriptor rings,
 * and enables this port's interrupts.  Runs at splnet().
 */
void
sk_init(void *xsc_if)
{
	struct sk_if_softc	*sc_if = xsc_if;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct mii_data		*mii = &sc_if->sk_mii;
	int			s;

	DPRINTFN(2, ("sk_init\n"));

	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if, 0);

	if (SK_IS_GENESIS(sc)) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
		    SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
		    SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
		    SK_TXLEDCTL_COUNTER_START);
	}

	/*
	 * Configure descriptor poll timer
	 *
	 * The SK-NET GENESIS data sheet notes the possibility of losing a
	 * Start transmit command due to CPU/cache related interim storage
	 * problems under certain conditions. The document recommends a
	 * polling mechanism to send a Start transmit command to initiate
	 * transfer of ready descriptors regularly. To cope with this issue
	 * sk(4) now enables descriptor poll timer to initiate descriptor
	 * processing periodically as defined by SK_DPT_TIMER_MAX. However
	 * sk(4) still issues SK_TXBMU_TX_START to Tx BMU to get fast
	 * execution of Tx command instead of waiting for next descriptor
	 * polling time.
	 * The same rule may apply to Rx side too but it seems that is not
	 * needed at the moment.
	 * Since sk(4) uses descriptor polling as a last resort there is no
	 * need to set smaller polling time than maximum allowable one.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	/* Kick off media (re)negotiation for the current ifmedia setting. */
	mii_mediachg(mii);

	if (SK_IS_GENESIS(sc)) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/*
	 * Configure RAMbuffers: each queue gets the window of on-board
	 * RAM computed at attach time (sk_rx_ramstart..sk_rx_ramend etc.).
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs: point each at the start of its descriptor ring. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    SK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    SK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if, 0);
		splx(s);
		return;
	}

	if (sk_init_tx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if, 0);
		splx(s);
		return;
	}

	/* Configure interrupt handling: clear stale status, then unmask. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	if (SK_IS_GENESIS(sc)) {
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
		    XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
	}

	if (SK_IS_YUKON(sc)) {
		u_int16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* start transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Yukon link state is polled from the one-second tick. */
	if (SK_IS_YUKON(sc))
		timeout_add_sec(&sc_if->sk_tick_ch, 1);

	splx(s);
}
2405 
/*
 * sk_stop: stop one port.  Halts the Tx/Rx descriptor engines, resets
 * the MAC, FIFOs and RAM buffers and masks this port's interrupts
 * (hardware teardown is skipped when 'softonly' is set), then frees
 * every mbuf and returns the TX DMA maps still held by the rings.
 */
void
sk_stop(struct sk_if_softc *sc_if, int softonly)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	bus_dmamap_t		dmamap;
	struct sk_txmap_entry	*dma;
	int			i;
	u_int32_t		val;

	DPRINTFN(2, ("sk_stop\n"));

	timeout_del(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (!softonly) {
		/* stop Tx descriptor polling timer */
		SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
		/* stop transfer of Tx descriptors */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
		/* Busy-wait (up to SK_TIMEOUT us) for the stop bit to clear. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
			if (!(val & SK_TXBMU_TX_STOP))
				break;
			DELAY(1);
		}
		if (i == SK_TIMEOUT) {
			printf("%s: cannot stop transfer of Tx descriptors\n",
			    sc_if->sk_dev.dv_xname);
		}
		/* stop transfer of Rx descriptors */
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
		for (i = 0; i < SK_TIMEOUT; i++) {
			val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
			if (!(val & SK_RXBMU_RX_STOP))
				break;
			DELAY(1);
		}
		if (i == SK_TIMEOUT) {
			printf("%s: cannot stop transfer of Rx descriptors\n",
			    sc_if->sk_dev.dv_xname);
		}

		if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
			u_int32_t		val;

			/* Put PHY back into reset. */
			val = sk_win_read_4(sc, SK_GPIO);
			if (sc_if->sk_port == SK_PORT_A) {
				val |= SK_GPIO_DIR0;
				val &= ~SK_GPIO_DAT0;
			} else {
				val |= SK_GPIO_DIR2;
				val &= ~SK_GPIO_DAT2;
			}
			sk_win_write_4(sc, SK_GPIO, val);
		}

		/* Turn off various components of this interface. */
		SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
		switch (sc->sk_type) {
		case SK_GENESIS:
			SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL,
			    SK_TXMACCTL_XMAC_RESET);
			SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
			break;
		case SK_YUKON:
		case SK_YUKON_LITE:
		case SK_YUKON_LP:
			SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
			SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
			break;
		}
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
		/*
		 * NOTE(review): TX LED is stopped with the RX counter-stop
		 * value; presumably the LED control bit layout is shared --
		 * verify against the SK_TXLEDCTL_* definitions.
		 */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

		/* Disable interrupts */
		if (sc_if->sk_port == SK_PORT_A)
			sc->sk_intrmask &= ~SK_INTRS1;
		else
			sc->sk_intrmask &= ~SK_INTRS2;
		CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

		/* Ack anything pending in the XMAC, then mask all its sources. */
		SK_XM_READ_2(sc_if, XM_ISR);
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
	}

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			dmamap = sc_if->sk_cdata.sk_rx_map[i];
			bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, dmamap);
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	/* TX maps go back on the free list for reuse by the next sk_init(). */
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = 0;
		}
	}

	/* Destroy the free-list maps themselves. */
	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF, 0);
	}
}
2530 
/* Autoconf attachment glue for the skc controller device. */
struct cfattach skc_ca = {
	sizeof(struct sk_softc), skc_probe, skc_attach, skc_detach,
	skc_activate
};
2535 
2536 struct cfdriver skc_cd = {
2537 	0, "skc", DV_DULL
2538 };
2539 
/* Autoconf attachment glue for each sk network port on the controller. */
struct cfattach sk_ca = {
	sizeof(struct sk_if_softc), sk_probe, sk_attach, sk_detach,
	sk_activate
};
2544 
/* Driver class for the sk network interface ports. */
struct cfdriver sk_cd = {
	NULL, "sk", DV_IFNET
};
2548 
2549 #ifdef SK_DEBUG
2550 void
2551 sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
2552 {
2553 #define DESC_PRINT(X)					\
2554 	if (X)						\
2555 		printf("txdesc[%d]." #X "=%#x\n", idx, X);
2556 
2557 	DESC_PRINT(letoh32(desc->sk_ctl));
2558 	DESC_PRINT(letoh32(desc->sk_next));
2559 	DESC_PRINT(letoh32(desc->sk_data_lo));
2560 	DESC_PRINT(letoh32(desc->sk_data_hi));
2561 	DESC_PRINT(letoh32(desc->sk_xmac_txstat));
2562 	DESC_PRINT(letoh16(desc->sk_rsvd0));
2563 	DESC_PRINT(letoh16(desc->sk_rsvd1));
2564 #undef PRINT
2565 }
2566 
/*
 * Debug aid: hex+ASCII dump of a buffer, 16 bytes per line, in the
 * classic hexdump layout (offset, hex column split after 8 bytes,
 * then printable ASCII with non-printables shown as spaces).
 */
void
sk_dump_bytes(const char *data, int len)
{
	int off, n, k;

	for (off = 0; off < len; off += 16) {
		printf("%08x  ", off);

		/* Bytes remaining on this row (at most 16). */
		n = len - off;
		if (n > 16)
			n = 16;

		/* Hex column, with an extra gap after the 8th byte. */
		for (k = 0; k < n; k++) {
			printf("%02x ", data[off + k] & 0xff);
			if (k == 7)
				printf(" ");
		}

		/* Pad a short final row so the ASCII column lines up. */
		for (; k < 16; k++)
			printf("   ");
		printf("  ");

		/* ASCII column: printable characters only. */
		for (k = 0; k < n; k++) {
			int ch = data[off + k] & 0xff;
			printf("%c", (ch >= ' ' && ch <= '~') ? ch : ' ');
		}

		printf("\n");

		if (n < 16)
			break;
	}
}
2598 
2599 void
2600 sk_dump_mbuf(struct mbuf *m)
2601 {
2602 	int count = m->m_pkthdr.len;
2603 
2604 	printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len);
2605 
2606 	while (count > 0 && m) {
2607 		printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n",
2608 		    m, m->m_data, m->m_len);
2609 		sk_dump_bytes(mtod(m, char *), m->m_len);
2610 
2611 		count -= m->m_len;
2612 		m = m->m_next;
2613 	}
2614 }
2615 #endif
2616