xref: /dragonfly/sys/dev/netif/sk/if_sk.c (revision 2038fb68)
1 /*
2  * Copyright (c) 1997, 1998, 1999, 2000
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $OpenBSD: if_sk.c,v 1.129 2006/10/16 12:30:08 tom Exp $
33  * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
34  * $DragonFly: src/sys/dev/netif/sk/if_sk.c,v 1.58 2008/10/12 11:17:08 sephe Exp $
35  */
36 
37 /*
38  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39  *
40  * Permission to use, copy, modify, and distribute this software for any
41  * purpose with or without fee is hereby granted, provided that the above
42  * copyright notice and this permission notice appear in all copies.
43  *
44  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51  */
52 
53 /*
54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55  * the SK-984x series adapters, both single port and dual port.
56  * References:
57  * 	The XaQti XMAC II datasheet,
58  * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60  *
61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63  * convenience to others until Vitesse corrects this problem:
64  *
65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66  *
67  * Written by Bill Paul <wpaul@ee.columbia.edu>
68  * Department of Electrical Engineering
69  * Columbia University, New York City
70  */
71 
72 /*
73  * The SysKonnect gigabit ethernet adapters consist of two main
74  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76  * components and a PHY while the GEnesis controller provides a PCI
77  * interface with DMA support. Each card may have between 512K and
78  * 2MB of SRAM on board depending on the configuration.
79  *
80  * The SysKonnect GEnesis controller can have either one or two XMAC
81  * chips connected to it, allowing single or dual port NIC configurations.
82  * SysKonnect has the distinction of being the only vendor on the market
83  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85  * XMAC registers. This driver takes advantage of these features to allow
86  * both XMACs to operate as independent interfaces.
87  */
88 
89 #include <sys/param.h>
90 #include <sys/bus.h>
91 #include <sys/endian.h>
92 #include <sys/in_cksum.h>
93 #include <sys/kernel.h>
94 #include <sys/interrupt.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/queue.h>
98 #include <sys/rman.h>
99 #include <sys/serialize.h>
100 #include <sys/socket.h>
101 #include <sys/sockio.h>
102 #include <sys/sysctl.h>
103 
104 #include <net/bpf.h>
105 #include <net/ethernet.h>
106 #include <net/if.h>
107 #include <net/if_arp.h>
108 #include <net/if_dl.h>
109 #include <net/if_media.h>
110 #include <net/ifq_var.h>
111 #include <net/vlan/if_vlan_var.h>
112 
113 #include <netinet/ip.h>
114 #include <netinet/udp.h>
115 
116 #include <dev/netif/mii_layer/mii.h>
117 #include <dev/netif/mii_layer/miivar.h>
118 #include <dev/netif/mii_layer/brgphyreg.h>
119 
120 #include <bus/pci/pcireg.h>
121 #include <bus/pci/pcivar.h>
122 #include <bus/pci/pcidevs.h>
123 
124 #include <dev/netif/sk/if_skreg.h>
125 #include <dev/netif/sk/yukonreg.h>
126 #include <dev/netif/sk/xmaciireg.h>
127 #include <dev/netif/sk/if_skvar.h>
128 
129 #include "miibus_if.h"
130 
131 #if 0
132 #define SK_DEBUG
133 #endif
134 
135 #if 0
136 #define SK_RXCSUM
137 #endif
138 
139 /* supported device vendors */
static const struct skc_type {
	uint16_t	skc_vid;	/* PCI vendor ID */
	uint16_t	skc_did;	/* PCI device ID */
	const char	*skc_name;	/* probe description string */
} skc_devs[] = {
	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940,
	  "3Com 3C940" },
	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940B,
	  "3Com 3C940B" },

	{ PCI_VENDOR_CNET,		PCI_PRODUCT_CNET_GIGACARD,
	  "CNet GigaCard" },

	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T_A1,
	  "D-Link DGE-530T A1" },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T_B1,
	  "D-Link DGE-530T B1" },

	{ PCI_VENDOR_LINKSYS,		PCI_PRODUCT_LINKSYS_EG1032,
	  "Linksys EG1032 v2" },
	{ PCI_VENDOR_LINKSYS,		PCI_PRODUCT_LINKSYS_EG1064,
	  "Linksys EG1064" },

	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON,
	  "Marvell Yukon 88E8001/8003/8010" },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_BELKIN,
	  "Belkin F5D5005" },

	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SKNET_GE,
	  "SysKonnect SK-NET" },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9821v2,
	  "SysKonnect SK9821 v2" },

	{ 0, 0, NULL }	/* list terminator, required by skc_probe() */
};
175 
176 static int	skc_probe(device_t);
177 static int	skc_attach(device_t);
178 static int	skc_detach(device_t);
179 static void	skc_shutdown(device_t);
180 static int	skc_sysctl_imtime(SYSCTL_HANDLER_ARGS);
181 
182 static int	sk_probe(device_t);
183 static int	sk_attach(device_t);
184 static int	sk_detach(device_t);
185 static void	sk_tick(void *);
186 static void	sk_yukon_tick(void *);
187 static void	sk_intr(void *);
188 static void	sk_intr_bcom(struct sk_if_softc *);
189 static void	sk_intr_xmac(struct sk_if_softc *);
190 static void	sk_intr_yukon(struct sk_if_softc *);
191 static void	sk_rxeof(struct sk_if_softc *);
192 static void	sk_txeof(struct sk_if_softc *);
193 static int	sk_encap(struct sk_if_softc *, struct mbuf **, uint32_t *);
194 static void	sk_start(struct ifnet *);
195 static int	sk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
196 static void	sk_init(void *);
197 static void	sk_init_xmac(struct sk_if_softc *);
198 static void	sk_init_yukon(struct sk_if_softc *);
199 static void	sk_stop(struct sk_if_softc *);
200 static void	sk_watchdog(struct ifnet *);
201 static int	sk_ifmedia_upd(struct ifnet *);
202 static void	sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
203 static void	sk_reset(struct sk_softc *);
204 static int	sk_newbuf_jumbo(struct sk_if_softc *, int, int);
205 static int	sk_newbuf_std(struct sk_if_softc *, int, int);
206 static int	sk_jpool_alloc(device_t);
207 static void	sk_jpool_free(struct sk_if_softc *);
208 static struct sk_jpool_entry
209 		*sk_jalloc(struct sk_if_softc *);
210 static void	sk_jfree(void *);
211 static void	sk_jref(void *);
212 static int	sk_init_rx_ring(struct sk_if_softc *);
213 static int	sk_init_tx_ring(struct sk_if_softc *);
214 
215 static int	sk_miibus_readreg(device_t, int, int);
216 static int	sk_miibus_writereg(device_t, int, int, int);
217 static void	sk_miibus_statchg(device_t);
218 
219 static int	sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
220 static int	sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, int);
221 static void	sk_xmac_miibus_statchg(struct sk_if_softc *);
222 
223 static int	sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
224 static int	sk_marv_miibus_writereg(struct sk_if_softc *, int, int, int);
225 static void	sk_marv_miibus_statchg(struct sk_if_softc *);
226 
227 static void	sk_setfilt(struct sk_if_softc *, caddr_t, int);
228 static void	sk_setmulti(struct sk_if_softc *);
229 static void	sk_setpromisc(struct sk_if_softc *);
230 
231 #ifdef SK_RXCSUM
232 static void	sk_rxcsum(struct ifnet *, struct mbuf *, const uint16_t,
233 			  const uint16_t);
234 #endif
235 static int	sk_dma_alloc(device_t);
236 static void	sk_dma_free(device_t);
237 
238 #ifdef SK_DEBUG
239 #define DPRINTF(x)	if (skdebug) kprintf x
240 #define DPRINTFN(n,x)	if (skdebug >= (n)) kprintf x
241 static int	skdebug = 2;
242 
243 static void	sk_dump_txdesc(struct sk_tx_desc *, int);
244 static void	sk_dump_mbuf(struct mbuf *);
245 static void	sk_dump_bytes(const char *, int);
246 #else
247 #define DPRINTF(x)
248 #define DPRINTFN(n,x)
249 #endif
250 
251 /* Interrupt moderation time. */
252 static int	skc_imtime = SK_IMTIME_DEFAULT;
253 TUNABLE_INT("hw.skc.imtime", &skc_imtime);
254 
255 /*
256  * Note that we have newbus methods for both the GEnesis controller
257  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
258  * the miibus code is a child of the XMACs. We need to do it this way
259  * so that the miibus drivers can access the PHY registers on the
260  * right PHY. It's not quite what I had in mind, but it's the only
261  * design that achieves the desired effect.
262  */
/* Method table for the skc (controller) driver. */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }	/* terminator */
};

static DEFINE_CLASS_0(skc, skc_driver, skc_methods, sizeof(struct sk_softc));
static devclass_t skc_devclass;

/* Method table for the per-port sk (XMAC/Yukon MAC) driver. */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }	/* terminator */
};

static DEFINE_CLASS_0(sk, sk_driver, sk_methods, sizeof(struct sk_if_softc));
static devclass_t sk_devclass;

DECLARE_DUMMY_MODULE(if_sk);
/* Attach order: skc hangs off pci, sk ports off skc, miibus off sk. */
DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(if_sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
306 
/*
 * Register window accessors.  On this driver they are thin wrappers
 * around the CSR_* access macros; they are kept as named functions so
 * window accesses are visually distinct at call sites.
 */
static __inline uint32_t
sk_win_read_4(struct sk_softc *sc, uint32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static __inline uint16_t
sk_win_read_2(struct sk_softc *sc, uint32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static __inline uint8_t
sk_win_read_1(struct sk_softc *sc, uint32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static __inline void
sk_win_write_4(struct sk_softc *sc, uint32_t reg, uint32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static __inline void
sk_win_write_2(struct sk_softc *sc, uint32_t reg, uint16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static __inline void
sk_win_write_1(struct sk_softc *sc, uint32_t reg, uint8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}
342 
343 static __inline int
344 sk_newbuf(struct sk_if_softc *sc_if, int idx, int wait)
345 {
346 	int ret;
347 
348 	if (sc_if->sk_use_jumbo)
349 		ret = sk_newbuf_jumbo(sc_if, idx, wait);
350 	else
351 		ret = sk_newbuf_std(sc_if, idx, wait);
352 	return ret;
353 }
354 
355 static int
356 sk_miibus_readreg(device_t dev, int phy, int reg)
357 {
358 	struct sk_if_softc *sc_if = device_get_softc(dev);
359 
360 	if (SK_IS_GENESIS(sc_if->sk_softc))
361 		return sk_xmac_miibus_readreg(sc_if, phy, reg);
362 	else
363 		return sk_marv_miibus_readreg(sc_if, phy, reg);
364 }
365 
366 static int
367 sk_miibus_writereg(device_t dev, int phy, int reg, int val)
368 {
369 	struct sk_if_softc *sc_if = device_get_softc(dev);
370 
371 	if (SK_IS_GENESIS(sc_if->sk_softc))
372 		return sk_xmac_miibus_writereg(sc_if, phy, reg, val);
373 	else
374 		return sk_marv_miibus_writereg(sc_if, phy, reg, val);
375 }
376 
377 static void
378 sk_miibus_statchg(device_t dev)
379 {
380 	struct sk_if_softc *sc_if = device_get_softc(dev);
381 
382 	if (SK_IS_GENESIS(sc_if->sk_softc))
383 		sk_xmac_miibus_statchg(sc_if);
384 	else
385 		sk_marv_miibus_statchg(sc_if);
386 }
387 
/*
 * Read a PHY register through the XMAC.  Returns the register value,
 * or 0 if the PHY address is invalid or the read times out (0 is not
 * distinguishable from a legitimate all-zero register).
 */
static int
sk_xmac_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
{
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

	/* The XMAC's internal PHY only answers at address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	/* Select the PHY/register, then issue a dummy read to start the op. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHYs signal completion via PHYDATARDY. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			if_printf(&sc_if->arpcom.ac_if,
				  "phy failed to come ready\n");
			return(0);
		}
	}
	DELAY(1);
	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
417 
/*
 * Write a PHY register through the XMAC.  Returns ETIMEDOUT if the
 * PHY never becomes ready for the address phase; a timeout during the
 * data phase is only logged and still returns 0.
 */
static int
sk_xmac_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
{
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));

	/* Select the target PHY/register and wait for the PHY to go idle. */
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if ((SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY) == 0)
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(&sc_if->arpcom.ac_if, "phy failed to come ready\n");
		return(ETIMEDOUT);
	}

	/* Write the data word and wait for the operation to drain. */
	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if ((SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY) == 0)
			break;
	}

	if (i == SK_TIMEOUT)
		if_printf(&sc_if->arpcom.ac_if, "phy write timed out\n");
	return(0);
}
447 
448 static void
449 sk_xmac_miibus_statchg(struct sk_if_softc *sc_if)
450 {
451 	struct mii_data *mii;
452 
453 	mii = device_get_softc(sc_if->sk_miibus);
454 	DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));
455 
456 	/*
457 	 * If this is a GMII PHY, manually set the XMAC's
458 	 * duplex mode accordingly.
459 	 */
460 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
461 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
462 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
463 		else
464 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
465 	}
466 }
467 
/*
 * Read a PHY register via the Yukon's SMI interface.  Returns 0 (not
 * an errno) when the PHY is not addressable or the read times out.
 */
static int
sk_marv_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
{
	uint16_t val;
	int i;

	/* Only PHY address 0 with a Marvell copper/fiber PHY is valid. */
	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
			     phy, reg));
		return(0);
	}

        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the controller signals the result is latched. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(&sc_if->arpcom.ac_if, "phy failed to come ready\n");
		return(0);
	}

 	DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
		     SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
		     phy, reg, val));

	return(val);
}
507 
508 static int
509 sk_marv_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
510 {
511 	int i;
512 
513 	DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
514 		     phy, reg, val));
515 
516 	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
517 	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
518 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
519 
520 	for (i = 0; i < SK_TIMEOUT; i++) {
521 		DELAY(1);
522 		if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
523 			break;
524 	}
525 
526 	if (i == SK_TIMEOUT)
527 		if_printf(&sc_if->arpcom.ac_if, "phy write timed out\n");
528 
529 	return(0);
530 }
531 
/*
 * Link-state change callback for Yukon (Marvell PHY) ports.  No
 * register programming is done here; the body only dumps the GPCR
 * register when debugging is enabled.
 */
static void
sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
{
	DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
		     SK_YU_READ_2(sc_if, YUKON_GPCR)));
}
538 
539 #define HASH_BITS	6
540 
541 static uint32_t
542 sk_xmac_hash(caddr_t addr)
543 {
544 	uint32_t crc;
545 
546 	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
547 	return (~crc & ((1 << HASH_BITS) - 1));
548 }
549 
550 static uint32_t
551 sk_yukon_hash(caddr_t addr)
552 {
553 	uint32_t crc;
554 
555 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
556 	return (crc & ((1 << HASH_BITS) - 1));
557 }
558 
559 static void
560 sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
561 {
562 	int base;
563 
564 	base = XM_RXFILT_ENTRY(slot);
565 
566 	SK_XM_WRITE_2(sc_if, base, *(uint16_t *)(&addr[0]));
567 	SK_XM_WRITE_2(sc_if, base + 2, *(uint16_t *)(&addr[2]));
568 	SK_XM_WRITE_2(sc_if, base + 4, *(uint16_t *)(&addr[4]));
569 }
570 
/*
 * Reprogram the multicast RX filter from the interface's multicast
 * list.  On GEnesis the first XM_RXFILT_MAX - 1 groups go into the
 * XMAC's perfect filter and the rest into the 64-bit hash table; the
 * Yukon only has the hash table, split across four 16-bit registers.
 */
static void
sk_setmulti(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	uint32_t hashes[2] = { 0, 0 };
	int h = 0, i;
	struct ifmultiaddr *ifma;
	uint8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };

	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Slot 0 holds the station address; clear only 1..MAX-1. */
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept everything: saturate the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_link.le_next == NULL)
				break;
		}
		/*
		 * Now traverse the list backwards.
		 * NOTE(review): this walks le_prev pointers directly,
		 * relying on LIST internals (le_prev points at the
		 * previous entry's le_next field); the list head is
		 * detected by comparing against &ifp->if_multiaddrs.
		 */
		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
			caddr_t maddr;

			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (SK_IS_GENESIS(sc) && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if, maddr, i);
				i++;
				continue;
			}

			switch(sc->sk_type) {
			case SK_GENESIS:
				h = sk_xmac_hash(maddr);
				break;

			case SK_YUKON:
			case SK_YUKON_LITE:
			case SK_YUKON_LP:
				h = sk_yukon_hash(maddr);
				break;
			}
			/* h is at most 63 (HASH_BITS == 6). */
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
			       XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}
}
667 
668 static void
669 sk_setpromisc(struct sk_if_softc *sc_if)
670 {
671 	struct sk_softc	*sc = sc_if->sk_softc;
672 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
673 
674 	switch(sc->sk_type) {
675 	case SK_GENESIS:
676 		if (ifp->if_flags & IFF_PROMISC)
677 			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
678 		else
679 			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
680 		break;
681 	case SK_YUKON:
682 	case SK_YUKON_LITE:
683 	case SK_YUKON_LP:
684 		if (ifp->if_flags & IFF_PROMISC) {
685 			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
686 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
687 		} else {
688 			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
689 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
690 		}
691 		break;
692 	}
693 }
694 
695 static int
696 sk_init_rx_ring(struct sk_if_softc *sc_if)
697 {
698 	struct sk_chain_data *cd = &sc_if->sk_cdata;
699 	struct sk_ring_data *rd = &sc_if->sk_rdata;
700 	int i, nexti, error;
701 
702 	bzero(rd->sk_rx_ring, SK_RX_RING_SIZE);
703 
704 	for (i = 0; i < SK_RX_RING_CNT; i++) {
705 		bus_addr_t paddr;
706 
707 		if (i == (SK_RX_RING_CNT - 1))
708 			nexti = 0;
709 		else
710 			nexti = i + 1;
711 		paddr = rd->sk_rx_ring_paddr +
712 			(nexti * sizeof(struct sk_rx_desc));
713 
714 		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(paddr));
715 		rd->sk_rx_ring[i].sk_csum1_start = htole16(ETHER_HDR_LEN);
716 		rd->sk_rx_ring[i].sk_csum2_start =
717 			htole16(ETHER_HDR_LEN + sizeof(struct ip));
718 
719 		error = sk_newbuf(sc_if, i, 1);
720 		if (error) {
721 			if_printf(&sc_if->arpcom.ac_if,
722 				  "failed alloc of %dth mbuf\n", i);
723 			return error;
724 		}
725 	}
726 
727 	cd->sk_rx_prod = 0;
728 	cd->sk_rx_cons = 0;
729 
730 	return (0);
731 }
732 
733 static int
734 sk_init_tx_ring(struct sk_if_softc *sc_if)
735 {
736 	struct sk_ring_data *rd = &sc_if->sk_rdata;
737 	int i, nexti;
738 
739 	bzero(rd->sk_tx_ring, SK_TX_RING_SIZE);
740 
741 	for (i = 0; i < SK_TX_RING_CNT; i++) {
742 		bus_addr_t paddr;
743 
744 		if (i == (SK_TX_RING_CNT - 1))
745 			nexti = 0;
746 		else
747 			nexti = i + 1;
748 		paddr = rd->sk_tx_ring_paddr +
749 			(nexti * sizeof(struct sk_tx_desc));
750 
751 		rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(paddr));
752 	}
753 
754 	sc_if->sk_cdata.sk_tx_prod = 0;
755 	sc_if->sk_cdata.sk_tx_cons = 0;
756 	sc_if->sk_cdata.sk_tx_cnt = 0;
757 
758 	return (0);
759 }
760 
/*
 * Attach a jumbo-pool buffer to RX ring slot `idx`.  Returns ENOBUFS
 * if either the mbuf header or the jumbo buffer cannot be obtained;
 * the ring slot is left untouched on failure.
 */
static int
sk_newbuf_jumbo(struct sk_if_softc *sc_if, int idx, int wait)
{
	struct sk_jpool_entry *entry;
	struct mbuf *m_new = NULL;
	struct sk_rx_desc *r;
	bus_addr_t paddr;

	KKASSERT(idx < SK_RX_RING_CNT && idx >= 0);

	/* Only the header comes from the mbuf allocator; the data area
	 * is supplied by the driver's pre-mapped jumbo pool. */
	MGETHDR(m_new, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;

	/* Allocate the jumbo buffer */
	entry = sk_jalloc(sc_if);
	if (entry == NULL) {
		m_freem(m_new);
		DPRINTFN(1, ("%s jumbo allocation failed -- packet "
		    "dropped!\n", sc_if->arpcom.ac_if.if_xname));
		return ENOBUFS;
	}

	/* Attach the jumbo buffer as refcounted external mbuf storage;
	 * sk_jfree/sk_jref return it to / pin it in the pool. */
	m_new->m_ext.ext_arg = entry;
	m_new->m_ext.ext_buf = entry->buf;
	m_new->m_ext.ext_free = sk_jfree;
	m_new->m_ext.ext_ref = sk_jref;
	m_new->m_ext.ext_size = SK_JLEN;

	m_new->m_flags |= M_EXT;

	m_new->m_data = m_new->m_ext.ext_buf;
	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

	paddr = entry->paddr;

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);
	paddr += ETHER_ALIGN;

	sc_if->sk_cdata.sk_rx_mbuf[idx] = m_new;

	/* Point the RX descriptor at the (aligned) buffer and arm it. */
	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(paddr));
	r->sk_data_hi = htole32(SK_ADDR_HI(paddr));
	r->sk_ctl = htole32(m_new->m_pkthdr.len | SK_RXSTAT);

	return 0;
}
814 
/*
 * Attach a standard cluster mbuf to RX ring slot `idx` and DMA-map it.
 * The new mbuf is loaded into the spare DMA map first, so a mapping
 * failure leaves the slot's existing buffer untouched; on success the
 * slot's old map becomes the new spare.  Returns 0 or an errno.
 */
static int
sk_newbuf_std(struct sk_if_softc *sc_if, int idx, int wait)
{
	struct mbuf *m_new = NULL;
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	struct sk_rx_desc *r;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nseg;

	KKASSERT(idx < SK_RX_RING_CNT && idx >= 0);

	m_new = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Load into the spare map; require a single DMA segment. */
	error = bus_dmamap_load_mbuf_segment(cd->sk_rx_dtag, cd->sk_rx_dmap_tmp,
			m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		if (wait) {
			if_printf(&sc_if->arpcom.ac_if,
				  "could not map RX mbuf\n");
		}
		return error;
	}

	/* Unload originally mapped mbuf */
	if (cd->sk_rx_mbuf[idx] != NULL) {
		bus_dmamap_sync(cd->sk_rx_dtag, cd->sk_rx_dmap[idx],
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(cd->sk_rx_dtag, cd->sk_rx_dmap[idx]);
	}

	/* Switch DMA map with tmp DMA map */
	map = cd->sk_rx_dmap_tmp;
	cd->sk_rx_dmap_tmp = cd->sk_rx_dmap[idx];
	cd->sk_rx_dmap[idx] = map;

	cd->sk_rx_mbuf[idx] = m_new;

	/* Hand the buffer's bus address to the RX descriptor and arm it. */
	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(seg.ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(seg.ds_addr));
	r->sk_ctl = htole32(m_new->m_pkthdr.len | SK_RXSTAT);

	return 0;
}
872 
873 /*
874  * Allocate a jumbo buffer.
875  */
876 struct sk_jpool_entry *
877 sk_jalloc(struct sk_if_softc *sc_if)
878 {
879 	struct sk_chain_data *cd = &sc_if->sk_cdata;
880 	struct sk_jpool_entry *entry;
881 
882 	lwkt_serialize_enter(&cd->sk_jpool_serializer);
883 
884 	entry = SLIST_FIRST(&cd->sk_jpool_free_ent);
885 	if (entry != NULL) {
886 		SLIST_REMOVE_HEAD(&cd->sk_jpool_free_ent, entry_next);
887 		entry->inuse = 1;
888 	} else {
889 		DPRINTF(("no free jumbo buffer\n"));
890 	}
891 
892 	lwkt_serialize_exit(&cd->sk_jpool_serializer);
893 	return entry;
894 }
895 
896 /*
897  * Release a jumbo buffer.
898  */
899 void
900 sk_jfree(void *arg)
901 {
902 	struct sk_jpool_entry *entry = arg;
903 	struct sk_chain_data *cd = &entry->sc_if->sk_cdata;
904 
905 	if (&cd->sk_jpool_ent[entry->slot] != entry)
906 		panic("%s: free wrong jumbo buffer\n", __func__);
907 	else if (entry->inuse == 0)
908 		panic("%s: jumbo buffer already freed\n", __func__);
909 
910 	lwkt_serialize_enter(&cd->sk_jpool_serializer);
911 
912 	atomic_subtract_int(&entry->inuse, 1);
913 	if (entry->inuse == 0)
914 		SLIST_INSERT_HEAD(&cd->sk_jpool_free_ent, entry, entry_next);
915 
916 	lwkt_serialize_exit(&cd->sk_jpool_serializer);
917 }
918 
/*
 * Add a reference to a jumbo buffer (mbuf ext_ref callback).
 */
static void
sk_jref(void *arg)
{
	struct sk_jpool_entry *entry = arg;
	struct sk_chain_data *cd = &entry->sc_if->sk_cdata;

	/* Sanity: the entry must belong to this pool and still be live. */
	if (&cd->sk_jpool_ent[entry->slot] != entry)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (entry->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&entry->inuse, 1);
}
932 
933 /*
934  * Set media options.
935  */
936 static int
937 sk_ifmedia_upd(struct ifnet *ifp)
938 {
939 	struct sk_if_softc *sc_if = ifp->if_softc;
940 	struct mii_data *mii;
941 
942 	mii = device_get_softc(sc_if->sk_miibus);
943 	sk_init(sc_if);
944 	mii_mediachg(mii);
945 
946 	return(0);
947 }
948 
949 /*
950  * Report current media status.
951  */
952 static void
953 sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
954 {
955 	struct sk_if_softc *sc_if;
956 	struct mii_data *mii;
957 
958 	sc_if = ifp->if_softc;
959 	mii = device_get_softc(sc_if->sk_miibus);
960 
961 	mii_pollstat(mii);
962 	ifmr->ifm_active = mii->mii_media_active;
963 	ifmr->ifm_status = mii->mii_media_status;
964 }
965 
/*
 * Handle interface ioctls.  Must be called with the interface
 * serializer held.  Returns 0 or an errno.
 */
static int
sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch(command) {
	case SIOCSIFMTU:
		/* Anything up to the jumbo limit is accepted; reinitialize
		 * so the RX buffer strategy matches the new MTU. */
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_flags &= ~IFF_RUNNING;
			sk_init(sc_if);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only reprogram the RX filters when the
				 * promiscuous bit actually changed. */
				if ((ifp->if_flags ^ sc_if->sk_if_flags)
				    & IFF_PROMISC) {
					sk_setpromisc(sc_if);
					sk_setmulti(sc_if);
				}
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags so the next call can detect changes. */
		sc_if->sk_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sk_setmulti(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		/* Address changes and everything else go to ether_ioctl(). */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}
1018 
1019 /*
1020  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1021  * IDs against our list and return a device name if we find a match.
1022  */
1023 static int
1024 skc_probe(device_t dev)
1025 {
1026 	const struct skc_type *t;
1027 	uint16_t vid, did;
1028 
1029 	vid = pci_get_vendor(dev);
1030 	did = pci_get_device(dev);
1031 
1032 	/*
1033 	 * Only attach to rev.2 of the Linksys EG1032 adapter.
1034 	 * Rev.3 is supported by re(4).
1035 	 */
1036 	if (vid == PCI_VENDOR_LINKSYS &&
1037 	    did == PCI_PRODUCT_LINKSYS_EG1032 &&
1038 	    pci_get_subdevice(dev) != SUBDEVICEID_LINKSYS_EG1032_REV2)
1039 		return ENXIO;
1040 
1041 	for (t = skc_devs; t->skc_name != NULL; t++) {
1042 		if (vid == t->skc_vid && did == t->skc_did) {
1043 			device_set_desc(dev, t->skc_name);
1044 			return 0;
1045 		}
1046 	}
1047 	return ENXIO;
1048 }
1049 
1050 /*
1051  * Force the GEnesis into reset, then bring it out of reset.
1052  */
1053 static void
1054 sk_reset(struct sk_softc *sc)
1055 {
1056 	DPRINTFN(2, ("sk_reset\n"));
1057 
1058 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1059 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1060 	if (SK_IS_YUKON(sc))
1061 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1062 
1063 	DELAY(1000);
1064 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1065 	DELAY(2);
1066 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1067 	if (SK_IS_YUKON(sc))
1068 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1069 
1070 	DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
1071 	DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
1072 		     CSR_READ_2(sc, SK_LINK_CTRL)));
1073 
1074 	if (SK_IS_GENESIS(sc)) {
1075 		/* Configure packet arbiter */
1076 		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1077 		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1078 		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1079 		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1080 		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1081 	}
1082 
1083 	/* Enable RAM interface */
1084 	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1085 
1086 	/*
1087 	 * Configure interrupt moderation. The moderation timer
1088 	 * defers interrupts specified in the interrupt moderation
1089 	 * timer mask based on the timeout specified in the interrupt
1090 	 * moderation timer init register. Each bit in the timer
1091 	 * register represents one tick, so to specify a timeout in
1092 	 * microseconds, we have to multiply by the correct number of
1093 	 * ticks-per-microsecond.
1094 	 */
1095 	KKASSERT(sc->sk_imtimer_ticks != 0 && sc->sk_imtime != 0);
1096 	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc, sc->sk_imtime));
1097 	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1098 	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1099 	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1100 }
1101 
1102 static int
1103 sk_probe(device_t dev)
1104 {
1105 	struct sk_softc *sc = device_get_softc(device_get_parent(dev));
1106 	const char *revstr = "", *name = NULL;
1107 	char devname[80];
1108 
1109 	switch (sc->sk_type) {
1110 	case SK_GENESIS:
1111 		name = "SysKonnect GEnesis";
1112 		break;
1113 	case SK_YUKON:
1114 		name = "Marvell Yukon";
1115 		break;
1116 	case SK_YUKON_LITE:
1117 		name = "Marvell Yukon Lite";
1118 		switch (sc->sk_rev) {
1119 		case SK_YUKON_LITE_REV_A0:
1120 			revstr = " rev.A0";
1121 			break;
1122 		case SK_YUKON_LITE_REV_A1:
1123 			revstr = " rev.A1";
1124 			break;
1125 		case SK_YUKON_LITE_REV_A3:
1126 			revstr = " rev.A3";
1127 			break;
1128 		}
1129 		break;
1130 	case SK_YUKON_LP:
1131 		name = "Marvell Yukon LP";
1132 		break;
1133 	default:
1134 		return ENXIO;
1135 	}
1136 
1137 	ksnprintf(devname, sizeof(devname), "%s%s (0x%x)",
1138 		 name, revstr, sc->sk_rev);
1139 	device_set_desc_copy(dev, devname);
1140 	return 0;
1141 }
1142 
1143 /*
1144  * Each XMAC chip is attached as a separate logical IP interface.
1145  * Single port cards will have only one logical interface of course.
1146  */
1147 static int
1148 sk_attach(device_t dev)
1149 {
1150 	struct sk_softc *sc = device_get_softc(device_get_parent(dev));
1151 	struct sk_if_softc *sc_if = device_get_softc(dev);
1152 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
1153 	int i, error;
1154 
1155 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1156 
1157 	sc_if->sk_port = *(int *)device_get_ivars(dev);
1158 	KKASSERT(sc_if->sk_port == SK_PORT_A || sc_if->sk_port == SK_PORT_B);
1159 
1160 	sc_if->sk_softc = sc;
1161 	sc->sk_if[sc_if->sk_port] = sc_if;
1162 
1163 	kfree(device_get_ivars(dev), M_DEVBUF);
1164 	device_set_ivars(dev, NULL);
1165 
1166 	if (sc_if->sk_port == SK_PORT_A)
1167 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1168 	if (sc_if->sk_port == SK_PORT_B)
1169 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1170 
1171 	DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));
1172 
1173 	/*
1174 	 * Get station address for this interface. Note that
1175 	 * dual port cards actually come with three station
1176 	 * addresses: one for each port, plus an extra. The
1177 	 * extra one is used by the SysKonnect driver software
1178 	 * as a 'virtual' station address for when both ports
1179 	 * are operating in failover mode. Currently we don't
1180 	 * use this extra address.
1181 	 */
1182 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1183 		/* XXX */
1184 		sc_if->arpcom.ac_enaddr[i] =
1185 		    sk_win_read_1(sc, SK_MAC0_0 + (sc_if->sk_port * 8) + i);
1186 	}
1187 
1188 	/*
1189 	 * Set up RAM buffer addresses. The NIC will have a certain
1190 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1191 	 * need to divide this up a) between the transmitter and
1192  	 * receiver and b) between the two XMACs, if this is a
1193 	 * dual port NIC. Our algorithm is to divide up the memory
1194 	 * evenly so that everyone gets a fair share.
1195 	 */
1196 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1197 		uint32_t chunk, val;
1198 
1199 		chunk = sc->sk_ramsize / 2;
1200 		val = sc->sk_rboff / sizeof(uint64_t);
1201 		sc_if->sk_rx_ramstart = val;
1202 		val += (chunk / sizeof(uint64_t));
1203 		sc_if->sk_rx_ramend = val - 1;
1204 		sc_if->sk_tx_ramstart = val;
1205 		val += (chunk / sizeof(uint64_t));
1206 		sc_if->sk_tx_ramend = val - 1;
1207 	} else {
1208 		uint32_t chunk, val;
1209 
1210 		chunk = sc->sk_ramsize / 4;
1211 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1212 		    sizeof(uint64_t);
1213 		sc_if->sk_rx_ramstart = val;
1214 		val += (chunk / sizeof(uint64_t));
1215 		sc_if->sk_rx_ramend = val - 1;
1216 		sc_if->sk_tx_ramstart = val;
1217 		val += (chunk / sizeof(uint64_t));
1218 		sc_if->sk_tx_ramend = val - 1;
1219 	}
1220 
1221 	DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
1222 		     "           tx_ramstart=%#x tx_ramend=%#x\n",
1223 		     sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
1224 		     sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
1225 
1226 	/* Read and save PHY type */
1227 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1228 
1229 	/* Set PHY address */
1230 	if (SK_IS_GENESIS(sc)) {
1231 		switch (sc_if->sk_phytype) {
1232 		case SK_PHYTYPE_XMAC:
1233 			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1234 			break;
1235 		case SK_PHYTYPE_BCOM:
1236 			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1237 			break;
1238 		default:
1239 			device_printf(dev, "unsupported PHY type: %d\n",
1240 			    sc_if->sk_phytype);
1241 			error = ENXIO;
1242 			goto fail;
1243 		}
1244 	}
1245 
1246 	if (SK_IS_YUKON(sc)) {
1247 		if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1248 		    sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
1249 			/* not initialized, punt */
1250 			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1251 			sc->sk_coppertype = 1;
1252 		}
1253 
1254 		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1255 
1256 		if (!(sc->sk_coppertype))
1257 			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1258 	}
1259 
1260 	error = sk_dma_alloc(dev);
1261 	if (error)
1262 		goto fail;
1263 
1264 	ifp->if_softc = sc_if;
1265 	ifp->if_mtu = ETHERMTU;
1266 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1267 	ifp->if_ioctl = sk_ioctl;
1268 	ifp->if_start = sk_start;
1269 	ifp->if_watchdog = sk_watchdog;
1270 	ifp->if_init = sk_init;
1271 	ifp->if_baudrate = 1000000000;
1272 	ifq_set_maxlen(&ifp->if_snd, SK_TX_RING_CNT - 1);
1273 	ifq_set_ready(&ifp->if_snd);
1274 
1275 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1276 
1277 	/* Don't use jumbo buffers by default */
1278 	sc_if->sk_use_jumbo = 0;
1279 
1280 	/*
1281 	 * Do miibus setup.
1282 	 */
1283 	switch (sc->sk_type) {
1284 	case SK_GENESIS:
1285 		sk_init_xmac(sc_if);
1286 		break;
1287 	case SK_YUKON:
1288 	case SK_YUKON_LITE:
1289 	case SK_YUKON_LP:
1290 		sk_init_yukon(sc_if);
1291 		break;
1292 	default:
1293 		device_printf(dev, "unknown device type %d\n", sc->sk_type);
1294 		error = ENXIO;
1295 		goto fail;
1296 	}
1297 
1298  	DPRINTFN(2, ("sk_attach: 1\n"));
1299 
1300 	error = mii_phy_probe(dev, &sc_if->sk_miibus,
1301 			      sk_ifmedia_upd, sk_ifmedia_sts);
1302 	if (error) {
1303 		device_printf(dev, "no PHY found!\n");
1304 		goto fail;
1305 	}
1306 
1307 	callout_init(&sc_if->sk_tick_timer);
1308 
1309 	/*
1310 	 * Call MI attach routines.
1311 	 */
1312 	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr, &sc->sk_serializer);
1313 
1314 	DPRINTFN(2, ("sk_attach: end\n"));
1315 	return 0;
1316 fail:
1317 	sk_detach(dev);
1318 	sc->sk_if[sc_if->sk_port] = NULL;
1319 	return error;
1320 }
1321 
1322 /*
1323  * Attach the interface. Allocate softc structures, do ifmedia
1324  * setup and ethernet/BPF attach.
1325  */
1326 static int
1327 skc_attach(device_t dev)
1328 {
1329 	struct sk_softc *sc = device_get_softc(dev);
1330 	uint8_t skrs;
1331 	int *port;
1332 	int error, cpuid;
1333 
1334 	DPRINTFN(2, ("begin skc_attach\n"));
1335 
1336 	sc->sk_dev = dev;
1337 	lwkt_serialize_init(&sc->sk_serializer);
1338 
1339 #ifndef BURN_BRIDGES
1340 	/*
1341 	 * Handle power management nonsense.
1342 	 */
1343 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1344 		uint32_t iobase, membase, irq;
1345 
1346 		/* Save important PCI config data. */
1347 		iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1348 		membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1349 		irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1350 
1351 		/* Reset the power state. */
1352 		device_printf(dev, "chip is in D%d power mode "
1353 			      "-- setting to D0\n", pci_get_powerstate(dev));
1354 
1355 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1356 
1357 		/* Restore PCI config data. */
1358 		pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1359 		pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1360 		pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1361 	}
1362 #endif	/* BURN_BRIDGES */
1363 
1364 	/*
1365 	 * Map control/status registers.
1366 	 */
1367 	pci_enable_busmaster(dev);
1368 
1369 	sc->sk_res_rid = SK_PCI_LOMEM;
1370 	sc->sk_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1371 					    &sc->sk_res_rid, RF_ACTIVE);
1372 	if (sc->sk_res == NULL) {
1373 		device_printf(dev, "couldn't map memory\n");
1374 		error = ENXIO;
1375 		goto fail;
1376 	}
1377 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1378 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1379 
1380 	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1381 	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
1382 
1383 	/* Bail out here if chip is not recognized */
1384 	if (!SK_IS_GENESIS(sc) && !SK_IS_YUKON(sc)) {
1385 		device_printf(dev, "unknown chip type: %d\n", sc->sk_type);
1386 		error = ENXIO;
1387 		goto fail;
1388 	}
1389 
1390 	DPRINTFN(2, ("skc_attach: allocate interrupt\n"));
1391 
1392 	/* Allocate interrupt */
1393 	sc->sk_irq_rid = 0;
1394 	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sk_irq_rid,
1395 					    RF_SHAREABLE | RF_ACTIVE);
1396 	if (sc->sk_irq == NULL) {
1397 		device_printf(dev, "couldn't map interrupt\n");
1398 		error = ENXIO;
1399 		goto fail;
1400 	}
1401 
1402 	switch (sc->sk_type) {
1403 	case SK_GENESIS:
1404 		sc->sk_imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
1405 		break;
1406 	default:
1407 		sc->sk_imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
1408 		break;
1409 	}
1410 	sc->sk_imtime = skc_imtime;
1411 
1412 	/* Reset the adapter. */
1413 	sk_reset(sc);
1414 
1415 	skrs = sk_win_read_1(sc, SK_EPROM0);
1416 	if (SK_IS_GENESIS(sc)) {
1417 		/* Read and save RAM size and RAMbuffer offset */
1418 		switch(skrs) {
1419 		case SK_RAMSIZE_512K_64:
1420 			sc->sk_ramsize = 0x80000;
1421 			sc->sk_rboff = SK_RBOFF_0;
1422 			break;
1423 		case SK_RAMSIZE_1024K_64:
1424 			sc->sk_ramsize = 0x100000;
1425 			sc->sk_rboff = SK_RBOFF_80000;
1426 			break;
1427 		case SK_RAMSIZE_1024K_128:
1428 			sc->sk_ramsize = 0x100000;
1429 			sc->sk_rboff = SK_RBOFF_0;
1430 			break;
1431 		case SK_RAMSIZE_2048K_128:
1432 			sc->sk_ramsize = 0x200000;
1433 			sc->sk_rboff = SK_RBOFF_0;
1434 			break;
1435 		default:
1436 			device_printf(dev, "unknown ram size: %d\n", skrs);
1437 			error = ENXIO;
1438 			goto fail;
1439 		}
1440 	} else {
1441 		if (skrs == 0x00)
1442 			sc->sk_ramsize = 0x20000;
1443 		else
1444 			sc->sk_ramsize = skrs * (1<<12);
1445 		sc->sk_rboff = SK_RBOFF_0;
1446 	}
1447 
1448 	DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
1449 		     sc->sk_ramsize, sc->sk_ramsize / 1024,
1450 		     sc->sk_rboff));
1451 
1452 	/* Read and save physical media type */
1453 	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1454 
1455 	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1456 		sc->sk_coppertype = 1;
1457 	else
1458 		sc->sk_coppertype = 0;
1459 
1460 	/* Yukon Lite Rev A0 needs special test, from sk98lin driver */
1461 	if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1462 		uint32_t flashaddr;
1463 		uint8_t testbyte;
1464 
1465 		flashaddr = sk_win_read_4(sc, SK_EP_ADDR);
1466 
1467 		/* Test Flash-Address Register */
1468 		sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
1469 		testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);
1470 
1471 		if (testbyte != 0) {
1472 			/* This is a Yukon Lite Rev A0 */
1473 			sc->sk_type = SK_YUKON_LITE;
1474 			sc->sk_rev = SK_YUKON_LITE_REV_A0;
1475 			/* Restore Flash-Address Register */
1476 			sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
1477 		}
1478 	}
1479 
1480 	/*
1481 	 * Create sysctl nodes.
1482 	 */
1483 	sysctl_ctx_init(&sc->sk_sysctl_ctx);
1484 	sc->sk_sysctl_tree = SYSCTL_ADD_NODE(&sc->sk_sysctl_ctx,
1485 					     SYSCTL_STATIC_CHILDREN(_hw),
1486 					     OID_AUTO,
1487 					     device_get_nameunit(dev),
1488 					     CTLFLAG_RD, 0, "");
1489 	if (sc->sk_sysctl_tree == NULL) {
1490 		device_printf(dev, "can't add sysctl node\n");
1491 		error = ENXIO;
1492 		goto fail;
1493 	}
1494 	SYSCTL_ADD_PROC(&sc->sk_sysctl_ctx,
1495 			SYSCTL_CHILDREN(sc->sk_sysctl_tree),
1496 			OID_AUTO, "imtime", CTLTYPE_INT | CTLFLAG_RW,
1497 			sc, 0, skc_sysctl_imtime, "I",
1498 			"Interrupt moderation time (usec).");
1499 
1500 	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1501 	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1502 	*port = SK_PORT_A;
1503 	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1504 
1505 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1506 		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1507 		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1508 		*port = SK_PORT_B;
1509 		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1510 	}
1511 
1512 	/* Turn on the 'driver is loaded' LED. */
1513 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1514 
1515 	bus_generic_attach(dev);
1516 
1517 	error = bus_setup_intr(dev, sc->sk_irq, INTR_MPSAFE, sk_intr, sc,
1518 			       &sc->sk_intrhand, &sc->sk_serializer);
1519 	if (error) {
1520 		device_printf(dev, "couldn't set up irq\n");
1521 		goto fail;
1522 	}
1523 
1524 	cpuid = ithread_cpuid(rman_get_start(sc->sk_irq));
1525 	KKASSERT(cpuid >= 0 && cpuid < ncpus);
1526 
1527 	if (sc->sk_if[0] != NULL)
1528 		sc->sk_if[0]->arpcom.ac_if.if_cpuid = cpuid;
1529 	if (sc->sk_if[1] != NULL)
1530 		sc->sk_if[1]->arpcom.ac_if.if_cpuid = cpuid;
1531 
1532 	return 0;
1533 fail:
1534 	skc_detach(dev);
1535 	return error;
1536 }
1537 
/*
 * Detach a single port (sk child device).  Also used by sk_attach()
 * on its failure path, so it must tolerate partial initialization.
 */
static int
sk_detach(device_t dev)
{
	struct sk_if_softc *sc_if = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct sk_softc *sc = sc_if->sk_softc;
		struct ifnet *ifp = &sc_if->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		/*
		 * The interrupt is shared by both ports: stop both of
		 * them and tear it down the first time either port is
		 * detached.
		 */
		if (sc->sk_intrhand != NULL) {
			if (sc->sk_if[SK_PORT_A] != NULL)
				sk_stop(sc->sk_if[SK_PORT_A]);
			if (sc->sk_if[SK_PORT_B] != NULL)
				sk_stop(sc->sk_if[SK_PORT_B]);

			bus_teardown_intr(sc->sk_dev, sc->sk_irq,
					  sc->sk_intrhand);
			sc->sk_intrhand = NULL;
		}

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);

	sk_dma_free(dev);
	return 0;
}
1571 
1572 static int
1573 skc_detach(device_t dev)
1574 {
1575 	struct sk_softc *sc = device_get_softc(dev);
1576 	int *port;
1577 
1578 #ifdef INVARIANTS
1579 	if (device_is_attached(dev)) {
1580 		KASSERT(sc->sk_intrhand == NULL,
1581 			("intr has not been torn down yet"));
1582 	}
1583 #endif
1584 
1585 	if (sc->sk_devs[SK_PORT_A] != NULL) {
1586 		port = device_get_ivars(sc->sk_devs[SK_PORT_A]);
1587 		if (port != NULL) {
1588 			kfree(port, M_DEVBUF);
1589 			device_set_ivars(sc->sk_devs[SK_PORT_A], NULL);
1590 		}
1591 		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1592 	}
1593 	if (sc->sk_devs[SK_PORT_B] != NULL) {
1594 		port = device_get_ivars(sc->sk_devs[SK_PORT_B]);
1595 		if (port != NULL) {
1596 			kfree(port, M_DEVBUF);
1597 			device_set_ivars(sc->sk_devs[SK_PORT_B], NULL);
1598 		}
1599 		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1600 	}
1601 
1602 	if (sc->sk_irq != NULL) {
1603 		bus_release_resource(dev, SYS_RES_IRQ, sc->sk_irq_rid,
1604 				     sc->sk_irq);
1605 	}
1606 	if (sc->sk_res != NULL) {
1607 		bus_release_resource(dev, SYS_RES_MEMORY, sc->sk_res_rid,
1608 				     sc->sk_res);
1609 	}
1610 
1611 	if (sc->sk_sysctl_tree != NULL)
1612 		sysctl_ctx_free(&sc->sk_sysctl_ctx);
1613 
1614 	return 0;
1615 }
1616 
1617 static int
1618 sk_encap(struct sk_if_softc *sc_if, struct mbuf **m_head0, uint32_t *txidx)
1619 {
1620 	struct sk_chain_data *cd = &sc_if->sk_cdata;
1621 	struct sk_ring_data *rd = &sc_if->sk_rdata;
1622 	struct sk_tx_desc *f = NULL;
1623 	uint32_t frag, cur, sk_ctl;
1624 	bus_dma_segment_t segs[SK_NTXSEG];
1625 	bus_dmamap_t map;
1626 	int i, error, maxsegs, nsegs;
1627 
1628 	DPRINTFN(2, ("sk_encap\n"));
1629 
1630 	maxsegs = SK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - SK_NDESC_RESERVE;
1631 	KASSERT(maxsegs >= SK_NDESC_SPARE, ("not enough spare TX desc\n"));
1632 	if (maxsegs > SK_NTXSEG)
1633 		maxsegs = SK_NTXSEG;
1634 
1635 	cur = frag = *txidx;
1636 
1637 #ifdef SK_DEBUG
1638 	if (skdebug >= 2)
1639 		sk_dump_mbuf(*m_head0);
1640 #endif
1641 
1642 	map = cd->sk_tx_dmap[*txidx];
1643 
1644 	error = bus_dmamap_load_mbuf_defrag(cd->sk_tx_dtag, map, m_head0,
1645 			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1646 	if (error) {
1647 		m_freem(*m_head0);
1648 		*m_head0 = NULL;
1649 		return error;
1650 	}
1651 
1652 	DPRINTFN(2, ("sk_encap: nsegs=%d\n", nsegs));
1653 
1654 	/* Sync the DMA map. */
1655 	bus_dmamap_sync(cd->sk_tx_dtag, map, BUS_DMASYNC_PREWRITE);
1656 
1657 	for (i = 0; i < nsegs; i++) {
1658 		f = &rd->sk_tx_ring[frag];
1659 		f->sk_data_lo = htole32(SK_ADDR_LO(segs[i].ds_addr));
1660 		f->sk_data_hi = htole32(SK_ADDR_HI(segs[i].ds_addr));
1661 		sk_ctl = segs[i].ds_len | SK_OPCODE_DEFAULT;
1662 		if (i == 0)
1663 			sk_ctl |= SK_TXCTL_FIRSTFRAG;
1664 		else
1665 			sk_ctl |= SK_TXCTL_OWN;
1666 		f->sk_ctl = htole32(sk_ctl);
1667 		cur = frag;
1668 		SK_INC(frag, SK_TX_RING_CNT);
1669 	}
1670 
1671 	cd->sk_tx_mbuf[cur] = *m_head0;
1672 	/* Switch DMA map */
1673 	cd->sk_tx_dmap[*txidx] = cd->sk_tx_dmap[cur];
1674 	cd->sk_tx_dmap[cur] = map;
1675 
1676 	rd->sk_tx_ring[cur].sk_ctl |=
1677 		htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR);
1678 	rd->sk_tx_ring[*txidx].sk_ctl |= htole32(SK_TXCTL_OWN);
1679 
1680 	sc_if->sk_cdata.sk_tx_cnt += nsegs;
1681 
1682 #ifdef SK_DEBUG
1683 	if (skdebug >= 2) {
1684 		struct sk_tx_desc *desc;
1685 		uint32_t idx;
1686 
1687 		for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
1688 			desc = &sc_if->sk_rdata->sk_tx_ring[idx];
1689 			sk_dump_txdesc(desc, idx);
1690 		}
1691 	}
1692 #endif
1693 
1694 	*txidx = frag;
1695 
1696 	DPRINTFN(2, ("sk_encap: completed successfully\n"));
1697 
1698 	return (0);
1699 }
1700 
/*
 * Transmit-start handler: drain the interface send queue into the TX
 * descriptor ring via sk_encap() and kick the transmit BMU.
 */
static void
sk_start(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	uint32_t idx = sc_if->sk_cdata.sk_tx_prod;
	int trans = 0;

	DPRINTFN(2, ("sk_start\n"));

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Stop once we reach a ring slot that still holds an mbuf. */
	while (sc_if->sk_cdata.sk_tx_mbuf[idx] == NULL) {
		struct mbuf *m_head;

		if (SK_IS_OACTIVE(sc_if)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, &m_head, &idx)) {
			/* sk_encap() already freed the mbuf on failure. */
			if (sc_if->sk_cdata.sk_tx_cnt == 0) {
				continue;
			} else {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		trans = 1;
		BPF_MTAP(ifp, m_head);
	}
	if (!trans)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1755 
1756 static void
1757 sk_watchdog(struct ifnet *ifp)
1758 {
1759 	struct sk_if_softc *sc_if = ifp->if_softc;
1760 
1761 	ASSERT_SERIALIZED(ifp->if_serializer);
1762 	/*
1763 	 * Reclaim first as there is a possibility of losing Tx completion
1764 	 * interrupts.
1765 	 */
1766 	sk_txeof(sc_if);
1767 	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
1768 		if_printf(&sc_if->arpcom.ac_if, "watchdog timeout\n");
1769 		ifp->if_oerrors++;
1770 		ifp->if_flags &= ~IFF_RUNNING;
1771 		sk_init(sc_if);
1772 	}
1773 }
1774 
1775 static void
1776 skc_shutdown(device_t dev)
1777 {
1778 	struct sk_softc *sc = device_get_softc(dev);
1779 
1780 	DPRINTFN(2, ("sk_shutdown\n"));
1781 
1782 	lwkt_serialize_enter(&sc->sk_serializer);
1783 
1784 	/* Turn off the 'driver is loaded' LED. */
1785 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1786 
1787 	/*
1788 	 * Reset the GEnesis controller. Doing this should also
1789 	 * assert the resets on the attached XMAC(s).
1790 	 */
1791 	sk_reset(sc);
1792 
1793 	lwkt_serialize_exit(&sc->sk_serializer);
1794 }
1795 
1796 static __inline int
1797 sk_rxvalid(struct sk_softc *sc, uint32_t stat, uint32_t len)
1798 {
1799 	if (sc->sk_type == SK_GENESIS) {
1800 		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
1801 		    XM_RXSTAT_BYTES(stat) != len)
1802 			return (0);
1803 	} else {
1804 		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1805 		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1806 		    YU_RXSTAT_JABBER)) != 0 ||
1807 		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1808 		    YU_RXSTAT_BYTES(stat) != len)
1809 			return (0);
1810 	}
1811 
1812 	return (1);
1813 }
1814 
1815 static void
1816 sk_rxeof(struct sk_if_softc *sc_if)
1817 {
1818 	struct sk_softc *sc = sc_if->sk_softc;
1819 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
1820 	struct sk_chain_data *cd = &sc_if->sk_cdata;
1821 	struct sk_ring_data *rd = &sc_if->sk_rdata;
1822 	int i, reap, max_frmlen;
1823 
1824 	DPRINTFN(2, ("sk_rxeof\n"));
1825 
1826 	i = cd->sk_rx_prod;
1827 
1828 	if (sc_if->sk_use_jumbo)
1829 		max_frmlen = SK_JUMBO_FRAMELEN;
1830 	else
1831 		max_frmlen = ETHER_MAX_LEN;
1832 
1833 	reap = 0;
1834 	for (;;) {
1835 		struct sk_rx_desc *cur_desc;
1836 		uint32_t rxstat, sk_ctl;
1837 #ifdef SK_RXCSUM
1838 		uint16_t csum1, csum2;
1839 #endif
1840 		int cur, total_len;
1841 		struct mbuf *m;
1842 
1843 		cur = i;
1844 		cur_desc = &rd->sk_rx_ring[cur];
1845 
1846 		sk_ctl = le32toh(cur_desc->sk_ctl);
1847 		if (sk_ctl & SK_RXCTL_OWN) {
1848 			/* Invalidate the descriptor -- it's not ready yet */
1849 			cd->sk_rx_prod = cur;
1850 			break;
1851 		}
1852 
1853 		rxstat = le32toh(cur_desc->sk_xmac_rxstat);
1854 		total_len = SK_RXBYTES(le32toh(cur_desc->sk_ctl));
1855 
1856 #ifdef SK_RXCSUM
1857 		csum1 = le16toh(cur_desc->sk_csum1);
1858 		csum2 = le16toh(cur_desc->sk_csum2);
1859 #endif
1860 
1861 		m = cd->sk_rx_mbuf[cur];
1862 
1863 		/*
1864 		 * Bump 'i' here, so we can keep going, even if the current
1865 		 * RX descriptor reaping fails later.  'i' shoult NOT be used
1866 		 * in the following processing any more.
1867 		 */
1868 		SK_INC(i, SK_RX_RING_CNT);
1869 		reap = 1;
1870 
1871 		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
1872 		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
1873 		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
1874 		    total_len < SK_MIN_FRAMELEN || total_len > max_frmlen ||
1875 		    sk_rxvalid(sc, rxstat, total_len) == 0) {
1876 			ifp->if_ierrors++;
1877 			cur_desc->sk_ctl = htole32(m->m_pkthdr.len | SK_RXSTAT);
1878 			continue;
1879 		}
1880 
1881 		/*
1882 		 * Try to allocate a new RX buffer. If that fails,
1883 		 * copy the packet to mbufs and put the RX buffer
1884 		 * back in the ring so it can be re-used. If
1885 		 * allocating mbufs fails, then we have to drop
1886 		 * the packet.
1887 		 */
1888 		if (sk_newbuf(sc_if, cur, 0)) {
1889 			ifp->if_ierrors++;
1890 			cur_desc->sk_ctl = htole32(m->m_pkthdr.len | SK_RXSTAT);
1891 			continue;
1892 		} else {
1893 			m->m_pkthdr.rcvif = ifp;
1894 			m->m_pkthdr.len = m->m_len = total_len;
1895 		}
1896 
1897 #ifdef SK_RXCSUM
1898 		sk_rxcsum(ifp, m, csum1, csum2);
1899 #endif
1900 
1901 		ifp->if_ipackets++;
1902 		ifp->if_input(ifp, m);
1903 	}
1904 }
1905 
#ifdef SK_RXCSUM
/*
 * Software-assisted RX checksum verification.  The hardware provides
 * two raw 16-bit ones-complement sums over the received frame (csum1
 * and csum2, from the two checksum start positions); this routine
 * derives the IP header and payload checksums from them and, when
 * they verify, flags the mbuf so the stack can skip its own pass.
 * Logic inherited from the OpenBSD driver.
 *
 * Fix: removed the duplicate 'xp = (uint16_t *)pp;' statement (the
 * declaration directly above already performs that assignment) and
 * the redundant 'ipo_csum = 0;' that the for-loop init repeats.
 */
static void
sk_rxcsum(struct ifnet *ifp, struct mbuf *m,
	  const uint16_t csum1, const uint16_t csum2)
{
	struct ether_header *eh;
	struct ip *ip;
	uint8_t *pp;
	int hlen, len, plen;
	uint16_t iph_csum, ipo_csum, ipd_csum, csum;

	pp = mtod(m, uint8_t *);
	plen = m->m_pkthdr.len;
	if (plen < sizeof(*eh))
		return;
	eh = (struct ether_header *)pp;
	iph_csum = in_addword(csum1, (~csum2 & 0xffff));

	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
		uint16_t *xp = (uint16_t *)pp;

		if (xp[1] != htons(ETHERTYPE_IP))
			return;
		/*
		 * NOTE(review): VLAN adjustment inherited verbatim from
		 * OpenBSD; the xp[] offsets look unusual -- verify
		 * against a VLAN-tagged capture before relying on it.
		 */
		iph_csum = in_addword(iph_csum, (~xp[0] & 0xffff));
		iph_csum = in_addword(iph_csum, (~xp[1] & 0xffff));
		xp = (uint16_t *)(pp + sizeof(struct ip));
		iph_csum = in_addword(iph_csum, xp[0]);
		iph_csum = in_addword(iph_csum, xp[1]);
		pp += EVL_ENCAPLEN;
	} else if (eh->ether_type != htons(ETHERTYPE_IP)) {
		return;
	}

	pp += sizeof(*eh);
	plen -= sizeof(*eh);

	ip = (struct ip *)pp;

	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip))
		return;
	if (hlen > ntohs(ip->ip_len))
		return;

	/* Don't deal with truncated or padded packets. */
	if (plen != ntohs(ip->ip_len))
		return;

	len = hlen - sizeof(struct ip);
	if (len > 0) {
		uint16_t *p;

		/*
		 * Fold IP options into the header sum and remove them
		 * from the data sum.
		 */
		p = (uint16_t *)(ip + 1);
		for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++)
			ipo_csum = in_addword(ipo_csum, *p);
		iph_csum = in_addword(iph_csum, ipo_csum);
		ipd_csum = in_addword(csum2, (~ipo_csum & 0xffff));
	} else {
		ipd_csum = csum2;
	}

	if (iph_csum != 0xffff)
		return;
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;                 /* ip frag, we're done for now */

	pp += hlen;

	/* Only know checksum protocol for udp/tcp */
	if (ip->ip_p == IPPROTO_UDP) {
		struct udphdr *uh = (struct udphdr *)pp;

		if (uh->uh_sum == 0)    /* udp with no checksum */
			return;
	} else if (ip->ip_p != IPPROTO_TCP) {
		return;
	}

	csum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
	    htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum);
	if (csum == 0xffff) {
		m->m_pkthdr.csum_data = csum;
		m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
	}
}
#endif
1999 
2000 static void
2001 sk_txeof(struct sk_if_softc *sc_if)
2002 {
2003 	struct sk_chain_data *cd = &sc_if->sk_cdata;
2004 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
2005 	uint32_t idx;
2006 	int reap = 0;
2007 
2008 	DPRINTFN(2, ("sk_txeof\n"));
2009 
2010 	/*
2011 	 * Go through our tx ring and free mbufs for those
2012 	 * frames that have been sent.
2013 	 */
2014 	idx = cd->sk_tx_cons;
2015 	while (idx != cd->sk_tx_prod) {
2016 		struct sk_tx_desc *cur_tx;
2017 		uint32_t sk_ctl;
2018 
2019 		cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx];
2020 		sk_ctl = le32toh(cur_tx->sk_ctl);
2021 #ifdef SK_DEBUG
2022 		if (skdebug >= 2)
2023 			sk_dump_txdesc(cur_tx, idx);
2024 #endif
2025 		if (sk_ctl & SK_TXCTL_OWN)
2026 			break;
2027 		if (sk_ctl & SK_TXCTL_LASTFRAG)
2028 			ifp->if_opackets++;
2029 		if (cd->sk_tx_mbuf[idx] != NULL) {
2030 			bus_dmamap_unload(cd->sk_tx_dtag, cd->sk_tx_dmap[idx]);
2031 			m_freem(cd->sk_tx_mbuf[idx]);
2032 			cd->sk_tx_mbuf[idx] = NULL;
2033 		}
2034 		sc_if->sk_cdata.sk_tx_cnt--;
2035 		reap = 1;
2036 		SK_INC(idx, SK_TX_RING_CNT);
2037 	}
2038 
2039 	if (!SK_IS_OACTIVE(sc_if))
2040 		ifp->if_flags &= ~IFF_OACTIVE;
2041 
2042 	if (sc_if->sk_cdata.sk_tx_cnt == 0)
2043 		ifp->if_timer = 0;
2044 
2045 	sc_if->sk_cdata.sk_tx_cons = idx;
2046 }
2047 
/*
 * GEnesis link-state poll, armed by sk_intr_xmac()/sk_intr_bcom()
 * when a link event is seen.  Re-arms itself every second until the
 * link reads stable, then re-enables the GP0 interrupt.
 */
static void
sk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
	int i;

	DPRINTFN(2, ("sk_tick\n"));

	lwkt_serialize_enter(ifp->if_serializer);

	if ((ifp->if_flags & IFF_UP) == 0) {
		lwkt_serialize_exit(ifp->if_serializer);
		return;
	}

	/* Broadcom PHYs are handled by the BCOM interrupt path instead. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		lwkt_serialize_exit(ifp->if_serializer);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	if (i != 3) {
		/* Link not stable yet; poll again in one second. */
		callout_reset(&sc_if->sk_tick_timer, hz, sk_tick, sc_if);
		lwkt_serialize_exit(ifp->if_serializer);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	callout_stop(&sc_if->sk_tick_timer);
	lwkt_serialize_exit(ifp->if_serializer);
}
2096 
2097 static void
2098 sk_yukon_tick(void *xsc_if)
2099 {
2100 	struct sk_if_softc *sc_if = xsc_if;
2101 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
2102 	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
2103 
2104 	lwkt_serialize_enter(ifp->if_serializer);
2105 	mii_tick(mii);
2106 	callout_reset(&sc_if->sk_tick_timer, hz, sk_yukon_tick, sc_if);
2107 	lwkt_serialize_exit(ifp->if_serializer);
2108 }
2109 
/*
 * Service a Broadcom PHY interrupt (GEnesis boards with a BCOM PHY):
 * read/clear the PHY ISR, track link up/down transitions, drive the
 * link LED and sk_link state.  MAC RX/TX are disabled for the
 * duration of the PHY access and re-enabled on the way out.
 */
static void
sk_intr_bcom(struct sk_if_softc *sc_if)
{
	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int status;

	DPRINTFN(2, ("sk_intr_bcom\n"));

	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface went down: reinitialize the MAC and bail. */
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;

		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link went away: renegotiate. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link changed and is up: adjust the PHY IMR. */
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* Still negotiating: poll again via sk_tick(). */
			mii_tick(mii);
			callout_reset(&sc_if->sk_tick_timer, hz,
				      sk_tick, sc_if);
		}
	}

	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
}
2162 
/*
 * Handle an interrupt from the XMAC itself (GEnesis).  Reading XM_ISR
 * also serves to acknowledge the interrupt.  GP0/autoneg events start
 * the sk_tick poller; FIFO overrun/underrun conditions are cleared by
 * flushing the corresponding FIFO.
 */
static void
sk_intr_xmac(struct sk_if_softc *sc_if)
{
	uint16_t status;

	status = SK_XM_READ_2(sc_if, XM_ISR);
	DPRINTFN(2, ("sk_intr_xmac\n"));

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC &&
	    (status & (XM_ISR_GP0_SET | XM_ISR_AUTONEG_DONE))) {
		/* Mask GP0 until sk_tick() confirms the link is stable. */
		if (status & XM_ISR_GP0_SET)
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);

		callout_reset(&sc_if->sk_tick_timer, hz,
			      sk_tick, sc_if);
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
}
2186 
2187 static void
2188 sk_intr_yukon(struct sk_if_softc *sc_if)
2189 {
2190 	uint8_t status;
2191 
2192 	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
2193 	/* RX overrun */
2194 	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
2195 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2196 		    SK_RFCTL_RX_FIFO_OVER);
2197 	}
2198 	/* TX underrun */
2199 	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
2200 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2201 		    SK_TFCTL_TX_FIFO_UNDER);
2202 	}
2203 
2204 	DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status));
2205 }
2206 
/*
 * Shared interrupt handler for both ports of the controller.
 *
 * Loops on the interrupt source register (masked by sk_intrmask),
 * servicing RX EOF, TX EOF, per-MAC, and external-register (Broadcom
 * PHY) events for whichever port(s) are attached, until no enabled
 * sources remain.  Finally re-arms the interrupt mask and kicks the
 * transmit queues if packets are pending.
 *
 * Caller must hold sc->sk_serializer (asserted below).
 */
static void
sk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->sk_serializer);

	status = CSR_READ_4(sc, SK_ISSR);
	/* 0xffffffff indicates the device is gone (e.g. hot unplug). */
	if (status == 0 || status == 0xffffffff)
		return;

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	for (; (status &= sc->sk_intrmask) != 0;) {
		/* Handle receive interrupts first. */
		if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
			sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}
		if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
			sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
			    SK_TXBMU_CLR_IRQ_EOF);
		}
		if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
			    SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts. */
		if (sc_if0 && (status & SK_ISR_MAC1) &&
		    (ifp0->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (sc_if1 && (status & SK_ISR_MAC2) &&
		    (ifp1->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);
		}

		/* External-register interrupt: Broadcom PHY link events. */
		if (status & SK_ISR_EXTERNAL_REG) {
			if (sc_if0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);

			if (sc_if1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
		/* Re-read: new events may have arrived while servicing. */
		status = CSR_READ_4(sc, SK_ISSR);
	}

	/* Re-arm the interrupt mask. */
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Restart transmission if packets queued up while we worked. */
	if (ifp0 != NULL && !ifq_is_empty(&ifp0->if_snd))
		if_devstart(ifp0);
	if (ifp1 != NULL && !ifq_is_empty(&ifp1->if_snd))
		if_devstart(ifp1);
}
2288 
/*
 * Bring the XMAC (GEnesis) out of reset and program it: station
 * address, broadcast/FCS/padding behavior, jumbo handling, promiscuous
 * and multicast filters, interrupt mask, and the MAC arbiter timings
 * appropriate to the XMAC II revision.  For boards with a Broadcom
 * PHY, also releases the PHY from reset and applies the BCM5400
 * errata writes.
 */
static void
sk_init_xmac(struct sk_if_softc	*sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	/* Magic register writes for early BCM5400 PHYs (vendor errata). */
	static const struct sk_bcom_hack bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	DPRINTFN(2, ("sk_init_xmac\n"));

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseT cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int i = 0;
		uint32_t val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then unmask its interrupts. */
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_BROADCAST)
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	else
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	if (sc_if->sk_use_jumbo) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
		    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
		    XM_MODE_RX_INRANGELEN);
	}

	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/* NOTE(review): B2 recovery value used for C1 too — confirm
		 * this is intentional (FreeBSD/OpenBSD do the same). */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;
}
2447 
/*
 * Bring the Yukon GMAC/GPHY out of reset and program it: PHY mode
 * (copper/fiber), MIB counter clear, receive/transmit parameters,
 * station address, promiscuous and multicast filters, and the RX/TX
 * MAC FIFOs.  Includes the documented COMA-mode workaround for
 * Yukon-Lite rev A3 and later.
 */
static void
sk_init_yukon(struct sk_if_softc *sc_if)
{
	uint32_t phy, v;
	uint16_t reg;
	struct sk_softc *sc;
	int i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, set PHY reset.
		 * Otherwise it will not correctly take chip out of
		 * powerdown (coma)
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	DPRINTFN(6, ("sk_init_yukon: 1\n"));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	DPRINTFN(6, ("sk_init_yukon: 2\n"));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, clear PHY reset
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9;
		v &= ~SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* Build the GPHY control word: polarity, autoneg, pause, media. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));

	/* Pulse PHY reset with the new settings, then release the GMAC. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("sk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("sk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("sk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("sk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("sk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("sk_init_yukon: 9\n"));
	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
	if (sc_if->sk_use_jumbo)
		reg |= YU_SMR_MFL_JUMBO;
	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	DPRINTFN(6, ("sk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->arpcom.ac_enaddr[i * 2] |
			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Source Address 2: the MAC stored in the chip's EEPROM area. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("sk_init_yukon: 11\n"));
	sk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("sk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
	/* NOTE(review): SK_TFCTL_OPERATION_ON is used for the RX FIFO
	 * here; presumably the TX/RX OPERATION_ON bits share the same
	 * value — confirm against if_skreg.h. */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

	DPRINTFN(6, ("sk_init_yukon: end\n"));
}
2602 
2603 /*
2604  * Note that to properly initialize any part of the GEnesis chip,
2605  * you first have to take it out of reset mode.
2606  */
2607 static void
2608 sk_init(void *xsc_if)
2609 {
2610 	struct sk_if_softc *sc_if = xsc_if;
2611 	struct sk_softc *sc = sc_if->sk_softc;
2612 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
2613 	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
2614 
2615 	DPRINTFN(2, ("sk_init\n"));
2616 
2617 	ASSERT_SERIALIZED(ifp->if_serializer);
2618 
2619 	if (ifp->if_flags & IFF_RUNNING)
2620 		return;
2621 
2622 	/* Cancel pending I/O and free all RX/TX buffers. */
2623 	sk_stop(sc_if);
2624 
2625 	/*
2626 	 * NOTE: Change sk_use_jumbo after sk_stop(),
2627 	 *       but before real initialization.
2628 	 */
2629 	if (ifp->if_mtu > ETHER_MAX_LEN)
2630 		sc_if->sk_use_jumbo = 1;
2631 	else
2632 		sc_if->sk_use_jumbo = 0;
2633 	DPRINTF(("use jumbo buffer: %s\n", sc_if->sk_use_jumbo ? "YES" : "NO"));
2634 
2635 	if (SK_IS_GENESIS(sc)) {
2636 		/* Configure LINK_SYNC LED */
2637 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2638 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2639 			SK_LINKLED_LINKSYNC_ON);
2640 
2641 		/* Configure RX LED */
2642 		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2643 			SK_RXLEDCTL_COUNTER_START);
2644 
2645 		/* Configure TX LED */
2646 		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2647 			SK_TXLEDCTL_COUNTER_START);
2648 	}
2649 
2650 	/*
2651 	 * Configure descriptor poll timer
2652 	 *
2653 	 * SK-NET GENESIS data sheet says that possibility of losing Start
2654 	 * transmit command due to CPU/cache related interim storage problems
2655 	 * under certain conditions. The document recommends a polling
2656 	 * mechanism to send a Start transmit command to initiate transfer
2657 	 * of ready descriptors regulary. To cope with this issue sk(4) now
2658 	 * enables descriptor poll timer to initiate descriptor processing
2659 	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
2660 	 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
2661 	 * command instead of waiting for next descriptor polling time.
2662 	 * The same rule may apply to Rx side too but it seems that is not
2663 	 * needed at the moment.
2664 	 * Since sk(4) uses descriptor polling as a last resort there is no
2665 	 * need to set smaller polling time than maximum allowable one.
2666 	 */
2667 	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
2668 
2669 	/* Configure I2C registers */
2670 
2671 	/* Configure XMAC(s) */
2672 	switch (sc->sk_type) {
2673 	case SK_GENESIS:
2674 		sk_init_xmac(sc_if);
2675 		break;
2676 	case SK_YUKON:
2677 	case SK_YUKON_LITE:
2678 	case SK_YUKON_LP:
2679 		sk_init_yukon(sc_if);
2680 		break;
2681 	}
2682 	mii_mediachg(mii);
2683 
2684 	if (SK_IS_GENESIS(sc)) {
2685 		/* Configure MAC FIFOs */
2686 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2687 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2688 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2689 
2690 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2691 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2692 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2693 	}
2694 
2695 	/* Configure transmit arbiter(s) */
2696 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2697 	    SK_TXARCTL_ON | SK_TXARCTL_FSYNC_ON);
2698 
2699 	/* Configure RAMbuffers */
2700 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2701 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2702 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2703 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2704 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2705 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2706 
2707 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2708 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2709 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2710 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2711 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2712 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2713 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2714 
2715 	/* Configure BMUs */
2716 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2717 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2718 		      SK_ADDR_LO(sc_if->sk_rdata.sk_rx_ring_paddr));
2719 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
2720 		      SK_ADDR_HI(sc_if->sk_rdata.sk_rx_ring_paddr));
2721 
2722 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2723 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2724 		      SK_ADDR_LO(sc_if->sk_rdata.sk_tx_ring_paddr));
2725 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
2726 		      SK_ADDR_HI(sc_if->sk_rdata.sk_tx_ring_paddr));
2727 
2728 	/* Init descriptors */
2729 	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2730 		if_printf(ifp, "initialization failed: "
2731 			  "no memory for rx buffers\n");
2732 		sk_stop(sc_if);
2733 		return;
2734 	}
2735 
2736 	if (sk_init_tx_ring(sc_if) == ENOBUFS) {
2737 		if_printf(ifp, "initialization failed: "
2738 			  "no memory for tx buffers\n");
2739 		sk_stop(sc_if);
2740 		return;
2741 	}
2742 
2743 	/* Configure interrupt handling */
2744 	CSR_READ_4(sc, SK_ISSR);
2745 	if (sc_if->sk_port == SK_PORT_A)
2746 		sc->sk_intrmask |= SK_INTRS1;
2747 	else
2748 		sc->sk_intrmask |= SK_INTRS2;
2749 
2750 	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2751 
2752 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2753 
2754 	/* Start BMUs. */
2755 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2756 
2757 	if (SK_IS_GENESIS(sc)) {
2758 		/* Enable XMACs TX and RX state machines */
2759 		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2760 		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
2761 			       XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2762 	}
2763 
2764 	if (SK_IS_YUKON(sc)) {
2765 		uint16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2766 		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2767 #if 0
2768 		/* XXX disable 100Mbps and full duplex mode? */
2769 		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
2770 #endif
2771 		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2772 	}
2773 
2774 	/* Activate descriptor polling timer */
2775 	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
2776 	/* Start transfer of Tx descriptors */
2777 	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2778 
2779 	ifp->if_flags |= IFF_RUNNING;
2780 	ifp->if_flags &= ~IFF_OACTIVE;
2781 
2782 	if (SK_IS_YUKON(sc))
2783 		callout_reset(&sc_if->sk_tick_timer, hz, sk_yukon_tick, sc_if);
2784 }
2785 
/*
 * Stop one port: halt the tick callout, stop the TX/RX BMUs (with
 * bounded busy-waits), put the PHY/MAC back into reset, turn off the
 * RAM buffers, arbiter and LEDs, mask this port's interrupts, and free
 * every mbuf still attached to the RX/TX rings.  Clears IFF_RUNNING.
 * Caller must hold the interface serializer.
 */
static void
sk_stop(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	uint32_t val;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	DPRINTFN(2, ("sk_stop\n"));

	callout_stop(&sc_if->sk_tick_timer);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* Stop Tx descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/* Stop transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
	/* Wait (bounded) for the BMU to acknowledge the stop. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
		if (!(val & SK_TXBMU_TX_STOP))
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		if_printf(ifp, "cannot stop transfer of Tx descriptors\n");

	/* Stop transfer of Rx descriptors */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
		if (!(val & SK_RXBMU_RX_STOP))
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		if_printf(ifp, "cannot stop transfer of Rx descriptors\n");

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET | SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST,
	    SK_RBCTL_RESET | SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	/* NOTE(review): the RX constant is written to the TX LED register;
	 * presumably the RX/TX COUNTER_STOP bits share a value (FreeBSD
	 * does the same) — confirm against if_skreg.h. */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Ack any pending XMAC interrupt and mask everything. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (cd->sk_rx_mbuf[i] != NULL) {
			/* Jumbo buffers come from our own pool, not a dmap. */
			if (!sc_if->sk_use_jumbo) {
				bus_dmamap_unload(cd->sk_rx_dtag,
						  cd->sk_rx_dmap[i]);
			}
			m_freem(cd->sk_rx_mbuf[i]);
			cd->sk_rx_mbuf[i] = NULL;
		}
	}
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (cd->sk_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(cd->sk_tx_dtag, cd->sk_tx_dmap[i]);
			m_freem(cd->sk_tx_mbuf[i]);
			cd->sk_tx_mbuf[i] = NULL;
		}
	}
}
2895 
2896 #ifdef SK_DEBUG
2897 static void
2898 sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
2899 {
2900 #define DESC_PRINT(X)					\
2901 	if (X)					\
2902 		kprintf("txdesc[%d]." #X "=%#x\n",	\
2903 		       idx, X);
2904 
2905 	DESC_PRINT(le32toh(desc->sk_ctl));
2906 	DESC_PRINT(le32toh(desc->sk_next));
2907 	DESC_PRINT(le32toh(desc->sk_data_lo));
2908 	DESC_PRINT(le32toh(desc->sk_data_hi));
2909 	DESC_PRINT(le32toh(desc->sk_xmac_txstat));
2910 	DESC_PRINT(le16toh(desc->sk_rsvd0));
2911 	DESC_PRINT(le16toh(desc->sk_csum_startval));
2912 	DESC_PRINT(le16toh(desc->sk_csum_startpos));
2913 	DESC_PRINT(le16toh(desc->sk_csum_writepos));
2914 	DESC_PRINT(le16toh(desc->sk_rsvd1));
2915 #undef PRINT
2916 }
2917 
/*
 * Debug helper (SK_DEBUG only): hex/ASCII dump of a buffer, 16 bytes
 * per row — offset, hex bytes (extra gap after the 8th), then the
 * printable-ASCII rendering.
 */
static void
sk_dump_bytes(const char *data, int len)
{
	int row, col, nbytes;

	for (row = 0; row < len; row += 16) {
		nbytes = len - row;
		if (nbytes > 16)
			nbytes = 16;

		/* Row offset. */
		kprintf("%08x  ", row);

		/* Hex column: real bytes, then blank padding out to 16. */
		for (col = 0; col < 16; col++) {
			if (col < nbytes) {
				kprintf("%02x ", data[row + col] & 0xff);
				if (col == 7)
					kprintf(" ");
			} else {
				kprintf("   ");
			}
		}
		kprintf("  ");

		/* ASCII column: non-printable bytes shown as spaces. */
		for (col = 0; col < nbytes; col++) {
			int ch = data[row + col] & 0xff;

			kprintf("%c", (' ' <= ch && ch <= '~') ? ch : ' ');
		}
		kprintf("\n");

		/* A short row is necessarily the last one. */
		if (nbytes < 16)
			break;
	}
}
2949 
2950 static void
2951 sk_dump_mbuf(struct mbuf *m)
2952 {
2953 	int count = m->m_pkthdr.len;
2954 
2955 	kprintf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);
2956 
2957 	while (count > 0 && m) {
2958 		kprintf("m=%p, m->m_data=%p, m->m_len=%d\n",
2959 		       m, m->m_data, m->m_len);
2960 		sk_dump_bytes(mtod(m, char *), m->m_len);
2961 
2962 		count -= m->m_len;
2963 		m = m->m_next;
2964 	}
2965 }
2966 #endif
2967 
2968 /*
2969  * Allocate jumbo buffer storage. The SysKonnect adapters support
2970  * "jumbograms" (9K frames), although SysKonnect doesn't currently
2971  * use them in their drivers. In order for us to use them, we need
2972  * large 9K receive buffers, however standard mbuf clusters are only
2973  * 2048 bytes in size. Consequently, we need to allocate and manage
2974  * our own jumbo buffer pool. Fortunately, this does not require an
2975  * excessive amount of additional code.
2976  */
/*
 * Allocate the jumbo-frame buffer pool: one coherent DMA allocation of
 * SK_JMEM bytes, carved into SK_JSLOTS slots of SK_JLEN bytes each.
 * Every slot is put on the free list.  Returns 0 on success or a
 * bus_dma error code (pool state untouched on failure).
 */
static int
sk_jpool_alloc(device_t dev)
{
	struct sk_if_softc *sc_if = device_get_softc(dev);
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	bus_dmamem_t dmem;
	bus_addr_t paddr;
	caddr_t buf;
	int error, i;

	/* Serializer protects the free list against the rx/free paths. */
	lwkt_serialize_init(&cd->sk_jpool_serializer);

	error = bus_dmamem_coherent(cd->sk_buf_dtag, PAGE_SIZE /* XXX */, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    SK_JMEM, BUS_DMA_WAITOK, &dmem);
	if (error) {
		device_printf(dev, "can't allocate jumbo frame pool\n");
		return error;
	}
	cd->sk_jpool_dtag = dmem.dmem_tag;
	cd->sk_jpool_dmap = dmem.dmem_map;
	cd->sk_jpool = dmem.dmem_addr;
	paddr = dmem.dmem_busaddr;

	SLIST_INIT(&cd->sk_jpool_free_ent);
	buf = cd->sk_jpool;

	/*
	 * Now divide it up into SK_JLEN pieces.
	 */
	for (i = 0; i < SK_JSLOTS; i++) {
		struct sk_jpool_entry *entry = &cd->sk_jpool_ent[i];

		entry->sc_if = sc_if;
		entry->inuse = 0;
		entry->slot = i;
		entry->buf = buf;		/* KVA of this slot */
		entry->paddr = paddr;		/* bus address of this slot */

		SLIST_INSERT_HEAD(&cd->sk_jpool_free_ent, entry, entry_next);

		buf += SK_JLEN;
		paddr += SK_JLEN;
	}
	return 0;
}
3023 
/*
 * Release the jumbo buffer pool allocated by sk_jpool_alloc().
 * Safe to call when the pool was never allocated (dtag is NULL).
 * Clears sk_jpool_dtag so a second call is a no-op.
 */
static void
sk_jpool_free(struct sk_if_softc *sc_if)
{
	struct sk_chain_data *cd = &sc_if->sk_cdata;

	if (cd->sk_jpool_dtag != NULL) {
		bus_dmamap_unload(cd->sk_jpool_dtag, cd->sk_jpool_dmap);
		bus_dmamem_free(cd->sk_jpool_dtag, cd->sk_jpool,
				cd->sk_jpool_dmap);
		bus_dma_tag_destroy(cd->sk_jpool_dtag);
		cd->sk_jpool_dtag = NULL;
	}
}
3037 
3038 static int
3039 sk_dma_alloc(device_t dev)
3040 {
3041 	struct sk_if_softc *sc_if = device_get_softc(dev);
3042 	struct sk_chain_data *cd = &sc_if->sk_cdata;
3043 	struct sk_ring_data *rd = &sc_if->sk_rdata;
3044 	bus_dmamem_t dmem;
3045 	int i, j, error;
3046 
3047 	/* Create parent DMA tag */
3048 	error = bus_dma_tag_create(NULL, 1, 0,
3049 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3050 				   NULL, NULL,
3051 				   BUS_SPACE_MAXSIZE_32BIT, 0,
3052 				   BUS_SPACE_MAXSIZE_32BIT,
3053 				   0, &sc_if->sk_parent_dtag);
3054 	if (error) {
3055 		device_printf(dev, "can't create parent DMA tag\n");
3056 		return error;
3057 	}
3058 
3059 	/* Create top level ring DMA tag */
3060 	error = bus_dma_tag_create(sc_if->sk_parent_dtag,
3061 				   1, SK_RING_BOUNDARY,
3062 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3063 				   NULL, NULL,
3064 				   BUS_SPACE_MAXSIZE_32BIT, 0,
3065 				   BUS_SPACE_MAXSIZE_32BIT,
3066 				   0, &rd->sk_ring_dtag);
3067 	if (error) {
3068 		device_printf(dev, "can't create ring DMA tag\n");
3069 		return error;
3070 	}
3071 
3072 	/* Create top level buffer DMA tag */
3073 	error = bus_dma_tag_create(sc_if->sk_parent_dtag, 1, 0,
3074 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3075 				   NULL, NULL,
3076 				   BUS_SPACE_MAXSIZE_32BIT, 0,
3077 				   BUS_SPACE_MAXSIZE_32BIT,
3078 				   0, &cd->sk_buf_dtag);
3079 	if (error) {
3080 		device_printf(dev, "can't create buf DMA tag\n");
3081 		return error;
3082 	}
3083 
3084 	/* Allocate the TX descriptor queue. */
3085 	error = bus_dmamem_coherent(rd->sk_ring_dtag, SK_RING_ALIGN, 0,
3086 				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3087 				    SK_TX_RING_SIZE,
3088 				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3089 	if (error) {
3090 		device_printf(dev, "can't allocate TX ring\n");
3091 		return error;
3092 	}
3093 	rd->sk_tx_ring_dtag = dmem.dmem_tag;
3094 	rd->sk_tx_ring_dmap = dmem.dmem_map;
3095 	rd->sk_tx_ring = dmem.dmem_addr;
3096 	rd->sk_tx_ring_paddr = dmem.dmem_busaddr;
3097 
3098 	/* Allocate the RX descriptor queue. */
3099 	error = bus_dmamem_coherent(rd->sk_ring_dtag, SK_RING_ALIGN, 0,
3100 				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3101 				    SK_RX_RING_SIZE,
3102 				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3103 	if (error) {
3104 		device_printf(dev, "can't allocate TX ring\n");
3105 		return error;
3106 	}
3107 	rd->sk_rx_ring_dtag = dmem.dmem_tag;
3108 	rd->sk_rx_ring_dmap = dmem.dmem_map;
3109 	rd->sk_rx_ring = dmem.dmem_addr;
3110 	rd->sk_rx_ring_paddr = dmem.dmem_busaddr;
3111 
3112 	/* Try to allocate memory for jumbo buffers. */
3113 	error = sk_jpool_alloc(dev);
3114 	if (error) {
3115 		device_printf(dev, "jumbo buffer allocation failed\n");
3116 		return error;
3117 	}
3118 
3119 	/* Create DMA tag for TX. */
3120 	error = bus_dma_tag_create(cd->sk_buf_dtag, 1, 0,
3121 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3122 				   NULL, NULL,
3123 				   SK_JLEN, SK_NTXSEG, SK_JLEN,
3124 				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
3125 				   BUS_DMA_ONEBPAGE,
3126 				   &cd->sk_tx_dtag);
3127 	if (error) {
3128 		device_printf(dev, "can't create TX DMA tag\n");
3129 		return error;
3130 	}
3131 
3132 	/* Create DMA maps for TX. */
3133 	for (i = 0; i < SK_TX_RING_CNT; i++) {
3134 		error = bus_dmamap_create(cd->sk_tx_dtag,
3135 					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
3136 					  &cd->sk_tx_dmap[i]);
3137 		if (error) {
3138 			device_printf(dev, "can't create %dth TX DMA map\n", i);
3139 
3140 			for (j = 0; j < i; ++j) {
3141 				bus_dmamap_destroy(cd->sk_tx_dtag,
3142 						   cd->sk_tx_dmap[i]);
3143 			}
3144 			bus_dma_tag_destroy(cd->sk_tx_dtag);
3145 			cd->sk_tx_dtag = NULL;
3146 			return error;
3147 		}
3148 	}
3149 
3150 	/* Create DMA tag for RX. */
3151 	error = bus_dma_tag_create(cd->sk_buf_dtag, 1, 0,
3152 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3153 				   NULL, NULL,
3154 				   MCLBYTES, 1, MCLBYTES,
3155 				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
3156 				   &cd->sk_rx_dtag);
3157 	if (error) {
3158 		device_printf(dev, "can't create RX DMA tag\n");
3159 		return error;
3160 	}
3161 
3162 	/* Create a spare RX DMA map. */
3163 	error = bus_dmamap_create(cd->sk_rx_dtag, BUS_DMA_WAITOK,
3164 				  &cd->sk_rx_dmap_tmp);
3165 	if (error) {
3166 		device_printf(dev, "can't create spare RX DMA map\n");
3167 		bus_dma_tag_destroy(cd->sk_rx_dtag);
3168 		cd->sk_rx_dtag = NULL;
3169 		return error;
3170 	}
3171 
3172 	/* Create DMA maps for RX. */
3173 	for (i = 0; i < SK_RX_RING_CNT; ++i) {
3174 		error = bus_dmamap_create(cd->sk_rx_dtag, BUS_DMA_WAITOK,
3175 					  &cd->sk_rx_dmap[i]);
3176 		if (error) {
3177 			device_printf(dev, "can't create %dth RX DMA map\n", i);
3178 
3179 			for (j = 0; j < i; ++j) {
3180 				bus_dmamap_destroy(cd->sk_rx_dtag,
3181 						   cd->sk_rx_dmap[i]);
3182 			}
3183 			bus_dmamap_destroy(cd->sk_rx_dtag, cd->sk_rx_dmap_tmp);
3184 			bus_dma_tag_destroy(cd->sk_rx_dtag);
3185 			cd->sk_rx_dtag = NULL;
3186 			return error;
3187 		}
3188 	}
3189 	return 0;
3190 }
3191 
/*
 * Tear down every DMA resource created by sk_dma_alloc(), in reverse
 * order of creation: per-buffer maps before their tags, child tags
 * before the parent tag.  sk_stop() must already have unloaded and
 * freed all mbufs (asserted below).  Each step is guarded by a NULL
 * check so partial allocation failures are handled safely.
 */
static void
sk_dma_free(device_t dev)
{
	struct sk_if_softc *sc_if = device_get_softc(dev);
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	struct sk_ring_data *rd = &sc_if->sk_rdata;
	int i;

	/* TX buffer maps and their tag. */
	if (cd->sk_tx_dtag != NULL) {
		for (i = 0; i < SK_TX_RING_CNT; ++i) {
			KASSERT(cd->sk_tx_mbuf[i] == NULL,
				("sk_stop() is not called before %s()",
				 __func__));
			bus_dmamap_destroy(cd->sk_tx_dtag, cd->sk_tx_dmap[i]);
		}
		bus_dma_tag_destroy(cd->sk_tx_dtag);
	}

	/* RX buffer maps (including the spare map) and their tag. */
	if (cd->sk_rx_dtag != NULL) {
		for (i = 0; i < SK_RX_RING_CNT; ++i) {
			KASSERT(cd->sk_rx_mbuf[i] == NULL,
				("sk_stop() is not called before %s()",
				 __func__));
			bus_dmamap_destroy(cd->sk_rx_dtag, cd->sk_rx_dmap[i]);
		}
		bus_dmamap_destroy(cd->sk_rx_dtag, cd->sk_rx_dmap_tmp);
		bus_dma_tag_destroy(cd->sk_rx_dtag);
	}

	/* Jumbo buffer pool (no-op if it was never allocated). */
	sk_jpool_free(sc_if);

	/* RX descriptor ring memory. */
	if (rd->sk_rx_ring_dtag != NULL) {
		bus_dmamap_unload(rd->sk_rx_ring_dtag, rd->sk_rx_ring_dmap);
		bus_dmamem_free(rd->sk_rx_ring_dtag, rd->sk_rx_ring,
				rd->sk_rx_ring_dmap);
		bus_dma_tag_destroy(rd->sk_rx_ring_dtag);
	}

	/* TX descriptor ring memory. */
	if (rd->sk_tx_ring_dtag != NULL) {
		bus_dmamap_unload(rd->sk_tx_ring_dtag, rd->sk_tx_ring_dmap);
		bus_dmamem_free(rd->sk_tx_ring_dtag, rd->sk_tx_ring,
				rd->sk_tx_ring_dmap);
		bus_dma_tag_destroy(rd->sk_tx_ring_dtag);
	}

	/* Top-level tags last, parent after its children. */
	if (rd->sk_ring_dtag != NULL)
		bus_dma_tag_destroy(rd->sk_ring_dtag);
	if (cd->sk_buf_dtag != NULL)
		bus_dma_tag_destroy(cd->sk_buf_dtag);
	if (sc_if->sk_parent_dtag != NULL)
		bus_dma_tag_destroy(sc_if->sk_parent_dtag);
}
3244 
3245 static int
3246 skc_sysctl_imtime(SYSCTL_HANDLER_ARGS)
3247 {
3248 	struct sk_softc *sc = arg1;
3249 	struct lwkt_serialize *slize = &sc->sk_serializer;
3250 	int error = 0, v;
3251 
3252 	lwkt_serialize_enter(slize);
3253 
3254 	v = sc->sk_imtime;
3255 	error = sysctl_handle_int(oidp, &v, 0, req);
3256 	if (error || req->newptr == NULL)
3257 		goto back;
3258 	if (v <= 0) {
3259 		error = EINVAL;
3260 		goto back;
3261 	}
3262 
3263 	if (sc->sk_imtime != v) {
3264 		sc->sk_imtime = v;
3265 		sk_win_write_4(sc, SK_IMTIMERINIT,
3266 			       SK_IM_USECS(sc, sc->sk_imtime));
3267 
3268 		/*
3269 		 * Force interrupt moderation timer to
3270 		 * reload new value.
3271 		 */
3272 		sk_win_write_4(sc, SK_IMTIMER, 0);
3273 	}
3274 back:
3275 	lwkt_serialize_exit(slize);
3276 	return error;
3277 }
3278