xref: /dragonfly/sys/bus/u4b/net/if_axe.c (revision 0429c130)
1 /*-
2  * Copyright (c) 1997, 1998, 1999, 2000-2003
3  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * ASIX Electronics AX88172/AX88178/AX88772 USB 2.0 ethernet driver.
35  * Used in the LinkSys USB200M and various other adapters.
36  *
37  * Manuals available from:
38  * http://www.asix.com.tw/datasheet/mac/Ax88172.PDF
39  * Note: you need the manual for the AX88170 chip (USB 1.x ethernet
40  * controller) to find the definitions for the RX control register.
41  * http://www.asix.com.tw/datasheet/mac/Ax88170.PDF
42  *
43  * Written by Bill Paul <wpaul@windriver.com>
44  * Senior Engineer
45  * Wind River Systems
46  */
47 
48 /*
49  * The AX88172 provides USB ethernet support at 10 and 100Mbps.
50  * It uses an external PHY (reference designs use a RealTek chip),
51  * and has a 64-bit multicast hash filter. There is some information
52  * missing from the manual which one needs to know in order to make
53  * the chip function:
54  *
55  * - You must set bit 7 in the RX control register, otherwise the
56  *   chip won't receive any packets.
57  * - You must initialize all 3 IPG registers, or you won't be able
58  *   to send any packets.
59  *
60  * Note that this device appears to only support loading the station
61  * address via autoload from the EEPROM (i.e. there's no way to manually
62  * set it).
63  *
64  * (Adam Weinberger wanted me to name this driver if_gir.c.)
65  */
66 
67 /*
68  * Ax88178 and Ax88772 support backported from the OpenBSD driver.
69  * 2007/02/12, J.R. Oldroyd, fbsd@opal.com
70  *
71  * Manual here:
72  * http://www.asix.com.tw/FrootAttach/datasheet/AX88178_datasheet_Rev10.pdf
73  * http://www.asix.com.tw/FrootAttach/datasheet/AX88772_datasheet_Rev10.pdf
74  */
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/bus.h>
79 #include <sys/condvar.h>
80 #include <sys/endian.h>
81 #include <sys/kernel.h>
82 #include <sys/lock.h>
83 #include <sys/malloc.h>
84 #include <sys/mbuf.h>
85 #include <sys/module.h>
86 #include <sys/socket.h>
87 #include <sys/sockio.h>
88 #include <sys/sysctl.h>
89 
90 #include <net/if.h>
91 #include <net/ethernet.h>
92 #include <net/if_types.h>
93 #include <net/if_media.h>
94 #include <net/vlan/if_vlan_var.h>
95 #include <net/ifq_var.h>
96 
97 #include <dev/netif/mii_layer/mii.h>
98 #include <dev/netif/mii_layer/miivar.h>
99 
100 #include <bus/u4b/usb.h>
101 #include <bus/u4b/usbdi.h>
102 #include <bus/u4b/usbdi_util.h>
103 #include <bus/u4b/usbdevs.h>
104 
105 #define	USB_DEBUG_VAR axe_debug
106 #include <bus/u4b/usb_debug.h>
107 #include <bus/u4b/usb_process.h>
108 
109 #include <bus/u4b/net/usb_ethernet.h>
110 #include <bus/u4b/net/if_axereg.h>
111 
112 /*
113  * AXE_178_MAX_FRAME_BURST
114  * max frame burst size for Ax88178 and Ax88772
115  *	0	2048 bytes
116  *	1	4096 bytes
117  *	2	8192 bytes
118  *	3	16384 bytes
119  * use the largest your system can handle without USB stalling.
120  *
121  * NB: 88772 parts appear to generate lots of input errors with
122  * a 2K rx buffer, and 8K is only slightly faster than 4K on an
123  * EHCI port on a T42, so change at your own risk.
124  */
125 #define AXE_178_MAX_FRAME_BURST	1
126 
127 #define	AXE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
128 
129 #ifdef USB_DEBUG
130 static int axe_debug = 0;
131 
132 static SYSCTL_NODE(_hw_usb, OID_AUTO, axe, CTLFLAG_RW, 0, "USB axe");
133 SYSCTL_INT(_hw_usb_axe, OID_AUTO, debug, CTLFLAG_RW, &axe_debug, 0,
134     "Debug level");
135 #endif
136 
137 /*
138  * Various supported device vendors/products.
139  */
140 static const STRUCT_USB_HOST_ID axe_devs[] = {
141 #define	AXE_DEV(v,p,i) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) }
142 	AXE_DEV(ABOCOM, UF200, 0),
143 	AXE_DEV(ACERCM, EP1427X2, 0),
144 	AXE_DEV(APPLE, ETHERNET, AXE_FLAG_772),
145 	AXE_DEV(ASIX, AX88172, 0),
146 	AXE_DEV(ASIX, AX88178, AXE_FLAG_178),
147 	AXE_DEV(ASIX, AX88772, AXE_FLAG_772),
148 	AXE_DEV(ASIX, AX88772A, AXE_FLAG_772A),
149 	AXE_DEV(ASIX, AX88772B, AXE_FLAG_772B),
150 	AXE_DEV(ASIX, AX88772B_1, AXE_FLAG_772B),
151 	AXE_DEV(ATEN, UC210T, 0),
152 	AXE_DEV(BELKIN, F5D5055, AXE_FLAG_178),
153 	AXE_DEV(BILLIONTON, USB2AR, 0),
154 	AXE_DEV(CISCOLINKSYS, USB200MV2, AXE_FLAG_772A),
155 	AXE_DEV(COREGA, FETHER_USB2_TX, 0),
156 	AXE_DEV(DLINK, DUBE100, 0),
157 	AXE_DEV(DLINK, DUBE100B1, AXE_FLAG_772),
158 	AXE_DEV(GOODWAY, GWUSB2E, 0),
159 	AXE_DEV(IODATA, ETGUS2, AXE_FLAG_178),
160 	AXE_DEV(JVC, MP_PRX1, 0),
161 	AXE_DEV(LINKSYS2, USB200M, 0),
162 	AXE_DEV(LINKSYS4, USB1000, AXE_FLAG_178),
163 	AXE_DEV(LOGITEC, LAN_GTJU2A, AXE_FLAG_178),
164 	AXE_DEV(MELCO, LUAU2KTX, 0),
165 	AXE_DEV(MELCO, LUA3U2AGT, AXE_FLAG_178),
166 	AXE_DEV(NETGEAR, FA120, 0),
167 	AXE_DEV(OQO, ETHER01PLUS, AXE_FLAG_772),
168 	AXE_DEV(PLANEX3, GU1000T, AXE_FLAG_178),
169 	AXE_DEV(SITECOM, LN029, 0),
170 	AXE_DEV(SITECOMEU, LN028, AXE_FLAG_178),
171 	AXE_DEV(SYSTEMTALKS, SGCX2UL, 0),
172 #undef AXE_DEV
173 };
174 
175 static device_probe_t axe_probe;
176 static device_attach_t axe_attach;
177 static device_detach_t axe_detach;
178 
179 static usb_callback_t axe_bulk_read_callback;
180 static usb_callback_t axe_bulk_write_callback;
181 
182 static miibus_readreg_t axe_miibus_readreg;
183 static miibus_writereg_t axe_miibus_writereg;
184 static miibus_statchg_t axe_miibus_statchg;
185 
186 /*
187 static int axe_miibus_readreg(device_t dev, int phy, int reg);
188 static int axe_miibus_writereg(device_t dev, int phy, int reg, int val);
189 static void axe_miibus_statchg(device_t dev);
190 */
191 static uether_fn_t axe_attach_post;
192 static uether_fn_t axe_init;
193 static uether_fn_t axe_stop;
194 static uether_fn_t axe_start;
195 static uether_fn_t axe_tick;
196 static uether_fn_t axe_setmulti;
197 static uether_fn_t axe_setpromisc;
198 
199 static int	axe_attach_post_sub(struct usb_ether *);
200 static int	axe_ifmedia_upd(struct ifnet *);
201 static void	axe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
202 static int	axe_cmd(struct axe_softc *, int, int, int, void *);
203 static void	axe_ax88178_init(struct axe_softc *);
204 static void	axe_ax88772_init(struct axe_softc *);
205 static void	axe_ax88772_phywake(struct axe_softc *);
206 static void	axe_ax88772a_init(struct axe_softc *);
207 static void	axe_ax88772b_init(struct axe_softc *);
208 static int	axe_get_phyno(struct axe_softc *, int);
209 static int	axe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
210 static int	axe_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
211 static int	axe_rxeof(struct usb_ether *, struct usb_page_cache *,
212 		    unsigned int offset, unsigned int, struct axe_csum_hdr *);
213 static void	axe_csum_cfg(struct usb_ether *);
214 
215 static const struct usb_config axe_config[AXE_N_TRANSFER] = {
216 
217 	[AXE_BULK_DT_WR] = {
218 		.type = UE_BULK,
219 		.endpoint = UE_ADDR_ANY,
220 		.direction = UE_DIR_OUT,
221 		.frames = 16,
222 		.bufsize = 16 * MCLBYTES,
223 		.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
224 		.callback = axe_bulk_write_callback,
225 		.timeout = 10000,	/* 10 seconds */
226 	},
227 
228 	[AXE_BULK_DT_RD] = {
229 		.type = UE_BULK,
230 		.endpoint = UE_ADDR_ANY,
231 		.direction = UE_DIR_IN,
232 		.bufsize = 16384,	/* bytes */
233 		.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
234 		.callback = axe_bulk_read_callback,
235 		.timeout = 0,	/* no timeout */
236 	},
237 };
238 
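/*
 * Maximum frame burst configurations for the AX88772B.  axe_init()
 * writes the selected entry (AX88772B_MFB_16K) to the chip with
 * AXE_772B_CMD_RXCTL_WRITE_CFG; the last column appears to be the
 * resulting buffer size in bytes.
 */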
239 static const struct ax88772b_mfb ax88772b_mfb_table[] = {
240 	{ 0x8000, 0x8001, 2048 },
241 	{ 0x8100, 0x8147, 4096 },
242 	{ 0x8200, 0x81EB, 6144 },
243 	{ 0x8300, 0x83D7, 8192 },
244 	{ 0x8400, 0x851E, 16384 },
245 	{ 0x8500, 0x8666, 20480 },
246 	{ 0x8600, 0x87AE, 24576 },
247 	{ 0x8700, 0x8A3D, 32768 }
248 };
249 
250 static device_method_t axe_methods[] = {
251 	/* Device interface */
252 	DEVMETHOD(device_probe, axe_probe),
253 	DEVMETHOD(device_attach, axe_attach),
254 	DEVMETHOD(device_detach, axe_detach),
255 
256 	/* MII interface */
257 	DEVMETHOD(miibus_readreg, axe_miibus_readreg),
258 	DEVMETHOD(miibus_writereg, axe_miibus_writereg),
259 	DEVMETHOD(miibus_statchg, axe_miibus_statchg),
260 
261 	DEVMETHOD_END
262 };
263 
264 static driver_t axe_driver = {
265 	.name = "axe",
266 	.methods = axe_methods,
267 	.size = sizeof(struct axe_softc),
268 };
269 
270 static devclass_t axe_devclass;
271 
272 DRIVER_MODULE(axe, uhub, axe_driver, axe_devclass, NULL, NULL);
273 DRIVER_MODULE(miibus, axe, miibus_driver, miibus_devclass, NULL, NULL);
274 MODULE_DEPEND(axe, uether, 1, 1, 1);
275 MODULE_DEPEND(axe, usb, 1, 1, 1);
276 MODULE_DEPEND(axe, ether, 1, 1, 1);
277 MODULE_DEPEND(axe, miibus, 1, 1, 1);
278 MODULE_VERSION(axe, 1);
279 
280 static const struct usb_ether_methods axe_ue_methods = {
281 	.ue_attach_post = axe_attach_post,
282 	.ue_attach_post_sub = axe_attach_post_sub,
283 	.ue_start = axe_start,
284 	.ue_init = axe_init,
285 	.ue_stop = axe_stop,
286 	.ue_tick = axe_tick,
287 	.ue_setmulti = axe_setmulti,
288 	.ue_setpromisc = axe_setpromisc,
289 	.ue_mii_upd = axe_ifmedia_upd,
290 	.ue_mii_sts = axe_ifmedia_sts,
291 };
292 
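/*
 * Issue a vendor-specific control request to the adapter.  The cmd word
 * encodes the request direction, the bRequest code and the transfer
 * length (see the AXE_CMD_IS_WRITE/AXE_CMD_CMD/AXE_CMD_LEN macros); the
 * register index and value are passed in wIndex and wValue.
 */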
293 static int
294 axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf)
295 {
296 	struct usb_device_request req;
297 	usb_error_t err;
298 
299 	AXE_LOCK_ASSERT(sc);
300 
301 	req.bmRequestType = (AXE_CMD_IS_WRITE(cmd) ?
302 	    UT_WRITE_VENDOR_DEVICE :
303 	    UT_READ_VENDOR_DEVICE);
304 	req.bRequest = AXE_CMD_CMD(cmd);
305 	USETW(req.wValue, val);
306 	USETW(req.wIndex, index);
307 	USETW(req.wLength, AXE_CMD_LEN(cmd));
308 
309 	err = uether_do_request(&sc->sc_ue, &req, buf, 1000);
310 
311 	return (err);
312 }
313 
314 static int
315 axe_miibus_readreg(device_t dev, int phy, int reg)
316 {
317 	struct axe_softc *sc = device_get_softc(dev);
318 	uint16_t val;
319 	int locked;
320 
321 	locked = lockowned(&sc->sc_lock);
322 
323 	if (phy != sc->sc_phyno) {
324 		return (0);
325 	}
326 
327 	if (!locked)
328 		AXE_LOCK(sc);
329 
330 	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
331 	axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, &val);
332 	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
333 	DPRINTFN(9, "reg     %x\n", reg);
334 	DPRINTFN(9, "pre val %x\n", val);
335 	val = le16toh(val);
336 	DPRINTFN(9, "pos val %x\n", val);
337 
338 	if (AXE_IS_772(sc) && reg == MII_BMSR) {
339 		/*
340 		 * The BMSR of the AX88772 indicates that it supports extended
341 		 * capability, but the extended status register is
342 		 * reserved for the embedded ethernet PHY, so clear the
343 		 * extended capability bit of the BMSR.
344 		 */
345 		val &= ~BMSR_EXTCAP;
346 	}
347 
348 	if (!locked)
349 		AXE_UNLOCK(sc);
350 	return (val);
351 }
352 
353 static int
354 axe_miibus_writereg(device_t dev, int phy, int reg, int val)
355 {
356 	struct axe_softc *sc = device_get_softc(dev);
357 	int locked;
358 
359 	val = htole32(val);
360 	locked = lockowned(&sc->sc_lock);
361 	if (!locked)
362 		AXE_LOCK(sc);
363 
364 	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
365 	axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, &val);
366 	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
367 
368 	if (!locked)
369 		AXE_UNLOCK(sc);
370 	return (0);
371 }
372 
373 static void
374 axe_miibus_statchg(device_t dev)
375 {
376 	struct axe_softc *sc = device_get_softc(dev);
377 	struct mii_data *mii = GET_MII(sc);
378 	struct ifnet *ifp;
379 	uint16_t val;
380 	int err, locked;
381 
382 	locked = lockowned(&sc->sc_lock);
383 	if (!locked)
384 		AXE_LOCK(sc);
385 
386 	ifp = uether_getifp(&sc->sc_ue);
387 	if (mii == NULL || ifp == NULL ||
388 	    (ifp->if_flags & IFF_RUNNING) == 0)
389 		goto done;
390 
391 	sc->sc_flags &= ~AXE_FLAG_LINK;
392 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
393 	    (IFM_ACTIVE | IFM_AVALID)) {
394 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
395 		case IFM_10_T:
396 		case IFM_100_TX:
397 			sc->sc_flags |= AXE_FLAG_LINK;
398 			break;
399 		case IFM_1000_T:
400 			if ((sc->sc_flags & AXE_FLAG_178) == 0)
401 				break;
402 			sc->sc_flags |= AXE_FLAG_LINK;
403 			DPRINTFN(11, "miibus_statchg: link should be up\n");
404 			break;
405 		default:
406 			break;
407 		}
408 	} else {
409 		DPRINTFN(11, "miibus_statchg: not active or not valid: %x\n", mii->mii_media_status);
410 	}
411 
412 	/* Lost link, do nothing. */
413 	if ((sc->sc_flags & AXE_FLAG_LINK) == 0) {
414 		goto done;
415 	}
416 
417 	val = 0;
418 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
419 		val |= AXE_MEDIA_FULL_DUPLEX;
420 		if (AXE_IS_178_FAMILY(sc)) {
421 			if ((IFM_OPTIONS(mii->mii_media_active) &
422 			    IFM_ETH_TXPAUSE) != 0)
423 				val |= AXE_178_MEDIA_TXFLOW_CONTROL_EN;
424 			if ((IFM_OPTIONS(mii->mii_media_active) &
425 			    IFM_ETH_RXPAUSE) != 0)
426 				val |= AXE_178_MEDIA_RXFLOW_CONTROL_EN;
427 		}
428 	}
429 	if (AXE_IS_178_FAMILY(sc)) {
430 		val |= AXE_178_MEDIA_RX_EN | AXE_178_MEDIA_MAGIC;
431 		if ((sc->sc_flags & AXE_FLAG_178) != 0)
432 			val |= AXE_178_MEDIA_ENCK;
433 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
434 		case IFM_1000_T:
435 			val |= AXE_178_MEDIA_GMII | AXE_178_MEDIA_ENCK;
436 			break;
437 		case IFM_100_TX:
438 			val |= AXE_178_MEDIA_100TX;
439 			break;
440 		case IFM_10_T:
441 			/* doesn't need to be handled */
442 			break;
443 		}
444 	}
445 	err = axe_cmd(sc, AXE_CMD_WRITE_MEDIA, 0, val, NULL);
446 	if (err)
447 		device_printf(dev, "media change failed, error %d\n", err);
448 done:
449 	if (!locked)
450 		AXE_UNLOCK(sc);
451 }
452 
453 /*
454  * Set media options.
455  */
456 static int
457 axe_ifmedia_upd(struct ifnet *ifp)
458 {
459 	struct axe_softc *sc = ifp->if_softc;
460 	struct mii_data *mii = GET_MII(sc);
461 	struct mii_softc *miisc;
462 	int error;
463 
464 	AXE_LOCK_ASSERT(sc);
465 
466 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
467 		mii_phy_reset(miisc);
468 	error = mii_mediachg(mii);
469 	return (error);
470 }
471 
472 /*
473  * Report current media status.
474  */
475 static void
476 axe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
477 {
478 	struct axe_softc *sc = ifp->if_softc;
479 	struct mii_data *mii = GET_MII(sc);
480 
481 	AXE_LOCK(sc);
482 	mii_pollstat(mii);
483 	ifmr->ifm_active = mii->mii_media_active;
484 	ifmr->ifm_status = mii->mii_media_status;
485 	AXE_UNLOCK(sc);
486 }
487 
488 static void
489 axe_setmulti(struct usb_ether *ue)
490 {
491 	struct axe_softc *sc = uether_getsc(ue);
492 	struct ifnet *ifp = uether_getifp(ue);
493 	struct ifmultiaddr *ifma;
494 	uint32_t h = 0;
495 	uint16_t rxmode;
496 	uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
497 
498 	AXE_LOCK_ASSERT(sc);
499 
500 	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode);
501 	rxmode = le16toh(rxmode);
502 
503 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
504 		rxmode |= AXE_RXCMD_ALLMULTI;
505 		axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
506 		return;
507 	}
508 	rxmode &= ~AXE_RXCMD_ALLMULTI;
509 
510 	/* if_maddr_rlock(ifp); */
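	/*
	 * Build the 64-bit multicast hash filter: the upper 6 bits of the
	 * big-endian CRC32 of each address select one of the 64 filter
	 * bits (bit h % 8 of byte h / 8 in hashtbl).
	 */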
511 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
512 	{
513 		if (ifma->ifma_addr->sa_family != AF_LINK)
514 			continue;
515 		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
516 		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
517 		hashtbl[h / 8] |= 1 << (h % 8);
518 	}
519 /*	if_maddr_runlock(ifp); */
520 
521 	axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, (void *)&hashtbl);
522 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
523 }
524 
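/*
 * Extract a usable PHY address from the PHY ID bytes read at attach
 * time.  Only the PHY_TYPE_100_HOME and PHY_TYPE_GIG entries yield a
 * valid address; for the other types -1 is returned so the caller can
 * fall back to the secondary entry or to address 0.
 */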
525 static int
526 axe_get_phyno(struct axe_softc *sc, int sel)
527 {
528 	int phyno;
529 
530 	switch (AXE_PHY_TYPE(sc->sc_phyaddrs[sel])) {
531 	case PHY_TYPE_100_HOME:
532 	case PHY_TYPE_GIG:
533 		phyno = AXE_PHY_NO(sc->sc_phyaddrs[sel]);
534 		break;
535 	case PHY_TYPE_SPECIAL:
536 		/* FALLTHROUGH */
537 	case PHY_TYPE_RSVD:
538 		/* FALLTHROUGH */
539 	case PHY_TYPE_NON_SUP:
540 		/* FALLTHROUGH */
541 	default:
542 		phyno = -1;
543 		break;
544 	}
545 
546 	return (phyno);
547 }
548 
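/*
 * Write a value to the GPIO register and then pause for the given
 * number of ticks.  The chip-specific init routines below use this to
 * program the GPIOs for the attached PHY hardware.
 */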
549 #define	AXE_GPIO_WRITE(x, y)	do {				\
550 	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, (x), NULL);		\
551 	uether_pause(ue, (y));					\
552 } while (0)
553 
554 static void
555 axe_ax88178_init(struct axe_softc *sc)
556 {
557 	struct usb_ether *ue;
558 	int gpio0, ledmode, phymode;
559 	uint16_t eeprom, val;
560 
561 	ue = &sc->sc_ue;
562 	axe_cmd(sc, AXE_CMD_SROM_WR_ENABLE, 0, 0, NULL);
563 	/* XXX magic */
564 	axe_cmd(sc, AXE_CMD_SROM_READ, 0, 0x0017, &eeprom);
565 	eeprom = le16toh(eeprom);
566 	axe_cmd(sc, AXE_CMD_SROM_WR_DISABLE, 0, 0, NULL);
567 
568 	/* if the EEPROM is invalid we have to use GPIO0 */
569 	if (eeprom == 0xffff) {
570 		phymode = AXE_PHY_MODE_MARVELL;
571 		gpio0 = 1;
572 		ledmode = 0;
573 	} else {
574 		phymode = eeprom & 0x7f;
575 		gpio0 = (eeprom & 0x80) ? 0 : 1;
576 		ledmode = eeprom >> 8;
577 	}
578 
579 	if (bootverbose)
580 		device_printf(sc->sc_ue.ue_dev,
581 		    "EEPROM data : 0x%04x, phymode : 0x%02x\n", eeprom,
582 		    phymode);
583 	/* Program GPIOs depending on PHY hardware. */
584 	switch (phymode) {
585 	case AXE_PHY_MODE_MARVELL:
586 		if (gpio0 == 1) {
587 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0_EN,
588 			    hz / 32);
589 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
590 			    hz / 32);
591 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2_EN, hz / 4);
592 			AXE_GPIO_WRITE(AXE_GPIO0_EN | AXE_GPIO2 | AXE_GPIO2_EN,
593 			    hz / 32);
594 		} else {
595 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
596 			    AXE_GPIO1_EN, hz / 3);
597 			if (ledmode == 1) {
598 				AXE_GPIO_WRITE(AXE_GPIO1_EN, hz / 3);
599 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN,
600 				    hz / 3);
601 			} else {
602 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
603 				    AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
604 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
605 				    AXE_GPIO2_EN, hz / 4);
606 				AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN |
607 				    AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
608 			}
609 		}
610 		break;
611 	case AXE_PHY_MODE_CICADA:
612 	case AXE_PHY_MODE_CICADA_V2:
613 	case AXE_PHY_MODE_CICADA_V2_ASIX:
614 		if (gpio0 == 1)
615 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO0 |
616 			    AXE_GPIO0_EN, hz / 32);
617 		else
618 			AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
619 			    AXE_GPIO1_EN, hz / 32);
620 		break;
621 	case AXE_PHY_MODE_AGERE:
622 		AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM | AXE_GPIO1 |
623 		    AXE_GPIO1_EN, hz / 32);
624 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
625 		    AXE_GPIO2_EN, hz / 32);
626 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2_EN, hz / 4);
627 		AXE_GPIO_WRITE(AXE_GPIO1 | AXE_GPIO1_EN | AXE_GPIO2 |
628 		    AXE_GPIO2_EN, hz / 32);
629 		break;
630 	case AXE_PHY_MODE_REALTEK_8211CL:
631 	case AXE_PHY_MODE_REALTEK_8211BN:
632 	case AXE_PHY_MODE_REALTEK_8251CL:
633 		val = gpio0 == 1 ? AXE_GPIO0 | AXE_GPIO0_EN :
634 		    AXE_GPIO1 | AXE_GPIO1_EN;
635 		AXE_GPIO_WRITE(val, hz / 32);
636 		AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
637 		AXE_GPIO_WRITE(val | AXE_GPIO2_EN, hz / 4);
638 		AXE_GPIO_WRITE(val | AXE_GPIO2 | AXE_GPIO2_EN, hz / 32);
639 		if (phymode == AXE_PHY_MODE_REALTEK_8211CL) {
640 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
641 			    0x1F, 0x0005);
642 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
643 			    0x0C, 0x0000);
644 			val = axe_miibus_readreg(ue->ue_dev, sc->sc_phyno,
645 			    0x0001);
646 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
647 			    0x01, val | 0x0080);
648 			axe_miibus_writereg(ue->ue_dev, sc->sc_phyno,
649 			    0x1F, 0x0000);
650 		}
651 		break;
652 	default:
653 		/* Unknown PHY model or no need to program GPIOs. */
654 		break;
655 	}
656 
657 	/* soft reset */
658 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
659 	uether_pause(ue, hz / 4);
660 
661 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
662 	    AXE_SW_RESET_PRL | AXE_178_RESET_MAGIC, NULL);
663 	uether_pause(ue, hz / 4);
664 	/* Enable MII/GMII/RGMII interface to work with external PHY. */
665 	axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0, NULL);
666 	uether_pause(ue, hz / 4);
667 
668 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
669 }
670 
671 static void
672 axe_ax88772_init(struct axe_softc *sc)
673 {
674 	axe_cmd(sc, AXE_CMD_WRITE_GPIO, 0, 0x00b0, NULL);
675 	uether_pause(&sc->sc_ue, hz / 16);
676 
677 	if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) {
678 		/* ask for the embedded PHY */
679 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x01, NULL);
680 		uether_pause(&sc->sc_ue, hz / 64);
681 
682 		/* power down and reset state, pin reset state */
683 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
684 		    AXE_SW_RESET_CLEAR, NULL);
685 		uether_pause(&sc->sc_ue, hz / 16);
686 
687 		/* power down/reset state, pin operating state */
688 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
689 		    AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
690 		uether_pause(&sc->sc_ue, hz / 4);
691 
692 		/* power up, reset */
693 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_PRL, NULL);
694 
695 		/* power up, operating */
696 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
697 		    AXE_SW_RESET_IPRL | AXE_SW_RESET_PRL, NULL);
698 	} else {
699 		/* ask for external PHY */
700 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, 0x00, NULL);
701 		uether_pause(&sc->sc_ue, hz / 64);
702 
703 		/* power down internal PHY */
704 		axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0,
705 		    AXE_SW_RESET_IPPD | AXE_SW_RESET_PRL, NULL);
706 	}
707 
708 	uether_pause(&sc->sc_ue, hz / 4);
709 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
710 }
711 
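/*
 * Wake up the PHY on AX88772-family parts: select the embedded or
 * external PHY as appropriate, then toggle the software reset register
 * to bring the PHY out of power-down and reset.
 */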
712 static void
713 axe_ax88772_phywake(struct axe_softc *sc)
714 {
715 	struct usb_ether *ue;
716 
717 	ue = &sc->sc_ue;
718 	if (sc->sc_phyno == AXE_772_PHY_NO_EPHY) {
719 		/* Manually select internal (embedded) PHY - MAC mode. */
720 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB |
721 		    AXE_SW_PHY_SELECT_EMBEDDED | AXE_SW_PHY_SELECT_SS_MII,
722 		    NULL);
723 		uether_pause(&sc->sc_ue, hz / 32);
724 	} else {
725 		/*
726 		 * Manually select external PHY - MAC mode.
727 		 * Reverse MII/RMII is for AX88772A PHY mode.
728 		 */
729 		axe_cmd(sc, AXE_CMD_SW_PHY_SELECT, 0, AXE_SW_PHY_SELECT_SS_ENB |
730 		    AXE_SW_PHY_SELECT_EXT | AXE_SW_PHY_SELECT_SS_MII, NULL);
731 		uether_pause(&sc->sc_ue, hz / 32);
732 	}
733 	/* Take PHY out of power down. */
734 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPPD |
735 	    AXE_SW_RESET_IPRL, NULL);
736 	uether_pause(&sc->sc_ue, hz / 4);
737 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
738 	uether_pause(&sc->sc_ue, hz);
739 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_CLEAR, NULL);
740 	uether_pause(&sc->sc_ue, hz / 32);
741 	axe_cmd(sc, AXE_CMD_SW_RESET_REG, 0, AXE_SW_RESET_IPRL, NULL);
742 	uether_pause(&sc->sc_ue, hz / 32);
743 }
744 
745 static void
746 axe_ax88772a_init(struct axe_softc *sc)
747 {
748 	struct usb_ether *ue;
749 
750 	ue = &sc->sc_ue;
751 	/* Reload EEPROM. */
752 	AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
753 	axe_ax88772_phywake(sc);
754 	/* Stop MAC. */
755 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
756 }
757 
758 static void
759 axe_ax88772b_init(struct axe_softc *sc)
760 {
761 	struct usb_ether *ue;
762 	uint16_t eeprom;
763 	uint8_t *eaddr;
764 	int i;
765 
766 	ue = &sc->sc_ue;
767 	/* Reload EEPROM. */
768 	AXE_GPIO_WRITE(AXE_GPIO_RELOAD_EEPROM, hz / 32);
769 	/*
770 	 * Save the PHY power saving configuration (high byte) and
771 	 * clear the EEPROM checksum value (low byte).
772 	 */
773 	axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_PHY_PWRCFG, &eeprom);
774 	sc->sc_pwrcfg = le16toh(eeprom) & 0xFF00;
775 
776 	/*
777 	 * The default station address auto-loaded from internal ROM is
778 	 * 00:00:00:00:00:00, so an explicit EEPROM access is required
779 	 * to get the real station address.
780 	 */
781 	eaddr = ue->ue_eaddr;
782 	for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
783 		axe_cmd(sc, AXE_CMD_SROM_READ, 0, AXE_EEPROM_772B_NODE_ID + i,
784 		    &eeprom);
785 		eeprom = le16toh(eeprom);
786 		*eaddr++ = (uint8_t)(eeprom & 0xFF);
787 		*eaddr++ = (uint8_t)((eeprom >> 8) & 0xFF);
788 	}
789 	/* Wakeup PHY. */
790 	axe_ax88772_phywake(sc);
791 	/* Stop MAC. */
792 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, 0, NULL);
793 }
794 
795 #undef	AXE_GPIO_WRITE
796 
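/*
 * Re-select the current configuration (a light-weight device reset) and
 * then re-run the chip-specific init routine to achieve a full reset.
 */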
797 static void
798 axe_reset(struct axe_softc *sc)
799 {
800 	struct usb_config_descriptor *cd;
801 	usb_error_t err;
802 
803 	cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);
804 
805 	err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_lock,
806 	    cd->bConfigurationValue);
807 	if (err)
808 		DPRINTF("reset failed (ignored)\n");
809 
810 	/* Wait a little while for the chip to get its brains in order. */
811 	uether_pause(&sc->sc_ue, hz / 100);
812 
813 	/* Reinitialize controller to achieve full reset. */
814 	if (sc->sc_flags & AXE_FLAG_178)
815 		axe_ax88178_init(sc);
816 	else if (sc->sc_flags & AXE_FLAG_772)
817 		axe_ax88772_init(sc);
818 	else if (sc->sc_flags & AXE_FLAG_772A)
819 		axe_ax88772a_init(sc);
820 	else if (sc->sc_flags & AXE_FLAG_772B)
821 		axe_ax88772b_init(sc);
822 }
823 
824 static void
825 axe_attach_post(struct usb_ether *ue)
826 {
827 	struct axe_softc *sc = uether_getsc(ue);
828 
829 	/*
830 	 * Load PHY indexes first. Needed by axe_xxx_init().
831 	 */
832 	axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, sc->sc_phyaddrs);
833 	if (bootverbose)
834 		device_printf(sc->sc_ue.ue_dev, "PHYADDR 0x%02x:0x%02x\n",
835 		    sc->sc_phyaddrs[0], sc->sc_phyaddrs[1]);
836 	sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_PRI);
837 	if (sc->sc_phyno == -1)
838 		sc->sc_phyno = axe_get_phyno(sc, AXE_PHY_SEL_SEC);
839 	if (sc->sc_phyno == -1) {
840 		device_printf(sc->sc_ue.ue_dev,
841 		    "no valid PHY address found, assuming PHY address 0\n");
842 		sc->sc_phyno = 0;
843 	}
844 
845 	/* Initialize controller and get station address. */
846 	if (sc->sc_flags & AXE_FLAG_178) {
847 		axe_ax88178_init(sc);
848 		sc->sc_tx_bufsz = 16 * 1024;
849 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
850 	} else if (sc->sc_flags & AXE_FLAG_772) {
851 		axe_ax88772_init(sc);
852 		sc->sc_tx_bufsz = 8 * 1024;
853 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
854 	} else if (sc->sc_flags & AXE_FLAG_772A) {
855 		axe_ax88772a_init(sc);
856 		sc->sc_tx_bufsz = 8 * 1024;
857 		axe_cmd(sc, AXE_178_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
858 	} else if (sc->sc_flags & AXE_FLAG_772B) {
859 		axe_ax88772b_init(sc);
860 		sc->sc_tx_bufsz = 8 * 1024;
861 	} else
862 		axe_cmd(sc, AXE_172_CMD_READ_NODEID, 0, 0, ue->ue_eaddr);
863 
864 	/*
865 	 * Fetch IPG values.
866 	 */
867 	if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B)) {
868 		/* Set IPG values. */
869 		sc->sc_ipgs[0] = 0x15;
870 		sc->sc_ipgs[1] = 0x16;
871 		sc->sc_ipgs[2] = 0x1A;
872 	} else
873 		axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, sc->sc_ipgs);
874 }
875 
876 static int
877 axe_attach_post_sub(struct usb_ether *ue)
878 {
879 	struct axe_softc *sc;
880 	struct ifnet *ifp;
881 	u_int adv_pause;
882 	int error;
883 
884 	sc = uether_getsc(ue);
885 	ifp = ue->ue_ifp;
886 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
887 	ifp->if_start = uether_start;
888 	ifp->if_ioctl = axe_ioctl;
889 	ifp->if_init = uether_init;
890 	ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
891 	/* XXX
892 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
893 	*/
894 	ifq_set_ready(&ifp->if_snd);
895 
896 	if (AXE_IS_178_FAMILY(sc))
897 		ifp->if_capabilities |= IFCAP_VLAN_MTU;
898 	if (sc->sc_flags & AXE_FLAG_772B) {
899 		ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_RXCSUM;
900 		ifp->if_hwassist = AXE_CSUM_FEATURES;
901 		/*
902 		 * Checksum offloading on the AX88772B also works with VLAN
903 		 * tagged frames, but there is no way to take advantage
904 		 * of the feature because vlan(4) assumes
905 		 * IFCAP_VLAN_HWTAGGING is a prerequisite for supporting
906 		 * checksum offloading with VLAN.  VLAN hardware tagging
907 		 * support on the AX88772B is very limited, so it's not
908 		 * possible to announce IFCAP_VLAN_HWTAGGING.
909 		 */
910 	}
911 	ifp->if_capenable = ifp->if_capabilities;
912 	if (sc->sc_flags & (AXE_FLAG_772A | AXE_FLAG_772B | AXE_FLAG_178))
913 		adv_pause = MIIF_DOPAUSE;
914 	else
915 		adv_pause = 0;
916 
917 	error = mii_phy_probe(ue->ue_dev, &ue->ue_miibus,
918 		uether_ifmedia_upd, ue->ue_methods->ue_mii_sts);
919 	/* XXX
920 	error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
921 	    uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
922 	    BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, adv_pause);
923 	*/
924 	return (error);
925 }
926 
927 /*
928  * Probe for an AX88172 chip.
929  */
930 static int
931 axe_probe(device_t dev)
932 {
933 	struct usb_attach_arg *uaa = device_get_ivars(dev);
934 
935 	if (uaa->usb_mode != USB_MODE_HOST)
936 		return (ENXIO);
937 	if (uaa->info.bConfigIndex != AXE_CONFIG_IDX)
938 		return (ENXIO);
939 	if (uaa->info.bIfaceIndex != AXE_IFACE_IDX)
940 		return (ENXIO);
941 
942 	return (usbd_lookup_id_by_uaa(axe_devs, sizeof(axe_devs), uaa));
943 }
944 
945 /*
946  * Attach the interface. Allocate softc structures, do ifmedia
947  * setup and ethernet/BPF attach.
948  */
949 static int
950 axe_attach(device_t dev)
951 {
952 	struct usb_attach_arg *uaa = device_get_ivars(dev);
953 	struct axe_softc *sc = device_get_softc(dev);
954 	struct usb_ether *ue = &sc->sc_ue;
955 	uint8_t iface_index;
956 	int error;
957 
958 	sc->sc_flags = USB_GET_DRIVER_INFO(uaa);
959 
960 	device_set_usb_desc(dev);
961 
962 	lockinit(&sc->sc_lock, device_get_nameunit(dev), 0, 0);
963 
964 	iface_index = AXE_IFACE_IDX;
965 	error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
966 	    axe_config, AXE_N_TRANSFER, sc, &sc->sc_lock);
967 	if (error) {
968 		device_printf(dev, "allocating USB transfers failed\n");
969 		goto detach;
970 	}
971 
972 	ue->ue_sc = sc;
973 	ue->ue_dev = dev;
974 	ue->ue_udev = uaa->device;
975 	ue->ue_lock = &sc->sc_lock;
976 	ue->ue_methods = &axe_ue_methods;
977 
978 	error = uether_ifattach(ue);
979 	if (error) {
980 		device_printf(dev, "could not attach interface\n");
981 		goto detach;
982 	}
983 	return (0);			/* success */
984 
985 detach:
986 	axe_detach(dev);
987 	return (ENXIO);			/* failure */
988 }
989 
990 static int
991 axe_detach(device_t dev)
992 {
993 	struct axe_softc *sc = device_get_softc(dev);
994 	struct usb_ether *ue = &sc->sc_ue;
995 
996 	usbd_transfer_unsetup(sc->sc_xfer, AXE_N_TRANSFER);
997 	uether_ifdetach(ue);
998 	lockuninit(&sc->sc_lock);
999 
1000 
1001 	return (0);
1002 }
1003 
1004 #if (AXE_BULK_BUF_SIZE >= 0x10000)
1005 #error "Please update axe_bulk_read_callback()!"
1006 #endif
1007 
1008 static void
1009 axe_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
1010 {
1011 	struct axe_softc *sc = usbd_xfer_softc(xfer);
1012 	struct usb_ether *ue = &sc->sc_ue;
1013 	struct usb_page_cache *pc;
1014 	int actlen;
1015 
1016 	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
1017 
1018 	switch (USB_GET_STATE(xfer)) {
1019 	case USB_ST_TRANSFERRED:
1020 		pc = usbd_xfer_get_frame(xfer, 0);
1021 		axe_rx_frame(ue, pc, actlen);
1022 
1023 		/* FALLTHROUGH */
1024 	case USB_ST_SETUP:
1025 tr_setup:
1026 		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
1027 		usbd_transfer_submit(xfer);
1028 		uether_rxflush(ue);
1029 		return;
1030 
1031 	default:			/* Error */
1032 		DPRINTF("bulk read error, %s\n", usbd_errstr(error));
1033 
1034 		if (error != USB_ERR_CANCELLED) {
1035 			/* try to clear stall first */
1036 			usbd_xfer_set_stall(xfer);
1037 			goto tr_setup;
1038 		}
1039 		return;
1040 
1041 	}
1042 }
1043 
1044 static int
1045 axe_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
1046 {
1047 	struct axe_softc *sc;
1048 	struct axe_sframe_hdr hdr;
1049 	struct axe_csum_hdr csum_hdr;
1050 	int error, len, pos;
1051 
1052 	sc = uether_getsc(ue);
1053 	pos = 0;
1054 	len = 0;
1055 	error = 0;
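	/*
	 * A single bulk-in transfer may carry several ethernet frames.
	 * In standard frame mode each frame is preceded by a 4-byte
	 * header holding the frame length and its one's complement, and
	 * is padded to an even length.  In checksum frame mode (AX88772B
	 * with RX checksum offload enabled) the header also carries the
	 * checksum status and each chunk is padded to a multiple of 4.
	 * Otherwise (AX88172) the whole transfer is treated as a single
	 * frame.
	 */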
1056 	if ((sc->sc_flags & AXE_FLAG_STD_FRAME) != 0) {
1057 		while (pos < actlen) {
1058 			if ((pos + sizeof(hdr)) > actlen) {
1059 				/* too little data */
1060 				error = EINVAL;
1061 				break;
1062 			}
1063 			usbd_copy_out(pc, pos, &hdr, sizeof(hdr));
1064 
1065 			if ((hdr.len ^ hdr.ilen) != sc->sc_lenmask) {
1066 				/* we lost sync */
1067 				error = EINVAL;
1068 				break;
1069 			}
1070 			pos += sizeof(hdr);
1071 			len = le16toh(hdr.len);
1072 			if (pos + len > actlen) {
1073 				/* invalid length */
1074 				error = EINVAL;
1075 				break;
1076 			}
1077 			axe_rxeof(ue, pc, pos, len, NULL);
1078 			pos += len + (len % 2);
1079 		}
1080 	} else if ((sc->sc_flags & AXE_FLAG_CSUM_FRAME) != 0) {
1081 		while (pos < actlen) {
1082 			if ((pos + sizeof(csum_hdr)) > actlen) {
1083 				/* too little data */
1084 				error = EINVAL;
1085 				break;
1086 			}
1087 			usbd_copy_out(pc, pos, &csum_hdr, sizeof(csum_hdr));
1088 
1089 			csum_hdr.len = le16toh(csum_hdr.len);
1090 			csum_hdr.ilen = le16toh(csum_hdr.ilen);
1091 			csum_hdr.cstatus = le16toh(csum_hdr.cstatus);
1092 			if ((AXE_CSUM_RXBYTES(csum_hdr.len) ^
1093 			    AXE_CSUM_RXBYTES(csum_hdr.ilen)) !=
1094 			    sc->sc_lenmask) {
1095 				/* we lost sync */
1096 				error = EINVAL;
1097 				break;
1098 			}
1099 			/*
1100 			 * Get the total transferred frame length including the
1101 			 * checksum header.  The length should be a multiple
1102 			 * of 4.
1103 			 */
1104 			len = sizeof(csum_hdr) + AXE_CSUM_RXBYTES(csum_hdr.len);
1105 			len = (len + 3) & ~3;
1106 			if (pos + len > actlen) {
1107 				/* invalid length */
1108 				error = EINVAL;
1109 				break;
1110 			}
1111 			axe_rxeof(ue, pc, pos + sizeof(csum_hdr),
1112 			    AXE_CSUM_RXBYTES(csum_hdr.len), &csum_hdr);
1113 			pos += len;
1114 		}
1115 	} else
1116 		axe_rxeof(ue, pc, 0, actlen, NULL);
1117 
1118 	if (error != 0)
1119 		IFNET_STAT_INC(ue->ue_ifp, ierrors, 1);
1120 	return (error);
1121 }
1122 
1123 static int
1124 axe_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
1125     unsigned int len, struct axe_csum_hdr *csum_hdr)
1126 {
1127 	struct ifnet *ifp = ue->ue_ifp;
1128 	struct mbuf *m;
1129 
1130 	if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
1131 		IFNET_STAT_INC(ifp, ierrors, 1);
1132 		return (EINVAL);
1133 	}
1134 
1135 	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
1136 	if (m == NULL) {
1137 		IFNET_STAT_INC(ifp, iqdrops, 1);
1138 		return (ENOMEM);
1139 	}
1140 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1141 	m_adj(m, ETHER_ALIGN);
1142 
1143 	usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);
1144 
1145 	IFNET_STAT_INC(ifp, ipackets, 1);
1146 	m->m_pkthdr.rcvif = ifp;
1147 	m->m_pkthdr.len = m->m_len = len;
1148 
1149 	if (csum_hdr != NULL && csum_hdr->cstatus & AXE_CSUM_HDR_L3_TYPE_IPV4) {
1150 		if ((csum_hdr->cstatus & (AXE_CSUM_HDR_L4_CSUM_ERR |
1151 		    AXE_CSUM_HDR_L3_CSUM_ERR)) == 0) {
1152 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
1153 			    CSUM_IP_VALID;
1154 			if ((csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) ==
1155 			    AXE_CSUM_HDR_L4_TYPE_TCP ||
1156 			    (csum_hdr->cstatus & AXE_CSUM_HDR_L4_TYPE_MASK) ==
1157 			    AXE_CSUM_HDR_L4_TYPE_UDP) {
1158 				m->m_pkthdr.csum_flags |=
1159 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1160 				m->m_pkthdr.csum_data = 0xffff;
1161 			}
1162 		}
1163 	}
1164 
1165 	IF_ENQUEUE(&ue->ue_rxq, m);
1166 	return (0);
1167 }
1168 
1169 #if ((AXE_BULK_BUF_SIZE >= 0x10000) || (AXE_BULK_BUF_SIZE < (MCLBYTES+4)))
1170 #error "Please update axe_bulk_write_callback()!"
1171 #endif
1172 
1173 static void
1174 axe_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
1175 {
1176 	struct axe_softc *sc = usbd_xfer_softc(xfer);
1177 	struct axe_sframe_hdr hdr;
1178 	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
1179 	struct usb_page_cache *pc;
1180 	struct mbuf *m;
1181 	int nframes, pos;
1182 
1183 	DPRINTFN(11, "starting transfer\n");
1184 
1185 	switch (USB_GET_STATE(xfer)) {
1186 	case USB_ST_TRANSFERRED:
1187 		DPRINTFN(11, "transfer complete\n");
1188 
1189 		ifq_clr_oactive(&ifp->if_snd);
1190 
1191 		/* FALLTHROUGH */
1192 	case USB_ST_SETUP:
1193 tr_setup:
1194 		if ((sc->sc_flags & AXE_FLAG_LINK) == 0 ||
1195 		    ifq_is_oactive(&ifp->if_snd)) {
1196 			/*
1197 			 * Don't send anything if there is no link or the
1198 			 * controller is busy.
1199 			 */
1200 			DPRINTFN(11, "controller busy: sc_flags %x if_flags %x\n", sc->sc_flags, ifp->if_flags);
1201 			return;
1202 		}
1203 
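		/*
		 * Pack up to 16 mbufs into separate USB frames of one
		 * transfer.  On 178-family parts each packet is prefixed
		 * with a 4-byte header (length plus its one's complement),
		 * and an extra zero-length header is appended whenever a
		 * packet ends exactly on a 512-byte boundary, presumably so
		 * the chip can still detect the end of the frame.
		 */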
1204 		DPRINTFN(11, "copying frames, 16 at a time\n");
1205 		for (nframes = 0; nframes < 16 &&
1206 		    !ifq_is_empty(&ifp->if_snd); nframes++) {
1207 			m = ifq_dequeue(&ifp->if_snd);
1208 			if (m == NULL)
1209 				break;
1210 			usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
1211 			    nframes);
1212 			pos = 0;
1213 			pc = usbd_xfer_get_frame(xfer, nframes);
1214 			if (AXE_IS_178_FAMILY(sc)) {
1215 				hdr.len = htole16(m->m_pkthdr.len);
1216 				hdr.ilen = ~hdr.len;
1217 				/*
1218 				 * If the upper stack has already computed the
1219 				 * checksum, tell a checksum-offload-capable
1220 				 * controller not to insert its own computed
1221 				 * checksum.
1222 				 */
1223 				if (ifp->if_capabilities & IFCAP_TXCSUM) {
1224 					if ((m->m_pkthdr.csum_flags &
1225 					    AXE_CSUM_FEATURES) != 0)
1226 						hdr.len |= htole16(
1227 						    AXE_TX_CSUM_PSEUDO_HDR);
1228 					else
1229 						hdr.len |= htole16(
1230 						    AXE_TX_CSUM_DIS);
1231 				}
1232 				DPRINTFN(11, "usbd copy in\n");
1233 				usbd_copy_in(pc, pos, &hdr, sizeof(hdr));
1234 				pos += sizeof(hdr);
1235 				usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
1236 				pos += m->m_pkthdr.len;
1237 				if ((pos % 512) == 0) {
1238 					hdr.len = 0;
1239 					hdr.ilen = 0xffff;
1240 					usbd_copy_in(pc, pos, &hdr,
1241 					    sizeof(hdr));
1242 					pos += sizeof(hdr);
1243 				}
1244 			} else {
1245 				usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
1246 				pos += m->m_pkthdr.len;
1247 			}
1248 
1249 			/*
1250 			 * XXX
1251 			 * Update the TX packet counter here.  This is not the
1252 			 * correct way, but there seems to be no way to know
1253 			 * how many packets were sent at the end of the
1254 			 * transfer, because the controller combines multiple
1255 			 * writes into a single one if there is room in its
1256 			 * TX buffer.
1257 			 */
1258 			IFNET_STAT_INC(ifp, opackets, 1);
1259 
1260 			/*
1261 			 * if there's a BPF listener, bounce a copy
1262 			 * of this frame to him:
1263 			 */
1264 			BPF_MTAP(ifp, m);
1265 
1266 			m_freem(m);
1267 
1268 			/* Set frame length. */
1269 			usbd_xfer_set_frame_len(xfer, nframes, pos);
1270 		}
1271 		if (nframes != 0) {
1272 			usbd_xfer_set_frames(xfer, nframes);
1273 			DPRINTFN(5, "submitting transfer\n");
1274 			usbd_transfer_submit(xfer);
1275 			ifq_set_oactive(&ifp->if_snd);
1276 		}
1277 		return;
1278 		/* NOTREACHED */
1279 	default:			/* Error */
1280 		DPRINTFN(11, "transfer error, %s\n",
1281 		    usbd_errstr(error));
1282 
1283 		IFNET_STAT_INC(ifp, oerrors, 1);
1284 		ifq_clr_oactive(&ifp->if_snd);
1285 		if (error != USB_ERR_CANCELLED) {
1286 			/* try to clear stall first */
1287 			usbd_xfer_set_stall(xfer);
1288 			goto tr_setup;
1289 		}
1290 		return;
1291 
1292 	}
1293 }
1294 
1295 static void
1296 axe_tick(struct usb_ether *ue)
1297 {
1298 	struct axe_softc *sc = uether_getsc(ue);
1299 	struct mii_data *mii = GET_MII(sc);
1300 
1301 	AXE_LOCK_ASSERT(sc);
1302 
1303 	mii_tick(mii);
1304 	if ((sc->sc_flags & AXE_FLAG_LINK) == 0) {
1305 		axe_miibus_statchg(ue->ue_dev);
1306 		if ((sc->sc_flags & AXE_FLAG_LINK) != 0)
1307 			axe_start(ue);
1308 	}
1309 }
1310 
1311 static void
1312 axe_start(struct usb_ether *ue)
1313 {
1314 	struct axe_softc *sc = uether_getsc(ue);
1315 
1316 	/*
1317 	 * start the USB transfers, if not already started:
1318 	 */
1319 	usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_RD]);
1320 	usbd_transfer_start(sc->sc_xfer[AXE_BULK_DT_WR]);
1321 }
1322 
1323 static void
1324 axe_csum_cfg(struct usb_ether *ue)
1325 {
1326 	struct axe_softc *sc;
1327 	struct ifnet *ifp;
1328 	uint16_t csum1, csum2;
1329 
1330 	sc = uether_getsc(ue);
1331 	AXE_LOCK_ASSERT(sc);
1332 
1333 	if ((sc->sc_flags & AXE_FLAG_772B) != 0) {
1334 		ifp = uether_getifp(ue);
1335 		csum1 = 0;
1336 		csum2 = 0;
1337 		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1338 			csum1 |= AXE_TXCSUM_IP | AXE_TXCSUM_TCP |
1339 			    AXE_TXCSUM_UDP;
1340 		axe_cmd(sc, AXE_772B_CMD_WRITE_TXCSUM, csum2, csum1, NULL);
1341 		csum1 = 0;
1342 		csum2 = 0;
1343 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1344 			csum1 |= AXE_RXCSUM_IP | AXE_RXCSUM_IPVE |
1345 			    AXE_RXCSUM_TCP | AXE_RXCSUM_UDP | AXE_RXCSUM_ICMP |
1346 			    AXE_RXCSUM_IGMP;
1347 		axe_cmd(sc, AXE_772B_CMD_WRITE_RXCSUM, csum2, csum1, NULL);
1348 	}
1349 }
1350 
1351 static void
1352 axe_init(struct usb_ether *ue)
1353 {
1354 	struct axe_softc *sc = uether_getsc(ue);
1355 	struct ifnet *ifp = uether_getifp(ue);
1356 	uint16_t rxmode;
1357 
1358 	AXE_LOCK_ASSERT(sc);
1359 
1360 
1361 	if ((ifp->if_flags & IFF_RUNNING) != 0)
1362 		return;
1363 
1364 	/* Cancel pending I/O */
1365 	axe_stop(ue);
1366 
1367 	axe_reset(sc);
1368 
1369 	/* Set MAC address and transmitter IPG values. */
1370 	if (AXE_IS_178_FAMILY(sc)) {
1371 		axe_cmd(sc, AXE_178_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp));
1372 		axe_cmd(sc, AXE_178_CMD_WRITE_IPG012, sc->sc_ipgs[2],
1373 		    (sc->sc_ipgs[1] << 8) | (sc->sc_ipgs[0]), NULL);
1374 	} else {
1375 		axe_cmd(sc, AXE_172_CMD_WRITE_NODEID, 0, 0, IF_LLADDR(ifp));
1376 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG0, 0, sc->sc_ipgs[0], NULL);
1377 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG1, 0, sc->sc_ipgs[1], NULL);
1378 		axe_cmd(sc, AXE_172_CMD_WRITE_IPG2, 0, sc->sc_ipgs[2], NULL);
1379 	}
1380 
1381 	if (AXE_IS_178_FAMILY(sc)) {
1382 		sc->sc_flags &= ~(AXE_FLAG_STD_FRAME | AXE_FLAG_CSUM_FRAME);
1383 		if ((sc->sc_flags & AXE_FLAG_772B) != 0)
1384 			sc->sc_lenmask = AXE_CSUM_HDR_LEN_MASK;
1385 		else
1386 			sc->sc_lenmask = AXE_HDR_LEN_MASK;
1387 		if ((sc->sc_flags & AXE_FLAG_772B) != 0 &&
1388 		    (ifp->if_capenable & IFCAP_RXCSUM) != 0)
1389 			sc->sc_flags |= AXE_FLAG_CSUM_FRAME;
1390 		else
1391 			sc->sc_flags |= AXE_FLAG_STD_FRAME;
1392 	}
1393 
1394 	/* Configure TX/RX checksum offloading. */
1395 	axe_csum_cfg(ue);
1396 
1397 	if (sc->sc_flags & AXE_FLAG_772B) {
1398 		/* The AX88772B uses a different maximum frame burst configuration. */
1399 		axe_cmd(sc, AXE_772B_CMD_RXCTL_WRITE_CFG,
1400 		    ax88772b_mfb_table[AX88772B_MFB_16K].threshold,
1401 		    ax88772b_mfb_table[AX88772B_MFB_16K].byte_cnt, NULL);
1402 	}
1403 
1404 	/* Enable receiver, set RX mode. */
1405 	rxmode = (AXE_RXCMD_MULTICAST | AXE_RXCMD_ENABLE);
1406 	if (AXE_IS_178_FAMILY(sc)) {
1407 		if (sc->sc_flags & AXE_FLAG_772B) {
1408 			/*
1409 			 * Select RX header format type 1.  Aligning the IP
1410 			 * header on a 4-byte boundary is not needed when the
1411 			 * checksum offloading feature is not used, because
1412 			 * we always copy the received frame in the RX
1413 			 * handler.  When RX checksum offloading is active,
1414 			 * aligning the IP header is required to reflect the
1415 			 * actual frame length, including the RX header
1416 			 * size.
1417 			 */
1418 			rxmode |= AXE_772B_RXCMD_HDR_TYPE_1;
1419 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1420 				rxmode |= AXE_772B_RXCMD_IPHDR_ALIGN;
1421 		} else {
1422 			/*
1423 			 * The default RX buffer size is too small to get
1424 			 * maximum performance.
1425 			 */
1426 			rxmode |= AXE_178_RXCMD_MFB_16384;
1427 		}
1428 	} else {
1429 		rxmode |= AXE_172_RXCMD_UNICAST;
1430 	}
1431 
1432 	/* If we want promiscuous mode, set the allframes bit. */
1433 	if (ifp->if_flags & IFF_PROMISC)
1434 		rxmode |= AXE_RXCMD_PROMISC;
1435 
1436 	if (ifp->if_flags & IFF_BROADCAST)
1437 		rxmode |= AXE_RXCMD_BROADCAST;
1438 
1439 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
1440 
1441 	/* Load the multicast filter. */
1442 	axe_setmulti(ue);
1443 
1444 	usbd_xfer_set_stall(sc->sc_xfer[AXE_BULK_DT_WR]);
1445 
1446 
1447 	ifp->if_flags |= IFF_RUNNING;
1448 
1449 	/* Switch to selected media. */
1450 	axe_ifmedia_upd(ifp);
1451 }
1452 
1453 static void
1454 axe_setpromisc(struct usb_ether *ue)
1455 {
1456 	struct axe_softc *sc = uether_getsc(ue);
1457 	struct ifnet *ifp = uether_getifp(ue);
1458 	uint16_t rxmode;
1459 
1460 	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, &rxmode);
1461 
1462 	rxmode = le16toh(rxmode);
1463 
1464 	if (ifp->if_flags & IFF_PROMISC) {
1465 		rxmode |= AXE_RXCMD_PROMISC;
1466 	} else {
1467 		rxmode &= ~AXE_RXCMD_PROMISC;
1468 	}
1469 
1470 	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
1471 
1472 	axe_setmulti(ue);
1473 }
1474 
1475 static void
1476 axe_stop(struct usb_ether *ue)
1477 {
1478 	struct axe_softc *sc = uether_getsc(ue);
1479 	struct ifnet *ifp = uether_getifp(ue);
1480 
1481 	AXE_LOCK_ASSERT(sc);
1482 
1483 
1484 	ifp->if_flags &= ~IFF_RUNNING;
1485 	ifq_clr_oactive(&ifp->if_snd);
1486 
1487 	sc->sc_flags &= ~AXE_FLAG_LINK;
1488 
1489 	/*
1490 	 * stop all the transfers, if not already stopped:
1491 	 */
1492 	usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_WR]);
1493 	usbd_transfer_stop(sc->sc_xfer[AXE_BULK_DT_RD]);
1494 }
1495 
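/*
 * Only SIOCSIFCAP is handled here, to toggle TX/RX checksum offloading;
 * a capability change clears IFF_RUNNING so the interface is
 * reinitialized.  All other ioctls are passed on to uether_ioctl().
 */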
1496 static int
1497 axe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *uc)
1498 {
1499 	struct usb_ether *ue = ifp->if_softc;
1500 	struct axe_softc *sc;
1501 	struct ifreq *ifr;
1502 	int error, mask, reinit;
1503 
1504 	sc = uether_getsc(ue);
1505 	ifr = (struct ifreq *)data;
1506 	error = 0;
1507 	reinit = 0;
1508 	if (cmd == SIOCSIFCAP) {
1509 		AXE_LOCK(sc);
1510 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1511 		if ((mask & IFCAP_TXCSUM) != 0 &&
1512 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1513 			ifp->if_capenable ^= IFCAP_TXCSUM;
1514 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1515 				ifp->if_hwassist |= AXE_CSUM_FEATURES;
1516 			else
1517 				ifp->if_hwassist &= ~AXE_CSUM_FEATURES;
1518 			reinit++;
1519 		}
1520 		if ((mask & IFCAP_RXCSUM) != 0 &&
1521 		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
1522 			ifp->if_capenable ^= IFCAP_RXCSUM;
1523 			reinit++;
1524 		}
1525 		if (reinit > 0 && ifp->if_flags & IFF_RUNNING)
1526 			ifp->if_flags &= ~IFF_RUNNING;
1527 		else
1528 			reinit = 0;
1529 		AXE_UNLOCK(sc);
1530 		if (reinit > 0)
1531 			uether_init(ue);
1532 	} else
1533 		error = uether_ioctl(ifp, cmd, data, uc);
1534 
1535 	return (error);
1536 }
1537