1 /*
2  * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Sepherosa Ziehau <sepherosa@gmail.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.6 2007/10/23 14:28:42 sephe Exp $
35  */
36 
37 #include <sys/param.h>
38 #include <sys/bitops.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/bus.h>
42 #include <sys/malloc.h>
43 #include <sys/proc.h>
44 #include <sys/rman.h>
45 #include <sys/serialize.h>
46 #include <sys/socket.h>
47 #include <sys/sockio.h>
48 #include <sys/sysctl.h>
49 
50 #include <net/ethernet.h>
51 #include <net/if.h>
52 #include <net/bpf.h>
53 #include <net/if_arp.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/ifq_var.h>
57 #include <net/vlan/if_vlan_var.h>
58 
59 #include <dev/netif/mii_layer/miivar.h>
60 
61 #include <bus/pci/pcireg.h>
62 #include <bus/pci/pcivar.h>
63 #include <bus/pci/pcidevs.h>
64 
65 #include <dev/netif/et/if_etreg.h>
66 #include <dev/netif/et/if_etvar.h>
67 
68 #include "miibus_if.h"
69 
70 static int	et_probe(device_t);
71 static int	et_attach(device_t);
72 static int	et_detach(device_t);
73 static int	et_shutdown(device_t);
74 
75 static int	et_miibus_readreg(device_t, int, int);
76 static int	et_miibus_writereg(device_t, int, int, int);
77 static void	et_miibus_statchg(device_t);
78 
79 static void	et_init(void *);
80 static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
81 static void	et_start(struct ifnet *);
82 static void	et_watchdog(struct ifnet *);
83 static int	et_ifmedia_upd(struct ifnet *);
84 static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
85 
86 static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
87 static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
88 
89 static void	et_intr(void *);
90 static void	et_enable_intrs(struct et_softc *, uint32_t);
91 static void	et_disable_intrs(struct et_softc *);
92 static void	et_rxeof(struct et_softc *);
93 static void	et_txeof(struct et_softc *);
94 
95 static int	et_dma_alloc(device_t);
96 static void	et_dma_free(device_t);
97 static int	et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
98 				  void **, bus_addr_t *, bus_dmamap_t *);
99 static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
100 static int	et_dma_mbuf_create(device_t);
101 static void	et_dma_mbuf_destroy(device_t, int, const int[]);
102 static int	et_jumbo_mem_alloc(device_t);
103 static void	et_jumbo_mem_free(device_t);
104 static void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
105 static void	et_dma_buf_addr(void *, bus_dma_segment_t *, int,
106 				bus_size_t, int);
107 static int	et_init_tx_ring(struct et_softc *);
108 static int	et_init_rx_ring(struct et_softc *);
109 static void	et_free_tx_ring(struct et_softc *);
110 static void	et_free_rx_ring(struct et_softc *);
111 static int	et_encap(struct et_softc *, struct mbuf **);
112 static struct et_jslot *
113 		et_jalloc(struct et_jumbo_data *);
114 static void	et_jfree(void *);
115 static void	et_jref(void *);
116 static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
117 static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
118 static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
119 static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);
120 
121 static void	et_stop(struct et_softc *);
122 static int	et_chip_init(struct et_softc *);
123 static void	et_chip_attach(struct et_softc *);
124 static void	et_init_mac(struct et_softc *);
125 static void	et_init_rxmac(struct et_softc *);
126 static void	et_init_txmac(struct et_softc *);
127 static int	et_init_rxdma(struct et_softc *);
128 static int	et_init_txdma(struct et_softc *);
129 static int	et_start_rxdma(struct et_softc *);
130 static int	et_start_txdma(struct et_softc *);
131 static int	et_stop_rxdma(struct et_softc *);
132 static int	et_stop_txdma(struct et_softc *);
133 static int	et_enable_txrx(struct et_softc *, int);
134 static void	et_reset(struct et_softc *);
135 static int	et_bus_config(device_t);
136 static void	et_get_eaddr(device_t, uint8_t[]);
137 static void	et_setmulti(struct et_softc *);
138 static void	et_tick(void *);
139 static void	et_setmedia(struct et_softc *);
140 static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);
141 
142 static const struct et_dev {
143 	uint16_t	vid;
144 	uint16_t	did;
145 	const char	*desc;
146 } et_devices[] = {
147 	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
148 	  "Agere ET1310 Gigabit Ethernet" },
149 	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
150 	  "Agere ET1310 Fast Ethernet" },
151 	{ 0, 0, NULL }
152 };
153 
154 static device_method_t et_methods[] = {
155 	DEVMETHOD(device_probe,		et_probe),
156 	DEVMETHOD(device_attach,	et_attach),
157 	DEVMETHOD(device_detach,	et_detach),
158 	DEVMETHOD(device_shutdown,	et_shutdown),
159 #if 0
160 	DEVMETHOD(device_suspend,	et_suspend),
161 	DEVMETHOD(device_resume,	et_resume),
162 #endif
163 
164 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
165 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
166 
167 	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
168 	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
169 	DEVMETHOD(miibus_statchg,	et_miibus_statchg),
170 
171 	{ 0, 0 }
172 };
173 
174 static driver_t et_driver = {
175 	"et",
176 	et_methods,
177 	sizeof(struct et_softc)
178 };
179 
180 static devclass_t et_devclass;
181 
182 DECLARE_DUMMY_MODULE(if_et);
183 MODULE_DEPEND(if_et, miibus, 1, 1, 1);
184 DRIVER_MODULE(if_et, pci, et_driver, et_devclass, 0, 0);
185 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
186 
187 static int	et_rx_intr_npkts = 32;
188 static int	et_rx_intr_delay = 20;		/* x10 usec */
189 static int	et_tx_intr_nsegs = 126;
190 static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
191 
192 TUNABLE_INT("hw.et.timer", &et_timer);
193 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
194 TUNABLE_INT("hw.et.rx_intr_intvl", &et_rx_intr_delay);
195 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
196 
197 struct et_bsize {
198 	int		bufsize;
199 	int		jumbo;
200 	et_newbuf_t	newbuf;
201 };
202 
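/*
 * Per-ring RX buffer setup: ring 0 is always loaded with small 128-byte
 * buffers, while ring 1 is loaded with 2KB clusters or, for jumbo MTUs,
 * 16KB jumbo buffers.
 */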
203 static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
204 	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
205 	  .newbuf = et_newbuf_hdr },
206 	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,	.jumbo = 0,
207 	  .newbuf = et_newbuf_cluster },
208 };
209 
210 static const struct et_bsize	et_bufsize_jumbo[ET_RX_NRING] = {
211 	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
212 	  .newbuf = et_newbuf_hdr },
213 	{ .bufsize = ET_RXDMA_CTRL_RING1_16384,	.jumbo = 1,
214 	  .newbuf = et_newbuf_jumbo },
215 };
216 
217 static int
218 et_probe(device_t dev)
219 {
220 	const struct et_dev *d;
221 	uint16_t did, vid;
222 
223 	vid = pci_get_vendor(dev);
224 	did = pci_get_device(dev);
225 
226 	for (d = et_devices; d->desc != NULL; ++d) {
227 		if (vid == d->vid && did == d->did) {
228 			device_set_desc(dev, d->desc);
229 			return 0;
230 		}
231 	}
232 	return ENXIO;
233 }
234 
235 static int
236 et_attach(device_t dev)
237 {
238 	struct et_softc *sc = device_get_softc(dev);
239 	struct ifnet *ifp = &sc->arpcom.ac_if;
240 	uint8_t eaddr[ETHER_ADDR_LEN];
241 	int error;
242 
243 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
244 	callout_init(&sc->sc_tick);
245 
246 	/*
247 	 * Initialize tunables
248 	 */
249 	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
250 	sc->sc_rx_intr_delay = et_rx_intr_delay;
251 	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
252 	sc->sc_timer = et_timer;
253 
254 #ifndef BURN_BRIDGES
255 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
256 		uint32_t irq, mem;
257 
258 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
259 		mem = pci_read_config(dev, ET_PCIR_BAR, 4);
260 
261 		device_printf(dev, "chip is in D%d power mode "
262 		    "-- setting to D0\n", pci_get_powerstate(dev));
263 
264 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
265 
266 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
267 		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
268 	}
269 #endif	/* !BURN_BRIDGES */
270 
271 	/* Enable bus mastering */
272 	pci_enable_busmaster(dev);
273 
274 	/*
275 	 * Allocate IO memory
276 	 */
277 	sc->sc_mem_rid = ET_PCIR_BAR;
278 	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
279 						&sc->sc_mem_rid, RF_ACTIVE);
280 	if (sc->sc_mem_res == NULL) {
281 		device_printf(dev, "can't allocate IO memory\n");
282 		return ENXIO;
283 	}
284 	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
285 	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);
286 
287 	/*
288 	 * Allocate IRQ
289 	 */
290 	sc->sc_irq_rid = 0;
291 	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
292 						&sc->sc_irq_rid,
293 						RF_SHAREABLE | RF_ACTIVE);
294 	if (sc->sc_irq_res == NULL) {
295 		device_printf(dev, "can't allocate irq\n");
296 		error = ENXIO;
297 		goto fail;
298 	}
299 
300 	/*
301 	 * Create sysctl tree
302 	 */
303 	sysctl_ctx_init(&sc->sc_sysctl_ctx);
304 	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
305 					     SYSCTL_STATIC_CHILDREN(_hw),
306 					     OID_AUTO,
307 					     device_get_nameunit(dev),
308 					     CTLFLAG_RD, 0, "");
309 	if (sc->sc_sysctl_tree == NULL) {
310 		device_printf(dev, "can't add sysctl node\n");
311 		error = ENXIO;
312 		goto fail;
313 	}
314 
315 	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
316 			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
317 			OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
318 			sc, 0, et_sysctl_rx_intr_npkts, "I",
319 			"RX IM, # packets per RX interrupt");
320 	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
321 			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
322 			OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
323 			sc, 0, et_sysctl_rx_intr_delay, "I",
324 			"RX IM, RX interrupt delay (x10 usec)");
325 	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
326 		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
327 		       "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
328 		       "TX IM, # segments per TX interrupt");
329 	SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx,
330 			SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
331 			"timer", CTLFLAG_RW, &sc->sc_timer, 0,
332 			"TX timer");
333 
334 	error = et_bus_config(dev);
335 	if (error)
336 		goto fail;
337 
338 	et_get_eaddr(dev, eaddr);
339 
340 	CSR_WRITE_4(sc, ET_PM,
341 		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
342 
343 	et_reset(sc);
344 
345 	et_disable_intrs(sc);
346 
347 	error = et_dma_alloc(dev);
348 	if (error)
349 		goto fail;
350 
351 	ifp->if_softc = sc;
352 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
353 	ifp->if_init = et_init;
354 	ifp->if_ioctl = et_ioctl;
355 	ifp->if_start = et_start;
356 	ifp->if_watchdog = et_watchdog;
357 	ifp->if_mtu = ETHERMTU;
358 	ifp->if_capabilities = IFCAP_VLAN_MTU;
359 	ifp->if_capenable = ifp->if_capabilities;
360 	ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC);
361 	ifq_set_ready(&ifp->if_snd);
362 
363 	et_chip_attach(sc);
364 
365 	error = mii_phy_probe(dev, &sc->sc_miibus,
366 			      et_ifmedia_upd, et_ifmedia_sts);
367 	if (error) {
368 		device_printf(dev, "can't probe any PHY\n");
369 		goto fail;
370 	}
371 
372 	ether_ifattach(ifp, eaddr, NULL);
373 
374 	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc,
375 			       &sc->sc_irq_handle, ifp->if_serializer);
376 	if (error) {
377 		ether_ifdetach(ifp);
378 		device_printf(dev, "can't setup intr\n");
379 		goto fail;
380 	}
381 	return 0;
382 fail:
383 	et_detach(dev);
384 	return error;
385 }
386 
387 static int
388 et_detach(device_t dev)
389 {
390 	struct et_softc *sc = device_get_softc(dev);
391 
392 	if (device_is_attached(dev)) {
393 		struct ifnet *ifp = &sc->arpcom.ac_if;
394 
395 		lwkt_serialize_enter(ifp->if_serializer);
396 		et_stop(sc);
397 		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
398 		lwkt_serialize_exit(ifp->if_serializer);
399 
400 		ether_ifdetach(ifp);
401 	}
402 
403 	if (sc->sc_sysctl_tree != NULL)
404 		sysctl_ctx_free(&sc->sc_sysctl_ctx);
405 
406 	if (sc->sc_miibus != NULL)
407 		device_delete_child(dev, sc->sc_miibus);
408 	bus_generic_detach(dev);
409 
410 	if (sc->sc_irq_res != NULL) {
411 		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
412 				     sc->sc_irq_res);
413 	}
414 
415 	if (sc->sc_mem_res != NULL) {
416 		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
417 				     sc->sc_mem_res);
418 	}
419 
420 	et_dma_free(dev);
421 
422 	return 0;
423 }
424 
425 static int
426 et_shutdown(device_t dev)
427 {
428 	struct et_softc *sc = device_get_softc(dev);
429 	struct ifnet *ifp = &sc->arpcom.ac_if;
430 
431 	lwkt_serialize_enter(ifp->if_serializer);
432 	et_stop(sc);
433 	lwkt_serialize_exit(ifp->if_serializer);
434 	return 0;
435 }
436 
437 static int
438 et_miibus_readreg(device_t dev, int phy, int reg)
439 {
440 	struct et_softc *sc = device_get_softc(dev);
441 	uint32_t val;
442 	int i, ret;
443 
444 	/* Stop any pending operations */
445 	CSR_WRITE_4(sc, ET_MII_CMD, 0);
446 
447 	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
448 	      __SHIFTIN(reg, ET_MII_ADDR_REG);
449 	CSR_WRITE_4(sc, ET_MII_ADDR, val);
450 
451 	/* Start reading */
452 	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
453 
454 #define NRETRY	50
455 
456 	for (i = 0; i < NRETRY; ++i) {
457 		val = CSR_READ_4(sc, ET_MII_IND);
458 		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
459 			break;
460 		DELAY(50);
461 	}
462 	if (i == NRETRY) {
463 		if_printf(&sc->arpcom.ac_if,
464 			  "read phy %d, reg %d timed out\n", phy, reg);
465 		ret = 0;
466 		goto back;
467 	}
468 
469 #undef NRETRY
470 
471 	val = CSR_READ_4(sc, ET_MII_STAT);
472 	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);
473 
474 back:
475 	/* Make sure that the current operation is stopped */
476 	CSR_WRITE_4(sc, ET_MII_CMD, 0);
477 	return ret;
478 }
479 
480 static int
481 et_miibus_writereg(device_t dev, int phy, int reg, int val0)
482 {
483 	struct et_softc *sc = device_get_softc(dev);
484 	uint32_t val;
485 	int i;
486 
487 	/* Stop any pending operations */
488 	CSR_WRITE_4(sc, ET_MII_CMD, 0);
489 
490 	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
491 	      __SHIFTIN(reg, ET_MII_ADDR_REG);
492 	CSR_WRITE_4(sc, ET_MII_ADDR, val);
493 
494 	/* Start writing */
495 	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));
496 
497 #define NRETRY 100
498 
499 	for (i = 0; i < NRETRY; ++i) {
500 		val = CSR_READ_4(sc, ET_MII_IND);
501 		if ((val & ET_MII_IND_BUSY) == 0)
502 			break;
503 		DELAY(50);
504 	}
505 	if (i == NRETRY) {
506 		if_printf(&sc->arpcom.ac_if,
507 			  "write phy %d, reg %d timed out\n", phy, reg);
508 		et_miibus_readreg(dev, phy, reg);
509 	}
510 
511 #undef NRETRY
512 
513 	/* Make sure that the current operation is stopped */
514 	CSR_WRITE_4(sc, ET_MII_CMD, 0);
515 	return 0;
516 }
517 
518 static void
519 et_miibus_statchg(device_t dev)
520 {
521 	et_setmedia(device_get_softc(dev));
522 }
523 
524 static int
525 et_ifmedia_upd(struct ifnet *ifp)
526 {
527 	struct et_softc *sc = ifp->if_softc;
528 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
529 
530 	if (mii->mii_instance != 0) {
531 		struct mii_softc *miisc;
532 
533 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
534 			mii_phy_reset(miisc);
535 	}
536 	mii_mediachg(mii);
537 
538 	return 0;
539 }
540 
541 static void
542 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
543 {
544 	struct et_softc *sc = ifp->if_softc;
545 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
546 
547 	mii_pollstat(mii);
548 	ifmr->ifm_active = mii->mii_media_active;
549 	ifmr->ifm_status = mii->mii_media_status;
550 }
551 
552 static void
553 et_stop(struct et_softc *sc)
554 {
555 	struct ifnet *ifp = &sc->arpcom.ac_if;
556 
557 	ASSERT_SERIALIZED(ifp->if_serializer);
558 
559 	callout_stop(&sc->sc_tick);
560 
561 	et_stop_rxdma(sc);
562 	et_stop_txdma(sc);
563 
564 	et_disable_intrs(sc);
565 
566 	et_free_tx_ring(sc);
567 	et_free_rx_ring(sc);
568 
569 	et_reset(sc);
570 
571 	sc->sc_tx = 0;
572 	sc->sc_tx_intr = 0;
573 	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
574 
575 	ifp->if_timer = 0;
576 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
577 }
578 
579 static int
580 et_bus_config(device_t dev)
581 {
582 	uint32_t val, max_plsz;
583 	uint16_t ack_latency, replay_timer;
584 
585 	/*
586 	 * Test whether EEPROM is valid
587 	 * NOTE: Read twice to get the correct value
588 	 */
589 	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
590 	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
591 	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
592 		device_printf(dev, "EEPROM status error 0x%02x\n", val);
593 		return ENXIO;
594 	}
595 
596 	/* TODO: LED */
597 
598 	/*
599 	 * Configure ACK latency and replay timer according to
600 	 * max payload size
601 	 */
602 	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
603 	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;
604 
605 	switch (max_plsz) {
606 	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
607 		ack_latency = ET_PCIV_ACK_LATENCY_128;
608 		replay_timer = ET_PCIV_REPLAY_TIMER_128;
609 		break;
610 
611 	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
612 		ack_latency = ET_PCIV_ACK_LATENCY_256;
613 		replay_timer = ET_PCIV_REPLAY_TIMER_256;
614 		break;
615 
616 	default:
617 		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
618 		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
619 		device_printf(dev, "ack latency %u, replay timer %u\n",
620 			      ack_latency, replay_timer);
621 		break;
622 	}
623 	if (ack_latency != 0) {
624 		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
625 		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
626 	}
627 
628 	/*
629 	 * Set L0s and L1 latency timer to 2us
630 	 */
631 	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
632 	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);
633 
634 	/*
635 	 * Set max read request size to 2048 bytes
636 	 */
637 	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
638 	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
639 	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
640 	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);
641 
642 	return 0;
643 }
644 
645 static void
646 et_get_eaddr(device_t dev, uint8_t eaddr[])
647 {
648 	uint32_t val;
649 	int i;
650 
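	/*
	 * The station address is read from PCI config space: the low
	 * 4 bytes at ET_PCIR_MAC_ADDR0, the upper 2 at ET_PCIR_MAC_ADDR1.
	 */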
651 	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
652 	for (i = 0; i < 4; ++i)
653 		eaddr[i] = (val >> (8 * i)) & 0xff;
654 
655 	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
656 	for (; i < ETHER_ADDR_LEN; ++i)
657 		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
658 }
659 
660 static void
661 et_reset(struct et_softc *sc)
662 {
663 	CSR_WRITE_4(sc, ET_MAC_CFG1,
664 		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
665 		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
666 		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
667 
668 	CSR_WRITE_4(sc, ET_SWRST,
669 		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
670 		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
671 		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
672 
673 	CSR_WRITE_4(sc, ET_MAC_CFG1,
674 		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
675 		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
676 	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
677 }
678 
679 static void
680 et_disable_intrs(struct et_softc *sc)
681 {
682 	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
683 }
684 
685 static void
686 et_enable_intrs(struct et_softc *sc, uint32_t intrs)
687 {
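	/*
	 * ET_INTR_MASK masks an interrupt for each bit that is set,
	 * so write the complement of the sources to be enabled.
	 */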
688 	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
689 }
690 
691 static int
692 et_dma_alloc(device_t dev)
693 {
694 	struct et_softc *sc = device_get_softc(dev);
695 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
696 	struct et_txstatus_data *txsd = &sc->sc_tx_status;
697 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
698 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
699 	int i, error;
700 
701 	/*
702 	 * Create top level DMA tag
703 	 */
704 	error = bus_dma_tag_create(NULL, 1, 0,
705 				   BUS_SPACE_MAXADDR_32BIT,
706 				   BUS_SPACE_MAXADDR,
707 				   NULL, NULL,
708 				   MAXBSIZE,
709 				   BUS_SPACE_UNRESTRICTED,
710 				   BUS_SPACE_MAXSIZE_32BIT,
711 				   0, &sc->sc_dtag);
712 	if (error) {
713 		device_printf(dev, "can't create DMA tag\n");
714 		return error;
715 	}
716 
717 	/*
718 	 * Create TX ring DMA stuffs
719 	 */
720 	error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
721 				  (void **)&tx_ring->tr_desc,
722 				  &tx_ring->tr_paddr, &tx_ring->tr_dmap);
723 	if (error) {
724 		device_printf(dev, "can't create TX ring DMA stuffs\n");
725 		return error;
726 	}
727 
728 	/*
729 	 * Create TX status DMA stuffs
730 	 */
731 	error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
732 				  (void **)&txsd->txsd_status,
733 				  &txsd->txsd_paddr, &txsd->txsd_dmap);
734 	if (error) {
735 		device_printf(dev, "can't create TX status DMA stuffs\n");
736 		return error;
737 	}
738 
739 	/*
740 	 * Create DMA stuffs for RX rings
741 	 */
742 	for (i = 0; i < ET_RX_NRING; ++i) {
743 		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
744 		{ ET_RX_RING0_POS, ET_RX_RING1_POS };
745 
746 		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
747 
748 		error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
749 					  &rx_ring->rr_dtag,
750 					  (void **)&rx_ring->rr_desc,
751 					  &rx_ring->rr_paddr,
752 					  &rx_ring->rr_dmap);
753 		if (error) {
754 			device_printf(dev, "can't create DMA stuffs for "
755 				      "the %d RX ring\n", i);
756 			return error;
757 		}
758 		rx_ring->rr_posreg = rx_ring_posreg[i];
759 	}
760 
761 	/*
762 	 * Create RX stat ring DMA stuffs
763 	 */
764 	error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
765 				  &rxst_ring->rsr_dtag,
766 				  (void **)&rxst_ring->rsr_stat,
767 				  &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
768 	if (error) {
769 		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
770 		return error;
771 	}
772 
773 	/*
774 	 * Create RX status DMA stuffs
775 	 */
776 	error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
777 				  &rxsd->rxsd_dtag,
778 				  (void **)&rxsd->rxsd_status,
779 				  &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
780 	if (error) {
781 		device_printf(dev, "can't create RX status DMA stuffs\n");
782 		return error;
783 	}
784 
785 	/*
786 	 * Create mbuf DMA stuffs
787 	 */
788 	error = et_dma_mbuf_create(dev);
789 	if (error)
790 		return error;
791 
792 	/*
793 	 * Create jumbo buffer DMA stuffs
794 	 * NOTE: Allow it to fail
795 	 */
796 	if (et_jumbo_mem_alloc(dev) == 0)
797 		sc->sc_flags |= ET_FLAG_JUMBO;
798 
799 	return 0;
800 }
801 
802 static void
803 et_dma_free(device_t dev)
804 {
805 	struct et_softc *sc = device_get_softc(dev);
806 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
807 	struct et_txstatus_data *txsd = &sc->sc_tx_status;
808 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
809 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
810 	int i, rx_done[ET_RX_NRING];
811 
812 	/*
813 	 * Destroy TX ring DMA stuffs
814 	 */
815 	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
816 			   tx_ring->tr_dmap);
817 
818 	/*
819 	 * Destroy TX status DMA stuffs
820 	 */
821 	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
822 			   txsd->txsd_dmap);
823 
824 	/*
825 	 * Destroy DMA stuffs for RX rings
826 	 */
827 	for (i = 0; i < ET_RX_NRING; ++i) {
828 		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
829 
830 		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
831 				   rx_ring->rr_dmap);
832 	}
833 
834 	/*
835 	 * Destroy RX stat ring DMA stuffs
836 	 */
837 	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
838 			   rxst_ring->rsr_dmap);
839 
840 	/*
841 	 * Destroy RX status DMA stuffs
842 	 */
843 	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
844 			   rxsd->rxsd_dmap);
845 
846 	/*
847 	 * Destroy mbuf DMA stuffs
848 	 */
849 	for (i = 0; i < ET_RX_NRING; ++i)
850 		rx_done[i] = ET_RX_NDESC;
851 	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);
852 
853 	/*
854 	 * Destroy jumbo buffer DMA stuffs
855 	 */
856 	if (sc->sc_flags & ET_FLAG_JUMBO)
857 		et_jumbo_mem_free(dev);
858 
859 	/*
860 	 * Destroy top level DMA tag
861 	 */
862 	if (sc->sc_dtag != NULL)
863 		bus_dma_tag_destroy(sc->sc_dtag);
864 }
865 
866 static int
867 et_dma_mbuf_create(device_t dev)
868 {
869 	struct et_softc *sc = device_get_softc(dev);
870 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
871 	int i, error, rx_done[ET_RX_NRING];
872 
873 	/*
874 	 * Create mbuf DMA tag
875 	 */
876 	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
877 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
878 				   NULL, NULL,
879 				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
880 				   BUS_SPACE_MAXSIZE_32BIT,
881 				   BUS_DMA_ALLOCNOW, &sc->sc_mbuf_dtag);
882 	if (error) {
883 		device_printf(dev, "can't create mbuf DMA tag\n");
884 		return error;
885 	}
886 
887 	/*
888 	 * Create spare DMA map for RX mbufs
889 	 */
890 	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
891 	if (error) {
892 		device_printf(dev, "can't create spare mbuf DMA map\n");
893 		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
894 		sc->sc_mbuf_dtag = NULL;
895 		return error;
896 	}
897 
898 	/*
899 	 * Create DMA maps for RX mbufs
900 	 */
901 	bzero(rx_done, sizeof(rx_done));
902 	for (i = 0; i < ET_RX_NRING; ++i) {
903 		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
904 		int j;
905 
906 		for (j = 0; j < ET_RX_NDESC; ++j) {
907 			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
908 				&rbd->rbd_buf[j].rb_dmap);
909 			if (error) {
910 				device_printf(dev, "can't create %d RX mbuf "
911 					      "DMA map for %d RX ring\n", j, i);
912 				rx_done[i] = j;
913 				et_dma_mbuf_destroy(dev, 0, rx_done);
914 				return error;
915 			}
916 		}
917 		rx_done[i] = ET_RX_NDESC;
918 
919 		rbd->rbd_softc = sc;
920 		rbd->rbd_ring = &sc->sc_rx_ring[i];
921 	}
922 
923 	/*
924 	 * Create DMA maps for TX mbufs
925 	 */
926 	for (i = 0; i < ET_TX_NDESC; ++i) {
927 		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
928 					  &tbd->tbd_buf[i].tb_dmap);
929 		if (error) {
930 			device_printf(dev, "can't create %d TX mbuf "
931 				      "DMA map\n", i);
932 			et_dma_mbuf_destroy(dev, i, rx_done);
933 			return error;
934 		}
935 	}
936 
937 	return 0;
938 }
939 
940 static void
941 et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
942 {
943 	struct et_softc *sc = device_get_softc(dev);
944 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
945 	int i;
946 
947 	if (sc->sc_mbuf_dtag == NULL)
948 		return;
949 
950 	/*
951 	 * Destroy DMA maps for RX mbufs
952 	 */
953 	for (i = 0; i < ET_RX_NRING; ++i) {
954 		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
955 		int j;
956 
957 		for (j = 0; j < rx_done[i]; ++j) {
958 			struct et_rxbuf *rb = &rbd->rbd_buf[j];
959 
960 			KASSERT(rb->rb_mbuf == NULL,
961 			    ("RX mbuf in %d RX ring is not freed yet\n", i));
962 			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
963 		}
964 	}
965 
966 	/*
967 	 * Destroy DMA maps for TX mbufs
968 	 */
969 	for (i = 0; i < tx_done; ++i) {
970 		struct et_txbuf *tb = &tbd->tbd_buf[i];
971 
972 		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
973 		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
974 	}
975 
976 	/*
977 	 * Destroy spare mbuf DMA map
978 	 */
979 	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);
980 
981 	/*
982 	 * Destroy mbuf DMA tag
983 	 */
984 	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
985 	sc->sc_mbuf_dtag = NULL;
986 }
987 
988 static int
989 et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
990 		  void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
991 {
992 	struct et_softc *sc = device_get_softc(dev);
993 	int error;
994 
995 	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
996 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
997 				   NULL, NULL,
998 				   size, 1, BUS_SPACE_MAXSIZE_32BIT,
999 				   0, dtag);
1000 	if (error) {
1001 		device_printf(dev, "can't create DMA tag\n");
1002 		return error;
1003 	}
1004 
1005 	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
1006 				 dmap);
1007 	if (error) {
1008 		device_printf(dev, "can't allocate DMA mem\n");
1009 		bus_dma_tag_destroy(*dtag);
1010 		*dtag = NULL;
1011 		return error;
1012 	}
1013 
1014 	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
1015 				et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
1016 	if (error) {
1017 		device_printf(dev, "can't load DMA mem\n");
1018 		bus_dmamem_free(*dtag, *addr, *dmap);
1019 		bus_dma_tag_destroy(*dtag);
1020 		*dtag = NULL;
1021 		return error;
1022 	}
1023 	return 0;
1024 }
1025 
1026 static void
1027 et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
1028 {
1029 	if (dtag != NULL) {
1030 		bus_dmamap_unload(dtag, dmap);
1031 		bus_dmamem_free(dtag, addr, dmap);
1032 		bus_dma_tag_destroy(dtag);
1033 	}
1034 }
1035 
1036 static void
1037 et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
1038 {
1039 	KASSERT(nseg == 1, ("too many segments\n"));
1040 	*((bus_addr_t *)arg) = seg->ds_addr;
1041 }
1042 
1043 static void
1044 et_chip_attach(struct et_softc *sc)
1045 {
1046 	uint32_t val;
1047 
1048 	/*
1049 	 * Perform minimal initialization
1050 	 */
1051 
1052 	/* Disable loopback */
1053 	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1054 
1055 	/* Reset MAC */
1056 	CSR_WRITE_4(sc, ET_MAC_CFG1,
1057 		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1058 		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1059 		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1060 
1061 	/*
1062 	 * Setup half duplex mode
1063 	 */
1064 	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1065 	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1066 	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1067 	      ET_MAC_HDX_EXC_DEFER;
1068 	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1069 
1070 	/* Clear MAC control */
1071 	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1072 
1073 	/* Reset MII */
1074 	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1075 
1076 	/* Bring MAC out of reset state */
1077 	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1078 
1079 	/* Enable memory controllers */
1080 	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1081 }
1082 
1083 static void
1084 et_intr(void *xsc)
1085 {
1086 	struct et_softc *sc = xsc;
1087 	struct ifnet *ifp = &sc->arpcom.ac_if;
1088 	uint32_t intrs;
1089 
1090 	ASSERT_SERIALIZED(ifp->if_serializer);
1091 
1092 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1093 		return;
1094 
1095 	et_disable_intrs(sc);
1096 
1097 	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
1098 	intrs &= ET_INTRS;
1099 	if (intrs == 0)	/* Not interested */
1100 		goto back;
1101 
1102 	if (intrs & ET_INTR_RXEOF)
1103 		et_rxeof(sc);
1104 	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
1105 		et_txeof(sc);
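	/*
	 * Reload the TX timer; its interrupt also drives et_txeof()
	 * above, backstopping moderated TXEOF interrupts.
	 */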
1106 	if (intrs & ET_INTR_TIMER)
1107 		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1108 back:
1109 	et_enable_intrs(sc, ET_INTRS);
1110 }
1111 
1112 static void
1113 et_init(void *xsc)
1114 {
1115 	struct et_softc *sc = xsc;
1116 	struct ifnet *ifp = &sc->arpcom.ac_if;
1117 	const struct et_bsize *arr;
1118 	int error, i;
1119 
1120 	ASSERT_SERIALIZED(ifp->if_serializer);
1121 
1122 	et_stop(sc);
1123 
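	/*
	 * Select the standard or jumbo RX buffer configuration
	 * based on the current MTU.
	 */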
1124 	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
1125 	      et_bufsize_std : et_bufsize_jumbo;
1126 	for (i = 0; i < ET_RX_NRING; ++i) {
1127 		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
1128 		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
1129 		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
1130 	}
1131 
1132 	error = et_init_tx_ring(sc);
1133 	if (error)
1134 		goto back;
1135 
1136 	error = et_init_rx_ring(sc);
1137 	if (error)
1138 		goto back;
1139 
1140 	error = et_chip_init(sc);
1141 	if (error)
1142 		goto back;
1143 
1144 	error = et_enable_txrx(sc, 1);
1145 	if (error)
1146 		goto back;
1147 
1148 	et_enable_intrs(sc, ET_INTRS);
1149 
1150 	callout_reset(&sc->sc_tick, hz, et_tick, sc);
1151 
1152 	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1153 
1154 	ifp->if_flags |= IFF_RUNNING;
1155 	ifp->if_flags &= ~IFF_OACTIVE;
1156 back:
1157 	if (error)
1158 		et_stop(sc);
1159 }
1160 
1161 static int
1162 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1163 {
1164 	struct et_softc *sc = ifp->if_softc;
1165 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
1166 	struct ifreq *ifr = (struct ifreq *)data;
1167 	int error = 0, max_framelen;
1168 
1169 	ASSERT_SERIALIZED(ifp->if_serializer);
1170 
1171 	switch (cmd) {
1172 	case SIOCSIFFLAGS:
1173 		if (ifp->if_flags & IFF_UP) {
1174 			if (ifp->if_flags & IFF_RUNNING) {
1175 				if ((ifp->if_flags ^ sc->sc_if_flags) &
1176 				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1177 					et_setmulti(sc);
1178 			} else {
1179 				et_init(sc);
1180 			}
1181 		} else {
1182 			if (ifp->if_flags & IFF_RUNNING)
1183 				et_stop(sc);
1184 		}
1185 		sc->sc_if_flags = ifp->if_flags;
1186 		break;
1187 
1188 	case SIOCSIFMEDIA:
1189 	case SIOCGIFMEDIA:
1190 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1191 		break;
1192 
1193 	case SIOCADDMULTI:
1194 	case SIOCDELMULTI:
1195 		if (ifp->if_flags & IFF_RUNNING)
1196 			et_setmulti(sc);
1197 		break;
1198 
1199 	case SIOCSIFMTU:
1200 		if (sc->sc_flags & ET_FLAG_JUMBO)
1201 			max_framelen = ET_JUMBO_FRAMELEN;
1202 		else
1203 			max_framelen = MCLBYTES - 1;
1204 
1205 		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1206 			error = EOPNOTSUPP;
1207 			break;
1208 		}
1209 
1210 		ifp->if_mtu = ifr->ifr_mtu;
1211 		if (ifp->if_flags & IFF_RUNNING)
1212 			et_init(sc);
1213 		break;
1214 
1215 	default:
1216 		error = ether_ioctl(ifp, cmd, data);
1217 		break;
1218 	}
1219 	return error;
1220 }
1221 
1222 static void
1223 et_start(struct ifnet *ifp)
1224 {
1225 	struct et_softc *sc = ifp->if_softc;
1226 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
1227 	int trans;
1228 
1229 	ASSERT_SERIALIZED(ifp->if_serializer);
1230 
1231 	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1232 		return;
1233 
1234 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1235 		return;
1236 
1237 	trans = 0;
1238 	for (;;) {
1239 		struct mbuf *m;
1240 
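		/*
		 * Stop when fewer than ET_NSEG_SPARE free descriptors
		 * remain; et_encap() needs at least that many to queue
		 * a frame.
		 */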
1241 		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
1242 			ifp->if_flags |= IFF_OACTIVE;
1243 			break;
1244 		}
1245 
1246 		m = ifq_dequeue(&ifp->if_snd, NULL);
1247 		if (m == NULL)
1248 			break;
1249 
1250 		if (et_encap(sc, &m)) {
1251 			ifp->if_oerrors++;
1252 			ifp->if_flags |= IFF_OACTIVE;
1253 			break;
1254 		}
1255 		trans = 1;
1256 
1257 		BPF_MTAP(ifp, m);
1258 	}
1259 
1260 	if (trans)
1261 		ifp->if_timer = 5;
1262 }
1263 
1264 static void
1265 et_watchdog(struct ifnet *ifp)
1266 {
1267 	ASSERT_SERIALIZED(ifp->if_serializer);
1268 
1269 	if_printf(ifp, "watchdog timed out\n");
1270 
1271 	ifp->if_init(ifp->if_softc);
1272 	ifp->if_start(ifp);
1273 }
1274 
1275 static int
1276 et_stop_rxdma(struct et_softc *sc)
1277 {
1278 	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1279 		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1280 
1281 	DELAY(5);
1282 	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1283 		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
1284 		return ETIMEDOUT;
1285 	}
1286 	return 0;
1287 }
1288 
1289 static int
1290 et_stop_txdma(struct et_softc *sc)
1291 {
1292 	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1293 		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1294 	return 0;
1295 }
1296 
1297 static void
1298 et_free_tx_ring(struct et_softc *sc)
1299 {
1300 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
1301 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1302 	int i;
1303 
1304 	for (i = 0; i < ET_TX_NDESC; ++i) {
1305 		struct et_txbuf *tb = &tbd->tbd_buf[i];
1306 
1307 		if (tb->tb_mbuf != NULL) {
1308 			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
1309 			m_freem(tb->tb_mbuf);
1310 			tb->tb_mbuf = NULL;
1311 		}
1312 	}
1313 
1314 	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1315 	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1316 			BUS_DMASYNC_PREWRITE);
1317 }
1318 
1319 static void
1320 et_free_rx_ring(struct et_softc *sc)
1321 {
1322 	int n;
1323 
1324 	for (n = 0; n < ET_RX_NRING; ++n) {
1325 		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1326 		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
1327 		int i;
1328 
1329 		for (i = 0; i < ET_RX_NDESC; ++i) {
1330 			struct et_rxbuf *rb = &rbd->rbd_buf[i];
1331 
1332 			if (rb->rb_mbuf != NULL) {
1333 				if (!rbd->rbd_jumbo) {
1334 					bus_dmamap_unload(sc->sc_mbuf_dtag,
1335 							  rb->rb_dmap);
1336 				}
1337 				m_freem(rb->rb_mbuf);
1338 				rb->rb_mbuf = NULL;
1339 			}
1340 		}
1341 
1342 		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
1343 		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
1344 				BUS_DMASYNC_PREWRITE);
1345 	}
1346 }
1347 
1348 static void
1349 et_setmulti(struct et_softc *sc)
1350 {
1351 	struct ifnet *ifp = &sc->arpcom.ac_if;
1352 	uint32_t hash[4] = { 0, 0, 0, 0 };
1353 	uint32_t rxmac_ctrl, pktfilt;
1354 	struct ifmultiaddr *ifma;
1355 	int i, count;
1356 
1357 	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1358 	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1359 
1360 	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1361 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1362 		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1363 		goto back;
1364 	}
1365 
1366 	count = 0;
1367 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1368 		uint32_t *hp, h;
1369 
1370 		if (ifma->ifma_addr->sa_family != AF_LINK)
1371 			continue;
1372 
1373 		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1374 				   ifma->ifma_addr), ETHER_ADDR_LEN);
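		/*
		 * The upper 7 bits of the CRC index the 128-bit multicast
		 * hash, which is spread over four 32-bit registers.
		 */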
1375 		h = (h & 0x3f800000) >> 23;
1376 
1377 		hp = &hash[0];
1378 		if (h >= 32 && h < 64) {
1379 			h -= 32;
1380 			hp = &hash[1];
1381 		} else if (h >= 64 && h < 96) {
1382 			h -= 64;
1383 			hp = &hash[2];
1384 		} else if (h >= 96) {
1385 			h -= 96;
1386 			hp = &hash[3];
1387 		}
1388 		*hp |= (1 << h);
1389 
1390 		++count;
1391 	}
1392 
1393 	for (i = 0; i < 4; ++i)
1394 		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1395 
1396 	if (count > 0)
1397 		pktfilt |= ET_PKTFILT_MCAST;
1398 	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1399 back:
1400 	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1401 	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1402 }
1403 
1404 static int
1405 et_chip_init(struct et_softc *sc)
1406 {
1407 	struct ifnet *ifp = &sc->arpcom.ac_if;
1408 	uint32_t rxq_end;
1409 	int error, frame_len, rxmem_size;
1410 
1411 	/*
1412 	 * Split 16Kbytes internal memory between TX and RX
1413 	 * according to frame length.
1414 	 */
1415 	frame_len = ET_FRAMELEN(ifp->if_mtu);
1416 	if (frame_len < 2048) {
1417 		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1418 	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1419 		rxmem_size = ET_MEM_SIZE / 2;
1420 	} else {
1421 		rxmem_size = ET_MEM_SIZE -
1422 		roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1423 	}
1424 	rxq_end = ET_QUEUE_ADDR(rxmem_size);
1425 
1426 	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1427 	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1428 	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1429 	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1430 
1431 	/* No loopback */
1432 	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1433 
1434 	/* Clear MSI configure */
1435 	CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1436 
1437 	/* Disable timer */
1438 	CSR_WRITE_4(sc, ET_TIMER, 0);
1439 
1440 	/* Initialize MAC */
1441 	et_init_mac(sc);
1442 
1443 	/* Enable memory controllers */
1444 	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1445 
1446 	/* Initialize RX MAC */
1447 	et_init_rxmac(sc);
1448 
1449 	/* Initialize TX MAC */
1450 	et_init_txmac(sc);
1451 
1452 	/* Initialize RX DMA engine */
1453 	error = et_init_rxdma(sc);
1454 	if (error)
1455 		return error;
1456 
1457 	/* Initialize TX DMA engine */
1458 	error = et_init_txdma(sc);
1459 	if (error)
1460 		return error;
1461 
1462 	return 0;
1463 }
1464 
1465 static int
1466 et_init_tx_ring(struct et_softc *sc)
1467 {
1468 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1469 	struct et_txstatus_data *txsd = &sc->sc_tx_status;
1470 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
1471 
1472 	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1473 	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1474 			BUS_DMASYNC_PREWRITE);
1475 
1476 	tbd->tbd_start_index = 0;
1477 	tbd->tbd_start_wrap = 0;
1478 	tbd->tbd_used = 0;
1479 
1480 	bzero(txsd->txsd_status, sizeof(uint32_t));
1481 	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1482 			BUS_DMASYNC_PREWRITE);
1483 	return 0;
1484 }
1485 
1486 static int
1487 et_init_rx_ring(struct et_softc *sc)
1488 {
1489 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1490 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1491 	int n;
1492 
1493 	for (n = 0; n < ET_RX_NRING; ++n) {
1494 		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1495 		int i, error;
1496 
1497 		for (i = 0; i < ET_RX_NDESC; ++i) {
1498 			error = rbd->rbd_newbuf(rbd, i, 1);
1499 			if (error) {
1500 				if_printf(&sc->arpcom.ac_if, "%d ring %d buf, "
1501 					  "newbuf failed: %d\n", n, i, error);
1502 				return error;
1503 			}
1504 		}
1505 	}
1506 
1507 	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1508 	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1509 			BUS_DMASYNC_PREWRITE);
1510 
1511 	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1512 	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1513 			BUS_DMASYNC_PREWRITE);
1514 
1515 	return 0;
1516 }
1517 
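/*
 * bus_dmamap_load_mbuf() callback: record the segment list, or signal
 * failure by zeroing ctx->nsegs when more segments are needed than
 * the caller allowed.
 */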
1518 static void
1519 et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
1520 		bus_size_t mapsz __unused, int error)
1521 {
1522 	struct et_dmamap_ctx *ctx = xctx;
1523 	int i;
1524 
1525 	if (error)
1526 		return;
1527 
1528 	if (nsegs > ctx->nsegs) {
1529 		ctx->nsegs = 0;
1530 		return;
1531 	}
1532 
1533 	ctx->nsegs = nsegs;
1534 	for (i = 0; i < nsegs; ++i)
1535 		ctx->segs[i] = segs[i];
1536 }
1537 
1538 static int
1539 et_init_rxdma(struct et_softc *sc)
1540 {
1541 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1542 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1543 	struct et_rxdesc_ring *rx_ring;
1544 	int error;
1545 
1546 	error = et_stop_rxdma(sc);
1547 	if (error) {
1548 		if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n");
1549 		return error;
1550 	}
1551 
1552 	/*
1553 	 * Install RX status
1554 	 */
1555 	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1556 	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1557 
1558 	/*
1559 	 * Install RX stat ring
1560 	 */
1561 	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1562 	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1563 	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1564 	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1565 	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1566 
1567 	/* Match ET_RXSTAT_POS */
1568 	rxst_ring->rsr_index = 0;
1569 	rxst_ring->rsr_wrap = 0;
1570 
1571 	/*
1572 	 * Install the 2nd RX descriptor ring
1573 	 */
1574 	rx_ring = &sc->sc_rx_ring[1];
1575 	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1576 	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1577 	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1578 	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1579 	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1580 
1581 	/* Match ET_RX_RING1_POS */
1582 	rx_ring->rr_index = 0;
1583 	rx_ring->rr_wrap = 1;
1584 
1585 	/*
1586 	 * Install the 1st RX descriptor ring
1587 	 */
1588 	rx_ring = &sc->sc_rx_ring[0];
1589 	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1590 	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1591 	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1592 	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1593 	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1594 
1595 	/* Match ET_RX_RING0_POS */
1596 	rx_ring->rr_index = 0;
1597 	rx_ring->rr_wrap = 1;
1598 
1599 	/*
1600 	 * RX intr moderation
1601 	 */
1602 	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1603 	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1604 
1605 	return 0;
1606 }
1607 
1608 static int
1609 et_init_txdma(struct et_softc *sc)
1610 {
1611 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1612 	struct et_txstatus_data *txsd = &sc->sc_tx_status;
1613 	int error;
1614 
1615 	error = et_stop_txdma(sc);
1616 	if (error) {
1617 		if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n");
1618 		return error;
1619 	}
1620 
1621 	/*
1622 	 * Install TX descriptor ring
1623 	 */
1624 	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1625 	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1626 	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1627 
1628 	/*
1629 	 * Install TX status
1630 	 */
1631 	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1632 	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1633 
1634 	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1635 
1636 	/* Match ET_TX_READY_POS */
1637 	tx_ring->tr_ready_index = 0;
1638 	tx_ring->tr_ready_wrap = 0;
1639 
1640 	return 0;
1641 }
1642 
1643 static void
1644 et_init_mac(struct et_softc *sc)
1645 {
1646 	struct ifnet *ifp = &sc->arpcom.ac_if;
1647 	const uint8_t *eaddr = IF_LLADDR(ifp);
1648 	uint32_t val;
1649 
1650 	/* Reset MAC */
1651 	CSR_WRITE_4(sc, ET_MAC_CFG1,
1652 		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1653 		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1654 		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1655 
1656 	/*
1657 	 * Setup inter packet gap
1658 	 */
1659 	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
1660 	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
1661 	      __SHIFTIN(80, ET_IPG_MINIFG) |
1662 	      __SHIFTIN(96, ET_IPG_B2B);
1663 	CSR_WRITE_4(sc, ET_IPG, val);
1664 
1665 	/*
1666 	 * Setup half duplex mode
1667 	 */
1668 	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1669 	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1670 	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1671 	      ET_MAC_HDX_EXC_DEFER;
1672 	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1673 
1674 	/* Clear MAC control */
1675 	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1676 
1677 	/* Reset MII */
1678 	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1679 
1680 	/*
1681 	 * Set MAC address
1682 	 */
1683 	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1684 	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1685 	val = (eaddr[0] << 16) | (eaddr[1] << 24);
1686 	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1687 
1688 	/* Set max frame length */
1689 	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
1690 
1691 	/* Bring MAC out of reset state */
1692 	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1693 }
1694 
1695 static void
1696 et_init_rxmac(struct et_softc *sc)
1697 {
1698 	struct ifnet *ifp = &sc->arpcom.ac_if;
1699 	const uint8_t *eaddr = IF_LLADDR(ifp);
1700 	uint32_t val;
1701 	int i;
1702 
1703 	/* Disable RX MAC and WOL */
1704 	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1705 
1706 	/*
1707 	 * Clear all WOL related registers
1708 	 */
1709 	for (i = 0; i < 3; ++i)
1710 		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1711 	for (i = 0; i < 20; ++i)
1712 		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1713 
1714 	/*
1715 	 * Set WOL source address.  XXX is this necessary?
1716 	 */
1717 	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1718 	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1719 	val = (eaddr[0] << 8) | eaddr[1];
1720 	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1721 
1722 	/* Clear packet filters */
1723 	CSR_WRITE_4(sc, ET_PKTFILT, 0);
1724 
1725 	/* No ucast filtering */
1726 	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1727 	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1728 	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1729 
1730 	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
1731 		/*
1732 		 * In order to transmit jumbo packets greater than
1733 		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1734 		 * RX MAC and RX DMA needs to be reduced in size to
1735 		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
1736 		 * order to implement this, we must use "cut through"
1737 		 * mode in the RX MAC, which chops packets down into
1738 		 * segments.  In this case we selected 256 bytes,
1739 		 * since this is the size of the PCI-Express TLPs
1740 		 * that the ET1310 uses.
1741 		 */
1742 		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
1743 		      ET_RXMAC_MC_SEGSZ_ENABLE;
1744 	} else {
1745 		val = 0;
1746 	}
1747 	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1748 
1749 	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1750 
1751 	/* Initialize RX MAC management register */
1752 	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1753 
1754 	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1755 
1756 	CSR_WRITE_4(sc, ET_RXMAC_MGT,
1757 		    ET_RXMAC_MGT_PASS_ECRC |
1758 		    ET_RXMAC_MGT_PASS_ELEN |
1759 		    ET_RXMAC_MGT_PASS_ETRUNC |
1760 		    ET_RXMAC_MGT_CHECK_PKT);
1761 
1762 	/*
1763 	 * Configure runt filtering (may not work on certain chip generations)
1764 	 */
1765 	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
1766 	CSR_WRITE_4(sc, ET_PKTFILT, val);
1767 
1768 	/* Enable RX MAC but leave WOL disabled */
1769 	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1770 		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1771 
1772 	/*
1773 	 * Setup multicast hash and allmulti/promisc mode
1774 	 */
1775 	et_setmulti(sc);
1776 }
1777 
1778 static void
1779 et_init_txmac(struct et_softc *sc)
1780 {
1781 	/* Disable TX MAC and FC(?) */
1782 	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1783 
1784 	/* No flow control yet */
1785 	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1786 
1787 	/* Enable TX MAC but leave FC(?) disabled */
1788 	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1789 		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1790 }
1791 
1792 static int
1793 et_start_rxdma(struct et_softc *sc)
1794 {
1795 	uint32_t val = 0;
1796 
1797 	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
1798 			 ET_RXDMA_CTRL_RING0_SIZE) |
1799 	       ET_RXDMA_CTRL_RING0_ENABLE;
1800 	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
1801 			 ET_RXDMA_CTRL_RING1_SIZE) |
1802 	       ET_RXDMA_CTRL_RING1_ENABLE;
1803 
1804 	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1805 
1806 	DELAY(5);
1807 
1808 	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1809 		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
1810 		return ETIMEDOUT;
1811 	}
1812 	return 0;
1813 }
1814 
1815 static int
1816 et_start_txdma(struct et_softc *sc)
1817 {
1818 	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1819 	return 0;
1820 }
1821 
1822 static int
1823 et_enable_txrx(struct et_softc *sc, int media_upd)
1824 {
1825 	struct ifnet *ifp = &sc->arpcom.ac_if;
1826 	uint32_t val;
1827 	int i, error;
1828 
1829 	val = CSR_READ_4(sc, ET_MAC_CFG1);
1830 	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
1831 	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
1832 		 ET_MAC_CFG1_LOOPBACK);
1833 	CSR_WRITE_4(sc, ET_MAC_CFG1, val);
1834 
1835 	if (media_upd)
1836 		et_ifmedia_upd(ifp);
1837 	else
1838 		et_setmedia(sc);
1839 
1840 #define NRETRY	100
1841 
1842 	for (i = 0; i < NRETRY; ++i) {
1843 		val = CSR_READ_4(sc, ET_MAC_CFG1);
1844 		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
1845 		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
1846 			break;
1847 
1848 		DELAY(10);
1849 	}
1850 	if (i == NRETRY) {
1851 		if_printf(ifp, "can't enable RX/TX\n");
1852 		return 0;
1853 	}
1854 	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
1855 
1856 #undef NRETRY
1857 
1858 	/*
1859 	 * Start TX/RX DMA engine
1860 	 */
1861 	error = et_start_rxdma(sc);
1862 	if (error)
1863 		return error;
1864 
1865 	error = et_start_txdma(sc);
1866 	if (error)
1867 		return error;
1868 
1869 	return 0;
1870 }
1871 
1872 static void
1873 et_rxeof(struct et_softc *sc)
1874 {
1875 	struct ifnet *ifp = &sc->arpcom.ac_if;
1876 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1877 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1878 	uint32_t rxs_stat_ring;
1879 	int rxst_wrap, rxst_index;
1880 
1881 	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1882 		return;
1883 
1884 	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1885 			BUS_DMASYNC_POSTREAD);
1886 	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1887 			BUS_DMASYNC_POSTREAD);
1888 
1889 	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
1890 	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
1891 	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);
1892 
1893 	while (rxst_index != rxst_ring->rsr_index ||
1894 	       rxst_wrap != rxst_ring->rsr_wrap) {
1895 		struct et_rxbuf_data *rbd;
1896 		struct et_rxdesc_ring *rx_ring;
1897 		struct et_rxstat *st;
1898 		struct mbuf *m;
1899 		int buflen, buf_idx, ring_idx;
1900 		uint32_t rxstat_pos, rxring_pos;
1901 
1902 		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
1903 		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
1904 
1905 		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
1906 		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
1907 		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);
1908 
1909 		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
1910 			rxst_ring->rsr_index = 0;
1911 			rxst_ring->rsr_wrap ^= 1;
1912 		}
1913 		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
1914 				       ET_RXSTAT_POS_INDEX);
1915 		if (rxst_ring->rsr_wrap)
1916 			rxstat_pos |= ET_RXSTAT_POS_WRAP;
1917 		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
1918 
1919 		if (ring_idx >= ET_RX_NRING) {
1920 			ifp->if_ierrors++;
1921 			if_printf(ifp, "invalid ring index %d\n", ring_idx);
1922 			continue;
1923 		}
1924 		if (buf_idx >= ET_RX_NDESC) {
1925 			ifp->if_ierrors++;
1926 			if_printf(ifp, "invalid buf index %d\n", buf_idx);
1927 			continue;
1928 		}
1929 
1930 		rbd = &sc->sc_rx_data[ring_idx];
1931 		m = rbd->rbd_buf[buf_idx].rb_mbuf;
1932 
1933 		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
1934 			if (buflen < ETHER_CRC_LEN) {
1935 				m_freem(m);
1936 				ifp->if_ierrors++;
1937 			} else {
1938 				m->m_pkthdr.len = m->m_len = buflen;
1939 				m->m_pkthdr.rcvif = ifp;
1940 
1941 				m_adj(m, -ETHER_CRC_LEN);
1942 
1943 				ifp->if_ipackets++;
1944 				ifp->if_input(ifp, m);
1945 			}
1946 		} else {
1947 			ifp->if_ierrors++;
1948 		}
1949 		m = NULL;	/* Catch invalid reference */
1950 
1951 		rx_ring = &sc->sc_rx_ring[ring_idx];
1952 
1953 		if (buf_idx != rx_ring->rr_index) {
1954 			if_printf(ifp, "WARNING!! ring %d, "
1955 				  "buf_idx %d, rr_idx %d\n",
1956 				  ring_idx, buf_idx, rx_ring->rr_index);
1957 		}
1958 
1959 		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
1960 		if (++rx_ring->rr_index == ET_RX_NDESC) {
1961 			rx_ring->rr_index = 0;
1962 			rx_ring->rr_wrap ^= 1;
1963 		}
1964 		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
1965 		if (rx_ring->rr_wrap)
1966 			rxring_pos |= ET_RX_RING_POS_WRAP;
1967 		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
1968 	}
1969 }
1970 
1971 static int
1972 et_encap(struct et_softc *sc, struct mbuf **m0)
1973 {
1974 	struct mbuf *m = *m0;
1975 	bus_dma_segment_t segs[ET_NSEG_MAX];
1976 	struct et_dmamap_ctx ctx;
1977 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1978 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
1979 	struct et_txdesc *td;
1980 	bus_dmamap_t map;
1981 	int error, maxsegs, first_idx, last_idx, i;
1982 	uint32_t tx_ready_pos, last_td_ctrl2;
1983 
1984 	maxsegs = ET_TX_NDESC - tbd->tbd_used;
1985 	if (maxsegs > ET_NSEG_MAX)
1986 		maxsegs = ET_NSEG_MAX;
1987 	KASSERT(maxsegs >= ET_NSEG_SPARE,
1988 		("not enough spare TX desc (%d)\n", maxsegs));
1989 
1990 	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
1991 	first_idx = tx_ring->tr_ready_index;
1992 	map = tbd->tbd_buf[first_idx].tb_dmap;
1993 
1994 	ctx.nsegs = maxsegs;
1995 	ctx.segs = segs;
1996 	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
1997 				     et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
1998 	if (!error && ctx.nsegs == 0) {
1999 		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
2000 		error = EFBIG;
2001 	}
2002 	if (error && error != EFBIG) {
2003 		if_printf(&sc->arpcom.ac_if, "can't load TX mbuf, error %d\n",
2004 			  error);
2005 		goto back;
2006 	}
2007 	if (error) {	/* error == EFBIG */
2008 		struct mbuf *m_new;
2009 
2010 		m_new = m_defrag(m, MB_DONTWAIT);
2011 		if (m_new == NULL) {
2012 			if_printf(&sc->arpcom.ac_if, "can't defrag TX mbuf\n");
2013 			error = ENOBUFS;
2014 			goto back;
2015 		} else {
2016 			*m0 = m = m_new;
2017 		}
2018 
2019 		ctx.nsegs = maxsegs;
2020 		ctx.segs = segs;
2021 		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
2022 					     et_dma_buf_addr, &ctx,
2023 					     BUS_DMA_NOWAIT);
2024 		if (error || ctx.nsegs == 0) {
2025 			if (ctx.nsegs == 0) {
2026 				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
2027 				error = EFBIG;
2028 			}
2029 			if_printf(&sc->arpcom.ac_if,
2030 				  "can't load defraged TX mbuf\n");
2031 			goto back;
2032 		}
2033 	}
2034 
2035 	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);
2036 
2037 	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
2038 	sc->sc_tx += ctx.nsegs;
2039 	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2040 		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2041 		last_td_ctrl2 |= ET_TDCTRL2_INTR;
2042 	}
2043 
2044 	last_idx = -1;
2045 	for (i = 0; i < ctx.nsegs; ++i) {
2046 		int idx;
2047 
2048 		idx = (first_idx + i) % ET_TX_NDESC;
2049 		td = &tx_ring->tr_desc[idx];
2050 		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
2051 		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
2052 		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);
2053 
2054 		if (i == ctx.nsegs - 1) {	/* Last frag */
2055 			td->td_ctrl2 = last_td_ctrl2;
2056 			last_idx = idx;
2057 		}
2058 
2059 		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
2060 		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2061 			tx_ring->tr_ready_index = 0;
2062 			tx_ring->tr_ready_wrap ^= 1;
2063 		}
2064 	}
2065 	td = &tx_ring->tr_desc[first_idx];
2066 	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */
2067 
2068 	KKASSERT(last_idx >= 0);
2069 	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2070 	tbd->tbd_buf[last_idx].tb_dmap = map;
2071 	tbd->tbd_buf[last_idx].tb_mbuf = m;
2072 
2073 	tbd->tbd_used += ctx.nsegs;
2074 	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);
2075 
2076 	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2077 			BUS_DMASYNC_PREWRITE);
2078 
2079 	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
2080 		       ET_TX_READY_POS_INDEX);
2081 	if (tx_ring->tr_ready_wrap)
2082 		tx_ready_pos |= ET_TX_READY_POS_WRAP;
2083 	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
2084 
2085 	error = 0;
2086 back:
2087 	if (error) {
2088 		m_freem(m);
2089 		*m0 = NULL;
2090 	}
2091 	return error;
2092 }
2093 
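/*
 * TX completion processing.
 *
 * ET_TX_DONE_POS reports how far the hardware has progressed, again
 * as an index plus a wrap bit.  Descriptors between our software
 * start index and that point are done: their mbufs are unloaded and
 * freed, the descriptors are cleared, and tbd_used shrinks.  Once
 * enough descriptors are free again, IFF_OACTIVE is cleared and
 * if_start() is kicked to drain anything queued while the ring was
 * full; the watchdog is disarmed once the ring is completely empty.
 */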
2094 static void
2095 et_txeof(struct et_softc *sc)
2096 {
2097 	struct ifnet *ifp = &sc->arpcom.ac_if;
2098 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
2099 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
2100 	uint32_t tx_done;
2101 	int end, wrap;
2102 
2103 	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2104 		return;
2105 
2106 	if (tbd->tbd_used == 0)
2107 		return;
2108 
2109 	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2110 	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
2111 	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2112 
2113 	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2114 		struct et_txbuf *tb;
2115 
2116 		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
2117 		tb = &tbd->tbd_buf[tbd->tbd_start_index];
2118 
2119 		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
2120 		      sizeof(struct et_txdesc));
2121 		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2122 				BUS_DMASYNC_PREWRITE);
2123 
2124 		if (tb->tb_mbuf != NULL) {
2125 			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
2126 			m_freem(tb->tb_mbuf);
2127 			tb->tb_mbuf = NULL;
2128 		}
2129 
2130 		if (++tbd->tbd_start_index == ET_TX_NDESC) {
2131 			tbd->tbd_start_index = 0;
2132 			tbd->tbd_start_wrap ^= 1;
2133 		}
2134 
2135 		KKASSERT(tbd->tbd_used > 0);
2136 		tbd->tbd_used--;
2137 	}
2138 
2139 	if (tbd->tbd_used == 0)
2140 		ifp->if_timer = 0;
2141 	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
2142 		ifp->if_flags &= ~IFF_OACTIVE;
2143 
2144 	ifp->if_start(ifp);
2145 }
2146 
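/*
 * Once-a-second timer, run under the interface serializer: tick the
 * MII state machine and, when the link has come up while the TX/RX
 * path is still disabled, enable the DMA engines and restart
 * transmission.
 */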
2147 static void
2148 et_tick(void *xsc)
2149 {
2150 	struct et_softc *sc = xsc;
2151 	struct ifnet *ifp = &sc->arpcom.ac_if;
2152 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
2153 
2154 	lwkt_serialize_enter(ifp->if_serializer);
2155 
2156 	mii_tick(mii);
2157 	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
2158 	    (mii->mii_media_status & IFM_ACTIVE) &&
2159 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2160 		if_printf(ifp, "Link up, enable TX/RX\n");
2161 		if (et_enable_txrx(sc, 0) == 0)
2162 			ifp->if_start(ifp);
2163 	}
2164 	callout_reset(&sc->sc_tick, hz, et_tick, sc);
2165 
2166 	lwkt_serialize_exit(ifp->if_serializer);
2167 }
2168 
2169 static int
2170 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
2171 {
2172 	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
2173 }
2174 
2175 static int
2176 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
2177 {
2178 	return et_newbuf(rbd, buf_idx, init, MHLEN);
2179 }
2180 
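/*
 * Allocate and DMA-load a replacement RX buffer for descriptor
 * 'buf_idx' on a non-jumbo ring.
 *
 * The new mbuf is first loaded through the spare map
 * (sc_mbuf_tmp_dmap); only when that succeeds is the old buffer's
 * map unloaded and the two maps swapped.  On failure the old mbuf
 * and its mapping stay in place, so the worst case is a dropped
 * packet rather than a hole in the ring.  Either way the RX
 * descriptor is rewritten via et_setup_rxdesc() with whatever
 * physical address rb_paddr holds on exit.  With 'init' set (ring
 * initialization), blocking allocations are allowed and failures are
 * reported to the caller immediately.
 */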
2181 static int
2182 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
2183 {
2184 	struct et_softc *sc = rbd->rbd_softc;
2185 	struct et_rxbuf *rb;
2186 	struct mbuf *m;
2187 	struct et_dmamap_ctx ctx;
2188 	bus_dma_segment_t seg;
2189 	bus_dmamap_t dmap;
2190 	int error, len;
2191 
2192 	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring\n", __func__));
2193 
2194 	KKASSERT(buf_idx < ET_RX_NDESC);
2195 	rb = &rbd->rbd_buf[buf_idx];
2196 
2197 	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
2198 	if (m == NULL) {
2199 		error = ENOBUFS;
2200 
2201 		if (init) {
2202 			if_printf(&sc->arpcom.ac_if,
2203 				  "m_getl failed, size %d\n", len0);
2204 			return error;
2205 		} else {
2206 			goto back;
2207 		}
2208 	}
2209 	m->m_len = m->m_pkthdr.len = len;
2210 
2211 	/*
2212 	 * Try to load the RX mbuf using the temporary DMA map
2213 	 */
2214 	ctx.nsegs = 1;
2215 	ctx.segs = &seg;
2216 	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
2217 				     et_dma_buf_addr, &ctx,
2218 				     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2219 	if (error || ctx.nsegs == 0) {
2220 		if (!error) {
2221 			bus_dmamap_unload(sc->sc_mbuf_dtag,
2222 					  sc->sc_mbuf_tmp_dmap);
2223 			error = EFBIG;
2224 			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
2225 		}
2226 		m_freem(m);
2227 
2228 		if (init) {
2229 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2230 			return error;
2231 		} else {
2232 			goto back;
2233 		}
2234 	}
2235 
2236 	if (!init) {
2237 		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
2238 				BUS_DMASYNC_POSTREAD);
2239 		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
2240 	}
2241 	rb->rb_mbuf = m;
2242 	rb->rb_paddr = seg.ds_addr;
2243 
2244 	/*
2245 	 * Swap RX buf's DMA map with the loaded temporary one
2246 	 */
2247 	dmap = rb->rb_dmap;
2248 	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
2249 	sc->sc_mbuf_tmp_dmap = dmap;
2250 
2251 	error = 0;
2252 back:
2253 	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
2254 	return error;
2255 }
2256 
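/*
 * Sysctl handlers for the RX interrupt moderation parameters.  Both
 * accept only positive values, update the softc copy under the
 * interface serializer, and push the new value to the chip only
 * while the interface is running.
 */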
2257 static int
2258 et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2259 {
2260 	struct et_softc *sc = arg1;
2261 	struct ifnet *ifp = &sc->arpcom.ac_if;
2262 	int error = 0, v;
2263 
2264 	lwkt_serialize_enter(ifp->if_serializer);
2265 
2266 	v = sc->sc_rx_intr_npkts;
2267 	error = sysctl_handle_int(oidp, &v, 0, req);
2268 	if (error || req->newptr == NULL)
2269 		goto back;
2270 	if (v <= 0) {
2271 		error = EINVAL;
2272 		goto back;
2273 	}
2274 
2275 	if (sc->sc_rx_intr_npkts != v) {
2276 		if (ifp->if_flags & IFF_RUNNING)
2277 			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2278 		sc->sc_rx_intr_npkts = v;
2279 	}
2280 back:
2281 	lwkt_serialize_exit(ifp->if_serializer);
2282 	return error;
2283 }
2284 
2285 static int
2286 et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2287 {
2288 	struct et_softc *sc = arg1;
2289 	struct ifnet *ifp = &sc->arpcom.ac_if;
2290 	int error = 0, v;
2291 
2292 	lwkt_serialize_enter(ifp->if_serializer);
2293 
2294 	v = sc->sc_rx_intr_delay;
2295 	error = sysctl_handle_int(oidp, &v, 0, req);
2296 	if (error || req->newptr == NULL)
2297 		goto back;
2298 	if (v <= 0) {
2299 		error = EINVAL;
2300 		goto back;
2301 	}
2302 
2303 	if (sc->sc_rx_intr_delay != v) {
2304 		if (ifp->if_flags & IFF_RUNNING)
2305 			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2306 		sc->sc_rx_intr_delay = v;
2307 	}
2308 back:
2309 	lwkt_serialize_exit(ifp->if_serializer);
2310 	return error;
2311 }
2312 
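/*
 * Program the MAC for the currently resolved media: select GMII mode
 * for 1000baseT and MII mode otherwise, and set the full-/half-duplex
 * bits in MAC_CFG2/MAC_CTRL accordingly.  Length checking, CRC
 * generation and padding are (re)enabled at the same time.
 */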
2313 static void
2314 et_setmedia(struct et_softc *sc)
2315 {
2316 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
2317 	uint32_t cfg2, ctrl;
2318 
2319 	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
2320 	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
2321 		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
2322 	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
2323 		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
2324 
2325 	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
2326 	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
2327 
2328 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
2329 		cfg2 |= ET_MAC_CFG2_MODE_GMII;
2330 	} else {
2331 		cfg2 |= ET_MAC_CFG2_MODE_MII;
2332 		ctrl |= ET_MAC_CTRL_MODE_MII;
2333 	}
2334 
2335 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
2336 		cfg2 |= ET_MAC_CFG2_FDX;
2337 	else
2338 		ctrl |= ET_MAC_CTRL_GHDX;
2339 
2340 	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
2341 	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
2342 }
2343 
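/*
 * Jumbo receive buffers are not individual mbuf clusters; instead a
 * single contiguous DMA area of ET_JUMBO_MEM_SIZE bytes is allocated
 * up front and carved into ET_JSLOTS slots of ET_JLEN bytes each.
 * Free slots live on an SLIST protected by a dedicated serializer.
 */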
2344 static int
2345 et_jumbo_mem_alloc(device_t dev)
2346 {
2347 	struct et_softc *sc = device_get_softc(dev);
2348 	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
2349 	bus_addr_t paddr;
2350 	uint8_t *buf;
2351 	int error, i;
2352 
2353 	error = et_dma_mem_create(dev, ET_JUMBO_MEM_SIZE, &jd->jd_dtag,
2354 				  &jd->jd_buf, &paddr, &jd->jd_dmap);
2355 	if (error) {
2356 		device_printf(dev, "can't create jumbo DMA stuffs\n");
2357 		return error;
2358 	}
2359 
2360 	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
2361 			       M_WAITOK | M_ZERO);
2362 	lwkt_serialize_init(&jd->jd_serializer);
2363 	SLIST_INIT(&jd->jd_free_slots);
2364 
2365 	buf = jd->jd_buf;
2366 	for (i = 0; i < ET_JSLOTS; ++i) {
2367 		struct et_jslot *jslot = &jd->jd_slots[i];
2368 
2369 		jslot->jslot_data = jd;
2370 		jslot->jslot_buf = buf;
2371 		jslot->jslot_paddr = paddr;
2372 		jslot->jslot_inuse = 0;
2373 		jslot->jslot_index = i;
2374 		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);
2375 
2376 		buf += ET_JLEN;
2377 		paddr += ET_JLEN;
2378 	}
2379 	return 0;
2380 }
2381 
2382 static void
2383 et_jumbo_mem_free(device_t dev)
2384 {
2385 	struct et_softc *sc = device_get_softc(dev);
2386 	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
2387 
2388 	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);
2389 
2390 	kfree(jd->jd_slots, M_DEVBUF);
2391 	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
2392 }
2393 
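/*
 * Jumbo slots are handed to mbufs as external storage.  jslot_inuse
 * acts as the reference count: et_jref()/et_jfree() serve as the
 * mbuf ext_ref/ext_free callbacks, and a slot is returned to the
 * free list only when its last reference is dropped in et_jfree().
 */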
2394 static struct et_jslot *
2395 et_jalloc(struct et_jumbo_data *jd)
2396 {
2397 	struct et_jslot *jslot;
2398 
2399 	lwkt_serialize_enter(&jd->jd_serializer);
2400 
2401 	jslot = SLIST_FIRST(&jd->jd_free_slots);
2402 	if (jslot) {
2403 		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
2404 		jslot->jslot_inuse = 1;
2405 	}
2406 
2407 	lwkt_serialize_exit(&jd->jd_serializer);
2408 	return jslot;
2409 }
2410 
2411 static void
2412 et_jfree(void *xjslot)
2413 {
2414 	struct et_jslot *jslot = xjslot;
2415 	struct et_jumbo_data *jd = jslot->jslot_data;
2416 
2417 	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
2418 		panic("%s wrong jslot!?\n", __func__);
2419 	} else if (jslot->jslot_inuse == 0) {
2420 		panic("%s jslot already freed\n", __func__);
2421 	} else {
2422 		lwkt_serialize_enter(&jd->jd_serializer);
2423 
2424 		atomic_subtract_int(&jslot->jslot_inuse, 1);
2425 		if (jslot->jslot_inuse == 0) {
2426 			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
2427 					  jslot_link);
2428 		}
2429 
2430 		lwkt_serialize_exit(&jd->jd_serializer);
2431 	}
2432 }
2433 
2434 static void
2435 et_jref(void *xjslot)
2436 {
2437 	struct et_jslot *jslot = xjslot;
2438 	struct et_jumbo_data *jd = jslot->jslot_data;
2439 
2440 	if (&jd->jd_slots[jslot->jslot_index] != jslot)
2441 		panic("%s wrong jslot!?\n", __func__);
2442 	else if (jslot->jslot_inuse == 0)
2443 		panic("%s jslot already freed\n", __func__);
2444 	else
2445 		atomic_add_int(&jslot->jslot_inuse, 1);
2446 }
2447 
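/*
 * Jumbo counterpart of et_newbuf(): grab a header mbuf, attach a
 * pre-mapped jumbo slot to it as M_EXT external storage and record
 * the slot's physical address in the RX buffer.  As with
 * et_newbuf(), the RX descriptor is rewritten even on failure so it
 * keeps pointing at the previous buffer.
 */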
2448 static int
2449 et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
2450 {
2451 	struct et_softc *sc = rbd->rbd_softc;
2452 	struct et_rxbuf *rb;
2453 	struct mbuf *m;
2454 	struct et_jslot *jslot;
2455 	int error;
2456 
2457 	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring\n", __func__));
2458 
2459 	KKASSERT(buf_idx < ET_RX_NDESC);
2460 	rb = &rbd->rbd_buf[buf_idx];
2461 
2462 	error = ENOBUFS;
2463 
2464 	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
2465 	if (m == NULL) {
2466 		if (init) {
2467 			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
2468 			return error;
2469 		} else {
2470 			goto back;
2471 		}
2472 	}
2473 
2474 	jslot = et_jalloc(&sc->sc_jumbo_data);
2475 	if (jslot == NULL) {
2476 		m_freem(m);
2477 
2478 		if (init) {
2479 			if_printf(&sc->arpcom.ac_if,
2480 				  "jslot allocation failed\n");
2481 			return error;
2482 		} else {
2483 			goto back;
2484 		}
2485 	}
2486 
2487 	m->m_ext.ext_arg = jslot;
2488 	m->m_ext.ext_buf = jslot->jslot_buf;
2489 	m->m_ext.ext_free = et_jfree;
2490 	m->m_ext.ext_ref = et_jref;
2491 	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
2492 	m->m_flags |= M_EXT;
2493 	m->m_data = m->m_ext.ext_buf;
2494 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2495 
2496 	rb->rb_mbuf = m;
2497 	rb->rb_paddr = jslot->jslot_paddr;
2498 
2499 	error = 0;
2500 back:
2501 	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
2502 	return error;
2503 }
2504 
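/*
 * Write one RX descriptor: split the buffer's bus address into the
 * high/low words, store the buffer index in the control field and
 * sync the descriptor ring so the update is visible to the NIC.
 */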
2505 static void
2506 et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
2507 {
2508 	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
2509 	struct et_rxdesc *desc;
2510 
2511 	KKASSERT(buf_idx < ET_RX_NDESC);
2512 	desc = &rx_ring->rr_desc[buf_idx];
2513 
2514 	desc->rd_addr_hi = ET_ADDR_HI(paddr);
2515 	desc->rd_addr_lo = ET_ADDR_LO(paddr);
2516 	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
2517 
2518 	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
2519 			BUS_DMASYNC_PREWRITE);
2520 }
2521