1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
29  */
30 
31 #include "opt_polling.h"
32 
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 
46 #include <net/ethernet.h>
47 #include <net/if.h>
48 #include <net/bpf.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/ifq_var.h>
53 #include <net/vlan/if_vlan_var.h>
54 #include <net/vlan/if_vlan_ether.h>
55 
56 #include <dev/netif/mii_layer/miivar.h>
57 #include <dev/netif/mii_layer/jmphyreg.h>
58 
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pcidevs.h>
62 
63 #include <dev/netif/jme/if_jmereg.h>
64 #include <dev/netif/jme/if_jmevar.h>
65 
66 #include "miibus_if.h"
67 
68 /* Define the following to enable printing of Rx errors. */
69 #undef	JME_SHOW_ERRORS
70 
71 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
72 
73 #define JME_RSS_DEBUG
74 
75 #ifdef JME_RSS_DEBUG
76 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
77 do { \
78 	if ((sc)->jme_rss_debug > (lvl)) \
79 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
80 } while (0)
81 #else	/* !JME_RSS_DEBUG */
82 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
83 #endif	/* JME_RSS_DEBUG */
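/*
 * Illustrative usage (with hypothetical variables `ring' and `cnt'):
 *
 *	JME_RSS_DPRINTF(sc, 1, "ring %d: %d pkts\n", ring, cnt);
 *
 * The message is printed via if_printf() only when jme_rss_debug
 * exceeds the given level; with JME_RSS_DEBUG undefined the macro
 * compiles to nothing.
 */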
84 
85 static int	jme_probe(device_t);
86 static int	jme_attach(device_t);
87 static int	jme_detach(device_t);
88 static int	jme_shutdown(device_t);
89 static int	jme_suspend(device_t);
90 static int	jme_resume(device_t);
91 
92 static int	jme_miibus_readreg(device_t, int, int);
93 static int	jme_miibus_writereg(device_t, int, int, int);
94 static void	jme_miibus_statchg(device_t);
95 
96 static void	jme_init(void *);
97 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
98 static void	jme_start(struct ifnet *);
99 static void	jme_watchdog(struct ifnet *);
100 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
101 static int	jme_mediachange(struct ifnet *);
102 #ifdef DEVICE_POLLING
103 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
104 #endif
105 
106 static void	jme_intr(void *);
107 static void	jme_txeof(struct jme_softc *);
108 static void	jme_rxeof(struct jme_softc *, int);
109 static int	jme_rxeof_chain(struct jme_softc *, int,
110 				struct mbuf_chain *, int);
111 static void	jme_rx_intr(struct jme_softc *, uint32_t);
112 
113 static int	jme_dma_alloc(struct jme_softc *);
114 static void	jme_dma_free(struct jme_softc *, int);
115 static void	jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
116 static void	jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
117 				  bus_size_t, int);
118 static int	jme_init_rx_ring(struct jme_softc *, int);
119 static void	jme_init_tx_ring(struct jme_softc *);
120 static void	jme_init_ssb(struct jme_softc *);
121 static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
122 static int	jme_encap(struct jme_softc *, struct mbuf **);
123 static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
124 static int	jme_rxring_dma_alloc(struct jme_softc *, bus_addr_t, int);
125 static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);
126 
127 static void	jme_tick(void *);
128 static void	jme_stop(struct jme_softc *);
129 static void	jme_reset(struct jme_softc *);
130 static void	jme_set_vlan(struct jme_softc *);
131 static void	jme_set_filter(struct jme_softc *);
132 static void	jme_stop_tx(struct jme_softc *);
133 static void	jme_stop_rx(struct jme_softc *);
134 static void	jme_mac_config(struct jme_softc *);
135 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
136 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
137 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
138 #ifdef notyet
139 static void	jme_setwol(struct jme_softc *);
140 static void	jme_setlinkspeed(struct jme_softc *);
141 #endif
142 static void	jme_set_tx_coal(struct jme_softc *);
143 static void	jme_set_rx_coal(struct jme_softc *);
144 static void	jme_enable_rss(struct jme_softc *);
145 static void	jme_disable_rss(struct jme_softc *);
146 
147 static void	jme_sysctl_node(struct jme_softc *);
148 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
149 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
150 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
151 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
152 
153 /*
154  * Devices supported by this driver.
155  */
156 static const struct jme_dev {
157 	uint16_t	jme_vendorid;
158 	uint16_t	jme_deviceid;
159 	uint32_t	jme_caps;
160 	const char	*jme_name;
161 } jme_devs[] = {
162 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
163 	    JME_CAP_JUMBO,
164 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
165 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
166 	    JME_CAP_FASTETH,
167 	    "JMicron Inc, JMC260 Fast Ethernet" },
168 	{ 0, 0, 0, NULL }
169 };
170 
171 static device_method_t jme_methods[] = {
172 	/* Device interface. */
173 	DEVMETHOD(device_probe,		jme_probe),
174 	DEVMETHOD(device_attach,	jme_attach),
175 	DEVMETHOD(device_detach,	jme_detach),
176 	DEVMETHOD(device_shutdown,	jme_shutdown),
177 	DEVMETHOD(device_suspend,	jme_suspend),
178 	DEVMETHOD(device_resume,	jme_resume),
179 
180 	/* Bus interface. */
181 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
182 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
183 
184 	/* MII interface. */
185 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
186 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
187 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
188 
189 	{ NULL, NULL }
190 };
191 
192 static driver_t jme_driver = {
193 	"jme",
194 	jme_methods,
195 	sizeof(struct jme_softc)
196 };
197 
198 static devclass_t jme_devclass;
199 
200 DECLARE_DUMMY_MODULE(if_jme);
201 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
202 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
203 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
204 
205 static const struct {
206 	uint32_t	jme_coal;
207 	uint32_t	jme_comp;
208 } jme_rx_status[JME_NRXRING_MAX] = {
209 	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
210 	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
211 	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
212 	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
213 };
214 
215 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
216 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
217 static int	jme_rx_ring_count = JME_NRXRING_DEF;
218 
219 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
220 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
221 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
222 
223 /*
224  *	Read a PHY register on the MII of the JMC250.
225  */
226 static int
227 jme_miibus_readreg(device_t dev, int phy, int reg)
228 {
229 	struct jme_softc *sc = device_get_softc(dev);
230 	uint32_t val;
231 	int i;
232 
233 	/* For FPGA version, PHY address 0 should be ignored. */
234 	if (sc->jme_caps & JME_CAP_FPGA) {
235 		if (phy == 0)
236 			return (0);
237 	} else {
238 		if (sc->jme_phyaddr != phy)
239 			return (0);
240 	}
241 
242 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
243 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
244 
245 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
246 		DELAY(1);
247 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
248 			break;
249 	}
250 	if (i == 0) {
251 		device_printf(sc->jme_dev, "phy read timeout: "
252 			      "phy %d, reg %d\n", phy, reg);
253 		return (0);
254 	}
255 
256 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
257 }
258 
259 /*
260  *	Write a PHY register on the MII of the JMC250.
261  */
262 static int
263 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
264 {
265 	struct jme_softc *sc = device_get_softc(dev);
266 	int i;
267 
268 	/* For FPGA version, PHY address 0 should be ignored. */
269 	if (sc->jme_caps & JME_CAP_FPGA) {
270 		if (phy == 0)
271 			return (0);
272 	} else {
273 		if (sc->jme_phyaddr != phy)
274 			return (0);
275 	}
276 
277 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
278 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
279 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
280 
281 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
282 		DELAY(1);
283 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
284 			break;
285 	}
286 	if (i == 0) {
287 		device_printf(sc->jme_dev, "phy write timeout: "
288 			      "phy %d, reg %d\n", phy, reg);
289 	}
290 
291 	return (0);
292 }
293 
294 /*
295  *	Callback from MII layer when media changes.
296  */
297 static void
298 jme_miibus_statchg(device_t dev)
299 {
300 	struct jme_softc *sc = device_get_softc(dev);
301 	struct ifnet *ifp = &sc->arpcom.ac_if;
302 	struct mii_data *mii;
303 	struct jme_txdesc *txd;
304 	bus_addr_t paddr;
305 	int i, r;
306 
307 	ASSERT_SERIALIZED(ifp->if_serializer);
308 
309 	if ((ifp->if_flags & IFF_RUNNING) == 0)
310 		return;
311 
312 	mii = device_get_softc(sc->jme_miibus);
313 
314 	sc->jme_flags &= ~JME_FLAG_LINK;
315 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
316 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
317 		case IFM_10_T:
318 		case IFM_100_TX:
319 			sc->jme_flags |= JME_FLAG_LINK;
320 			break;
321 		case IFM_1000_T:
322 			if (sc->jme_caps & JME_CAP_FASTETH)
323 				break;
324 			sc->jme_flags |= JME_FLAG_LINK;
325 			break;
326 		default:
327 			break;
328 		}
329 	}
330 
331 	/*
332 	 * Disabling the Rx/Tx MACs has the side-effect of resetting
333 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
334 	 * the Tx/Rx descriptor rings.  The driver should therefore
335 	 * reset its internal producer/consumer pointers and reclaim
336 	 * any allocated resources.  Note that just saving the values
337 	 * of the JME_TXNDA and JME_RXNDA registers before stopping
338 	 * the MACs and restoring them afterwards is not sufficient
339 	 * to ensure a correct MAC state, because stopping MAC
340 	 * operation can take a while and the hardware might have
341 	 * updated the JME_TXNDA/JME_RXNDA registers during the
342 	 * stop operation.
343 	 */
344 
345 	/* Disable interrupts */
346 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
347 
348 	/* Stop driver */
349 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
350 	ifp->if_timer = 0;
351 	callout_stop(&sc->jme_tick_ch);
352 
353 	/* Stop receiver/transmitter. */
354 	jme_stop_rx(sc);
355 	jme_stop_tx(sc);
356 
357 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
358 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
359 
360 		jme_rxeof(sc, r);
361 		if (rdata->jme_rxhead != NULL)
362 			m_freem(rdata->jme_rxhead);
363 		JME_RXCHAIN_RESET(sc, r);
364 
365 		/*
366 		 * Reuse configured Rx descriptors and reset
367 		 * producer/consumer index.
368 		 */
369 		rdata->jme_rx_cons = 0;
370 	}
371 
372 	jme_txeof(sc);
373 	if (sc->jme_cdata.jme_tx_cnt != 0) {
374 		/* Remove queued packets for transmit. */
375 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
376 			txd = &sc->jme_cdata.jme_txdesc[i];
377 			if (txd->tx_m != NULL) {
378 				bus_dmamap_unload(
379 				    sc->jme_cdata.jme_tx_tag,
380 				    txd->tx_dmamap);
381 				m_freem(txd->tx_m);
382 				txd->tx_m = NULL;
383 				txd->tx_ndesc = 0;
384 				ifp->if_oerrors++;
385 			}
386 		}
387 	}
388 	jme_init_tx_ring(sc);
389 
390 	/* Initialize shadow status block. */
391 	jme_init_ssb(sc);
392 
393 	/* Program MAC with resolved speed/duplex/flow-control. */
394 	if (sc->jme_flags & JME_FLAG_LINK) {
395 		jme_mac_config(sc);
396 
397 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
398 
399 		/* Set Tx ring address to the hardware. */
400 		paddr = sc->jme_cdata.jme_tx_ring_paddr;
401 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
402 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
403 
404 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
405 			CSR_WRITE_4(sc, JME_RXCSR,
406 			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
407 
408 			/* Set Rx ring address to the hardware. */
409 			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
410 			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
411 			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
412 		}
413 
414 		/* Restart receiver/transmitter. */
415 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
416 		    RXCSR_RXQ_START);
417 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
418 	}
419 
420 	ifp->if_flags |= IFF_RUNNING;
421 	ifp->if_flags &= ~IFF_OACTIVE;
422 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
423 
424 #ifdef DEVICE_POLLING
425 	if (!(ifp->if_flags & IFF_POLLING))
426 #endif
427 	/* Reenable interrupts. */
428 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
429 }
430 
431 /*
432  *	Get the current interface media status.
433  */
434 static void
435 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
436 {
437 	struct jme_softc *sc = ifp->if_softc;
438 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
439 
440 	ASSERT_SERIALIZED(ifp->if_serializer);
441 
442 	mii_pollstat(mii);
443 	ifmr->ifm_status = mii->mii_media_status;
444 	ifmr->ifm_active = mii->mii_media_active;
445 }
446 
447 /*
448  *	Set hardware to newly-selected media.
449  */
450 static int
451 jme_mediachange(struct ifnet *ifp)
452 {
453 	struct jme_softc *sc = ifp->if_softc;
454 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
455 	int error;
456 
457 	ASSERT_SERIALIZED(ifp->if_serializer);
458 
459 	if (mii->mii_instance != 0) {
460 		struct mii_softc *miisc;
461 
462 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
463 			mii_phy_reset(miisc);
464 	}
465 	error = mii_mediachg(mii);
466 
467 	return (error);
468 }
469 
470 static int
471 jme_probe(device_t dev)
472 {
473 	const struct jme_dev *sp;
474 	uint16_t vid, did;
475 
476 	vid = pci_get_vendor(dev);
477 	did = pci_get_device(dev);
478 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
479 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
480 			struct jme_softc *sc = device_get_softc(dev);
481 
482 			sc->jme_caps = sp->jme_caps;
483 			device_set_desc(dev, sp->jme_name);
484 			return (0);
485 		}
486 	}
487 	return (ENXIO);
488 }
489 
490 static int
491 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
492 {
493 	uint32_t reg;
494 	int i;
495 
496 	*val = 0;
497 	for (i = JME_TIMEOUT; i > 0; i--) {
498 		reg = CSR_READ_4(sc, JME_SMBCSR);
499 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
500 			break;
501 		DELAY(1);
502 	}
503 
504 	if (i == 0) {
505 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
506 		return (ETIMEDOUT);
507 	}
508 
509 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
510 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
511 	for (i = JME_TIMEOUT; i > 0; i--) {
512 		DELAY(1);
513 		reg = CSR_READ_4(sc, JME_SMBINTF);
514 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
515 			break;
516 	}
517 
518 	if (i == 0) {
519 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
520 		return (ETIMEDOUT);
521 	}
522 
523 	reg = CSR_READ_4(sc, JME_SMBINTF);
524 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
525 
526 	return (0);
527 }
528 
529 static int
530 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
531 {
532 	uint8_t fup, reg, val;
533 	uint32_t offset;
534 	int match;
535 
536 	offset = 0;
537 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
538 	    fup != JME_EEPROM_SIG0)
539 		return (ENOENT);
540 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
541 	    fup != JME_EEPROM_SIG1)
542 		return (ENOENT);
543 	match = 0;
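	/*
	 * Walk the EEPROM descriptors: each descriptor carries a
	 * func/page byte (fup), a register offset and a data byte.
	 * Descriptors whose offset falls within JME_PAR0 to
	 * JME_PAR0 + ETHER_ADDR_LEN - 1 each contribute one byte
	 * of the station address.
	 */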
544 	do {
545 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
546 			break;
547 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
548 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
549 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
550 				break;
551 			if (reg >= JME_PAR0 &&
552 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
553 				if (jme_eeprom_read_byte(sc, offset + 2,
554 				    &val) != 0)
555 					break;
556 				eaddr[reg - JME_PAR0] = val;
557 				match++;
558 			}
559 		}
560 		/* Check for the end of EEPROM descriptor. */
561 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
562 			break;
563 		/* Try next eeprom descriptor. */
564 		offset += JME_EEPROM_DESC_BYTES;
565 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
566 
567 	if (match == ETHER_ADDR_LEN)
568 		return (0);
569 
570 	return (ENOENT);
571 }
572 
573 static void
574 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
575 {
576 	uint32_t par0, par1;
577 
578 	/* Read station address. */
579 	par0 = CSR_READ_4(sc, JME_PAR0);
580 	par1 = CSR_READ_4(sc, JME_PAR1);
581 	par1 &= 0xFFFF;
582 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
583 		device_printf(sc->jme_dev,
584 		    "generating fake ethernet address.\n");
585 		par0 = karc4random();
586 		/* Set OUI to JMicron. */
587 		eaddr[0] = 0x00;
588 		eaddr[1] = 0x1B;
589 		eaddr[2] = 0x8C;
590 		eaddr[3] = (par0 >> 16) & 0xff;
591 		eaddr[4] = (par0 >> 8) & 0xff;
592 		eaddr[5] = par0 & 0xff;
593 	} else {
594 		eaddr[0] = (par0 >> 0) & 0xFF;
595 		eaddr[1] = (par0 >> 8) & 0xFF;
596 		eaddr[2] = (par0 >> 16) & 0xFF;
597 		eaddr[3] = (par0 >> 24) & 0xFF;
598 		eaddr[4] = (par1 >> 0) & 0xFF;
599 		eaddr[5] = (par1 >> 8) & 0xFF;
600 	}
601 }
602 
603 static int
604 jme_attach(device_t dev)
605 {
606 	struct jme_softc *sc = device_get_softc(dev);
607 	struct ifnet *ifp = &sc->arpcom.ac_if;
608 	uint32_t reg;
609 	uint16_t did;
610 	uint8_t pcie_ptr, rev;
611 	int error = 0;
612 	uint8_t eaddr[ETHER_ADDR_LEN];
613 
614 	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
615 	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
616 		sc->jme_rx_desc_cnt = JME_NDESC_MAX;
617 
618 	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
619 	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
620 		sc->jme_tx_desc_cnt = JME_NDESC_MAX;
621 
622 	sc->jme_rx_ring_cnt = jme_rx_ring_count;
623 	if (sc->jme_rx_ring_cnt <= 0)
624 		sc->jme_rx_ring_cnt = JME_NRXRING_1;
625 	if (sc->jme_rx_ring_cnt > ncpus2)
626 		sc->jme_rx_ring_cnt = ncpus2;
627 
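	/* Round the ring count down to a supported value (4, 2 or 1). */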
628 	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
629 		sc->jme_rx_ring_cnt = JME_NRXRING_4;
630 	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
631 		sc->jme_rx_ring_cnt = JME_NRXRING_2;
632 
633 	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN) {
634 		sc->jme_caps |= JME_CAP_RSS;
635 		sc->jme_flags |= JME_FLAG_RSS;
636 	}
637 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
638 
639 	sc->jme_dev = dev;
640 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
641 
642 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
643 
644 	callout_init(&sc->jme_tick_ch);
645 
646 #ifndef BURN_BRIDGES
647 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
648 		uint32_t irq, mem;
649 
650 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
651 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
652 
653 		device_printf(dev, "chip is in D%d power mode "
654 		    "-- setting to D0\n", pci_get_powerstate(dev));
655 
656 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
657 
658 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
659 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
660 	}
661 #endif	/* !BURN_BRIDGES */
662 
663 	/* Enable bus mastering */
664 	pci_enable_busmaster(dev);
665 
666 	/*
667 	 * Allocate IO memory
668 	 *
669 	 * The JMC250 supports both memory-mapped and I/O register space
670 	 * access.  Because I/O register access would have to use
671 	 * different BARs to reach the registers, it's a waste of time
672 	 * to use I/O register space access.  The JMC250 maps its entire
673 	 * register space in 16K of memory.
674 	 */
675 	sc->jme_mem_rid = JME_PCIR_BAR;
676 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
677 						 &sc->jme_mem_rid, RF_ACTIVE);
678 	if (sc->jme_mem_res == NULL) {
679 		device_printf(dev, "can't allocate IO memory\n");
680 		return ENXIO;
681 	}
682 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
683 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
684 
685 	/*
686 	 * Allocate IRQ
687 	 */
688 	sc->jme_irq_rid = 0;
689 	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
690 						 &sc->jme_irq_rid,
691 						 RF_SHAREABLE | RF_ACTIVE);
692 	if (sc->jme_irq_res == NULL) {
693 		device_printf(dev, "can't allocate irq\n");
694 		error = ENXIO;
695 		goto fail;
696 	}
697 
698 	/*
699 	 * Extract revisions
700 	 */
701 	reg = CSR_READ_4(sc, JME_CHIPMODE);
702 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
703 	    CHIPMODE_NOT_FPGA) {
704 		sc->jme_caps |= JME_CAP_FPGA;
705 		if (bootverbose) {
706 			device_printf(dev, "FPGA revision: 0x%04x\n",
707 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
708 				      CHIPMODE_FPGA_REV_SHIFT);
709 		}
710 	}
711 
712 	/* NOTE: FM revision is put in the upper 4 bits */
713 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
714 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
715 	if (bootverbose)
716 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
717 
718 	did = pci_get_device(dev);
719 	switch (did) {
720 	case PCI_PRODUCT_JMICRON_JMC250:
721 		if (rev == JME_REV1_A2)
722 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
723 		break;
724 
725 	case PCI_PRODUCT_JMICRON_JMC260:
726 		if (rev == JME_REV2)
727 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
728 		break;
729 
730 	default:
731 		panic("unknown device id 0x%04x", did);
732 	}
733 	if (rev >= JME_REV2) {
734 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
735 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
736 				      GHC_TXMAC_CLKSRC_1000;
737 	}
738 
739 	/* Reset the ethernet controller. */
740 	jme_reset(sc);
741 
742 	/* Get station address. */
743 	reg = CSR_READ_4(sc, JME_SMBCSR);
744 	if (reg & SMBCSR_EEPROM_PRESENT)
745 		error = jme_eeprom_macaddr(sc, eaddr);
746 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
747 		if (error != 0 && (bootverbose)) {
748 			device_printf(dev, "ethernet hardware address "
749 				      "not found in EEPROM.\n");
750 		}
751 		jme_reg_macaddr(sc, eaddr);
752 	}
753 
754 	/*
755 	 * Save PHY address.
756 	 * The integrated JR0211 has a fixed PHY address, whereas the
757 	 * FPGA version requires PHY probing to get the correct PHY address.
758 	 */
759 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
760 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
761 		    GPREG0_PHY_ADDR_MASK;
762 		if (bootverbose) {
763 			device_printf(dev, "PHY is at address %d.\n",
764 			    sc->jme_phyaddr);
765 		}
766 	} else {
767 		sc->jme_phyaddr = 0;
768 	}
769 
770 	/* Set max allowable DMA size. */
771 	pcie_ptr = pci_get_pciecap_ptr(dev);
772 	if (pcie_ptr != 0) {
773 		uint16_t ctrl;
774 
775 		sc->jme_caps |= JME_CAP_PCIE;
776 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
777 		if (bootverbose) {
778 			device_printf(dev, "Read request size : %d bytes.\n",
779 			    128 << ((ctrl >> 12) & 0x07));
780 			device_printf(dev, "TLP payload size : %d bytes.\n",
781 			    128 << ((ctrl >> 5) & 0x07));
782 		}
783 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
784 		case PCIEM_DEVCTL_MAX_READRQ_128:
785 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
786 			break;
787 		case PCIEM_DEVCTL_MAX_READRQ_256:
788 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
789 			break;
790 		default:
791 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
792 			break;
793 		}
794 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
795 	} else {
796 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
797 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
798 	}
799 
800 #ifdef notyet
801 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
802 		sc->jme_caps |= JME_CAP_PMCAP;
803 #endif
804 
805 	/*
806 	 * Create sysctl tree
807 	 */
808 	jme_sysctl_node(sc);
809 
810 	/* Allocate DMA structures */
811 	error = jme_dma_alloc(sc);
812 	if (error)
813 		goto fail;
814 
815 	ifp->if_softc = sc;
816 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
817 	ifp->if_init = jme_init;
818 	ifp->if_ioctl = jme_ioctl;
819 	ifp->if_start = jme_start;
820 #ifdef DEVICE_POLLING
821 	ifp->if_poll = jme_poll;
822 #endif
823 	ifp->if_watchdog = jme_watchdog;
824 	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
825 	ifq_set_ready(&ifp->if_snd);
826 
827 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
828 	ifp->if_capabilities = IFCAP_HWCSUM |
829 			       IFCAP_VLAN_MTU |
830 			       IFCAP_VLAN_HWTAGGING;
831 	ifp->if_hwassist = JME_CSUM_FEATURES;
832 	ifp->if_capenable = ifp->if_capabilities;
833 
834 	/* Set up MII bus. */
835 	error = mii_phy_probe(dev, &sc->jme_miibus,
836 			      jme_mediachange, jme_mediastatus);
837 	if (error) {
838 		device_printf(dev, "no PHY found!\n");
839 		goto fail;
840 	}
841 
842 	/*
843 	 * Save PHYADDR for FPGA mode PHY.
844 	 */
845 	if (sc->jme_caps & JME_CAP_FPGA) {
846 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
847 
848 		if (mii->mii_instance != 0) {
849 			struct mii_softc *miisc;
850 
851 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
852 				if (miisc->mii_phy != 0) {
853 					sc->jme_phyaddr = miisc->mii_phy;
854 					break;
855 				}
856 			}
857 			if (sc->jme_phyaddr != 0) {
858 				device_printf(sc->jme_dev,
859 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
860 				/* vendor magic. */
861 				jme_miibus_writereg(dev, sc->jme_phyaddr,
862 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
863 
864 				/* XXX should we clear JME_WA_EXTFIFO */
865 			}
866 		}
867 	}
868 
869 	ether_ifattach(ifp, eaddr, NULL);
870 
871 	/* Tell the upper layer(s) we support long frames. */
872 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
873 
874 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
875 			       &sc->jme_irq_handle, ifp->if_serializer);
876 	if (error) {
877 		device_printf(dev, "could not set up interrupt handler.\n");
878 		ether_ifdetach(ifp);
879 		goto fail;
880 	}
881 
882 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
883 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
884 	return 0;
885 fail:
886 	jme_detach(dev);
887 	return (error);
888 }
889 
890 static int
891 jme_detach(device_t dev)
892 {
893 	struct jme_softc *sc = device_get_softc(dev);
894 
895 	if (device_is_attached(dev)) {
896 		struct ifnet *ifp = &sc->arpcom.ac_if;
897 
898 		lwkt_serialize_enter(ifp->if_serializer);
899 		jme_stop(sc);
900 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
901 		lwkt_serialize_exit(ifp->if_serializer);
902 
903 		ether_ifdetach(ifp);
904 	}
905 
906 	if (sc->jme_sysctl_tree != NULL)
907 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
908 
909 	if (sc->jme_miibus != NULL)
910 		device_delete_child(dev, sc->jme_miibus);
911 	bus_generic_detach(dev);
912 
913 	if (sc->jme_irq_res != NULL) {
914 		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
915 				     sc->jme_irq_res);
916 	}
917 
918 	if (sc->jme_mem_res != NULL) {
919 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
920 				     sc->jme_mem_res);
921 	}
922 
923 	jme_dma_free(sc, 1);
924 
925 	return (0);
926 }
927 
928 static void
929 jme_sysctl_node(struct jme_softc *sc)
930 {
931 	int coal_max;
932 #ifdef JME_RSS_DEBUG
933 	char rx_ring_pkt[32];
934 	int r;
935 #endif
936 
937 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
938 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
939 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
940 				device_get_nameunit(sc->jme_dev),
941 				CTLFLAG_RD, 0, "");
942 	if (sc->jme_sysctl_tree == NULL) {
943 		device_printf(sc->jme_dev, "can't add sysctl node\n");
944 		return;
945 	}
946 
947 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
948 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
949 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
950 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
951 
952 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
953 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
954 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
955 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
956 
957 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
958 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
959 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
960 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
961 
962 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
963 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
964 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
965 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
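	/*
	 * The four coalescing nodes above can be tuned at runtime,
	 * e.g. (with a hypothetical unit number):
	 *
	 *	sysctl hw.jme0.tx_coal_to=<value>
	 */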
966 
967 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
968 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
969 		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
970 		       0, "RX desc count");
971 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
972 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
973 		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
974 		       0, "TX desc count");
975 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
976 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
977 		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
978 		       0, "RX ring count");
979 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
980 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
981 		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
982 		       0, "RX ring in use");
983 #ifdef JME_RSS_DEBUG
984 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
985 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
986 		       "rss_debug", CTLFLAG_RD, &sc->jme_rss_debug,
987 		       0, "RSS debug level");
988 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
989 		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
990 		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
991 				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
992 				rx_ring_pkt, CTLFLAG_RD,
993 				&sc->jme_rx_ring_pkt[r],
994 				0, "RXed packets");
995 	}
996 #endif
997 
998 	/*
999 	 * Set default coalescing values.
1000 	 */
1001 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1002 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1003 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1004 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1005 
1006 	/*
1007 	 * Adjust the coalescing values, in case the number of TX/RX
1008 	 * descriptors is set to a small value by the user.
1009 	 *
1010 	 * NOTE: coal_max will not be zero, since the number of descs
1011 	 * must be aligned to JME_NDESC_ALIGN (currently 16).
1012 	 */
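	/*
	 * For example (illustrative counts): with 256 Tx descriptors
	 * coal_max = 256 / 6 = 42, and with 512 Rx descriptors
	 * coal_max = 512 / 4 = 128.
	 */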
1013 	coal_max = sc->jme_tx_desc_cnt / 6;
1014 	if (coal_max < sc->jme_tx_coal_pkt)
1015 		sc->jme_tx_coal_pkt = coal_max;
1016 
1017 	coal_max = sc->jme_rx_desc_cnt / 4;
1018 	if (coal_max < sc->jme_rx_coal_pkt)
1019 		sc->jme_rx_coal_pkt = coal_max;
1020 }
1021 
1022 static void
1023 jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1024 {
1025 	if (error)
1026 		return;
1027 
1028 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1029 	*((bus_addr_t *)arg) = segs->ds_addr;
1030 }
1031 
1032 static void
1033 jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
1034 		  bus_size_t mapsz __unused, int error)
1035 {
1036 	struct jme_dmamap_ctx *ctx = xctx;
1037 	int i;
1038 
1039 	if (error)
1040 		return;
1041 
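	/*
	 * Report zero segments when the mbuf needs more segments
	 * than the caller allowed; jme_encap() treats that as EFBIG
	 * and defragments the mbuf.
	 */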
1042 	if (nsegs > ctx->nsegs) {
1043 		ctx->nsegs = 0;
1044 		return;
1045 	}
1046 
1047 	ctx->nsegs = nsegs;
1048 	for (i = 0; i < nsegs; ++i)
1049 		ctx->segs[i] = segs[i];
1050 }
1051 
1052 static int
1053 jme_dma_alloc(struct jme_softc *sc)
1054 {
1055 	struct jme_txdesc *txd;
1056 	bus_addr_t busaddr, lowaddr;
1057 	int error, i;
1058 
1059 	sc->jme_cdata.jme_txdesc =
1060 	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1061 		M_DEVBUF, M_WAITOK | M_ZERO);
1062 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1063 		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
1064 		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1065 			M_DEVBUF, M_WAITOK | M_ZERO);
1066 	}
1067 
1068 	lowaddr = sc->jme_lowaddr;
1069 again:
1070 	/* Create parent ring tag. */
1071 	error = bus_dma_tag_create(NULL,/* parent */
1072 	    1, 0,			/* algnmnt, boundary */
1073 	    lowaddr,			/* lowaddr */
1074 	    BUS_SPACE_MAXADDR,		/* highaddr */
1075 	    NULL, NULL,			/* filter, filterarg */
1076 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1077 	    0,				/* nsegments */
1078 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1079 	    0,				/* flags */
1080 	    &sc->jme_cdata.jme_ring_tag);
1081 	if (error) {
1082 		device_printf(sc->jme_dev,
1083 		    "could not create parent ring DMA tag.\n");
1084 		return error;
1085 	}
1086 
1087 	/*
1088 	 * Create DMA structures for TX ring
1089 	 */
1090 
1091 	/* Create tag for Tx ring. */
1092 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1093 	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
1094 	    lowaddr,			/* lowaddr */
1095 	    BUS_SPACE_MAXADDR,		/* highaddr */
1096 	    NULL, NULL,			/* filter, filterarg */
1097 	    JME_TX_RING_SIZE(sc),	/* maxsize */
1098 	    1,				/* nsegments */
1099 	    JME_TX_RING_SIZE(sc),	/* maxsegsize */
1100 	    0,				/* flags */
1101 	    &sc->jme_cdata.jme_tx_ring_tag);
1102 	if (error) {
1103 		device_printf(sc->jme_dev,
1104 		    "could not allocate Tx ring DMA tag.\n");
1105 		return error;
1106 	}
1107 
1108 	/* Allocate DMA'able memory for TX ring */
1109 	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1110 	    (void **)&sc->jme_cdata.jme_tx_ring,
1111 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1112 	    &sc->jme_cdata.jme_tx_ring_map);
1113 	if (error) {
1114 		device_printf(sc->jme_dev,
1115 		    "could not allocate DMA'able memory for Tx ring.\n");
1116 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1117 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1118 		return error;
1119 	}
1120 
1121 	/*  Load the DMA map for Tx ring. */
1122 	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1123 	    sc->jme_cdata.jme_tx_ring_map, sc->jme_cdata.jme_tx_ring,
1124 	    JME_TX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1125 	if (error) {
1126 		device_printf(sc->jme_dev,
1127 		    "could not load DMA'able memory for Tx ring.\n");
1128 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1129 				sc->jme_cdata.jme_tx_ring,
1130 				sc->jme_cdata.jme_tx_ring_map);
1131 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1132 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1133 		return error;
1134 	}
1135 	sc->jme_cdata.jme_tx_ring_paddr = busaddr;
1136 
1137 	/*
1138 	 * Create DMA structures for RX ring
1139 	 */
1140 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1141 		error = jme_rxring_dma_alloc(sc, lowaddr, i);
1142 		if (error)
1143 			return error;
1144 	}
1145 
1146 	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1147 		bus_addr_t ring_end;
1148 
1149 		/* Tx/Rx descriptor rings must not cross a 4GB boundary. */
1150 		ring_end = sc->jme_cdata.jme_tx_ring_paddr +
1151 			   JME_TX_RING_SIZE(sc);
1152 		if (JME_ADDR_HI(ring_end) !=
1153 		    JME_ADDR_HI(sc->jme_cdata.jme_tx_ring_paddr)) {
1154 			device_printf(sc->jme_dev, "TX ring 4GB boundary "
1155 			    "crossed, switching to 32bit DMA address mode.\n");
1156 			jme_dma_free(sc, 0);
1157 			/* Limit DMA address space to 32bit and try again. */
1158 			lowaddr = BUS_SPACE_MAXADDR_32BIT;
1159 			goto again;
1160 		}
1161 
1162 		for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1163 			bus_addr_t ring_start;
1164 
1165 			ring_start =
1166 			    sc->jme_cdata.jme_rx_data[i].jme_rx_ring_paddr;
1167 			ring_end = ring_start + JME_RX_RING_SIZE(sc);
1168 			if (JME_ADDR_HI(ring_end) != JME_ADDR_HI(ring_start)) {
1169 				device_printf(sc->jme_dev,
1170 				"%dth RX ring 4GB boundary crossed, "
1171 				"switching to 32bit DMA address mode.\n", i);
1172 				jme_dma_free(sc, 0);
1173 				/*
1174 				 * Limit DMA address space to 32bit and
1175 				 * try again.
1176 				 */
1177 				lowaddr = BUS_SPACE_MAXADDR_32BIT;
1178 				goto again;
1179 			}
1180 		}
1181 	}
1182 
1183 	/* Create parent buffer tag. */
1184 	error = bus_dma_tag_create(NULL,/* parent */
1185 	    1, 0,			/* algnmnt, boundary */
1186 	    sc->jme_lowaddr,		/* lowaddr */
1187 	    BUS_SPACE_MAXADDR,		/* highaddr */
1188 	    NULL, NULL,			/* filter, filterarg */
1189 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1190 	    0,				/* nsegments */
1191 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1192 	    0,				/* flags */
1193 	    &sc->jme_cdata.jme_buffer_tag);
1194 	if (error) {
1195 		device_printf(sc->jme_dev,
1196 		    "could not create parent buffer DMA tag.\n");
1197 		return error;
1198 	}
1199 
1200 	/*
1201 	 * Create DMA structures for shadow status block
1202 	 */
1203 
1204 	/* Create shadow status block tag. */
1205 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1206 	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
1207 	    sc->jme_lowaddr,		/* lowaddr */
1208 	    BUS_SPACE_MAXADDR,		/* highaddr */
1209 	    NULL, NULL,			/* filter, filterarg */
1210 	    JME_SSB_SIZE,		/* maxsize */
1211 	    1,				/* nsegments */
1212 	    JME_SSB_SIZE,		/* maxsegsize */
1213 	    0,				/* flags */
1214 	    &sc->jme_cdata.jme_ssb_tag);
1215 	if (error) {
1216 		device_printf(sc->jme_dev,
1217 		    "could not create shadow status block DMA tag.\n");
1218 		return error;
1219 	}
1220 
1221 	/* Allocate DMA'able memory for shadow status block. */
1222 	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1223 	    (void **)&sc->jme_cdata.jme_ssb_block,
1224 	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
1225 	    &sc->jme_cdata.jme_ssb_map);
1226 	if (error) {
1227 		device_printf(sc->jme_dev, "could not allocate DMA'able "
1228 		    "memory for shadow status block.\n");
1229 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1230 		sc->jme_cdata.jme_ssb_tag = NULL;
1231 		return error;
1232 	}
1233 
1234 	/* Load the DMA map for shadow status block */
1235 	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1236 	    sc->jme_cdata.jme_ssb_map, sc->jme_cdata.jme_ssb_block,
1237 	    JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1238 	if (error) {
1239 		device_printf(sc->jme_dev, "could not load DMA'able memory "
1240 		    "for shadow status block.\n");
1241 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1242 				sc->jme_cdata.jme_ssb_block,
1243 				sc->jme_cdata.jme_ssb_map);
1244 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1245 		sc->jme_cdata.jme_ssb_tag = NULL;
1246 		return error;
1247 	}
1248 	sc->jme_cdata.jme_ssb_block_paddr = busaddr;
1249 
1250 	/*
1251 	 * Create DMA structures for TX buffers
1252 	 */
1253 
1254 	/* Create tag for Tx buffers. */
1255 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1256 	    1, 0,			/* algnmnt, boundary */
1257 	    sc->jme_lowaddr,		/* lowaddr */
1258 	    BUS_SPACE_MAXADDR,		/* highaddr */
1259 	    NULL, NULL,			/* filter, filterarg */
1260 	    JME_TSO_MAXSIZE,		/* maxsize */
1261 	    JME_MAXTXSEGS,		/* nsegments */
1262 	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
1263 	    0,				/* flags */
1264 	    &sc->jme_cdata.jme_tx_tag);
1265 	if (error != 0) {
1266 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1267 		return error;
1268 	}
1269 
1270 	/* Create DMA maps for Tx buffers. */
1271 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1272 		txd = &sc->jme_cdata.jme_txdesc[i];
1273 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1274 		    &txd->tx_dmamap);
1275 		if (error) {
1276 			int j;
1277 
1278 			device_printf(sc->jme_dev,
1279 			    "could not create %dth Tx dmamap.\n", i);
1280 
1281 			for (j = 0; j < i; ++j) {
1282 				txd = &sc->jme_cdata.jme_txdesc[j];
1283 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1284 						   txd->tx_dmamap);
1285 			}
1286 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1287 			sc->jme_cdata.jme_tx_tag = NULL;
1288 			return error;
1289 		}
1290 	}
1291 
1292 	/*
1293 	 * Create DMA structures for RX buffers
1294 	 */
1295 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1296 		error = jme_rxbuf_dma_alloc(sc, i);
1297 		if (error)
1298 			return error;
1299 	}
1300 	return 0;
1301 }
1302 
1303 static void
1304 jme_dma_free(struct jme_softc *sc, int detach)
1305 {
1306 	struct jme_txdesc *txd;
1307 	struct jme_rxdesc *rxd;
1308 	struct jme_rxdata *rdata;
1309 	int i, r;
1310 
1311 	/* Tx ring */
1312 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1313 		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1314 		    sc->jme_cdata.jme_tx_ring_map);
1315 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1316 		    sc->jme_cdata.jme_tx_ring,
1317 		    sc->jme_cdata.jme_tx_ring_map);
1318 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1319 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1320 	}
1321 
1322 	/* Rx ring */
1323 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1324 		rdata = &sc->jme_cdata.jme_rx_data[r];
1325 		if (rdata->jme_rx_ring_tag != NULL) {
1326 			bus_dmamap_unload(rdata->jme_rx_ring_tag,
1327 					  rdata->jme_rx_ring_map);
1328 			bus_dmamem_free(rdata->jme_rx_ring_tag,
1329 					rdata->jme_rx_ring,
1330 					rdata->jme_rx_ring_map);
1331 			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1332 			rdata->jme_rx_ring_tag = NULL;
1333 		}
1334 	}
1335 
1336 	/* Tx buffers */
1337 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1338 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1339 			txd = &sc->jme_cdata.jme_txdesc[i];
1340 			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1341 			    txd->tx_dmamap);
1342 		}
1343 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1344 		sc->jme_cdata.jme_tx_tag = NULL;
1345 	}
1346 
1347 	/* Rx buffers */
1348 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1349 		rdata = &sc->jme_cdata.jme_rx_data[r];
1350 		if (rdata->jme_rx_tag != NULL) {
1351 			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1352 				rxd = &rdata->jme_rxdesc[i];
1353 				bus_dmamap_destroy(rdata->jme_rx_tag,
1354 						   rxd->rx_dmamap);
1355 			}
1356 			bus_dmamap_destroy(rdata->jme_rx_tag,
1357 					   rdata->jme_rx_sparemap);
1358 			bus_dma_tag_destroy(rdata->jme_rx_tag);
1359 			rdata->jme_rx_tag = NULL;
1360 		}
1361 	}
1362 
1363 	/* Shadow status block. */
1364 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1365 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1366 		    sc->jme_cdata.jme_ssb_map);
1367 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1368 		    sc->jme_cdata.jme_ssb_block,
1369 		    sc->jme_cdata.jme_ssb_map);
1370 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1371 		sc->jme_cdata.jme_ssb_tag = NULL;
1372 	}
1373 
1374 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1375 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1376 		sc->jme_cdata.jme_buffer_tag = NULL;
1377 	}
1378 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1379 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1380 		sc->jme_cdata.jme_ring_tag = NULL;
1381 	}
1382 
1383 	if (detach) {
1384 		if (sc->jme_cdata.jme_txdesc != NULL) {
1385 			kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1386 			sc->jme_cdata.jme_txdesc = NULL;
1387 		}
1388 		for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1389 			rdata = &sc->jme_cdata.jme_rx_data[r];
1390 			if (rdata->jme_rxdesc != NULL) {
1391 				kfree(rdata->jme_rxdesc, M_DEVBUF);
1392 				rdata->jme_rxdesc = NULL;
1393 			}
1394 		}
1395 	}
1396 }
1397 
1398 /*
1399  *	Make sure the interface is stopped at reboot time.
1400  */
1401 static int
1402 jme_shutdown(device_t dev)
1403 {
1404 	return jme_suspend(dev);
1405 }
1406 
1407 #ifdef notyet
1408 /*
1409  * Unlike other ethernet controllers, the JMC250 requires
1410  * explicitly resetting the link speed to 10/100Mbps, as a
1411  * gigabit link draws more than 375mA.
1412  * Note that we reset the link speed to 10/100Mbps with
1413  * auto-negotiation, but we don't know whether that operation
1414  * will succeed, as we have no control after powering off.
1415  * If the renegotiation fails, WOL may not work.  Running at
1416  * 1Gbps draws more than the 375mA at 3.3V specified in the
1417  * PCI specification, and that would result in power to the
1418  * ethernet controller being shut down completely.
1419  *
1420  * TODO
1421  *  Save the currently negotiated media speed/duplex/flow-control
1422  *  in the softc and restore the same link after resuming.
1423  *  PHY handling, such as powering down or resetting to 100Mbps,
1424  *  may be better handled in the PHY driver's suspend method.
1425  */
1426 static void
1427 jme_setlinkspeed(struct jme_softc *sc)
1428 {
1429 	struct mii_data *mii;
1430 	int aneg, i;
1431 
1432 	JME_LOCK_ASSERT(sc);
1433 
1434 	mii = device_get_softc(sc->jme_miibus);
1435 	mii_pollstat(mii);
1436 	aneg = 0;
1437 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1438 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1439 		case IFM_10_T:
1440 		case IFM_100_TX:
1441 			return;
1442 		case IFM_1000_T:
1443 			aneg++;
1444 		default:
1445 			break;
1446 		}
1447 	}
1448 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1449 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1450 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1451 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1452 	    BMCR_AUTOEN | BMCR_STARTNEG);
1453 	DELAY(1000);
1454 	if (aneg != 0) {
1455 		/* Poll link state until jme(4) gets a 10/100 link. */
1456 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1457 			mii_pollstat(mii);
1458 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1459 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1460 				case IFM_10_T:
1461 				case IFM_100_TX:
1462 					jme_mac_config(sc);
1463 					return;
1464 				default:
1465 					break;
1466 				}
1467 			}
1468 			JME_UNLOCK(sc);
1469 			pause("jmelnk", hz);
1470 			JME_LOCK(sc);
1471 		}
1472 		if (i == MII_ANEGTICKS_GIGE)
1473 			device_printf(sc->jme_dev, "establishing link failed, "
1474 			    "WOL may not work!");
1475 	}
1476 	/*
1477 	 * No link, force MAC to have 100Mbps, full-duplex link.
1478 	 * This is the last resort and may or may not work.
1479 	 */
1480 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1481 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1482 	jme_mac_config(sc);
1483 }
1484 
1485 static void
1486 jme_setwol(struct jme_softc *sc)
1487 {
1488 	struct ifnet *ifp = &sc->arpcom.ac_if;
1489 	uint32_t gpr, pmcs;
1490 	uint16_t pmstat;
1491 	int pmc;
1492 
1493 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1494 		/* No PME capability, PHY power down. */
1495 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1496 		    MII_BMCR, BMCR_PDOWN);
1497 		return;
1498 	}
1499 
1500 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1501 	pmcs = CSR_READ_4(sc, JME_PMCS);
1502 	pmcs &= ~PMCS_WOL_ENB_MASK;
1503 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1504 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1505 		/* Enable PME message. */
1506 		gpr |= GPREG0_PME_ENB;
1507 		/* For gigabit controllers, reset link speed to 10/100. */
1508 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1509 			jme_setlinkspeed(sc);
1510 	}
1511 
1512 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1513 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1514 
1515 	/* Request PME. */
1516 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1517 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1518 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1519 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1520 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1521 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1522 		/* No WOL, PHY power down. */
1523 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1524 		    MII_BMCR, BMCR_PDOWN);
1525 	}
1526 }
1527 #endif
1528 
1529 static int
1530 jme_suspend(device_t dev)
1531 {
1532 	struct jme_softc *sc = device_get_softc(dev);
1533 	struct ifnet *ifp = &sc->arpcom.ac_if;
1534 
1535 	lwkt_serialize_enter(ifp->if_serializer);
1536 	jme_stop(sc);
1537 #ifdef notyet
1538 	jme_setwol(sc);
1539 #endif
1540 	lwkt_serialize_exit(ifp->if_serializer);
1541 
1542 	return (0);
1543 }
1544 
1545 static int
1546 jme_resume(device_t dev)
1547 {
1548 	struct jme_softc *sc = device_get_softc(dev);
1549 	struct ifnet *ifp = &sc->arpcom.ac_if;
1550 #ifdef notyet
1551 	int pmc;
1552 #endif
1553 
1554 	lwkt_serialize_enter(ifp->if_serializer);
1555 
1556 #ifdef notyet
1557 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1558 		uint16_t pmstat;
1559 
1560 		pmstat = pci_read_config(sc->jme_dev,
1561 		    pmc + PCIR_POWER_STATUS, 2);
1562 		/* Disable PME and clear the PME status. */
1563 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1564 		pci_write_config(sc->jme_dev,
1565 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1566 	}
1567 #endif
1568 
1569 	if (ifp->if_flags & IFF_UP)
1570 		jme_init(sc);
1571 
1572 	lwkt_serialize_exit(ifp->if_serializer);
1573 
1574 	return (0);
1575 }
1576 
1577 static int
1578 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1579 {
1580 	struct jme_txdesc *txd;
1581 	struct jme_desc *desc;
1582 	struct mbuf *m;
1583 	struct jme_dmamap_ctx ctx;
1584 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1585 	int maxsegs;
1586 	int error, i, prod, symbol_desc;
1587 	uint32_t cflags, flag64;
1588 
1589 	M_ASSERTPKTHDR((*m_head));
1590 
1591 	prod = sc->jme_cdata.jme_tx_prod;
1592 	txd = &sc->jme_cdata.jme_txdesc[prod];
1593 
1594 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1595 		symbol_desc = 1;
1596 	else
1597 		symbol_desc = 0;
1598 
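	/*
	 * Segments we may use: all currently free descriptors, minus
	 * the JME_TXD_RSVD descriptors that are always kept free and,
	 * in 64-bit mode, minus the extra symbol descriptor (see below).
	 */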
1599 	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1600 		  (JME_TXD_RSVD + symbol_desc);
1601 	if (maxsegs > JME_MAXTXSEGS)
1602 		maxsegs = JME_MAXTXSEGS;
1603 	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1604 		("not enough segments %d\n", maxsegs));
1605 
1606 	ctx.nsegs = maxsegs;
1607 	ctx.segs = txsegs;
1608 	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1609 				     *m_head, jme_dmamap_buf_cb, &ctx,
1610 				     BUS_DMA_NOWAIT);
1611 	if (!error && ctx.nsegs == 0) {
1612 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1613 		error = EFBIG;
1614 	}
1615 	if (error == EFBIG) {
1616 		m = m_defrag(*m_head, MB_DONTWAIT);
1617 		if (m == NULL) {
1618 			if_printf(&sc->arpcom.ac_if,
1619 				  "could not defrag TX mbuf\n");
1620 			error = ENOBUFS;
1621 			goto fail;
1622 		}
1623 		*m_head = m;
1624 
1625 		ctx.nsegs = maxsegs;
1626 		ctx.segs = txsegs;
1627 		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
1628 					     txd->tx_dmamap, *m_head,
1629 					     jme_dmamap_buf_cb, &ctx,
1630 					     BUS_DMA_NOWAIT);
1631 		if (error || ctx.nsegs == 0) {
1632 			if_printf(&sc->arpcom.ac_if,
1633 				  "could not load defragged TX mbuf\n");
1634 			if (!error) {
1635 				bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
1636 						  txd->tx_dmamap);
1637 				error = EFBIG;
1638 			}
1639 			goto fail;
1640 		}
1641 	} else if (error) {
1642 		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
1643 		goto fail;
1644 	}
1645 
1646 	m = *m_head;
1647 	cflags = 0;
1648 
1649 	/* Configure checksum offload. */
1650 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1651 		cflags |= JME_TD_IPCSUM;
1652 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1653 		cflags |= JME_TD_TCPCSUM;
1654 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1655 		cflags |= JME_TD_UDPCSUM;
1656 
1657 	/* Configure VLAN. */
1658 	if (m->m_flags & M_VLANTAG) {
1659 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1660 		cflags |= JME_TD_VLAN_TAG;
1661 	}
1662 
1663 	desc = &sc->jme_cdata.jme_tx_ring[prod];
1664 	desc->flags = htole32(cflags);
1665 	desc->addr_hi = htole32(m->m_pkthdr.len);
1666 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1667 		/*
1668 		 * Use 64bits TX desc chain format.
1669 		 *
1670 		 * The first TX desc of the chain, which is setup here,
1671 		 * is just a symbol TX desc carrying no payload.
1672 		 */
1673 		flag64 = JME_TD_64BIT;
1674 		desc->buflen = 0;
1675 		desc->addr_lo = 0;
1676 
1677 		/* No effective TX desc is consumed */
1678 		i = 0;
1679 	} else {
1680 		/*
1681 		 * Use 32bits TX desc chain format.
1682 		 *
1683 		 * The first TX desc of the chain, which is setup here,
1684 		 * is an effective TX desc carrying the first segment of
1685 		 * the mbuf chain.
1686 		 */
1687 		flag64 = 0;
1688 		desc->buflen = htole32(txsegs[0].ds_len);
1689 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1690 
1691 		/* One effective TX desc is consumed */
1692 		i = 1;
1693 	}
1694 	sc->jme_cdata.jme_tx_cnt++;
1695 	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1696 		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1697 	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1698 
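	/*
	 * Descriptor accounting: in 64-bit mode (i == 0) the symbol
	 * descriptor consumed above is counted here; in 32-bit mode
	 * (i == 1) the first segment descriptor is counted by the
	 * loop below, so start from zero.
	 */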
1699 	txd->tx_ndesc = 1 - i;
1700 	for (; i < ctx.nsegs; i++) {
1701 		desc = &sc->jme_cdata.jme_tx_ring[prod];
1702 		desc->flags = htole32(JME_TD_OWN | flag64);
1703 		desc->buflen = htole32(txsegs[i].ds_len);
1704 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1705 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1706 
1707 		sc->jme_cdata.jme_tx_cnt++;
1708 		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1709 			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1710 		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1711 	}
1712 
1713 	/* Update producer index. */
1714 	sc->jme_cdata.jme_tx_prod = prod;
1715 	/*
1716 	 * Finally, request an interrupt and give ownership of the
1717 	 * first descriptor to the hardware.
1718 	 */
1719 	desc = txd->tx_desc;
1720 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1721 
1722 	txd->tx_m = m;
1723 	txd->tx_ndesc += ctx.nsegs;
1724 
1725 	/* Sync descriptors. */
1726 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1727 			BUS_DMASYNC_PREWRITE);
1728 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1729 			sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
1730 	return 0;
1731 fail:
1732 	m_freem(*m_head);
1733 	*m_head = NULL;
1734 	return error;
1735 }
1736 
1737 static void
1738 jme_start(struct ifnet *ifp)
1739 {
1740 	struct jme_softc *sc = ifp->if_softc;
1741 	struct mbuf *m_head;
1742 	int enq = 0;
1743 
1744 	ASSERT_SERIALIZED(ifp->if_serializer);
1745 
1746 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1747 		ifq_purge(&ifp->if_snd);
1748 		return;
1749 	}
1750 
1751 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1752 		return;
1753 
1754 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1755 		jme_txeof(sc);
1756 
1757 	while (!ifq_is_empty(&ifp->if_snd)) {
1758 		/*
1759 		 * Check the number of available TX descs; always
1760 		 * leave JME_TXD_RSVD TX descs free.
1761 		 */
1762 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1763 		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1764 			ifp->if_flags |= IFF_OACTIVE;
1765 			break;
1766 		}
1767 
1768 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1769 		if (m_head == NULL)
1770 			break;
1771 
1772 		/*
1773 		 * Pack the data into the transmit ring. If we
1774 		 * don't have room, set the OACTIVE flag and wait
1775 		 * for the NIC to drain the ring.
1776 		 */
1777 		if (jme_encap(sc, &m_head)) {
1778 			KKASSERT(m_head == NULL);
1779 			ifp->if_oerrors++;
1780 			ifp->if_flags |= IFF_OACTIVE;
1781 			break;
1782 		}
1783 		enq++;
1784 
1785 		/*
1786 		 * If there's a BPF listener, bounce a copy of this frame
1787 		 * to him.
1788 		 */
1789 		ETHER_BPF_MTAP(ifp, m_head);
1790 	}
1791 
1792 	if (enq > 0) {
1793 		/*
1794 		 * Reading TXCSR takes a very long time under heavy load,
1795 		 * so cache the TXCSR value and write the ORed value with
1796 		 * the kick command to TXCSR.  This saves one register
1797 		 * access cycle.
1798 		 */
1799 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1800 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1801 		/* Set a timeout in case the chip goes out to lunch. */
1802 		ifp->if_timer = JME_TX_TIMEOUT;
1803 	}
1804 }
1805 
1806 static void
1807 jme_watchdog(struct ifnet *ifp)
1808 {
1809 	struct jme_softc *sc = ifp->if_softc;
1810 
1811 	ASSERT_SERIALIZED(ifp->if_serializer);
1812 
1813 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1814 		if_printf(ifp, "watchdog timeout (missed link)\n");
1815 		ifp->if_oerrors++;
1816 		jme_init(sc);
1817 		return;
1818 	}
1819 
1820 	jme_txeof(sc);
1821 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1822 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1823 			  "-- recovering\n");
1824 		if (!ifq_is_empty(&ifp->if_snd))
1825 			if_devstart(ifp);
1826 		return;
1827 	}
1828 
1829 	if_printf(ifp, "watchdog timeout\n");
1830 	ifp->if_oerrors++;
1831 	jme_init(sc);
1832 	if (!ifq_is_empty(&ifp->if_snd))
1833 		if_devstart(ifp);
1834 }
1835 
1836 static int
1837 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1838 {
1839 	struct jme_softc *sc = ifp->if_softc;
1840 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1841 	struct ifreq *ifr = (struct ifreq *)data;
1842 	int error = 0, mask;
1843 
1844 	ASSERT_SERIALIZED(ifp->if_serializer);
1845 
1846 	switch (cmd) {
1847 	case SIOCSIFMTU:
1848 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1849 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1850 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1851 			error = EINVAL;
1852 			break;
1853 		}
1854 
1855 		if (ifp->if_mtu != ifr->ifr_mtu) {
1856 			/*
1857 			 * No special configuration is required when the
1858 			 * interface MTU is changed, but the availability of
1859 			 * Tx checksum offload should be checked against the
1860 			 * new MTU size, as the Tx FIFO is just 2K.
1861 			 */
1862 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1863 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1864 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1865 			}
1866 			ifp->if_mtu = ifr->ifr_mtu;
1867 			if (ifp->if_flags & IFF_RUNNING)
1868 				jme_init(sc);
1869 		}
1870 		break;
1871 
1872 	case SIOCSIFFLAGS:
1873 		if (ifp->if_flags & IFF_UP) {
1874 			if (ifp->if_flags & IFF_RUNNING) {
1875 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1876 				    (IFF_PROMISC | IFF_ALLMULTI))
1877 					jme_set_filter(sc);
1878 			} else {
1879 				jme_init(sc);
1880 			}
1881 		} else {
1882 			if (ifp->if_flags & IFF_RUNNING)
1883 				jme_stop(sc);
1884 		}
1885 		sc->jme_if_flags = ifp->if_flags;
1886 		break;
1887 
1888 	case SIOCADDMULTI:
1889 	case SIOCDELMULTI:
1890 		if (ifp->if_flags & IFF_RUNNING)
1891 			jme_set_filter(sc);
1892 		break;
1893 
1894 	case SIOCSIFMEDIA:
1895 	case SIOCGIFMEDIA:
1896 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1897 		break;
1898 
1899 	case SIOCSIFCAP:
1900 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1901 
1902 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1903 			if (IFCAP_TXCSUM & ifp->if_capabilities) {
1904 				ifp->if_capenable ^= IFCAP_TXCSUM;
1905 				if (IFCAP_TXCSUM & ifp->if_capenable)
1906 					ifp->if_hwassist |= JME_CSUM_FEATURES;
1907 				else
1908 					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1909 			}
1910 		}
1911 		if ((mask & IFCAP_RXCSUM) &&
1912 		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
1913 			uint32_t reg;
1914 
1915 			ifp->if_capenable ^= IFCAP_RXCSUM;
1916 			reg = CSR_READ_4(sc, JME_RXMAC);
1917 			reg &= ~RXMAC_CSUM_ENB;
1918 			if (ifp->if_capenable & IFCAP_RXCSUM)
1919 				reg |= RXMAC_CSUM_ENB;
1920 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1921 		}
1922 
1923 		if ((mask & IFCAP_VLAN_HWTAGGING) &&
1924 		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
1925 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1926 			jme_set_vlan(sc);
1927 		}
1928 		break;
1929 
1930 	default:
1931 		error = ether_ioctl(ifp, cmd, data);
1932 		break;
1933 	}
1934 	return (error);
1935 }
1936 
1937 static void
1938 jme_mac_config(struct jme_softc *sc)
1939 {
1940 	struct mii_data *mii;
1941 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1942 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1943 
1944 	mii = device_get_softc(sc->jme_miibus);
1945 
1946 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1947 	DELAY(10);
1948 	CSR_WRITE_4(sc, JME_GHC, 0);
1949 	ghc = 0;
1950 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1951 	rxmac &= ~RXMAC_FC_ENB;
1952 	txmac = CSR_READ_4(sc, JME_TXMAC);
1953 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1954 	txpause = CSR_READ_4(sc, JME_TXPFC);
1955 	txpause &= ~TXPFC_PAUSE_ENB;
1956 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1957 		ghc |= GHC_FULL_DUPLEX;
1958 		rxmac &= ~RXMAC_COLL_DET_ENB;
1959 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1960 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1961 		    TXMAC_FRAME_BURST);
1962 #ifdef notyet
1963 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1964 			txpause |= TXPFC_PAUSE_ENB;
1965 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1966 			rxmac |= RXMAC_FC_ENB;
1967 #endif
1968 		/* Disable retry transmit timer/retry limit. */
1969 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1970 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1971 	} else {
1972 		rxmac |= RXMAC_COLL_DET_ENB;
1973 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1974 		/* Enable retry transmit timer/retry limit. */
1975 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1976 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1977 	}
1978 
1979 	/*
1980 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1981 	 */
1982 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1983 	gp1 &= ~GPREG1_WA_HDX;
1984 
1985 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1986 		hdx = 1;
1987 
1988 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1989 	case IFM_10_T:
1990 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1991 		if (hdx)
1992 			gp1 |= GPREG1_WA_HDX;
1993 		break;
1994 
1995 	case IFM_100_TX:
1996 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1997 		if (hdx)
1998 			gp1 |= GPREG1_WA_HDX;
1999 
2000 		/*
2001 		 * Use extended FIFO depth to work around CRC errors
2002 		 * emitted by chips before the JMC250B.
2003 		 */
2004 		phyconf = JMPHY_CONF_EXTFIFO;
2005 		break;
2006 
2007 	case IFM_1000_T:
2008 		if (sc->jme_caps & JME_CAP_FASTETH)
2009 			break;
2010 
2011 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
2012 		if (hdx)
2013 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2014 		break;
2015 
2016 	default:
2017 		break;
2018 	}
2019 	CSR_WRITE_4(sc, JME_GHC, ghc);
2020 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2021 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
2022 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
2023 
2024 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
2025 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2026 				    JMPHY_CONF, phyconf);
2027 	}
2028 	if (sc->jme_workaround & JME_WA_HDX)
2029 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
2030 }
2031 
2032 static void
2033 jme_intr(void *xsc)
2034 {
2035 	struct jme_softc *sc = xsc;
2036 	struct ifnet *ifp = &sc->arpcom.ac_if;
2037 	uint32_t status;
2038 	int r;
2039 
2040 	ASSERT_SERIALIZED(ifp->if_serializer);
2041 
2042 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2043 	if (status == 0 || status == 0xFFFFFFFF)
2044 		return;
2045 
2046 	/* Disable interrupts. */
2047 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2048 
2049 	status = CSR_READ_4(sc, JME_INTR_STATUS);
2050 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2051 		goto back;
2052 
2053 	/* Reset PCC counter/timer and Ack interrupts. */
2054 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
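	/*
	 * The COMP bits are re-added below only for queues whose
	 * coalescing interrupt actually fired, so that writing the value
	 * back to JME_INTR_STATUS acknowledges the completion status
	 * together with resetting the matching PCC counter/timer.
	 */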
2055 
2056 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
2057 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2058 
2059 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2060 		if (status & jme_rx_status[r].jme_coal) {
2061 			status |= jme_rx_status[r].jme_coal |
2062 				  jme_rx_status[r].jme_comp;
2063 		}
2064 	}
2065 
2066 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2067 
2068 	if (ifp->if_flags & IFF_RUNNING) {
2069 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
2070 			jme_rx_intr(sc, status);
2071 
2072 		if (status & INTR_RXQ_DESC_EMPTY) {
2073 			/*
2074 			 * Notify the hardware that new Rx buffers are
2075 			 * available.  Reading RXCSR takes a very long time
2076 			 * under heavy load, so cache the RXCSR value and
2077 			 * write the ORed value with the kick command to
2078 			 * RXCSR.  This saves one register access cycle.
2079 			 */
2080 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2081 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2082 		}
2083 
2084 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
2085 			jme_txeof(sc);
2086 			if (!ifq_is_empty(&ifp->if_snd))
2087 				if_devstart(ifp);
2088 		}
2089 	}
2090 back:
2091 	/* Reenable interrupts. */
2092 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2093 }
2094 
2095 static void
2096 jme_txeof(struct jme_softc *sc)
2097 {
2098 	struct ifnet *ifp = &sc->arpcom.ac_if;
2099 	struct jme_txdesc *txd;
2100 	uint32_t status;
2101 	int cons, nsegs;
2102 
2103 	cons = sc->jme_cdata.jme_tx_cons;
2104 	if (cons == sc->jme_cdata.jme_tx_prod)
2105 		return;
2106 
2107 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2108 			sc->jme_cdata.jme_tx_ring_map,
2109 			BUS_DMASYNC_POSTREAD);
2110 
2111 	/*
2112 	 * Go through our Tx list and free mbufs for those
2113 	 * frames which have been transmitted.
2114 	 */
2115 	while (cons != sc->jme_cdata.jme_tx_prod) {
2116 		txd = &sc->jme_cdata.jme_txdesc[cons];
2117 		KASSERT(txd->tx_m != NULL,
2118 			("%s: freeing NULL mbuf!\n", __func__));
2119 
2120 		status = le32toh(txd->tx_desc->flags);
2121 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2122 			break;
2123 
2124 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2125 			ifp->if_oerrors++;
2126 		} else {
2127 			ifp->if_opackets++;
2128 			if (status & JME_TD_COLLISION) {
2129 				ifp->if_collisions +=
2130 				    le32toh(txd->tx_desc->buflen) &
2131 				    JME_TD_BUF_LEN_MASK;
2132 			}
2133 		}
2134 
2135 		/*
2136 		 * Only the first descriptor of a multi-descriptor
2137 		 * transmission is updated, so the driver has to skip the
2138 		 * entire chain of buffers for the transmitted frame.  In
2139 		 * other words, the JME_TD_OWN bit is valid only in the
2140 		 * first descriptor of a multi-descriptor transmission.
2141 		 */
2142 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2143 			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
2144 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
2145 		}
2146 
2147 		/* Reclaim transferred mbufs. */
2148 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2149 		m_freem(txd->tx_m);
2150 		txd->tx_m = NULL;
2151 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2152 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2153 			("%s: Active Tx desc counter was garbled\n", __func__));
2154 		txd->tx_ndesc = 0;
2155 	}
2156 	sc->jme_cdata.jme_tx_cons = cons;
2157 
2158 	if (sc->jme_cdata.jme_tx_cnt == 0)
2159 		ifp->if_timer = 0;
2160 
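	/*
	 * Clear OACTIVE once enough descriptors have been reclaimed to
	 * hold at least one more worst-case (jme_txd_spare) frame.
	 */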
2161 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
2162 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
2163 		ifp->if_flags &= ~IFF_OACTIVE;
2164 
2165 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2166 			sc->jme_cdata.jme_tx_ring_map,
2167 			BUS_DMASYNC_PREWRITE);
2168 }
2169 
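/*
 * Hand the descriptors starting at cons back to the hardware unchanged,
 * so the mbufs already attached to them are reused, e.g. after an Rx
 * error or an mbuf allocation failure.
 */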
2170 static __inline void
2171 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2172 {
2173 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2174 	int i;
2175 
2176 	for (i = 0; i < count; ++i) {
2177 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2178 
2179 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2180 		desc->buflen = htole32(MCLBYTES);
2181 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2182 	}
2183 }
2184 
2185 /* Receive a frame. */
2186 static void
2187 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2188 {
2189 	struct ifnet *ifp = &sc->arpcom.ac_if;
2190 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2191 	struct jme_desc *desc;
2192 	struct jme_rxdesc *rxd;
2193 	struct mbuf *mp, *m;
2194 	uint32_t flags, status;
2195 	int cons, count, nsegs;
2196 
2197 	cons = rdata->jme_rx_cons;
2198 	desc = &rdata->jme_rx_ring[cons];
2199 	flags = le32toh(desc->flags);
2200 	status = le32toh(desc->buflen);
2201 	nsegs = JME_RX_NSEGS(status);
2202 
2203 	JME_RSS_DPRINTF(sc, 10, "ring%d, flags 0x%08x, "
2204 			"hash 0x%08x, hash type 0x%08x\n",
2205 			ring, flags, desc->addr_hi, desc->addr_lo);
2206 
2207 	if (status & JME_RX_ERR_STAT) {
2208 		ifp->if_ierrors++;
2209 		jme_discard_rxbufs(sc, ring, cons, nsegs);
2210 #ifdef JME_SHOW_ERRORS
2211 		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2212 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2213 #endif
2214 		rdata->jme_rx_cons += nsegs;
2215 		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2216 		return;
2217 	}
2218 
2219 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2220 	for (count = 0; count < nsegs; count++,
2221 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2222 		rxd = &rdata->jme_rxdesc[cons];
2223 		mp = rxd->rx_m;
2224 
2225 		/* Add a new receive buffer to the ring. */
2226 		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2227 			ifp->if_iqdrops++;
2228 			/* Reuse buffer. */
2229 			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2230 			if (rdata->jme_rxhead != NULL) {
2231 				m_freem(rdata->jme_rxhead);
2232 				JME_RXCHAIN_RESET(sc, ring);
2233 			}
2234 			break;
2235 		}
2236 
2237 		/*
2238 		 * Assume we've received a full sized frame.
2239 		 * The actual size is fixed up when we encounter the end
2240 		 * of a multi-segmented frame.
2241 		 */
2242 		mp->m_len = MCLBYTES;
2243 
2244 		/* Chain received mbufs. */
2245 		if (rdata->jme_rxhead == NULL) {
2246 			rdata->jme_rxhead = mp;
2247 			rdata->jme_rxtail = mp;
2248 		} else {
2249 			/*
2250 			 * Receive processor can receive a maximum frame
2251 			 * size of 65535 bytes.
2252 			 */
2253 			mp->m_flags &= ~M_PKTHDR;
2254 			rdata->jme_rxtail->m_next = mp;
2255 			rdata->jme_rxtail = mp;
2256 		}
2257 
2258 		if (count == nsegs - 1) {
2259 			/* Last desc. for this frame. */
2260 			m = rdata->jme_rxhead;
2261 			/* XXX assert PKTHDR? */
2262 			m->m_flags |= M_PKTHDR;
2263 			m->m_pkthdr.len = rdata->jme_rxlen;
2264 			if (nsegs > 1) {
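				/*
				 * The first mbuf holds MCLBYTES minus
				 * the 10 byte Rx pad, intermediate
				 * mbufs hold MCLBYTES each, and the
				 * last mbuf holds the remainder.
				 */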
2265 				/* Set first mbuf size. */
2266 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2267 				/* Set last mbuf size. */
2268 				mp->m_len = rdata->jme_rxlen -
2269 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2270 				    (MCLBYTES * (nsegs - 2)));
2271 			} else {
2272 				m->m_len = rdata->jme_rxlen;
2273 			}
2274 			m->m_pkthdr.rcvif = ifp;
2275 
2276 			/*
2277 			 * Account for the 10 bytes of auto padding used to
2278 			 * align the IP header on a 32bit boundary.  Also
2279 			 * note that the CRC bytes are automatically removed
2280 			 * by the hardware.
2281 			 */
2282 			m->m_data += JME_RX_PAD_BYTES;
2283 
2284 			/* Set checksum information. */
2285 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2286 			    (flags & JME_RD_IPV4)) {
2287 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2288 				if (flags & JME_RD_IPCSUM)
2289 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2290 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2291 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2292 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2293 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2294 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2295 					m->m_pkthdr.csum_flags |=
2296 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2297 					m->m_pkthdr.csum_data = 0xffff;
2298 				}
2299 			}
2300 
2301 			/* Check for VLAN tagged packets. */
2302 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2303 			    (flags & JME_RD_VLAN_TAG)) {
2304 				m->m_pkthdr.ether_vlantag =
2305 				    flags & JME_RD_VLAN_MASK;
2306 				m->m_flags |= M_VLANTAG;
2307 			}
2308 
2309 			ifp->if_ipackets++;
2310 			/* Pass it on. */
2311 			ether_input_chain(ifp, m, chain);
2312 
2313 			/* Reset mbuf chains. */
2314 			JME_RXCHAIN_RESET(sc, ring);
2315 #ifdef JME_RSS_DEBUG
2316 			sc->jme_rx_ring_pkt[ring]++;
2317 #endif
2318 		}
2319 	}
2320 
2321 	rdata->jme_rx_cons += nsegs;
2322 	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2323 }
2324 
2325 static int
2326 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2327 		int count)
2328 {
2329 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2330 	struct jme_desc *desc;
2331 	int nsegs, prog, pktlen;
2332 
2333 	bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2334 			BUS_DMASYNC_POSTREAD);
2335 
2336 	prog = 0;
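	/*
	 * A negative count means no budget (the interrupt path); under
	 * DEVICE_POLLING the loop stops after processing count frames.
	 */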
2337 	for (;;) {
2338 #ifdef DEVICE_POLLING
2339 		if (count >= 0 && count-- == 0)
2340 			break;
2341 #endif
2342 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2343 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2344 			break;
2345 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2346 			break;
2347 
2348 		/*
2349 		 * Check number of segments against received bytes.
2350 		 * A non-matching value would indicate that the hardware
2351 		 * is still trying to update Rx descriptors.  I'm not
2352 		 * sure whether this check is needed.
2353 		 */
2354 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2355 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2356 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2357 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2358 				  "and packet size(%d) mismatch\n",
2359 				  nsegs, pktlen);
2360 			break;
2361 		}
2362 
2363 		/* Received a frame. */
2364 		jme_rxpkt(sc, ring, chain);
2365 		prog++;
2366 	}
2367 
2368 	if (prog > 0) {
2369 		bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2370 				BUS_DMASYNC_PREWRITE);
2371 	}
2372 	return prog;
2373 }
2374 
2375 static void
2376 jme_rxeof(struct jme_softc *sc, int ring)
2377 {
2378 	struct mbuf_chain chain[MAXCPU];
2379 
2380 	ether_input_chain_init(chain);
2381 	if (jme_rxeof_chain(sc, ring, chain, -1))
2382 		ether_input_dispatch(chain);
2383 }
2384 
2385 static void
2386 jme_tick(void *xsc)
2387 {
2388 	struct jme_softc *sc = xsc;
2389 	struct ifnet *ifp = &sc->arpcom.ac_if;
2390 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2391 
2392 	lwkt_serialize_enter(ifp->if_serializer);
2393 
2394 	mii_tick(mii);
2395 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2396 
2397 	lwkt_serialize_exit(ifp->if_serializer);
2398 }
2399 
2400 static void
2401 jme_reset(struct jme_softc *sc)
2402 {
2403 #ifdef foo
2404 	/* Stop receiver, transmitter. */
2405 	jme_stop_rx(sc);
2406 	jme_stop_tx(sc);
2407 #endif
2408 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2409 	DELAY(10);
2410 	CSR_WRITE_4(sc, JME_GHC, 0);
2411 }
2412 
2413 static void
2414 jme_init(void *xsc)
2415 {
2416 	struct jme_softc *sc = xsc;
2417 	struct ifnet *ifp = &sc->arpcom.ac_if;
2418 	struct mii_data *mii;
2419 	uint8_t eaddr[ETHER_ADDR_LEN];
2420 	bus_addr_t paddr;
2421 	uint32_t reg;
2422 	int error, r;
2423 
2424 	ASSERT_SERIALIZED(ifp->if_serializer);
2425 
2426 	/*
2427 	 * Cancel any pending I/O.
2428 	 */
2429 	jme_stop(sc);
2430 
2431 	/*
2432 	 * Reset the chip to a known state.
2433 	 */
2434 	jme_reset(sc);
2435 
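	/*
	 * Worst-case number of Tx descriptors a single frame may need:
	 * one per MCLBYTES-sized chunk of the largest possible frame.
	 */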
2436 	sc->jme_txd_spare =
2437 	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2438 	KKASSERT(sc->jme_txd_spare >= 1);
2439 
2440 	/*
2441 	 * If we use 64bit address mode for transmitting, each Tx request
2442 	 * needs one more symbol descriptor.
2443 	 */
2444 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2445 		sc->jme_txd_spare += 1;
2446 
2447 	if (sc->jme_flags & JME_FLAG_RSS)
2448 		jme_enable_rss(sc);
2449 	else
2450 		jme_disable_rss(sc);
2451 
2452 	/* Init RX descriptors */
2453 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2454 		error = jme_init_rx_ring(sc, r);
2455 		if (error) {
2456 			if_printf(ifp, "initialization failed: "
2457 				  "no memory for %dth RX ring.\n", r);
2458 			jme_stop(sc);
2459 			return;
2460 		}
2461 	}
2462 
2463 	/* Init TX descriptors */
2464 	jme_init_tx_ring(sc);
2465 
2466 	/* Initialize shadow status block. */
2467 	jme_init_ssb(sc);
2468 
2469 	/* Reprogram the station address. */
2470 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2471 	CSR_WRITE_4(sc, JME_PAR0,
2472 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2473 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2474 
2475 	/*
2476 	 * Configure Tx queue.
2477 	 *  Tx priority queue weight value : 0
2478 	 *  Tx FIFO threshold for processing next packet : 16QW
2479 	 *  Maximum Tx DMA length : 512
2480 	 *  Allow Tx DMA burst.
2481 	 */
2482 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2483 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2484 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2485 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2486 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2487 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2488 
2489 	/* Set Tx descriptor counter. */
2490 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2491 
2492 	/* Set Tx ring address to the hardware. */
2493 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2494 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2495 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2496 
2497 	/* Configure TxMAC parameters. */
2498 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2499 	reg |= TXMAC_THRESH_1_PKT;
2500 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2501 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2502 
2503 	/*
2504 	 * Configure Rx queue.
2505 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2506 	 *  FIFO threshold for processing next packet : 128QW
2507 	 *  Rx queue 0 select
2508 	 *  Max Rx DMA length : 128
2509 	 *  Rx descriptor retry : 32
2510 	 *  Rx descriptor retry time gap : 256ns
2511 	 *  Don't receive runt/bad frame.
2512 	 */
2513 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2514 #if 0
2515 	/*
2516 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2517 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2518 	 * decrease FIFO threshold to reduce the FIFO overruns for
2519 	 * frames larger than 4000 bytes.
2520 	 * For best performance of standard MTU sized frames use
2521 	 * maximum allowable FIFO threshold, 128QW.
2522 	 */
2523 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2524 	    JME_RX_FIFO_SIZE)
2525 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2526 	else
2527 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2528 #else
2529 	/* Improve PCI Express compatibility */
2530 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2531 #endif
2532 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2533 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2534 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2535 	/* XXX TODO DROP_BAD */
2536 
2537 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2538 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2539 
2540 		/* Set Rx descriptor counter. */
2541 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2542 
2543 		/* Set Rx ring address to the hardware. */
2544 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2545 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2546 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2547 	}
2548 
2549 	/* Clear receive filter. */
2550 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2551 
2552 	/* Set up the receive filter. */
2553 	jme_set_filter(sc);
2554 	jme_set_vlan(sc);
2555 
2556 	/*
2557 	 * Disable all WOL bits as WOL can interfere with normal Rx
2558 	 * operation.  Also clear the WOL detection status bits.
2559 	 */
2560 	reg = CSR_READ_4(sc, JME_PMCS);
2561 	reg &= ~PMCS_WOL_ENB_MASK;
2562 	CSR_WRITE_4(sc, JME_PMCS, reg);
2563 
2564 	/*
2565 	 * Pad 10 bytes right before the received frame.  This greatly
2566 	 * helps Rx performance on strict-alignment architectures, as
2567 	 * the driver does not need to copy the frame to align the payload.
2568 	 */
2569 	reg = CSR_READ_4(sc, JME_RXMAC);
2570 	reg |= RXMAC_PAD_10BYTES;
2571 
2572 	if (ifp->if_capenable & IFCAP_RXCSUM)
2573 		reg |= RXMAC_CSUM_ENB;
2574 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2575 
2576 	/* Configure general purpose reg0 */
2577 	reg = CSR_READ_4(sc, JME_GPREG0);
2578 	reg &= ~GPREG0_PCC_UNIT_MASK;
2579 	/* Set PCC timer resolution to micro-seconds unit. */
2580 	reg |= GPREG0_PCC_UNIT_US;
2581 	/*
2582 	 * Disable all shadow register posting as we have to read the
2583 	 * JME_INTR_STATUS register in jme_intr.  It also seems hard to
2584 	 * synchronize interrupt status between hardware and software
2585 	 * with shadow posting, due to the requirements of
2586 	 * bus_dmamap_sync(9).
2587 	 */
2588 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2589 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2590 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2591 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2592 	/* Disable posting of DW0. */
2593 	reg &= ~GPREG0_POST_DW0_ENB;
2594 	/* Clear PME message. */
2595 	reg &= ~GPREG0_PME_ENB;
2596 	/* Set PHY address. */
2597 	reg &= ~GPREG0_PHY_ADDR_MASK;
2598 	reg |= sc->jme_phyaddr;
2599 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2600 
2601 	/* Configure Tx queue 0 packet completion coalescing. */
2602 	jme_set_tx_coal(sc);
2603 
2604 	/* Configure Rx queue 0 packet completion coalescing. */
2605 	jme_set_rx_coal(sc);
2606 
2607 	/* Configure shadow status block but don't enable posting. */
2608 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2609 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2610 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2611 
2612 	/* Disable Timer 1 and Timer 2. */
2613 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2614 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2615 
2616 	/* Configure retry transmit period, retry limit value. */
2617 	CSR_WRITE_4(sc, JME_TXTRHD,
2618 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2619 	    TXTRHD_RT_PERIOD_MASK) |
2620 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2621 	    TXTRHD_RT_LIMIT_MASK));
2622 
2623 #ifdef DEVICE_POLLING
2624 	if (!(ifp->if_flags & IFF_POLLING))
2625 #endif
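	/*
	 * Note that only the mask-set write below is skipped while
	 * polling is active; the status-clearing write always executes.
	 */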
2626 	/* Initialize the interrupt mask. */
2627 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2628 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2629 
2630 	/*
2631 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2632 	 * done after a valid link is detected in jme_miibus_statchg.
2633 	 */
2634 	sc->jme_flags &= ~JME_FLAG_LINK;
2635 
2636 	/* Set the current media. */
2637 	mii = device_get_softc(sc->jme_miibus);
2638 	mii_mediachg(mii);
2639 
2640 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2641 
2642 	ifp->if_flags |= IFF_RUNNING;
2643 	ifp->if_flags &= ~IFF_OACTIVE;
2644 }
2645 
2646 static void
2647 jme_stop(struct jme_softc *sc)
2648 {
2649 	struct ifnet *ifp = &sc->arpcom.ac_if;
2650 	struct jme_txdesc *txd;
2651 	struct jme_rxdesc *rxd;
2652 	struct jme_rxdata *rdata;
2653 	int i, r;
2654 
2655 	ASSERT_SERIALIZED(ifp->if_serializer);
2656 
2657 	/*
2658 	 * Mark the interface down and cancel the watchdog timer.
2659 	 */
2660 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2661 	ifp->if_timer = 0;
2662 
2663 	callout_stop(&sc->jme_tick_ch);
2664 	sc->jme_flags &= ~JME_FLAG_LINK;
2665 
2666 	/*
2667 	 * Disable interrupts.
2668 	 */
2669 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2670 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2671 
2672 	/* Disable updating shadow status block. */
2673 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2674 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2675 
2676 	/* Stop receiver, transmitter. */
2677 	jme_stop_rx(sc);
2678 	jme_stop_tx(sc);
2679 
2680 	/*
2681 	 * Free partially finished RX segments.
2682 	 */
2683 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2684 		rdata = &sc->jme_cdata.jme_rx_data[r];
2685 		if (rdata->jme_rxhead != NULL)
2686 			m_freem(rdata->jme_rxhead);
2687 		JME_RXCHAIN_RESET(sc, r);
2688 	}
2689 
2690 	/*
2691 	 * Free RX and TX mbufs still in the queues.
2692 	 */
2693 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2694 		rdata = &sc->jme_cdata.jme_rx_data[r];
2695 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2696 			rxd = &rdata->jme_rxdesc[i];
2697 			if (rxd->rx_m != NULL) {
2698 				bus_dmamap_unload(rdata->jme_rx_tag,
2699 						  rxd->rx_dmamap);
2700 				m_freem(rxd->rx_m);
2701 				rxd->rx_m = NULL;
2702 			}
2703 		}
2704 	}
2705 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2706 		txd = &sc->jme_cdata.jme_txdesc[i];
2707 		if (txd->tx_m != NULL) {
2708 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2709 			    txd->tx_dmamap);
2710 			m_freem(txd->tx_m);
2711 			txd->tx_m = NULL;
2712 			txd->tx_ndesc = 0;
2713 		}
2714 	}
2715 }
2716 
2717 static void
2718 jme_stop_tx(struct jme_softc *sc)
2719 {
2720 	uint32_t reg;
2721 	int i;
2722 
2723 	reg = CSR_READ_4(sc, JME_TXCSR);
2724 	if ((reg & TXCSR_TX_ENB) == 0)
2725 		return;
2726 	reg &= ~TXCSR_TX_ENB;
2727 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2728 	for (i = JME_TIMEOUT; i > 0; i--) {
2729 		DELAY(1);
2730 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2731 			break;
2732 	}
2733 	if (i == 0)
2734 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2735 }
2736 
2737 static void
2738 jme_stop_rx(struct jme_softc *sc)
2739 {
2740 	uint32_t reg;
2741 	int i;
2742 
2743 	reg = CSR_READ_4(sc, JME_RXCSR);
2744 	if ((reg & RXCSR_RX_ENB) == 0)
2745 		return;
2746 	reg &= ~RXCSR_RX_ENB;
2747 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2748 	for (i = JME_TIMEOUT; i > 0; i--) {
2749 		DELAY(1);
2750 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2751 			break;
2752 	}
2753 	if (i == 0)
2754 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2755 }
2756 
2757 static void
2758 jme_init_tx_ring(struct jme_softc *sc)
2759 {
2760 	struct jme_chain_data *cd;
2761 	struct jme_txdesc *txd;
2762 	int i;
2763 
2764 	sc->jme_cdata.jme_tx_prod = 0;
2765 	sc->jme_cdata.jme_tx_cons = 0;
2766 	sc->jme_cdata.jme_tx_cnt = 0;
2767 
2768 	cd = &sc->jme_cdata;
2769 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2770 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2771 		txd = &sc->jme_cdata.jme_txdesc[i];
2772 		txd->tx_m = NULL;
2773 		txd->tx_desc = &cd->jme_tx_ring[i];
2774 		txd->tx_ndesc = 0;
2775 	}
2776 
2777 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2778 			sc->jme_cdata.jme_tx_ring_map,
2779 			BUS_DMASYNC_PREWRITE);
2780 }
2781 
2782 static void
2783 jme_init_ssb(struct jme_softc *sc)
2784 {
2785 	struct jme_chain_data *cd;
2786 
2787 	cd = &sc->jme_cdata;
2788 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2789 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2790 			BUS_DMASYNC_PREWRITE);
2791 }
2792 
2793 static int
2794 jme_init_rx_ring(struct jme_softc *sc, int ring)
2795 {
2796 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2797 	struct jme_rxdesc *rxd;
2798 	int i;
2799 
2800 	KKASSERT(rdata->jme_rxhead == NULL &&
2801 		 rdata->jme_rxtail == NULL &&
2802 		 rdata->jme_rxlen == 0);
2803 	rdata->jme_rx_cons = 0;
2804 
2805 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2806 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2807 		int error;
2808 
2809 		rxd = &rdata->jme_rxdesc[i];
2810 		rxd->rx_m = NULL;
2811 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2812 		error = jme_newbuf(sc, ring, rxd, 1);
2813 		if (error)
2814 			return error;
2815 	}
2816 
2817 	bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2818 			BUS_DMASYNC_PREWRITE);
2819 	return 0;
2820 }
2821 
2822 static int
2823 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2824 {
2825 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2826 	struct jme_desc *desc;
2827 	struct mbuf *m;
2828 	struct jme_dmamap_ctx ctx;
2829 	bus_dma_segment_t segs;
2830 	bus_dmamap_t map;
2831 	int error;
2832 
2833 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2834 	if (m == NULL)
2835 		return ENOBUFS;
2836 	/*
2837 	 * The JMC250 has a 64bit boundary alignment limitation, so jme(4)
2838 	 * takes advantage of the hardware's 10 byte padding feature in
2839 	 * order not to copy the entire frame to align the IP header on a
2840 	 * 32bit boundary.
2841 	 */
2842 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2843 
2844 	ctx.nsegs = 1;
2845 	ctx.segs = &segs;
2846 	error = bus_dmamap_load_mbuf(rdata->jme_rx_tag,
2847 				     rdata->jme_rx_sparemap,
2848 				     m, jme_dmamap_buf_cb, &ctx,
2849 				     BUS_DMA_NOWAIT);
2850 	if (error || ctx.nsegs == 0) {
2851 		if (!error) {
2852 			bus_dmamap_unload(rdata->jme_rx_tag,
2853 					  rdata->jme_rx_sparemap);
2854 			error = EFBIG;
2855 			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
2856 		}
2857 		m_freem(m);
2858 
2859 		if (init)
2860 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2861 		return error;
2862 	}
2863 
2864 	if (rxd->rx_m != NULL) {
2865 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2866 				BUS_DMASYNC_POSTREAD);
2867 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2868 	}
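	/*
	 * The new buffer was loaded into the spare map above; swap it
	 * into the descriptor and recycle the old map as the new spare.
	 */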
2869 	map = rxd->rx_dmamap;
2870 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
2871 	rdata->jme_rx_sparemap = map;
2872 	rxd->rx_m = m;
2873 
2874 	desc = rxd->rx_desc;
2875 	desc->buflen = htole32(segs.ds_len);
2876 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2877 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2878 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2879 
2880 	return 0;
2881 }
2882 
2883 static void
2884 jme_set_vlan(struct jme_softc *sc)
2885 {
2886 	struct ifnet *ifp = &sc->arpcom.ac_if;
2887 	uint32_t reg;
2888 
2889 	ASSERT_SERIALIZED(ifp->if_serializer);
2890 
2891 	reg = CSR_READ_4(sc, JME_RXMAC);
2892 	reg &= ~RXMAC_VLAN_ENB;
2893 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2894 		reg |= RXMAC_VLAN_ENB;
2895 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2896 }
2897 
2898 static void
2899 jme_set_filter(struct jme_softc *sc)
2900 {
2901 	struct ifnet *ifp = &sc->arpcom.ac_if;
2902 	struct ifmultiaddr *ifma;
2903 	uint32_t crc;
2904 	uint32_t mchash[2];
2905 	uint32_t rxcfg;
2906 
2907 	ASSERT_SERIALIZED(ifp->if_serializer);
2908 
2909 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2910 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2911 	    RXMAC_ALLMULTI);
2912 
2913 	/*
2914 	 * Always accept frames destined to our station address.
2915 	 * Always accept broadcast frames.
2916 	 */
2917 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2918 
2919 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2920 		if (ifp->if_flags & IFF_PROMISC)
2921 			rxcfg |= RXMAC_PROMISC;
2922 		if (ifp->if_flags & IFF_ALLMULTI)
2923 			rxcfg |= RXMAC_ALLMULTI;
2924 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2925 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2926 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2927 		return;
2928 	}
2929 
2930 	/*
2931 	 * Set up the multicast address filter by passing all multicast
2932 	 * addresses through a CRC generator, and then using the low-order
2933 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2934 	 * high order bits select the register, while the rest of the bits
2935 	 * select the bit within the register.
2936 	 */
2937 	rxcfg |= RXMAC_MULTICAST;
2938 	bzero(mchash, sizeof(mchash));
2939 
2940 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2941 		if (ifma->ifma_addr->sa_family != AF_LINK)
2942 			continue;
2943 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2944 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2945 
2946 		/* Just want the 6 least significant bits. */
2947 		crc &= 0x3f;
2948 
2949 		/* Set the corresponding bit in the hash table. */
2950 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2951 	}
2952 
2953 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2954 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2955 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2956 }
2957 
2958 static int
2959 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2960 {
2961 	struct jme_softc *sc = arg1;
2962 	struct ifnet *ifp = &sc->arpcom.ac_if;
2963 	int error, v;
2964 
2965 	lwkt_serialize_enter(ifp->if_serializer);
2966 
2967 	v = sc->jme_tx_coal_to;
2968 	error = sysctl_handle_int(oidp, &v, 0, req);
2969 	if (error || req->newptr == NULL)
2970 		goto back;
2971 
2972 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2973 		error = EINVAL;
2974 		goto back;
2975 	}
2976 
2977 	if (v != sc->jme_tx_coal_to) {
2978 		sc->jme_tx_coal_to = v;
2979 		if (ifp->if_flags & IFF_RUNNING)
2980 			jme_set_tx_coal(sc);
2981 	}
2982 back:
2983 	lwkt_serialize_exit(ifp->if_serializer);
2984 	return error;
2985 }
2986 
2987 static int
2988 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2989 {
2990 	struct jme_softc *sc = arg1;
2991 	struct ifnet *ifp = &sc->arpcom.ac_if;
2992 	int error, v;
2993 
2994 	lwkt_serialize_enter(ifp->if_serializer);
2995 
2996 	v = sc->jme_tx_coal_pkt;
2997 	error = sysctl_handle_int(oidp, &v, 0, req);
2998 	if (error || req->newptr == NULL)
2999 		goto back;
3000 
3001 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3002 		error = EINVAL;
3003 		goto back;
3004 	}
3005 
3006 	if (v != sc->jme_tx_coal_pkt) {
3007 		sc->jme_tx_coal_pkt = v;
3008 		if (ifp->if_flags & IFF_RUNNING)
3009 			jme_set_tx_coal(sc);
3010 	}
3011 back:
3012 	lwkt_serialize_exit(ifp->if_serializer);
3013 	return error;
3014 }
3015 
3016 static int
3017 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3018 {
3019 	struct jme_softc *sc = arg1;
3020 	struct ifnet *ifp = &sc->arpcom.ac_if;
3021 	int error, v;
3022 
3023 	lwkt_serialize_enter(ifp->if_serializer);
3024 
3025 	v = sc->jme_rx_coal_to;
3026 	error = sysctl_handle_int(oidp, &v, 0, req);
3027 	if (error || req->newptr == NULL)
3028 		goto back;
3029 
3030 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3031 		error = EINVAL;
3032 		goto back;
3033 	}
3034 
3035 	if (v != sc->jme_rx_coal_to) {
3036 		sc->jme_rx_coal_to = v;
3037 		if (ifp->if_flags & IFF_RUNNING)
3038 			jme_set_rx_coal(sc);
3039 	}
3040 back:
3041 	lwkt_serialize_exit(ifp->if_serializer);
3042 	return error;
3043 }
3044 
3045 static int
3046 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3047 {
3048 	struct jme_softc *sc = arg1;
3049 	struct ifnet *ifp = &sc->arpcom.ac_if;
3050 	int error, v;
3051 
3052 	lwkt_serialize_enter(ifp->if_serializer);
3053 
3054 	v = sc->jme_rx_coal_pkt;
3055 	error = sysctl_handle_int(oidp, &v, 0, req);
3056 	if (error || req->newptr == NULL)
3057 		goto back;
3058 
3059 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3060 		error = EINVAL;
3061 		goto back;
3062 	}
3063 
3064 	if (v != sc->jme_rx_coal_pkt) {
3065 		sc->jme_rx_coal_pkt = v;
3066 		if (ifp->if_flags & IFF_RUNNING)
3067 			jme_set_rx_coal(sc);
3068 	}
3069 back:
3070 	lwkt_serialize_exit(ifp->if_serializer);
3071 	return error;
3072 }
3073 
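/*
 * Program Tx queue 0 packet completion coalescing: a Tx interrupt is
 * raised after jme_tx_coal_pkt completed packets or after jme_tx_coal_to
 * time units (microseconds, per GPREG0_PCC_UNIT_US set in jme_init),
 * presumably whichever comes first.
 */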
3074 static void
3075 jme_set_tx_coal(struct jme_softc *sc)
3076 {
3077 	uint32_t reg;
3078 
3079 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3080 	    PCCTX_COAL_TO_MASK;
3081 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3082 	    PCCTX_COAL_PKT_MASK;
3083 	reg |= PCCTX_COAL_TXQ0;
3084 	CSR_WRITE_4(sc, JME_PCCTX, reg);
3085 }
3086 
3087 static void
3088 jme_set_rx_coal(struct jme_softc *sc)
3089 {
3090 	uint32_t reg;
3091 	int r;
3092 
3093 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3094 	    PCCRX_COAL_TO_MASK;
3095 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3096 	    PCCRX_COAL_PKT_MASK;
3097 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3098 		if (r < sc->jme_rx_ring_inuse)
3099 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3100 		else
3101 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3102 	}
3103 }
3104 
3105 #ifdef DEVICE_POLLING
3106 
3107 static void
3108 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3109 {
3110 	struct jme_softc *sc = ifp->if_softc;
3111 	struct mbuf_chain chain[MAXCPU];
3112 	uint32_t status;
3113 	int r, prog = 0;
3114 
3115 	ASSERT_SERIALIZED(ifp->if_serializer);
3116 
3117 	switch (cmd) {
3118 	case POLL_REGISTER:
3119 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3120 		break;
3121 
3122 	case POLL_DEREGISTER:
3123 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3124 		break;
3125 
3126 	case POLL_AND_CHECK_STATUS:
3127 	case POLL_ONLY:
3128 		status = CSR_READ_4(sc, JME_INTR_STATUS);
3129 
3130 		ether_input_chain_init(chain);
3131 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
3132 			prog += jme_rxeof_chain(sc, r, chain, count);
3133 		if (prog)
3134 			ether_input_dispatch(chain);
3135 
3136 		if (status & INTR_RXQ_DESC_EMPTY) {
3137 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3138 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3139 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
3140 		}
3141 
3142 		jme_txeof(sc);
3143 		if (!ifq_is_empty(&ifp->if_snd))
3144 			if_devstart(ifp);
3145 		break;
3146 	}
3147 }
3148 
3149 #endif	/* DEVICE_POLLING */
3150 
3151 static int
3152 jme_rxring_dma_alloc(struct jme_softc *sc, bus_addr_t lowaddr, int ring)
3153 {
3154 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3155 	bus_addr_t busaddr;
3156 	int error;
3157 
3158 	/* Create tag for Rx ring. */
3159 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
3160 	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
3161 	    lowaddr,			/* lowaddr */
3162 	    BUS_SPACE_MAXADDR,		/* highaddr */
3163 	    NULL, NULL,			/* filter, filterarg */
3164 	    JME_RX_RING_SIZE(sc),	/* maxsize */
3165 	    1,				/* nsegments */
3166 	    JME_RX_RING_SIZE(sc),	/* maxsegsize */
3167 	    0,				/* flags */
3168 	    &rdata->jme_rx_ring_tag);
3169 	if (error) {
3170 		device_printf(sc->jme_dev,
3171 		    "could not allocate %dth Rx ring DMA tag.\n", ring);
3172 		return error;
3173 	}
3174 
3175 	/* Allocate DMA'able memory for RX ring */
3176 	error = bus_dmamem_alloc(rdata->jme_rx_ring_tag,
3177 				 (void **)&rdata->jme_rx_ring,
3178 				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
3179 				 &rdata->jme_rx_ring_map);
3180 	if (error) {
3181 		device_printf(sc->jme_dev,
3182 		    "could not allocate DMA'able memory for "
3183 		    "%dth Rx ring.\n", ring);
3184 		bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
3185 		rdata->jme_rx_ring_tag = NULL;
3186 		return error;
3187 	}
3188 
3189 	/* Load the DMA map for Rx ring. */
3190 	error = bus_dmamap_load(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
3191 				rdata->jme_rx_ring, JME_RX_RING_SIZE(sc),
3192 				jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
3193 	if (error) {
3194 		device_printf(sc->jme_dev,
3195 		    "could not load DMA'able memory for %dth Rx ring.\n", ring);
3196 		bus_dmamem_free(rdata->jme_rx_ring_tag, rdata->jme_rx_ring,
3197 				rdata->jme_rx_ring_map);
3198 		bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
3199 		rdata->jme_rx_ring_tag = NULL;
3200 		return error;
3201 	}
3202 	rdata->jme_rx_ring_paddr = busaddr;
3203 
3204 	return 0;
3205 }
3206 
3207 static int
3208 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3209 {
3210 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3211 	int i, error;
3212 
3213 	/* Create tag for Rx buffers. */
3214 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3215 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3216 	    sc->jme_lowaddr,		/* lowaddr */
3217 	    BUS_SPACE_MAXADDR,		/* highaddr */
3218 	    NULL, NULL,			/* filter, filterarg */
3219 	    MCLBYTES,			/* maxsize */
3220 	    1,				/* nsegments */
3221 	    MCLBYTES,			/* maxsegsize */
3222 	    0,				/* flags */
3223 	    &rdata->jme_rx_tag);
3224 	if (error) {
3225 		device_printf(sc->jme_dev,
3226 		    "could not create %dth Rx DMA tag.\n", ring);
3227 		return error;
3228 	}
3229 
3230 	/* Create DMA maps for Rx buffers. */
3231 	error = bus_dmamap_create(rdata->jme_rx_tag, 0,
3232 				  &rdata->jme_rx_sparemap);
3233 	if (error) {
3234 		device_printf(sc->jme_dev,
3235 		    "could not create %dth spare Rx dmamap.\n", ring);
3236 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3237 		rdata->jme_rx_tag = NULL;
3238 		return error;
3239 	}
3240 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3241 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3242 
3243 		error = bus_dmamap_create(rdata->jme_rx_tag, 0,
3244 					  &rxd->rx_dmamap);
3245 		if (error) {
3246 			int j;
3247 
3248 			device_printf(sc->jme_dev,
3249 			    "could not create %dth Rx dmamap "
3250 			    "for %dth RX ring.\n", i, ring);
3251 
3252 			for (j = 0; j < i; ++j) {
3253 				rxd = &rdata->jme_rxdesc[j];
3254 				bus_dmamap_destroy(rdata->jme_rx_tag,
3255 						   rxd->rx_dmamap);
3256 			}
3257 			bus_dmamap_destroy(rdata->jme_rx_tag,
3258 					   rdata->jme_rx_sparemap);
3259 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3260 			rdata->jme_rx_tag = NULL;
3261 			return error;
3262 		}
3263 	}
3264 	return 0;
3265 }
3266 
3267 static void
3268 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3269 {
3270 	struct mbuf_chain chain[MAXCPU];
3271 	int r, prog = 0;
3272 
3273 	ether_input_chain_init(chain);
3274 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3275 		if (status & jme_rx_status[r].jme_coal)
3276 			prog += jme_rxeof_chain(sc, r, chain, -1);
3277 	}
3278 	if (prog)
3279 		ether_input_dispatch(chain);
3280 }
3281 
3282 static void
3283 jme_enable_rss(struct jme_softc *sc)
3284 {
3285 	uint32_t rssc, key, ind;
3286 	int i;
3287 
3288 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3289 
3290 	rssc = RSSC_HASH_64_ENTRY;
3291 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3292 	rssc |= sc->jme_rx_ring_inuse >> 1;
3293 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3294 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3295 
3296 	key = 0x6d5a6d5a; /* XXX */
3297 	for (i = 0; i < RSSKEY_NREGS; ++i)
3298 		CSR_WRITE_4(sc, RSSKEY_REG(i), key);
3299 
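	/*
	 * Each 32bit indirection table register apparently holds four
	 * one-byte entries, each naming the target Rx ring for a hash
	 * bucket; the same byte pattern is replicated across all
	 * RSSTBL_NREGS registers to spread the buckets round-robin over
	 * the rings in use.
	 */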
3300 	ind = 0;
3301 	if (sc->jme_rx_ring_inuse == JME_NRXRING_2) {
3302 		ind = 0x01000100;
3303 	} else if (sc->jme_rx_ring_inuse == JME_NRXRING_4) {
3304 		ind = 0x03020100;
3305 	} else {
3306 		panic("%s: invalid # of RX rings (%d)\n",
3307 		      sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse);
3308 	}
3309 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3310 	for (i = 0; i < RSSTBL_NREGS; ++i)
3311 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3312 }
3313 
3314 static void
3315 jme_disable_rss(struct jme_softc *sc)
3316 {
3317 	sc->jme_rx_ring_inuse = JME_NRXRING_1;
3318 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3319 }
3320