xref: /dragonfly/sys/dev/netif/jme/if_jme.c (revision 6e278935)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29 
30 #include "opt_polling.h"
31 #include "opt_rss.h"
32 #include "opt_jme.h"
33 
34 #include <sys/param.h>
35 #include <sys/endian.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/interrupt.h>
39 #include <sys/malloc.h>
40 #include <sys/proc.h>
41 #include <sys/rman.h>
42 #include <sys/serialize.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46 
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/toeplitz.h>
55 #include <net/toeplitz2.h>
56 #include <net/vlan/if_vlan_var.h>
57 #include <net/vlan/if_vlan_ether.h>
58 
59 #include <netinet/in.h>
60 
61 #include <dev/netif/mii_layer/miivar.h>
62 #include <dev/netif/mii_layer/jmphyreg.h>
63 
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66 #include <bus/pci/pcidevs.h>
67 
68 #include <dev/netif/jme/if_jmereg.h>
69 #include <dev/netif/jme/if_jmevar.h>
70 
71 #include "miibus_if.h"
72 
73 /* Define JME_SHOW_ERRORS to enable reporting of Rx errors. */
74 #undef	JME_SHOW_ERRORS
75 
76 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
77 
78 #ifdef JME_RSS_DEBUG
79 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
80 do { \
81 	if ((sc)->jme_rss_debug >= (lvl)) \
82 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
83 } while (0)
84 #else	/* !JME_RSS_DEBUG */
85 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
86 #endif	/* JME_RSS_DEBUG */
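/* Illustrative use (hypothetical arguments): JME_RSS_DPRINTF(sc, 1, "ring %d\n", r); */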
87 
88 static int	jme_probe(device_t);
89 static int	jme_attach(device_t);
90 static int	jme_detach(device_t);
91 static int	jme_shutdown(device_t);
92 static int	jme_suspend(device_t);
93 static int	jme_resume(device_t);
94 
95 static int	jme_miibus_readreg(device_t, int, int);
96 static int	jme_miibus_writereg(device_t, int, int, int);
97 static void	jme_miibus_statchg(device_t);
98 
99 static void	jme_init(void *);
100 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
101 static void	jme_start(struct ifnet *);
102 static void	jme_watchdog(struct ifnet *);
103 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
104 static int	jme_mediachange(struct ifnet *);
105 #ifdef DEVICE_POLLING
106 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
107 #endif
108 
109 static void	jme_intr(void *);
110 static void	jme_txeof(struct jme_softc *);
111 static void	jme_rxeof(struct jme_softc *, int);
112 static int	jme_rxeof_chain(struct jme_softc *, int,
113 				struct mbuf_chain *, int);
114 static void	jme_rx_intr(struct jme_softc *, uint32_t);
115 
116 static int	jme_dma_alloc(struct jme_softc *);
117 static void	jme_dma_free(struct jme_softc *);
118 static int	jme_init_rx_ring(struct jme_softc *, int);
119 static void	jme_init_tx_ring(struct jme_softc *);
120 static void	jme_init_ssb(struct jme_softc *);
121 static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
122 static int	jme_encap(struct jme_softc *, struct mbuf **);
123 static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
124 static int	jme_rxring_dma_alloc(struct jme_softc *, int);
125 static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);
126 
127 static void	jme_tick(void *);
128 static void	jme_stop(struct jme_softc *);
129 static void	jme_reset(struct jme_softc *);
130 static void	jme_set_vlan(struct jme_softc *);
131 static void	jme_set_filter(struct jme_softc *);
132 static void	jme_stop_tx(struct jme_softc *);
133 static void	jme_stop_rx(struct jme_softc *);
134 static void	jme_mac_config(struct jme_softc *);
135 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
136 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
137 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
138 #ifdef notyet
139 static void	jme_setwol(struct jme_softc *);
140 static void	jme_setlinkspeed(struct jme_softc *);
141 #endif
142 static void	jme_set_tx_coal(struct jme_softc *);
143 static void	jme_set_rx_coal(struct jme_softc *);
144 static void	jme_enable_rss(struct jme_softc *);
145 static void	jme_disable_rss(struct jme_softc *);
146 
147 static void	jme_sysctl_node(struct jme_softc *);
148 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
149 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
150 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
151 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
152 
153 /*
154  * Devices supported by this driver.
155  */
156 static const struct jme_dev {
157 	uint16_t	jme_vendorid;
158 	uint16_t	jme_deviceid;
159 	uint32_t	jme_caps;
160 	const char	*jme_name;
161 } jme_devs[] = {
162 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
163 	    JME_CAP_JUMBO,
164 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
165 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
166 	    JME_CAP_FASTETH,
167 	    "JMicron Inc, JMC260 Fast Ethernet" },
168 	{ 0, 0, 0, NULL }
169 };
170 
171 static device_method_t jme_methods[] = {
172 	/* Device interface. */
173 	DEVMETHOD(device_probe,		jme_probe),
174 	DEVMETHOD(device_attach,	jme_attach),
175 	DEVMETHOD(device_detach,	jme_detach),
176 	DEVMETHOD(device_shutdown,	jme_shutdown),
177 	DEVMETHOD(device_suspend,	jme_suspend),
178 	DEVMETHOD(device_resume,	jme_resume),
179 
180 	/* Bus interface. */
181 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
182 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
183 
184 	/* MII interface. */
185 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
186 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
187 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
188 
189 	{ NULL, NULL }
190 };
191 
192 static driver_t jme_driver = {
193 	"jme",
194 	jme_methods,
195 	sizeof(struct jme_softc)
196 };
197 
198 static devclass_t jme_devclass;
199 
200 DECLARE_DUMMY_MODULE(if_jme);
201 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
202 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
203 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
204 
205 static const struct {
206 	uint32_t	jme_coal;
207 	uint32_t	jme_comp;
208 } jme_rx_status[JME_NRXRING_MAX] = {
209 	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
210 	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
211 	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
212 	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
213 };
214 
215 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
216 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
217 static int	jme_rx_ring_count = JME_NRXRING_DEF;
218 
219 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
220 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
221 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
222 
223 /*
224  *	Read a PHY register on the MII of the JMC250.
225  */
226 static int
227 jme_miibus_readreg(device_t dev, int phy, int reg)
228 {
229 	struct jme_softc *sc = device_get_softc(dev);
230 	uint32_t val;
231 	int i;
232 
233 	/* For the FPGA version, PHY address 0 should be ignored. */
234 	if (sc->jme_caps & JME_CAP_FPGA) {
235 		if (phy == 0)
236 			return (0);
237 	} else {
238 		if (sc->jme_phyaddr != phy)
239 			return (0);
240 	}
241 
242 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
243 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
244 
245 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
246 		DELAY(1);
247 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
248 			break;
249 	}
250 	if (i == 0) {
251 		device_printf(sc->jme_dev, "phy read timeout: "
252 			      "phy %d, reg %d\n", phy, reg);
253 		return (0);
254 	}
255 
256 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
257 }
258 
259 /*
260  *	Write a PHY register on the MII of the JMC250.
261  */
262 static int
263 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
264 {
265 	struct jme_softc *sc = device_get_softc(dev);
266 	int i;
267 
268 	/* For the FPGA version, PHY address 0 should be ignored. */
269 	if (sc->jme_caps & JME_CAP_FPGA) {
270 		if (phy == 0)
271 			return (0);
272 	} else {
273 		if (sc->jme_phyaddr != phy)
274 			return (0);
275 	}
276 
277 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
278 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
279 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
280 
281 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
282 		DELAY(1);
283 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
284 			break;
285 	}
286 	if (i == 0) {
287 		device_printf(sc->jme_dev, "phy write timeout: "
288 			      "phy %d, reg %d\n", phy, reg);
289 	}
290 
291 	return (0);
292 }
293 
294 /*
295  *	Callback from MII layer when media changes.
296  */
297 static void
298 jme_miibus_statchg(device_t dev)
299 {
300 	struct jme_softc *sc = device_get_softc(dev);
301 	struct ifnet *ifp = &sc->arpcom.ac_if;
302 	struct mii_data *mii;
303 	struct jme_txdesc *txd;
304 	bus_addr_t paddr;
305 	int i, r;
306 
307 	ASSERT_SERIALIZED(ifp->if_serializer);
308 
309 	if ((ifp->if_flags & IFF_RUNNING) == 0)
310 		return;
311 
312 	mii = device_get_softc(sc->jme_miibus);
313 
314 	sc->jme_flags &= ~JME_FLAG_LINK;
315 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
316 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
317 		case IFM_10_T:
318 		case IFM_100_TX:
319 			sc->jme_flags |= JME_FLAG_LINK;
320 			break;
321 		case IFM_1000_T:
322 			if (sc->jme_caps & JME_CAP_FASTETH)
323 				break;
324 			sc->jme_flags |= JME_FLAG_LINK;
325 			break;
326 		default:
327 			break;
328 		}
329 	}
330 
331 	/*
332 	 * Disabling the Rx/Tx MACs has the side-effect of resetting the
333 	 * JME_TXNDA/JME_RXNDA registers to the first address of the
334 	 * Tx/Rx descriptor rings, so the driver must reset its
335 	 * internal producer/consumer pointers and reclaim any
336 	 * allocated resources.  Note that just saving the values of
337 	 * the JME_TXNDA and JME_RXNDA registers before stopping the MAC
338 	 * and restoring them afterwards is not
339 	 * sufficient to guarantee a correct MAC state, because
340 	 * stopping the MAC can take a while and the hardware
341 	 * might have updated the JME_TXNDA/JME_RXNDA registers
342 	 * during the stop operation.
343 	 */
344 
345 	/* Disable interrupts */
346 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
347 
348 	/* Stop driver */
349 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
350 	ifp->if_timer = 0;
351 	callout_stop(&sc->jme_tick_ch);
352 
353 	/* Stop receiver/transmitter. */
354 	jme_stop_rx(sc);
355 	jme_stop_tx(sc);
356 
357 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
358 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
359 
360 		jme_rxeof(sc, r);
361 		if (rdata->jme_rxhead != NULL)
362 			m_freem(rdata->jme_rxhead);
363 		JME_RXCHAIN_RESET(sc, r);
364 
365 		/*
366 		 * Reuse configured Rx descriptors and reset
367 		 * producer/consumer index.
368 		 */
369 		rdata->jme_rx_cons = 0;
370 	}
371 
372 	jme_txeof(sc);
373 	if (sc->jme_cdata.jme_tx_cnt != 0) {
374 		/* Remove queued packets for transmit. */
375 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
376 			txd = &sc->jme_cdata.jme_txdesc[i];
377 			if (txd->tx_m != NULL) {
378 				bus_dmamap_unload(
379 				    sc->jme_cdata.jme_tx_tag,
380 				    txd->tx_dmamap);
381 				m_freem(txd->tx_m);
382 				txd->tx_m = NULL;
383 				txd->tx_ndesc = 0;
384 				ifp->if_oerrors++;
385 			}
386 		}
387 	}
388 	jme_init_tx_ring(sc);
389 
390 	/* Initialize shadow status block. */
391 	jme_init_ssb(sc);
392 
393 	/* Program MAC with resolved speed/duplex/flow-control. */
394 	if (sc->jme_flags & JME_FLAG_LINK) {
395 		jme_mac_config(sc);
396 
397 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
398 
399 		/* Set Tx ring address to the hardware. */
400 		paddr = sc->jme_cdata.jme_tx_ring_paddr;
401 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
402 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
403 
404 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
405 			CSR_WRITE_4(sc, JME_RXCSR,
406 			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
407 
408 			/* Set Rx ring address to the hardware. */
409 			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
410 			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
411 			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
412 		}
413 
414 		/* Restart receiver/transmitter. */
415 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
416 		    RXCSR_RXQ_START);
417 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
418 	}
419 
420 	ifp->if_flags |= IFF_RUNNING;
421 	ifp->if_flags &= ~IFF_OACTIVE;
422 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
423 
424 #ifdef DEVICE_POLLING
425 	if (!(ifp->if_flags & IFF_POLLING))
426 #endif
427 	/* Reenable interrupts. */
428 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
429 }
430 
431 /*
432  *	Get the current interface media status.
433  */
434 static void
435 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
436 {
437 	struct jme_softc *sc = ifp->if_softc;
438 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
439 
440 	ASSERT_SERIALIZED(ifp->if_serializer);
441 
442 	mii_pollstat(mii);
443 	ifmr->ifm_status = mii->mii_media_status;
444 	ifmr->ifm_active = mii->mii_media_active;
445 }
446 
447 /*
448  *	Set hardware to newly-selected media.
449  */
450 static int
451 jme_mediachange(struct ifnet *ifp)
452 {
453 	struct jme_softc *sc = ifp->if_softc;
454 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
455 	int error;
456 
457 	ASSERT_SERIALIZED(ifp->if_serializer);
458 
459 	if (mii->mii_instance != 0) {
460 		struct mii_softc *miisc;
461 
462 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
463 			mii_phy_reset(miisc);
464 	}
465 	error = mii_mediachg(mii);
466 
467 	return (error);
468 }
469 
470 static int
471 jme_probe(device_t dev)
472 {
473 	const struct jme_dev *sp;
474 	uint16_t vid, did;
475 
476 	vid = pci_get_vendor(dev);
477 	did = pci_get_device(dev);
478 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
479 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
480 			struct jme_softc *sc = device_get_softc(dev);
481 
482 			sc->jme_caps = sp->jme_caps;
483 			device_set_desc(dev, sp->jme_name);
484 			return (0);
485 		}
486 	}
487 	return (ENXIO);
488 }
489 
490 static int
491 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
492 {
493 	uint32_t reg;
494 	int i;
495 
496 	*val = 0;
497 	for (i = JME_TIMEOUT; i > 0; i--) {
498 		reg = CSR_READ_4(sc, JME_SMBCSR);
499 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
500 			break;
501 		DELAY(1);
502 	}
503 
504 	if (i == 0) {
505 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
506 		return (ETIMEDOUT);
507 	}
508 
509 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
510 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
511 	for (i = JME_TIMEOUT; i > 0; i--) {
512 		DELAY(1);
513 		reg = CSR_READ_4(sc, JME_SMBINTF);
514 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
515 			break;
516 	}
517 
518 	if (i == 0) {
519 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
520 		return (ETIMEDOUT);
521 	}
522 
523 	reg = CSR_READ_4(sc, JME_SMBINTF);
524 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
525 
526 	return (0);
527 }
528 
529 static int
530 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
531 {
532 	uint8_t fup, reg, val;
533 	uint32_t offset;
534 	int match;
535 
536 	offset = 0;
537 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
538 	    fup != JME_EEPROM_SIG0)
539 		return (ENOENT);
540 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
541 	    fup != JME_EEPROM_SIG1)
542 		return (ENOENT);
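	/*
	 * What follows is a list of EEPROM descriptors, each
	 * JME_EEPROM_DESC_BYTES long: a function/page byte, a
	 * register offset and a data byte.
	 */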
543 	match = 0;
544 	do {
545 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
546 			break;
547 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
548 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
549 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
550 				break;
551 			if (reg >= JME_PAR0 &&
552 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
553 				if (jme_eeprom_read_byte(sc, offset + 2,
554 				    &val) != 0)
555 					break;
556 				eaddr[reg - JME_PAR0] = val;
557 				match++;
558 			}
559 		}
560 		/* Check for the end of the EEPROM descriptor list. */
561 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
562 			break;
563 		/* Try the next EEPROM descriptor. */
564 		offset += JME_EEPROM_DESC_BYTES;
565 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
566 
567 	if (match == ETHER_ADDR_LEN)
568 		return (0);
569 
570 	return (ENOENT);
571 }
572 
573 static void
574 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
575 {
576 	uint32_t par0, par1;
577 
578 	/* Read station address. */
579 	par0 = CSR_READ_4(sc, JME_PAR0);
580 	par1 = CSR_READ_4(sc, JME_PAR1);
581 	par1 &= 0xFFFF;
582 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
583 		device_printf(sc->jme_dev,
584 		    "generating fake ethernet address.\n");
585 		par0 = karc4random();
586 		/* Set OUI to JMicron. */
587 		eaddr[0] = 0x00;
588 		eaddr[1] = 0x1B;
589 		eaddr[2] = 0x8C;
590 		eaddr[3] = (par0 >> 16) & 0xff;
591 		eaddr[4] = (par0 >> 8) & 0xff;
592 		eaddr[5] = par0 & 0xff;
593 	} else {
594 		eaddr[0] = (par0 >> 0) & 0xFF;
595 		eaddr[1] = (par0 >> 8) & 0xFF;
596 		eaddr[2] = (par0 >> 16) & 0xFF;
597 		eaddr[3] = (par0 >> 24) & 0xFF;
598 		eaddr[4] = (par1 >> 0) & 0xFF;
599 		eaddr[5] = (par1 >> 8) & 0xFF;
600 	}
601 }
602 
603 static int
604 jme_attach(device_t dev)
605 {
606 	struct jme_softc *sc = device_get_softc(dev);
607 	struct ifnet *ifp = &sc->arpcom.ac_if;
608 	uint32_t reg;
609 	uint16_t did;
610 	uint8_t pcie_ptr, rev;
611 	int error = 0;
612 	uint8_t eaddr[ETHER_ADDR_LEN];
613 
614 	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
615 	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
616 		sc->jme_rx_desc_cnt = JME_NDESC_MAX;
617 
618 	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
619 	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
620 		sc->jme_tx_desc_cnt = JME_NDESC_MAX;
621 
622 	/*
623 	 * Calculate the number of Rx rings based on ncpus2.
624 	 */
625 	sc->jme_rx_ring_cnt = jme_rx_ring_count;
626 	if (sc->jme_rx_ring_cnt <= 0)
627 		sc->jme_rx_ring_cnt = JME_NRXRING_1;
628 	if (sc->jme_rx_ring_cnt > ncpus2)
629 		sc->jme_rx_ring_cnt = ncpus2;
630 
631 	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
632 		sc->jme_rx_ring_cnt = JME_NRXRING_4;
633 	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
634 		sc->jme_rx_ring_cnt = JME_NRXRING_2;
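	/*
	 * The ring count is thus rounded down to 1, 2 or 4 (assuming the
	 * JME_NRXRING_* constants); e.g. a request for 3 rings ends up as 2.
	 */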
635 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
636 
637 	sc->jme_dev = dev;
638 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
639 
640 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
641 
642 	callout_init(&sc->jme_tick_ch);
643 
644 #ifndef BURN_BRIDGES
645 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
646 		uint32_t irq, mem;
647 
648 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
649 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
650 
651 		device_printf(dev, "chip is in D%d power mode "
652 		    "-- setting to D0\n", pci_get_powerstate(dev));
653 
654 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
655 
656 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
657 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
658 	}
659 #endif	/* !BURN_BRIDGES */
660 
661 	/* Enable bus mastering */
662 	pci_enable_busmaster(dev);
663 
664 	/*
665 	 * Allocate IO memory
666 	 *
667 	 * The JMC250 supports both memory-mapped and I/O register space
668 	 * access.  Because I/O access would need multiple BARs to reach
669 	 * all registers, it is a waste of time to use it; the JMC250
670 	 * maps its entire register space into a single 16K memory
671 	 * region.
672 	 */
673 	sc->jme_mem_rid = JME_PCIR_BAR;
674 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
675 						 &sc->jme_mem_rid, RF_ACTIVE);
676 	if (sc->jme_mem_res == NULL) {
677 		device_printf(dev, "can't allocate IO memory\n");
678 		return ENXIO;
679 	}
680 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
681 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
682 
683 	/*
684 	 * Allocate IRQ
685 	 */
686 	sc->jme_irq_rid = 0;
687 	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
688 						 &sc->jme_irq_rid,
689 						 RF_SHAREABLE | RF_ACTIVE);
690 	if (sc->jme_irq_res == NULL) {
691 		device_printf(dev, "can't allocate irq\n");
692 		error = ENXIO;
693 		goto fail;
694 	}
695 
696 	/*
697 	 * Extract revisions
698 	 */
699 	reg = CSR_READ_4(sc, JME_CHIPMODE);
700 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
701 	    CHIPMODE_NOT_FPGA) {
702 		sc->jme_caps |= JME_CAP_FPGA;
703 		if (bootverbose) {
704 			device_printf(dev, "FPGA revision: 0x%04x\n",
705 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
706 				      CHIPMODE_FPGA_REV_SHIFT);
707 		}
708 	}
709 
710 	/* NOTE: FM revision is put in the upper 4 bits */
711 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
712 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
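	/* Example: FM revision 2 with ECO revision 0 encodes as rev 0x20. */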
713 	if (bootverbose)
714 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
715 
716 	did = pci_get_device(dev);
717 	switch (did) {
718 	case PCI_PRODUCT_JMICRON_JMC250:
719 		if (rev == JME_REV1_A2)
720 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
721 		break;
722 
723 	case PCI_PRODUCT_JMICRON_JMC260:
724 		if (rev == JME_REV2)
725 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
726 		break;
727 
728 	default:
729 		panic("unknown device id 0x%04x\n", did);
730 	}
731 	if (rev >= JME_REV2) {
732 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
733 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
734 				      GHC_TXMAC_CLKSRC_1000;
735 	}
736 
737 	/* Reset the ethernet controller. */
738 	jme_reset(sc);
739 
740 	/* Get station address. */
741 	reg = CSR_READ_4(sc, JME_SMBCSR);
742 	if (reg & SMBCSR_EEPROM_PRESENT)
743 		error = jme_eeprom_macaddr(sc, eaddr);
744 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
745 		if (error != 0 && (bootverbose)) {
746 			device_printf(dev, "ethernet hardware address "
747 				      "not found in EEPROM.\n");
748 		}
749 		jme_reg_macaddr(sc, eaddr);
750 	}
751 
752 	/*
753 	 * Save PHY address.
754 	 * The integrated JR0211 has a fixed PHY address, whereas the
755 	 * FPGA version requires PHY probing to find the correct address.
756 	 */
757 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
758 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
759 		    GPREG0_PHY_ADDR_MASK;
760 		if (bootverbose) {
761 			device_printf(dev, "PHY is at address %d.\n",
762 			    sc->jme_phyaddr);
763 		}
764 	} else {
765 		sc->jme_phyaddr = 0;
766 	}
767 
768 	/* Set max allowable DMA size. */
769 	pcie_ptr = pci_get_pciecap_ptr(dev);
770 	if (pcie_ptr != 0) {
771 		uint16_t ctrl;
772 
773 		sc->jme_caps |= JME_CAP_PCIE;
774 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
775 		if (bootverbose) {
776 			device_printf(dev, "Read request size : %d bytes.\n",
777 			    128 << ((ctrl >> 12) & 0x07));
778 			device_printf(dev, "TLP payload size : %d bytes.\n",
779 			    128 << ((ctrl >> 5) & 0x07));
780 		}
781 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
782 		case PCIEM_DEVCTL_MAX_READRQ_128:
783 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
784 			break;
785 		case PCIEM_DEVCTL_MAX_READRQ_256:
786 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
787 			break;
788 		default:
789 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
790 			break;
791 		}
792 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
793 	} else {
794 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
795 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
796 	}
797 
798 #ifdef notyet
799 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
800 		sc->jme_caps |= JME_CAP_PMCAP;
801 #endif
802 
803 	/*
804 	 * Create sysctl tree
805 	 */
806 	jme_sysctl_node(sc);
807 
808 	/* Allocate DMA stuff */
809 	error = jme_dma_alloc(sc);
810 	if (error)
811 		goto fail;
812 
813 	ifp->if_softc = sc;
814 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
815 	ifp->if_init = jme_init;
816 	ifp->if_ioctl = jme_ioctl;
817 	ifp->if_start = jme_start;
818 #ifdef DEVICE_POLLING
819 	ifp->if_poll = jme_poll;
820 #endif
821 	ifp->if_watchdog = jme_watchdog;
822 	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
823 	ifq_set_ready(&ifp->if_snd);
824 
825 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
826 	ifp->if_capabilities = IFCAP_HWCSUM |
827 			       IFCAP_VLAN_MTU |
828 			       IFCAP_VLAN_HWTAGGING;
829 	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
830 		ifp->if_capabilities |= IFCAP_RSS;
831 	ifp->if_capenable = ifp->if_capabilities;
832 
833 	/*
834 	 * Disable TXCSUM by default to improve bulk data
835 	 * transmit performance (roughly a 20Mbps improvement).
836 	 */
837 	ifp->if_capenable &= ~IFCAP_TXCSUM;
838 
839 	if (ifp->if_capenable & IFCAP_TXCSUM)
840 		ifp->if_hwassist = JME_CSUM_FEATURES;
841 
842 	/* Set up MII bus. */
843 	error = mii_phy_probe(dev, &sc->jme_miibus,
844 			      jme_mediachange, jme_mediastatus);
845 	if (error) {
846 		device_printf(dev, "no PHY found!\n");
847 		goto fail;
848 	}
849 
850 	/*
851 	 * Save PHYADDR for FPGA mode PHY.
852 	 */
853 	if (sc->jme_caps & JME_CAP_FPGA) {
854 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
855 
856 		if (mii->mii_instance != 0) {
857 			struct mii_softc *miisc;
858 
859 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
860 				if (miisc->mii_phy != 0) {
861 					sc->jme_phyaddr = miisc->mii_phy;
862 					break;
863 				}
864 			}
865 			if (sc->jme_phyaddr != 0) {
866 				device_printf(sc->jme_dev,
867 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
868 				/* vendor magic. */
869 				jme_miibus_writereg(dev, sc->jme_phyaddr,
870 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
871 
872 				/* XXX should we clear JME_WA_EXTFIFO */
873 			}
874 		}
875 	}
876 
877 	ether_ifattach(ifp, eaddr, NULL);
878 
879 	/* Tell the upper layer(s) we support long frames. */
880 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
881 
882 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
883 			       &sc->jme_irq_handle, ifp->if_serializer);
884 	if (error) {
885 		device_printf(dev, "could not set up interrupt handler.\n");
886 		ether_ifdetach(ifp);
887 		goto fail;
888 	}
889 
890 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
891 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
892 	return 0;
893 fail:
894 	jme_detach(dev);
895 	return (error);
896 }
897 
898 static int
899 jme_detach(device_t dev)
900 {
901 	struct jme_softc *sc = device_get_softc(dev);
902 
903 	if (device_is_attached(dev)) {
904 		struct ifnet *ifp = &sc->arpcom.ac_if;
905 
906 		lwkt_serialize_enter(ifp->if_serializer);
907 		jme_stop(sc);
908 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
909 		lwkt_serialize_exit(ifp->if_serializer);
910 
911 		ether_ifdetach(ifp);
912 	}
913 
914 	if (sc->jme_sysctl_tree != NULL)
915 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
916 
917 	if (sc->jme_miibus != NULL)
918 		device_delete_child(dev, sc->jme_miibus);
919 	bus_generic_detach(dev);
920 
921 	if (sc->jme_irq_res != NULL) {
922 		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
923 				     sc->jme_irq_res);
924 	}
925 
926 	if (sc->jme_mem_res != NULL) {
927 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
928 				     sc->jme_mem_res);
929 	}
930 
931 	jme_dma_free(sc);
932 
933 	return (0);
934 }
935 
936 static void
937 jme_sysctl_node(struct jme_softc *sc)
938 {
939 	int coal_max;
940 #ifdef JME_RSS_DEBUG
941 	char rx_ring_pkt[32];
942 	int r;
943 #endif
944 
945 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
946 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
947 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
948 				device_get_nameunit(sc->jme_dev),
949 				CTLFLAG_RD, 0, "");
950 	if (sc->jme_sysctl_tree == NULL) {
951 		device_printf(sc->jme_dev, "can't add sysctl node\n");
952 		return;
953 	}
954 
955 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
956 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
957 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
958 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
959 
960 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
961 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
962 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
963 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
964 
965 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
966 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
967 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
968 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
969 
970 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
971 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
972 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
973 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
974 
975 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
976 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
977 		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
978 		       0, "RX desc count");
979 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
980 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
981 		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
982 		       0, "TX desc count");
983 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
984 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
985 		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
986 		       0, "RX ring count");
987 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
988 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
989 		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
990 		       0, "RX ring in use");
991 #ifdef JME_RSS_DEBUG
992 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
993 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
994 		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
995 		       0, "RSS debug level");
996 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
997 		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
998 		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
999 				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1000 				rx_ring_pkt, CTLFLAG_RW,
1001 				&sc->jme_rx_ring_pkt[r],
1002 				0, "RXed packets");
1003 	}
1004 #endif
1005 
1006 	/*
1007 	 * Set default coalescing values.
1008 	 */
1009 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1010 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1011 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1012 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1013 
1014 	/*
1015 	 * Adjust the coalescing values in case the number of TX/RX
1016 	 * descriptors was set to a small value by the user.
1017 	 *
1018 	 * NOTE: coal_max cannot be zero, since the number of descriptors
1019 	 * must be aligned to JME_NDESC_ALIGN (currently 16).
1020 	 */
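	/* Example: with 256 Tx descriptors, coal_max is 256 / 6 = 42 packets. */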
1021 	coal_max = sc->jme_tx_desc_cnt / 6;
1022 	if (coal_max < sc->jme_tx_coal_pkt)
1023 		sc->jme_tx_coal_pkt = coal_max;
1024 
1025 	coal_max = sc->jme_rx_desc_cnt / 4;
1026 	if (coal_max < sc->jme_rx_coal_pkt)
1027 		sc->jme_rx_coal_pkt = coal_max;
1028 }
1029 
1030 static int
1031 jme_dma_alloc(struct jme_softc *sc)
1032 {
1033 	struct jme_txdesc *txd;
1034 	bus_dmamem_t dmem;
1035 	int error, i;
1036 
1037 	sc->jme_cdata.jme_txdesc =
1038 	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1039 		M_DEVBUF, M_WAITOK | M_ZERO);
1040 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1041 		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
1042 		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1043 			M_DEVBUF, M_WAITOK | M_ZERO);
1044 	}
1045 
1046 	/* Create parent ring tag. */
1047 	error = bus_dma_tag_create(NULL,/* parent */
1048 	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
1049 	    sc->jme_lowaddr,		/* lowaddr */
1050 	    BUS_SPACE_MAXADDR,		/* highaddr */
1051 	    NULL, NULL,			/* filter, filterarg */
1052 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1053 	    0,				/* nsegments */
1054 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1055 	    0,				/* flags */
1056 	    &sc->jme_cdata.jme_ring_tag);
1057 	if (error) {
1058 		device_printf(sc->jme_dev,
1059 		    "could not create parent ring DMA tag.\n");
1060 		return error;
1061 	}
1062 
1063 	/*
1064 	 * Create DMA stuff for the TX ring
1065 	 */
1066 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1067 			JME_TX_RING_ALIGN, 0,
1068 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1069 			JME_TX_RING_SIZE(sc),
1070 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1071 	if (error) {
1072 		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1073 		return error;
1074 	}
1075 	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1076 	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1077 	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1078 	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
1079 
1080 	/*
1081 	 * Create DMA stuff for the RX rings
1082 	 */
1083 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1084 		error = jme_rxring_dma_alloc(sc, i);
1085 		if (error)
1086 			return error;
1087 	}
1088 
1089 	/* Create parent buffer tag. */
1090 	error = bus_dma_tag_create(NULL,/* parent */
1091 	    1, 0,			/* algnmnt, boundary */
1092 	    sc->jme_lowaddr,		/* lowaddr */
1093 	    BUS_SPACE_MAXADDR,		/* highaddr */
1094 	    NULL, NULL,			/* filter, filterarg */
1095 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1096 	    0,				/* nsegments */
1097 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1098 	    0,				/* flags */
1099 	    &sc->jme_cdata.jme_buffer_tag);
1100 	if (error) {
1101 		device_printf(sc->jme_dev,
1102 		    "could not create parent buffer DMA tag.\n");
1103 		return error;
1104 	}
1105 
1106 	/*
1107 	 * Create DMA stuff for the shadow status block
1108 	 */
1109 	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1110 			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1111 			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1112 	if (error) {
1113 		device_printf(sc->jme_dev,
1114 		    "could not create shadow status block.\n");
1115 		return error;
1116 	}
1117 	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1118 	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1119 	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1120 	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1121 
1122 	/*
1123 	 * Create DMA stuff for TX buffers
1124 	 */
1125 
1126 	/* Create tag for Tx buffers. */
1127 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1128 	    1, 0,			/* algnmnt, boundary */
1129 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1130 	    BUS_SPACE_MAXADDR,		/* highaddr */
1131 	    NULL, NULL,			/* filter, filterarg */
1132 	    JME_JUMBO_FRAMELEN,		/* maxsize */
1133 	    JME_MAXTXSEGS,		/* nsegments */
1134 	    JME_MAXSEGSIZE,		/* maxsegsize */
1135 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1136 	    &sc->jme_cdata.jme_tx_tag);
1137 	if (error != 0) {
1138 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1139 		return error;
1140 	}
1141 
1142 	/* Create DMA maps for Tx buffers. */
1143 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1144 		txd = &sc->jme_cdata.jme_txdesc[i];
1145 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1146 				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1147 				&txd->tx_dmamap);
1148 		if (error) {
1149 			int j;
1150 
1151 			device_printf(sc->jme_dev,
1152 			    "could not create %dth Tx dmamap.\n", i);
1153 
1154 			for (j = 0; j < i; ++j) {
1155 				txd = &sc->jme_cdata.jme_txdesc[j];
1156 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1157 						   txd->tx_dmamap);
1158 			}
1159 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1160 			sc->jme_cdata.jme_tx_tag = NULL;
1161 			return error;
1162 		}
1163 	}
1164 
1165 	/*
1166 	 * Create DMA stuff for RX buffers
1167 	 */
1168 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1169 		error = jme_rxbuf_dma_alloc(sc, i);
1170 		if (error)
1171 			return error;
1172 	}
1173 	return 0;
1174 }
1175 
1176 static void
1177 jme_dma_free(struct jme_softc *sc)
1178 {
1179 	struct jme_txdesc *txd;
1180 	struct jme_rxdesc *rxd;
1181 	struct jme_rxdata *rdata;
1182 	int i, r;
1183 
1184 	/* Tx ring */
1185 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1186 		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1187 		    sc->jme_cdata.jme_tx_ring_map);
1188 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1189 		    sc->jme_cdata.jme_tx_ring,
1190 		    sc->jme_cdata.jme_tx_ring_map);
1191 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1192 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1193 	}
1194 
1195 	/* Rx ring */
1196 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1197 		rdata = &sc->jme_cdata.jme_rx_data[r];
1198 		if (rdata->jme_rx_ring_tag != NULL) {
1199 			bus_dmamap_unload(rdata->jme_rx_ring_tag,
1200 					  rdata->jme_rx_ring_map);
1201 			bus_dmamem_free(rdata->jme_rx_ring_tag,
1202 					rdata->jme_rx_ring,
1203 					rdata->jme_rx_ring_map);
1204 			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1205 			rdata->jme_rx_ring_tag = NULL;
1206 		}
1207 	}
1208 
1209 	/* Tx buffers */
1210 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1211 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1212 			txd = &sc->jme_cdata.jme_txdesc[i];
1213 			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1214 			    txd->tx_dmamap);
1215 		}
1216 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1217 		sc->jme_cdata.jme_tx_tag = NULL;
1218 	}
1219 
1220 	/* Rx buffers */
1221 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1222 		rdata = &sc->jme_cdata.jme_rx_data[r];
1223 		if (rdata->jme_rx_tag != NULL) {
1224 			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1225 				rxd = &rdata->jme_rxdesc[i];
1226 				bus_dmamap_destroy(rdata->jme_rx_tag,
1227 						   rxd->rx_dmamap);
1228 			}
1229 			bus_dmamap_destroy(rdata->jme_rx_tag,
1230 					   rdata->jme_rx_sparemap);
1231 			bus_dma_tag_destroy(rdata->jme_rx_tag);
1232 			rdata->jme_rx_tag = NULL;
1233 		}
1234 	}
1235 
1236 	/* Shadow status block. */
1237 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1238 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1239 		    sc->jme_cdata.jme_ssb_map);
1240 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1241 		    sc->jme_cdata.jme_ssb_block,
1242 		    sc->jme_cdata.jme_ssb_map);
1243 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1244 		sc->jme_cdata.jme_ssb_tag = NULL;
1245 	}
1246 
1247 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1248 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1249 		sc->jme_cdata.jme_buffer_tag = NULL;
1250 	}
1251 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1252 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1253 		sc->jme_cdata.jme_ring_tag = NULL;
1254 	}
1255 
1256 	if (sc->jme_cdata.jme_txdesc != NULL) {
1257 		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1258 		sc->jme_cdata.jme_txdesc = NULL;
1259 	}
1260 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1261 		rdata = &sc->jme_cdata.jme_rx_data[r];
1262 		if (rdata->jme_rxdesc != NULL) {
1263 			kfree(rdata->jme_rxdesc, M_DEVBUF);
1264 			rdata->jme_rxdesc = NULL;
1265 		}
1266 	}
1267 }
1268 
1269 /*
1270  *	Make sure the interface is stopped at reboot time.
1271  */
1272 static int
1273 jme_shutdown(device_t dev)
1274 {
1275 	return jme_suspend(dev);
1276 }
1277 
1278 #ifdef notyet
1279 /*
1280  * Unlike other ethernet controllers, the JMC250 requires the link
1281  * speed to be explicitly reset to 10/100Mbps, as a gigabit
1282  * link would consume more than 375mA.
1283  * Note that we reset the link speed to 10/100Mbps with
1284  * auto-negotiation, but we don't know whether that operation
1285  * will succeed, as we have no control after powering
1286  * off. If the renegotiation fails, WOL may not work. Running
1287  * at 1Gbps draws more than the 375mA at 3.3V allowed by the
1288  * PCI specification, and that would result in power to the
1289  * ethernet controller being shut down completely.
1290  *
1291  * TODO
1292  *  Save current negotiated media speed/duplex/flow-control
1293  *  to softc and restore the same link again after resuming.
1294  *  PHY handling such as power down/resetting to 100Mbps
1295  *  may be better handled in suspend method in phy driver.
1296  */
1297 static void
1298 jme_setlinkspeed(struct jme_softc *sc)
1299 {
1300 	struct mii_data *mii;
1301 	int aneg, i;
1302 
1303 	JME_LOCK_ASSERT(sc);
1304 
1305 	mii = device_get_softc(sc->jme_miibus);
1306 	mii_pollstat(mii);
1307 	aneg = 0;
1308 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1309 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1310 		case IFM_10_T:
1311 		case IFM_100_TX:
1312 			return;
1313 		case IFM_1000_T:
1314 			aneg++;
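			/* FALLTHROUGH */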
1315 		default:
1316 			break;
1317 		}
1318 	}
1319 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1320 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1321 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1322 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1323 	    BMCR_AUTOEN | BMCR_STARTNEG);
1324 	DELAY(1000);
1325 	if (aneg != 0) {
1326 		/* Poll link state until jme(4) gets a 10/100 link. */
1327 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1328 			mii_pollstat(mii);
1329 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1330 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1331 				case IFM_10_T:
1332 				case IFM_100_TX:
1333 					jme_mac_config(sc);
1334 					return;
1335 				default:
1336 					break;
1337 				}
1338 			}
1339 			JME_UNLOCK(sc);
1340 			pause("jmelnk", hz);
1341 			JME_LOCK(sc);
1342 		}
1343 		if (i == MII_ANEGTICKS_GIGE)
1344 			device_printf(sc->jme_dev, "establishing link failed, "
1345 			    "WOL may not work!");
1346 	}
1347 	/*
1348 	 * No link, force MAC to have 100Mbps, full-duplex link.
1349 	 * This is the last resort and may or may not work.
1350 	 */
1351 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1352 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1353 	jme_mac_config(sc);
1354 }
1355 
1356 static void
1357 jme_setwol(struct jme_softc *sc)
1358 {
1359 	struct ifnet *ifp = &sc->arpcom.ac_if;
1360 	uint32_t gpr, pmcs;
1361 	uint16_t pmstat;
1362 	int pmc;
1363 
1364 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1365 		/* No PME capability, PHY power down. */
1366 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1367 		    MII_BMCR, BMCR_PDOWN);
1368 		return;
1369 	}
1370 
1371 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1372 	pmcs = CSR_READ_4(sc, JME_PMCS);
1373 	pmcs &= ~PMCS_WOL_ENB_MASK;
1374 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1375 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1376 		/* Enable PME message. */
1377 		gpr |= GPREG0_PME_ENB;
1378 		/* For gigabit controllers, reset link speed to 10/100. */
1379 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1380 			jme_setlinkspeed(sc);
1381 	}
1382 
1383 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1384 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1385 
1386 	/* Request PME. */
1387 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1388 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1389 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1390 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1391 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1392 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1393 		/* No WOL, PHY power down. */
1394 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1395 		    MII_BMCR, BMCR_PDOWN);
1396 	}
1397 }
1398 #endif
1399 
1400 static int
1401 jme_suspend(device_t dev)
1402 {
1403 	struct jme_softc *sc = device_get_softc(dev);
1404 	struct ifnet *ifp = &sc->arpcom.ac_if;
1405 
1406 	lwkt_serialize_enter(ifp->if_serializer);
1407 	jme_stop(sc);
1408 #ifdef notyet
1409 	jme_setwol(sc);
1410 #endif
1411 	lwkt_serialize_exit(ifp->if_serializer);
1412 
1413 	return (0);
1414 }
1415 
1416 static int
1417 jme_resume(device_t dev)
1418 {
1419 	struct jme_softc *sc = device_get_softc(dev);
1420 	struct ifnet *ifp = &sc->arpcom.ac_if;
1421 #ifdef notyet
1422 	int pmc;
1423 #endif
1424 
1425 	lwkt_serialize_enter(ifp->if_serializer);
1426 
1427 #ifdef notyet
1428 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1429 		uint16_t pmstat;
1430 
1431 		pmstat = pci_read_config(sc->jme_dev,
1432 		    pmc + PCIR_POWER_STATUS, 2);
1433 		/* Disable PME and clear PME status. */
1434 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1435 		pci_write_config(sc->jme_dev,
1436 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1437 	}
1438 #endif
1439 
1440 	if (ifp->if_flags & IFF_UP)
1441 		jme_init(sc);
1442 
1443 	lwkt_serialize_exit(ifp->if_serializer);
1444 
1445 	return (0);
1446 }
1447 
1448 static int
1449 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1450 {
1451 	struct jme_txdesc *txd;
1452 	struct jme_desc *desc;
1453 	struct mbuf *m;
1454 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1455 	int maxsegs, nsegs;
1456 	int error, i, prod, symbol_desc;
1457 	uint32_t cflags, flag64;
1458 
1459 	M_ASSERTPKTHDR((*m_head));
1460 
1461 	prod = sc->jme_cdata.jme_tx_prod;
1462 	txd = &sc->jme_cdata.jme_txdesc[prod];
1463 
1464 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1465 		symbol_desc = 1;
1466 	else
1467 		symbol_desc = 0;
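	/*
	 * With more than 32 DMA address bits, the first descriptor of
	 * each chain is a payload-less 64-bit "symbol" descriptor, so
	 * budget one extra slot for it (see the chain setup below).
	 */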
1468 
1469 	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1470 		  (JME_TXD_RSVD + symbol_desc);
1471 	if (maxsegs > JME_MAXTXSEGS)
1472 		maxsegs = JME_MAXTXSEGS;
1473 	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1474 		("not enough segments %d\n", maxsegs));
1475 
1476 	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1477 			txd->tx_dmamap, m_head,
1478 			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1479 	if (error)
1480 		goto fail;
1481 
1482 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1483 			BUS_DMASYNC_PREWRITE);
1484 
1485 	m = *m_head;
1486 	cflags = 0;
1487 
1488 	/* Configure checksum offload. */
1489 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1490 		cflags |= JME_TD_IPCSUM;
1491 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1492 		cflags |= JME_TD_TCPCSUM;
1493 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1494 		cflags |= JME_TD_UDPCSUM;
1495 
1496 	/* Configure VLAN. */
1497 	if (m->m_flags & M_VLANTAG) {
1498 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1499 		cflags |= JME_TD_VLAN_TAG;
1500 	}
1501 
1502 	desc = &sc->jme_cdata.jme_tx_ring[prod];
1503 	desc->flags = htole32(cflags);
1504 	desc->addr_hi = htole32(m->m_pkthdr.len);
1505 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1506 		/*
1507 		 * Use 64bits TX desc chain format.
1508 		 *
1509 		 * The first TX desc of the chain, which is setup here,
1510 		 * is just a symbol TX desc carrying no payload.
1511 		 */
1512 		flag64 = JME_TD_64BIT;
1513 		desc->buflen = 0;
1514 		desc->addr_lo = 0;
1515 
1516 		/* No effective TX desc is consumed */
1517 		i = 0;
1518 	} else {
1519 		/*
1520 		 * Use 32bits TX desc chain format.
1521 		 *
1522 		 * The first TX desc of the chain, which is setup here,
1523 		 * is an effective TX desc carrying the first segment of
1524 		 * the mbuf chain.
1525 		 */
1526 		flag64 = 0;
1527 		desc->buflen = htole32(txsegs[0].ds_len);
1528 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1529 
1530 		/* One effective TX desc is consumed */
1531 		i = 1;
1532 	}
1533 	sc->jme_cdata.jme_tx_cnt++;
1534 	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1535 		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1536 	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1537 
1538 	txd->tx_ndesc = 1 - i;
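	/*
	 * After the "+= nsegs" below, tx_ndesc ends up as nsegs, plus
	 * one if a symbol descriptor was used.
	 */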
1539 	for (; i < nsegs; i++) {
1540 		desc = &sc->jme_cdata.jme_tx_ring[prod];
1541 		desc->flags = htole32(JME_TD_OWN | flag64);
1542 		desc->buflen = htole32(txsegs[i].ds_len);
1543 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1544 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1545 
1546 		sc->jme_cdata.jme_tx_cnt++;
1547 		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1548 			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1549 		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1550 	}
1551 
1552 	/* Update producer index. */
1553 	sc->jme_cdata.jme_tx_prod = prod;
1554 	/*
1555 	 * Finally, request an interrupt and hand the first descriptor to
1556 	 * the hardware; its OWN bit is set last to avoid a partial chain.
1557 	 */
1558 	desc = txd->tx_desc;
1559 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1560 
1561 	txd->tx_m = m;
1562 	txd->tx_ndesc += nsegs;
1563 
1564 	return 0;
1565 fail:
1566 	m_freem(*m_head);
1567 	*m_head = NULL;
1568 	return error;
1569 }
1570 
1571 static void
1572 jme_start(struct ifnet *ifp)
1573 {
1574 	struct jme_softc *sc = ifp->if_softc;
1575 	struct mbuf *m_head;
1576 	int enq = 0;
1577 
1578 	ASSERT_SERIALIZED(ifp->if_serializer);
1579 
1580 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1581 		ifq_purge(&ifp->if_snd);
1582 		return;
1583 	}
1584 
1585 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1586 		return;
1587 
1588 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1589 		jme_txeof(sc);
1590 
1591 	while (!ifq_is_empty(&ifp->if_snd)) {
1592 		/*
1593 		 * Check the number of available TX descs; always
1594 		 * leave JME_TXD_RSVD TX descs free.
1595 		 */
1596 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1597 		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1598 			ifp->if_flags |= IFF_OACTIVE;
1599 			break;
1600 		}
1601 
1602 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1603 		if (m_head == NULL)
1604 			break;
1605 
1606 		/*
1607 		 * Pack the data into the transmit ring. If we
1608 		 * don't have room, set the OACTIVE flag and wait
1609 		 * for the NIC to drain the ring.
1610 		 */
1611 		if (jme_encap(sc, &m_head)) {
1612 			KKASSERT(m_head == NULL);
1613 			ifp->if_oerrors++;
1614 			ifp->if_flags |= IFF_OACTIVE;
1615 			break;
1616 		}
1617 		enq++;
1618 
1619 		/*
1620 		 * If there's a BPF listener, bounce a copy of this frame
1621 		 * to him.
1622 		 */
1623 		ETHER_BPF_MTAP(ifp, m_head);
1624 	}
1625 
1626 	if (enq > 0) {
1627 		/*
1628 		 * Reading TXCSR takes a very long time under heavy load,
1629 		 * so cache the TXCSR value and write the cached value ORed
1630 		 * with the kick command to TXCSR. This saves one register
1631 		 * access cycle.
1632 		 */
1633 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1634 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1635 		/* Set a timeout in case the chip goes out to lunch. */
1636 		ifp->if_timer = JME_TX_TIMEOUT;
1637 	}
1638 }
1639 
1640 static void
1641 jme_watchdog(struct ifnet *ifp)
1642 {
1643 	struct jme_softc *sc = ifp->if_softc;
1644 
1645 	ASSERT_SERIALIZED(ifp->if_serializer);
1646 
1647 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1648 		if_printf(ifp, "watchdog timeout (missed link)\n");
1649 		ifp->if_oerrors++;
1650 		jme_init(sc);
1651 		return;
1652 	}
1653 
1654 	jme_txeof(sc);
1655 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1656 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1657 			  "-- recovering\n");
1658 		if (!ifq_is_empty(&ifp->if_snd))
1659 			if_devstart(ifp);
1660 		return;
1661 	}
1662 
1663 	if_printf(ifp, "watchdog timeout\n");
1664 	ifp->if_oerrors++;
1665 	jme_init(sc);
1666 	if (!ifq_is_empty(&ifp->if_snd))
1667 		if_devstart(ifp);
1668 }
1669 
1670 static int
1671 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1672 {
1673 	struct jme_softc *sc = ifp->if_softc;
1674 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1675 	struct ifreq *ifr = (struct ifreq *)data;
1676 	int error = 0, mask;
1677 
1678 	ASSERT_SERIALIZED(ifp->if_serializer);
1679 
1680 	switch (cmd) {
1681 	case SIOCSIFMTU:
1682 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1683 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1684 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1685 			error = EINVAL;
1686 			break;
1687 		}
1688 
1689 		if (ifp->if_mtu != ifr->ifr_mtu) {
1690 			/*
1691 			 * No special configuration is required when the
1692 			 * interface MTU is changed, but availability of Tx
1693 			 * checksum offload should be checked against the new
1694 			 * MTU size, as the Tx FIFO is just 2K.
1695 			 */
1696 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1697 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1698 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1699 			}
1700 			ifp->if_mtu = ifr->ifr_mtu;
1701 			if (ifp->if_flags & IFF_RUNNING)
1702 				jme_init(sc);
1703 		}
1704 		break;
1705 
1706 	case SIOCSIFFLAGS:
1707 		if (ifp->if_flags & IFF_UP) {
1708 			if (ifp->if_flags & IFF_RUNNING) {
1709 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1710 				    (IFF_PROMISC | IFF_ALLMULTI))
1711 					jme_set_filter(sc);
1712 			} else {
1713 				jme_init(sc);
1714 			}
1715 		} else {
1716 			if (ifp->if_flags & IFF_RUNNING)
1717 				jme_stop(sc);
1718 		}
1719 		sc->jme_if_flags = ifp->if_flags;
1720 		break;
1721 
1722 	case SIOCADDMULTI:
1723 	case SIOCDELMULTI:
1724 		if (ifp->if_flags & IFF_RUNNING)
1725 			jme_set_filter(sc);
1726 		break;
1727 
1728 	case SIOCSIFMEDIA:
1729 	case SIOCGIFMEDIA:
1730 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1731 		break;
1732 
1733 	case SIOCSIFCAP:
1734 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1735 
1736 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1737 			ifp->if_capenable ^= IFCAP_TXCSUM;
1738 			if (IFCAP_TXCSUM & ifp->if_capenable)
1739 				ifp->if_hwassist |= JME_CSUM_FEATURES;
1740 			else
1741 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1742 		}
1743 		if (mask & IFCAP_RXCSUM) {
1744 			uint32_t reg;
1745 
1746 			ifp->if_capenable ^= IFCAP_RXCSUM;
1747 			reg = CSR_READ_4(sc, JME_RXMAC);
1748 			reg &= ~RXMAC_CSUM_ENB;
1749 			if (ifp->if_capenable & IFCAP_RXCSUM)
1750 				reg |= RXMAC_CSUM_ENB;
1751 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1752 		}
1753 
1754 		if (mask & IFCAP_VLAN_HWTAGGING) {
1755 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1756 			jme_set_vlan(sc);
1757 		}
1758 
1759 		if (mask & IFCAP_RSS) {
1760 			ifp->if_capenable ^= IFCAP_RSS;
1761 			if (ifp->if_flags & IFF_RUNNING)
1762 				jme_init(sc);
1763 		}
1764 		break;
1765 
1766 	default:
1767 		error = ether_ioctl(ifp, cmd, data);
1768 		break;
1769 	}
1770 	return (error);
1771 }
1772 
1773 static void
1774 jme_mac_config(struct jme_softc *sc)
1775 {
1776 	struct mii_data *mii;
1777 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1778 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1779 
1780 	mii = device_get_softc(sc->jme_miibus);
1781 
1782 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1783 	DELAY(10);
1784 	CSR_WRITE_4(sc, JME_GHC, 0);
1785 	ghc = 0;
1786 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1787 	rxmac &= ~RXMAC_FC_ENB;
1788 	txmac = CSR_READ_4(sc, JME_TXMAC);
1789 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1790 	txpause = CSR_READ_4(sc, JME_TXPFC);
1791 	txpause &= ~TXPFC_PAUSE_ENB;
1792 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1793 		ghc |= GHC_FULL_DUPLEX;
1794 		rxmac &= ~RXMAC_COLL_DET_ENB;
1795 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1796 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1797 		    TXMAC_FRAME_BURST);
1798 #ifdef notyet
1799 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1800 			txpause |= TXPFC_PAUSE_ENB;
1801 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1802 			rxmac |= RXMAC_FC_ENB;
1803 #endif
1804 		/* Disable retry transmit timer/retry limit. */
1805 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1806 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1807 	} else {
1808 		rxmac |= RXMAC_COLL_DET_ENB;
1809 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1810 		/* Enable retry transmit timer/retry limit. */
1811 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1812 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1813 	}
1814 
1815 	/*
1816 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1817 	 */
1818 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1819 	gp1 &= ~GPREG1_WA_HDX;
1820 
1821 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1822 		hdx = 1;
1823 
1824 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1825 	case IFM_10_T:
1826 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1827 		if (hdx)
1828 			gp1 |= GPREG1_WA_HDX;
1829 		break;
1830 
1831 	case IFM_100_TX:
1832 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1833 		if (hdx)
1834 			gp1 |= GPREG1_WA_HDX;
1835 
1836 		/*
1837 		 * Use extended FIFO depth to work around CRC errors
1838 		 * emitted by chips before the JMC250B.
1839 		 */
1840 		phyconf = JMPHY_CONF_EXTFIFO;
1841 		break;
1842 
1843 	case IFM_1000_T:
1844 		if (sc->jme_caps & JME_CAP_FASTETH)
1845 			break;
1846 
1847 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1848 		if (hdx)
1849 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1850 		break;
1851 
1852 	default:
1853 		break;
1854 	}
1855 	CSR_WRITE_4(sc, JME_GHC, ghc);
1856 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1857 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1858 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1859 
1860 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
1861 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1862 				    JMPHY_CONF, phyconf);
1863 	}
1864 	if (sc->jme_workaround & JME_WA_HDX)
1865 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1866 }
1867 
1868 static void
1869 jme_intr(void *xsc)
1870 {
1871 	struct jme_softc *sc = xsc;
1872 	struct ifnet *ifp = &sc->arpcom.ac_if;
1873 	uint32_t status;
1874 	int r;
1875 
1876 	ASSERT_SERIALIZED(ifp->if_serializer);
1877 
1878 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1879 	if (status == 0 || status == 0xFFFFFFFF)
1880 		return;
1881 
1882 	/* Disable interrupts. */
1883 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1884 
1885 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1886 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1887 		goto back;
1888 
1889 	/* Reset PCC counter/timer and Ack interrupts. */
1890 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1891 
1892 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1893 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1894 
1895 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
1896 		if (status & jme_rx_status[r].jme_coal) {
1897 			status |= jme_rx_status[r].jme_coal |
1898 				  jme_rx_status[r].jme_comp;
1899 		}
1900 	}
1901 
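	/*
	 * Writing the accumulated COAL|COMP bits back to the status
	 * register acks the pending interrupts and resets the matching
	 * PCC counters/timers in a single register access.
	 */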
1902 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1903 
1904 	if (ifp->if_flags & IFF_RUNNING) {
1905 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1906 			jme_rx_intr(sc, status);
1907 
1908 		if (status & INTR_RXQ_DESC_EMPTY) {
1909 			/*
1910 			 * Notify the hardware of new Rx buffer availability.
1911 			 * Reading RXCSR takes a very long time under heavy
1912 			 * load, so cache the RXCSR value and write it back
1913 			 * ORed with the kick command. This saves one
1914 			 * register access cycle.
1915 			 */
1916 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1917 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1918 		}
1919 
1920 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1921 			jme_txeof(sc);
1922 			if (!ifq_is_empty(&ifp->if_snd))
1923 				if_devstart(ifp);
1924 		}
1925 	}
1926 back:
1927 	/* Reenable interrupts. */
1928 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1929 }
1930 
1931 static void
1932 jme_txeof(struct jme_softc *sc)
1933 {
1934 	struct ifnet *ifp = &sc->arpcom.ac_if;
1935 	struct jme_txdesc *txd;
1936 	uint32_t status;
1937 	int cons, nsegs;
1938 
1939 	cons = sc->jme_cdata.jme_tx_cons;
1940 	if (cons == sc->jme_cdata.jme_tx_prod)
1941 		return;
1942 
1943 	/*
1944 	 * Go through our Tx list and free mbufs for those
1945 	 * frames which have been transmitted.
1946 	 */
1947 	while (cons != sc->jme_cdata.jme_tx_prod) {
1948 		txd = &sc->jme_cdata.jme_txdesc[cons];
1949 		KASSERT(txd->tx_m != NULL,
1950 			("%s: freeing NULL mbuf!\n", __func__));
1951 
1952 		status = le32toh(txd->tx_desc->flags);
1953 		if ((status & JME_TD_OWN) == JME_TD_OWN)
1954 			break;
1955 
1956 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1957 			ifp->if_oerrors++;
1958 		} else {
1959 			ifp->if_opackets++;
1960 			if (status & JME_TD_COLLISION) {
1961 				ifp->if_collisions +=
1962 				    le32toh(txd->tx_desc->buflen) &
1963 				    JME_TD_BUF_LEN_MASK;
1964 			}
1965 		}
1966 
1967 		/*
1968 		 * Only the first descriptor of a multi-descriptor
1969 		 * transmission is updated, so the driver has to skip the
1970 		 * entire chain of buffers for the transmitted frame. In
1971 		 * other words, the JME_TD_OWN bit is valid only in the
1972 		 * first descriptor of a multi-descriptor transmission.
1973 		 */
1974 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1975 			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
1976 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
1977 		}
1978 
1979 		/* Reclaim transferred mbufs. */
1980 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1981 		m_freem(txd->tx_m);
1982 		txd->tx_m = NULL;
1983 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1984 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
1985 			("%s: Active Tx desc counter was garbled\n", __func__));
1986 		txd->tx_ndesc = 0;
1987 	}
1988 	sc->jme_cdata.jme_tx_cons = cons;
1989 
1990 	if (sc->jme_cdata.jme_tx_cnt == 0)
1991 		ifp->if_timer = 0;
1992 
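	/*
	 * Re-enable the transmit path once enough descriptors are free
	 * to hold one more worst-case fragmented frame (jme_txd_spare)
	 * on top of the reserved descriptors.
	 */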
1993 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
1994 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
1995 		ifp->if_flags &= ~IFF_OACTIVE;
1996 }
1997 
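/*
 * Hand 'count' descriptors starting at 'cons' straight back to the
 * hardware without replacing their mbufs; used to drop a frame in
 * place when an error occurs or no replacement buffer is available.
 */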
1998 static __inline void
1999 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2000 {
2001 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2002 	int i;
2003 
2004 	for (i = 0; i < count; ++i) {
2005 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2006 
2007 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2008 		desc->buflen = htole32(MCLBYTES);
2009 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2010 	}
2011 }
2012 
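/*
 * Translate Rx descriptor flags into a pktinfo for RSS-aware packet
 * dispatch.  Returns NULL when the frame cannot be classified.
 */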
2013 static __inline struct pktinfo *
2014 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2015 {
2016 	if (flags & JME_RD_IPV4)
2017 		pi->pi_netisr = NETISR_IP;
2018 	else if (flags & JME_RD_IPV6)
2019 		pi->pi_netisr = NETISR_IPV6;
2020 	else
2021 		return NULL;
2022 
2023 	pi->pi_flags = 0;
2024 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2025 
2026 	if (flags & JME_RD_MORE_FRAG)
2027 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2028 	else if (flags & JME_RD_TCP)
2029 		pi->pi_l3proto = IPPROTO_TCP;
2030 	else if (flags & JME_RD_UDP)
2031 		pi->pi_l3proto = IPPROTO_UDP;
2032 	else
2033 		pi = NULL;
2034 	return pi;
2035 }
2036 
2037 /* Receive a frame. */
2038 static void
2039 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2040 {
2041 	struct ifnet *ifp = &sc->arpcom.ac_if;
2042 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2043 	struct jme_desc *desc;
2044 	struct jme_rxdesc *rxd;
2045 	struct mbuf *mp, *m;
2046 	uint32_t flags, status, hash, hashinfo;
2047 	int cons, count, nsegs;
2048 
2049 	cons = rdata->jme_rx_cons;
2050 	desc = &rdata->jme_rx_ring[cons];
2051 	flags = le32toh(desc->flags);
2052 	status = le32toh(desc->buflen);
2053 	hash = le32toh(desc->addr_hi);
2054 	hashinfo = le32toh(desc->addr_lo);
2055 	nsegs = JME_RX_NSEGS(status);
2056 
2057 	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
2058 			"hash 0x%08x, hash info 0x%08x\n",
2059 			ring, flags, hash, hashinfo);
2060 
2061 	if (status & JME_RX_ERR_STAT) {
2062 		ifp->if_ierrors++;
2063 		jme_discard_rxbufs(sc, ring, cons, nsegs);
2064 #ifdef JME_SHOW_ERRORS
2065 		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2066 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2067 #endif
2068 		rdata->jme_rx_cons += nsegs;
2069 		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2070 		return;
2071 	}
2072 
2073 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2074 	for (count = 0; count < nsegs; count++,
2075 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2076 		rxd = &rdata->jme_rxdesc[cons];
2077 		mp = rxd->rx_m;
2078 
2079 		/* Add a new receive buffer to the ring. */
2080 		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2081 			ifp->if_iqdrops++;
2082 			/* Reuse buffer. */
2083 			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2084 			if (rdata->jme_rxhead != NULL) {
2085 				m_freem(rdata->jme_rxhead);
2086 				JME_RXCHAIN_RESET(sc, ring);
2087 			}
2088 			break;
2089 		}
2090 
2091 		/*
2092 		 * Assume we've received a full sized frame.
2093 		 * The actual size is fixed up when we encounter the
2094 		 * end of a multi-segmented frame.
2095 		 */
2096 		mp->m_len = MCLBYTES;
2097 
2098 		/* Chain received mbufs. */
2099 		if (rdata->jme_rxhead == NULL) {
2100 			rdata->jme_rxhead = mp;
2101 			rdata->jme_rxtail = mp;
2102 		} else {
2103 			/*
2104 			 * The receive processor can receive a maximum
2105 			 * frame size of 65535 bytes.
2106 			 */
2107 			rdata->jme_rxtail->m_next = mp;
2108 			rdata->jme_rxtail = mp;
2109 		}
2110 
2111 		if (count == nsegs - 1) {
2112 			struct pktinfo pi0, *pi;
2113 
2114 			/* Last desc. for this frame. */
2115 			m = rdata->jme_rxhead;
2116 			m->m_pkthdr.len = rdata->jme_rxlen;
2117 			if (nsegs > 1) {
2118 				/* Set first mbuf size. */
2119 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2120 				/* Set last mbuf size. */
2121 				mp->m_len = rdata->jme_rxlen -
2122 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2123 				    (MCLBYTES * (nsegs - 2)));
2124 			} else {
2125 				m->m_len = rdata->jme_rxlen;
2126 			}
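			/*
			 * Example, assuming 2K clusters (MCLBYTES == 2048):
			 * a frame occupying 4010 bytes in the FIFO (payload
			 * plus the 10 byte pad) arrives as nsegs == 2; the
			 * head mbuf then carries 2048 - 10 == 2038 bytes
			 * and the tail mbuf the remaining
			 * 4000 - 2038 == 1962 bytes.
			 */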
2127 			m->m_pkthdr.rcvif = ifp;
2128 
2129 			/*
2130 			 * Account for the 10 bytes of auto padding which is
2131 			 * used to align the IP header on a 32bit boundary.
2132 			 * Also note that the CRC bytes are automatically
2133 			 * removed by the hardware.
2134 			 */
2135 			m->m_data += JME_RX_PAD_BYTES;
2136 
2137 			/* Set checksum information. */
2138 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2139 			    (flags & JME_RD_IPV4)) {
2140 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2141 				if (flags & JME_RD_IPCSUM)
2142 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2143 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2144 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2145 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2146 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2147 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2148 					m->m_pkthdr.csum_flags |=
2149 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2150 					m->m_pkthdr.csum_data = 0xffff;
2151 				}
2152 			}
2153 
2154 			/* Check for VLAN tagged packets. */
2155 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2156 			    (flags & JME_RD_VLAN_TAG)) {
2157 				m->m_pkthdr.ether_vlantag =
2158 				    flags & JME_RD_VLAN_MASK;
2159 				m->m_flags |= M_VLANTAG;
2160 			}
2161 
2162 			ifp->if_ipackets++;
2163 
2164 			if (ifp->if_capenable & IFCAP_RSS)
2165 				pi = jme_pktinfo(&pi0, flags);
2166 			else
2167 				pi = NULL;
2168 
2169 			if (pi != NULL &&
2170 			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
2171 				m->m_flags |= M_HASH;
2172 				m->m_pkthdr.hash = toeplitz_hash(hash);
2173 			}
2174 
2175 #ifdef JME_RSS_DEBUG
2176 			if (pi != NULL) {
2177 				JME_RSS_DPRINTF(sc, 10,
2178 				    "isr %d flags %08x, l3 %d %s\n",
2179 				    pi->pi_netisr, pi->pi_flags,
2180 				    pi->pi_l3proto,
2181 				    (m->m_flags & M_HASH) ? "hash" : "");
2182 			}
2183 #endif
2184 
2185 			/* Pass it on. */
2186 			ether_input_chain(ifp, m, pi, chain);
2187 
2188 			/* Reset mbuf chains. */
2189 			JME_RXCHAIN_RESET(sc, ring);
2190 #ifdef JME_RSS_DEBUG
2191 			sc->jme_rx_ring_pkt[ring]++;
2192 #endif
2193 		}
2194 	}
2195 
2196 	rdata->jme_rx_cons += nsegs;
2197 	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2198 }
2199 
2200 static int
2201 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2202 		int count)
2203 {
2204 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2205 	struct jme_desc *desc;
2206 	int nsegs, prog, pktlen;
2207 
2208 	prog = 0;
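	/*
	 * A non-negative 'count' (the polling path) bounds the number
	 * of frames processed per call; a negative 'count' (the
	 * interrupt path) drains the ring until a descriptor is still
	 * owned by the hardware.
	 */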
2209 	for (;;) {
2210 #ifdef DEVICE_POLLING
2211 		if (count >= 0 && count-- == 0)
2212 			break;
2213 #endif
2214 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2215 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2216 			break;
2217 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2218 			break;
2219 
2220 		/*
2221 		 * Check the number of segments against received bytes.
2222 		 * A non-matching value would indicate that the hardware
2223 		 * is still trying to update the Rx descriptors. I'm not
2224 		 * sure whether this check is needed.
2225 		 */
2226 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2227 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2228 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2229 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2230 				  "and packet size(%d) mismatch\n",
2231 				  nsegs, pktlen);
2232 			break;
2233 		}
2234 
2235 		/* Received a frame. */
2236 		jme_rxpkt(sc, ring, chain);
2237 		prog++;
2238 	}
2239 	return prog;
2240 }
2241 
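/*
 * Interrupt-path Rx for one ring: collect received frames into per-CPU
 * mbuf chains and dispatch them to the network stack in one batch.
 */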
2242 static void
2243 jme_rxeof(struct jme_softc *sc, int ring)
2244 {
2245 	struct mbuf_chain chain[MAXCPU];
2246 
2247 	ether_input_chain_init(chain);
2248 	if (jme_rxeof_chain(sc, ring, chain, -1))
2249 		ether_input_dispatch(chain);
2250 }
2251 
2252 static void
2253 jme_tick(void *xsc)
2254 {
2255 	struct jme_softc *sc = xsc;
2256 	struct ifnet *ifp = &sc->arpcom.ac_if;
2257 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2258 
2259 	lwkt_serialize_enter(ifp->if_serializer);
2260 
2261 	mii_tick(mii);
2262 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2263 
2264 	lwkt_serialize_exit(ifp->if_serializer);
2265 }
2266 
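/*
 * Reset the chip: stop both DMA engines, pulse the global reset while
 * cycling the TXMAC/TXOFL/RXMAC clock sources, briefly enable Tx/Rx to
 * flush the engines, and finally leave the chip stopped again.
 */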
2267 static void
2268 jme_reset(struct jme_softc *sc)
2269 {
2270 	uint32_t val;
2271 
2272 	/* Make sure that TX and RX are stopped */
2273 	jme_stop_tx(sc);
2274 	jme_stop_rx(sc);
2275 
2276 	/* Start reset */
2277 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2278 	DELAY(20);
2279 
2280 	/*
2281 	 * Hold the reset bit before stopping the reset.
2282 	 */
2283 
2284 	/* Disable TXMAC and TXOFL clock sources */
2285 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2286 	/* Disable RXMAC clock source */
2287 	val = CSR_READ_4(sc, JME_GPREG1);
2288 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2289 	/* Flush */
2290 	CSR_READ_4(sc, JME_GHC);
2291 
2292 	/* Stop reset */
2293 	CSR_WRITE_4(sc, JME_GHC, 0);
2294 	/* Flush */
2295 	CSR_READ_4(sc, JME_GHC);
2296 
2297 	/*
2298 	 * Clear the reset bit after stopping the reset.
2299 	 */
2300 
2301 	/* Enable TXMAC and TXOFL clock sources */
2302 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2303 	/* Enable RXMAC clock source */
2304 	val = CSR_READ_4(sc, JME_GPREG1);
2305 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2306 	/* Flush */
2307 	CSR_READ_4(sc, JME_GHC);
2308 
2309 	/* Disable TXMAC and TXOFL clock sources */
2310 	CSR_WRITE_4(sc, JME_GHC, 0);
2311 	/* Disable RXMAC clock source */
2312 	val = CSR_READ_4(sc, JME_GPREG1);
2313 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2314 	/* Flush */
2315 	CSR_READ_4(sc, JME_GHC);
2316 
2317 	/* Enable TX and RX */
2318 	val = CSR_READ_4(sc, JME_TXCSR);
2319 	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2320 	val = CSR_READ_4(sc, JME_RXCSR);
2321 	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2322 	/* Flush */
2323 	CSR_READ_4(sc, JME_TXCSR);
2324 	CSR_READ_4(sc, JME_RXCSR);
2325 
2326 	/* Enable TXMAC and TXOFL clock sources */
2327 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2328 	/* Enable RXMAC clock source */
2329 	val = CSR_READ_4(sc, JME_GPREG1);
2330 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2331 	/* Flush */
2332 	CSR_READ_4(sc, JME_GHC);
2333 
2334 	/* Stop TX and RX */
2335 	jme_stop_tx(sc);
2336 	jme_stop_rx(sc);
2337 }
2338 
2339 static void
2340 jme_init(void *xsc)
2341 {
2342 	struct jme_softc *sc = xsc;
2343 	struct ifnet *ifp = &sc->arpcom.ac_if;
2344 	struct mii_data *mii;
2345 	uint8_t eaddr[ETHER_ADDR_LEN];
2346 	bus_addr_t paddr;
2347 	uint32_t reg;
2348 	int error, r;
2349 
2350 	ASSERT_SERIALIZED(ifp->if_serializer);
2351 
2352 	/*
2353 	 * Cancel any pending I/O.
2354 	 */
2355 	jme_stop(sc);
2356 
2357 	/*
2358 	 * Reset the chip to a known state.
2359 	 */
2360 	jme_reset(sc);
2361 
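	/*
	 * Worst-case number of descriptors consumed by one frame's
	 * payload; e.g. for the default 1500 byte MTU this is
	 * howmany(1500 + 18, 2048) == 1, assuming 2K clusters.
	 */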
2362 	sc->jme_txd_spare =
2363 	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2364 	KKASSERT(sc->jme_txd_spare >= 1);
2365 
2366 	/*
2367 	 * If we use 64bit address mode for transmitting, each Tx request
2368 	 * needs one more symbol descriptor.
2369 	 */
2370 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2371 		sc->jme_txd_spare += 1;
2372 
2373 	if (ifp->if_capenable & IFCAP_RSS)
2374 		jme_enable_rss(sc);
2375 	else
2376 		jme_disable_rss(sc);
2377 
2378 	/* Init RX descriptors */
2379 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2380 		error = jme_init_rx_ring(sc, r);
2381 		if (error) {
2382 			if_printf(ifp, "initialization failed: "
2383 				  "no memory for %dth RX ring.\n", r);
2384 			jme_stop(sc);
2385 			return;
2386 		}
2387 	}
2388 
2389 	/* Init TX descriptors */
2390 	jme_init_tx_ring(sc);
2391 
2392 	/* Initialize shadow status block. */
2393 	jme_init_ssb(sc);
2394 
2395 	/* Reprogram the station address. */
2396 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2397 	CSR_WRITE_4(sc, JME_PAR0,
2398 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2399 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2400 
2401 	/*
2402 	 * Configure Tx queue.
2403 	 *  Tx priority queue weight value : 0
2404 	 *  Tx FIFO threshold for processing next packet : 16QW
2405 	 *  Maximum Tx DMA length : 512
2406 	 *  Allow Tx DMA burst.
2407 	 */
2408 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2409 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2410 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2411 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2412 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2413 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2414 
2415 	/* Set Tx descriptor counter. */
2416 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2417 
2418 	/* Set Tx ring address to the hardware. */
2419 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2420 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2421 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2422 
2423 	/* Configure TxMAC parameters. */
2424 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2425 	reg |= TXMAC_THRESH_1_PKT;
2426 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2427 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2428 
2429 	/*
2430 	 * Configure Rx queue.
2431 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2432 	 *  FIFO threshold for processing next packet : 128QW
2433 	 *  Rx queue 0 select
2434 	 *  Max Rx DMA length : 128
2435 	 *  Rx descriptor retry : 32
2436 	 *  Rx descriptor retry time gap : 256ns
2437 	 *  Don't receive runt/bad frame.
2438 	 */
2439 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2440 #if 0
2441 	/*
2442 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2443 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2444 	 * decrease FIFO threshold to reduce the FIFO overruns for
2445 	 * frames larger than 4000 bytes.
2446 	 * For best performance of standard MTU sized frames use
2447 	 * maximum allowable FIFO threshold, 128QW.
2448 	 */
2449 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2450 	    JME_RX_FIFO_SIZE)
2451 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2452 	else
2453 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2454 #else
2455 	/* Improve PCI Express compatibility */
2456 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2457 #endif
2458 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2459 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2460 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2461 	/* XXX TODO DROP_BAD */
2462 
2463 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2464 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2465 
2466 		/* Set Rx descriptor counter. */
2467 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2468 
2469 		/* Set Rx ring address to the hardware. */
2470 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2471 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2472 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2473 	}
2474 
2475 	/* Clear receive filter. */
2476 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2477 
2478 	/* Set up the receive filter. */
2479 	jme_set_filter(sc);
2480 	jme_set_vlan(sc);
2481 
2482 	/*
2483 	 * Disable all WOL bits as WOL can interfere with normal Rx
2484 	 * operation. Also clear WOL detection status bits.
2485 	 */
2486 	reg = CSR_READ_4(sc, JME_PMCS);
2487 	reg &= ~PMCS_WOL_ENB_MASK;
2488 	CSR_WRITE_4(sc, JME_PMCS, reg);
2489 
2490 	/*
2491 	 * Pad 10 bytes right before the received frame. This greatly
2492 	 * helps Rx performance on strict-alignment architectures as
2493 	 * the frame does not need to be copied to align the payload.
2494 	 */
2495 	reg = CSR_READ_4(sc, JME_RXMAC);
2496 	reg |= RXMAC_PAD_10BYTES;
2497 
2498 	if (ifp->if_capenable & IFCAP_RXCSUM)
2499 		reg |= RXMAC_CSUM_ENB;
2500 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2501 
2502 	/* Configure general purpose reg0 */
2503 	reg = CSR_READ_4(sc, JME_GPREG0);
2504 	reg &= ~GPREG0_PCC_UNIT_MASK;
2505 	/* Set PCC timer resolution to micro-seconds unit. */
2506 	reg |= GPREG0_PCC_UNIT_US;
2507 	/*
2508 	 * Disable all shadow register posting as we have to read
2509 	 * the JME_INTR_STATUS register in jme_intr. Also it seems
2510 	 * hard to synchronize interrupt status between hardware
2511 	 * and software with shadow posting due to the requirements
2512 	 * of bus_dmamap_sync(9).
2513 	 */
2514 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2515 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2516 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2517 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2518 	/* Disable posting of DW0. */
2519 	reg &= ~GPREG0_POST_DW0_ENB;
2520 	/* Clear PME message. */
2521 	reg &= ~GPREG0_PME_ENB;
2522 	/* Set PHY address. */
2523 	reg &= ~GPREG0_PHY_ADDR_MASK;
2524 	reg |= sc->jme_phyaddr;
2525 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2526 
2527 	/* Configure Tx queue 0 packet completion coalescing. */
2528 	jme_set_tx_coal(sc);
2529 
2530 	/* Configure Rx queue 0 packet completion coalescing. */
2531 	jme_set_rx_coal(sc);
2532 
2533 	/* Configure shadow status block but don't enable posting. */
2534 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2535 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2536 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2537 
2538 	/* Disable Timer 1 and Timer 2. */
2539 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2540 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2541 
2542 	/* Configure retry transmit period, retry limit value. */
2543 	CSR_WRITE_4(sc, JME_TXTRHD,
2544 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2545 	    TXTRHD_RT_PERIOD_MASK) |
2546 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2547 	    TXTRHD_RT_LIMIT_MASK));
2548 
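	/*
	 * Note: only the MASK_SET write below is skipped while polling
	 * is active; the status-clearing write always executes.
	 */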
2549 #ifdef DEVICE_POLLING
2550 	if (!(ifp->if_flags & IFF_POLLING))
2551 #endif
2552 	/* Initialize the interrupt mask. */
2553 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2554 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2555 
2556 	/*
2557 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2558 	 * done after detection of valid link in jme_miibus_statchg.
2559 	 */
2560 	sc->jme_flags &= ~JME_FLAG_LINK;
2561 
2562 	/* Set the current media. */
2563 	mii = device_get_softc(sc->jme_miibus);
2564 	mii_mediachg(mii);
2565 
2566 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2567 
2568 	ifp->if_flags |= IFF_RUNNING;
2569 	ifp->if_flags &= ~IFF_OACTIVE;
2570 }
2571 
2572 static void
2573 jme_stop(struct jme_softc *sc)
2574 {
2575 	struct ifnet *ifp = &sc->arpcom.ac_if;
2576 	struct jme_txdesc *txd;
2577 	struct jme_rxdesc *rxd;
2578 	struct jme_rxdata *rdata;
2579 	int i, r;
2580 
2581 	ASSERT_SERIALIZED(ifp->if_serializer);
2582 
2583 	/*
2584 	 * Mark the interface down and cancel the watchdog timer.
2585 	 */
2586 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2587 	ifp->if_timer = 0;
2588 
2589 	callout_stop(&sc->jme_tick_ch);
2590 	sc->jme_flags &= ~JME_FLAG_LINK;
2591 
2592 	/*
2593 	 * Disable interrupts.
2594 	 */
2595 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2596 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2597 
2598 	/* Disable updating shadow status block. */
2599 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2600 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2601 
2602 	/* Stop receiver, transmitter. */
2603 	jme_stop_rx(sc);
2604 	jme_stop_tx(sc);
2605 
2606 	/*
2607 	 * Free partially finished RX segments.
2608 	 */
2609 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2610 		rdata = &sc->jme_cdata.jme_rx_data[r];
2611 		if (rdata->jme_rxhead != NULL)
2612 			m_freem(rdata->jme_rxhead);
2613 		JME_RXCHAIN_RESET(sc, r);
2614 	}
2615 
2616 	/*
2617 	 * Free RX and TX mbufs still in the queues.
2618 	 */
2619 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2620 		rdata = &sc->jme_cdata.jme_rx_data[r];
2621 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2622 			rxd = &rdata->jme_rxdesc[i];
2623 			if (rxd->rx_m != NULL) {
2624 				bus_dmamap_unload(rdata->jme_rx_tag,
2625 						  rxd->rx_dmamap);
2626 				m_freem(rxd->rx_m);
2627 				rxd->rx_m = NULL;
2628 			}
2629 		}
2630 	}
2631 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2632 		txd = &sc->jme_cdata.jme_txdesc[i];
2633 		if (txd->tx_m != NULL) {
2634 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2635 			    txd->tx_dmamap);
2636 			m_freem(txd->tx_m);
2637 			txd->tx_m = NULL;
2638 			txd->tx_ndesc = 0;
2639 		}
2640 	}
2641 }
2642 
2643 static void
2644 jme_stop_tx(struct jme_softc *sc)
2645 {
2646 	uint32_t reg;
2647 	int i;
2648 
2649 	reg = CSR_READ_4(sc, JME_TXCSR);
2650 	if ((reg & TXCSR_TX_ENB) == 0)
2651 		return;
2652 	reg &= ~TXCSR_TX_ENB;
2653 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2654 	for (i = JME_TIMEOUT; i > 0; i--) {
2655 		DELAY(1);
2656 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2657 			break;
2658 	}
2659 	if (i == 0)
2660 		device_printf(sc->jme_dev, "stopping transmitter timed out!\n");
2661 }
2662 
2663 static void
2664 jme_stop_rx(struct jme_softc *sc)
2665 {
2666 	uint32_t reg;
2667 	int i;
2668 
2669 	reg = CSR_READ_4(sc, JME_RXCSR);
2670 	if ((reg & RXCSR_RX_ENB) == 0)
2671 		return;
2672 	reg &= ~RXCSR_RX_ENB;
2673 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2674 	for (i = JME_TIMEOUT; i > 0; i--) {
2675 		DELAY(1);
2676 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2677 			break;
2678 	}
2679 	if (i == 0)
2680 		device_printf(sc->jme_dev, "stopping receiver timed out!\n");
2681 }
2682 
2683 static void
2684 jme_init_tx_ring(struct jme_softc *sc)
2685 {
2686 	struct jme_chain_data *cd;
2687 	struct jme_txdesc *txd;
2688 	int i;
2689 
2690 	sc->jme_cdata.jme_tx_prod = 0;
2691 	sc->jme_cdata.jme_tx_cons = 0;
2692 	sc->jme_cdata.jme_tx_cnt = 0;
2693 
2694 	cd = &sc->jme_cdata;
2695 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2696 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2697 		txd = &sc->jme_cdata.jme_txdesc[i];
2698 		txd->tx_m = NULL;
2699 		txd->tx_desc = &cd->jme_tx_ring[i];
2700 		txd->tx_ndesc = 0;
2701 	}
2702 }
2703 
2704 static void
2705 jme_init_ssb(struct jme_softc *sc)
2706 {
2707 	struct jme_chain_data *cd;
2708 
2709 	cd = &sc->jme_cdata;
2710 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2711 }
2712 
2713 static int
2714 jme_init_rx_ring(struct jme_softc *sc, int ring)
2715 {
2716 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2717 	struct jme_rxdesc *rxd;
2718 	int i;
2719 
2720 	KKASSERT(rdata->jme_rxhead == NULL &&
2721 		 rdata->jme_rxtail == NULL &&
2722 		 rdata->jme_rxlen == 0);
2723 	rdata->jme_rx_cons = 0;
2724 
2725 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2726 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2727 		int error;
2728 
2729 		rxd = &rdata->jme_rxdesc[i];
2730 		rxd->rx_m = NULL;
2731 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2732 		error = jme_newbuf(sc, ring, rxd, 1);
2733 		if (error)
2734 			return error;
2735 	}
2736 	return 0;
2737 }
2738 
2739 static int
2740 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2741 {
2742 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2743 	struct jme_desc *desc;
2744 	struct mbuf *m;
2745 	bus_dma_segment_t segs;
2746 	bus_dmamap_t map;
2747 	int error, nsegs;
2748 
2749 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2750 	if (m == NULL)
2751 		return ENOBUFS;
2752 	/*
2753 	 * The JMC250 has a 64bit boundary alignment limitation, so
2754 	 * jme(4) takes advantage of the hardware's 10 byte padding
2755 	 * feature to avoid copying the entire frame just to align
2756 	 * the IP header on a 32bit boundary.
2757 	 */
2758 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2759 
2760 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2761 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2762 			BUS_DMA_NOWAIT);
2763 	if (error) {
2764 		m_freem(m);
2765 		if (init)
2766 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2767 		return error;
2768 	}
2769 
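	/*
	 * The mbuf was loaded into the spare map above, so a load
	 * failure leaves the old buffer intact; on success the spare
	 * map and the descriptor's map are swapped below.
	 */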
2770 	if (rxd->rx_m != NULL) {
2771 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2772 				BUS_DMASYNC_POSTREAD);
2773 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2774 	}
2775 	map = rxd->rx_dmamap;
2776 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
2777 	rdata->jme_rx_sparemap = map;
2778 	rxd->rx_m = m;
2779 
2780 	desc = rxd->rx_desc;
2781 	desc->buflen = htole32(segs.ds_len);
2782 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2783 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2784 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2785 
2786 	return 0;
2787 }
2788 
2789 static void
2790 jme_set_vlan(struct jme_softc *sc)
2791 {
2792 	struct ifnet *ifp = &sc->arpcom.ac_if;
2793 	uint32_t reg;
2794 
2795 	ASSERT_SERIALIZED(ifp->if_serializer);
2796 
2797 	reg = CSR_READ_4(sc, JME_RXMAC);
2798 	reg &= ~RXMAC_VLAN_ENB;
2799 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2800 		reg |= RXMAC_VLAN_ENB;
2801 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2802 }
2803 
2804 static void
2805 jme_set_filter(struct jme_softc *sc)
2806 {
2807 	struct ifnet *ifp = &sc->arpcom.ac_if;
2808 	struct ifmultiaddr *ifma;
2809 	uint32_t crc;
2810 	uint32_t mchash[2];
2811 	uint32_t rxcfg;
2812 
2813 	ASSERT_SERIALIZED(ifp->if_serializer);
2814 
2815 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2816 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2817 	    RXMAC_ALLMULTI);
2818 
2819 	/*
2820 	 * Always accept frames destined to our station address.
2821 	 * Always accept broadcast frames.
2822 	 */
2823 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2824 
2825 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2826 		if (ifp->if_flags & IFF_PROMISC)
2827 			rxcfg |= RXMAC_PROMISC;
2828 		if (ifp->if_flags & IFF_ALLMULTI)
2829 			rxcfg |= RXMAC_ALLMULTI;
2830 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2831 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2832 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2833 		return;
2834 	}
2835 
2836 	/*
2837 	 * Set up the multicast address filter by passing all multicast
2838 	 * addresses through a CRC generator, and then using the low-order
2839 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2840 	 * high order bits select the register, while the rest of the bits
2841 	 * select the bit within the register.
2842 	 */
2843 	rxcfg |= RXMAC_MULTICAST;
2844 	bzero(mchash, sizeof(mchash));
2845 
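	/*
	 * Example: a CRC whose low 6 bits are 43 (0x2b) sets bit
	 * 43 & 0x1f == 11 of mchash[43 >> 5] == mchash[1].
	 */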
2846 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2847 		if (ifma->ifma_addr->sa_family != AF_LINK)
2848 			continue;
2849 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2850 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2851 
2852 		/* Just want the 6 least significant bits. */
2853 		crc &= 0x3f;
2854 
2855 		/* Set the corresponding bit in the hash table. */
2856 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2857 	}
2858 
2859 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2860 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2861 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2862 }
2863 
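/*
 * The four coalescing sysctl handlers below share one pattern: take
 * the serializer, range-check the new value, and reprogram the
 * hardware only if the interface is running.
 */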
2864 static int
2865 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2866 {
2867 	struct jme_softc *sc = arg1;
2868 	struct ifnet *ifp = &sc->arpcom.ac_if;
2869 	int error, v;
2870 
2871 	lwkt_serialize_enter(ifp->if_serializer);
2872 
2873 	v = sc->jme_tx_coal_to;
2874 	error = sysctl_handle_int(oidp, &v, 0, req);
2875 	if (error || req->newptr == NULL)
2876 		goto back;
2877 
2878 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2879 		error = EINVAL;
2880 		goto back;
2881 	}
2882 
2883 	if (v != sc->jme_tx_coal_to) {
2884 		sc->jme_tx_coal_to = v;
2885 		if (ifp->if_flags & IFF_RUNNING)
2886 			jme_set_tx_coal(sc);
2887 	}
2888 back:
2889 	lwkt_serialize_exit(ifp->if_serializer);
2890 	return error;
2891 }
2892 
2893 static int
2894 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2895 {
2896 	struct jme_softc *sc = arg1;
2897 	struct ifnet *ifp = &sc->arpcom.ac_if;
2898 	int error, v;
2899 
2900 	lwkt_serialize_enter(ifp->if_serializer);
2901 
2902 	v = sc->jme_tx_coal_pkt;
2903 	error = sysctl_handle_int(oidp, &v, 0, req);
2904 	if (error || req->newptr == NULL)
2905 		goto back;
2906 
2907 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2908 		error = EINVAL;
2909 		goto back;
2910 	}
2911 
2912 	if (v != sc->jme_tx_coal_pkt) {
2913 		sc->jme_tx_coal_pkt = v;
2914 		if (ifp->if_flags & IFF_RUNNING)
2915 			jme_set_tx_coal(sc);
2916 	}
2917 back:
2918 	lwkt_serialize_exit(ifp->if_serializer);
2919 	return error;
2920 }
2921 
2922 static int
2923 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2924 {
2925 	struct jme_softc *sc = arg1;
2926 	struct ifnet *ifp = &sc->arpcom.ac_if;
2927 	int error, v;
2928 
2929 	lwkt_serialize_enter(ifp->if_serializer);
2930 
2931 	v = sc->jme_rx_coal_to;
2932 	error = sysctl_handle_int(oidp, &v, 0, req);
2933 	if (error || req->newptr == NULL)
2934 		goto back;
2935 
2936 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2937 		error = EINVAL;
2938 		goto back;
2939 	}
2940 
2941 	if (v != sc->jme_rx_coal_to) {
2942 		sc->jme_rx_coal_to = v;
2943 		if (ifp->if_flags & IFF_RUNNING)
2944 			jme_set_rx_coal(sc);
2945 	}
2946 back:
2947 	lwkt_serialize_exit(ifp->if_serializer);
2948 	return error;
2949 }
2950 
2951 static int
2952 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2953 {
2954 	struct jme_softc *sc = arg1;
2955 	struct ifnet *ifp = &sc->arpcom.ac_if;
2956 	int error, v;
2957 
2958 	lwkt_serialize_enter(ifp->if_serializer);
2959 
2960 	v = sc->jme_rx_coal_pkt;
2961 	error = sysctl_handle_int(oidp, &v, 0, req);
2962 	if (error || req->newptr == NULL)
2963 		goto back;
2964 
2965 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2966 		error = EINVAL;
2967 		goto back;
2968 	}
2969 
2970 	if (v != sc->jme_rx_coal_pkt) {
2971 		sc->jme_rx_coal_pkt = v;
2972 		if (ifp->if_flags & IFF_RUNNING)
2973 			jme_set_rx_coal(sc);
2974 	}
2975 back:
2976 	lwkt_serialize_exit(ifp->if_serializer);
2977 	return error;
2978 }
2979 
2980 static void
2981 jme_set_tx_coal(struct jme_softc *sc)
2982 {
2983 	uint32_t reg;
2984 
2985 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2986 	    PCCTX_COAL_TO_MASK;
2987 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2988 	    PCCTX_COAL_PKT_MASK;
2989 	reg |= PCCTX_COAL_TXQ0;
2990 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2991 }
2992 
2993 static void
2994 jme_set_rx_coal(struct jme_softc *sc)
2995 {
2996 	uint32_t reg;
2997 	int r;
2998 
2999 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3000 	    PCCRX_COAL_TO_MASK;
3001 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3002 	    PCCRX_COAL_PKT_MASK;
3003 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3004 		if (r < sc->jme_rx_ring_inuse)
3005 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3006 		else
3007 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3008 	}
3009 }
3010 
3011 #ifdef DEVICE_POLLING
3012 
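/*
 * Polling entry point.  POLL_REGISTER masks the interrupts off and
 * POLL_DEREGISTER re-enables them; POLL_ONLY/POLL_AND_CHECK_STATUS
 * performs the bounded Rx/Tx work that jme_intr would otherwise do.
 */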
3013 static void
3014 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3015 {
3016 	struct jme_softc *sc = ifp->if_softc;
3017 	struct mbuf_chain chain[MAXCPU];
3018 	uint32_t status;
3019 	int r, prog = 0;
3020 
3021 	ASSERT_SERIALIZED(ifp->if_serializer);
3022 
3023 	switch (cmd) {
3024 	case POLL_REGISTER:
3025 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3026 		break;
3027 
3028 	case POLL_DEREGISTER:
3029 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3030 		break;
3031 
3032 	case POLL_AND_CHECK_STATUS:
3033 	case POLL_ONLY:
3034 		status = CSR_READ_4(sc, JME_INTR_STATUS);
3035 
3036 		ether_input_chain_init(chain);
3037 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
3038 			prog += jme_rxeof_chain(sc, r, chain, count);
3039 		if (prog)
3040 			ether_input_dispatch(chain);
3041 
3042 		if (status & INTR_RXQ_DESC_EMPTY) {
3043 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3044 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3045 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
3046 		}
3047 
3048 		jme_txeof(sc);
3049 		if (!ifq_is_empty(&ifp->if_snd))
3050 			if_devstart(ifp);
3051 		break;
3052 	}
3053 }
3054 
3055 #endif	/* DEVICE_POLLING */
3056 
3057 static int
3058 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3059 {
3060 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3061 	bus_dmamem_t dmem;
3062 	int error;
3063 
3064 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3065 			JME_RX_RING_ALIGN, 0,
3066 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3067 			JME_RX_RING_SIZE(sc),
3068 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3069 	if (error) {
3070 		device_printf(sc->jme_dev,
3071 		    "could not allocate %dth Rx ring.\n", ring);
3072 		return error;
3073 	}
3074 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3075 	rdata->jme_rx_ring_map = dmem.dmem_map;
3076 	rdata->jme_rx_ring = dmem.dmem_addr;
3077 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3078 
3079 	return 0;
3080 }
3081 
3082 static int
3083 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3084 {
3085 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3086 	int i, error;
3087 
3088 	/* Create tag for Rx buffers. */
3089 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3090 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3091 	    BUS_SPACE_MAXADDR,		/* lowaddr */
3092 	    BUS_SPACE_MAXADDR,		/* highaddr */
3093 	    NULL, NULL,			/* filter, filterarg */
3094 	    MCLBYTES,			/* maxsize */
3095 	    1,				/* nsegments */
3096 	    MCLBYTES,			/* maxsegsize */
3097 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3098 	    &rdata->jme_rx_tag);
3099 	if (error) {
3100 		device_printf(sc->jme_dev,
3101 		    "could not create %dth Rx DMA tag.\n", ring);
3102 		return error;
3103 	}
3104 
3105 	/* Create DMA maps for Rx buffers. */
3106 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3107 				  &rdata->jme_rx_sparemap);
3108 	if (error) {
3109 		device_printf(sc->jme_dev,
3110 		    "could not create %dth spare Rx dmamap.\n", ring);
3111 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3112 		rdata->jme_rx_tag = NULL;
3113 		return error;
3114 	}
3115 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3116 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3117 
3118 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3119 					  &rxd->rx_dmamap);
3120 		if (error) {
3121 			int j;
3122 
3123 			device_printf(sc->jme_dev,
3124 			    "could not create %dth Rx dmamap "
3125 			    "for %dth RX ring.\n", i, ring);
3126 
3127 			for (j = 0; j < i; ++j) {
3128 				rxd = &rdata->jme_rxdesc[j];
3129 				bus_dmamap_destroy(rdata->jme_rx_tag,
3130 						   rxd->rx_dmamap);
3131 			}
3132 			bus_dmamap_destroy(rdata->jme_rx_tag,
3133 					   rdata->jme_rx_sparemap);
3134 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3135 			rdata->jme_rx_tag = NULL;
3136 			return error;
3137 		}
3138 	}
3139 	return 0;
3140 }
3141 
3142 static void
3143 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3144 {
3145 	struct mbuf_chain chain[MAXCPU];
3146 	int r, prog = 0;
3147 
3148 	ether_input_chain_init(chain);
3149 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3150 		if (status & jme_rx_status[r].jme_coal)
3151 			prog += jme_rxeof_chain(sc, r, chain, -1);
3152 	}
3153 	if (prog)
3154 		ether_input_dispatch(chain);
3155 }
3156 
3157 static void
3158 jme_enable_rss(struct jme_softc *sc)
3159 {
3160 	uint32_t rssc, ind;
3161 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3162 	int i;
3163 
3164 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3165 
3166 	KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3167 		sc->jme_rx_ring_inuse == JME_NRXRING_4,
3168 		("%s: invalid # of RX rings (%d)\n",
3169 		 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3170 
3171 	rssc = RSSC_HASH_64_ENTRY;
3172 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3173 	rssc |= sc->jme_rx_ring_inuse >> 1;
3174 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3175 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3176 
3177 	toeplitz_get_key(key, sizeof(key));
3178 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3179 		uint32_t keyreg;
3180 
3181 		keyreg = RSSKEY_REGVAL(key, i);
3182 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3183 
3184 		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3185 	}
3186 
3187 	/*
3188 	 * Create the redirect table in the following fashion:
3189 	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3190 	 */
3191 	ind = 0;
3192 	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3193 		int q;
3194 
3195 		q = i % sc->jme_rx_ring_inuse;
3196 		ind |= q << (i * 8);
3197 	}
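	/*
	 * e.g. with 4 rings each 32 bit table register packs one queue
	 * index per byte: ind == 0x03020100; with 2 rings, 0x01000100.
	 */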
3198 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3199 
3200 	for (i = 0; i < RSSTBL_NREGS; ++i)
3201 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3202 }
3203 
3204 static void
3205 jme_disable_rss(struct jme_softc *sc)
3206 {
3207 	sc->jme_rx_ring_inuse = JME_NRXRING_1;
3208 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3209 }
3210