xref: /dragonfly/sys/dev/netif/jme/if_jme.c (revision b3aace65)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
29  */
30 
31 #include "opt_polling.h"
32 #include "opt_rss.h"
33 #include "opt_jme.h"
34 
35 #include <sys/param.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/interrupt.h>
40 #include <sys/malloc.h>
41 #include <sys/proc.h>
42 #include <sys/rman.h>
43 #include <sys/serialize.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 
48 #include <net/ethernet.h>
49 #include <net/if.h>
50 #include <net/bpf.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/ifq_var.h>
55 #include <net/vlan/if_vlan_var.h>
56 #include <net/vlan/if_vlan_ether.h>
57 
58 #include <dev/netif/mii_layer/miivar.h>
59 #include <dev/netif/mii_layer/jmphyreg.h>
60 
61 #include <bus/pci/pcireg.h>
62 #include <bus/pci/pcivar.h>
63 #include <bus/pci/pcidevs.h>
64 
65 #include <dev/netif/jme/if_jmereg.h>
66 #include <dev/netif/jme/if_jmevar.h>
67 
68 #include "miibus_if.h"
69 
70 /* Define the following to disable printing Rx errors. */
71 #undef	JME_SHOW_ERRORS
72 
73 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
74 
75 #ifdef JME_RSS_DEBUG
76 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
77 do { \
78 	if ((sc)->jme_rss_debug > (lvl)) \
79 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
80 } while (0)
81 #else	/* !JME_RSS_DEBUG */
82 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
83 #endif	/* JME_RSS_DEBUG */
84 
85 static int	jme_probe(device_t);
86 static int	jme_attach(device_t);
87 static int	jme_detach(device_t);
88 static int	jme_shutdown(device_t);
89 static int	jme_suspend(device_t);
90 static int	jme_resume(device_t);
91 
92 static int	jme_miibus_readreg(device_t, int, int);
93 static int	jme_miibus_writereg(device_t, int, int, int);
94 static void	jme_miibus_statchg(device_t);
95 
96 static void	jme_init(void *);
97 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
98 static void	jme_start(struct ifnet *);
99 static void	jme_watchdog(struct ifnet *);
100 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
101 static int	jme_mediachange(struct ifnet *);
102 #ifdef DEVICE_POLLING
103 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
104 #endif
105 
106 static void	jme_intr(void *);
107 static void	jme_txeof(struct jme_softc *);
108 static void	jme_rxeof(struct jme_softc *, int);
109 static int	jme_rxeof_chain(struct jme_softc *, int,
110 				struct mbuf_chain *, int);
111 static void	jme_rx_intr(struct jme_softc *, uint32_t);
112 
113 static int	jme_dma_alloc(struct jme_softc *);
114 static void	jme_dma_free(struct jme_softc *);
115 static int	jme_init_rx_ring(struct jme_softc *, int);
116 static void	jme_init_tx_ring(struct jme_softc *);
117 static void	jme_init_ssb(struct jme_softc *);
118 static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
119 static int	jme_encap(struct jme_softc *, struct mbuf **);
120 static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
121 static int	jme_rxring_dma_alloc(struct jme_softc *, int);
122 static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);
123 
124 static void	jme_tick(void *);
125 static void	jme_stop(struct jme_softc *);
126 static void	jme_reset(struct jme_softc *);
127 static void	jme_set_vlan(struct jme_softc *);
128 static void	jme_set_filter(struct jme_softc *);
129 static void	jme_stop_tx(struct jme_softc *);
130 static void	jme_stop_rx(struct jme_softc *);
131 static void	jme_mac_config(struct jme_softc *);
132 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
133 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
134 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
135 #ifdef notyet
136 static void	jme_setwol(struct jme_softc *);
137 static void	jme_setlinkspeed(struct jme_softc *);
138 #endif
139 static void	jme_set_tx_coal(struct jme_softc *);
140 static void	jme_set_rx_coal(struct jme_softc *);
141 static void	jme_enable_rss(struct jme_softc *);
142 static void	jme_disable_rss(struct jme_softc *);
143 
144 static void	jme_sysctl_node(struct jme_softc *);
145 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
146 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
147 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
148 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
149 
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;	/* PCI vendor ID */
	uint16_t	jme_deviceid;	/* PCI device ID */
	uint32_t	jme_caps;	/* initial JME_CAP_* flags for this chip */
	const char	*jme_name;	/* probe description string */
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }	/* list terminator */
};
167 
/* Newbus method dispatch table for the jme(4) driver. */
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }	/* table terminator */
};
188 
/* Driver declaration: name "jme", per-instance softc allocation. */
static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

/* Module glue: attach to the PCI bus, and hang miibus off each jme. */
DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
201 
/*
 * Per-RX-queue interrupt status bits, indexed by ring number:
 * the coalescing/coalescing-timeout bits and the completion bit
 * for each of the (up to) four hardware RX queues.
 */
static const struct {
	uint32_t	jme_coal;	/* coalescing + coalescing-timeout bits */
	uint32_t	jme_comp;	/* queue completion bit */
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
};
211 
/* Descriptor/ring count defaults, overridable via loader tunables. */
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = JME_NRXRING_DEF;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
219 
220 /*
221  *	Read a PHY register on the MII of the JMC250.
222  */
223 static int
224 jme_miibus_readreg(device_t dev, int phy, int reg)
225 {
226 	struct jme_softc *sc = device_get_softc(dev);
227 	uint32_t val;
228 	int i;
229 
230 	/* For FPGA version, PHY address 0 should be ignored. */
231 	if (sc->jme_caps & JME_CAP_FPGA) {
232 		if (phy == 0)
233 			return (0);
234 	} else {
235 		if (sc->jme_phyaddr != phy)
236 			return (0);
237 	}
238 
239 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
240 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
241 
242 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
243 		DELAY(1);
244 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
245 			break;
246 	}
247 	if (i == 0) {
248 		device_printf(sc->jme_dev, "phy read timeout: "
249 			      "phy %d, reg %d\n", phy, reg);
250 		return (0);
251 	}
252 
253 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
254 }
255 
256 /*
257  *	Write a PHY register on the MII of the JMC250.
258  */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	/* Launch the write; data travels in the SMI data field. */
	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	/*
	 * Poll until the controller clears SMI_OP_EXECUTE.  NOTE:
	 * 'val' is reused as a scratch register snapshot here; its
	 * original value is no longer needed after the write above.
	 */
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
			      "phy %d, reg %d\n", phy, reg);
	}

	/* miibus write methods always report success. */
	return (0);
}
290 
291 /*
292  *	Callback from MII layer when media changes.
293  */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Nothing to do unless the interface is up and running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	/* Re-derive link state from the PHY's reported active media. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Fast-ethernet-only parts cannot run 1000baseT. */
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources.  Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain and reset every RX ring currently in use. */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(sc, r);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}

	/* Reclaim completed TX work, then drop anything still queued. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
			/* Select queue 'r' before writing its ring base. */
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
427 
428 /*
429  *	Get the current interface media status.
430  */
431 static void
432 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
433 {
434 	struct jme_softc *sc = ifp->if_softc;
435 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
436 
437 	ASSERT_SERIALIZED(ifp->if_serializer);
438 
439 	mii_pollstat(mii);
440 	ifmr->ifm_status = mii->mii_media_status;
441 	ifmr->ifm_active = mii->mii_media_active;
442 }
443 
444 /*
445  *	Set hardware to newly-selected media.
446  */
447 static int
448 jme_mediachange(struct ifnet *ifp)
449 {
450 	struct jme_softc *sc = ifp->if_softc;
451 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
452 	int error;
453 
454 	ASSERT_SERIALIZED(ifp->if_serializer);
455 
456 	if (mii->mii_instance != 0) {
457 		struct mii_softc *miisc;
458 
459 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
460 			mii_phy_reset(miisc);
461 	}
462 	error = mii_mediachg(mii);
463 
464 	return (error);
465 }
466 
467 static int
468 jme_probe(device_t dev)
469 {
470 	const struct jme_dev *sp;
471 	uint16_t vid, did;
472 
473 	vid = pci_get_vendor(dev);
474 	did = pci_get_device(dev);
475 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
476 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
477 			struct jme_softc *sc = device_get_softc(dev);
478 
479 			sc->jme_caps = sp->jme_caps;
480 			device_set_desc(dev, sp->jme_name);
481 			return (0);
482 		}
483 	}
484 	return (ENXIO);
485 }
486 
/*
 * Read one byte from the attached EEPROM through the SMB interface.
 * Returns 0 on success with the byte in *val, or ETIMEDOUT if the
 * hardware does not respond within JME_TIMEOUT polls.
 */
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	/* Wait for the SMB hardware to go idle before issuing a command. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	/* Trigger a one-byte read at 'addr'. */
	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	/* The controller clears the trigger bit when the read completes. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	/* Fetch the returned data byte. */
	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
525 
/*
 * Walk the EEPROM descriptors looking for writes to the PAR0..PAR0+5
 * station-address registers, collecting the six ethernet address bytes.
 * Returns 0 when all six bytes were found, ENOENT when the EEPROM
 * signature is missing or the address is incomplete.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	/* Verify the two-byte EEPROM signature first. */
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Only descriptors for function 0, register page BAR1. */
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			/* Collect bytes destined for PAR0..PAR0+5. */
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
569 
570 static void
571 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
572 {
573 	uint32_t par0, par1;
574 
575 	/* Read station address. */
576 	par0 = CSR_READ_4(sc, JME_PAR0);
577 	par1 = CSR_READ_4(sc, JME_PAR1);
578 	par1 &= 0xFFFF;
579 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
580 		device_printf(sc->jme_dev,
581 		    "generating fake ethernet address.\n");
582 		par0 = karc4random();
583 		/* Set OUI to JMicron. */
584 		eaddr[0] = 0x00;
585 		eaddr[1] = 0x1B;
586 		eaddr[2] = 0x8C;
587 		eaddr[3] = (par0 >> 16) & 0xff;
588 		eaddr[4] = (par0 >> 8) & 0xff;
589 		eaddr[5] = par0 & 0xff;
590 	} else {
591 		eaddr[0] = (par0 >> 0) & 0xFF;
592 		eaddr[1] = (par0 >> 8) & 0xFF;
593 		eaddr[2] = (par0 >> 16) & 0xFF;
594 		eaddr[3] = (par0 >> 24) & 0xFF;
595 		eaddr[4] = (par1 >> 0) & 0xFF;
596 		eaddr[5] = (par1 >> 8) & 0xFF;
597 	}
598 }
599 
/*
 * Attach the device: size the descriptor rings, map registers, read
 * chip revisions, recover the station address, allocate DMA resources,
 * register the ifnet and hook up the interrupt.  On any failure we
 * jump to 'fail', which unwinds via jme_detach().
 */
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	/* Clamp tunable descriptor counts to aligned, supported sizes. */
	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

#ifdef RSS
	/* Ring count is bounded by CPUs and snapped to 1, 2 or 4. */
	sc->jme_rx_ring_cnt = jme_rx_ring_count;
	if (sc->jme_rx_ring_cnt <= 0)
		sc->jme_rx_ring_cnt = JME_NRXRING_1;
	if (sc->jme_rx_ring_cnt > ncpus2)
		sc->jme_rx_ring_cnt = ncpus2;

	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
		sc->jme_rx_ring_cnt = JME_NRXRING_4;
	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
		sc->jme_rx_ring_cnt = JME_NRXRING_2;
#else
	sc->jme_rx_ring_cnt = JME_NRXRING_MIN;
#endif
	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	/* Wake the chip to D0, preserving INTLINE and the memory BAR. */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map entire memory
	 * space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						 &sc->jme_irq_rid,
						 RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
				      (reg & CHIPMODE_FPGA_REV_MASK) >>
				      CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	/* Apply revision-specific workarounds and DMA address limits. */
	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		/* jme_probe() matched, so this should be unreachable. */
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
				      GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address; fall back to the PAR registers. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
				      "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size, from the PCIe Device Control reg. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	/* Fill in the ifnet: methods, queue length, capabilities. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_hwassist = JME_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			/* Use the first non-zero PHY address found. */
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Interrupt handler runs under the ifnet serializer. */
	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
			       &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Bind if_start to the CPU servicing our interrupt. */
	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
fail:
	jme_detach(dev);
	return (error);
}
887 
/*
 * Detach the device.  Also serves as the error-unwind path for
 * jme_attach(), so every release below is guarded against partial
 * initialization (NULL resources are simply skipped).
 */
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/* Quiesce the hardware and interrupt under the serializer. */
		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	/* Tear down the attached miibus child, then any remaining children. */
	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
				     sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}
925 
/*
 * Create the per-device sysctl tree (hw.jmeN.*): coalescing knobs,
 * read-only ring/descriptor counts and, under JME_RSS_DEBUG,
 * per-ring packet counters.  Also establishes the default coalescing
 * parameters, clamped to the configured descriptor counts.
 */
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	char rx_ring_pkt[32];
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(sc->jme_dev),
				CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
		       0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
		       0, "RX ring in use");
#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rss_debug", CTLFLAG_RD, &sc->jme_rss_debug,
		       0, "RSS debug level");
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
				rx_ring_pkt, CTLFLAG_RD,
				&sc->jme_rx_ring_pkt[r],
				0, "RXed packets");
	}
#endif

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case that the number of TX/RX
	 * descs are set to small values by users.
	 *
	 * NOTE: coal_max will not be zero, since number of descs
	 * must be aligned by JME_NDESC_ALIGN (16 currently)
	 */
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}
1019 
/*
 * Allocate every DMA resource the driver needs: the software TX/RX
 * descriptor bookkeeping arrays, the TX ring, the per-ring RX rings,
 * the shadow status block, and DMA maps for the TX buffers.
 *
 * Returns 0 or a bus_dma error code.  On error, already-allocated
 * resources are left in place; jme_dma_free() NULL-checks every tag,
 * so it can tear down a partially initialized softc (presumably the
 * caller's error path does exactly that -- not visible here).
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i;

	/* Plain kernel memory for per-descriptor bookkeeping (not DMA'd). */
	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_TX_RING_SIZE(sc),
			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(sc, i);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			/* Undo the maps created so far, then the tag itself. */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(sc, i);
		if (error)
			return error;
	}
	return 0;
}
1165 
/*
 * Release everything jme_dma_alloc() (and the rx ring/buf helpers)
 * set up: maps are unloaded, DMA memory freed, tags destroyed, and
 * the software descriptor arrays kfree'd.  Every resource is
 * NULL-checked before use and reset to NULL afterwards, so this is
 * safe to call on a partially initialized softc and is idempotent.
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			/* The spare map is extra to the per-desc maps. */
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags, destroyed after all their children. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	/* Software descriptor arrays. */
	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
1258 
1259 /*
1260  *	Make sure the interface is stopped at reboot time.
1261  */
1262 static int
1263 jme_shutdown(device_t dev)
1264 {
1265 	return jme_suspend(dev);
1266 }
1267 
1268 #ifdef notyet
/*
 * Unlike other ethernet controllers, JMC250 requires
 * explicit resetting link speed to 10/100Mbps as gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation but we don't know whether that operation
 * would succeed or not as we have no control after powering
 * off. If the renegotiation fails, WOL may not work. Running
 * at 1Gbps draws more power than 375mA at 3.3V which is
 * specified in the PCI specification and that would result in
 * completely shutting down power to the ethernet controller.
 *
 * TODO
 *  Save current negotiated media speed/duplex/flow-control
 *  to softc and restore the same link again after resuming.
 *  PHY handling such as power down/resetting to 100Mbps
 *  may be better handled in suspend method in phy driver.
 */
1287 static void
1288 jme_setlinkspeed(struct jme_softc *sc)
1289 {
1290 	struct mii_data *mii;
1291 	int aneg, i;
1292 
1293 	JME_LOCK_ASSERT(sc);
1294 
1295 	mii = device_get_softc(sc->jme_miibus);
1296 	mii_pollstat(mii);
1297 	aneg = 0;
1298 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1299 		switch IFM_SUBTYPE(mii->mii_media_active) {
1300 		case IFM_10_T:
1301 		case IFM_100_TX:
1302 			return;
1303 		case IFM_1000_T:
1304 			aneg++;
1305 		default:
1306 			break;
1307 		}
1308 	}
1309 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1310 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1311 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1312 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1313 	    BMCR_AUTOEN | BMCR_STARTNEG);
1314 	DELAY(1000);
1315 	if (aneg != 0) {
1316 		/* Poll link state until jme(4) get a 10/100 link. */
1317 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1318 			mii_pollstat(mii);
1319 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1320 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1321 				case IFM_10_T:
1322 				case IFM_100_TX:
1323 					jme_mac_config(sc);
1324 					return;
1325 				default:
1326 					break;
1327 				}
1328 			}
1329 			JME_UNLOCK(sc);
1330 			pause("jmelnk", hz);
1331 			JME_LOCK(sc);
1332 		}
1333 		if (i == MII_ANEGTICKS_GIGE)
1334 			device_printf(sc->jme_dev, "establishing link failed, "
1335 			    "WOL may not work!");
1336 	}
1337 	/*
1338 	 * No link, force MAC to have 100Mbps, full-duplex link.
1339 	 * This is the last resort and may/may not work.
1340 	 */
1341 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1342 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1343 	jme_mac_config(sc);
1344 }
1345 
/*
 * Arm (or disarm) wake-on-LAN before the device is powered down.
 *
 * If the device has no PME capability the PHY is simply powered
 * down.  Otherwise the PMCS magic-frame bits and GPREG0 PME enable
 * are set according to IFCAP_WOL_MAGIC, PME is requested through the
 * PCI power-management status register when any WOL capability is
 * enabled, and the PHY is powered down when WOL is fully disabled.
 * (Currently compiled out -- sits under "#ifdef notyet".)
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1388 #endif
1389 
1390 static int
1391 jme_suspend(device_t dev)
1392 {
1393 	struct jme_softc *sc = device_get_softc(dev);
1394 	struct ifnet *ifp = &sc->arpcom.ac_if;
1395 
1396 	lwkt_serialize_enter(ifp->if_serializer);
1397 	jme_stop(sc);
1398 #ifdef notyet
1399 	jme_setwol(sc);
1400 #endif
1401 	lwkt_serialize_exit(ifp->if_serializer);
1402 
1403 	return (0);
1404 }
1405 
1406 static int
1407 jme_resume(device_t dev)
1408 {
1409 	struct jme_softc *sc = device_get_softc(dev);
1410 	struct ifnet *ifp = &sc->arpcom.ac_if;
1411 #ifdef notyet
1412 	int pmc;
1413 #endif
1414 
1415 	lwkt_serialize_enter(ifp->if_serializer);
1416 
1417 #ifdef notyet
1418 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1419 		uint16_t pmstat;
1420 
1421 		pmstat = pci_read_config(sc->jme_dev,
1422 		    pmc + PCIR_POWER_STATUS, 2);
1423 		/* Disable PME clear PME status. */
1424 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1425 		pci_write_config(sc->jme_dev,
1426 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1427 	}
1428 #endif
1429 
1430 	if (ifp->if_flags & IFF_UP)
1431 		jme_init(sc);
1432 
1433 	lwkt_serialize_exit(ifp->if_serializer);
1434 
1435 	return (0);
1436 }
1437 
/*
 * Load one outgoing mbuf chain into the TX descriptor ring.
 *
 * The mbuf may be defragmented by the DMA load (hence **m_head).  On
 * chips needing 64-bit descriptors an extra zero-length "symbol"
 * descriptor heads the chain; otherwise the head descriptor carries
 * the first data segment.  Ownership of the head descriptor is handed
 * to hardware only after all following descriptors are set up.
 *
 * Returns 0 on success; on failure the mbuf is freed, *m_head is set
 * to NULL and the bus_dma error is returned.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/* 64-bit chains burn one extra (symbol) descriptor per frame. */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	/* Free descriptors, minus the reserve and any symbol descriptor. */
	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	/* tx_ndesc counts the head desc only when it is the symbol desc. */
	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
1560 
/*
 * ifnet if_start method: drain the send queue into the TX ring and
 * kick the transmitter once for the whole burst.
 *
 * Requires the interface serializer.  Sets IFF_OACTIVE and stops
 * dequeueing when the ring cannot hold a worst-case frame; purges
 * the queue outright when there is no link.
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim finished descriptors first when the ring is nearly full. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			/* jme_encap() freed the mbuf on failure. */
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
1629 
1630 static void
1631 jme_watchdog(struct ifnet *ifp)
1632 {
1633 	struct jme_softc *sc = ifp->if_softc;
1634 
1635 	ASSERT_SERIALIZED(ifp->if_serializer);
1636 
1637 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1638 		if_printf(ifp, "watchdog timeout (missed link)\n");
1639 		ifp->if_oerrors++;
1640 		jme_init(sc);
1641 		return;
1642 	}
1643 
1644 	jme_txeof(sc);
1645 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1646 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1647 			  "-- recovering\n");
1648 		if (!ifq_is_empty(&ifp->if_snd))
1649 			if_devstart(ifp);
1650 		return;
1651 	}
1652 
1653 	if_printf(ifp, "watchdog timeout\n");
1654 	ifp->if_oerrors++;
1655 	jme_init(sc);
1656 	if (!ifq_is_empty(&ifp->if_snd))
1657 		if_devstart(ifp);
1658 }
1659 
/*
 * ifnet ioctl handler.  Requires the interface serializer.
 *
 * Handles MTU changes (disabling TX csum offload when the MTU no
 * longer fits the 2K TX FIFO), interface flag changes, multicast
 * filter updates, media ioctls (forwarded to mii) and capability
 * toggles; everything else goes to ether_ioctl().
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU is only valid on chips with JME_CAP_JUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only refilter on PROMISC/ALLMULTI change. */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* TX csum can only be enabled while the MTU fits the FIFO. */
		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		/* RSS changes require a full reinit of the RX rings. */
		if (mask & IFCAP_RSS) {
			ifp->if_capenable ^= IFCAP_RSS;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1762 
/*
 * Reprogram the MAC (GHC, RXMAC, TXMAC, TXPFC registers) to match
 * the speed/duplex the PHY resolved, and apply chip workarounds:
 * extended PHY FIFO depth for 100TX CRC errors on pre-JMC250B
 * chips, and the GPREG1 half-duplex workaround, each gated by
 * sc->jme_workaround flags.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	/* Reset GHC before reprogramming it from scratch. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only chips cannot do gigabit. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
1857 
/*
 * Interrupt handler.  Runs under the interface serializer.
 *
 * Masks all interrupts for the duration, acknowledges the pending
 * coalescing/completion bits (which also resets the PCC
 * counter/timer), services RX, RX-empty and TX completions when the
 * interface is running, then unmasks.  Bails out early on spurious
 * (0) or shared-gone (0xFFFFFFFF) status reads.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	/* Ack the COMP bit together with its coalescing bits. */
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
1920 
/*
 * Reclaim transmitted frames: walk the ring from the consumer index
 * to the producer index, free the mbufs of completed frames, update
 * error/collision statistics, and clear IFF_OACTIVE plus the
 * watchdog timer when enough descriptors become available again.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		/* Hardware still owns this frame: stop reclaiming. */
		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* Ring fully drained: cancel the watchdog. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	/* Room for a worst-case frame again: allow jme_start() to run. */
	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1987 
1988 static __inline void
1989 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
1990 {
1991 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
1992 	int i;
1993 
1994 	for (i = 0; i < count; ++i) {
1995 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
1996 
1997 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
1998 		desc->buflen = htole32(MCLBYTES);
1999 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2000 	}
2001 }
2002 
/*
 * Receive a frame.
 *
 * Pull one completed frame (possibly spanning several descriptors) off
 * RX ring `ring', replace each consumed cluster buffer via jme_newbuf(),
 * assemble the segments into a single mbuf chain, and queue the packet
 * on `chain' for batched input.  Advances the ring's consumer index
 * past all descriptors of the frame, even on error.
 */
static void
jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	/* First descriptor of the frame carries status and segment count. */
	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	JME_RSS_DPRINTF(sc, 10, "ring%d, flags 0x%08x, "
			"hash 0x%08x, hash type 0x%08x\n",
			ring, flags, desc->addr_hi, desc->addr_lo);

	if (status & JME_RX_ERR_STAT) {
		/* Errored frame: recycle its buffers and skip it entirely. */
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, ring, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
		return;
	}

	/* Frame length excluding the 10-byte hardware alignment pad. */
	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				/* Drop the partially assembled frame. */
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(sc, ring);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				/* TCP/UDP csum only valid for unfragmented packets. */
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			ether_input_chain(ifp, m, chain);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc, ring);
#ifdef JME_RSS_DEBUG
			sc->jme_rx_ring_pkt[ring]++;
#endif
		}
	}

	/* Advance consumer past every descriptor of this frame. */
	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
}
2142 
2143 static int
2144 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2145 		int count)
2146 {
2147 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2148 	struct jme_desc *desc;
2149 	int nsegs, prog, pktlen;
2150 
2151 	prog = 0;
2152 	for (;;) {
2153 #ifdef DEVICE_POLLING
2154 		if (count >= 0 && count-- == 0)
2155 			break;
2156 #endif
2157 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2158 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2159 			break;
2160 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2161 			break;
2162 
2163 		/*
2164 		 * Check number of segments against received bytes.
2165 		 * Non-matching value would indicate that hardware
2166 		 * is still trying to update Rx descriptors. I'm not
2167 		 * sure whether this check is needed.
2168 		 */
2169 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2170 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2171 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2172 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2173 				  "and packet size(%d) mismach\n",
2174 				  nsegs, pktlen);
2175 			break;
2176 		}
2177 
2178 		/* Received a frame. */
2179 		jme_rxpkt(sc, ring, chain);
2180 		prog++;
2181 	}
2182 	return prog;
2183 }
2184 
2185 static void
2186 jme_rxeof(struct jme_softc *sc, int ring)
2187 {
2188 	struct mbuf_chain chain[MAXCPU];
2189 
2190 	ether_input_chain_init(chain);
2191 	if (jme_rxeof_chain(sc, ring, chain, -1))
2192 		ether_input_dispatch(chain);
2193 }
2194 
2195 static void
2196 jme_tick(void *xsc)
2197 {
2198 	struct jme_softc *sc = xsc;
2199 	struct ifnet *ifp = &sc->arpcom.ac_if;
2200 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2201 
2202 	lwkt_serialize_enter(ifp->if_serializer);
2203 
2204 	mii_tick(mii);
2205 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2206 
2207 	lwkt_serialize_exit(ifp->if_serializer);
2208 }
2209 
/*
 * Issue a global software reset of the MAC, leaving the chip in a
 * quiescent, unprogrammed state.  jme_init() reconfigures everything
 * afterwards.
 */
static void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Brief settle time before releasing the reset bit. */
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}
2222 
2223 static void
2224 jme_init(void *xsc)
2225 {
2226 	struct jme_softc *sc = xsc;
2227 	struct ifnet *ifp = &sc->arpcom.ac_if;
2228 	struct mii_data *mii;
2229 	uint8_t eaddr[ETHER_ADDR_LEN];
2230 	bus_addr_t paddr;
2231 	uint32_t reg;
2232 	int error, r;
2233 
2234 	ASSERT_SERIALIZED(ifp->if_serializer);
2235 
2236 	/*
2237 	 * Cancel any pending I/O.
2238 	 */
2239 	jme_stop(sc);
2240 
2241 	/*
2242 	 * Reset the chip to a known state.
2243 	 */
2244 	jme_reset(sc);
2245 
2246 	sc->jme_txd_spare =
2247 	howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2248 	KKASSERT(sc->jme_txd_spare >= 1);
2249 
2250 	/*
2251 	 * If we use 64bit address mode for transmitting, each Tx request
2252 	 * needs one more symbol descriptor.
2253 	 */
2254 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2255 		sc->jme_txd_spare += 1;
2256 
2257 	if (ifp->if_capenable & IFCAP_RSS)
2258 		jme_enable_rss(sc);
2259 	else
2260 		jme_disable_rss(sc);
2261 
2262 	/* Init RX descriptors */
2263 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2264 		error = jme_init_rx_ring(sc, r);
2265 		if (error) {
2266 			if_printf(ifp, "initialization failed: "
2267 				  "no memory for %dth RX ring.\n", r);
2268 			jme_stop(sc);
2269 			return;
2270 		}
2271 	}
2272 
2273 	/* Init TX descriptors */
2274 	jme_init_tx_ring(sc);
2275 
2276 	/* Initialize shadow status block. */
2277 	jme_init_ssb(sc);
2278 
2279 	/* Reprogram the station address. */
2280 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2281 	CSR_WRITE_4(sc, JME_PAR0,
2282 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2283 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2284 
2285 	/*
2286 	 * Configure Tx queue.
2287 	 *  Tx priority queue weight value : 0
2288 	 *  Tx FIFO threshold for processing next packet : 16QW
2289 	 *  Maximum Tx DMA length : 512
2290 	 *  Allow Tx DMA burst.
2291 	 */
2292 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2293 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2294 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2295 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2296 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2297 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2298 
2299 	/* Set Tx descriptor counter. */
2300 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2301 
2302 	/* Set Tx ring address to the hardware. */
2303 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2304 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2305 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2306 
2307 	/* Configure TxMAC parameters. */
2308 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2309 	reg |= TXMAC_THRESH_1_PKT;
2310 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2311 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2312 
2313 	/*
2314 	 * Configure Rx queue.
2315 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2316 	 *  FIFO threshold for processing next packet : 128QW
2317 	 *  Rx queue 0 select
2318 	 *  Max Rx DMA length : 128
2319 	 *  Rx descriptor retry : 32
2320 	 *  Rx descriptor retry time gap : 256ns
2321 	 *  Don't receive runt/bad frame.
2322 	 */
2323 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2324 #if 0
2325 	/*
2326 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2327 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2328 	 * decrease FIFO threshold to reduce the FIFO overruns for
2329 	 * frames larger than 4000 bytes.
2330 	 * For best performance of standard MTU sized frames use
2331 	 * maximum allowable FIFO threshold, 128QW.
2332 	 */
2333 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2334 	    JME_RX_FIFO_SIZE)
2335 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2336 	else
2337 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2338 #else
2339 	/* Improve PCI Express compatibility */
2340 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2341 #endif
2342 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2343 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2344 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2345 	/* XXX TODO DROP_BAD */
2346 
2347 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2348 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2349 
2350 		/* Set Rx descriptor counter. */
2351 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2352 
2353 		/* Set Rx ring address to the hardware. */
2354 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2355 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2356 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2357 	}
2358 
2359 	/* Clear receive filter. */
2360 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2361 
2362 	/* Set up the receive filter. */
2363 	jme_set_filter(sc);
2364 	jme_set_vlan(sc);
2365 
2366 	/*
2367 	 * Disable all WOL bits as WOL can interfere normal Rx
2368 	 * operation. Also clear WOL detection status bits.
2369 	 */
2370 	reg = CSR_READ_4(sc, JME_PMCS);
2371 	reg &= ~PMCS_WOL_ENB_MASK;
2372 	CSR_WRITE_4(sc, JME_PMCS, reg);
2373 
2374 	/*
2375 	 * Pad 10bytes right before received frame. This will greatly
2376 	 * help Rx performance on strict-alignment architectures as
2377 	 * it does not need to copy the frame to align the payload.
2378 	 */
2379 	reg = CSR_READ_4(sc, JME_RXMAC);
2380 	reg |= RXMAC_PAD_10BYTES;
2381 
2382 	if (ifp->if_capenable & IFCAP_RXCSUM)
2383 		reg |= RXMAC_CSUM_ENB;
2384 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2385 
2386 	/* Configure general purpose reg0 */
2387 	reg = CSR_READ_4(sc, JME_GPREG0);
2388 	reg &= ~GPREG0_PCC_UNIT_MASK;
2389 	/* Set PCC timer resolution to micro-seconds unit. */
2390 	reg |= GPREG0_PCC_UNIT_US;
2391 	/*
2392 	 * Disable all shadow register posting as we have to read
2393 	 * JME_INTR_STATUS register in jme_intr. Also it seems
2394 	 * that it's hard to synchronize interrupt status between
2395 	 * hardware and software with shadow posting due to
2396 	 * requirements of bus_dmamap_sync(9).
2397 	 */
2398 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2399 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2400 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2401 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2402 	/* Disable posting of DW0. */
2403 	reg &= ~GPREG0_POST_DW0_ENB;
2404 	/* Clear PME message. */
2405 	reg &= ~GPREG0_PME_ENB;
2406 	/* Set PHY address. */
2407 	reg &= ~GPREG0_PHY_ADDR_MASK;
2408 	reg |= sc->jme_phyaddr;
2409 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2410 
2411 	/* Configure Tx queue 0 packet completion coalescing. */
2412 	jme_set_tx_coal(sc);
2413 
2414 	/* Configure Rx queue 0 packet completion coalescing. */
2415 	jme_set_rx_coal(sc);
2416 
2417 	/* Configure shadow status block but don't enable posting. */
2418 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2419 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2420 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2421 
2422 	/* Disable Timer 1 and Timer 2. */
2423 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2424 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2425 
2426 	/* Configure retry transmit period, retry limit value. */
2427 	CSR_WRITE_4(sc, JME_TXTRHD,
2428 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2429 	    TXTRHD_RT_PERIOD_MASK) |
2430 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2431 	    TXTRHD_RT_LIMIT_SHIFT));
2432 
2433 #ifdef DEVICE_POLLING
2434 	if (!(ifp->if_flags & IFF_POLLING))
2435 #endif
2436 	/* Initialize the interrupt mask. */
2437 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2438 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2439 
2440 	/*
2441 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2442 	 * done after detection of valid link in jme_miibus_statchg.
2443 	 */
2444 	sc->jme_flags &= ~JME_FLAG_LINK;
2445 
2446 	/* Set the current media. */
2447 	mii = device_get_softc(sc->jme_miibus);
2448 	mii_mediachg(mii);
2449 
2450 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2451 
2452 	ifp->if_flags |= IFF_RUNNING;
2453 	ifp->if_flags &= ~IFF_OACTIVE;
2454 }
2455 
/*
 * Bring the interface down: cancel the tick callout, disable interrupts
 * and the shadow status block, halt both DMA engines, then release every
 * mbuf still attached to the RX and TX rings.  Called with the
 * serializer held.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partial finished RX segments
	 */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				/* Unmap before freeing the mbuf. */
				bus_dmamap_unload(rdata->jme_rx_tag,
						  rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
        }
}
2526 
2527 static void
2528 jme_stop_tx(struct jme_softc *sc)
2529 {
2530 	uint32_t reg;
2531 	int i;
2532 
2533 	reg = CSR_READ_4(sc, JME_TXCSR);
2534 	if ((reg & TXCSR_TX_ENB) == 0)
2535 		return;
2536 	reg &= ~TXCSR_TX_ENB;
2537 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2538 	for (i = JME_TIMEOUT; i > 0; i--) {
2539 		DELAY(1);
2540 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2541 			break;
2542 	}
2543 	if (i == 0)
2544 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2545 }
2546 
2547 static void
2548 jme_stop_rx(struct jme_softc *sc)
2549 {
2550 	uint32_t reg;
2551 	int i;
2552 
2553 	reg = CSR_READ_4(sc, JME_RXCSR);
2554 	if ((reg & RXCSR_RX_ENB) == 0)
2555 		return;
2556 	reg &= ~RXCSR_RX_ENB;
2557 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2558 	for (i = JME_TIMEOUT; i > 0; i--) {
2559 		DELAY(1);
2560 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2561 			break;
2562 	}
2563 	if (i == 0)
2564 		device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2565 }
2566 
2567 static void
2568 jme_init_tx_ring(struct jme_softc *sc)
2569 {
2570 	struct jme_chain_data *cd;
2571 	struct jme_txdesc *txd;
2572 	int i;
2573 
2574 	sc->jme_cdata.jme_tx_prod = 0;
2575 	sc->jme_cdata.jme_tx_cons = 0;
2576 	sc->jme_cdata.jme_tx_cnt = 0;
2577 
2578 	cd = &sc->jme_cdata;
2579 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2580 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2581 		txd = &sc->jme_cdata.jme_txdesc[i];
2582 		txd->tx_m = NULL;
2583 		txd->tx_desc = &cd->jme_tx_ring[i];
2584 		txd->tx_ndesc = 0;
2585 	}
2586 }
2587 
2588 static void
2589 jme_init_ssb(struct jme_softc *sc)
2590 {
2591 	struct jme_chain_data *cd;
2592 
2593 	cd = &sc->jme_cdata;
2594 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2595 }
2596 
2597 static int
2598 jme_init_rx_ring(struct jme_softc *sc, int ring)
2599 {
2600 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2601 	struct jme_rxdesc *rxd;
2602 	int i;
2603 
2604 	KKASSERT(rdata->jme_rxhead == NULL &&
2605 		 rdata->jme_rxtail == NULL &&
2606 		 rdata->jme_rxlen == 0);
2607 	rdata->jme_rx_cons = 0;
2608 
2609 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2610 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2611 		int error;
2612 
2613 		rxd = &rdata->jme_rxdesc[i];
2614 		rxd->rx_m = NULL;
2615 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2616 		error = jme_newbuf(sc, ring, rxd, 1);
2617 		if (error)
2618 			return error;
2619 	}
2620 	return 0;
2621 }
2622 
/*
 * Attach a fresh mbuf cluster to RX descriptor `rxd'.
 *
 * The cluster is first loaded into the ring's spare DMA map; only when
 * the load succeeds are the descriptor's map and mbuf swapped, so a
 * failure leaves the previous buffer intact and reusable.  With `init'
 * set the mbuf allocation may sleep; otherwise it must not.
 *
 * Returns 0 on success, or ENOBUFS / the DMA-load errno on failure.
 */
static int
jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map; rxd's own map still holds the old buf. */
	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
			BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init)
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
		return error;
	}

	/* Tear down the previous buffer's mapping, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly loaded spare map into the descriptor. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;

	/* Publish the buffer; JME_RD_OWN hands the descriptor to hardware. */
	desc = rxd->rx_desc;
	desc->buflen = htole32(segs.ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return 0;
}
2672 
2673 static void
2674 jme_set_vlan(struct jme_softc *sc)
2675 {
2676 	struct ifnet *ifp = &sc->arpcom.ac_if;
2677 	uint32_t reg;
2678 
2679 	ASSERT_SERIALIZED(ifp->if_serializer);
2680 
2681 	reg = CSR_READ_4(sc, JME_RXMAC);
2682 	reg &= ~RXMAC_VLAN_ENB;
2683 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2684 		reg |= RXMAC_VLAN_ENB;
2685 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2686 }
2687 
2688 static void
2689 jme_set_filter(struct jme_softc *sc)
2690 {
2691 	struct ifnet *ifp = &sc->arpcom.ac_if;
2692 	struct ifmultiaddr *ifma;
2693 	uint32_t crc;
2694 	uint32_t mchash[2];
2695 	uint32_t rxcfg;
2696 
2697 	ASSERT_SERIALIZED(ifp->if_serializer);
2698 
2699 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2700 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2701 	    RXMAC_ALLMULTI);
2702 
2703 	/*
2704 	 * Always accept frames destined to our station address.
2705 	 * Always accept broadcast frames.
2706 	 */
2707 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2708 
2709 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2710 		if (ifp->if_flags & IFF_PROMISC)
2711 			rxcfg |= RXMAC_PROMISC;
2712 		if (ifp->if_flags & IFF_ALLMULTI)
2713 			rxcfg |= RXMAC_ALLMULTI;
2714 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2715 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2716 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2717 		return;
2718 	}
2719 
2720 	/*
2721 	 * Set up the multicast address filter by passing all multicast
2722 	 * addresses through a CRC generator, and then using the low-order
2723 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2724 	 * high order bits select the register, while the rest of the bits
2725 	 * select the bit within the register.
2726 	 */
2727 	rxcfg |= RXMAC_MULTICAST;
2728 	bzero(mchash, sizeof(mchash));
2729 
2730 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2731 		if (ifma->ifma_addr->sa_family != AF_LINK)
2732 			continue;
2733 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2734 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2735 
2736 		/* Just want the 6 least significant bits. */
2737 		crc &= 0x3f;
2738 
2739 		/* Set the corresponding bit in the hash table. */
2740 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2741 	}
2742 
2743 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2744 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2745 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2746 }
2747 
2748 static int
2749 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2750 {
2751 	struct jme_softc *sc = arg1;
2752 	struct ifnet *ifp = &sc->arpcom.ac_if;
2753 	int error, v;
2754 
2755 	lwkt_serialize_enter(ifp->if_serializer);
2756 
2757 	v = sc->jme_tx_coal_to;
2758 	error = sysctl_handle_int(oidp, &v, 0, req);
2759 	if (error || req->newptr == NULL)
2760 		goto back;
2761 
2762 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2763 		error = EINVAL;
2764 		goto back;
2765 	}
2766 
2767 	if (v != sc->jme_tx_coal_to) {
2768 		sc->jme_tx_coal_to = v;
2769 		if (ifp->if_flags & IFF_RUNNING)
2770 			jme_set_tx_coal(sc);
2771 	}
2772 back:
2773 	lwkt_serialize_exit(ifp->if_serializer);
2774 	return error;
2775 }
2776 
2777 static int
2778 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2779 {
2780 	struct jme_softc *sc = arg1;
2781 	struct ifnet *ifp = &sc->arpcom.ac_if;
2782 	int error, v;
2783 
2784 	lwkt_serialize_enter(ifp->if_serializer);
2785 
2786 	v = sc->jme_tx_coal_pkt;
2787 	error = sysctl_handle_int(oidp, &v, 0, req);
2788 	if (error || req->newptr == NULL)
2789 		goto back;
2790 
2791 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2792 		error = EINVAL;
2793 		goto back;
2794 	}
2795 
2796 	if (v != sc->jme_tx_coal_pkt) {
2797 		sc->jme_tx_coal_pkt = v;
2798 		if (ifp->if_flags & IFF_RUNNING)
2799 			jme_set_tx_coal(sc);
2800 	}
2801 back:
2802 	lwkt_serialize_exit(ifp->if_serializer);
2803 	return error;
2804 }
2805 
2806 static int
2807 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2808 {
2809 	struct jme_softc *sc = arg1;
2810 	struct ifnet *ifp = &sc->arpcom.ac_if;
2811 	int error, v;
2812 
2813 	lwkt_serialize_enter(ifp->if_serializer);
2814 
2815 	v = sc->jme_rx_coal_to;
2816 	error = sysctl_handle_int(oidp, &v, 0, req);
2817 	if (error || req->newptr == NULL)
2818 		goto back;
2819 
2820 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2821 		error = EINVAL;
2822 		goto back;
2823 	}
2824 
2825 	if (v != sc->jme_rx_coal_to) {
2826 		sc->jme_rx_coal_to = v;
2827 		if (ifp->if_flags & IFF_RUNNING)
2828 			jme_set_rx_coal(sc);
2829 	}
2830 back:
2831 	lwkt_serialize_exit(ifp->if_serializer);
2832 	return error;
2833 }
2834 
2835 static int
2836 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2837 {
2838 	struct jme_softc *sc = arg1;
2839 	struct ifnet *ifp = &sc->arpcom.ac_if;
2840 	int error, v;
2841 
2842 	lwkt_serialize_enter(ifp->if_serializer);
2843 
2844 	v = sc->jme_rx_coal_pkt;
2845 	error = sysctl_handle_int(oidp, &v, 0, req);
2846 	if (error || req->newptr == NULL)
2847 		goto back;
2848 
2849 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2850 		error = EINVAL;
2851 		goto back;
2852 	}
2853 
2854 	if (v != sc->jme_rx_coal_pkt) {
2855 		sc->jme_rx_coal_pkt = v;
2856 		if (ifp->if_flags & IFF_RUNNING)
2857 			jme_set_rx_coal(sc);
2858 	}
2859 back:
2860 	lwkt_serialize_exit(ifp->if_serializer);
2861 	return error;
2862 }
2863 
2864 static void
2865 jme_set_tx_coal(struct jme_softc *sc)
2866 {
2867 	uint32_t reg;
2868 
2869 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2870 	    PCCTX_COAL_TO_MASK;
2871 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2872 	    PCCTX_COAL_PKT_MASK;
2873 	reg |= PCCTX_COAL_TXQ0;
2874 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2875 }
2876 
2877 static void
2878 jme_set_rx_coal(struct jme_softc *sc)
2879 {
2880 	uint32_t reg;
2881 	int r;
2882 
2883 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2884 	    PCCRX_COAL_TO_MASK;
2885 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2886 	    PCCRX_COAL_PKT_MASK;
2887 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
2888 		if (r < sc->jme_rx_ring_inuse)
2889 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
2890 		else
2891 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
2892 	}
2893 }
2894 
2895 #ifdef DEVICE_POLLING
2896 
/*
 * polling(4) entry point.  REGISTER/DEREGISTER toggle the chip's
 * interrupt mask; POLL_ONLY and POLL_AND_CHECK_STATUS service up to
 * `count' RX frames per ring, restart the RX engine if it ran out of
 * descriptors, and reap completed TX work.  Serializer must be held.
 */
static void
jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf_chain chain[MAXCPU];
	uint32_t status;
	int r, prog = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		/* Interrupts off for the duration of polling. */
		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
		break;

	case POLL_DEREGISTER:
		/* Back to interrupt-driven operation. */
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
		break;

	case POLL_AND_CHECK_STATUS:
	case POLL_ONLY:
		status = CSR_READ_4(sc, JME_INTR_STATUS);

		ether_input_chain_init(chain);
		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
			prog += jme_rxeof_chain(sc, r, chain, count);
		if (prog)
			ether_input_dispatch(chain);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/* Ack and kick the RX engine back into gear. */
			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		jme_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		break;
	}
}
2938 
2939 #endif	/* DEVICE_POLLING */
2940 
/*
 * Allocate the DMA-coherent descriptor ring for RX ring `ring' and
 * record its tag, map, KVA and bus address in the ring's softc state.
 * Returns 0 on success or the bus_dmamem_coherent() errno.
 */
static int
jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_RX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_RX_RING_SIZE(sc),
			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate %dth Rx ring.\n", ring);
		return error;
	}
	rdata->jme_rx_ring_tag = dmem.dmem_tag;
	rdata->jme_rx_ring_map = dmem.dmem_map;
	rdata->jme_rx_ring = dmem.dmem_addr;
	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;

	return 0;
}
2965 
/*
 * Create the RX buffer DMA resources for ring `ring': one DMA tag,
 * one spare map, and one map per RX descriptor.  On any failure all
 * maps and the tag created so far are destroyed again before the
 * errno is returned.
 */
static int
jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	int i, error;

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", ring);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
				  &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n", ring);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, ring);

			/* Unwind: destroy all maps created so far. */
			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}
3025 
3026 static void
3027 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3028 {
3029 	struct mbuf_chain chain[MAXCPU];
3030 	int r, prog = 0;
3031 
3032 	ether_input_chain_init(chain);
3033 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3034 		if (status & jme_rx_status[r].jme_coal)
3035 			prog += jme_rxeof_chain(sc, r, chain, -1);
3036 	}
3037 	if (prog)
3038 		ether_input_dispatch(chain);
3039 }
3040 
3041 static void
3042 jme_enable_rss(struct jme_softc *sc)
3043 {
3044 	uint32_t rssc, key, ind;
3045 	int i;
3046 
3047 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3048 
3049 	rssc = RSSC_HASH_64_ENTRY;
3050 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3051 	rssc |= sc->jme_rx_ring_inuse >> 1;
3052 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3053 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3054 
3055 	key = 0x6d5a6d5a; /* XXX */
3056 	for (i = 0; i < RSSKEY_NREGS; ++i)
3057 		CSR_WRITE_4(sc, RSSKEY_REG(i), key);
3058 
3059 	ind = 0;
3060 	if (sc->jme_rx_ring_inuse == JME_NRXRING_2) {
3061 		ind = 0x01000100;
3062 	} else if (sc->jme_rx_ring_inuse == JME_NRXRING_4) {
3063 		ind = 0x03020100;
3064 	} else {
3065 		panic("%s: invalid # of RX rings (%d)\n",
3066 		      sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse);
3067 	}
3068 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3069 	for (i = 0; i < RSSTBL_NREGS; ++i)
3070 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3071 }
3072 
/*
 * Turn RSS hashing off in hardware and fall back to a single RX ring.
 */
static void
jme_disable_rss(struct jme_softc *sc)
{
	sc->jme_rx_ring_inuse = JME_NRXRING_1;
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}
3079