xref: /dragonfly/sys/dev/netif/jme/if_jme.c (revision fcf53d9b)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
29  */
30 
31 #include "opt_polling.h"
32 #include "opt_rss.h"
33 #include "opt_jme.h"
34 
35 #include <sys/param.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/interrupt.h>
40 #include <sys/malloc.h>
41 #include <sys/proc.h>
42 #include <sys/rman.h>
43 #include <sys/serialize.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 
48 #include <net/ethernet.h>
49 #include <net/if.h>
50 #include <net/bpf.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/ifq_var.h>
55 #include <net/toeplitz.h>
56 #include <net/toeplitz2.h>
57 #include <net/vlan/if_vlan_var.h>
58 #include <net/vlan/if_vlan_ether.h>
59 
60 #include <netinet/in.h>
61 
62 #include <dev/netif/mii_layer/miivar.h>
63 #include <dev/netif/mii_layer/jmphyreg.h>
64 
65 #include <bus/pci/pcireg.h>
66 #include <bus/pci/pcivar.h>
67 #include <bus/pci/pcidevs.h>
68 
69 #include <dev/netif/jme/if_jmereg.h>
70 #include <dev/netif/jme/if_jmevar.h>
71 
72 #include "miibus_if.h"
73 
74 /* Define the following to enable printing of Rx errors. */
75 #undef	JME_SHOW_ERRORS
76 
77 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
78 
79 #ifdef JME_RSS_DEBUG
80 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
81 do { \
82 	if ((sc)->jme_rss_debug >= (lvl)) \
83 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
84 } while (0)
85 #else	/* !JME_RSS_DEBUG */
86 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
87 #endif	/* JME_RSS_DEBUG */
88 
89 static int	jme_probe(device_t);
90 static int	jme_attach(device_t);
91 static int	jme_detach(device_t);
92 static int	jme_shutdown(device_t);
93 static int	jme_suspend(device_t);
94 static int	jme_resume(device_t);
95 
96 static int	jme_miibus_readreg(device_t, int, int);
97 static int	jme_miibus_writereg(device_t, int, int, int);
98 static void	jme_miibus_statchg(device_t);
99 
100 static void	jme_init(void *);
101 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
102 static void	jme_start(struct ifnet *);
103 static void	jme_watchdog(struct ifnet *);
104 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
105 static int	jme_mediachange(struct ifnet *);
106 #ifdef DEVICE_POLLING
107 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
108 #endif
109 
110 static void	jme_intr(void *);
111 static void	jme_txeof(struct jme_softc *);
112 static void	jme_rxeof(struct jme_softc *, int);
113 static int	jme_rxeof_chain(struct jme_softc *, int,
114 				struct mbuf_chain *, int);
115 static void	jme_rx_intr(struct jme_softc *, uint32_t);
116 
117 static int	jme_dma_alloc(struct jme_softc *);
118 static void	jme_dma_free(struct jme_softc *);
119 static int	jme_init_rx_ring(struct jme_softc *, int);
120 static void	jme_init_tx_ring(struct jme_softc *);
121 static void	jme_init_ssb(struct jme_softc *);
122 static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
123 static int	jme_encap(struct jme_softc *, struct mbuf **);
124 static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
125 static int	jme_rxring_dma_alloc(struct jme_softc *, int);
126 static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);
127 
128 static void	jme_tick(void *);
129 static void	jme_stop(struct jme_softc *);
130 static void	jme_reset(struct jme_softc *);
131 static void	jme_set_vlan(struct jme_softc *);
132 static void	jme_set_filter(struct jme_softc *);
133 static void	jme_stop_tx(struct jme_softc *);
134 static void	jme_stop_rx(struct jme_softc *);
135 static void	jme_mac_config(struct jme_softc *);
136 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
137 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
138 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
139 #ifdef notyet
140 static void	jme_setwol(struct jme_softc *);
141 static void	jme_setlinkspeed(struct jme_softc *);
142 #endif
143 static void	jme_set_tx_coal(struct jme_softc *);
144 static void	jme_set_rx_coal(struct jme_softc *);
145 static void	jme_enable_rss(struct jme_softc *);
146 static void	jme_disable_rss(struct jme_softc *);
147 
148 static void	jme_sysctl_node(struct jme_softc *);
149 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
150 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
151 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
152 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
153 
154 /*
155  * Devices supported by this driver.
156  */
157 static const struct jme_dev {
158 	uint16_t	jme_vendorid;
159 	uint16_t	jme_deviceid;
160 	uint32_t	jme_caps;
161 	const char	*jme_name;
162 } jme_devs[] = {
163 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
164 	    JME_CAP_JUMBO,
165 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
166 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
167 	    JME_CAP_FASTETH,
168 	    "JMicron Inc, JMC260 Fast Ethernet" },
169 	{ 0, 0, 0, NULL }
170 };
171 
172 static device_method_t jme_methods[] = {
173 	/* Device interface. */
174 	DEVMETHOD(device_probe,		jme_probe),
175 	DEVMETHOD(device_attach,	jme_attach),
176 	DEVMETHOD(device_detach,	jme_detach),
177 	DEVMETHOD(device_shutdown,	jme_shutdown),
178 	DEVMETHOD(device_suspend,	jme_suspend),
179 	DEVMETHOD(device_resume,	jme_resume),
180 
181 	/* Bus interface. */
182 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
183 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
184 
185 	/* MII interface. */
186 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
187 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
188 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
189 
190 	{ NULL, NULL }
191 };
192 
193 static driver_t jme_driver = {
194 	"jme",
195 	jme_methods,
196 	sizeof(struct jme_softc)
197 };
198 
199 static devclass_t jme_devclass;
200 
201 DECLARE_DUMMY_MODULE(if_jme);
202 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
203 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
204 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
205 
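/*
 * Per-Rx-ring interrupt status bits: jme_coal is the coalescing and
 * coalescing-timeout cause mask for the ring, and jme_comp is the
 * matching completion bit that is ORed into the status before it is
 * written back to acknowledge the interrupt (see jme_intr()).
 */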
206 static const struct {
207 	uint32_t	jme_coal;
208 	uint32_t	jme_comp;
209 } jme_rx_status[JME_NRXRING_MAX] = {
210 	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
211 	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
212 	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
213 	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
214 };
215 
216 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
217 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
218 static int	jme_rx_ring_count = JME_NRXRING_DEF;
219 
220 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
221 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
222 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
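/*
 * These tunables are read from the kernel environment at boot time and
 * may be set from /boot/loader.conf, e.g. (values purely illustrative):
 *
 *	hw.jme.rx_desc_count="512"
 *	hw.jme.rx_ring_count="2"
 */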
223 
224 /*
225  *	Read a PHY register on the MII of the JMC250.
226  */
227 static int
228 jme_miibus_readreg(device_t dev, int phy, int reg)
229 {
230 	struct jme_softc *sc = device_get_softc(dev);
231 	uint32_t val;
232 	int i;
233 
234 	/* For FPGA version, PHY address 0 should be ignored. */
235 	if (sc->jme_caps & JME_CAP_FPGA) {
236 		if (phy == 0)
237 			return (0);
238 	} else {
239 		if (sc->jme_phyaddr != phy)
240 			return (0);
241 	}
242 
243 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
244 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
245 
246 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
247 		DELAY(1);
248 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
249 			break;
250 	}
251 	if (i == 0) {
252 		device_printf(sc->jme_dev, "phy read timeout: "
253 			      "phy %d, reg %d\n", phy, reg);
254 		return (0);
255 	}
256 
257 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
258 }
259 
260 /*
261  *	Write a PHY register on the MII of the JMC250.
262  */
263 static int
264 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
265 {
266 	struct jme_softc *sc = device_get_softc(dev);
267 	int i;
268 
269 	/* For FPGA version, PHY address 0 should be ignored. */
270 	if (sc->jme_caps & JME_CAP_FPGA) {
271 		if (phy == 0)
272 			return (0);
273 	} else {
274 		if (sc->jme_phyaddr != phy)
275 			return (0);
276 	}
277 
278 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
279 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
280 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
281 
282 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
283 		DELAY(1);
284 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
285 			break;
286 	}
287 	if (i == 0) {
288 		device_printf(sc->jme_dev, "phy write timeout: "
289 			      "phy %d, reg %d\n", phy, reg);
290 	}
291 
292 	return (0);
293 }
294 
295 /*
296  *	Callback from MII layer when media changes.
297  */
298 static void
299 jme_miibus_statchg(device_t dev)
300 {
301 	struct jme_softc *sc = device_get_softc(dev);
302 	struct ifnet *ifp = &sc->arpcom.ac_if;
303 	struct mii_data *mii;
304 	struct jme_txdesc *txd;
305 	bus_addr_t paddr;
306 	int i, r;
307 
308 	ASSERT_SERIALIZED(ifp->if_serializer);
309 
310 	if ((ifp->if_flags & IFF_RUNNING) == 0)
311 		return;
312 
313 	mii = device_get_softc(sc->jme_miibus);
314 
315 	sc->jme_flags &= ~JME_FLAG_LINK;
316 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
317 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
318 		case IFM_10_T:
319 		case IFM_100_TX:
320 			sc->jme_flags |= JME_FLAG_LINK;
321 			break;
322 		case IFM_1000_T:
323 			if (sc->jme_caps & JME_CAP_FASTETH)
324 				break;
325 			sc->jme_flags |= JME_FLAG_LINK;
326 			break;
327 		default:
328 			break;
329 		}
330 	}
331 
332 	/*
333 	 * Disabling the Rx/Tx MACs has the side effect of resetting
334 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
335 	 * the Tx/Rx descriptor rings, so the driver should reset its
336 	 * internal producer/consumer pointers and reclaim any
337 	 * allocated resources.  Note that merely saving the values of
338 	 * the JME_TXNDA and JME_RXNDA registers before stopping the
339 	 * MACs and restoring them afterwards is not sufficient to
340 	 * guarantee a correct MAC state, because stopping MAC
341 	 * operation can take a while and the hardware might have
342 	 * updated JME_TXNDA/JME_RXNDA registers during the stop
343 	 * operation.
344 	 */
345 
346 	/* Disable interrupts */
347 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
348 
349 	/* Stop driver */
350 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
351 	ifp->if_timer = 0;
352 	callout_stop(&sc->jme_tick_ch);
353 
354 	/* Stop receiver/transmitter. */
355 	jme_stop_rx(sc);
356 	jme_stop_tx(sc);
357 
358 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
359 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
360 
361 		jme_rxeof(sc, r);
362 		if (rdata->jme_rxhead != NULL)
363 			m_freem(rdata->jme_rxhead);
364 		JME_RXCHAIN_RESET(sc, r);
365 
366 		/*
367 		 * Reuse configured Rx descriptors and reset
368 		 * producer/consumer index.
369 		 */
370 		rdata->jme_rx_cons = 0;
371 	}
372 
373 	jme_txeof(sc);
374 	if (sc->jme_cdata.jme_tx_cnt != 0) {
375 		/* Remove queued packets for transmit. */
376 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
377 			txd = &sc->jme_cdata.jme_txdesc[i];
378 			if (txd->tx_m != NULL) {
379 				bus_dmamap_unload(
380 				    sc->jme_cdata.jme_tx_tag,
381 				    txd->tx_dmamap);
382 				m_freem(txd->tx_m);
383 				txd->tx_m = NULL;
384 				txd->tx_ndesc = 0;
385 				ifp->if_oerrors++;
386 			}
387 		}
388 	}
389 	jme_init_tx_ring(sc);
390 
391 	/* Initialize shadow status block. */
392 	jme_init_ssb(sc);
393 
394 	/* Program MAC with resolved speed/duplex/flow-control. */
395 	if (sc->jme_flags & JME_FLAG_LINK) {
396 		jme_mac_config(sc);
397 
398 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
399 
400 		/* Set Tx ring address to the hardware. */
401 		paddr = sc->jme_cdata.jme_tx_ring_paddr;
402 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
403 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
404 
405 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
406 			CSR_WRITE_4(sc, JME_RXCSR,
407 			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
408 
409 			/* Set Rx ring address to the hardware. */
410 			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
411 			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
412 			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
413 		}
414 
415 		/* Restart receiver/transmitter. */
416 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
417 		    RXCSR_RXQ_START);
418 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
419 	}
420 
421 	ifp->if_flags |= IFF_RUNNING;
422 	ifp->if_flags &= ~IFF_OACTIVE;
423 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
424 
425 #ifdef DEVICE_POLLING
426 	if (!(ifp->if_flags & IFF_POLLING))
427 #endif
428 	/* Reenable interrupts. */
429 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
430 }
431 
432 /*
433  *	Get the current interface media status.
434  */
435 static void
436 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
437 {
438 	struct jme_softc *sc = ifp->if_softc;
439 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
440 
441 	ASSERT_SERIALIZED(ifp->if_serializer);
442 
443 	mii_pollstat(mii);
444 	ifmr->ifm_status = mii->mii_media_status;
445 	ifmr->ifm_active = mii->mii_media_active;
446 }
447 
448 /*
449  *	Set hardware to newly-selected media.
450  */
451 static int
452 jme_mediachange(struct ifnet *ifp)
453 {
454 	struct jme_softc *sc = ifp->if_softc;
455 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
456 	int error;
457 
458 	ASSERT_SERIALIZED(ifp->if_serializer);
459 
460 	if (mii->mii_instance != 0) {
461 		struct mii_softc *miisc;
462 
463 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
464 			mii_phy_reset(miisc);
465 	}
466 	error = mii_mediachg(mii);
467 
468 	return (error);
469 }
470 
471 static int
472 jme_probe(device_t dev)
473 {
474 	const struct jme_dev *sp;
475 	uint16_t vid, did;
476 
477 	vid = pci_get_vendor(dev);
478 	did = pci_get_device(dev);
479 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
480 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
481 			struct jme_softc *sc = device_get_softc(dev);
482 
483 			sc->jme_caps = sp->jme_caps;
484 			device_set_desc(dev, sp->jme_name);
485 			return (0);
486 		}
487 	}
488 	return (ENXIO);
489 }
490 
491 static int
492 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
493 {
494 	uint32_t reg;
495 	int i;
496 
497 	*val = 0;
498 	for (i = JME_TIMEOUT; i > 0; i--) {
499 		reg = CSR_READ_4(sc, JME_SMBCSR);
500 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
501 			break;
502 		DELAY(1);
503 	}
504 
505 	if (i == 0) {
506 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
507 		return (ETIMEDOUT);
508 	}
509 
510 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
511 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
512 	for (i = JME_TIMEOUT; i > 0; i--) {
513 		DELAY(1);
514 		reg = CSR_READ_4(sc, JME_SMBINTF);
515 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
516 			break;
517 	}
518 
519 	if (i == 0) {
520 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
521 		return (ETIMEDOUT);
522 	}
523 
524 	reg = CSR_READ_4(sc, JME_SMBINTF);
525 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
526 
527 	return (0);
528 }
529 
530 static int
531 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
532 {
533 	uint8_t fup, reg, val;
534 	uint32_t offset;
535 	int match;
536 
537 	offset = 0;
538 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
539 	    fup != JME_EEPROM_SIG0)
540 		return (ENOENT);
541 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
542 	    fup != JME_EEPROM_SIG1)
543 		return (ENOENT);
544 	match = 0;
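	/*
	 * Each EEPROM descriptor is a three byte tuple: a function/page
	 * selector byte (which also carries the end-of-list flag), a
	 * register offset and a data byte.  Walk the list and collect
	 * the bytes aimed at the station address registers
	 * (JME_PAR0 .. JME_PAR0 + 5).
	 */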
545 	do {
546 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
547 			break;
548 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
549 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
550 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
551 				break;
552 			if (reg >= JME_PAR0 &&
553 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
554 				if (jme_eeprom_read_byte(sc, offset + 2,
555 				    &val) != 0)
556 					break;
557 				eaddr[reg - JME_PAR0] = val;
558 				match++;
559 			}
560 		}
561 		/* Check for the end of EEPROM descriptor. */
562 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
563 			break;
564 		/* Try next eeprom descriptor. */
565 		offset += JME_EEPROM_DESC_BYTES;
566 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
567 
568 	if (match == ETHER_ADDR_LEN)
569 		return (0);
570 
571 	return (ENOENT);
572 }
573 
574 static void
575 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
576 {
577 	uint32_t par0, par1;
578 
579 	/* Read station address. */
580 	par0 = CSR_READ_4(sc, JME_PAR0);
581 	par1 = CSR_READ_4(sc, JME_PAR1);
582 	par1 &= 0xFFFF;
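	/*
	 * An all-zero address, or one with the multicast bit (bit 0 of
	 * the first byte) set, cannot be a valid station address.
	 */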
583 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
584 		device_printf(sc->jme_dev,
585 		    "generating fake ethernet address.\n");
586 		par0 = karc4random();
587 		/* Set OUI to JMicron. */
588 		eaddr[0] = 0x00;
589 		eaddr[1] = 0x1B;
590 		eaddr[2] = 0x8C;
591 		eaddr[3] = (par0 >> 16) & 0xff;
592 		eaddr[4] = (par0 >> 8) & 0xff;
593 		eaddr[5] = par0 & 0xff;
594 	} else {
595 		eaddr[0] = (par0 >> 0) & 0xFF;
596 		eaddr[1] = (par0 >> 8) & 0xFF;
597 		eaddr[2] = (par0 >> 16) & 0xFF;
598 		eaddr[3] = (par0 >> 24) & 0xFF;
599 		eaddr[4] = (par1 >> 0) & 0xFF;
600 		eaddr[5] = (par1 >> 8) & 0xFF;
601 	}
602 }
603 
604 static int
605 jme_attach(device_t dev)
606 {
607 	struct jme_softc *sc = device_get_softc(dev);
608 	struct ifnet *ifp = &sc->arpcom.ac_if;
609 	uint32_t reg;
610 	uint16_t did;
611 	uint8_t pcie_ptr, rev;
612 	int error = 0;
613 	uint8_t eaddr[ETHER_ADDR_LEN];
614 
615 	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
616 	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
617 		sc->jme_rx_desc_cnt = JME_NDESC_MAX;
618 
619 	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
620 	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
621 		sc->jme_tx_desc_cnt = JME_NDESC_MAX;
622 
623 	/*
624 	 * Calculate the number of Rx rings based on ncpus2.
625 	 */
626 	sc->jme_rx_ring_cnt = jme_rx_ring_count;
627 	if (sc->jme_rx_ring_cnt <= 0)
628 		sc->jme_rx_ring_cnt = JME_NRXRING_1;
629 	if (sc->jme_rx_ring_cnt > ncpus2)
630 		sc->jme_rx_ring_cnt = ncpus2;
631 
632 	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
633 		sc->jme_rx_ring_cnt = JME_NRXRING_4;
634 	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
635 		sc->jme_rx_ring_cnt = JME_NRXRING_2;
636 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
637 
638 	sc->jme_dev = dev;
639 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
640 
641 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
642 
643 	callout_init(&sc->jme_tick_ch);
644 
645 #ifndef BURN_BRIDGES
646 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
647 		uint32_t irq, mem;
648 
649 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
650 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
651 
652 		device_printf(dev, "chip is in D%d power mode "
653 		    "-- setting to D0\n", pci_get_powerstate(dev));
654 
655 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
656 
657 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
658 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
659 	}
660 #endif	/* !BURN_BRIDGES */
661 
662 	/* Enable bus mastering */
663 	pci_enable_busmaster(dev);
664 
665 	/*
666 	 * Allocate IO memory
667 	 *
668 	 * The JMC250 supports both memory-mapped and I/O register
669 	 * access.  Since I/O access would need several different BARs
670 	 * to reach all registers, it is a waste of time to use it;
671 	 * the JMC250 maps its entire register space into a single 16K
672 	 * memory region.
673 	 */
674 	sc->jme_mem_rid = JME_PCIR_BAR;
675 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
676 						 &sc->jme_mem_rid, RF_ACTIVE);
677 	if (sc->jme_mem_res == NULL) {
678 		device_printf(dev, "can't allocate IO memory\n");
679 		return ENXIO;
680 	}
681 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
682 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
683 
684 	/*
685 	 * Allocate IRQ
686 	 */
687 	sc->jme_irq_rid = 0;
688 	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
689 						 &sc->jme_irq_rid,
690 						 RF_SHAREABLE | RF_ACTIVE);
691 	if (sc->jme_irq_res == NULL) {
692 		device_printf(dev, "can't allocate irq\n");
693 		error = ENXIO;
694 		goto fail;
695 	}
696 
697 	/*
698 	 * Extract revisions
699 	 */
700 	reg = CSR_READ_4(sc, JME_CHIPMODE);
701 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
702 	    CHIPMODE_NOT_FPGA) {
703 		sc->jme_caps |= JME_CAP_FPGA;
704 		if (bootverbose) {
705 			device_printf(dev, "FPGA revision: 0x%04x\n",
706 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
707 				      CHIPMODE_FPGA_REV_SHIFT);
708 		}
709 	}
710 
711 	/* NOTE: FM revision is put in the upper 4 bits */
712 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
713 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
714 	if (bootverbose)
715 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
716 
717 	did = pci_get_device(dev);
718 	switch (did) {
719 	case PCI_PRODUCT_JMICRON_JMC250:
720 		if (rev == JME_REV1_A2)
721 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
722 		break;
723 
724 	case PCI_PRODUCT_JMICRON_JMC260:
725 		if (rev == JME_REV2)
726 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
727 		break;
728 
729 	default:
730 		panic("unknown device id 0x%04x", did);
731 	}
732 	if (rev >= JME_REV2) {
733 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
734 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
735 				      GHC_TXMAC_CLKSRC_1000;
736 	}
737 
738 	/* Reset the ethernet controller. */
739 	jme_reset(sc);
740 
741 	/* Get station address. */
742 	reg = CSR_READ_4(sc, JME_SMBCSR);
743 	if (reg & SMBCSR_EEPROM_PRESENT)
744 		error = jme_eeprom_macaddr(sc, eaddr);
745 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
746 		if (error != 0 && (bootverbose)) {
747 			device_printf(dev, "ethernet hardware address "
748 				      "not found in EEPROM.\n");
749 		}
750 		jme_reg_macaddr(sc, eaddr);
751 	}
752 
753 	/*
754 	 * Save PHY address.
755 	 * The integrated JR0211 has a fixed PHY address, whereas the
756 	 * FPGA version requires PHY probing to get the correct one.
757 	 */
758 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
759 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
760 		    GPREG0_PHY_ADDR_MASK;
761 		if (bootverbose) {
762 			device_printf(dev, "PHY is at address %d.\n",
763 			    sc->jme_phyaddr);
764 		}
765 	} else {
766 		sc->jme_phyaddr = 0;
767 	}
768 
769 	/* Set max allowable DMA size. */
770 	pcie_ptr = pci_get_pciecap_ptr(dev);
771 	if (pcie_ptr != 0) {
772 		uint16_t ctrl;
773 
774 		sc->jme_caps |= JME_CAP_PCIE;
775 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
776 		if (bootverbose) {
777 			device_printf(dev, "Read request size : %d bytes.\n",
778 			    128 << ((ctrl >> 12) & 0x07));
779 			device_printf(dev, "TLP payload size : %d bytes.\n",
780 			    128 << ((ctrl >> 5) & 0x07));
781 		}
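		/*
		 * Match the Tx DMA burst size to the PCIe maximum read
		 * request size; requests of 512 bytes or more all use
		 * the 512 byte burst setting.
		 */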
782 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
783 		case PCIEM_DEVCTL_MAX_READRQ_128:
784 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
785 			break;
786 		case PCIEM_DEVCTL_MAX_READRQ_256:
787 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
788 			break;
789 		default:
790 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
791 			break;
792 		}
793 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
794 	} else {
795 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
796 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
797 	}
798 
799 #ifdef notyet
800 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
801 		sc->jme_caps |= JME_CAP_PMCAP;
802 #endif
803 
804 	/*
805 	 * Create sysctl tree
806 	 */
807 	jme_sysctl_node(sc);
808 
809 	/* Allocate DMA stuffs */
810 	error = jme_dma_alloc(sc);
811 	if (error)
812 		goto fail;
813 
814 	ifp->if_softc = sc;
815 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
816 	ifp->if_init = jme_init;
817 	ifp->if_ioctl = jme_ioctl;
818 	ifp->if_start = jme_start;
819 #ifdef DEVICE_POLLING
820 	ifp->if_poll = jme_poll;
821 #endif
822 	ifp->if_watchdog = jme_watchdog;
823 	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
824 	ifq_set_ready(&ifp->if_snd);
825 
826 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
827 	ifp->if_capabilities = IFCAP_HWCSUM |
828 			       IFCAP_VLAN_MTU |
829 			       IFCAP_VLAN_HWTAGGING;
830 	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
831 		ifp->if_capabilities |= IFCAP_RSS;
832 	ifp->if_capenable = ifp->if_capabilities;
833 
834 	/*
835 	 * Disable TXCSUM by default to improve bulk data
836 	 * transmit performance (+20Mbps improvement).
837 	 */
838 	ifp->if_capenable &= ~IFCAP_TXCSUM;
839 
840 	if (ifp->if_capenable & IFCAP_TXCSUM)
841 		ifp->if_hwassist = JME_CSUM_FEATURES;
842 
843 	/* Set up MII bus. */
844 	error = mii_phy_probe(dev, &sc->jme_miibus,
845 			      jme_mediachange, jme_mediastatus);
846 	if (error) {
847 		device_printf(dev, "no PHY found!\n");
848 		goto fail;
849 	}
850 
851 	/*
852 	 * Save PHYADDR for FPGA mode PHY.
853 	 */
854 	if (sc->jme_caps & JME_CAP_FPGA) {
855 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
856 
857 		if (mii->mii_instance != 0) {
858 			struct mii_softc *miisc;
859 
860 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
861 				if (miisc->mii_phy != 0) {
862 					sc->jme_phyaddr = miisc->mii_phy;
863 					break;
864 				}
865 			}
866 			if (sc->jme_phyaddr != 0) {
867 				device_printf(sc->jme_dev,
868 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
869 				/* vendor magic. */
870 				jme_miibus_writereg(dev, sc->jme_phyaddr,
871 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
872 
873 				/* XXX should we clear JME_WA_EXTFIFO */
874 			}
875 		}
876 	}
877 
878 	ether_ifattach(ifp, eaddr, NULL);
879 
880 	/* Tell the upper layer(s) we support long frames. */
881 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
882 
883 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
884 			       &sc->jme_irq_handle, ifp->if_serializer);
885 	if (error) {
886 		device_printf(dev, "could not set up interrupt handler.\n");
887 		ether_ifdetach(ifp);
888 		goto fail;
889 	}
890 
891 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
892 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
893 	return 0;
894 fail:
895 	jme_detach(dev);
896 	return (error);
897 }
898 
899 static int
900 jme_detach(device_t dev)
901 {
902 	struct jme_softc *sc = device_get_softc(dev);
903 
904 	if (device_is_attached(dev)) {
905 		struct ifnet *ifp = &sc->arpcom.ac_if;
906 
907 		lwkt_serialize_enter(ifp->if_serializer);
908 		jme_stop(sc);
909 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
910 		lwkt_serialize_exit(ifp->if_serializer);
911 
912 		ether_ifdetach(ifp);
913 	}
914 
915 	if (sc->jme_sysctl_tree != NULL)
916 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
917 
918 	if (sc->jme_miibus != NULL)
919 		device_delete_child(dev, sc->jme_miibus);
920 	bus_generic_detach(dev);
921 
922 	if (sc->jme_irq_res != NULL) {
923 		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
924 				     sc->jme_irq_res);
925 	}
926 
927 	if (sc->jme_mem_res != NULL) {
928 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
929 				     sc->jme_mem_res);
930 	}
931 
932 	jme_dma_free(sc);
933 
934 	return (0);
935 }
936 
937 static void
938 jme_sysctl_node(struct jme_softc *sc)
939 {
940 	int coal_max;
941 #ifdef JME_RSS_DEBUG
942 	char rx_ring_pkt[32];
943 	int r;
944 #endif
945 
946 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
947 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
948 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
949 				device_get_nameunit(sc->jme_dev),
950 				CTLFLAG_RD, 0, "");
951 	if (sc->jme_sysctl_tree == NULL) {
952 		device_printf(sc->jme_dev, "can't add sysctl node\n");
953 		return;
954 	}
955 
956 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
957 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
958 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
959 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
960 
961 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
962 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
963 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
964 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
965 
966 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
967 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
968 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
969 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
970 
971 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
972 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
973 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
974 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
975 
976 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
977 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
978 		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
979 		       0, "RX desc count");
980 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
981 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
982 		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
983 		       0, "TX desc count");
984 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
985 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
986 		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
987 		       0, "RX ring count");
988 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
989 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
990 		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
991 		       0, "RX ring in use");
992 #ifdef JME_RSS_DEBUG
993 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
994 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
995 		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
996 		       0, "RSS debug level");
997 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
998 		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
999 		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
1000 				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1001 				rx_ring_pkt, CTLFLAG_RW,
1002 				&sc->jme_rx_ring_pkt[r],
1003 				0, "RXed packets");
1004 	}
1005 #endif
1006 
1007 	/*
1008 	 * Set default coalesce values
1009 	 */
1010 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1011 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1012 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1013 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1014 
1015 	/*
1016 	 * Adjust coalesce values, in case the number of TX/RX descs
1017 	 * was set to a small value by the user.
1018 	 *
1019 	 * NOTE: coal_max will not be zero, since the number of descs
1020 	 * must be aligned to JME_NDESC_ALIGN (currently 16).
1021 	 */
1022 	coal_max = sc->jme_tx_desc_cnt / 6;
1023 	if (coal_max < sc->jme_tx_coal_pkt)
1024 		sc->jme_tx_coal_pkt = coal_max;
1025 
1026 	coal_max = sc->jme_rx_desc_cnt / 4;
1027 	if (coal_max < sc->jme_rx_coal_pkt)
1028 		sc->jme_rx_coal_pkt = coal_max;
1029 }
1030 
1031 static int
1032 jme_dma_alloc(struct jme_softc *sc)
1033 {
1034 	struct jme_txdesc *txd;
1035 	bus_dmamem_t dmem;
1036 	int error, i;
1037 
1038 	sc->jme_cdata.jme_txdesc =
1039 	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1040 		M_DEVBUF, M_WAITOK | M_ZERO);
1041 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1042 		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
1043 		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1044 			M_DEVBUF, M_WAITOK | M_ZERO);
1045 	}
1046 
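	/*
	 * Two parent tags are used: one for the descriptor rings,
	 * which must respect the ring boundary constraint, and one
	 * for the data buffers and the shadow status block.
	 */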
1047 	/* Create parent ring tag. */
1048 	error = bus_dma_tag_create(NULL,/* parent */
1049 	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
1050 	    sc->jme_lowaddr,		/* lowaddr */
1051 	    BUS_SPACE_MAXADDR,		/* highaddr */
1052 	    NULL, NULL,			/* filter, filterarg */
1053 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1054 	    0,				/* nsegments */
1055 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1056 	    0,				/* flags */
1057 	    &sc->jme_cdata.jme_ring_tag);
1058 	if (error) {
1059 		device_printf(sc->jme_dev,
1060 		    "could not create parent ring DMA tag.\n");
1061 		return error;
1062 	}
1063 
1064 	/*
1065 	 * Create DMA stuffs for TX ring
1066 	 */
1067 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1068 			JME_TX_RING_ALIGN, 0,
1069 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1070 			JME_TX_RING_SIZE(sc),
1071 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1072 	if (error) {
1073 		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1074 		return error;
1075 	}
1076 	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1077 	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1078 	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1079 	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
1080 
1081 	/*
1082 	 * Create DMA stuffs for RX rings
1083 	 */
1084 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1085 		error = jme_rxring_dma_alloc(sc, i);
1086 		if (error)
1087 			return error;
1088 	}
1089 
1090 	/* Create parent buffer tag. */
1091 	error = bus_dma_tag_create(NULL,/* parent */
1092 	    1, 0,			/* algnmnt, boundary */
1093 	    sc->jme_lowaddr,		/* lowaddr */
1094 	    BUS_SPACE_MAXADDR,		/* highaddr */
1095 	    NULL, NULL,			/* filter, filterarg */
1096 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1097 	    0,				/* nsegments */
1098 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1099 	    0,				/* flags */
1100 	    &sc->jme_cdata.jme_buffer_tag);
1101 	if (error) {
1102 		device_printf(sc->jme_dev,
1103 		    "could not create parent buffer DMA tag.\n");
1104 		return error;
1105 	}
1106 
1107 	/*
1108 	 * Create DMA stuffs for shadow status block
1109 	 */
1110 	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1111 			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1112 			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1113 	if (error) {
1114 		device_printf(sc->jme_dev,
1115 		    "could not create shadow status block.\n");
1116 		return error;
1117 	}
1118 	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1119 	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1120 	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1121 	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1122 
1123 	/*
1124 	 * Create DMA stuffs for TX buffers
1125 	 */
1126 
1127 	/* Create tag for Tx buffers. */
1128 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1129 	    1, 0,			/* algnmnt, boundary */
1130 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1131 	    BUS_SPACE_MAXADDR,		/* highaddr */
1132 	    NULL, NULL,			/* filter, filterarg */
1133 	    JME_JUMBO_FRAMELEN,		/* maxsize */
1134 	    JME_MAXTXSEGS,		/* nsegments */
1135 	    JME_MAXSEGSIZE,		/* maxsegsize */
1136 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1137 	    &sc->jme_cdata.jme_tx_tag);
1138 	if (error != 0) {
1139 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1140 		return error;
1141 	}
1142 
1143 	/* Create DMA maps for Tx buffers. */
1144 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1145 		txd = &sc->jme_cdata.jme_txdesc[i];
1146 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1147 				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1148 				&txd->tx_dmamap);
1149 		if (error) {
1150 			int j;
1151 
1152 			device_printf(sc->jme_dev,
1153 			    "could not create %dth Tx dmamap.\n", i);
1154 
1155 			for (j = 0; j < i; ++j) {
1156 				txd = &sc->jme_cdata.jme_txdesc[j];
1157 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1158 						   txd->tx_dmamap);
1159 			}
1160 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1161 			sc->jme_cdata.jme_tx_tag = NULL;
1162 			return error;
1163 		}
1164 	}
1165 
1166 	/*
1167 	 * Create DMA stuffs for RX buffers
1168 	 */
1169 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1170 		error = jme_rxbuf_dma_alloc(sc, i);
1171 		if (error)
1172 			return error;
1173 	}
1174 	return 0;
1175 }
1176 
1177 static void
1178 jme_dma_free(struct jme_softc *sc)
1179 {
1180 	struct jme_txdesc *txd;
1181 	struct jme_rxdesc *rxd;
1182 	struct jme_rxdata *rdata;
1183 	int i, r;
1184 
1185 	/* Tx ring */
1186 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1187 		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1188 		    sc->jme_cdata.jme_tx_ring_map);
1189 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1190 		    sc->jme_cdata.jme_tx_ring,
1191 		    sc->jme_cdata.jme_tx_ring_map);
1192 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1193 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1194 	}
1195 
1196 	/* Rx ring */
1197 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1198 		rdata = &sc->jme_cdata.jme_rx_data[r];
1199 		if (rdata->jme_rx_ring_tag != NULL) {
1200 			bus_dmamap_unload(rdata->jme_rx_ring_tag,
1201 					  rdata->jme_rx_ring_map);
1202 			bus_dmamem_free(rdata->jme_rx_ring_tag,
1203 					rdata->jme_rx_ring,
1204 					rdata->jme_rx_ring_map);
1205 			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1206 			rdata->jme_rx_ring_tag = NULL;
1207 		}
1208 	}
1209 
1210 	/* Tx buffers */
1211 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1212 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1213 			txd = &sc->jme_cdata.jme_txdesc[i];
1214 			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1215 			    txd->tx_dmamap);
1216 		}
1217 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1218 		sc->jme_cdata.jme_tx_tag = NULL;
1219 	}
1220 
1221 	/* Rx buffers */
1222 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1223 		rdata = &sc->jme_cdata.jme_rx_data[r];
1224 		if (rdata->jme_rx_tag != NULL) {
1225 			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1226 				rxd = &rdata->jme_rxdesc[i];
1227 				bus_dmamap_destroy(rdata->jme_rx_tag,
1228 						   rxd->rx_dmamap);
1229 			}
1230 			bus_dmamap_destroy(rdata->jme_rx_tag,
1231 					   rdata->jme_rx_sparemap);
1232 			bus_dma_tag_destroy(rdata->jme_rx_tag);
1233 			rdata->jme_rx_tag = NULL;
1234 		}
1235 	}
1236 
1237 	/* Shadow status block. */
1238 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1239 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1240 		    sc->jme_cdata.jme_ssb_map);
1241 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1242 		    sc->jme_cdata.jme_ssb_block,
1243 		    sc->jme_cdata.jme_ssb_map);
1244 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1245 		sc->jme_cdata.jme_ssb_tag = NULL;
1246 	}
1247 
1248 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1249 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1250 		sc->jme_cdata.jme_buffer_tag = NULL;
1251 	}
1252 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1253 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1254 		sc->jme_cdata.jme_ring_tag = NULL;
1255 	}
1256 
1257 	if (sc->jme_cdata.jme_txdesc != NULL) {
1258 		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1259 		sc->jme_cdata.jme_txdesc = NULL;
1260 	}
1261 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1262 		rdata = &sc->jme_cdata.jme_rx_data[r];
1263 		if (rdata->jme_rxdesc != NULL) {
1264 			kfree(rdata->jme_rxdesc, M_DEVBUF);
1265 			rdata->jme_rxdesc = NULL;
1266 		}
1267 	}
1268 }
1269 
1270 /*
1271  *	Make sure the interface is stopped at reboot time.
1272  */
1273 static int
1274 jme_shutdown(device_t dev)
1275 {
1276 	return jme_suspend(dev);
1277 }
1278 
1279 #ifdef notyet
1280 /*
1281  * Unlike other ethernet controllers, the JMC250 requires the
1282  * link speed to be explicitly reset to 10/100Mbps, as a
1283  * gigabit link consumes more than 375mA.
1284  * Note that we reset the link speed to 10/100Mbps with
1285  * auto-negotiation, but we don't know whether that operation
1286  * will succeed, as we have no control after powering off; if
1287  * the renegotiation fails, WOL may not work.  Running at
1288  * 1Gbps draws more power than the 375mA at 3.3V specified in
1289  * the PCI specification, and that would result in power to
1290  * the ethernet controller being shut down completely.
1291  *
1292  * TODO
1293  *  Save current negotiated media speed/duplex/flow-control
1294  *  to softc and restore the same link again after resuming.
1295  *  PHY handling such as power down/resetting to 100Mbps
1296  *  may be better handled in suspend method in phy driver.
1297  */
1298 static void
1299 jme_setlinkspeed(struct jme_softc *sc)
1300 {
1301 	struct mii_data *mii;
1302 	int aneg, i;
1303 
1304 	JME_LOCK_ASSERT(sc);
1305 
1306 	mii = device_get_softc(sc->jme_miibus);
1307 	mii_pollstat(mii);
1308 	aneg = 0;
1309 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1310 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1311 		case IFM_10_T:
1312 		case IFM_100_TX:
1313 			return;
1314 		case IFM_1000_T:
1315 			aneg++;
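			/* FALLTHROUGH */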
1316 		default:
1317 			break;
1318 		}
1319 	}
1320 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1321 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1322 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1323 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1324 	    BMCR_AUTOEN | BMCR_STARTNEG);
1325 	DELAY(1000);
1326 	if (aneg != 0) {
1327 		/* Poll link state until jme(4) gets a 10/100 link. */
1328 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1329 			mii_pollstat(mii);
1330 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1331 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1332 				case IFM_10_T:
1333 				case IFM_100_TX:
1334 					jme_mac_config(sc);
1335 					return;
1336 				default:
1337 					break;
1338 				}
1339 			}
1340 			JME_UNLOCK(sc);
1341 			pause("jmelnk", hz);
1342 			JME_LOCK(sc);
1343 		}
1344 		if (i == MII_ANEGTICKS_GIGE)
1345 			device_printf(sc->jme_dev, "establishing link failed, "
1346 			    "WOL may not work!");
1347 	}
1348 	/*
1349 	 * No link, force MAC to have 100Mbps, full-duplex link.
1350 	 * This is the last resort and may or may not work.
1351 	 */
1352 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1353 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1354 	jme_mac_config(sc);
1355 }
1356 
1357 static void
1358 jme_setwol(struct jme_softc *sc)
1359 {
1360 	struct ifnet *ifp = &sc->arpcom.ac_if;
1361 	uint32_t gpr, pmcs;
1362 	uint16_t pmstat;
1363 	int pmc;
1364 
1365 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1366 		/* No PME capability, PHY power down. */
1367 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1368 		    MII_BMCR, BMCR_PDOWN);
1369 		return;
1370 	}
1371 
1372 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1373 	pmcs = CSR_READ_4(sc, JME_PMCS);
1374 	pmcs &= ~PMCS_WOL_ENB_MASK;
1375 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1376 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1377 		/* Enable PME message. */
1378 		gpr |= GPREG0_PME_ENB;
1379 		/* For gigabit controllers, reset link speed to 10/100. */
1380 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1381 			jme_setlinkspeed(sc);
1382 	}
1383 
1384 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1385 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1386 
1387 	/* Request PME. */
1388 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1389 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1390 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1391 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1392 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1393 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1394 		/* No WOL, PHY power down. */
1395 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1396 		    MII_BMCR, BMCR_PDOWN);
1397 	}
1398 }
1399 #endif
1400 
1401 static int
1402 jme_suspend(device_t dev)
1403 {
1404 	struct jme_softc *sc = device_get_softc(dev);
1405 	struct ifnet *ifp = &sc->arpcom.ac_if;
1406 
1407 	lwkt_serialize_enter(ifp->if_serializer);
1408 	jme_stop(sc);
1409 #ifdef notyet
1410 	jme_setwol(sc);
1411 #endif
1412 	lwkt_serialize_exit(ifp->if_serializer);
1413 
1414 	return (0);
1415 }
1416 
1417 static int
1418 jme_resume(device_t dev)
1419 {
1420 	struct jme_softc *sc = device_get_softc(dev);
1421 	struct ifnet *ifp = &sc->arpcom.ac_if;
1422 #ifdef notyet
1423 	int pmc;
1424 #endif
1425 
1426 	lwkt_serialize_enter(ifp->if_serializer);
1427 
1428 #ifdef notyet
1429 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1430 		uint16_t pmstat;
1431 
1432 		pmstat = pci_read_config(sc->jme_dev,
1433 		    pmc + PCIR_POWER_STATUS, 2);
1434 		/* Disable PME and clear PME status. */
1435 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1436 		pci_write_config(sc->jme_dev,
1437 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1438 	}
1439 #endif
1440 
1441 	if (ifp->if_flags & IFF_UP)
1442 		jme_init(sc);
1443 
1444 	lwkt_serialize_exit(ifp->if_serializer);
1445 
1446 	return (0);
1447 }
1448 
1449 static int
1450 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1451 {
1452 	struct jme_txdesc *txd;
1453 	struct jme_desc *desc;
1454 	struct mbuf *m;
1455 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1456 	int maxsegs, nsegs;
1457 	int error, i, prod, symbol_desc;
1458 	uint32_t cflags, flag64;
1459 
1460 	M_ASSERTPKTHDR((*m_head));
1461 
1462 	prod = sc->jme_cdata.jme_tx_prod;
1463 	txd = &sc->jme_cdata.jme_txdesc[prod];
1464 
1465 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1466 		symbol_desc = 1;
1467 	else
1468 		symbol_desc = 0;
1469 
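	/*
	 * With DMA addresses above 4G, a chain starts with an extra
	 * payload-free "symbol" descriptor (see below), so reserve one
	 * more descriptor slot for it in addition to JME_TXD_RSVD.
	 */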
1470 	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1471 		  (JME_TXD_RSVD + symbol_desc);
1472 	if (maxsegs > JME_MAXTXSEGS)
1473 		maxsegs = JME_MAXTXSEGS;
1474 	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1475 		("not enough segments %d\n", maxsegs));
1476 
1477 	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1478 			txd->tx_dmamap, m_head,
1479 			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1480 	if (error)
1481 		goto fail;
1482 
1483 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1484 			BUS_DMASYNC_PREWRITE);
1485 
1486 	m = *m_head;
1487 	cflags = 0;
1488 
1489 	/* Configure checksum offload. */
1490 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1491 		cflags |= JME_TD_IPCSUM;
1492 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1493 		cflags |= JME_TD_TCPCSUM;
1494 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1495 		cflags |= JME_TD_UDPCSUM;
1496 
1497 	/* Configure VLAN. */
1498 	if (m->m_flags & M_VLANTAG) {
1499 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1500 		cflags |= JME_TD_VLAN_TAG;
1501 	}
1502 
1503 	desc = &sc->jme_cdata.jme_tx_ring[prod];
1504 	desc->flags = htole32(cflags);
1505 	desc->addr_hi = htole32(m->m_pkthdr.len);
1506 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1507 		/*
1508 		 * Use 64bits TX desc chain format.
1509 		 *
1510 		 * The first TX desc of the chain, which is setup here,
1511 		 * is just a symbol TX desc carrying no payload.
1512 		 */
1513 		flag64 = JME_TD_64BIT;
1514 		desc->buflen = 0;
1515 		desc->addr_lo = 0;
1516 
1517 		/* No effective TX desc is consumed */
1518 		i = 0;
1519 	} else {
1520 		/*
1521 		 * Use 32bits TX desc chain format.
1522 		 *
1523 		 * The first TX desc of the chain, which is setup here,
1524 		 * is an effective TX desc carrying the first segment of
1525 		 * the mbuf chain.
1526 		 */
1527 		flag64 = 0;
1528 		desc->buflen = htole32(txsegs[0].ds_len);
1529 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1530 
1531 		/* One effective TX desc is consumed */
1532 		i = 1;
1533 	}
1534 	sc->jme_cdata.jme_tx_cnt++;
1535 	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1536 		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1537 	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1538 
1539 	txd->tx_ndesc = 1 - i;
1540 	for (; i < nsegs; i++) {
1541 		desc = &sc->jme_cdata.jme_tx_ring[prod];
1542 		desc->flags = htole32(JME_TD_OWN | flag64);
1543 		desc->buflen = htole32(txsegs[i].ds_len);
1544 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1545 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1546 
1547 		sc->jme_cdata.jme_tx_cnt++;
1548 		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1549 			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1550 		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1551 	}
1552 
1553 	/* Update producer index. */
1554 	sc->jme_cdata.jme_tx_prod = prod;
1555 	/*
1556 	 * Finally, request an interrupt and give ownership of the
1557 	 * first descriptor to the hardware.
1558 	 */
1559 	desc = txd->tx_desc;
1560 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1561 
1562 	txd->tx_m = m;
1563 	txd->tx_ndesc += nsegs;
1564 
1565 	return 0;
1566 fail:
1567 	m_freem(*m_head);
1568 	*m_head = NULL;
1569 	return error;
1570 }
1571 
1572 static void
1573 jme_start(struct ifnet *ifp)
1574 {
1575 	struct jme_softc *sc = ifp->if_softc;
1576 	struct mbuf *m_head;
1577 	int enq = 0;
1578 
1579 	ASSERT_SERIALIZED(ifp->if_serializer);
1580 
1581 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1582 		ifq_purge(&ifp->if_snd);
1583 		return;
1584 	}
1585 
1586 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1587 		return;
1588 
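	/*
	 * Opportunistically reclaim finished Tx descriptors when the
	 * ring fills past the high-water mark.
	 */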
1589 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1590 		jme_txeof(sc);
1591 
1592 	while (!ifq_is_empty(&ifp->if_snd)) {
1593 		/*
1594 		 * Check number of available TX descs, always
1595 		 * leave JME_TXD_RSVD free TX descs.
1596 		 */
1597 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1598 		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1599 			ifp->if_flags |= IFF_OACTIVE;
1600 			break;
1601 		}
1602 
1603 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1604 		if (m_head == NULL)
1605 			break;
1606 
1607 		/*
1608 		 * Pack the data into the transmit ring. If we
1609 		 * don't have room, set the OACTIVE flag and wait
1610 		 * for the NIC to drain the ring.
1611 		 */
1612 		if (jme_encap(sc, &m_head)) {
1613 			KKASSERT(m_head == NULL);
1614 			ifp->if_oerrors++;
1615 			ifp->if_flags |= IFF_OACTIVE;
1616 			break;
1617 		}
1618 		enq++;
1619 
1620 		/*
1621 		 * If there's a BPF listener, bounce a copy of this frame
1622 		 * to him.
1623 		 */
1624 		ETHER_BPF_MTAP(ifp, m_head);
1625 	}
1626 
1627 	if (enq > 0) {
1628 		/*
1629 		 * Reading TXCSR takes a very long time under heavy load,
1630 		 * so cache the TXCSR value and write the ORed value with
1631 		 * the kick command to TXCSR.  This saves one register
1632 		 * access cycle.
1633 		 */
1634 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1635 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1636 		/* Set a timeout in case the chip goes out to lunch. */
1637 		ifp->if_timer = JME_TX_TIMEOUT;
1638 	}
1639 }
1640 
1641 static void
1642 jme_watchdog(struct ifnet *ifp)
1643 {
1644 	struct jme_softc *sc = ifp->if_softc;
1645 
1646 	ASSERT_SERIALIZED(ifp->if_serializer);
1647 
1648 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1649 		if_printf(ifp, "watchdog timeout (missed link)\n");
1650 		ifp->if_oerrors++;
1651 		jme_init(sc);
1652 		return;
1653 	}
1654 
1655 	jme_txeof(sc);
1656 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1657 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1658 			  "-- recovering\n");
1659 		if (!ifq_is_empty(&ifp->if_snd))
1660 			if_devstart(ifp);
1661 		return;
1662 	}
1663 
1664 	if_printf(ifp, "watchdog timeout\n");
1665 	ifp->if_oerrors++;
1666 	jme_init(sc);
1667 	if (!ifq_is_empty(&ifp->if_snd))
1668 		if_devstart(ifp);
1669 }
1670 
1671 static int
1672 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1673 {
1674 	struct jme_softc *sc = ifp->if_softc;
1675 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1676 	struct ifreq *ifr = (struct ifreq *)data;
1677 	int error = 0, mask;
1678 
1679 	ASSERT_SERIALIZED(ifp->if_serializer);
1680 
1681 	switch (cmd) {
1682 	case SIOCSIFMTU:
1683 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1684 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1685 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1686 			error = EINVAL;
1687 			break;
1688 		}
1689 
1690 		if (ifp->if_mtu != ifr->ifr_mtu) {
1691 			/*
1692 			 * No special configuration is required when the
1693 			 * interface MTU is changed, but the availability of Tx
1694 			 * checksum offload should be checked against the new
1695 			 * MTU size, as the Tx FIFO is just 2K.
1696 			 */
1697 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1698 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1699 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1700 			}
1701 			ifp->if_mtu = ifr->ifr_mtu;
1702 			if (ifp->if_flags & IFF_RUNNING)
1703 				jme_init(sc);
1704 		}
1705 		break;
1706 
1707 	case SIOCSIFFLAGS:
1708 		if (ifp->if_flags & IFF_UP) {
1709 			if (ifp->if_flags & IFF_RUNNING) {
1710 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1711 				    (IFF_PROMISC | IFF_ALLMULTI))
1712 					jme_set_filter(sc);
1713 			} else {
1714 				jme_init(sc);
1715 			}
1716 		} else {
1717 			if (ifp->if_flags & IFF_RUNNING)
1718 				jme_stop(sc);
1719 		}
1720 		sc->jme_if_flags = ifp->if_flags;
1721 		break;
1722 
1723 	case SIOCADDMULTI:
1724 	case SIOCDELMULTI:
1725 		if (ifp->if_flags & IFF_RUNNING)
1726 			jme_set_filter(sc);
1727 		break;
1728 
1729 	case SIOCSIFMEDIA:
1730 	case SIOCGIFMEDIA:
1731 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1732 		break;
1733 
1734 	case SIOCSIFCAP:
1735 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1736 
1737 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1738 			ifp->if_capenable ^= IFCAP_TXCSUM;
1739 			if (IFCAP_TXCSUM & ifp->if_capenable)
1740 				ifp->if_hwassist |= JME_CSUM_FEATURES;
1741 			else
1742 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1743 		}
1744 		if (mask & IFCAP_RXCSUM) {
1745 			uint32_t reg;
1746 
1747 			ifp->if_capenable ^= IFCAP_RXCSUM;
1748 			reg = CSR_READ_4(sc, JME_RXMAC);
1749 			reg &= ~RXMAC_CSUM_ENB;
1750 			if (ifp->if_capenable & IFCAP_RXCSUM)
1751 				reg |= RXMAC_CSUM_ENB;
1752 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1753 		}
1754 
1755 		if (mask & IFCAP_VLAN_HWTAGGING) {
1756 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1757 			jme_set_vlan(sc);
1758 		}
1759 
1760 		if (mask & IFCAP_RSS) {
1761 			ifp->if_capenable ^= IFCAP_RSS;
1762 			if (ifp->if_flags & IFF_RUNNING)
1763 				jme_init(sc);
1764 		}
1765 		break;
1766 
1767 	default:
1768 		error = ether_ioctl(ifp, cmd, data);
1769 		break;
1770 	}
1771 	return (error);
1772 }
1773 
1774 static void
1775 jme_mac_config(struct jme_softc *sc)
1776 {
1777 	struct mii_data *mii;
1778 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1779 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1780 
1781 	mii = device_get_softc(sc->jme_miibus);
1782 
1783 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1784 	DELAY(10);
1785 	CSR_WRITE_4(sc, JME_GHC, 0);
1786 	ghc = 0;
1787 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1788 	rxmac &= ~RXMAC_FC_ENB;
1789 	txmac = CSR_READ_4(sc, JME_TXMAC);
1790 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1791 	txpause = CSR_READ_4(sc, JME_TXPFC);
1792 	txpause &= ~TXPFC_PAUSE_ENB;
1793 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1794 		ghc |= GHC_FULL_DUPLEX;
1795 		rxmac &= ~RXMAC_COLL_DET_ENB;
1796 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1797 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1798 		    TXMAC_FRAME_BURST);
1799 #ifdef notyet
1800 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1801 			txpause |= TXPFC_PAUSE_ENB;
1802 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1803 			rxmac |= RXMAC_FC_ENB;
1804 #endif
1805 		/* Disable retry transmit timer/retry limit. */
1806 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1807 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1808 	} else {
1809 		rxmac |= RXMAC_COLL_DET_ENB;
1810 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1811 		/* Enable retry transmit timer/retry limit. */
1812 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1813 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1814 	}
1815 
1816 	/*
1817 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1818 	 */
1819 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1820 	gp1 &= ~GPREG1_WA_HDX;
1821 
1822 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1823 		hdx = 1;
1824 
1825 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1826 	case IFM_10_T:
1827 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1828 		if (hdx)
1829 			gp1 |= GPREG1_WA_HDX;
1830 		break;
1831 
1832 	case IFM_100_TX:
1833 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1834 		if (hdx)
1835 			gp1 |= GPREG1_WA_HDX;
1836 
1837 		/*
1838 		 * Use extended FIFO depth to work around CRC errors
1839 		 * emitted by chips before the JMC250B.
1840 		 */
1841 		phyconf = JMPHY_CONF_EXTFIFO;
1842 		break;
1843 
1844 	case IFM_1000_T:
1845 		if (sc->jme_caps & JME_CAP_FASTETH)
1846 			break;
1847 
1848 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1849 		if (hdx)
1850 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1851 		break;
1852 
1853 	default:
1854 		break;
1855 	}
1856 	CSR_WRITE_4(sc, JME_GHC, ghc);
1857 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1858 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1859 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1860 
1861 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
1862 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1863 				    JMPHY_CONF, phyconf);
1864 	}
1865 	if (sc->jme_workaround & JME_WA_HDX)
1866 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1867 }
1868 
1869 static void
1870 jme_intr(void *xsc)
1871 {
1872 	struct jme_softc *sc = xsc;
1873 	struct ifnet *ifp = &sc->arpcom.ac_if;
1874 	uint32_t status;
1875 	int r;
1876 
1877 	ASSERT_SERIALIZED(ifp->if_serializer);
1878 
1879 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1880 	if (status == 0 || status == 0xFFFFFFFF)
1881 		return;
1882 
1883 	/* Disable interrupts. */
1884 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1885 
1886 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1887 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1888 		goto back;
1889 
1890 	/* Reset PCC counter/timer and Ack interrupts. */
1891 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1892 
1893 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1894 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1895 
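	/*
	 * Likewise, acknowledging a ring's Rx coalescing interrupt
	 * requires writing its completion bit back along with the
	 * coalescing bits.
	 */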
1896 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
1897 		if (status & jme_rx_status[r].jme_coal) {
1898 			status |= jme_rx_status[r].jme_coal |
1899 				  jme_rx_status[r].jme_comp;
1900 		}
1901 	}
1902 
1903 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1904 
1905 	if (ifp->if_flags & IFF_RUNNING) {
1906 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1907 			jme_rx_intr(sc, status);
1908 
1909 		if (status & INTR_RXQ_DESC_EMPTY) {
1910 			/*
1911 			 * Notify the hardware that new Rx buffers are
1912 			 * available.  Reading RXCSR takes a very long time
1913 			 * under heavy load, so cache the RXCSR value and
1914 			 * write it back ORed with the kick command.  This
1915 			 * saves one register access cycle.
1916 			 */
1917 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1918 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1919 		}
1920 
1921 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1922 			jme_txeof(sc);
1923 			if (!ifq_is_empty(&ifp->if_snd))
1924 				if_devstart(ifp);
1925 		}
1926 	}
1927 back:
1928 	/* Reenable interrupts. */
1929 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1930 }
1931 
1932 static void
1933 jme_txeof(struct jme_softc *sc)
1934 {
1935 	struct ifnet *ifp = &sc->arpcom.ac_if;
1936 	struct jme_txdesc *txd;
1937 	uint32_t status;
1938 	int cons, nsegs;
1939 
1940 	cons = sc->jme_cdata.jme_tx_cons;
1941 	if (cons == sc->jme_cdata.jme_tx_prod)
1942 		return;
1943 
1944 	/*
1945 	 * Go through our Tx list and free mbufs for those
1946 	 * frames which have been transmitted.
1947 	 */
1948 	while (cons != sc->jme_cdata.jme_tx_prod) {
1949 		txd = &sc->jme_cdata.jme_txdesc[cons];
1950 		KASSERT(txd->tx_m != NULL,
1951 			("%s: freeing NULL mbuf!\n", __func__));
1952 
1953 		status = le32toh(txd->tx_desc->flags);
1954 		if ((status & JME_TD_OWN) == JME_TD_OWN)
1955 			break;
1956 
1957 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1958 			ifp->if_oerrors++;
1959 		} else {
1960 			ifp->if_opackets++;
1961 			if (status & JME_TD_COLLISION) {
1962 				ifp->if_collisions +=
1963 				    le32toh(txd->tx_desc->buflen) &
1964 				    JME_TD_BUF_LEN_MASK;
1965 			}
1966 		}
1967 
1968 		/*
1969 		 * Only the first descriptor of a multi-descriptor
1970 		 * transmission is updated, so the driver has to skip the
1971 		 * entire descriptor chain for the transmitted frame.  In
1972 		 * other words, the JME_TD_OWN bit is valid only in the
1973 		 * first descriptor of a multi-descriptor transmission.
1974 		 */
1975 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1976 			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
1977 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
1978 		}
1979 
1980 		/* Reclaim transferred mbufs. */
1981 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1982 		m_freem(txd->tx_m);
1983 		txd->tx_m = NULL;
1984 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1985 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
1986 			("%s: Active Tx desc counter was garbled\n", __func__));
1987 		txd->tx_ndesc = 0;
1988 	}
1989 	sc->jme_cdata.jme_tx_cons = cons;
1990 
1991 	if (sc->jme_cdata.jme_tx_cnt == 0)
1992 		ifp->if_timer = 0;
1993 
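	/*
	 * Clear OACTIVE only if a worst-case frame (jme_txd_spare
	 * descriptors) would still fit under the reserved-descriptor
	 * threshold.
	 */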
1994 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
1995 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
1996 		ifp->if_flags &= ~IFF_OACTIVE;
1997 }
1998 
1999 static __inline void
2000 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2001 {
2002 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2003 	int i;
2004 
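	/*
	 * Hand the existing buffers straight back to the hardware by
	 * re-arming their descriptors with the OWN bit; no mbufs are
	 * freed or allocated here.
	 */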
2005 	for (i = 0; i < count; ++i) {
2006 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2007 
2008 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2009 		desc->buflen = htole32(MCLBYTES);
2010 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2011 	}
2012 }
2013 
2014 static __inline struct pktinfo *
2015 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2016 {
2017 	if (flags & JME_RD_IPV4)
2018 		pi->pi_netisr = NETISR_IP;
2019 	else if (flags & JME_RD_IPV6)
2020 		pi->pi_netisr = NETISR_IPV6;
2021 	else
2022 		return NULL;
2023 
2024 	pi->pi_flags = 0;
2025 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2026 
2027 	if (flags & JME_RD_MORE_FRAG)
2028 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2029 	else if (flags & JME_RD_TCP)
2030 		pi->pi_l3proto = IPPROTO_TCP;
2031 	else if (flags & JME_RD_UDP)
2032 		pi->pi_l3proto = IPPROTO_UDP;
2033 	else
2034 		pi = NULL;
2035 	return pi;
2036 }
2037 
2038 /* Receive a frame. */
2039 static void
2040 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2041 {
2042 	struct ifnet *ifp = &sc->arpcom.ac_if;
2043 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2044 	struct jme_desc *desc;
2045 	struct jme_rxdesc *rxd;
2046 	struct mbuf *mp, *m;
2047 	uint32_t flags, status, hash, hashinfo;
2048 	int cons, count, nsegs;
2049 
2050 	cons = rdata->jme_rx_cons;
2051 	desc = &rdata->jme_rx_ring[cons];
2052 	flags = le32toh(desc->flags);
2053 	status = le32toh(desc->buflen);
2054 	hash = le32toh(desc->addr_hi);
2055 	hashinfo = le32toh(desc->addr_lo);
2056 	nsegs = JME_RX_NSEGS(status);
2057 
2058 	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
2059 			"hash 0x%08x, hash info 0x%08x\n",
2060 			ring, flags, hash, hashinfo);
2061 
2062 	if (status & JME_RX_ERR_STAT) {
2063 		ifp->if_ierrors++;
2064 		jme_discard_rxbufs(sc, ring, cons, nsegs);
2065 #ifdef JME_SHOW_ERRORS
2066 		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2067 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2068 #endif
2069 		rdata->jme_rx_cons += nsegs;
2070 		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2071 		return;
2072 	}
2073 
2074 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2075 	for (count = 0; count < nsegs; count++,
2076 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2077 		rxd = &rdata->jme_rxdesc[cons];
2078 		mp = rxd->rx_m;
2079 
2080 		/* Add a new receive buffer to the ring. */
2081 		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2082 			ifp->if_iqdrops++;
2083 			/* Reuse buffer. */
2084 			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2085 			if (rdata->jme_rxhead != NULL) {
2086 				m_freem(rdata->jme_rxhead);
2087 				JME_RXCHAIN_RESET(sc, ring);
2088 			}
2089 			break;
2090 		}
2091 
2092 		/*
2093 		 * Assume we've received a full-sized frame.
2094 		 * The actual size is fixed up when we encounter the end
2095 		 * of a multi-segment frame.
2096 		 */
2097 		mp->m_len = MCLBYTES;
2098 
2099 		/* Chain received mbufs. */
2100 		if (rdata->jme_rxhead == NULL) {
2101 			rdata->jme_rxhead = mp;
2102 			rdata->jme_rxtail = mp;
2103 		} else {
2104 			/*
2105 			 * The receive processor can handle a maximum frame
2106 			 * size of 65535 bytes.
2107 			 */
2108 			rdata->jme_rxtail->m_next = mp;
2109 			rdata->jme_rxtail = mp;
2110 		}
2111 
2112 		if (count == nsegs - 1) {
2113 			struct pktinfo pi0, *pi;
2114 
2115 			/* Last desc. for this frame. */
2116 			m = rdata->jme_rxhead;
2117 			m->m_pkthdr.len = rdata->jme_rxlen;
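			/*
			 * Fix up the per-mbuf lengths.  A sketch, assuming
			 * 2K clusters (MCLBYTES == 2048) and the 10-byte
			 * pad: a 3000-byte frame spans two segments; the
			 * first mbuf carries 2048 - 10 == 2038 bytes and
			 * the last carries the remaining 962 bytes.
			 */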
2118 			if (nsegs > 1) {
2119 				/* Set first mbuf size. */
2120 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2121 				/* Set last mbuf size. */
2122 				mp->m_len = rdata->jme_rxlen -
2123 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2124 				    (MCLBYTES * (nsegs - 2)));
2125 			} else {
2126 				m->m_len = rdata->jme_rxlen;
2127 			}
2128 			m->m_pkthdr.rcvif = ifp;
2129 
2130 			/*
2131 			 * Account for the 10 bytes of auto padding used to
2132 			 * align the IP header on a 32bit boundary.  Also
2133 			 * note that the CRC bytes are automatically removed
2134 			 * by the hardware.
2135 			 */
2136 			m->m_data += JME_RX_PAD_BYTES;
2137 
2138 			/* Set checksum information. */
2139 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2140 			    (flags & JME_RD_IPV4)) {
2141 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2142 				if (flags & JME_RD_IPCSUM)
2143 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2144 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2145 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2146 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2147 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2148 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2149 					m->m_pkthdr.csum_flags |=
2150 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2151 					m->m_pkthdr.csum_data = 0xffff;
2152 				}
2153 			}
2154 
2155 			/* Check for VLAN tagged packets. */
2156 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2157 			    (flags & JME_RD_VLAN_TAG)) {
2158 				m->m_pkthdr.ether_vlantag =
2159 				    flags & JME_RD_VLAN_MASK;
2160 				m->m_flags |= M_VLANTAG;
2161 			}
2162 
2163 			ifp->if_ipackets++;
2164 
2165 			if (ifp->if_capenable & IFCAP_RSS)
2166 				pi = jme_pktinfo(&pi0, flags);
2167 			else
2168 				pi = NULL;
2169 
2170 			if (pi != NULL &&
2171 			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
2172 				m->m_flags |= M_HASH;
2173 				m->m_pkthdr.hash = toeplitz_hash(hash);
2174 			}
2175 
2176 #ifdef JME_RSS_DEBUG
2177 			if (pi != NULL) {
2178 				JME_RSS_DPRINTF(sc, 10,
2179 				    "isr %d flags %08x, l3 %d %s\n",
2180 				    pi->pi_netisr, pi->pi_flags,
2181 				    pi->pi_l3proto,
2182 				    (m->m_flags & M_HASH) ? "hash" : "");
2183 			}
2184 #endif
2185 
2186 			/* Pass it on. */
2187 			ether_input_chain(ifp, m, pi, chain);
2188 
2189 			/* Reset mbuf chains. */
2190 			JME_RXCHAIN_RESET(sc, ring);
2191 #ifdef JME_RSS_DEBUG
2192 			sc->jme_rx_ring_pkt[ring]++;
2193 #endif
2194 		}
2195 	}
2196 
2197 	rdata->jme_rx_cons += nsegs;
2198 	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2199 }
2200 
2201 static int
2202 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2203 		int count)
2204 {
2205 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2206 	struct jme_desc *desc;
2207 	int nsegs, prog, pktlen;
2208 
2209 	prog = 0;
2210 	for (;;) {
2211 #ifdef DEVICE_POLLING
2212 		if (count >= 0 && count-- == 0)
2213 			break;
2214 #endif
2215 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2216 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2217 			break;
2218 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2219 			break;
2220 
2221 		/*
2222 		 * Check the number of segments against the received
2223 		 * byte count.  A mismatch would indicate that the
2224 		 * hardware is still updating the Rx descriptors.  I'm
2225 		 * not sure whether this check is needed.
2226 		 */
2227 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2228 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2229 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2230 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2231 				  "and packet size(%d) mismatch\n",
2232 				  nsegs, pktlen);
2233 			break;
2234 		}
2235 
2236 		/* Received a frame. */
2237 		jme_rxpkt(sc, ring, chain);
2238 		prog++;
2239 	}
2240 	return prog;
2241 }
2242 
2243 static void
2244 jme_rxeof(struct jme_softc *sc, int ring)
2245 {
2246 	struct mbuf_chain chain[MAXCPU];
2247 
2248 	ether_input_chain_init(chain);
2249 	if (jme_rxeof_chain(sc, ring, chain, -1))
2250 		ether_input_dispatch(chain);
2251 }
2252 
2253 static void
2254 jme_tick(void *xsc)
2255 {
2256 	struct jme_softc *sc = xsc;
2257 	struct ifnet *ifp = &sc->arpcom.ac_if;
2258 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2259 
2260 	lwkt_serialize_enter(ifp->if_serializer);
2261 
2262 	mii_tick(mii);
2263 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2264 
2265 	lwkt_serialize_exit(ifp->if_serializer);
2266 }
2267 
2268 static void
2269 jme_reset(struct jme_softc *sc)
2270 {
2271 	uint32_t val;
2272 
2273 	/* Make sure that TX and RX are stopped */
2274 	jme_stop_tx(sc);
2275 	jme_stop_rx(sc);
2276 
2277 	/* Start reset */
2278 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2279 	DELAY(20);
2280 
2281 	/*
2282 	 * With the reset bit still held, disable the clock sources.
2283 	 */
2284 
2285 	/* Disable TXMAC and TXOFL clock sources */
2286 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2287 	/* Disable RXMAC clock source */
2288 	val = CSR_READ_4(sc, JME_GPREG1);
2289 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2290 	/* Flush */
2291 	CSR_READ_4(sc, JME_GHC);
2292 
2293 	/* Stop reset */
2294 	CSR_WRITE_4(sc, JME_GHC, 0);
2295 	/* Flush */
2296 	CSR_READ_4(sc, JME_GHC);
2297 
2298 	/*
2299 	 * With the reset bit now cleared, cycle the clock sources.
2300 	 */
2301 
2302 	/* Enable TXMAC and TXOFL clock sources */
2303 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2304 	/* Enable RXMAC clock source */
2305 	val = CSR_READ_4(sc, JME_GPREG1);
2306 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2307 	/* Flush */
2308 	CSR_READ_4(sc, JME_GHC);
2309 
2310 	/* Disable TXMAC and TXOFL clock sources */
2311 	CSR_WRITE_4(sc, JME_GHC, 0);
2312 	/* Disable RXMAC clock source */
2313 	val = CSR_READ_4(sc, JME_GPREG1);
2314 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2315 	/* Flush */
2316 	CSR_READ_4(sc, JME_GHC);
2317 
2318 	/* Enable TX and RX */
2319 	val = CSR_READ_4(sc, JME_TXCSR);
2320 	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2321 	val = CSR_READ_4(sc, JME_RXCSR);
2322 	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2323 	/* Flush */
2324 	CSR_READ_4(sc, JME_TXCSR);
2325 	CSR_READ_4(sc, JME_RXCSR);
2326 
2327 	/* Enable TXMAC and TXOFL clock sources */
2328 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2329 	/* Enable RXMAC clock source */
2330 	val = CSR_READ_4(sc, JME_GPREG1);
2331 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2332 	/* Flush */
2333 	CSR_READ_4(sc, JME_GHC);
2334 
2335 	/* Stop TX and RX */
2336 	jme_stop_tx(sc);
2337 	jme_stop_rx(sc);
2338 }
2339 
2340 static void
2341 jme_init(void *xsc)
2342 {
2343 	struct jme_softc *sc = xsc;
2344 	struct ifnet *ifp = &sc->arpcom.ac_if;
2345 	struct mii_data *mii;
2346 	uint8_t eaddr[ETHER_ADDR_LEN];
2347 	bus_addr_t paddr;
2348 	uint32_t reg;
2349 	int error, r;
2350 
2351 	ASSERT_SERIALIZED(ifp->if_serializer);
2352 
2353 	/*
2354 	 * Cancel any pending I/O.
2355 	 */
2356 	jme_stop(sc);
2357 
2358 	/*
2359 	 * Reset the chip to a known state.
2360 	 */
2361 	jme_reset(sc);
2362 
2363 	sc->jme_txd_spare =
2364 	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2365 	KKASSERT(sc->jme_txd_spare >= 1);
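	/*
	 * E.g. with a 1500 byte MTU and 2K clusters this is
	 * howmany(1518, 2048) == 1; a 9000 byte jumbo MTU would
	 * need howmany(9018, 2048) == 5 spare descriptors.
	 */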
2366 
2367 	/*
2368 	 * If we use 64bit address mode for transmitting, each Tx request
2369 	 * needs one more symbol descriptor.
2370 	 */
2371 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2372 		sc->jme_txd_spare += 1;
2373 
2374 	if (ifp->if_capenable & IFCAP_RSS)
2375 		jme_enable_rss(sc);
2376 	else
2377 		jme_disable_rss(sc);
2378 
2379 	/* Init RX descriptors */
2380 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2381 		error = jme_init_rx_ring(sc, r);
2382 		if (error) {
2383 			if_printf(ifp, "initialization failed: "
2384 				  "no memory for %dth RX ring.\n", r);
2385 			jme_stop(sc);
2386 			return;
2387 		}
2388 	}
2389 
2390 	/* Init TX descriptors */
2391 	jme_init_tx_ring(sc);
2392 
2393 	/* Initialize shadow status block. */
2394 	jme_init_ssb(sc);
2395 
2396 	/* Reprogram the station address. */
2397 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2398 	CSR_WRITE_4(sc, JME_PAR0,
2399 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2400 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2401 
2402 	/*
2403 	 * Configure Tx queue.
2404 	 *  Tx priority queue weight value : 0
2405 	 *  Tx FIFO threshold for processing next packet : 16QW
2406 	 *  Maximum Tx DMA length : 512
2407 	 *  Allow Tx DMA burst.
2408 	 */
2409 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2410 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2411 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2412 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2413 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2414 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2415 
2416 	/* Set Tx descriptor counter. */
2417 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2418 
2419 	/* Set Tx ring address to the hardware. */
2420 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2421 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2422 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2423 
2424 	/* Configure TxMAC parameters. */
2425 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2426 	reg |= TXMAC_THRESH_1_PKT;
2427 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2428 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2429 
2430 	/*
2431 	 * Configure Rx queue.
2432 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2433 	 *  FIFO threshold for processing next packet : 128QW
2434 	 *  Rx queue 0 select
2435 	 *  Max Rx DMA length : 128
2436 	 *  Rx descriptor retry : 32
2437 	 *  Rx descriptor retry time gap : 256ns
2438 	 *  Don't receive runt/bad frame.
2439 	 */
2440 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2441 #if 0
2442 	/*
2443 	 * Since the Rx FIFO size is 4K bytes, frames larger than 4K
2444 	 * bytes will suffer from Rx FIFO overruns.  So decrease the
2445 	 * FIFO threshold to reduce FIFO overruns for frames larger
2446 	 * than 4000 bytes.
2447 	 * For best performance with standard MTU sized frames, use
2448 	 * the maximum allowable FIFO threshold, 128QW.
2449 	 */
2450 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2451 	    JME_RX_FIFO_SIZE)
2452 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2453 	else
2454 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2455 #else
2456 	/* Improve PCI Express compatibility */
2457 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2458 #endif
2459 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2460 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2461 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2462 	/* XXX TODO DROP_BAD */
2463 
2464 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2465 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2466 
2467 		/* Set Rx descriptor counter. */
2468 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2469 
2470 		/* Set Rx ring address to the hardware. */
2471 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2472 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2473 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2474 	}
2475 
2476 	/* Clear receive filter. */
2477 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2478 
2479 	/* Set up the receive filter. */
2480 	jme_set_filter(sc);
2481 	jme_set_vlan(sc);
2482 
2483 	/*
2484 	 * Disable all WOL bits as WOL can interfere with normal Rx
2485 	 * operation. Also clear WOL detection status bits.
2486 	 */
2487 	reg = CSR_READ_4(sc, JME_PMCS);
2488 	reg &= ~PMCS_WOL_ENB_MASK;
2489 	CSR_WRITE_4(sc, JME_PMCS, reg);
2490 
2491 	/*
2492 	 * Pad 10 bytes right before the received frame.  This greatly
2493 	 * helps Rx performance on strict-alignment architectures, as
2494 	 * the frame need not be copied to align the payload.
2495 	 */
2496 	reg = CSR_READ_4(sc, JME_RXMAC);
2497 	reg |= RXMAC_PAD_10BYTES;
2498 
2499 	if (ifp->if_capenable & IFCAP_RXCSUM)
2500 		reg |= RXMAC_CSUM_ENB;
2501 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2502 
2503 	/* Configure general purpose reg0 */
2504 	reg = CSR_READ_4(sc, JME_GPREG0);
2505 	reg &= ~GPREG0_PCC_UNIT_MASK;
2506 	/* Set PCC timer resolution to micro-seconds unit. */
2507 	reg |= GPREG0_PCC_UNIT_US;
2508 	/*
2509 	 * Disable all shadow register posting, as we have to read
2510 	 * the JME_INTR_STATUS register in jme_intr.  Also, it seems
2511 	 * hard to synchronize the interrupt status between the
2512 	 * hardware and software with shadow posting, due to the
2513 	 * requirements of bus_dmamap_sync(9).
2514 	 */
2515 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2516 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2517 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2518 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2519 	/* Disable posting of DW0. */
2520 	reg &= ~GPREG0_POST_DW0_ENB;
2521 	/* Clear PME message. */
2522 	reg &= ~GPREG0_PME_ENB;
2523 	/* Set PHY address. */
2524 	reg &= ~GPREG0_PHY_ADDR_MASK;
2525 	reg |= sc->jme_phyaddr;
2526 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2527 
2528 	/* Configure Tx queue 0 packet completion coalescing. */
2529 	jme_set_tx_coal(sc);
2530 
2531 	/* Configure Rx queue 0 packet completion coalescing. */
2532 	jme_set_rx_coal(sc);
2533 
2534 	/* Configure shadow status block but don't enable posting. */
2535 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2536 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2537 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2538 
2539 	/* Disable Timer 1 and Timer 2. */
2540 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2541 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2542 
2543 	/* Configure retry transmit period, retry limit value. */
2544 	CSR_WRITE_4(sc, JME_TXTRHD,
2545 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2546 	    TXTRHD_RT_PERIOD_MASK) |
2547 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2548 	    TXTRHD_RT_LIMIT_MASK));
2549 
2550 #ifdef DEVICE_POLLING
2551 	if (!(ifp->if_flags & IFF_POLLING))
2552 #endif
2553 	/* Initialize the interrupt mask. */
2554 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2555 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2556 
2557 	/*
2558 	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
2559 	 * done after a valid link is detected in jme_miibus_statchg.
2560 	 */
2561 	sc->jme_flags &= ~JME_FLAG_LINK;
2562 
2563 	/* Set the current media. */
2564 	mii = device_get_softc(sc->jme_miibus);
2565 	mii_mediachg(mii);
2566 
2567 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2568 
2569 	ifp->if_flags |= IFF_RUNNING;
2570 	ifp->if_flags &= ~IFF_OACTIVE;
2571 }
2572 
2573 static void
2574 jme_stop(struct jme_softc *sc)
2575 {
2576 	struct ifnet *ifp = &sc->arpcom.ac_if;
2577 	struct jme_txdesc *txd;
2578 	struct jme_rxdesc *rxd;
2579 	struct jme_rxdata *rdata;
2580 	int i, r;
2581 
2582 	ASSERT_SERIALIZED(ifp->if_serializer);
2583 
2584 	/*
2585 	 * Mark the interface down and cancel the watchdog timer.
2586 	 */
2587 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2588 	ifp->if_timer = 0;
2589 
2590 	callout_stop(&sc->jme_tick_ch);
2591 	sc->jme_flags &= ~JME_FLAG_LINK;
2592 
2593 	/*
2594 	 * Disable interrupts.
2595 	 */
2596 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2597 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2598 
2599 	/* Disable updating shadow status block. */
2600 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2601 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2602 
2603 	/* Stop receiver, transmitter. */
2604 	jme_stop_rx(sc);
2605 	jme_stop_tx(sc);
2606 
2607 	/*
2608 	 * Free partially received RX segments.
2609 	 */
2610 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2611 		rdata = &sc->jme_cdata.jme_rx_data[r];
2612 		if (rdata->jme_rxhead != NULL)
2613 			m_freem(rdata->jme_rxhead);
2614 		JME_RXCHAIN_RESET(sc, r);
2615 	}
2616 
2617 	/*
2618 	 * Free RX and TX mbufs still in the queues.
2619 	 */
2620 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2621 		rdata = &sc->jme_cdata.jme_rx_data[r];
2622 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2623 			rxd = &rdata->jme_rxdesc[i];
2624 			if (rxd->rx_m != NULL) {
2625 				bus_dmamap_unload(rdata->jme_rx_tag,
2626 						  rxd->rx_dmamap);
2627 				m_freem(rxd->rx_m);
2628 				rxd->rx_m = NULL;
2629 			}
2630 		}
2631 	}
2632 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2633 		txd = &sc->jme_cdata.jme_txdesc[i];
2634 		if (txd->tx_m != NULL) {
2635 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2636 			    txd->tx_dmamap);
2637 			m_freem(txd->tx_m);
2638 			txd->tx_m = NULL;
2639 			txd->tx_ndesc = 0;
2640 		}
2641 	}
2642 }
2643 
2644 static void
2645 jme_stop_tx(struct jme_softc *sc)
2646 {
2647 	uint32_t reg;
2648 	int i;
2649 
2650 	reg = CSR_READ_4(sc, JME_TXCSR);
2651 	if ((reg & TXCSR_TX_ENB) == 0)
2652 		return;
2653 	reg &= ~TXCSR_TX_ENB;
2654 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2655 	for (i = JME_TIMEOUT; i > 0; i--) {
2656 		DELAY(1);
2657 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2658 			break;
2659 	}
2660 	if (i == 0)
2661 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2662 }
2663 
2664 static void
2665 jme_stop_rx(struct jme_softc *sc)
2666 {
2667 	uint32_t reg;
2668 	int i;
2669 
2670 	reg = CSR_READ_4(sc, JME_RXCSR);
2671 	if ((reg & RXCSR_RX_ENB) == 0)
2672 		return;
2673 	reg &= ~RXCSR_RX_ENB;
2674 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2675 	for (i = JME_TIMEOUT; i > 0; i--) {
2676 		DELAY(1);
2677 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2678 			break;
2679 	}
2680 	if (i == 0)
2681 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2682 }
2683 
2684 static void
2685 jme_init_tx_ring(struct jme_softc *sc)
2686 {
2687 	struct jme_chain_data *cd;
2688 	struct jme_txdesc *txd;
2689 	int i;
2690 
2691 	sc->jme_cdata.jme_tx_prod = 0;
2692 	sc->jme_cdata.jme_tx_cons = 0;
2693 	sc->jme_cdata.jme_tx_cnt = 0;
2694 
2695 	cd = &sc->jme_cdata;
2696 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2697 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2698 		txd = &sc->jme_cdata.jme_txdesc[i];
2699 		txd->tx_m = NULL;
2700 		txd->tx_desc = &cd->jme_tx_ring[i];
2701 		txd->tx_ndesc = 0;
2702 	}
2703 }
2704 
2705 static void
2706 jme_init_ssb(struct jme_softc *sc)
2707 {
2708 	struct jme_chain_data *cd;
2709 
2710 	cd = &sc->jme_cdata;
2711 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2712 }
2713 
2714 static int
2715 jme_init_rx_ring(struct jme_softc *sc, int ring)
2716 {
2717 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2718 	struct jme_rxdesc *rxd;
2719 	int i;
2720 
2721 	KKASSERT(rdata->jme_rxhead == NULL &&
2722 		 rdata->jme_rxtail == NULL &&
2723 		 rdata->jme_rxlen == 0);
2724 	rdata->jme_rx_cons = 0;
2725 
2726 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2727 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2728 		int error;
2729 
2730 		rxd = &rdata->jme_rxdesc[i];
2731 		rxd->rx_m = NULL;
2732 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2733 		error = jme_newbuf(sc, ring, rxd, 1);
2734 		if (error)
2735 			return error;
2736 	}
2737 	return 0;
2738 }
2739 
2740 static int
2741 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2742 {
2743 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2744 	struct jme_desc *desc;
2745 	struct mbuf *m;
2746 	bus_dma_segment_t segs;
2747 	bus_dmamap_t map;
2748 	int error, nsegs;
2749 
2750 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2751 	if (m == NULL)
2752 		return ENOBUFS;
2753 	/*
2754 	 * The JMC250 has a 64bit boundary alignment limitation, so
2755 	 * jme(4) takes advantage of the hardware's 10-byte padding
2756 	 * feature in order not to copy the entire frame just to
2757 	 * align the IP header on a 32bit boundary.
2758 	 */
2759 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2760 
2761 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2762 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2763 			BUS_DMA_NOWAIT);
2764 	if (error) {
2765 		m_freem(m);
2766 		if (init)
2767 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2768 		return error;
2769 	}
2770 
2771 	if (rxd->rx_m != NULL) {
2772 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2773 				BUS_DMASYNC_POSTREAD);
2774 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2775 	}
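
	/*
	 * The spare map now holds the newly loaded mbuf; swap it with
	 * the descriptor's old map so the old map becomes the spare
	 * for the next replenish.
	 */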
2776 	map = rxd->rx_dmamap;
2777 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
2778 	rdata->jme_rx_sparemap = map;
2779 	rxd->rx_m = m;
2780 
2781 	desc = rxd->rx_desc;
2782 	desc->buflen = htole32(segs.ds_len);
2783 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2784 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2785 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2786 
2787 	return 0;
2788 }
2789 
2790 static void
2791 jme_set_vlan(struct jme_softc *sc)
2792 {
2793 	struct ifnet *ifp = &sc->arpcom.ac_if;
2794 	uint32_t reg;
2795 
2796 	ASSERT_SERIALIZED(ifp->if_serializer);
2797 
2798 	reg = CSR_READ_4(sc, JME_RXMAC);
2799 	reg &= ~RXMAC_VLAN_ENB;
2800 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2801 		reg |= RXMAC_VLAN_ENB;
2802 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2803 }
2804 
2805 static void
2806 jme_set_filter(struct jme_softc *sc)
2807 {
2808 	struct ifnet *ifp = &sc->arpcom.ac_if;
2809 	struct ifmultiaddr *ifma;
2810 	uint32_t crc;
2811 	uint32_t mchash[2];
2812 	uint32_t rxcfg;
2813 
2814 	ASSERT_SERIALIZED(ifp->if_serializer);
2815 
2816 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2817 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2818 	    RXMAC_ALLMULTI);
2819 
2820 	/*
2821 	 * Always accept frames destined to our station address.
2822 	 * Always accept broadcast frames.
2823 	 */
2824 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2825 
2826 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2827 		if (ifp->if_flags & IFF_PROMISC)
2828 			rxcfg |= RXMAC_PROMISC;
2829 		if (ifp->if_flags & IFF_ALLMULTI)
2830 			rxcfg |= RXMAC_ALLMULTI;
2831 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2832 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2833 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2834 		return;
2835 	}
2836 
2837 	/*
2838 	 * Set up the multicast address filter by passing all multicast
2839 	 * addresses through a CRC generator, and then using the low-order
2840 	 * 6 bits as an index into the 64-bit multicast hash table.  The
2841 	 * high-order bits select the register, while the rest of the bits
2842 	 * select the bit within the register.
2843 	 */
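	/*
	 * For example, a CRC whose low 6 bits are 0x2b lands in
	 * mchash[0x2b >> 5], i.e. mchash[1] (written to JME_MAR1),
	 * at bit position 0x2b & 0x1f, i.e. bit 11.
	 */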
2844 	rxcfg |= RXMAC_MULTICAST;
2845 	bzero(mchash, sizeof(mchash));
2846 
2847 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2848 		if (ifma->ifma_addr->sa_family != AF_LINK)
2849 			continue;
2850 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2851 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2852 
2853 		/* Just want the 6 least significant bits. */
2854 		crc &= 0x3f;
2855 
2856 		/* Set the corresponding bit in the hash table. */
2857 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2858 	}
2859 
2860 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2861 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2862 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2863 }
2864 
2865 static int
2866 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2867 {
2868 	struct jme_softc *sc = arg1;
2869 	struct ifnet *ifp = &sc->arpcom.ac_if;
2870 	int error, v;
2871 
2872 	lwkt_serialize_enter(ifp->if_serializer);
2873 
2874 	v = sc->jme_tx_coal_to;
2875 	error = sysctl_handle_int(oidp, &v, 0, req);
2876 	if (error || req->newptr == NULL)
2877 		goto back;
2878 
2879 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2880 		error = EINVAL;
2881 		goto back;
2882 	}
2883 
2884 	if (v != sc->jme_tx_coal_to) {
2885 		sc->jme_tx_coal_to = v;
2886 		if (ifp->if_flags & IFF_RUNNING)
2887 			jme_set_tx_coal(sc);
2888 	}
2889 back:
2890 	lwkt_serialize_exit(ifp->if_serializer);
2891 	return error;
2892 }
2893 
2894 static int
2895 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2896 {
2897 	struct jme_softc *sc = arg1;
2898 	struct ifnet *ifp = &sc->arpcom.ac_if;
2899 	int error, v;
2900 
2901 	lwkt_serialize_enter(ifp->if_serializer);
2902 
2903 	v = sc->jme_tx_coal_pkt;
2904 	error = sysctl_handle_int(oidp, &v, 0, req);
2905 	if (error || req->newptr == NULL)
2906 		goto back;
2907 
2908 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2909 		error = EINVAL;
2910 		goto back;
2911 	}
2912 
2913 	if (v != sc->jme_tx_coal_pkt) {
2914 		sc->jme_tx_coal_pkt = v;
2915 		if (ifp->if_flags & IFF_RUNNING)
2916 			jme_set_tx_coal(sc);
2917 	}
2918 back:
2919 	lwkt_serialize_exit(ifp->if_serializer);
2920 	return error;
2921 }
2922 
2923 static int
2924 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2925 {
2926 	struct jme_softc *sc = arg1;
2927 	struct ifnet *ifp = &sc->arpcom.ac_if;
2928 	int error, v;
2929 
2930 	lwkt_serialize_enter(ifp->if_serializer);
2931 
2932 	v = sc->jme_rx_coal_to;
2933 	error = sysctl_handle_int(oidp, &v, 0, req);
2934 	if (error || req->newptr == NULL)
2935 		goto back;
2936 
2937 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2938 		error = EINVAL;
2939 		goto back;
2940 	}
2941 
2942 	if (v != sc->jme_rx_coal_to) {
2943 		sc->jme_rx_coal_to = v;
2944 		if (ifp->if_flags & IFF_RUNNING)
2945 			jme_set_rx_coal(sc);
2946 	}
2947 back:
2948 	lwkt_serialize_exit(ifp->if_serializer);
2949 	return error;
2950 }
2951 
2952 static int
2953 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2954 {
2955 	struct jme_softc *sc = arg1;
2956 	struct ifnet *ifp = &sc->arpcom.ac_if;
2957 	int error, v;
2958 
2959 	lwkt_serialize_enter(ifp->if_serializer);
2960 
2961 	v = sc->jme_rx_coal_pkt;
2962 	error = sysctl_handle_int(oidp, &v, 0, req);
2963 	if (error || req->newptr == NULL)
2964 		goto back;
2965 
2966 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2967 		error = EINVAL;
2968 		goto back;
2969 	}
2970 
2971 	if (v != sc->jme_rx_coal_pkt) {
2972 		sc->jme_rx_coal_pkt = v;
2973 		if (ifp->if_flags & IFF_RUNNING)
2974 			jme_set_rx_coal(sc);
2975 	}
2976 back:
2977 	lwkt_serialize_exit(ifp->if_serializer);
2978 	return error;
2979 }
2980 
2981 static void
2982 jme_set_tx_coal(struct jme_softc *sc)
2983 {
2984 	uint32_t reg;
2985 
2986 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2987 	    PCCTX_COAL_TO_MASK;
2988 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2989 	    PCCTX_COAL_PKT_MASK;
2990 	reg |= PCCTX_COAL_TXQ0;
2991 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2992 }
2993 
2994 static void
2995 jme_set_rx_coal(struct jme_softc *sc)
2996 {
2997 	uint32_t reg;
2998 	int r;
2999 
3000 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3001 	    PCCRX_COAL_TO_MASK;
3002 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3003 	    PCCRX_COAL_PKT_MASK;
3004 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3005 		if (r < sc->jme_rx_ring_inuse)
3006 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3007 		else
3008 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3009 	}
3010 }
3011 
3012 #ifdef DEVICE_POLLING
3013 
3014 static void
3015 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3016 {
3017 	struct jme_softc *sc = ifp->if_softc;
3018 	struct mbuf_chain chain[MAXCPU];
3019 	uint32_t status;
3020 	int r, prog = 0;
3021 
3022 	ASSERT_SERIALIZED(ifp->if_serializer);
3023 
3024 	switch (cmd) {
3025 	case POLL_REGISTER:
3026 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3027 		break;
3028 
3029 	case POLL_DEREGISTER:
3030 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3031 		break;
3032 
3033 	case POLL_AND_CHECK_STATUS:
3034 	case POLL_ONLY:
3035 		status = CSR_READ_4(sc, JME_INTR_STATUS);
3036 
3037 		ether_input_chain_init(chain);
3038 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
3039 			prog += jme_rxeof_chain(sc, r, chain, count);
3040 		if (prog)
3041 			ether_input_dispatch(chain);
3042 
3043 		if (status & INTR_RXQ_DESC_EMPTY) {
3044 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3045 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3046 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
3047 		}
3048 
3049 		jme_txeof(sc);
3050 		if (!ifq_is_empty(&ifp->if_snd))
3051 			if_devstart(ifp);
3052 		break;
3053 	}
3054 }
3055 
3056 #endif	/* DEVICE_POLLING */
3057 
3058 static int
3059 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3060 {
3061 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3062 	bus_dmamem_t dmem;
3063 	int error;
3064 
3065 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3066 			JME_RX_RING_ALIGN, 0,
3067 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3068 			JME_RX_RING_SIZE(sc),
3069 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3070 	if (error) {
3071 		device_printf(sc->jme_dev,
3072 		    "could not allocate %dth Rx ring.\n", ring);
3073 		return error;
3074 	}
3075 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3076 	rdata->jme_rx_ring_map = dmem.dmem_map;
3077 	rdata->jme_rx_ring = dmem.dmem_addr;
3078 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3079 
3080 	return 0;
3081 }
3082 
3083 static int
3084 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3085 {
3086 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3087 	int i, error;
3088 
3089 	/* Create tag for Rx buffers. */
3090 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3091 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3092 	    BUS_SPACE_MAXADDR,		/* lowaddr */
3093 	    BUS_SPACE_MAXADDR,		/* highaddr */
3094 	    NULL, NULL,			/* filter, filterarg */
3095 	    MCLBYTES,			/* maxsize */
3096 	    1,				/* nsegments */
3097 	    MCLBYTES,			/* maxsegsize */
3098 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3099 	    &rdata->jme_rx_tag);
3100 	if (error) {
3101 		device_printf(sc->jme_dev,
3102 		    "could not create %dth Rx DMA tag.\n", ring);
3103 		return error;
3104 	}
3105 
3106 	/* Create DMA maps for Rx buffers. */
3107 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3108 				  &rdata->jme_rx_sparemap);
3109 	if (error) {
3110 		device_printf(sc->jme_dev,
3111 		    "could not create %dth spare Rx dmamap.\n", ring);
3112 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3113 		rdata->jme_rx_tag = NULL;
3114 		return error;
3115 	}
3116 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3117 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3118 
3119 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3120 					  &rxd->rx_dmamap);
3121 		if (error) {
3122 			int j;
3123 
3124 			device_printf(sc->jme_dev,
3125 			    "could not create %dth Rx dmamap "
3126 			    "for %dth RX ring.\n", i, ring);
3127 
3128 			for (j = 0; j < i; ++j) {
3129 				rxd = &rdata->jme_rxdesc[j];
3130 				bus_dmamap_destroy(rdata->jme_rx_tag,
3131 						   rxd->rx_dmamap);
3132 			}
3133 			bus_dmamap_destroy(rdata->jme_rx_tag,
3134 					   rdata->jme_rx_sparemap);
3135 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3136 			rdata->jme_rx_tag = NULL;
3137 			return error;
3138 		}
3139 	}
3140 	return 0;
3141 }
3142 
3143 static void
3144 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3145 {
3146 	struct mbuf_chain chain[MAXCPU];
3147 	int r, prog = 0;
3148 
3149 	ether_input_chain_init(chain);
3150 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3151 		if (status & jme_rx_status[r].jme_coal)
3152 			prog += jme_rxeof_chain(sc, r, chain, -1);
3153 	}
3154 	if (prog)
3155 		ether_input_dispatch(chain);
3156 }
3157 
3158 static void
3159 jme_enable_rss(struct jme_softc *sc)
3160 {
3161 	uint32_t rssc, ind;
3162 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3163 	int i;
3164 
3165 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3166 
3167 	KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3168 		sc->jme_rx_ring_inuse == JME_NRXRING_4,
3169 		("%s: invalid # of RX rings (%d)\n",
3170 		 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3171 
3172 	rssc = RSSC_HASH_64_ENTRY;
3173 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3174 	rssc |= sc->jme_rx_ring_inuse >> 1;
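	/*
	 * The ring count is encoded as jme_rx_ring_inuse >> 1:
	 * 1 for 2 rings, 2 for 4 rings.
	 */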
3175 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3176 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3177 
3178 	toeplitz_get_key(key, sizeof(key));
3179 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3180 		uint32_t keyreg;
3181 
3182 		keyreg = RSSKEY_REGVAL(key, i);
3183 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3184 
3185 		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3186 	}
3187 
3188 	/*
3189 	 * Create the redirect table in the following fashion:
3190 	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3191 	 */
3192 	ind = 0;
3193 	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3194 		int q;
3195 
3196 		q = i % sc->jme_rx_ring_inuse;
3197 		ind |= q << (i * 8);
3198 	}
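
	/*
	 * With 4 rings in use each 32-bit table register thus holds
	 * the byte pattern 0x03020100; with 2 rings, 0x01000100
	 * (assuming RSSTBL_REGSIZE is 4, one queue index per byte).
	 */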
3199 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3200 
3201 	for (i = 0; i < RSSTBL_NREGS; ++i)
3202 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3203 }
3204 
3205 static void
3206 jme_disable_rss(struct jme_softc *sc)
3207 {
3208 	sc->jme_rx_ring_inuse = JME_NRXRING_1;
3209 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3210 }
3211