xref: /dragonfly/sys/dev/netif/jme/if_jme.c (revision dcd37f7d)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
29  */
30 
31 #include "opt_polling.h"
32 #include "opt_rss.h"
33 #include "opt_jme.h"
34 
35 #include <sys/param.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/interrupt.h>
40 #include <sys/malloc.h>
41 #include <sys/proc.h>
42 #include <sys/rman.h>
43 #include <sys/serialize.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 
48 #include <net/ethernet.h>
49 #include <net/if.h>
50 #include <net/bpf.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/ifq_var.h>
55 #include <net/toeplitz.h>
56 #include <net/toeplitz2.h>
57 #include <net/vlan/if_vlan_var.h>
58 #include <net/vlan/if_vlan_ether.h>
59 
60 #include <netinet/in.h>
61 
62 #include <dev/netif/mii_layer/miivar.h>
63 #include <dev/netif/mii_layer/jmphyreg.h>
64 
65 #include <bus/pci/pcireg.h>
66 #include <bus/pci/pcivar.h>
67 #include <bus/pci/pcidevs.h>
68 
69 #include <dev/netif/jme/if_jmereg.h>
70 #include <dev/netif/jme/if_jmevar.h>
71 
72 #include "miibus_if.h"
73 
/* Define the following to disable printing Rx errors. */
#undef	JME_SHOW_ERRORS

/* Checksum offload features advertised via if_hwassist when TXCSUM is on. */
#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
/*
 * RSS debug printf: emits only when the per-device debug level
 * (sc->jme_rss_debug, exported as the "rss_debug" sysctl) is >= lvl.
 */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
88 
89 static int	jme_probe(device_t);
90 static int	jme_attach(device_t);
91 static int	jme_detach(device_t);
92 static int	jme_shutdown(device_t);
93 static int	jme_suspend(device_t);
94 static int	jme_resume(device_t);
95 
96 static int	jme_miibus_readreg(device_t, int, int);
97 static int	jme_miibus_writereg(device_t, int, int, int);
98 static void	jme_miibus_statchg(device_t);
99 
100 static void	jme_init(void *);
101 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
102 static void	jme_start(struct ifnet *);
103 static void	jme_watchdog(struct ifnet *);
104 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
105 static int	jme_mediachange(struct ifnet *);
106 #ifdef DEVICE_POLLING
107 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
108 #endif
109 
110 static void	jme_intr(void *);
111 static void	jme_txeof(struct jme_softc *);
112 static void	jme_rxeof(struct jme_softc *, int);
113 static int	jme_rxeof_chain(struct jme_softc *, int,
114 				struct mbuf_chain *, int);
115 static void	jme_rx_intr(struct jme_softc *, uint32_t);
116 
117 static int	jme_dma_alloc(struct jme_softc *);
118 static void	jme_dma_free(struct jme_softc *);
119 static int	jme_init_rx_ring(struct jme_softc *, int);
120 static void	jme_init_tx_ring(struct jme_softc *);
121 static void	jme_init_ssb(struct jme_softc *);
122 static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
123 static int	jme_encap(struct jme_softc *, struct mbuf **);
124 static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
125 static int	jme_rxring_dma_alloc(struct jme_softc *, int);
126 static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);
127 
128 static void	jme_tick(void *);
129 static void	jme_stop(struct jme_softc *);
130 static void	jme_reset(struct jme_softc *);
131 static void	jme_set_vlan(struct jme_softc *);
132 static void	jme_set_filter(struct jme_softc *);
133 static void	jme_stop_tx(struct jme_softc *);
134 static void	jme_stop_rx(struct jme_softc *);
135 static void	jme_mac_config(struct jme_softc *);
136 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
137 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
138 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
139 #ifdef notyet
140 static void	jme_setwol(struct jme_softc *);
141 static void	jme_setlinkspeed(struct jme_softc *);
142 #endif
143 static void	jme_set_tx_coal(struct jme_softc *);
144 static void	jme_set_rx_coal(struct jme_softc *);
145 static void	jme_enable_rss(struct jme_softc *);
146 static void	jme_disable_rss(struct jme_softc *);
147 
148 static void	jme_sysctl_node(struct jme_softc *);
149 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
150 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
151 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
152 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
153 
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;	/* PCI vendor ID */
	uint16_t	jme_deviceid;	/* PCI device ID */
	uint32_t	jme_caps;	/* initial JME_CAP_* flags for the chip */
	const char	*jme_name;	/* probe description string */
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }	/* list terminator */
};
171 
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }	/* method table terminator */
};

static driver_t jme_driver = {
	"jme",			/* driver name */
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

/* Module glue: attach to the PCI bus and hang a miibus child off "jme". */
DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
205 
/*
 * Per-RX-queue interrupt status bits: the coalescing/coalescing-timeout
 * bits and the completion bit for each of the up to JME_NRXRING_MAX queues.
 */
static const struct {
	uint32_t	jme_coal;	/* INTR_RXQn_COAL | INTR_RXQn_COAL_TO */
	uint32_t	jme_comp;	/* INTR_RXQn_COMP */
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
};

/* Ring geometry tunables; aligned and clamped in jme_attach(). */
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = JME_NRXRING_DEF;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
223 
224 /*
225  *	Read a PHY register on the MII of the JMC250.
226  */
227 static int
228 jme_miibus_readreg(device_t dev, int phy, int reg)
229 {
230 	struct jme_softc *sc = device_get_softc(dev);
231 	uint32_t val;
232 	int i;
233 
234 	/* For FPGA version, PHY address 0 should be ignored. */
235 	if (sc->jme_caps & JME_CAP_FPGA) {
236 		if (phy == 0)
237 			return (0);
238 	} else {
239 		if (sc->jme_phyaddr != phy)
240 			return (0);
241 	}
242 
243 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
244 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
245 
246 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
247 		DELAY(1);
248 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
249 			break;
250 	}
251 	if (i == 0) {
252 		device_printf(sc->jme_dev, "phy read timeout: "
253 			      "phy %d, reg %d\n", phy, reg);
254 		return (0);
255 	}
256 
257 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
258 }
259 
260 /*
261  *	Write a PHY register on the MII of the JMC250.
262  */
263 static int
264 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
265 {
266 	struct jme_softc *sc = device_get_softc(dev);
267 	int i;
268 
269 	/* For FPGA version, PHY address 0 should be ignored. */
270 	if (sc->jme_caps & JME_CAP_FPGA) {
271 		if (phy == 0)
272 			return (0);
273 	} else {
274 		if (sc->jme_phyaddr != phy)
275 			return (0);
276 	}
277 
278 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
279 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
280 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
281 
282 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
283 		DELAY(1);
284 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
285 			break;
286 	}
287 	if (i == 0) {
288 		device_printf(sc->jme_dev, "phy write timeout: "
289 			      "phy %d, reg %d\n", phy, reg);
290 	}
291 
292 	return (0);
293 }
294 
/*
 *	Callback from MII layer when media changes.
 *
 *	On any link transition the MAC is fully stopped, all queued
 *	RX/TX work is reclaimed, rings are reinitialized and, if link
 *	is up, the MAC is reprogrammed and restarted.  See the block
 *	comment below for why a lighter-weight save/restore of the
 *	ring pointer registers is not sufficient.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	/* Re-derive the link flag from the resolved media subtype. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Fast-ethernet-only parts cannot link at 1000baseT. */
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources.  Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain every in-use RX ring and reset its consumer index. */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(sc, r);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}

	/* Reclaim completed TX work, then free any still-queued mbufs. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				/* Dropped, not transmitted: count as error. */
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
			/* Select queue r, then program its ring address. */
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	/* In polling mode the mask stays cleared; poll loop drives RX/TX. */
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
431 
432 /*
433  *	Get the current interface media status.
434  */
435 static void
436 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
437 {
438 	struct jme_softc *sc = ifp->if_softc;
439 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
440 
441 	ASSERT_SERIALIZED(ifp->if_serializer);
442 
443 	mii_pollstat(mii);
444 	ifmr->ifm_status = mii->mii_media_status;
445 	ifmr->ifm_active = mii->mii_media_active;
446 }
447 
448 /*
449  *	Set hardware to newly-selected media.
450  */
451 static int
452 jme_mediachange(struct ifnet *ifp)
453 {
454 	struct jme_softc *sc = ifp->if_softc;
455 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
456 	int error;
457 
458 	ASSERT_SERIALIZED(ifp->if_serializer);
459 
460 	if (mii->mii_instance != 0) {
461 		struct mii_softc *miisc;
462 
463 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
464 			mii_phy_reset(miisc);
465 	}
466 	error = mii_mediachg(mii);
467 
468 	return (error);
469 }
470 
471 static int
472 jme_probe(device_t dev)
473 {
474 	const struct jme_dev *sp;
475 	uint16_t vid, did;
476 
477 	vid = pci_get_vendor(dev);
478 	did = pci_get_device(dev);
479 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
480 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
481 			struct jme_softc *sc = device_get_softc(dev);
482 
483 			sc->jme_caps = sp->jme_caps;
484 			device_set_desc(dev, sp->jme_name);
485 			return (0);
486 		}
487 	}
488 	return (ENXIO);
489 }
490 
/*
 * Read a single byte from the EEPROM behind the chip's SMB interface.
 *
 * On success returns 0 with the byte in *val; returns ETIMEDOUT if the
 * hardware never goes idle or the read cycle does not complete.
 */
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	/* Wait for the SMB hardware to become idle. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	/* Trigger a read cycle at the requested address. */
	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	/* The trigger bit self-clears when the cycle finishes. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	/* Extract the returned data byte. */
	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
529 
/*
 * Walk the EEPROM descriptor list looking for writes to the station
 * address registers (PAR0..PAR0+5) and collect the MAC address bytes.
 *
 * Returns 0 once all ETHER_ADDR_LEN bytes have been found, ENOENT if
 * the EEPROM signature is absent or the address is incomplete.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	/* Verify the two-byte EEPROM signature first. */
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Only descriptors targeting function 0, page BAR1 count. */
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			/* Is the target register one of the PAR bytes? */
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
573 
574 static void
575 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
576 {
577 	uint32_t par0, par1;
578 
579 	/* Read station address. */
580 	par0 = CSR_READ_4(sc, JME_PAR0);
581 	par1 = CSR_READ_4(sc, JME_PAR1);
582 	par1 &= 0xFFFF;
583 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
584 		device_printf(sc->jme_dev,
585 		    "generating fake ethernet address.\n");
586 		par0 = karc4random();
587 		/* Set OUI to JMicron. */
588 		eaddr[0] = 0x00;
589 		eaddr[1] = 0x1B;
590 		eaddr[2] = 0x8C;
591 		eaddr[3] = (par0 >> 16) & 0xff;
592 		eaddr[4] = (par0 >> 8) & 0xff;
593 		eaddr[5] = par0 & 0xff;
594 	} else {
595 		eaddr[0] = (par0 >> 0) & 0xFF;
596 		eaddr[1] = (par0 >> 8) & 0xFF;
597 		eaddr[2] = (par0 >> 16) & 0xFF;
598 		eaddr[3] = (par0 >> 24) & 0xFF;
599 		eaddr[4] = (par1 >> 0) & 0xFF;
600 		eaddr[5] = (par1 >> 8) & 0xFF;
601 	}
602 }
603 
/*
 * Attach the device: size the descriptor rings, allocate bus resources,
 * identify the chip revision and apply per-revision workarounds, obtain
 * the station address, set up DMA, the network interface, the MII bus
 * and the interrupt handler.  On any failure, unwinds via jme_detach().
 */
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	/* Align tunable descriptor counts and clamp them to the chip max. */
	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

#ifdef RSS
	/* RX ring count: clamp to [1, ncpus2], then round down to 4/2/1. */
	sc->jme_rx_ring_cnt = jme_rx_ring_count;
	if (sc->jme_rx_ring_cnt <= 0)
		sc->jme_rx_ring_cnt = JME_NRXRING_1;
	if (sc->jme_rx_ring_cnt > ncpus2)
		sc->jme_rx_ring_cnt = ncpus2;

	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
		sc->jme_rx_ring_cnt = JME_NRXRING_4;
	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
		sc->jme_rx_ring_cnt = JME_NRXRING_2;
#else
	sc->jme_rx_ring_cnt = JME_NRXRING_MIN;
#endif
	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	/*
	 * Bring the chip to D0, preserving the interrupt line and BAR
	 * settings across the power-state change.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map entire memory
	 * space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						 &sc->jme_irq_rid,
						 RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
				      (reg & CHIPMODE_FPGA_REV_MASK) >>
				      CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	/* Apply per-device, per-revision workarounds/limits. */
	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
				      GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address: prefer EEPROM, fall back to PAR registers. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
				      "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		/* Real address is discovered after mii_phy_probe() below. */
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		/* Match TX DMA burst size to the PCIe max read request. */
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	/* Fill in the ifnet and hand it our entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = JME_CSUM_FEATURES;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			/* Take the first non-zero PHY address that probed. */
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Interrupt handler runs under the ifnet serializer. */
	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
			       &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
fail:
	jme_detach(dev);
	return (error);
}
899 
/*
 * Detach the driver: stop the chip and tear down the interrupt under
 * the ifnet serializer, detach the network interface, then release the
 * sysctl tree, miibus child, bus resources and DMA memory.
 *
 * Also serves as the error-unwind path for jme_attach(), so every
 * resource is checked for NULL before being released.
 */
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
				     sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}
937 
938 static void
939 jme_sysctl_node(struct jme_softc *sc)
940 {
941 	int coal_max;
942 #ifdef JME_RSS_DEBUG
943 	char rx_ring_pkt[32];
944 	int r;
945 #endif
946 
947 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
948 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
949 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
950 				device_get_nameunit(sc->jme_dev),
951 				CTLFLAG_RD, 0, "");
952 	if (sc->jme_sysctl_tree == NULL) {
953 		device_printf(sc->jme_dev, "can't add sysctl node\n");
954 		return;
955 	}
956 
957 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
958 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
959 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
960 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
961 
962 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
963 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
964 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
965 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
966 
967 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
968 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
969 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
970 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
971 
972 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
973 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
974 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
975 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
976 
977 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
978 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
979 		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
980 		       0, "RX desc count");
981 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
982 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
983 		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
984 		       0, "TX desc count");
985 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
986 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
987 		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
988 		       0, "RX ring count");
989 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
990 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
991 		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
992 		       0, "RX ring in use");
993 #ifdef JME_RSS_DEBUG
994 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
995 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
996 		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
997 		       0, "RSS debug level");
998 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
999 		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
1000 		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
1001 				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1002 				rx_ring_pkt, CTLFLAG_RW,
1003 				&sc->jme_rx_ring_pkt[r],
1004 				0, "RXed packets");
1005 	}
1006 #endif
1007 
1008 	/*
1009 	 * Set default coalesce valves
1010 	 */
1011 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1012 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1013 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1014 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1015 
1016 	/*
1017 	 * Adjust coalesce valves, in case that the number of TX/RX
1018 	 * descs are set to small values by users.
1019 	 *
1020 	 * NOTE: coal_max will not be zero, since number of descs
1021 	 * must aligned by JME_NDESC_ALIGN (16 currently)
1022 	 */
1023 	coal_max = sc->jme_tx_desc_cnt / 6;
1024 	if (coal_max < sc->jme_tx_coal_pkt)
1025 		sc->jme_tx_coal_pkt = coal_max;
1026 
1027 	coal_max = sc->jme_rx_desc_cnt / 4;
1028 	if (coal_max < sc->jme_rx_coal_pkt)
1029 		sc->jme_rx_coal_pkt = coal_max;
1030 }
1031 
/*
 * Allocate all DMA resources used by the driver: the software TX/RX
 * descriptor bookkeeping arrays, the hardware TX ring, the per-ring RX
 * rings, the shadow status block, and the TX buffer DMA tag/maps.
 *
 * Returns 0 on success or a bus_dma error code.  On failure, partially
 * allocated resources are left in place for the caller to release via
 * jme_dma_free() (only the TX dmamap loop cleans up after itself).
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i;

	/* Software descriptor arrays — plain kernel memory, not DMA. */
	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_TX_RING_SIZE(sc),
			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(sc, i);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			/* Unwind the maps created so far, then the tag. */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(sc, i);
		if (error)
			return error;
	}
	return 0;
}
1177 
/*
 * Release every DMA resource created by jme_dma_alloc().
 *
 * Safe to call with a partially completed allocation: each resource is
 * NULL-checked before being torn down.  Child tags (TX/RX buffers, SSB,
 * rings) are destroyed before their parent tags at the end.
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers (per-descriptor maps plus the spare map) */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags — destroyed only after all children above. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	/* Software descriptor arrays. */
	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
1270 
1271 /*
1272  *	Make sure the interface is stopped at reboot time.
1273  */
1274 static int
1275 jme_shutdown(device_t dev)
1276 {
1277 	return jme_suspend(dev);
1278 }
1279 
1280 #ifdef notyet
1281 /*
 * Unlike other ethernet controllers, the JMC250 requires
 * explicitly resetting the link speed to 10/100Mbps, as a gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation, but we don't know whether that operation
 * will succeed or not as we have no control after powering
 * off. If the renegotiation fails, WOL may not work. Running
 * at 1Gbps draws more power than 375mA at 3.3V, which is
 * the limit specified in the PCI specification, and exceeding it
 * could result in power to the ethernet controller being cut off.
1292  *
1293  * TODO
1294  *  Save current negotiated media speed/duplex/flow-control
1295  *  to softc and restore the same link again after resuming.
1296  *  PHY handling such as power down/resetting to 100Mbps
1297  *  may be better handled in suspend method in phy driver.
1298  */
1299 static void
1300 jme_setlinkspeed(struct jme_softc *sc)
1301 {
1302 	struct mii_data *mii;
1303 	int aneg, i;
1304 
1305 	JME_LOCK_ASSERT(sc);
1306 
1307 	mii = device_get_softc(sc->jme_miibus);
1308 	mii_pollstat(mii);
1309 	aneg = 0;
1310 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1311 		switch IFM_SUBTYPE(mii->mii_media_active) {
1312 		case IFM_10_T:
1313 		case IFM_100_TX:
1314 			return;
1315 		case IFM_1000_T:
1316 			aneg++;
1317 		default:
1318 			break;
1319 		}
1320 	}
1321 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1322 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1323 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1324 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1325 	    BMCR_AUTOEN | BMCR_STARTNEG);
1326 	DELAY(1000);
1327 	if (aneg != 0) {
1328 		/* Poll link state until jme(4) get a 10/100 link. */
1329 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1330 			mii_pollstat(mii);
1331 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1332 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1333 				case IFM_10_T:
1334 				case IFM_100_TX:
1335 					jme_mac_config(sc);
1336 					return;
1337 				default:
1338 					break;
1339 				}
1340 			}
1341 			JME_UNLOCK(sc);
1342 			pause("jmelnk", hz);
1343 			JME_LOCK(sc);
1344 		}
1345 		if (i == MII_ANEGTICKS_GIGE)
1346 			device_printf(sc->jme_dev, "establishing link failed, "
1347 			    "WOL may not work!");
1348 	}
1349 	/*
1350 	 * No link, force MAC to have 100Mbps, full-duplex link.
1351 	 * This is the last resort and may/may not work.
1352 	 */
1353 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1354 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1355 	jme_mac_config(sc);
1356 }
1357 
/*
 * Program Wake-On-LAN state for suspend/shutdown.
 *
 * If the device has no PCI power-management capability, or WOL is not
 * enabled on the interface, the PHY is simply powered down.  Otherwise
 * magic-frame wakeup and PME generation are armed, and gigabit parts
 * are renegotiated down to 10/100 first (see jme_setlinkspeed()).
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1400 #endif
1401 
/*
 * Suspend handler: stop the chip under the interface serializer.
 * WOL arming (jme_setwol) is present but not yet enabled.
 */
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
1417 
/*
 * Resume handler: optionally clear pending PME status (disabled code)
 * and re-initialize the chip if the interface was up.
 */
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	lwkt_serialize_enter(ifp->if_serializer);

#ifdef notyet
	/*
	 * NOTE(review): pci_find_extcap() returns 0 when the capability
	 * is found, so this `!= 0` test looks inverted (compare the
	 * `!= 0` early-return in jme_setwol()) — verify before enabling.
	 */
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME, clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
1449 
/*
 * Load one outgoing mbuf chain into the TX descriptor ring.
 *
 * In the 64-bit DMA configuration (jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
 * the chain starts with an extra "symbol" descriptor that carries the
 * checksum/VLAN flags and total length but no payload; in the 32-bit
 * configuration the first descriptor maps the first data segment directly.
 *
 * Returns 0 on success.  On failure the mbuf is freed and *m_head is
 * set to NULL (the mbuf may also be replaced by defragmentation).
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	/* Cap the segment count by the free descs, minus the reserve. */
	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	/* Map the remaining segments; i already accounts for the first. */
	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
1572 
/*
 * if_start handler: drain the interface send queue into the TX ring.
 * Must be called with the interface serializer held.  Sets IFF_OACTIVE
 * and stops when the ring cannot take another maximum-sized frame.
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Without link, transmitting is pointless — drop everything. */
	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim completed descriptors first if the ring is nearly full. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
1641 
/*
 * TX watchdog: fires when if_timer expires without TX completion.
 * Tries progressively stronger recovery: re-init on missed link,
 * plain reclaim if all descriptors were actually completed, and a
 * full re-init otherwise.
 */
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		/* Everything completed; we only missed the interrupt. */
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	/* Genuine stall — reset the chip and restart transmission. */
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1671 
/*
 * if_ioctl handler.  Handles MTU changes, interface flags, multicast
 * filter updates, media selection, and capability toggles; everything
 * else is passed to ether_ioctl().  Called with the serializer held.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU only on parts with JME_CAP_JUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only re-filter if PROMISC/ALLMULTI flipped. */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* TX csum cannot be enabled with MTU >= TX FIFO size. */
		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		/* RSS changes require a full re-init to take effect. */
		if (mask & IFCAP_RSS) {
			ifp->if_capenable ^= IFCAP_RSS;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1774 
/*
 * Reprogram the MAC (GHC, RXMAC, TXMAC, TXPFC) to match the
 * speed/duplex currently resolved by the MII layer, and apply the
 * chip-revision workarounds (extended PHY FIFO, half-duplex GPREG1).
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	/* Reset the MAC before reprogramming it. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only parts cannot do gigabit. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	/* Apply the workarounds only on chip revisions that need them. */
	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
1869 
/*
 * Interrupt handler.  Masks chip interrupts, acknowledges/translates
 * the status bits, dispatches RX and TX processing, and re-enables
 * interrupts on exit.  Runs with the interface serializer held.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* 0xFFFFFFFF means the device is gone (e.g. detached/powered off). */
	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
1932 
/*
 * Reclaim completed TX descriptors: walk from the consumer index to
 * the producer index, free the mbufs of frames the hardware has
 * finished with, update error/collision statistics, and clear
 * IFF_OACTIVE once enough descriptors are free again.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		/* Hardware still owns this chain; stop reclaiming. */
		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire chained buffers for the transmitted frame. In
		 * other words, the JME_TD_OWN bit is valid only at the
		 * first descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* Nothing in flight — cancel the watchdog. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1999 
2000 static __inline void
2001 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2002 {
2003 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2004 	int i;
2005 
2006 	for (i = 0; i < count; ++i) {
2007 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2008 
2009 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2010 		desc->buflen = htole32(MCLBYTES);
2011 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2012 	}
2013 }
2014 
2015 static __inline struct pktinfo *
2016 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2017 {
2018 	if (flags & JME_RD_IPV4)
2019 		pi->pi_netisr = NETISR_IP;
2020 	else if (flags & JME_RD_IPV6)
2021 		pi->pi_netisr = NETISR_IPV6;
2022 	else
2023 		return NULL;
2024 
2025 	pi->pi_flags = 0;
2026 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2027 
2028 	if (flags & JME_RD_MORE_FRAG)
2029 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2030 	else if (flags & JME_RD_TCP)
2031 		pi->pi_l3proto = IPPROTO_TCP;
2032 	else if (flags & JME_RD_UDP)
2033 		pi->pi_l3proto = IPPROTO_UDP;
2034 	else
2035 		pi = NULL;
2036 	return pi;
2037 }
2038 
/*
 * Receive a frame.
 *
 * Consume one complete frame (possibly spanning several descriptors)
 * from RX ring `ring': chain its cluster mbufs together, fix up the
 * per-mbuf lengths, attach checksum/VLAN/RSS metadata, and queue the
 * frame onto `chain' for later dispatch.  Each consumed descriptor is
 * refilled with a fresh cluster; on allocation failure or RX error the
 * descriptors are recycled in place and the frame is dropped.
 * Advances rdata->jme_rx_cons past all consumed descriptors.
 */
static void
jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	/* The first descriptor of a frame holds status, hash and length. */
	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
			"hash 0x%08x, hash info 0x%08x\n",
			ring, flags, hash, hashinfo);

	/* On RX error recycle every segment of the frame and bail out. */
	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, ring, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
		return;
	}

	/* Frame payload length, excluding the hardware alignment pad. */
	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(sc, ring);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				/*
				 * L4 checksum is only meaningful on
				 * non-fragmented TCP/UDP frames.
				 */
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			/* Attach the RSS hash when the hardware supplied one. */
			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			/* Pass it on. */
			ether_input_chain(ifp, m, pi, chain);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc, ring);
#ifdef JME_RSS_DEBUG
			sc->jme_rx_ring_pkt[ring]++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
}
2201 
2202 static int
2203 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2204 		int count)
2205 {
2206 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2207 	struct jme_desc *desc;
2208 	int nsegs, prog, pktlen;
2209 
2210 	prog = 0;
2211 	for (;;) {
2212 #ifdef DEVICE_POLLING
2213 		if (count >= 0 && count-- == 0)
2214 			break;
2215 #endif
2216 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2217 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2218 			break;
2219 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2220 			break;
2221 
2222 		/*
2223 		 * Check number of segments against received bytes.
2224 		 * Non-matching value would indicate that hardware
2225 		 * is still trying to update Rx descriptors. I'm not
2226 		 * sure whether this check is needed.
2227 		 */
2228 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2229 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2230 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2231 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2232 				  "and packet size(%d) mismach\n",
2233 				  nsegs, pktlen);
2234 			break;
2235 		}
2236 
2237 		/* Received a frame. */
2238 		jme_rxpkt(sc, ring, chain);
2239 		prog++;
2240 	}
2241 	return prog;
2242 }
2243 
2244 static void
2245 jme_rxeof(struct jme_softc *sc, int ring)
2246 {
2247 	struct mbuf_chain chain[MAXCPU];
2248 
2249 	ether_input_chain_init(chain);
2250 	if (jme_rxeof_chain(sc, ring, chain, -1))
2251 		ether_input_dispatch(chain);
2252 }
2253 
2254 static void
2255 jme_tick(void *xsc)
2256 {
2257 	struct jme_softc *sc = xsc;
2258 	struct ifnet *ifp = &sc->arpcom.ac_if;
2259 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2260 
2261 	lwkt_serialize_enter(ifp->if_serializer);
2262 
2263 	mii_tick(mii);
2264 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2265 
2266 	lwkt_serialize_exit(ifp->if_serializer);
2267 }
2268 
/*
 * Global software reset of the controller: assert GHC_RESET, hold it
 * for 10us, then release.  Leaves the chip in its post-reset state;
 * jme_init() reprograms everything afterwards.
 */
static void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}
2281 
2282 static void
2283 jme_init(void *xsc)
2284 {
2285 	struct jme_softc *sc = xsc;
2286 	struct ifnet *ifp = &sc->arpcom.ac_if;
2287 	struct mii_data *mii;
2288 	uint8_t eaddr[ETHER_ADDR_LEN];
2289 	bus_addr_t paddr;
2290 	uint32_t reg;
2291 	int error, r;
2292 
2293 	ASSERT_SERIALIZED(ifp->if_serializer);
2294 
2295 	/*
2296 	 * Cancel any pending I/O.
2297 	 */
2298 	jme_stop(sc);
2299 
2300 	/*
2301 	 * Reset the chip to a known state.
2302 	 */
2303 	jme_reset(sc);
2304 
2305 	sc->jme_txd_spare =
2306 	howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2307 	KKASSERT(sc->jme_txd_spare >= 1);
2308 
2309 	/*
2310 	 * If we use 64bit address mode for transmitting, each Tx request
2311 	 * needs one more symbol descriptor.
2312 	 */
2313 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2314 		sc->jme_txd_spare += 1;
2315 
2316 	if (ifp->if_capenable & IFCAP_RSS)
2317 		jme_enable_rss(sc);
2318 	else
2319 		jme_disable_rss(sc);
2320 
2321 	/* Init RX descriptors */
2322 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2323 		error = jme_init_rx_ring(sc, r);
2324 		if (error) {
2325 			if_printf(ifp, "initialization failed: "
2326 				  "no memory for %dth RX ring.\n", r);
2327 			jme_stop(sc);
2328 			return;
2329 		}
2330 	}
2331 
2332 	/* Init TX descriptors */
2333 	jme_init_tx_ring(sc);
2334 
2335 	/* Initialize shadow status block. */
2336 	jme_init_ssb(sc);
2337 
2338 	/* Reprogram the station address. */
2339 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2340 	CSR_WRITE_4(sc, JME_PAR0,
2341 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2342 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2343 
2344 	/*
2345 	 * Configure Tx queue.
2346 	 *  Tx priority queue weight value : 0
2347 	 *  Tx FIFO threshold for processing next packet : 16QW
2348 	 *  Maximum Tx DMA length : 512
2349 	 *  Allow Tx DMA burst.
2350 	 */
2351 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2352 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2353 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2354 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2355 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2356 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2357 
2358 	/* Set Tx descriptor counter. */
2359 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2360 
2361 	/* Set Tx ring address to the hardware. */
2362 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2363 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2364 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2365 
2366 	/* Configure TxMAC parameters. */
2367 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2368 	reg |= TXMAC_THRESH_1_PKT;
2369 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2370 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2371 
2372 	/*
2373 	 * Configure Rx queue.
2374 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2375 	 *  FIFO threshold for processing next packet : 128QW
2376 	 *  Rx queue 0 select
2377 	 *  Max Rx DMA length : 128
2378 	 *  Rx descriptor retry : 32
2379 	 *  Rx descriptor retry time gap : 256ns
2380 	 *  Don't receive runt/bad frame.
2381 	 */
2382 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2383 #if 0
2384 	/*
2385 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2386 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2387 	 * decrease FIFO threshold to reduce the FIFO overruns for
2388 	 * frames larger than 4000 bytes.
2389 	 * For best performance of standard MTU sized frames use
2390 	 * maximum allowable FIFO threshold, 128QW.
2391 	 */
2392 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2393 	    JME_RX_FIFO_SIZE)
2394 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2395 	else
2396 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2397 #else
2398 	/* Improve PCI Express compatibility */
2399 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2400 #endif
2401 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2402 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2403 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2404 	/* XXX TODO DROP_BAD */
2405 
2406 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2407 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2408 
2409 		/* Set Rx descriptor counter. */
2410 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2411 
2412 		/* Set Rx ring address to the hardware. */
2413 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2414 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2415 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2416 	}
2417 
2418 	/* Clear receive filter. */
2419 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2420 
2421 	/* Set up the receive filter. */
2422 	jme_set_filter(sc);
2423 	jme_set_vlan(sc);
2424 
2425 	/*
2426 	 * Disable all WOL bits as WOL can interfere normal Rx
2427 	 * operation. Also clear WOL detection status bits.
2428 	 */
2429 	reg = CSR_READ_4(sc, JME_PMCS);
2430 	reg &= ~PMCS_WOL_ENB_MASK;
2431 	CSR_WRITE_4(sc, JME_PMCS, reg);
2432 
2433 	/*
2434 	 * Pad 10bytes right before received frame. This will greatly
2435 	 * help Rx performance on strict-alignment architectures as
2436 	 * it does not need to copy the frame to align the payload.
2437 	 */
2438 	reg = CSR_READ_4(sc, JME_RXMAC);
2439 	reg |= RXMAC_PAD_10BYTES;
2440 
2441 	if (ifp->if_capenable & IFCAP_RXCSUM)
2442 		reg |= RXMAC_CSUM_ENB;
2443 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2444 
2445 	/* Configure general purpose reg0 */
2446 	reg = CSR_READ_4(sc, JME_GPREG0);
2447 	reg &= ~GPREG0_PCC_UNIT_MASK;
2448 	/* Set PCC timer resolution to micro-seconds unit. */
2449 	reg |= GPREG0_PCC_UNIT_US;
2450 	/*
2451 	 * Disable all shadow register posting as we have to read
2452 	 * JME_INTR_STATUS register in jme_intr. Also it seems
2453 	 * that it's hard to synchronize interrupt status between
2454 	 * hardware and software with shadow posting due to
2455 	 * requirements of bus_dmamap_sync(9).
2456 	 */
2457 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2458 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2459 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2460 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2461 	/* Disable posting of DW0. */
2462 	reg &= ~GPREG0_POST_DW0_ENB;
2463 	/* Clear PME message. */
2464 	reg &= ~GPREG0_PME_ENB;
2465 	/* Set PHY address. */
2466 	reg &= ~GPREG0_PHY_ADDR_MASK;
2467 	reg |= sc->jme_phyaddr;
2468 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2469 
2470 	/* Configure Tx queue 0 packet completion coalescing. */
2471 	jme_set_tx_coal(sc);
2472 
2473 	/* Configure Rx queue 0 packet completion coalescing. */
2474 	jme_set_rx_coal(sc);
2475 
2476 	/* Configure shadow status block but don't enable posting. */
2477 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2478 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2479 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2480 
2481 	/* Disable Timer 1 and Timer 2. */
2482 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2483 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2484 
2485 	/* Configure retry transmit period, retry limit value. */
2486 	CSR_WRITE_4(sc, JME_TXTRHD,
2487 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2488 	    TXTRHD_RT_PERIOD_MASK) |
2489 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2490 	    TXTRHD_RT_LIMIT_SHIFT));
2491 
2492 #ifdef DEVICE_POLLING
2493 	if (!(ifp->if_flags & IFF_POLLING))
2494 #endif
2495 	/* Initialize the interrupt mask. */
2496 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2497 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2498 
2499 	/*
2500 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2501 	 * done after detection of valid link in jme_miibus_statchg.
2502 	 */
2503 	sc->jme_flags &= ~JME_FLAG_LINK;
2504 
2505 	/* Set the current media. */
2506 	mii = device_get_softc(sc->jme_miibus);
2507 	mii_mediachg(mii);
2508 
2509 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2510 
2511 	ifp->if_flags |= IFF_RUNNING;
2512 	ifp->if_flags &= ~IFF_OACTIVE;
2513 }
2514 
/*
 * Bring the interface down: mark it not running, cancel timers, mask
 * interrupts, halt the RX/TX engines, and release every mbuf and DMA
 * map still attached to the descriptor rings.  Called with the
 * interface serializer held.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partial finished RX segments
	 */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				bus_dmamap_unload(rdata->jme_rx_tag,
						  rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
        }
}
2585 
2586 static void
2587 jme_stop_tx(struct jme_softc *sc)
2588 {
2589 	uint32_t reg;
2590 	int i;
2591 
2592 	reg = CSR_READ_4(sc, JME_TXCSR);
2593 	if ((reg & TXCSR_TX_ENB) == 0)
2594 		return;
2595 	reg &= ~TXCSR_TX_ENB;
2596 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2597 	for (i = JME_TIMEOUT; i > 0; i--) {
2598 		DELAY(1);
2599 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2600 			break;
2601 	}
2602 	if (i == 0)
2603 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2604 }
2605 
2606 static void
2607 jme_stop_rx(struct jme_softc *sc)
2608 {
2609 	uint32_t reg;
2610 	int i;
2611 
2612 	reg = CSR_READ_4(sc, JME_RXCSR);
2613 	if ((reg & RXCSR_RX_ENB) == 0)
2614 		return;
2615 	reg &= ~RXCSR_RX_ENB;
2616 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2617 	for (i = JME_TIMEOUT; i > 0; i--) {
2618 		DELAY(1);
2619 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2620 			break;
2621 	}
2622 	if (i == 0)
2623 		device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2624 }
2625 
2626 static void
2627 jme_init_tx_ring(struct jme_softc *sc)
2628 {
2629 	struct jme_chain_data *cd;
2630 	struct jme_txdesc *txd;
2631 	int i;
2632 
2633 	sc->jme_cdata.jme_tx_prod = 0;
2634 	sc->jme_cdata.jme_tx_cons = 0;
2635 	sc->jme_cdata.jme_tx_cnt = 0;
2636 
2637 	cd = &sc->jme_cdata;
2638 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2639 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2640 		txd = &sc->jme_cdata.jme_txdesc[i];
2641 		txd->tx_m = NULL;
2642 		txd->tx_desc = &cd->jme_tx_ring[i];
2643 		txd->tx_ndesc = 0;
2644 	}
2645 }
2646 
2647 static void
2648 jme_init_ssb(struct jme_softc *sc)
2649 {
2650 	struct jme_chain_data *cd;
2651 
2652 	cd = &sc->jme_cdata;
2653 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2654 }
2655 
2656 static int
2657 jme_init_rx_ring(struct jme_softc *sc, int ring)
2658 {
2659 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2660 	struct jme_rxdesc *rxd;
2661 	int i;
2662 
2663 	KKASSERT(rdata->jme_rxhead == NULL &&
2664 		 rdata->jme_rxtail == NULL &&
2665 		 rdata->jme_rxlen == 0);
2666 	rdata->jme_rx_cons = 0;
2667 
2668 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2669 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2670 		int error;
2671 
2672 		rxd = &rdata->jme_rxdesc[i];
2673 		rxd->rx_m = NULL;
2674 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2675 		error = jme_newbuf(sc, ring, rxd, 1);
2676 		if (error)
2677 			return error;
2678 	}
2679 	return 0;
2680 }
2681 
/*
 * Attach a fresh mbuf cluster to RX descriptor `rxd'.  The new cluster
 * is DMA-loaded into the ring's spare map first; only on success are
 * the old buffer unloaded and the maps swapped, so the descriptor keeps
 * a valid buffer if allocation or loading fails.  `init' selects
 * blocking allocation (ring setup) vs. non-blocking (RX path).
 * Returns 0 or an errno.
 */
static int
jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load the new cluster into the spare map; old mapping stays valid. */
	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
			BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init)
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
		return error;
	}

	/* Release the old buffer's mapping before swapping maps. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the descriptor's map with the spare map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;

	/* Publish the new buffer to hardware; OWN must be set last. */
	desc = rxd->rx_desc;
	desc->buflen = htole32(segs.ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return 0;
}
2731 
2732 static void
2733 jme_set_vlan(struct jme_softc *sc)
2734 {
2735 	struct ifnet *ifp = &sc->arpcom.ac_if;
2736 	uint32_t reg;
2737 
2738 	ASSERT_SERIALIZED(ifp->if_serializer);
2739 
2740 	reg = CSR_READ_4(sc, JME_RXMAC);
2741 	reg &= ~RXMAC_VLAN_ENB;
2742 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2743 		reg |= RXMAC_VLAN_ENB;
2744 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2745 }
2746 
2747 static void
2748 jme_set_filter(struct jme_softc *sc)
2749 {
2750 	struct ifnet *ifp = &sc->arpcom.ac_if;
2751 	struct ifmultiaddr *ifma;
2752 	uint32_t crc;
2753 	uint32_t mchash[2];
2754 	uint32_t rxcfg;
2755 
2756 	ASSERT_SERIALIZED(ifp->if_serializer);
2757 
2758 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2759 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2760 	    RXMAC_ALLMULTI);
2761 
2762 	/*
2763 	 * Always accept frames destined to our station address.
2764 	 * Always accept broadcast frames.
2765 	 */
2766 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2767 
2768 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2769 		if (ifp->if_flags & IFF_PROMISC)
2770 			rxcfg |= RXMAC_PROMISC;
2771 		if (ifp->if_flags & IFF_ALLMULTI)
2772 			rxcfg |= RXMAC_ALLMULTI;
2773 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2774 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2775 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2776 		return;
2777 	}
2778 
2779 	/*
2780 	 * Set up the multicast address filter by passing all multicast
2781 	 * addresses through a CRC generator, and then using the low-order
2782 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2783 	 * high order bits select the register, while the rest of the bits
2784 	 * select the bit within the register.
2785 	 */
2786 	rxcfg |= RXMAC_MULTICAST;
2787 	bzero(mchash, sizeof(mchash));
2788 
2789 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2790 		if (ifma->ifma_addr->sa_family != AF_LINK)
2791 			continue;
2792 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2793 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2794 
2795 		/* Just want the 6 least significant bits. */
2796 		crc &= 0x3f;
2797 
2798 		/* Set the corresponding bit in the hash table. */
2799 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2800 	}
2801 
2802 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2803 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2804 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2805 }
2806 
2807 static int
2808 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2809 {
2810 	struct jme_softc *sc = arg1;
2811 	struct ifnet *ifp = &sc->arpcom.ac_if;
2812 	int error, v;
2813 
2814 	lwkt_serialize_enter(ifp->if_serializer);
2815 
2816 	v = sc->jme_tx_coal_to;
2817 	error = sysctl_handle_int(oidp, &v, 0, req);
2818 	if (error || req->newptr == NULL)
2819 		goto back;
2820 
2821 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2822 		error = EINVAL;
2823 		goto back;
2824 	}
2825 
2826 	if (v != sc->jme_tx_coal_to) {
2827 		sc->jme_tx_coal_to = v;
2828 		if (ifp->if_flags & IFF_RUNNING)
2829 			jme_set_tx_coal(sc);
2830 	}
2831 back:
2832 	lwkt_serialize_exit(ifp->if_serializer);
2833 	return error;
2834 }
2835 
2836 static int
2837 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2838 {
2839 	struct jme_softc *sc = arg1;
2840 	struct ifnet *ifp = &sc->arpcom.ac_if;
2841 	int error, v;
2842 
2843 	lwkt_serialize_enter(ifp->if_serializer);
2844 
2845 	v = sc->jme_tx_coal_pkt;
2846 	error = sysctl_handle_int(oidp, &v, 0, req);
2847 	if (error || req->newptr == NULL)
2848 		goto back;
2849 
2850 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2851 		error = EINVAL;
2852 		goto back;
2853 	}
2854 
2855 	if (v != sc->jme_tx_coal_pkt) {
2856 		sc->jme_tx_coal_pkt = v;
2857 		if (ifp->if_flags & IFF_RUNNING)
2858 			jme_set_tx_coal(sc);
2859 	}
2860 back:
2861 	lwkt_serialize_exit(ifp->if_serializer);
2862 	return error;
2863 }
2864 
2865 static int
2866 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2867 {
2868 	struct jme_softc *sc = arg1;
2869 	struct ifnet *ifp = &sc->arpcom.ac_if;
2870 	int error, v;
2871 
2872 	lwkt_serialize_enter(ifp->if_serializer);
2873 
2874 	v = sc->jme_rx_coal_to;
2875 	error = sysctl_handle_int(oidp, &v, 0, req);
2876 	if (error || req->newptr == NULL)
2877 		goto back;
2878 
2879 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2880 		error = EINVAL;
2881 		goto back;
2882 	}
2883 
2884 	if (v != sc->jme_rx_coal_to) {
2885 		sc->jme_rx_coal_to = v;
2886 		if (ifp->if_flags & IFF_RUNNING)
2887 			jme_set_rx_coal(sc);
2888 	}
2889 back:
2890 	lwkt_serialize_exit(ifp->if_serializer);
2891 	return error;
2892 }
2893 
2894 static int
2895 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2896 {
2897 	struct jme_softc *sc = arg1;
2898 	struct ifnet *ifp = &sc->arpcom.ac_if;
2899 	int error, v;
2900 
2901 	lwkt_serialize_enter(ifp->if_serializer);
2902 
2903 	v = sc->jme_rx_coal_pkt;
2904 	error = sysctl_handle_int(oidp, &v, 0, req);
2905 	if (error || req->newptr == NULL)
2906 		goto back;
2907 
2908 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2909 		error = EINVAL;
2910 		goto back;
2911 	}
2912 
2913 	if (v != sc->jme_rx_coal_pkt) {
2914 		sc->jme_rx_coal_pkt = v;
2915 		if (ifp->if_flags & IFF_RUNNING)
2916 			jme_set_rx_coal(sc);
2917 	}
2918 back:
2919 	lwkt_serialize_exit(ifp->if_serializer);
2920 	return error;
2921 }
2922 
2923 static void
2924 jme_set_tx_coal(struct jme_softc *sc)
2925 {
2926 	uint32_t reg;
2927 
2928 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2929 	    PCCTX_COAL_TO_MASK;
2930 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2931 	    PCCTX_COAL_PKT_MASK;
2932 	reg |= PCCTX_COAL_TXQ0;
2933 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2934 }
2935 
2936 static void
2937 jme_set_rx_coal(struct jme_softc *sc)
2938 {
2939 	uint32_t reg;
2940 	int r;
2941 
2942 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2943 	    PCCRX_COAL_TO_MASK;
2944 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2945 	    PCCRX_COAL_PKT_MASK;
2946 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
2947 		if (r < sc->jme_rx_ring_inuse)
2948 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
2949 		else
2950 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
2951 	}
2952 }
2953 
2954 #ifdef DEVICE_POLLING
2955 
/*
 * DEVICE_POLLING entry point.  REGISTER/DEREGISTER mask or unmask the
 * chip's interrupts; the poll commands drain up to `count' frames from
 * every active RX ring, restart the RX engine if it ran out of
 * descriptors, reclaim finished TX descriptors and restart TX.
 * Called with the interface serializer held.
 */
static void
jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf_chain chain[MAXCPU];
	uint32_t status;
	int r, prog = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		/* Interrupts off while polling is active. */
		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
		break;

	case POLL_DEREGISTER:
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
		break;

	case POLL_AND_CHECK_STATUS:
	case POLL_ONLY:
		status = CSR_READ_4(sc, JME_INTR_STATUS);

		ether_input_chain_init(chain);
		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
			prog += jme_rxeof_chain(sc, r, chain, count);
		if (prog)
			ether_input_dispatch(chain);

		/* Restart RX if the queue ran dry. */
		if (status & INTR_RXQ_DESC_EMPTY) {
			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		jme_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		break;
	}
}
2997 
2998 #endif	/* DEVICE_POLLING */
2999 
3000 static int
3001 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3002 {
3003 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3004 	bus_dmamem_t dmem;
3005 	int error;
3006 
3007 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3008 			JME_RX_RING_ALIGN, 0,
3009 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3010 			JME_RX_RING_SIZE(sc),
3011 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3012 	if (error) {
3013 		device_printf(sc->jme_dev,
3014 		    "could not allocate %dth Rx ring.\n", ring);
3015 		return error;
3016 	}
3017 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3018 	rdata->jme_rx_ring_map = dmem.dmem_map;
3019 	rdata->jme_rx_ring = dmem.dmem_addr;
3020 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3021 
3022 	return 0;
3023 }
3024 
/*
 * Create the DMA tag and per-descriptor DMA maps (plus one spare map)
 * for one RX ring's buffers.  On any failure all maps and the tag
 * created so far are destroyed before returning the errno, leaving
 * rdata->jme_rx_tag NULL.
 */
static int
jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	int i, error;

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", ring);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
				  &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n", ring);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, ring);

			/* Unwind: destroy all maps created so far. */
			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}
3084 
3085 static void
3086 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3087 {
3088 	struct mbuf_chain chain[MAXCPU];
3089 	int r, prog = 0;
3090 
3091 	ether_input_chain_init(chain);
3092 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3093 		if (status & jme_rx_status[r].jme_coal)
3094 			prog += jme_rxeof_chain(sc, r, chain, -1);
3095 	}
3096 	if (prog)
3097 		ether_input_dispatch(chain);
3098 }
3099 
/*
 * Enable receive-side scaling: use all configured RX rings, program
 * the RSS control register, load the Toeplitz hash key and build the
 * indirection (redirect) table that spreads hash values across rings.
 */
static void
jme_enable_rss(struct jme_softc *sc)
{
	uint32_t rssc, ind;
	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
	int i;

	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;

	/* Hardware RSS supports only 2 or 4 rings. */
	KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
		sc->jme_rx_ring_inuse == JME_NRXRING_4,
		("%s: invalid # of RX rings (%d)\n",
		 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));

	rssc = RSSC_HASH_64_ENTRY;
	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
	rssc |= sc->jme_rx_ring_inuse >> 1;
	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
	CSR_WRITE_4(sc, JME_RSSC, rssc);

	/* Load the global Toeplitz key into the key registers. */
	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < RSSKEY_NREGS; ++i) {
		uint32_t keyreg;

		keyreg = RSSKEY_REGVAL(key, i);
		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);

		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
	}

	/*
	 * Create redirect table in following fashion:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	ind = 0;
	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
		int q;

		q = i % sc->jme_rx_ring_inuse;
		ind |= q << (i * 8);
	}
	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);

	/* Every table register gets the same byte pattern. */
	for (i = 0; i < RSSTBL_NREGS; ++i)
		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
}
3146 
/* Disable RSS: fall back to a single RX ring and turn off hashing. */
static void
jme_disable_rss(struct jme_softc *sc)
{
	sc->jme_rx_ring_inuse = JME_NRXRING_1;
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}
3153