xref: /dragonfly/sys/dev/netif/jme/if_jme.c (revision bcb3e04d)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
29  */
30 
31 #include "opt_polling.h"
32 #include "opt_rss.h"
33 #include "opt_jme.h"
34 
35 #include <sys/param.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/interrupt.h>
40 #include <sys/malloc.h>
41 #include <sys/proc.h>
42 #include <sys/rman.h>
43 #include <sys/serialize.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 
48 #include <net/ethernet.h>
49 #include <net/if.h>
50 #include <net/bpf.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/ifq_var.h>
55 #include <net/toeplitz.h>
56 #include <net/toeplitz2.h>
57 #include <net/vlan/if_vlan_var.h>
58 #include <net/vlan/if_vlan_ether.h>
59 
60 #include <netinet/in.h>
61 
62 #include <dev/netif/mii_layer/miivar.h>
63 #include <dev/netif/mii_layer/jmphyreg.h>
64 
65 #include <bus/pci/pcireg.h>
66 #include <bus/pci/pcivar.h>
67 #include <bus/pci/pcidevs.h>
68 
69 #include <dev/netif/jme/if_jmereg.h>
70 #include <dev/netif/jme/if_jmevar.h>
71 
72 #include "miibus_if.h"
73 
74 /* Define JME_SHOW_ERRORS to enable printing of Rx errors. */
75 #undef	JME_SHOW_ERRORS
76 
77 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
78 
79 #ifdef JME_RSS_DEBUG
80 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
81 do { \
82 	if ((sc)->jme_rss_debug >= (lvl)) \
83 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
84 } while (0)
85 #else	/* !JME_RSS_DEBUG */
86 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
87 #endif	/* JME_RSS_DEBUG */
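
/*
 * Illustrative (hypothetical) use of the macro above; output is gated
 * on the per-device "rss_debug" sysctl created in jme_sysctl_node():
 *
 *	JME_RSS_DPRINTF(sc, 1, "dispatch pkt to ring %d (hash 0x%08x)\n",
 *	    ring, hash);
 */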
88 
89 static int	jme_probe(device_t);
90 static int	jme_attach(device_t);
91 static int	jme_detach(device_t);
92 static int	jme_shutdown(device_t);
93 static int	jme_suspend(device_t);
94 static int	jme_resume(device_t);
95 
96 static int	jme_miibus_readreg(device_t, int, int);
97 static int	jme_miibus_writereg(device_t, int, int, int);
98 static void	jme_miibus_statchg(device_t);
99 
100 static void	jme_init(void *);
101 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
102 static void	jme_start(struct ifnet *);
103 static void	jme_watchdog(struct ifnet *);
104 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
105 static int	jme_mediachange(struct ifnet *);
106 #ifdef DEVICE_POLLING
107 static void	jme_poll(struct ifnet *, enum poll_cmd, int);
108 #endif
109 
110 static void	jme_intr(void *);
111 static void	jme_txeof(struct jme_softc *);
112 static void	jme_rxeof(struct jme_softc *, int);
113 static int	jme_rxeof_chain(struct jme_softc *, int,
114 				struct mbuf_chain *, int);
115 static void	jme_rx_intr(struct jme_softc *, uint32_t);
116 
117 static int	jme_dma_alloc(struct jme_softc *);
118 static void	jme_dma_free(struct jme_softc *);
119 static int	jme_init_rx_ring(struct jme_softc *, int);
120 static void	jme_init_tx_ring(struct jme_softc *);
121 static void	jme_init_ssb(struct jme_softc *);
122 static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
123 static int	jme_encap(struct jme_softc *, struct mbuf **);
124 static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
125 static int	jme_rxring_dma_alloc(struct jme_softc *, int);
126 static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);
127 
128 static void	jme_tick(void *);
129 static void	jme_stop(struct jme_softc *);
130 static void	jme_reset(struct jme_softc *);
131 static void	jme_set_vlan(struct jme_softc *);
132 static void	jme_set_filter(struct jme_softc *);
133 static void	jme_stop_tx(struct jme_softc *);
134 static void	jme_stop_rx(struct jme_softc *);
135 static void	jme_mac_config(struct jme_softc *);
136 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
137 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
138 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
139 #ifdef notyet
140 static void	jme_setwol(struct jme_softc *);
141 static void	jme_setlinkspeed(struct jme_softc *);
142 #endif
143 static void	jme_set_tx_coal(struct jme_softc *);
144 static void	jme_set_rx_coal(struct jme_softc *);
145 static void	jme_enable_rss(struct jme_softc *);
146 static void	jme_disable_rss(struct jme_softc *);
147 
148 static void	jme_sysctl_node(struct jme_softc *);
149 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
150 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
151 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
152 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
153 
154 /*
155  * Devices supported by this driver.
156  */
157 static const struct jme_dev {
158 	uint16_t	jme_vendorid;
159 	uint16_t	jme_deviceid;
160 	uint32_t	jme_caps;
161 	const char	*jme_name;
162 } jme_devs[] = {
163 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
164 	    JME_CAP_JUMBO,
165 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
166 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
167 	    JME_CAP_FASTETH,
168 	    "JMicron Inc, JMC260 Fast Ethernet" },
169 	{ 0, 0, 0, NULL }
170 };
171 
172 static device_method_t jme_methods[] = {
173 	/* Device interface. */
174 	DEVMETHOD(device_probe,		jme_probe),
175 	DEVMETHOD(device_attach,	jme_attach),
176 	DEVMETHOD(device_detach,	jme_detach),
177 	DEVMETHOD(device_shutdown,	jme_shutdown),
178 	DEVMETHOD(device_suspend,	jme_suspend),
179 	DEVMETHOD(device_resume,	jme_resume),
180 
181 	/* Bus interface. */
182 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
183 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
184 
185 	/* MII interface. */
186 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
187 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
188 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
189 
190 	{ NULL, NULL }
191 };
192 
193 static driver_t jme_driver = {
194 	"jme",
195 	jme_methods,
196 	sizeof(struct jme_softc)
197 };
198 
199 static devclass_t jme_devclass;
200 
201 DECLARE_DUMMY_MODULE(if_jme);
202 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
203 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
204 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
205 
206 static const struct {
207 	uint32_t	jme_coal;
208 	uint32_t	jme_comp;
209 } jme_rx_status[JME_NRXRING_MAX] = {
210 	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
211 	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
212 	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
213 	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
214 };
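
/*
 * jme_rx_status[] maps each RX ring to its coalescing and completion
 * interrupt status bits; jme_intr() consults this table to acknowledge
 * and dispatch per-ring RX work instead of open-coding four cases.
 */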
215 
216 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
217 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
218 static int	jme_rx_ring_count = JME_NRXRING_DEF;
219 
220 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
221 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
222 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
223 
224 /*
225  *	Read a PHY register on the MII of the JMC250.
226  */
227 static int
228 jme_miibus_readreg(device_t dev, int phy, int reg)
229 {
230 	struct jme_softc *sc = device_get_softc(dev);
231 	uint32_t val;
232 	int i;
233 
234 	/* For FPGA version, PHY address 0 should be ignored. */
235 	if (sc->jme_caps & JME_CAP_FPGA) {
236 		if (phy == 0)
237 			return (0);
238 	} else {
239 		if (sc->jme_phyaddr != phy)
240 			return (0);
241 	}
242 
243 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
244 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
245 
246 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
247 		DELAY(1);
248 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
249 			break;
250 	}
251 	if (i == 0) {
252 		device_printf(sc->jme_dev, "phy read timeout: "
253 			      "phy %d, reg %d\n", phy, reg);
254 		return (0);
255 	}
256 
257 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
258 }
259 
260 /*
261  *	Write a PHY register on the MII of the JMC250.
262  */
263 static int
264 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
265 {
266 	struct jme_softc *sc = device_get_softc(dev);
267 	int i;
268 
269 	/* For FPGA version, PHY address 0 should be ignored. */
270 	if (sc->jme_caps & JME_CAP_FPGA) {
271 		if (phy == 0)
272 			return (0);
273 	} else {
274 		if (sc->jme_phyaddr != phy)
275 			return (0);
276 	}
277 
278 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
279 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
280 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
281 
282 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
283 		DELAY(1);
284 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
285 			break;
286 	}
287 	if (i == 0) {
288 		device_printf(sc->jme_dev, "phy write timeout: "
289 			      "phy %d, reg %d\n", phy, reg);
290 	}
291 
292 	return (0);
293 }
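
/*
 * Both SMI accessors above use the same handshake: write the command
 * to JME_SMI with SMI_OP_EXECUTE set, then poll (up to JME_PHY_TIMEOUT
 * iterations with a 1us delay each) until the controller clears
 * SMI_OP_EXECUTE to signal completion.
 */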
294 
295 /*
296  *	Callback from MII layer when media changes.
297  */
298 static void
299 jme_miibus_statchg(device_t dev)
300 {
301 	struct jme_softc *sc = device_get_softc(dev);
302 	struct ifnet *ifp = &sc->arpcom.ac_if;
303 	struct mii_data *mii;
304 	struct jme_txdesc *txd;
305 	bus_addr_t paddr;
306 	int i, r;
307 
308 	ASSERT_SERIALIZED(ifp->if_serializer);
309 
310 	if ((ifp->if_flags & IFF_RUNNING) == 0)
311 		return;
312 
313 	mii = device_get_softc(sc->jme_miibus);
314 
315 	sc->jme_flags &= ~JME_FLAG_LINK;
316 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
317 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
318 		case IFM_10_T:
319 		case IFM_100_TX:
320 			sc->jme_flags |= JME_FLAG_LINK;
321 			break;
322 		case IFM_1000_T:
323 			if (sc->jme_caps & JME_CAP_FASTETH)
324 				break;
325 			sc->jme_flags |= JME_FLAG_LINK;
326 			break;
327 		default:
328 			break;
329 		}
330 	}
331 
332 	/*
333 	 * Disabling the Rx/Tx MACs has the side effect of resetting
334 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
335 	 * the Tx/Rx descriptor rings, so the driver must reset its
336 	 * internal producer/consumer pointers and reclaim any
337 	 * allocated resources.  Note that merely saving the values of
338 	 * the JME_TXNDA and JME_RXNDA registers before stopping the
339 	 * MAC and restoring them afterwards is not sufficient to
340 	 * guarantee a correct MAC state, because stopping the MAC
341 	 * can take a while and the hardware may update the
342 	 * JME_TXNDA/JME_RXNDA registers during the stop
343 	 * operation.
344 	 */
345 
346 	/* Disable interrupts */
347 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
348 
349 	/* Stop driver */
350 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
351 	ifp->if_timer = 0;
352 	callout_stop(&sc->jme_tick_ch);
353 
354 	/* Stop receiver/transmitter. */
355 	jme_stop_rx(sc);
356 	jme_stop_tx(sc);
357 
358 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
359 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
360 
361 		jme_rxeof(sc, r);
362 		if (rdata->jme_rxhead != NULL)
363 			m_freem(rdata->jme_rxhead);
364 		JME_RXCHAIN_RESET(sc, r);
365 
366 		/*
367 		 * Reuse configured Rx descriptors and reset
368 		 * producer/consumer index.
369 		 */
370 		rdata->jme_rx_cons = 0;
371 	}
372 
373 	jme_txeof(sc);
374 	if (sc->jme_cdata.jme_tx_cnt != 0) {
375 		/* Remove queued packets for transmit. */
376 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
377 			txd = &sc->jme_cdata.jme_txdesc[i];
378 			if (txd->tx_m != NULL) {
379 				bus_dmamap_unload(
380 				    sc->jme_cdata.jme_tx_tag,
381 				    txd->tx_dmamap);
382 				m_freem(txd->tx_m);
383 				txd->tx_m = NULL;
384 				txd->tx_ndesc = 0;
385 				ifp->if_oerrors++;
386 			}
387 		}
388 	}
389 	jme_init_tx_ring(sc);
390 
391 	/* Initialize shadow status block. */
392 	jme_init_ssb(sc);
393 
394 	/* Program MAC with resolved speed/duplex/flow-control. */
395 	if (sc->jme_flags & JME_FLAG_LINK) {
396 		jme_mac_config(sc);
397 
398 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
399 
400 		/* Set Tx ring address to the hardware. */
401 		paddr = sc->jme_cdata.jme_tx_ring_paddr;
402 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
403 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
404 
405 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
406 			CSR_WRITE_4(sc, JME_RXCSR,
407 			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
408 
409 			/* Set Rx ring address to the hardware. */
410 			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
411 			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
412 			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
413 		}
414 
415 		/* Restart receiver/transmitter. */
416 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
417 		    RXCSR_RXQ_START);
418 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
419 	}
420 
421 	ifp->if_flags |= IFF_RUNNING;
422 	ifp->if_flags &= ~IFF_OACTIVE;
423 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
424 
425 #ifdef DEVICE_POLLING
426 	if (!(ifp->if_flags & IFF_POLLING))
427 #endif
428 	/* Reenable interrupts. */
429 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
430 }
431 
432 /*
433  *	Get the current interface media status.
434  */
435 static void
436 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
437 {
438 	struct jme_softc *sc = ifp->if_softc;
439 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
440 
441 	ASSERT_SERIALIZED(ifp->if_serializer);
442 
443 	mii_pollstat(mii);
444 	ifmr->ifm_status = mii->mii_media_status;
445 	ifmr->ifm_active = mii->mii_media_active;
446 }
447 
448 /*
449  *	Set hardware to newly-selected media.
450  */
451 static int
452 jme_mediachange(struct ifnet *ifp)
453 {
454 	struct jme_softc *sc = ifp->if_softc;
455 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
456 	int error;
457 
458 	ASSERT_SERIALIZED(ifp->if_serializer);
459 
460 	if (mii->mii_instance != 0) {
461 		struct mii_softc *miisc;
462 
463 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
464 			mii_phy_reset(miisc);
465 	}
466 	error = mii_mediachg(mii);
467 
468 	return (error);
469 }
470 
471 static int
472 jme_probe(device_t dev)
473 {
474 	const struct jme_dev *sp;
475 	uint16_t vid, did;
476 
477 	vid = pci_get_vendor(dev);
478 	did = pci_get_device(dev);
479 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
480 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
481 			struct jme_softc *sc = device_get_softc(dev);
482 
483 			sc->jme_caps = sp->jme_caps;
484 			device_set_desc(dev, sp->jme_name);
485 			return (0);
486 		}
487 	}
488 	return (ENXIO);
489 }
490 
491 static int
492 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
493 {
494 	uint32_t reg;
495 	int i;
496 
497 	*val = 0;
498 	for (i = JME_TIMEOUT; i > 0; i--) {
499 		reg = CSR_READ_4(sc, JME_SMBCSR);
500 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
501 			break;
502 		DELAY(1);
503 	}
504 
505 	if (i == 0) {
506 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
507 		return (ETIMEDOUT);
508 	}
509 
510 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
511 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
512 	for (i = JME_TIMEOUT; i > 0; i--) {
513 		DELAY(1);
514 		reg = CSR_READ_4(sc, JME_SMBINTF);
515 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
516 			break;
517 	}
518 
519 	if (i == 0) {
520 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
521 		return (ETIMEDOUT);
522 	}
523 
524 	reg = CSR_READ_4(sc, JME_SMBINTF);
525 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
526 
527 	return (0);
528 }
529 
530 static int
531 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
532 {
533 	uint8_t fup, reg, val;
534 	uint32_t offset;
535 	int match;
536 
537 	offset = 0;
538 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
539 	    fup != JME_EEPROM_SIG0)
540 		return (ENOENT);
541 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
542 	    fup != JME_EEPROM_SIG1)
543 		return (ENOENT);
544 	match = 0;
545 	do {
546 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
547 			break;
548 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
549 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
550 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
551 				break;
552 			if (reg >= JME_PAR0 &&
553 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
554 				if (jme_eeprom_read_byte(sc, offset + 2,
555 				    &val) != 0)
556 					break;
557 				eaddr[reg - JME_PAR0] = val;
558 				match++;
559 			}
560 		}
561 		/* Check for the end of EEPROM descriptor. */
562 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
563 			break;
564 		/* Try next eeprom descriptor. */
565 		offset += JME_EEPROM_DESC_BYTES;
566 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
567 
568 	if (match == ETHER_ADDR_LEN)
569 		return (0);
570 
571 	return (ENOENT);
572 }
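
/*
 * As parsed above, each station-address entry in the EEPROM is a
 * JME_EEPROM_DESC_BYTES-long descriptor:
 *
 *	byte 0: function/page selector, plus the JME_EEPROM_DESC_END
 *		flag on the last descriptor
 *	byte 1: target register offset (JME_PAR0 .. JME_PAR0 + 5)
 *	byte 2: value to load into that register
 *
 * A MAC address is recovered once all ETHER_ADDR_LEN bytes match.
 */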
573 
574 static void
575 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
576 {
577 	uint32_t par0, par1;
578 
579 	/* Read station address. */
580 	par0 = CSR_READ_4(sc, JME_PAR0);
581 	par1 = CSR_READ_4(sc, JME_PAR1);
582 	par1 &= 0xFFFF;
583 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
584 		device_printf(sc->jme_dev,
585 		    "generating fake ethernet address.\n");
586 		par0 = karc4random();
587 		/* Set OUI to JMicron. */
588 		eaddr[0] = 0x00;
589 		eaddr[1] = 0x1B;
590 		eaddr[2] = 0x8C;
591 		eaddr[3] = (par0 >> 16) & 0xff;
592 		eaddr[4] = (par0 >> 8) & 0xff;
593 		eaddr[5] = par0 & 0xff;
594 	} else {
595 		eaddr[0] = (par0 >> 0) & 0xFF;
596 		eaddr[1] = (par0 >> 8) & 0xFF;
597 		eaddr[2] = (par0 >> 16) & 0xFF;
598 		eaddr[3] = (par0 >> 24) & 0xFF;
599 		eaddr[4] = (par1 >> 0) & 0xFF;
600 		eaddr[5] = (par1 >> 8) & 0xFF;
601 	}
602 }
603 
604 static int
605 jme_attach(device_t dev)
606 {
607 	struct jme_softc *sc = device_get_softc(dev);
608 	struct ifnet *ifp = &sc->arpcom.ac_if;
609 	uint32_t reg;
610 	uint16_t did;
611 	uint8_t pcie_ptr, rev;
612 	int error = 0;
613 	uint8_t eaddr[ETHER_ADDR_LEN];
614 
615 	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
616 	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
617 		sc->jme_rx_desc_cnt = JME_NDESC_MAX;
618 
619 	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
620 	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
621 		sc->jme_tx_desc_cnt = JME_NDESC_MAX;
622 
623 	/*
624 	 * Calculate the number of RX rings based on ncpus2.
625 	 */
626 	sc->jme_rx_ring_cnt = jme_rx_ring_count;
627 	if (sc->jme_rx_ring_cnt <= 0)
628 		sc->jme_rx_ring_cnt = JME_NRXRING_1;
629 	if (sc->jme_rx_ring_cnt > ncpus2)
630 		sc->jme_rx_ring_cnt = ncpus2;
631 
632 	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
633 		sc->jme_rx_ring_cnt = JME_NRXRING_4;
634 	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
635 		sc->jme_rx_ring_cnt = JME_NRXRING_2;
636 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
637 
638 	sc->jme_dev = dev;
639 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
640 
641 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
642 
643 	callout_init(&sc->jme_tick_ch);
644 
645 #ifndef BURN_BRIDGES
646 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
647 		uint32_t irq, mem;
648 
649 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
650 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
651 
652 		device_printf(dev, "chip is in D%d power mode "
653 		    "-- setting to D0\n", pci_get_powerstate(dev));
654 
655 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
656 
657 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
658 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
659 	}
660 #endif	/* !BURN_BRIDGES */
661 
662 	/* Enable bus mastering */
663 	pci_enable_busmaster(dev);
664 
665 	/*
666 	 * Allocate IO memory
667 	 *
668 	 * The JMC250 supports both memory-mapped and I/O register
669 	 * access.  Since I/O access would need separate BARs to reach
670 	 * the registers, there is no point in using it; memory-mapped
671 	 * access is used instead.  The JMC250 maps its entire register
672 	 * space into 16K of memory.
673 	 */
674 	sc->jme_mem_rid = JME_PCIR_BAR;
675 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
676 						 &sc->jme_mem_rid, RF_ACTIVE);
677 	if (sc->jme_mem_res == NULL) {
678 		device_printf(dev, "can't allocate IO memory\n");
679 		return ENXIO;
680 	}
681 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
682 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
683 
684 	/*
685 	 * Allocate IRQ
686 	 */
687 	sc->jme_irq_rid = 0;
688 	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
689 						 &sc->jme_irq_rid,
690 						 RF_SHAREABLE | RF_ACTIVE);
691 	if (sc->jme_irq_res == NULL) {
692 		device_printf(dev, "can't allocate irq\n");
693 		error = ENXIO;
694 		goto fail;
695 	}
696 
697 	/*
698 	 * Extract revisions
699 	 */
700 	reg = CSR_READ_4(sc, JME_CHIPMODE);
701 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
702 	    CHIPMODE_NOT_FPGA) {
703 		sc->jme_caps |= JME_CAP_FPGA;
704 		if (bootverbose) {
705 			device_printf(dev, "FPGA revision: 0x%04x\n",
706 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
707 				      CHIPMODE_FPGA_REV_SHIFT);
708 		}
709 	}
710 
711 	/* NOTE: FM revision is put in the upper 4 bits */
712 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
713 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
714 	if (bootverbose)
715 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
716 
717 	did = pci_get_device(dev);
718 	switch (did) {
719 	case PCI_PRODUCT_JMICRON_JMC250:
720 		if (rev == JME_REV1_A2)
721 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
722 		break;
723 
724 	case PCI_PRODUCT_JMICRON_JMC260:
725 		if (rev == JME_REV2)
726 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
727 		break;
728 
729 	default:
730 		panic("unknown device id 0x%04x\n", did);
731 	}
732 	if (rev >= JME_REV2) {
733 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
734 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
735 				      GHC_TXMAC_CLKSRC_1000;
736 	}
737 
738 	/* Reset the ethernet controller. */
739 	jme_reset(sc);
740 
741 	/* Get station address. */
742 	reg = CSR_READ_4(sc, JME_SMBCSR);
743 	if (reg & SMBCSR_EEPROM_PRESENT)
744 		error = jme_eeprom_macaddr(sc, eaddr);
745 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
746 		if (error != 0 && (bootverbose)) {
747 			device_printf(dev, "ethernet hardware address "
748 				      "not found in EEPROM.\n");
749 		}
750 		jme_reg_macaddr(sc, eaddr);
751 	}
752 
753 	/*
754 	 * Save PHY address.
755 	 * The integrated JR0211 has a fixed PHY address, whereas the
756 	 * FPGA version requires PHY probing to find the correct address.
757 	 */
758 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
759 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
760 		    GPREG0_PHY_ADDR_MASK;
761 		if (bootverbose) {
762 			device_printf(dev, "PHY is at address %d.\n",
763 			    sc->jme_phyaddr);
764 		}
765 	} else {
766 		sc->jme_phyaddr = 0;
767 	}
768 
769 	/* Set max allowable DMA size. */
770 	pcie_ptr = pci_get_pciecap_ptr(dev);
771 	if (pcie_ptr != 0) {
772 		uint16_t ctrl;
773 
774 		sc->jme_caps |= JME_CAP_PCIE;
775 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
776 		if (bootverbose) {
777 			device_printf(dev, "Read request size : %d bytes.\n",
778 			    128 << ((ctrl >> 12) & 0x07));
779 			device_printf(dev, "TLP payload size : %d bytes.\n",
780 			    128 << ((ctrl >> 5) & 0x07));
781 		}
782 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
783 		case PCIEM_DEVCTL_MAX_READRQ_128:
784 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
785 			break;
786 		case PCIEM_DEVCTL_MAX_READRQ_256:
787 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
788 			break;
789 		default:
790 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
791 			break;
792 		}
793 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
794 	} else {
795 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
796 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
797 	}
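
	/*
	 * Summary of the selection above: the TX DMA burst size follows
	 * the PCIe Max Read Request Size (128 -> 128, 256 -> 256,
	 * anything larger -> 512), while plain PCI parts always use a
	 * 512-byte TX burst.  The RX DMA size is 128 bytes in all cases.
	 */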
798 
799 #ifdef notyet
800 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
801 		sc->jme_caps |= JME_CAP_PMCAP;
802 #endif
803 
804 	/*
805 	 * Create sysctl tree
806 	 */
807 	jme_sysctl_node(sc);
808 
809 	/* Allocate DMA resources */
810 	error = jme_dma_alloc(sc);
811 	if (error)
812 		goto fail;
813 
814 	ifp->if_softc = sc;
815 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
816 	ifp->if_init = jme_init;
817 	ifp->if_ioctl = jme_ioctl;
818 	ifp->if_start = jme_start;
819 #ifdef DEVICE_POLLING
820 	ifp->if_poll = jme_poll;
821 #endif
822 	ifp->if_watchdog = jme_watchdog;
823 	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
824 	ifq_set_ready(&ifp->if_snd);
825 
826 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
827 	ifp->if_capabilities = IFCAP_HWCSUM |
828 			       IFCAP_VLAN_MTU |
829 			       IFCAP_VLAN_HWTAGGING;
830 	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
831 		ifp->if_capabilities |= IFCAP_RSS;
832 	ifp->if_capenable = ifp->if_capabilities;
833 
834 	/*
835 	 * Disable TXCSUM by default to improve bulk data
836 	 * transmit performance (roughly +20Mbps).
837 	 */
838 	ifp->if_capenable &= ~IFCAP_TXCSUM;
839 
840 	if (ifp->if_capenable & IFCAP_TXCSUM)
841 		ifp->if_hwassist = JME_CSUM_FEATURES;
842 
843 	/* Set up MII bus. */
844 	error = mii_phy_probe(dev, &sc->jme_miibus,
845 			      jme_mediachange, jme_mediastatus);
846 	if (error) {
847 		device_printf(dev, "no PHY found!\n");
848 		goto fail;
849 	}
850 
851 	/*
852 	 * Save PHYADDR for FPGA mode PHY.
853 	 */
854 	if (sc->jme_caps & JME_CAP_FPGA) {
855 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
856 
857 		if (mii->mii_instance != 0) {
858 			struct mii_softc *miisc;
859 
860 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
861 				if (miisc->mii_phy != 0) {
862 					sc->jme_phyaddr = miisc->mii_phy;
863 					break;
864 				}
865 			}
866 			if (sc->jme_phyaddr != 0) {
867 				device_printf(sc->jme_dev,
868 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
869 				/* vendor magic. */
870 				jme_miibus_writereg(dev, sc->jme_phyaddr,
871 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
872 
873 				/* XXX should we clear JME_WA_EXTFIFO */
874 			}
875 		}
876 	}
877 
878 	ether_ifattach(ifp, eaddr, NULL);
879 
880 	/* Tell the upper layer(s) we support long frames. */
881 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
882 
883 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
884 			       &sc->jme_irq_handle, ifp->if_serializer);
885 	if (error) {
886 		device_printf(dev, "could not set up interrupt handler.\n");
887 		ether_ifdetach(ifp);
888 		goto fail;
889 	}
890 
891 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
892 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
893 	return 0;
894 fail:
895 	jme_detach(dev);
896 	return (error);
897 }
898 
899 static int
900 jme_detach(device_t dev)
901 {
902 	struct jme_softc *sc = device_get_softc(dev);
903 
904 	if (device_is_attached(dev)) {
905 		struct ifnet *ifp = &sc->arpcom.ac_if;
906 
907 		lwkt_serialize_enter(ifp->if_serializer);
908 		jme_stop(sc);
909 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
910 		lwkt_serialize_exit(ifp->if_serializer);
911 
912 		ether_ifdetach(ifp);
913 	}
914 
915 	if (sc->jme_sysctl_tree != NULL)
916 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
917 
918 	if (sc->jme_miibus != NULL)
919 		device_delete_child(dev, sc->jme_miibus);
920 	bus_generic_detach(dev);
921 
922 	if (sc->jme_irq_res != NULL) {
923 		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
924 				     sc->jme_irq_res);
925 	}
926 
927 	if (sc->jme_mem_res != NULL) {
928 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
929 				     sc->jme_mem_res);
930 	}
931 
932 	jme_dma_free(sc);
933 
934 	return (0);
935 }
936 
937 static void
938 jme_sysctl_node(struct jme_softc *sc)
939 {
940 	int coal_max;
941 #ifdef JME_RSS_DEBUG
942 	char rx_ring_pkt[32];
943 	int r;
944 #endif
945 
946 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
947 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
948 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
949 				device_get_nameunit(sc->jme_dev),
950 				CTLFLAG_RD, 0, "");
951 	if (sc->jme_sysctl_tree == NULL) {
952 		device_printf(sc->jme_dev, "can't add sysctl node\n");
953 		return;
954 	}
955 
956 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
957 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
958 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
959 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
960 
961 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
962 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
963 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
964 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
965 
966 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
967 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
968 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
969 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
970 
971 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
972 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
973 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
974 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
975 
976 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
977 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
978 		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
979 		       0, "RX desc count");
980 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
981 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
982 		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
983 		       0, "TX desc count");
984 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
985 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
986 		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
987 		       0, "RX ring count");
988 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
989 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
990 		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
991 		       0, "RX ring in use");
992 #ifdef JME_RSS_DEBUG
993 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
994 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
995 		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
996 		       0, "RSS debug level");
997 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
998 		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
999 		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
1000 				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1001 				rx_ring_pkt, CTLFLAG_RW,
1002 				&sc->jme_rx_ring_pkt[r],
1003 				0, "RXed packets");
1004 	}
1005 #endif
1006 
1007 	/*
1008 	 * Set default coalescing values.
1009 	 */
1010 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1011 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1012 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1013 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1014 
1015 	/*
1016 	 * Adjust the coalescing values, in case the number of TX/RX
1017 	 * descriptors was set to a small value by the user.
1018 	 *
1019 	 * NOTE: coal_max cannot be zero, since the number of
1020 	 * descriptors must be aligned to JME_NDESC_ALIGN (currently 16).
1021 	 */
1022 	coal_max = sc->jme_tx_desc_cnt / 6;
1023 	if (coal_max < sc->jme_tx_coal_pkt)
1024 		sc->jme_tx_coal_pkt = coal_max;
1025 
1026 	coal_max = sc->jme_rx_desc_cnt / 4;
1027 	if (coal_max < sc->jme_rx_coal_pkt)
1028 		sc->jme_rx_coal_pkt = coal_max;
1029 }
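
/*
 * The coalescing knobs created above can be tuned at runtime; a purely
 * illustrative example for unit 0:
 *
 *	sysctl hw.jme0.tx_coal_to=200
 *	sysctl hw.jme0.rx_coal_pkt=64
 */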
1030 
1031 static int
1032 jme_dma_alloc(struct jme_softc *sc)
1033 {
1034 	struct jme_txdesc *txd;
1035 	bus_dmamem_t dmem;
1036 	int error, i;
1037 
1038 	sc->jme_cdata.jme_txdesc =
1039 	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1040 		M_DEVBUF, M_WAITOK | M_ZERO);
1041 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1042 		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
1043 		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1044 			M_DEVBUF, M_WAITOK | M_ZERO);
1045 	}
1046 
1047 	/* Create parent ring tag. */
1048 	error = bus_dma_tag_create(NULL,/* parent */
1049 	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
1050 	    sc->jme_lowaddr,		/* lowaddr */
1051 	    BUS_SPACE_MAXADDR,		/* highaddr */
1052 	    NULL, NULL,			/* filter, filterarg */
1053 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1054 	    0,				/* nsegments */
1055 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1056 	    0,				/* flags */
1057 	    &sc->jme_cdata.jme_ring_tag);
1058 	if (error) {
1059 		device_printf(sc->jme_dev,
1060 		    "could not create parent ring DMA tag.\n");
1061 		return error;
1062 	}
1063 
1064 	/*
1065 	 * Create DMA resources for the TX ring
1066 	 */
1067 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1068 			JME_TX_RING_ALIGN, 0,
1069 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1070 			JME_TX_RING_SIZE(sc),
1071 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1072 	if (error) {
1073 		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1074 		return error;
1075 	}
1076 	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1077 	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1078 	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1079 	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
1080 
1081 	/*
1082 	 * Create DMA resources for the RX rings
1083 	 */
1084 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1085 		error = jme_rxring_dma_alloc(sc, i);
1086 		if (error)
1087 			return error;
1088 	}
1089 
1090 	/* Create parent buffer tag. */
1091 	error = bus_dma_tag_create(NULL,/* parent */
1092 	    1, 0,			/* algnmnt, boundary */
1093 	    sc->jme_lowaddr,		/* lowaddr */
1094 	    BUS_SPACE_MAXADDR,		/* highaddr */
1095 	    NULL, NULL,			/* filter, filterarg */
1096 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1097 	    0,				/* nsegments */
1098 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1099 	    0,				/* flags */
1100 	    &sc->jme_cdata.jme_buffer_tag);
1101 	if (error) {
1102 		device_printf(sc->jme_dev,
1103 		    "could not create parent buffer DMA tag.\n");
1104 		return error;
1105 	}
1106 
1107 	/*
1108 	 * Create DMA resources for the shadow status block
1109 	 */
1110 	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1111 			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1112 			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1113 	if (error) {
1114 		device_printf(sc->jme_dev,
1115 		    "could not create shadow status block.\n");
1116 		return error;
1117 	}
1118 	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1119 	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1120 	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1121 	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1122 
1123 	/*
1124 	 * Create DMA resources for TX buffers
1125 	 */
1126 
1127 	/* Create tag for Tx buffers. */
1128 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1129 	    1, 0,			/* algnmnt, boundary */
1130 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1131 	    BUS_SPACE_MAXADDR,		/* highaddr */
1132 	    NULL, NULL,			/* filter, filterarg */
1133 	    JME_JUMBO_FRAMELEN,		/* maxsize */
1134 	    JME_MAXTXSEGS,		/* nsegments */
1135 	    JME_MAXSEGSIZE,		/* maxsegsize */
1136 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1137 	    &sc->jme_cdata.jme_tx_tag);
1138 	if (error != 0) {
1139 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1140 		return error;
1141 	}
1142 
1143 	/* Create DMA maps for Tx buffers. */
1144 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1145 		txd = &sc->jme_cdata.jme_txdesc[i];
1146 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1147 				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1148 				&txd->tx_dmamap);
1149 		if (error) {
1150 			int j;
1151 
1152 			device_printf(sc->jme_dev,
1153 			    "could not create %dth Tx dmamap.\n", i);
1154 
1155 			for (j = 0; j < i; ++j) {
1156 				txd = &sc->jme_cdata.jme_txdesc[j];
1157 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1158 						   txd->tx_dmamap);
1159 			}
1160 			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1161 			sc->jme_cdata.jme_tx_tag = NULL;
1162 			return error;
1163 		}
1164 	}
1165 
1166 	/*
1167 	 * Create DMA resources for RX buffers
1168 	 */
1169 	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1170 		error = jme_rxbuf_dma_alloc(sc, i);
1171 		if (error)
1172 			return error;
1173 	}
1174 	return 0;
1175 }
1176 
1177 static void
1178 jme_dma_free(struct jme_softc *sc)
1179 {
1180 	struct jme_txdesc *txd;
1181 	struct jme_rxdesc *rxd;
1182 	struct jme_rxdata *rdata;
1183 	int i, r;
1184 
1185 	/* Tx ring */
1186 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1187 		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1188 		    sc->jme_cdata.jme_tx_ring_map);
1189 		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1190 		    sc->jme_cdata.jme_tx_ring,
1191 		    sc->jme_cdata.jme_tx_ring_map);
1192 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1193 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1194 	}
1195 
1196 	/* Rx ring */
1197 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1198 		rdata = &sc->jme_cdata.jme_rx_data[r];
1199 		if (rdata->jme_rx_ring_tag != NULL) {
1200 			bus_dmamap_unload(rdata->jme_rx_ring_tag,
1201 					  rdata->jme_rx_ring_map);
1202 			bus_dmamem_free(rdata->jme_rx_ring_tag,
1203 					rdata->jme_rx_ring,
1204 					rdata->jme_rx_ring_map);
1205 			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1206 			rdata->jme_rx_ring_tag = NULL;
1207 		}
1208 	}
1209 
1210 	/* Tx buffers */
1211 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1212 		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1213 			txd = &sc->jme_cdata.jme_txdesc[i];
1214 			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1215 			    txd->tx_dmamap);
1216 		}
1217 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1218 		sc->jme_cdata.jme_tx_tag = NULL;
1219 	}
1220 
1221 	/* Rx buffers */
1222 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1223 		rdata = &sc->jme_cdata.jme_rx_data[r];
1224 		if (rdata->jme_rx_tag != NULL) {
1225 			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1226 				rxd = &rdata->jme_rxdesc[i];
1227 				bus_dmamap_destroy(rdata->jme_rx_tag,
1228 						   rxd->rx_dmamap);
1229 			}
1230 			bus_dmamap_destroy(rdata->jme_rx_tag,
1231 					   rdata->jme_rx_sparemap);
1232 			bus_dma_tag_destroy(rdata->jme_rx_tag);
1233 			rdata->jme_rx_tag = NULL;
1234 		}
1235 	}
1236 
1237 	/* Shadow status block. */
1238 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1239 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1240 		    sc->jme_cdata.jme_ssb_map);
1241 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1242 		    sc->jme_cdata.jme_ssb_block,
1243 		    sc->jme_cdata.jme_ssb_map);
1244 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1245 		sc->jme_cdata.jme_ssb_tag = NULL;
1246 	}
1247 
1248 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1249 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1250 		sc->jme_cdata.jme_buffer_tag = NULL;
1251 	}
1252 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1253 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1254 		sc->jme_cdata.jme_ring_tag = NULL;
1255 	}
1256 
1257 	if (sc->jme_cdata.jme_txdesc != NULL) {
1258 		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1259 		sc->jme_cdata.jme_txdesc = NULL;
1260 	}
1261 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1262 		rdata = &sc->jme_cdata.jme_rx_data[r];
1263 		if (rdata->jme_rxdesc != NULL) {
1264 			kfree(rdata->jme_rxdesc, M_DEVBUF);
1265 			rdata->jme_rxdesc = NULL;
1266 		}
1267 	}
1268 }
1269 
1270 /*
1271  *	Make sure the interface is stopped at reboot time.
1272  */
1273 static int
1274 jme_shutdown(device_t dev)
1275 {
1276 	return jme_suspend(dev);
1277 }
1278 
1279 #ifdef notyet
1280 /*
1281  * Unlike other ethernet controllers, the JMC250 requires the
1282  * link speed to be explicitly reset to 10/100Mbps, as a gigabit
1283  * link consumes more than 375mA.
1284  * Note that we reset the link speed to 10/100Mbps via
1285  * auto-negotiation, but we cannot know whether that operation
1286  * will succeed, as we have no control after powering off.  If
1287  * the renegotiation fails, WOL may not work.  Running at 1Gbps
1288  * draws more than the 375mA at 3.3V allowed by the PCI
1289  * specification, which would result in power to the ethernet
1290  * controller being shut down completely.
1291  *
1292  * TODO
1293  *  Save current negotiated media speed/duplex/flow-control
1294  *  to softc and restore the same link again after resuming.
1295  *  PHY handling, such as powering down or resetting to 100Mbps,
1296  *  may be better handled in the suspend method of the PHY driver.
1297  */
1298 static void
1299 jme_setlinkspeed(struct jme_softc *sc)
1300 {
1301 	struct mii_data *mii;
1302 	int aneg, i;
1303 
1304 	JME_LOCK_ASSERT(sc);
1305 
1306 	mii = device_get_softc(sc->jme_miibus);
1307 	mii_pollstat(mii);
1308 	aneg = 0;
1309 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1310 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1311 		case IFM_10_T:
1312 		case IFM_100_TX:
1313 			return;
1314 		case IFM_1000_T:
1315 			aneg++;		/* FALLTHROUGH */
1316 		default:
1317 			break;
1318 		}
1319 	}
1320 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1321 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1322 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1323 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1324 	    BMCR_AUTOEN | BMCR_STARTNEG);
1325 	DELAY(1000);
1326 	if (aneg != 0) {
1327 		/* Poll link state until jme(4) gets a 10/100 link. */
1328 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1329 			mii_pollstat(mii);
1330 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1331 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1332 				case IFM_10_T:
1333 				case IFM_100_TX:
1334 					jme_mac_config(sc);
1335 					return;
1336 				default:
1337 					break;
1338 				}
1339 			}
1340 			JME_UNLOCK(sc);
1341 			pause("jmelnk", hz);
1342 			JME_LOCK(sc);
1343 		}
1344 		if (i == MII_ANEGTICKS_GIGE)
1345 			device_printf(sc->jme_dev, "establishing link failed, "
1346 			    "WOL may not work!");
1347 	}
1348 	/*
1349 	 * No link, force MAC to have 100Mbps, full-duplex link.
1350 	 * This is the last resort and may/may not work.
1351 	 */
1352 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1353 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1354 	jme_mac_config(sc);
1355 }
1356 
1357 static void
1358 jme_setwol(struct jme_softc *sc)
1359 {
1360 	struct ifnet *ifp = &sc->arpcom.ac_if;
1361 	uint32_t gpr, pmcs;
1362 	uint16_t pmstat;
1363 	int pmc;
1364 
1365 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1366 		/* No PME capability, PHY power down. */
1367 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1368 		    MII_BMCR, BMCR_PDOWN);
1369 		return;
1370 	}
1371 
1372 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1373 	pmcs = CSR_READ_4(sc, JME_PMCS);
1374 	pmcs &= ~PMCS_WOL_ENB_MASK;
1375 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1376 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1377 		/* Enable PME message. */
1378 		gpr |= GPREG0_PME_ENB;
1379 		/* For gigabit controllers, reset link speed to 10/100. */
1380 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1381 			jme_setlinkspeed(sc);
1382 	}
1383 
1384 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1385 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1386 
1387 	/* Request PME. */
1388 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1389 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1390 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1391 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1392 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1393 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1394 		/* No WOL, PHY power down. */
1395 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1396 		    MII_BMCR, BMCR_PDOWN);
1397 	}
1398 }
1399 #endif
1400 
1401 static int
1402 jme_suspend(device_t dev)
1403 {
1404 	struct jme_softc *sc = device_get_softc(dev);
1405 	struct ifnet *ifp = &sc->arpcom.ac_if;
1406 
1407 	lwkt_serialize_enter(ifp->if_serializer);
1408 	jme_stop(sc);
1409 #ifdef notyet
1410 	jme_setwol(sc);
1411 #endif
1412 	lwkt_serialize_exit(ifp->if_serializer);
1413 
1414 	return (0);
1415 }
1416 
1417 static int
1418 jme_resume(device_t dev)
1419 {
1420 	struct jme_softc *sc = device_get_softc(dev);
1421 	struct ifnet *ifp = &sc->arpcom.ac_if;
1422 #ifdef notyet
1423 	int pmc;
1424 #endif
1425 
1426 	lwkt_serialize_enter(ifp->if_serializer);
1427 
1428 #ifdef notyet
1429 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1430 		uint16_t pmstat;
1431 
1432 		pmstat = pci_read_config(sc->jme_dev,
1433 		    pmc + PCIR_POWER_STATUS, 2);
1434 		/* Disable PME and clear PME status. */
1435 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1436 		pci_write_config(sc->jme_dev,
1437 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1438 	}
1439 #endif
1440 
1441 	if (ifp->if_flags & IFF_UP)
1442 		jme_init(sc);
1443 
1444 	lwkt_serialize_exit(ifp->if_serializer);
1445 
1446 	return (0);
1447 }
1448 
1449 static int
1450 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1451 {
1452 	struct jme_txdesc *txd;
1453 	struct jme_desc *desc;
1454 	struct mbuf *m;
1455 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1456 	int maxsegs, nsegs;
1457 	int error, i, prod, symbol_desc;
1458 	uint32_t cflags, flag64;
1459 
1460 	M_ASSERTPKTHDR((*m_head));
1461 
1462 	prod = sc->jme_cdata.jme_tx_prod;
1463 	txd = &sc->jme_cdata.jme_txdesc[prod];
1464 
1465 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1466 		symbol_desc = 1;
1467 	else
1468 		symbol_desc = 0;
1469 
1470 	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1471 		  (JME_TXD_RSVD + symbol_desc);
1472 	if (maxsegs > JME_MAXTXSEGS)
1473 		maxsegs = JME_MAXTXSEGS;
1474 	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1475 		("not enough segments %d\n", maxsegs));
1476 
1477 	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1478 			txd->tx_dmamap, m_head,
1479 			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1480 	if (error)
1481 		goto fail;
1482 
1483 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1484 			BUS_DMASYNC_PREWRITE);
1485 
1486 	m = *m_head;
1487 	cflags = 0;
1488 
1489 	/* Configure checksum offload. */
1490 	if (m->m_pkthdr.csum_flags & CSUM_IP)
1491 		cflags |= JME_TD_IPCSUM;
1492 	if (m->m_pkthdr.csum_flags & CSUM_TCP)
1493 		cflags |= JME_TD_TCPCSUM;
1494 	if (m->m_pkthdr.csum_flags & CSUM_UDP)
1495 		cflags |= JME_TD_UDPCSUM;
1496 
1497 	/* Configure VLAN. */
1498 	if (m->m_flags & M_VLANTAG) {
1499 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1500 		cflags |= JME_TD_VLAN_TAG;
1501 	}
1502 
1503 	desc = &sc->jme_cdata.jme_tx_ring[prod];
1504 	desc->flags = htole32(cflags);
1505 	desc->addr_hi = htole32(m->m_pkthdr.len);
1506 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1507 		/*
1508 		 * Use the 64-bit TX descriptor chain format.
1509 		 *
1510 		 * The first TX desc of the chain, which is setup here,
1511 		 * is just a symbol TX desc carrying no payload.
1512 		 */
1513 		flag64 = JME_TD_64BIT;
1514 		desc->buflen = 0;
1515 		desc->addr_lo = 0;
1516 
1517 		/* No effective TX desc is consumed */
1518 		i = 0;
1519 	} else {
1520 		/*
1521 		 * Use the 32-bit TX descriptor chain format.
1522 		 *
1523 		 * The first TX desc of the chain, which is setup here,
1524 		 * is an effective TX desc carrying the first segment of
1525 		 * the mbuf chain.
1526 		 */
1527 		flag64 = 0;
1528 		desc->buflen = htole32(txsegs[0].ds_len);
1529 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1530 
1531 		/* One effective TX desc is consumed */
1532 		i = 1;
1533 	}
1534 	sc->jme_cdata.jme_tx_cnt++;
1535 	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1536 		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1537 	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1538 
1539 	txd->tx_ndesc = 1 - i;
1540 	for (; i < nsegs; i++) {
1541 		desc = &sc->jme_cdata.jme_tx_ring[prod];
1542 		desc->flags = htole32(JME_TD_OWN | flag64);
1543 		desc->buflen = htole32(txsegs[i].ds_len);
1544 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1545 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1546 
1547 		sc->jme_cdata.jme_tx_cnt++;
1548 		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1549 			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1550 		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1551 	}
1552 
1553 	/* Update producer index. */
1554 	sc->jme_cdata.jme_tx_prod = prod;
1555 	/*
1556 	 * Finally, request an interrupt and give ownership of the
1557 	 * first descriptor to the hardware.
1558 	 */
1559 	desc = txd->tx_desc;
1560 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1561 
1562 	txd->tx_m = m;
1563 	txd->tx_ndesc += nsegs;
1564 
1565 	return 0;
1566 fail:
1567 	m_freem(*m_head);
1568 	*m_head = NULL;
1569 	return error;
1570 }
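
/*
 * Illustrative layout of the 64-bit descriptor chain built above for a
 * two-segment mbuf (indices relative to the original producer slot):
 *
 *	desc[0]: symbol descriptor; carries cflags and the packet length
 *		 but no buffer (buflen/addr_lo are 0)
 *	desc[1]: JME_TD_64BIT, segment 0 address/length
 *	desc[2]: JME_TD_64BIT, segment 1 address/length
 *
 * JME_TD_OWN | JME_TD_INTR is set on desc[0] only after the whole
 * chain is filled in, so the hardware sees the chain atomically.
 */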
1571 
1572 static void
1573 jme_start(struct ifnet *ifp)
1574 {
1575 	struct jme_softc *sc = ifp->if_softc;
1576 	struct mbuf *m_head;
1577 	int enq = 0;
1578 
1579 	ASSERT_SERIALIZED(ifp->if_serializer);
1580 
1581 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1582 		ifq_purge(&ifp->if_snd);
1583 		return;
1584 	}
1585 
1586 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1587 		return;
1588 
1589 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1590 		jme_txeof(sc);
1591 
1592 	while (!ifq_is_empty(&ifp->if_snd)) {
1593 		/*
1594 		 * Check the number of available TX descriptors; always
1595 		 * leave JME_TXD_RSVD of them free.
1596 		 */
1597 		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1598 		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1599 			ifp->if_flags |= IFF_OACTIVE;
1600 			break;
1601 		}
1602 
1603 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
1604 		if (m_head == NULL)
1605 			break;
1606 
1607 		/*
1608 		 * Pack the data into the transmit ring. If we
1609 		 * don't have room, set the OACTIVE flag and wait
1610 		 * for the NIC to drain the ring.
1611 		 */
1612 		if (jme_encap(sc, &m_head)) {
1613 			KKASSERT(m_head == NULL);
1614 			ifp->if_oerrors++;
1615 			ifp->if_flags |= IFF_OACTIVE;
1616 			break;
1617 		}
1618 		enq++;
1619 
1620 		/*
1621 		 * If there's a BPF listener, bounce a copy of this frame
1622 		 * to it.
1623 		 */
1624 		ETHER_BPF_MTAP(ifp, m_head);
1625 	}
1626 
1627 	if (enq > 0) {
1628 		/*
1629 		 * Reading TXCSR takes a very long time under heavy load,
1630 		 * so cache the TXCSR value and write it, ORed with the
1631 		 * kick command, to TXCSR.  This saves one register access
1632 		 * cycle.
1633 		 */
1634 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1635 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1636 		/* Set a timeout in case the chip goes out to lunch. */
1637 		ifp->if_timer = JME_TX_TIMEOUT;
1638 	}
1639 }
1640 
1641 static void
1642 jme_watchdog(struct ifnet *ifp)
1643 {
1644 	struct jme_softc *sc = ifp->if_softc;
1645 
1646 	ASSERT_SERIALIZED(ifp->if_serializer);
1647 
1648 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1649 		if_printf(ifp, "watchdog timeout (missed link)\n");
1650 		ifp->if_oerrors++;
1651 		jme_init(sc);
1652 		return;
1653 	}
1654 
1655 	jme_txeof(sc);
1656 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1657 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1658 			  "-- recovering\n");
1659 		if (!ifq_is_empty(&ifp->if_snd))
1660 			if_devstart(ifp);
1661 		return;
1662 	}
1663 
1664 	if_printf(ifp, "watchdog timeout\n");
1665 	ifp->if_oerrors++;
1666 	jme_init(sc);
1667 	if (!ifq_is_empty(&ifp->if_snd))
1668 		if_devstart(ifp);
1669 }
1670 
1671 static int
1672 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1673 {
1674 	struct jme_softc *sc = ifp->if_softc;
1675 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1676 	struct ifreq *ifr = (struct ifreq *)data;
1677 	int error = 0, mask;
1678 
1679 	ASSERT_SERIALIZED(ifp->if_serializer);
1680 
1681 	switch (cmd) {
1682 	case SIOCSIFMTU:
1683 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1684 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1685 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1686 			error = EINVAL;
1687 			break;
1688 		}
1689 
1690 		if (ifp->if_mtu != ifr->ifr_mtu) {
1691 			/*
1692 			 * No special configuration is required when the
1693 			 * interface MTU is changed, but the availability of Tx
1694 			 * checksum offload should be checked against the new
1695 			 * MTU size, as the Tx FIFO is only 2K.
1696 			 */
1697 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1698 				ifp->if_capenable &= ~IFCAP_TXCSUM;
1699 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1700 			}
1701 			ifp->if_mtu = ifr->ifr_mtu;
1702 			if (ifp->if_flags & IFF_RUNNING)
1703 				jme_init(sc);
1704 		}
1705 		break;
1706 
1707 	case SIOCSIFFLAGS:
1708 		if (ifp->if_flags & IFF_UP) {
1709 			if (ifp->if_flags & IFF_RUNNING) {
1710 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1711 				    (IFF_PROMISC | IFF_ALLMULTI))
1712 					jme_set_filter(sc);
1713 			} else {
1714 				jme_init(sc);
1715 			}
1716 		} else {
1717 			if (ifp->if_flags & IFF_RUNNING)
1718 				jme_stop(sc);
1719 		}
1720 		sc->jme_if_flags = ifp->if_flags;
1721 		break;
1722 
1723 	case SIOCADDMULTI:
1724 	case SIOCDELMULTI:
1725 		if (ifp->if_flags & IFF_RUNNING)
1726 			jme_set_filter(sc);
1727 		break;
1728 
1729 	case SIOCSIFMEDIA:
1730 	case SIOCGIFMEDIA:
1731 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1732 		break;
1733 
1734 	case SIOCSIFCAP:
1735 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1736 
1737 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1738 			ifp->if_capenable ^= IFCAP_TXCSUM;
1739 			if (IFCAP_TXCSUM & ifp->if_capenable)
1740 				ifp->if_hwassist |= JME_CSUM_FEATURES;
1741 			else
1742 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1743 		}
1744 		if (mask & IFCAP_RXCSUM) {
1745 			uint32_t reg;
1746 
1747 			ifp->if_capenable ^= IFCAP_RXCSUM;
1748 			reg = CSR_READ_4(sc, JME_RXMAC);
1749 			reg &= ~RXMAC_CSUM_ENB;
1750 			if (ifp->if_capenable & IFCAP_RXCSUM)
1751 				reg |= RXMAC_CSUM_ENB;
1752 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1753 		}
1754 
1755 		if (mask & IFCAP_VLAN_HWTAGGING) {
1756 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1757 			jme_set_vlan(sc);
1758 		}
1759 
1760 		if (mask & IFCAP_RSS) {
1761 			ifp->if_capenable ^= IFCAP_RSS;
1762 			if (ifp->if_flags & IFF_RUNNING)
1763 				jme_init(sc);
1764 		}
1765 		break;
1766 
1767 	default:
1768 		error = ether_ioctl(ifp, cmd, data);
1769 		break;
1770 	}
1771 	return (error);
1772 }
1773 
1774 static void
1775 jme_mac_config(struct jme_softc *sc)
1776 {
1777 	struct mii_data *mii;
1778 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1779 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1780 
1781 	mii = device_get_softc(sc->jme_miibus);
1782 
1783 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1784 	DELAY(10);
1785 	CSR_WRITE_4(sc, JME_GHC, 0);
1786 	ghc = 0;
1787 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1788 	rxmac &= ~RXMAC_FC_ENB;
1789 	txmac = CSR_READ_4(sc, JME_TXMAC);
1790 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1791 	txpause = CSR_READ_4(sc, JME_TXPFC);
1792 	txpause &= ~TXPFC_PAUSE_ENB;
1793 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1794 		ghc |= GHC_FULL_DUPLEX;
1795 		rxmac &= ~RXMAC_COLL_DET_ENB;
1796 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1797 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1798 		    TXMAC_FRAME_BURST);
1799 #ifdef notyet
1800 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1801 			txpause |= TXPFC_PAUSE_ENB;
1802 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1803 			rxmac |= RXMAC_FC_ENB;
1804 #endif
1805 		/* Disable retry transmit timer/retry limit. */
1806 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1807 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1808 	} else {
1809 		rxmac |= RXMAC_COLL_DET_ENB;
1810 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1811 		/* Enable retry transmit timer/retry limit. */
1812 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1813 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1814 	}
1815 
1816 	/*
1817 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1818 	 */
1819 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1820 	gp1 &= ~GPREG1_WA_HDX;
1821 
1822 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1823 		hdx = 1;
1824 
1825 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1826 	case IFM_10_T:
1827 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1828 		if (hdx)
1829 			gp1 |= GPREG1_WA_HDX;
1830 		break;
1831 
1832 	case IFM_100_TX:
1833 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1834 		if (hdx)
1835 			gp1 |= GPREG1_WA_HDX;
1836 
		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B.
		 */
1841 		phyconf = JMPHY_CONF_EXTFIFO;
1842 		break;
1843 
1844 	case IFM_1000_T:
1845 		if (sc->jme_caps & JME_CAP_FASTETH)
1846 			break;
1847 
1848 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1849 		if (hdx)
1850 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1851 		break;
1852 
1853 	default:
1854 		break;
1855 	}
1856 	CSR_WRITE_4(sc, JME_GHC, ghc);
1857 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1858 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1859 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1860 
1861 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
1862 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1863 				    JMPHY_CONF, phyconf);
1864 	}
1865 	if (sc->jme_workaround & JME_WA_HDX)
1866 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1867 }
1868 
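/*
 * Interrupt handler: read the interrupt request status, mask all
 * interrupts while working, acknowledge the sources about to be
 * serviced, then process Rx/Tx completions and kick the Rx queue
 * if it ran out of descriptors. Interrupts are re-enabled on exit.
 */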
1869 static void
1870 jme_intr(void *xsc)
1871 {
1872 	struct jme_softc *sc = xsc;
1873 	struct ifnet *ifp = &sc->arpcom.ac_if;
1874 	uint32_t status;
1875 	int r;
1876 
1877 	ASSERT_SERIALIZED(ifp->if_serializer);
1878 
1879 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1880 	if (status == 0 || status == 0xFFFFFFFF)
1881 		return;
1882 
1883 	/* Disable interrupts. */
1884 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1885 
1886 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1887 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1888 		goto back;
1889 
1890 	/* Reset PCC counter/timer and Ack interrupts. */
1891 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1892 
1893 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1894 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1895 
1896 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
1897 		if (status & jme_rx_status[r].jme_coal) {
1898 			status |= jme_rx_status[r].jme_coal |
1899 				  jme_rx_status[r].jme_comp;
1900 		}
1901 	}
1902 
1903 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1904 
1905 	if (ifp->if_flags & IFF_RUNNING) {
1906 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1907 			jme_rx_intr(sc, status);
1908 
1909 		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify the hardware of the availability of new
			 * Rx buffers. Reading RXCSR takes a very long time
			 * under heavy load, so cache the RXCSR value and
			 * write the cached value ORed with the kick command
			 * to RXCSR. This saves one register access cycle.
			 */
1917 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1918 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1919 		}
1920 
1921 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1922 			jme_txeof(sc);
1923 			if (!ifq_is_empty(&ifp->if_snd))
1924 				if_devstart(ifp);
1925 		}
1926 	}
1927 back:
1928 	/* Reenable interrupts. */
1929 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1930 }
1931 
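/*
 * Tx completion: walk the Tx ring from the consumer index, stopping
 * at the first descriptor still owned by the hardware, reclaim the
 * mbufs and DMA maps of completed frames, and clear IFF_OACTIVE once
 * enough descriptors are free again.
 */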
1932 static void
1933 jme_txeof(struct jme_softc *sc)
1934 {
1935 	struct ifnet *ifp = &sc->arpcom.ac_if;
1936 	struct jme_txdesc *txd;
1937 	uint32_t status;
1938 	int cons, nsegs;
1939 
1940 	cons = sc->jme_cdata.jme_tx_cons;
1941 	if (cons == sc->jme_cdata.jme_tx_prod)
1942 		return;
1943 
1944 	/*
1945 	 * Go through our Tx list and free mbufs for those
1946 	 * frames which have been transmitted.
1947 	 */
1948 	while (cons != sc->jme_cdata.jme_tx_prod) {
1949 		txd = &sc->jme_cdata.jme_txdesc[cons];
1950 		KASSERT(txd->tx_m != NULL,
1951 			("%s: freeing NULL mbuf!\n", __func__));
1952 
1953 		status = le32toh(txd->tx_desc->flags);
1954 		if ((status & JME_TD_OWN) == JME_TD_OWN)
1955 			break;
1956 
1957 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1958 			ifp->if_oerrors++;
1959 		} else {
1960 			ifp->if_opackets++;
1961 			if (status & JME_TD_COLLISION) {
1962 				ifp->if_collisions +=
1963 				    le32toh(txd->tx_desc->buflen) &
1964 				    JME_TD_BUF_LEN_MASK;
1965 			}
1966 		}
1967 
		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire chain of buffers for the transmitted frame. In
		 * other words, the JME_TD_OWN bit is valid only in the
		 * first descriptor of a multi-descriptor transmission.
		 */
1975 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
1976 			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
1977 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
1978 		}
1979 
1980 		/* Reclaim transferred mbufs. */
1981 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1982 		m_freem(txd->tx_m);
1983 		txd->tx_m = NULL;
1984 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
1985 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
1986 			("%s: Active Tx desc counter was garbled\n", __func__));
1987 		txd->tx_ndesc = 0;
1988 	}
1989 	sc->jme_cdata.jme_tx_cons = cons;
1990 
1991 	if (sc->jme_cdata.jme_tx_cnt == 0)
1992 		ifp->if_timer = 0;
1993 
1994 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
1995 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
1996 		ifp->if_flags &= ~IFF_OACTIVE;
1997 }
1998 
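/*
 * Hand `count' descriptors starting at `cons' back to the hardware
 * unchanged, reusing the mbufs already attached to them; used to
 * drop errored or unresourced frames without reallocating buffers.
 */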
1999 static __inline void
2000 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2001 {
2002 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2003 	int i;
2004 
2005 	for (i = 0; i < count; ++i) {
2006 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2007 
2008 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2009 		desc->buflen = htole32(MCLBYTES);
2010 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2011 	}
2012 }
2013 
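/*
 * Translate Rx descriptor flags into a pktinfo for RSS-aware input
 * dispatch. Returns NULL when the frame is neither IPv4 nor IPv6,
 * or when it is neither an IP fragment nor a TCP/UDP segment.
 */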
2014 static __inline struct pktinfo *
2015 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2016 {
2017 	if (flags & JME_RD_IPV4)
2018 		pi->pi_netisr = NETISR_IP;
2019 	else if (flags & JME_RD_IPV6)
2020 		pi->pi_netisr = NETISR_IPV6;
2021 	else
2022 		return NULL;
2023 
2024 	pi->pi_flags = 0;
2025 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2026 
2027 	if (flags & JME_RD_MORE_FRAG)
2028 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2029 	else if (flags & JME_RD_TCP)
2030 		pi->pi_l3proto = IPPROTO_TCP;
2031 	else if (flags & JME_RD_UDP)
2032 		pi->pi_l3proto = IPPROTO_UDP;
2033 	else
2034 		pi = NULL;
2035 	return pi;
2036 }
2037 
2038 /* Receive a frame. */
2039 static void
2040 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2041 {
2042 	struct ifnet *ifp = &sc->arpcom.ac_if;
2043 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2044 	struct jme_desc *desc;
2045 	struct jme_rxdesc *rxd;
2046 	struct mbuf *mp, *m;
2047 	uint32_t flags, status, hash, hashinfo;
2048 	int cons, count, nsegs;
2049 
2050 	cons = rdata->jme_rx_cons;
2051 	desc = &rdata->jme_rx_ring[cons];
2052 	flags = le32toh(desc->flags);
2053 	status = le32toh(desc->buflen);
2054 	hash = le32toh(desc->addr_hi);
2055 	hashinfo = le32toh(desc->addr_lo);
2056 	nsegs = JME_RX_NSEGS(status);
2057 
2058 	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
2059 			"hash 0x%08x, hash info 0x%08x\n",
2060 			ring, flags, hash, hashinfo);
2061 
2062 	if (status & JME_RX_ERR_STAT) {
2063 		ifp->if_ierrors++;
2064 		jme_discard_rxbufs(sc, ring, cons, nsegs);
2065 #ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2067 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2068 #endif
2069 		rdata->jme_rx_cons += nsegs;
2070 		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2071 		return;
2072 	}
2073 
2074 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2075 	for (count = 0; count < nsegs; count++,
2076 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2077 		rxd = &rdata->jme_rxdesc[cons];
2078 		mp = rxd->rx_m;
2079 
2080 		/* Add a new receive buffer to the ring. */
2081 		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2082 			ifp->if_iqdrops++;
2083 			/* Reuse buffer. */
2084 			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2085 			if (rdata->jme_rxhead != NULL) {
2086 				m_freem(rdata->jme_rxhead);
2087 				JME_RXCHAIN_RESET(sc, ring);
2088 			}
2089 			break;
2090 		}
2091 
		/*
		 * Assume we've received a full-sized frame. The actual
		 * size is fixed up when we encounter the end of a
		 * multi-segment frame.
		 */
2097 		mp->m_len = MCLBYTES;
2098 
2099 		/* Chain received mbufs. */
2100 		if (rdata->jme_rxhead == NULL) {
2101 			rdata->jme_rxhead = mp;
2102 			rdata->jme_rxtail = mp;
2103 		} else {
			/*
			 * The receive processor can handle a maximum
			 * frame size of 65535 bytes.
			 */
2108 			rdata->jme_rxtail->m_next = mp;
2109 			rdata->jme_rxtail = mp;
2110 		}
2111 
2112 		if (count == nsegs - 1) {
2113 			struct pktinfo pi0, *pi;
2114 
2115 			/* Last desc. for this frame. */
2116 			m = rdata->jme_rxhead;
2117 			m->m_pkthdr.len = rdata->jme_rxlen;
2118 			if (nsegs > 1) {
2119 				/* Set first mbuf size. */
2120 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2121 				/* Set last mbuf size. */
2122 				mp->m_len = rdata->jme_rxlen -
2123 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2124 				    (MCLBYTES * (nsegs - 2)));
2125 			} else {
2126 				m->m_len = rdata->jme_rxlen;
2127 			}
2128 			m->m_pkthdr.rcvif = ifp;
2129 
			/*
			 * Account for the 10 bytes of auto padding that
			 * are used to align the IP header on a 32-bit
			 * boundary. Also note that the CRC bytes are
			 * automatically stripped by the hardware.
			 */
2136 			m->m_data += JME_RX_PAD_BYTES;
2137 
2138 			/* Set checksum information. */
2139 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2140 			    (flags & JME_RD_IPV4)) {
2141 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2142 				if (flags & JME_RD_IPCSUM)
2143 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2144 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2145 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2146 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2147 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2148 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2149 					m->m_pkthdr.csum_flags |=
2150 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2151 					m->m_pkthdr.csum_data = 0xffff;
2152 				}
2153 			}
2154 
2155 			/* Check for VLAN tagged packets. */
2156 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2157 			    (flags & JME_RD_VLAN_TAG)) {
2158 				m->m_pkthdr.ether_vlantag =
2159 				    flags & JME_RD_VLAN_MASK;
2160 				m->m_flags |= M_VLANTAG;
2161 			}
2162 
2163 			ifp->if_ipackets++;
2164 
2165 			if (ifp->if_capenable & IFCAP_RSS)
2166 				pi = jme_pktinfo(&pi0, flags);
2167 			else
2168 				pi = NULL;
2169 
2170 			if (pi != NULL &&
2171 			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
2172 				m->m_flags |= M_HASH;
2173 				m->m_pkthdr.hash = toeplitz_hash(hash);
2174 			}
2175 
2176 #ifdef JME_RSS_DEBUG
2177 			if (pi != NULL) {
2178 				JME_RSS_DPRINTF(sc, 10,
2179 				    "isr %d flags %08x, l3 %d %s\n",
2180 				    pi->pi_netisr, pi->pi_flags,
2181 				    pi->pi_l3proto,
2182 				    (m->m_flags & M_HASH) ? "hash" : "");
2183 			}
2184 #endif
2185 
2186 			/* Pass it on. */
2187 			ether_input_chain(ifp, m, pi, chain);
2188 
2189 			/* Reset mbuf chains. */
2190 			JME_RXCHAIN_RESET(sc, ring);
2191 #ifdef JME_RSS_DEBUG
2192 			sc->jme_rx_ring_pkt[ring]++;
2193 #endif
2194 		}
2195 	}
2196 
2197 	rdata->jme_rx_cons += nsegs;
2198 	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2199 }
2200 
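/*
 * Process received frames on the given ring, queueing them onto the
 * per-CPU mbuf chains. A negative `count' means no budget (interrupt
 * path); under DEVICE_POLLING, `count' caps the number of frames
 * processed. Returns the number of frames handled.
 */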
2201 static int
2202 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2203 		int count)
2204 {
2205 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2206 	struct jme_desc *desc;
2207 	int nsegs, prog, pktlen;
2208 
2209 	prog = 0;
2210 	for (;;) {
2211 #ifdef DEVICE_POLLING
2212 		if (count >= 0 && count-- == 0)
2213 			break;
2214 #endif
2215 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2216 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2217 			break;
2218 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2219 			break;
2220 
		/*
		 * Check the number of segments against the received
		 * byte count. A mismatch would indicate that the
		 * hardware is still updating the Rx descriptors. I'm
		 * not sure whether this check is needed.
		 */
2227 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2228 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2229 		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&sc->arpcom.ac_if, "RX fragment count (%d) "
				  "and packet size (%d) mismatch\n",
2232 				  nsegs, pktlen);
2233 			break;
2234 		}
2235 
2236 		/* Received a frame. */
2237 		jme_rxpkt(sc, ring, chain);
2238 		prog++;
2239 	}
2240 	return prog;
2241 }
2242 
2243 static void
2244 jme_rxeof(struct jme_softc *sc, int ring)
2245 {
2246 	struct mbuf_chain chain[MAXCPU];
2247 
2248 	ether_input_chain_init(chain);
2249 	if (jme_rxeof_chain(sc, ring, chain, -1))
2250 		ether_input_dispatch(chain);
2251 }
2252 
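/*
 * Once-a-second timer: drive the MII state machine under the
 * interface serializer and reschedule ourselves.
 */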
2253 static void
2254 jme_tick(void *xsc)
2255 {
2256 	struct jme_softc *sc = xsc;
2257 	struct ifnet *ifp = &sc->arpcom.ac_if;
2258 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2259 
2260 	lwkt_serialize_enter(ifp->if_serializer);
2261 
2262 	mii_tick(mii);
2263 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2264 
2265 	lwkt_serialize_exit(ifp->if_serializer);
2266 }
2267 
2268 static void
2269 jme_reset(struct jme_softc *sc)
2270 {
2271 #ifdef foo
2272 	/* Stop receiver, transmitter. */
2273 	jme_stop_rx(sc);
2274 	jme_stop_tx(sc);
2275 #endif
2276 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2277 	DELAY(10);
2278 	CSR_WRITE_4(sc, JME_GHC, 0);
2279 }
2280 
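/*
 * Bring the interface up: stop and reset the chip, size the Tx
 * descriptor spare count for the current MTU, set up RSS and the
 * Rx/Tx rings, program the station address, DMA/FIFO parameters,
 * coalescing and interrupt masks, then kick off autonegotiation
 * via mii_mediachg(). The Rx/Tx engines themselves are enabled
 * later, in jme_miibus_statchg(), once a valid link is detected.
 */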
2281 static void
2282 jme_init(void *xsc)
2283 {
2284 	struct jme_softc *sc = xsc;
2285 	struct ifnet *ifp = &sc->arpcom.ac_if;
2286 	struct mii_data *mii;
2287 	uint8_t eaddr[ETHER_ADDR_LEN];
2288 	bus_addr_t paddr;
2289 	uint32_t reg;
2290 	int error, r;
2291 
2292 	ASSERT_SERIALIZED(ifp->if_serializer);
2293 
2294 	/*
2295 	 * Cancel any pending I/O.
2296 	 */
2297 	jme_stop(sc);
2298 
2299 	/*
2300 	 * Reset the chip to a known state.
2301 	 */
2302 	jme_reset(sc);
2303 
2304 	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2306 	KKASSERT(sc->jme_txd_spare >= 1);
2307 
	/*
	 * If we use the 64-bit address mode for transmitting, each Tx
	 * request needs one more symbol descriptor.
	 */
2312 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2313 		sc->jme_txd_spare += 1;
2314 
2315 	if (ifp->if_capenable & IFCAP_RSS)
2316 		jme_enable_rss(sc);
2317 	else
2318 		jme_disable_rss(sc);
2319 
2320 	/* Init RX descriptors */
2321 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2322 		error = jme_init_rx_ring(sc, r);
2323 		if (error) {
2324 			if_printf(ifp, "initialization failed: "
2325 				  "no memory for %dth RX ring.\n", r);
2326 			jme_stop(sc);
2327 			return;
2328 		}
2329 	}
2330 
2331 	/* Init TX descriptors */
2332 	jme_init_tx_ring(sc);
2333 
2334 	/* Initialize shadow status block. */
2335 	jme_init_ssb(sc);
2336 
2337 	/* Reprogram the station address. */
2338 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2339 	CSR_WRITE_4(sc, JME_PAR0,
2340 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2341 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2342 
2343 	/*
2344 	 * Configure Tx queue.
2345 	 *  Tx priority queue weight value : 0
2346 	 *  Tx FIFO threshold for processing next packet : 16QW
2347 	 *  Maximum Tx DMA length : 512
2348 	 *  Allow Tx DMA burst.
2349 	 */
2350 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2351 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2352 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2353 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2354 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2355 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2356 
2357 	/* Set Tx descriptor counter. */
2358 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2359 
2360 	/* Set Tx ring address to the hardware. */
2361 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2362 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2363 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2364 
2365 	/* Configure TxMAC parameters. */
2366 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2367 	reg |= TXMAC_THRESH_1_PKT;
2368 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2369 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2370 
2371 	/*
2372 	 * Configure Rx queue.
2373 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2374 	 *  FIFO threshold for processing next packet : 128QW
2375 	 *  Rx queue 0 select
2376 	 *  Max Rx DMA length : 128
2377 	 *  Rx descriptor retry : 32
2378 	 *  Rx descriptor retry time gap : 256ns
2379 	 *  Don't receive runt/bad frame.
2380 	 */
2381 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2382 #if 0
2383 	/*
2384 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2385 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2386 	 * decrease FIFO threshold to reduce the FIFO overruns for
2387 	 * frames larger than 4000 bytes.
2388 	 * For best performance of standard MTU sized frames use
2389 	 * maximum allowable FIFO threshold, 128QW.
2390 	 */
2391 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2392 	    JME_RX_FIFO_SIZE)
2393 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2394 	else
2395 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2396 #else
2397 	/* Improve PCI Express compatibility */
2398 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2399 #endif
2400 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2401 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2402 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2403 	/* XXX TODO DROP_BAD */
2404 
2405 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2406 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2407 
2408 		/* Set Rx descriptor counter. */
2409 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2410 
2411 		/* Set Rx ring address to the hardware. */
2412 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2413 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2414 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2415 	}
2416 
2417 	/* Clear receive filter. */
2418 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2419 
2420 	/* Set up the receive filter. */
2421 	jme_set_filter(sc);
2422 	jme_set_vlan(sc);
2423 
	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation. Also clear WOL detection status bits.
	 */
2428 	reg = CSR_READ_4(sc, JME_PMCS);
2429 	reg &= ~PMCS_WOL_ENB_MASK;
2430 	CSR_WRITE_4(sc, JME_PMCS, reg);
2431 
	/*
	 * Pad 10 bytes right before the received frame. This greatly
	 * helps Rx performance on strict-alignment architectures, as
	 * the driver does not need to copy the frame to align the
	 * payload.
	 */
2437 	reg = CSR_READ_4(sc, JME_RXMAC);
2438 	reg |= RXMAC_PAD_10BYTES;
2439 
2440 	if (ifp->if_capenable & IFCAP_RXCSUM)
2441 		reg |= RXMAC_CSUM_ENB;
2442 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2443 
2444 	/* Configure general purpose reg0 */
2445 	reg = CSR_READ_4(sc, JME_GPREG0);
2446 	reg &= ~GPREG0_PCC_UNIT_MASK;
2447 	/* Set PCC timer resolution to micro-seconds unit. */
2448 	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting, as we have to read the
	 * JME_INTR_STATUS register in jme_intr(). Also it seems hard
	 * to synchronize the interrupt status between the hardware
	 * and the software with shadow posting, due to the
	 * requirements of bus_dmamap_sync(9).
	 */
2456 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2457 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2458 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2459 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2460 	/* Disable posting of DW0. */
2461 	reg &= ~GPREG0_POST_DW0_ENB;
2462 	/* Clear PME message. */
2463 	reg &= ~GPREG0_PME_ENB;
2464 	/* Set PHY address. */
2465 	reg &= ~GPREG0_PHY_ADDR_MASK;
2466 	reg |= sc->jme_phyaddr;
2467 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2468 
2469 	/* Configure Tx queue 0 packet completion coalescing. */
2470 	jme_set_tx_coal(sc);
2471 
2472 	/* Configure Rx queue 0 packet completion coalescing. */
2473 	jme_set_rx_coal(sc);
2474 
2475 	/* Configure shadow status block but don't enable posting. */
2476 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2477 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2478 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2479 
2480 	/* Disable Timer 1 and Timer 2. */
2481 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2482 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2483 
2484 	/* Configure retry transmit period, retry limit value. */
2485 	CSR_WRITE_4(sc, JME_TXTRHD,
2486 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2487 	    TXTRHD_RT_PERIOD_MASK) |
2488 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));
2490 
2491 #ifdef DEVICE_POLLING
2492 	if (!(ifp->if_flags & IFF_POLLING))
2493 #endif
2494 	/* Initialize the interrupt mask. */
2495 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2496 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2497 
	/*
	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
2502 	sc->jme_flags &= ~JME_FLAG_LINK;
2503 
2504 	/* Set the current media. */
2505 	mii = device_get_softc(sc->jme_miibus);
2506 	mii_mediachg(mii);
2507 
2508 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2509 
2510 	ifp->if_flags |= IFF_RUNNING;
2511 	ifp->if_flags &= ~IFF_OACTIVE;
2512 }
2513 
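/*
 * Shut the interface down: mark it !RUNNING, stop the tick callout,
 * disable interrupts and shadow status posting, halt the Rx/Tx
 * engines, and free every mbuf still sitting in the Rx and Tx rings.
 */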
2514 static void
2515 jme_stop(struct jme_softc *sc)
2516 {
2517 	struct ifnet *ifp = &sc->arpcom.ac_if;
2518 	struct jme_txdesc *txd;
2519 	struct jme_rxdesc *rxd;
2520 	struct jme_rxdata *rdata;
2521 	int i, r;
2522 
2523 	ASSERT_SERIALIZED(ifp->if_serializer);
2524 
2525 	/*
2526 	 * Mark the interface down and cancel the watchdog timer.
2527 	 */
2528 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2529 	ifp->if_timer = 0;
2530 
2531 	callout_stop(&sc->jme_tick_ch);
2532 	sc->jme_flags &= ~JME_FLAG_LINK;
2533 
2534 	/*
2535 	 * Disable interrupts.
2536 	 */
2537 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2538 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2539 
2540 	/* Disable updating shadow status block. */
2541 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2542 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2543 
2544 	/* Stop receiver, transmitter. */
2545 	jme_stop_rx(sc);
2546 	jme_stop_tx(sc);
2547 
	/*
	 * Free partially finished RX segments.
	 */
2551 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2552 		rdata = &sc->jme_cdata.jme_rx_data[r];
2553 		if (rdata->jme_rxhead != NULL)
2554 			m_freem(rdata->jme_rxhead);
2555 		JME_RXCHAIN_RESET(sc, r);
2556 	}
2557 
2558 	/*
2559 	 * Free RX and TX mbufs still in the queues.
2560 	 */
2561 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2562 		rdata = &sc->jme_cdata.jme_rx_data[r];
2563 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2564 			rxd = &rdata->jme_rxdesc[i];
2565 			if (rxd->rx_m != NULL) {
2566 				bus_dmamap_unload(rdata->jme_rx_tag,
2567 						  rxd->rx_dmamap);
2568 				m_freem(rxd->rx_m);
2569 				rxd->rx_m = NULL;
2570 			}
2571 		}
2572 	}
2573 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2574 		txd = &sc->jme_cdata.jme_txdesc[i];
2575 		if (txd->tx_m != NULL) {
2576 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2577 			    txd->tx_dmamap);
2578 			m_freem(txd->tx_m);
2579 			txd->tx_m = NULL;
2580 			txd->tx_ndesc = 0;
2581 		}
	}
2583 }
2584 
2585 static void
2586 jme_stop_tx(struct jme_softc *sc)
2587 {
2588 	uint32_t reg;
2589 	int i;
2590 
2591 	reg = CSR_READ_4(sc, JME_TXCSR);
2592 	if ((reg & TXCSR_TX_ENB) == 0)
2593 		return;
2594 	reg &= ~TXCSR_TX_ENB;
2595 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2596 	for (i = JME_TIMEOUT; i > 0; i--) {
2597 		DELAY(1);
2598 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2599 			break;
2600 	}
2601 	if (i == 0)
2602 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2603 }
2604 
2605 static void
2606 jme_stop_rx(struct jme_softc *sc)
2607 {
2608 	uint32_t reg;
2609 	int i;
2610 
2611 	reg = CSR_READ_4(sc, JME_RXCSR);
2612 	if ((reg & RXCSR_RX_ENB) == 0)
2613 		return;
2614 	reg &= ~RXCSR_RX_ENB;
2615 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2616 	for (i = JME_TIMEOUT; i > 0; i--) {
2617 		DELAY(1);
2618 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2619 			break;
2620 	}
2621 	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2623 }
2624 
2625 static void
2626 jme_init_tx_ring(struct jme_softc *sc)
2627 {
2628 	struct jme_chain_data *cd;
2629 	struct jme_txdesc *txd;
2630 	int i;
2631 
2632 	sc->jme_cdata.jme_tx_prod = 0;
2633 	sc->jme_cdata.jme_tx_cons = 0;
2634 	sc->jme_cdata.jme_tx_cnt = 0;
2635 
2636 	cd = &sc->jme_cdata;
2637 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2638 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2639 		txd = &sc->jme_cdata.jme_txdesc[i];
2640 		txd->tx_m = NULL;
2641 		txd->tx_desc = &cd->jme_tx_ring[i];
2642 		txd->tx_ndesc = 0;
2643 	}
2644 }
2645 
2646 static void
2647 jme_init_ssb(struct jme_softc *sc)
2648 {
2649 	struct jme_chain_data *cd;
2650 
2651 	cd = &sc->jme_cdata;
2652 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2653 }
2654 
2655 static int
2656 jme_init_rx_ring(struct jme_softc *sc, int ring)
2657 {
2658 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2659 	struct jme_rxdesc *rxd;
2660 	int i;
2661 
2662 	KKASSERT(rdata->jme_rxhead == NULL &&
2663 		 rdata->jme_rxtail == NULL &&
2664 		 rdata->jme_rxlen == 0);
2665 	rdata->jme_rx_cons = 0;
2666 
2667 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2668 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2669 		int error;
2670 
2671 		rxd = &rdata->jme_rxdesc[i];
2672 		rxd->rx_m = NULL;
2673 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2674 		error = jme_newbuf(sc, ring, rxd, 1);
2675 		if (error)
2676 			return error;
2677 	}
2678 	return 0;
2679 }
2680 
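/*
 * Attach a fresh mbuf cluster to an Rx descriptor. On failure the
 * old mbuf is left in place so the caller can recycle it; `init'
 * selects a blocking allocation during ring initialization.
 */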
2681 static int
2682 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2683 {
2684 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2685 	struct jme_desc *desc;
2686 	struct mbuf *m;
2687 	bus_dma_segment_t segs;
2688 	bus_dmamap_t map;
2689 	int error, nsegs;
2690 
2691 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2692 	if (m == NULL)
2693 		return ENOBUFS;
	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature in order not to copy the entire frame to align the
	 * IP header on a 32-bit boundary.
	 */
2700 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2701 
2702 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2703 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2704 			BUS_DMA_NOWAIT);
2705 	if (error) {
2706 		m_freem(m);
2707 		if (init)
2708 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2709 		return error;
2710 	}
2711 
2712 	if (rxd->rx_m != NULL) {
2713 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2714 				BUS_DMASYNC_POSTREAD);
2715 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2716 	}
2717 	map = rxd->rx_dmamap;
2718 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
2719 	rdata->jme_rx_sparemap = map;
2720 	rxd->rx_m = m;
2721 
2722 	desc = rxd->rx_desc;
2723 	desc->buflen = htole32(segs.ds_len);
2724 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2725 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2726 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2727 
2728 	return 0;
2729 }
2730 
2731 static void
2732 jme_set_vlan(struct jme_softc *sc)
2733 {
2734 	struct ifnet *ifp = &sc->arpcom.ac_if;
2735 	uint32_t reg;
2736 
2737 	ASSERT_SERIALIZED(ifp->if_serializer);
2738 
2739 	reg = CSR_READ_4(sc, JME_RXMAC);
2740 	reg &= ~RXMAC_VLAN_ENB;
2741 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2742 		reg |= RXMAC_VLAN_ENB;
2743 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2744 }
2745 
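/*
 * Program the Rx filter: unicast/broadcast always on; promiscuous
 * and all-multicast modes short-circuit the hash table, otherwise
 * build the 64-bit multicast hash from the address list.
 */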
2746 static void
2747 jme_set_filter(struct jme_softc *sc)
2748 {
2749 	struct ifnet *ifp = &sc->arpcom.ac_if;
2750 	struct ifmultiaddr *ifma;
2751 	uint32_t crc;
2752 	uint32_t mchash[2];
2753 	uint32_t rxcfg;
2754 
2755 	ASSERT_SERIALIZED(ifp->if_serializer);
2756 
2757 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2758 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2759 	    RXMAC_ALLMULTI);
2760 
2761 	/*
2762 	 * Always accept frames destined to our station address.
2763 	 * Always accept broadcast frames.
2764 	 */
2765 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2766 
2767 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2768 		if (ifp->if_flags & IFF_PROMISC)
2769 			rxcfg |= RXMAC_PROMISC;
2770 		if (ifp->if_flags & IFF_ALLMULTI)
2771 			rxcfg |= RXMAC_ALLMULTI;
2772 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2773 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2774 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2775 		return;
2776 	}
2777 
	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64-bit multicast hash table. The
	 * high-order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
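	/*
	 * For example, an address whose big-endian CRC32 has low-order
	 * 6 bits 0x25 (100101b) sets bit 5 (0x25 & 0x1f) of mchash[1]
	 * (0x25 >> 5).
	 */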
2785 	rxcfg |= RXMAC_MULTICAST;
2786 	bzero(mchash, sizeof(mchash));
2787 
2788 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2789 		if (ifma->ifma_addr->sa_family != AF_LINK)
2790 			continue;
2791 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2792 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2793 
2794 		/* Just want the 6 least significant bits. */
2795 		crc &= 0x3f;
2796 
2797 		/* Set the corresponding bit in the hash table. */
2798 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2799 	}
2800 
2801 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2802 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2803 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2804 }
2805 
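/*
 * The four sysctl handlers below share one pattern: snapshot the
 * current coalescing parameter, let sysctl_handle_int() run the
 * user request, bounds-check the new value, and reprogram the PCC
 * register only if the value changed while the interface is
 * running. All of it happens under the interface serializer.
 */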
2806 static int
2807 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2808 {
2809 	struct jme_softc *sc = arg1;
2810 	struct ifnet *ifp = &sc->arpcom.ac_if;
2811 	int error, v;
2812 
2813 	lwkt_serialize_enter(ifp->if_serializer);
2814 
2815 	v = sc->jme_tx_coal_to;
2816 	error = sysctl_handle_int(oidp, &v, 0, req);
2817 	if (error || req->newptr == NULL)
2818 		goto back;
2819 
2820 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2821 		error = EINVAL;
2822 		goto back;
2823 	}
2824 
2825 	if (v != sc->jme_tx_coal_to) {
2826 		sc->jme_tx_coal_to = v;
2827 		if (ifp->if_flags & IFF_RUNNING)
2828 			jme_set_tx_coal(sc);
2829 	}
2830 back:
2831 	lwkt_serialize_exit(ifp->if_serializer);
2832 	return error;
2833 }
2834 
2835 static int
2836 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2837 {
2838 	struct jme_softc *sc = arg1;
2839 	struct ifnet *ifp = &sc->arpcom.ac_if;
2840 	int error, v;
2841 
2842 	lwkt_serialize_enter(ifp->if_serializer);
2843 
2844 	v = sc->jme_tx_coal_pkt;
2845 	error = sysctl_handle_int(oidp, &v, 0, req);
2846 	if (error || req->newptr == NULL)
2847 		goto back;
2848 
2849 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2850 		error = EINVAL;
2851 		goto back;
2852 	}
2853 
2854 	if (v != sc->jme_tx_coal_pkt) {
2855 		sc->jme_tx_coal_pkt = v;
2856 		if (ifp->if_flags & IFF_RUNNING)
2857 			jme_set_tx_coal(sc);
2858 	}
2859 back:
2860 	lwkt_serialize_exit(ifp->if_serializer);
2861 	return error;
2862 }
2863 
2864 static int
2865 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2866 {
2867 	struct jme_softc *sc = arg1;
2868 	struct ifnet *ifp = &sc->arpcom.ac_if;
2869 	int error, v;
2870 
2871 	lwkt_serialize_enter(ifp->if_serializer);
2872 
2873 	v = sc->jme_rx_coal_to;
2874 	error = sysctl_handle_int(oidp, &v, 0, req);
2875 	if (error || req->newptr == NULL)
2876 		goto back;
2877 
2878 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2879 		error = EINVAL;
2880 		goto back;
2881 	}
2882 
2883 	if (v != sc->jme_rx_coal_to) {
2884 		sc->jme_rx_coal_to = v;
2885 		if (ifp->if_flags & IFF_RUNNING)
2886 			jme_set_rx_coal(sc);
2887 	}
2888 back:
2889 	lwkt_serialize_exit(ifp->if_serializer);
2890 	return error;
2891 }
2892 
2893 static int
2894 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2895 {
2896 	struct jme_softc *sc = arg1;
2897 	struct ifnet *ifp = &sc->arpcom.ac_if;
2898 	int error, v;
2899 
2900 	lwkt_serialize_enter(ifp->if_serializer);
2901 
2902 	v = sc->jme_rx_coal_pkt;
2903 	error = sysctl_handle_int(oidp, &v, 0, req);
2904 	if (error || req->newptr == NULL)
2905 		goto back;
2906 
2907 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2908 		error = EINVAL;
2909 		goto back;
2910 	}
2911 
2912 	if (v != sc->jme_rx_coal_pkt) {
2913 		sc->jme_rx_coal_pkt = v;
2914 		if (ifp->if_flags & IFF_RUNNING)
2915 			jme_set_rx_coal(sc);
2916 	}
2917 back:
2918 	lwkt_serialize_exit(ifp->if_serializer);
2919 	return error;
2920 }
2921 
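/*
 * Program packet completion coalescing (PCC) for Tx queue 0 (and,
 * below, for the active Rx queues): the chip raises INTR_TXQ_COAL
 * once `jme_tx_coal_pkt' completions accumulate, or
 * INTR_TXQ_COAL_TO when the `jme_tx_coal_to' timeout expires first.
 * The PCC time unit was set to microseconds in jme_init().
 */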
2922 static void
2923 jme_set_tx_coal(struct jme_softc *sc)
2924 {
2925 	uint32_t reg;
2926 
2927 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2928 	    PCCTX_COAL_TO_MASK;
2929 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2930 	    PCCTX_COAL_PKT_MASK;
2931 	reg |= PCCTX_COAL_TXQ0;
2932 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2933 }
2934 
2935 static void
2936 jme_set_rx_coal(struct jme_softc *sc)
2937 {
2938 	uint32_t reg;
2939 	int r;
2940 
2941 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2942 	    PCCRX_COAL_TO_MASK;
2943 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2944 	    PCCRX_COAL_PKT_MASK;
2945 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
2946 		if (r < sc->jme_rx_ring_inuse)
2947 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
2948 		else
2949 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
2950 	}
2951 }
2952 
2953 #ifdef DEVICE_POLLING
2954 
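/*
 * DEVICE_POLLING entry point. POLL_REGISTER/POLL_DEREGISTER simply
 * mask or unmask the interrupt sources; the poll paths process each
 * Rx ring with the given budget, restart the Rx queue if it ran
 * dry, and reap Tx completions.
 */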
2955 static void
2956 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2957 {
2958 	struct jme_softc *sc = ifp->if_softc;
2959 	struct mbuf_chain chain[MAXCPU];
2960 	uint32_t status;
2961 	int r, prog = 0;
2962 
2963 	ASSERT_SERIALIZED(ifp->if_serializer);
2964 
2965 	switch (cmd) {
2966 	case POLL_REGISTER:
2967 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2968 		break;
2969 
2970 	case POLL_DEREGISTER:
2971 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2972 		break;
2973 
2974 	case POLL_AND_CHECK_STATUS:
2975 	case POLL_ONLY:
2976 		status = CSR_READ_4(sc, JME_INTR_STATUS);
2977 
2978 		ether_input_chain_init(chain);
2979 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
2980 			prog += jme_rxeof_chain(sc, r, chain, count);
2981 		if (prog)
2982 			ether_input_dispatch(chain);
2983 
2984 		if (status & INTR_RXQ_DESC_EMPTY) {
2985 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2986 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2987 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2988 		}
2989 
2990 		jme_txeof(sc);
2991 		if (!ifq_is_empty(&ifp->if_snd))
2992 			if_devstart(ifp);
2993 		break;
2994 	}
2995 }
2996 
2997 #endif	/* DEVICE_POLLING */
2998 
2999 static int
3000 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3001 {
3002 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3003 	bus_dmamem_t dmem;
3004 	int error;
3005 
3006 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3007 			JME_RX_RING_ALIGN, 0,
3008 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3009 			JME_RX_RING_SIZE(sc),
3010 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3011 	if (error) {
3012 		device_printf(sc->jme_dev,
3013 		    "could not allocate %dth Rx ring.\n", ring);
3014 		return error;
3015 	}
3016 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3017 	rdata->jme_rx_ring_map = dmem.dmem_map;
3018 	rdata->jme_rx_ring = dmem.dmem_addr;
3019 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3020 
3021 	return 0;
3022 }
3023 
3024 static int
3025 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3026 {
3027 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3028 	int i, error;
3029 
3030 	/* Create tag for Rx buffers. */
3031 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3032 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3033 	    BUS_SPACE_MAXADDR,		/* lowaddr */
3034 	    BUS_SPACE_MAXADDR,		/* highaddr */
3035 	    NULL, NULL,			/* filter, filterarg */
3036 	    MCLBYTES,			/* maxsize */
3037 	    1,				/* nsegments */
3038 	    MCLBYTES,			/* maxsegsize */
3039 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3040 	    &rdata->jme_rx_tag);
3041 	if (error) {
3042 		device_printf(sc->jme_dev,
3043 		    "could not create %dth Rx DMA tag.\n", ring);
3044 		return error;
3045 	}
3046 
3047 	/* Create DMA maps for Rx buffers. */
3048 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3049 				  &rdata->jme_rx_sparemap);
3050 	if (error) {
3051 		device_printf(sc->jme_dev,
3052 		    "could not create %dth spare Rx dmamap.\n", ring);
3053 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3054 		rdata->jme_rx_tag = NULL;
3055 		return error;
3056 	}
3057 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3058 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3059 
3060 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3061 					  &rxd->rx_dmamap);
3062 		if (error) {
3063 			int j;
3064 
3065 			device_printf(sc->jme_dev,
3066 			    "could not create %dth Rx dmamap "
3067 			    "for %dth RX ring.\n", i, ring);
3068 
3069 			for (j = 0; j < i; ++j) {
3070 				rxd = &rdata->jme_rxdesc[j];
3071 				bus_dmamap_destroy(rdata->jme_rx_tag,
3072 						   rxd->rx_dmamap);
3073 			}
3074 			bus_dmamap_destroy(rdata->jme_rx_tag,
3075 					   rdata->jme_rx_sparemap);
3076 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3077 			rdata->jme_rx_tag = NULL;
3078 			return error;
3079 		}
3080 	}
3081 	return 0;
3082 }
3083 
3084 static void
3085 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3086 {
3087 	struct mbuf_chain chain[MAXCPU];
3088 	int r, prog = 0;
3089 
3090 	ether_input_chain_init(chain);
3091 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3092 		if (status & jme_rx_status[r].jme_coal)
3093 			prog += jme_rxeof_chain(sc, r, chain, -1);
3094 	}
3095 	if (prog)
3096 		ether_input_dispatch(chain);
3097 }
3098 
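/*
 * Enable receive-side scaling: hash IPv4 and IPv4/TCP traffic with
 * the Toeplitz key over 2 or 4 Rx rings, and fill the redirect
 * table so that hash values are spread round-robin across the
 * rings.
 */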
3099 static void
3100 jme_enable_rss(struct jme_softc *sc)
3101 {
3102 	uint32_t rssc, ind;
3103 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3104 	int i;
3105 
3106 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3107 
3108 	KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3109 		sc->jme_rx_ring_inuse == JME_NRXRING_4,
3110 		("%s: invalid # of RX rings (%d)\n",
3111 		 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3112 
3113 	rssc = RSSC_HASH_64_ENTRY;
3114 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3115 	rssc |= sc->jme_rx_ring_inuse >> 1;
3116 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3117 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3118 
3119 	toeplitz_get_key(key, sizeof(key));
3120 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3121 		uint32_t keyreg;
3122 
3123 		keyreg = RSSKEY_REGVAL(key, i);
3124 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3125 
3126 		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3127 	}
3128 
	/*
	 * Create the redirect table in the following fashion:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
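	/*
	 * Each 32-bit RSSTBL register holds (assuming a 4-byte
	 * RSSTBL_REGSIZE) four one-byte table entries, so with 4
	 * rings in use `ind' becomes 0x03020100 and with 2 rings
	 * 0x01000100; the same pattern is replicated across all
	 * RSSTBL_NREGS registers.
	 */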
3133 	ind = 0;
3134 	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3135 		int q;
3136 
3137 		q = i % sc->jme_rx_ring_inuse;
3138 		ind |= q << (i * 8);
3139 	}
3140 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3141 
3142 	for (i = 0; i < RSSTBL_NREGS; ++i)
3143 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3144 }
3145 
3146 static void
3147 jme_disable_rss(struct jme_softc *sc)
3148 {
3149 	sc->jme_rx_ring_inuse = JME_NRXRING_1;
3150 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3151 }
3152