/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */

#include "opt_polling.h"
#include "opt_rss.h"
#include "opt_jme.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"

/* Define the following to disable printing Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
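
/*
 * Illustrative usage (not part of the driver): with the JME_RSS_DEBUG
 * option compiled in and the hw.<nameunit>.rss_debug sysctl set to 1 or
 * higher, a call such as
 *
 *	JME_RSS_DPRINTF(sc, 1, "ring %d, pkt %d\n", ring, cnt);
 *
 * prints through if_printf(); without the option it compiles to nothing.
 */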

static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef DEVICE_POLLING
static void	jme_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *, int);
static int	jme_rxeof_chain(struct jme_softc *, int,
				struct mbuf_chain *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_softc *, int);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
static int	jme_rxring_dma_alloc(struct jme_softc *, int);
static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);

static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};
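
/*
 * NOTE: jme_rx_status[] is indexed by RX ring and mirrors the per-queue
 * interrupt bits (coalescing, completion, descriptor-empty) that the RX
 * interrupt handling code tests; each RX ring caches its entry in its
 * jme_rxdata during attach.
 */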

static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = JME_NRXRING_DEF;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
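
/*
 * Illustrative only: these tunables are read at boot, so they could be
 * set from /boot/loader.conf, e.g. (example values, not recommendations):
 *
 *	hw.jme.rx_desc_count=512
 *	hw.jme.rx_ring_count=4
 *	hw.jme.msix.enable=0
 */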

/*
 *	Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For the FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
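
	/*
	 * The SMI access just issued is asynchronous; the chip clears
	 * SMI_OP_EXECUTE once the read data is valid, so poll for
	 * completion (up to JME_PHY_TIMEOUT polls, 1us apart) before
	 * extracting the data field from JME_SMI below.
	 */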
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
			      "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For the FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
			      "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}

/*
 *	Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the values of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring them afterwards is not sufficient to ensure a
	 * correct MAC state, because stopping MAC operation can take
	 * a while and the hardware might have updated JME_TXNDA/JME_RXNDA
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(sc, r);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);

		/*
		 * Reuse configured Rx descriptors and reset the
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 *	Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
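	/*
	 * Walk the EEPROM descriptors: each one supplies a function/page
	 * byte, a register offset and a data byte, with consecutive
	 * descriptors JME_EEPROM_DESC_BYTES apart.  Bytes targeting the
	 * JME_PAR0..JME_PAR0 + 5 station address registers are collected
	 * until all ETHER_ADDR_LEN bytes of the MAC address are found.
	 */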
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j;
	uint8_t eaddr[ETHER_ADDR_LEN];

	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Calculate the number of RX rings based on ncpus2
	 */
	sc->jme_rx_ring_cnt = jme_rx_ring_count;
	if (sc->jme_rx_ring_cnt <= 0)
		sc->jme_rx_ring_cnt = JME_NRXRING_1;
	if (sc->jme_rx_ring_cnt > ncpus2)
		sc->jme_rx_ring_cnt = ncpus2;

	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
		sc->jme_rx_ring_cnt = JME_NRXRING_4;
	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
		sc->jme_rx_ring_cnt = JME_NRXRING_2;
	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
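
	/*
	 * Worked example of the clamping above: with ncpus2 == 8 and
	 * hw.jme.rx_ring_count set to 8, the count is first limited to
	 * ncpus2 and then rounded down to the nearest supported ring
	 * count, so at most JME_NRXRING_4 rings are used; odd values
	 * such as 3 fall back to JME_NRXRING_2.
	 */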

	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;
	sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
	for (j = 0; j < sc->jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}
	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	sc->jme_cdata.jme_sc = sc;
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers, it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
				      (reg & CHIPMODE_FPGA_REV_MASK) >>
				      CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
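
	/*
	 * Example of the encoding above: the FM revision lands in the
	 * upper nibble and the ECO revision in the lower one, so FM 2
	 * with ECO 3 yields rev == 0x23.
	 */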
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
				      GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
				      "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save the PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to find the correct one.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuff */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (about +20Mbps).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = JME_CSUM_FEATURES;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO? */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	jme_detach(dev);
	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	char rx_ring_pkt[32];
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(sc->jme_dev),
				CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
		       0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
		       0, "RX ring in use");
#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
		       0, "RSS debug level");
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
				SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
				rx_ring_pkt, CTLFLAG_RW,
				&sc->jme_rx_ring_pkt[r],
				0, "RXed packets");
	}
#endif

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust the coalesce values in case the number of TX/RX
	 * descriptors is set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned to JME_NDESC_ALIGN (16 currently).
	 */
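	/*
	 * Worked example of the clamping below: with the 6:1 TX ratio,
	 * a 384-descriptor TX ring caps tx_coal_pkt at 64 packets; with
	 * the 4:1 RX ratio, a 256-descriptor RX ring caps rx_coal_pkt
	 * at 64 as well.
	 */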
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i;

	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuff for the TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_TX_RING_SIZE(sc),
			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuff for the RX rings
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(sc, i);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuff for the shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuff for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuff for RX buffers
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(sc, i);
		if (error)
			return error;
	}
	return 0;
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}

/*
 *	Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be explicitly reset to 10/100Mbps, as a gigabit link will
 * consume more than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed, as we have
 * no control after powering off.  If the renegotiation fails, WOL may
 * not work.  Running at 1Gbps draws more than the 375mA at 3.3V
 * specified by the PCI specification, and that would result in power
 * to the ethernet controller being shut down completely.
 *
 * TODO
 *  Save current negotiated media speed/duplex/flow-control
 *  to softc and restore the same link again after resuming.
 *  PHY handling such as power down/resetting to 100Mbps
 *  may be better handled in suspend method in phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	ifnet_serialize_all(ifp);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME; clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}

static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));
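
	/*
	 * Sketch of the arithmetic above: with a 512-descriptor TX ring,
	 * an empty ring and a 64-bit DMA configuration (symbol_desc == 1),
	 * maxsegs starts at 512 - JME_TXD_RSVD - 1 and is then clamped
	 * to JME_MAXTXSEGS before the mbuf is loaded below.
	 */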

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use the 64bit TX desc chain format.
		 *
		 * The first TX desc of the chain, which is set up here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use the 32bit TX desc chain format.
		 *
		 * The first TX desc of the chain, which is set up here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
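	/*
	 * Bookkeeping note: tx_ndesc starts at 1 - i so that after the
	 * loop below adds nsegs, it ends up as nsegs + 1 when a symbol
	 * descriptor was queued (64-bit chain, i == 0) and exactly nsegs
	 * otherwise (32-bit chain, i == 1, first segment already queued).
	 */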
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally, request an interrupt and give ownership of the first
	 * descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}

static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check the number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write it, ORed with the
		 * kick command, to TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}

static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but the availability
			 * of Tx checksum offload should be checked
			 * against the new MTU size, as the FIFO size is
			 * just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if (mask & IFCAP_RSS) {
			ifp->if_capenable ^= IFCAP_RSS;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1824 
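/*
 * Reprogram the MACs to match the speed/duplex resolved by the PHY.
 * This includes the half-duplex workaround (GPREG1_WA_HDX) for
 * 10/100 half-duplex links and the extended FIFO workaround
 * (JME_WA_EXTFIFO) for CRC errors at 100Mbps on chips before the
 * JMC250B.
 */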
1825 static void
1826 jme_mac_config(struct jme_softc *sc)
1827 {
1828 	struct mii_data *mii;
1829 	uint32_t ghc, rxmac, txmac, txpause, gp1;
1830 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1831 
1832 	mii = device_get_softc(sc->jme_miibus);
1833 
1834 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1835 	DELAY(10);
1836 	CSR_WRITE_4(sc, JME_GHC, 0);
1837 	ghc = 0;
1838 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1839 	rxmac &= ~RXMAC_FC_ENB;
1840 	txmac = CSR_READ_4(sc, JME_TXMAC);
1841 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1842 	txpause = CSR_READ_4(sc, JME_TXPFC);
1843 	txpause &= ~TXPFC_PAUSE_ENB;
1844 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1845 		ghc |= GHC_FULL_DUPLEX;
1846 		rxmac &= ~RXMAC_COLL_DET_ENB;
1847 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1848 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1849 		    TXMAC_FRAME_BURST);
1850 #ifdef notyet
1851 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1852 			txpause |= TXPFC_PAUSE_ENB;
1853 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1854 			rxmac |= RXMAC_FC_ENB;
1855 #endif
1856 		/* Disable retry transmit timer/retry limit. */
1857 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1858 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1859 	} else {
1860 		rxmac |= RXMAC_COLL_DET_ENB;
1861 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1862 		/* Enable retry transmit timer/retry limit. */
1863 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1864 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1865 	}
1866 
1867 	/*
1868 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1869 	 */
1870 	gp1 = CSR_READ_4(sc, JME_GPREG1);
1871 	gp1 &= ~GPREG1_WA_HDX;
1872 
1873 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1874 		hdx = 1;
1875 
1876 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1877 	case IFM_10_T:
1878 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1879 		if (hdx)
1880 			gp1 |= GPREG1_WA_HDX;
1881 		break;
1882 
1883 	case IFM_100_TX:
1884 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1885 		if (hdx)
1886 			gp1 |= GPREG1_WA_HDX;
1887 
1888 		/*
1889 		 * Use extended FIFO depth to workaround CRC errors
1890 		 * emitted by chips before JMC250B
1891 		 */
1892 		phyconf = JMPHY_CONF_EXTFIFO;
1893 		break;
1894 
1895 	case IFM_1000_T:
1896 		if (sc->jme_caps & JME_CAP_FASTETH)
1897 			break;
1898 
1899 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1900 		if (hdx)
1901 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1902 		break;
1903 
1904 	default:
1905 		break;
1906 	}
1907 	CSR_WRITE_4(sc, JME_GHC, ghc);
1908 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1909 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
1910 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
1911 
1912 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
1913 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1914 				    JMPHY_CONF, phyconf);
1915 	}
1916 	if (sc->jme_workaround & JME_WA_HDX)
1917 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
1918 }
1919 
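/*
 * Legacy/MSI interrupt handler.  Interrupts are masked while the
 * handler runs; the coalescing status bits are acked together with
 * the corresponding completion bits before the Tx/Rx queues are
 * serviced.
 */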
1920 static void
1921 jme_intr(void *xsc)
1922 {
1923 	struct jme_softc *sc = xsc;
1924 	struct ifnet *ifp = &sc->arpcom.ac_if;
1925 	uint32_t status;
1926 	int r;
1927 
1928 	ASSERT_SERIALIZED(&sc->jme_serialize);
1929 
1930 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1931 	if (status == 0 || status == 0xFFFFFFFF)
1932 		return;
1933 
1934 	/* Disable interrupts. */
1935 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1936 
1937 	status = CSR_READ_4(sc, JME_INTR_STATUS);
1938 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1939 		goto back;
1940 
1941 	/* Reset PCC counter/timer and Ack interrupts. */
1942 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1943 
1944 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1945 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1946 
1947 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
1948 		if (status & jme_rx_status[r].jme_coal) {
1949 			status |= jme_rx_status[r].jme_coal |
1950 				  jme_rx_status[r].jme_comp;
1951 		}
1952 	}
1953 
1954 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1955 
1956 	if (ifp->if_flags & IFF_RUNNING) {
1957 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1958 			jme_rx_intr(sc, status);
1959 
1960 		if (status & INTR_RXQ_DESC_EMPTY) {
1961 			/*
1962 			 * Notify the hardware that new Rx buffers are
1963 			 * available.  Reading RXCSR takes a very long time
1964 			 * under heavy load, so use the cached RXCSR value
1965 			 * and write it ORed with the kick command to the
1966 			 * RXCSR.  This saves one register access cycle.
1967 			 */
1968 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1969 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
1970 		}
1971 
1972 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1973 			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
1974 			jme_txeof(sc);
1975 			if (!ifq_is_empty(&ifp->if_snd))
1976 				if_devstart(ifp);
1977 			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
1978 		}
1979 	}
1980 back:
1981 	/* Reenable interrupts. */
1982 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1983 }
1984 
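/*
 * Reclaim the mbufs and DMA maps of frames the hardware has finished
 * transmitting, and clear IFF_OACTIVE once enough Tx descriptors are
 * free again.
 */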
1985 static void
1986 jme_txeof(struct jme_softc *sc)
1987 {
1988 	struct ifnet *ifp = &sc->arpcom.ac_if;
1989 	struct jme_txdesc *txd;
1990 	uint32_t status;
1991 	int cons, nsegs;
1992 
1993 	cons = sc->jme_cdata.jme_tx_cons;
1994 	if (cons == sc->jme_cdata.jme_tx_prod)
1995 		return;
1996 
1997 	/*
1998 	 * Go through our Tx list and free mbufs for those
1999 	 * frames which have been transmitted.
2000 	 */
2001 	while (cons != sc->jme_cdata.jme_tx_prod) {
2002 		txd = &sc->jme_cdata.jme_txdesc[cons];
2003 		KASSERT(txd->tx_m != NULL,
2004 			("%s: freeing NULL mbuf!\n", __func__));
2005 
2006 		status = le32toh(txd->tx_desc->flags);
2007 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2008 			break;
2009 
2010 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2011 			ifp->if_oerrors++;
2012 		} else {
2013 			ifp->if_opackets++;
2014 			if (status & JME_TD_COLLISION) {
2015 				ifp->if_collisions +=
2016 				    le32toh(txd->tx_desc->buflen) &
2017 				    JME_TD_BUF_LEN_MASK;
2018 			}
2019 		}
2020 
2021 		/*
2022 		 * Only the first descriptor of a multi-descriptor
2023 		 * transmission is updated, so the driver has to skip the
2024 		 * entire buffer chain for the transmitted frame.  In other
2025 		 * words, the JME_TD_OWN bit is valid only in the first
2026 		 * descriptor of a multi-descriptor transmission.
2027 		 */
2028 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2029 			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
2030 			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
2031 		}
2032 
2033 		/* Reclaim transferred mbufs. */
2034 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2035 		m_freem(txd->tx_m);
2036 		txd->tx_m = NULL;
2037 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2038 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2039 			("%s: Active Tx desc counter was garbled\n", __func__));
2040 		txd->tx_ndesc = 0;
2041 	}
2042 	sc->jme_cdata.jme_tx_cons = cons;
2043 
2044 	if (sc->jme_cdata.jme_tx_cnt == 0)
2045 		ifp->if_timer = 0;
2046 
2047 	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
2048 	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
2049 		ifp->if_flags &= ~IFF_OACTIVE;
2050 }
2051 
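/*
 * Hand a run of Rx descriptors back to the hardware unchanged so that
 * their buffers are reused, e.g. after an Rx error or an mbuf
 * allocation failure.
 */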
2052 static __inline void
2053 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2054 {
2055 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2056 	int i;
2057 
2058 	for (i = 0; i < count; ++i) {
2059 		struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2060 
2061 		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2062 		desc->buflen = htole32(MCLBYTES);
2063 		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2064 	}
2065 }
2066 
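/*
 * Translate Rx descriptor flags into a pktinfo for RSS dispatch.
 * Returns NULL if the frame is neither IPv4 nor IPv6, or if it is
 * neither a fragment nor a TCP/UDP segment.
 */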
2067 static __inline struct pktinfo *
2068 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2069 {
2070 	if (flags & JME_RD_IPV4)
2071 		pi->pi_netisr = NETISR_IP;
2072 	else if (flags & JME_RD_IPV6)
2073 		pi->pi_netisr = NETISR_IPV6;
2074 	else
2075 		return NULL;
2076 
2077 	pi->pi_flags = 0;
2078 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2079 
2080 	if (flags & JME_RD_MORE_FRAG)
2081 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2082 	else if (flags & JME_RD_TCP)
2083 		pi->pi_l3proto = IPPROTO_TCP;
2084 	else if (flags & JME_RD_UDP)
2085 		pi->pi_l3proto = IPPROTO_UDP;
2086 	else
2087 		pi = NULL;
2088 	return pi;
2089 }
2090 
2091 /* Receive a frame. */
2092 static void
2093 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2094 {
2095 	struct ifnet *ifp = &sc->arpcom.ac_if;
2096 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2097 	struct jme_desc *desc;
2098 	struct jme_rxdesc *rxd;
2099 	struct mbuf *mp, *m;
2100 	uint32_t flags, status, hash, hashinfo;
2101 	int cons, count, nsegs;
2102 
2103 	cons = rdata->jme_rx_cons;
2104 	desc = &rdata->jme_rx_ring[cons];
2105 	flags = le32toh(desc->flags);
2106 	status = le32toh(desc->buflen);
2107 	hash = le32toh(desc->addr_hi);
2108 	hashinfo = le32toh(desc->addr_lo);
2109 	nsegs = JME_RX_NSEGS(status);
2110 
2111 	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
2112 			"hash 0x%08x, hash info 0x%08x\n",
2113 			ring, flags, hash, hashinfo);
2114 
2115 	if (status & JME_RX_ERR_STAT) {
2116 		ifp->if_ierrors++;
2117 		jme_discard_rxbufs(sc, ring, cons, nsegs);
2118 #ifdef JME_SHOW_ERRORS
2119 		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2120 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2121 #endif
2122 		rdata->jme_rx_cons += nsegs;
2123 		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2124 		return;
2125 	}
2126 
2127 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2128 	for (count = 0; count < nsegs; count++,
2129 	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2130 		rxd = &rdata->jme_rxdesc[cons];
2131 		mp = rxd->rx_m;
2132 
2133 		/* Add a new receive buffer to the ring. */
2134 		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2135 			ifp->if_iqdrops++;
2136 			/* Reuse buffer. */
2137 			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2138 			if (rdata->jme_rxhead != NULL) {
2139 				m_freem(rdata->jme_rxhead);
2140 				JME_RXCHAIN_RESET(sc, ring);
2141 			}
2142 			break;
2143 		}
2144 
2145 		/*
2146 		 * Assume we've received a full sized frame.
2147 		 * The actual size is fixed up when we encounter the
2148 		 * end of a multi-segment frame.
2149 		 */
2150 		mp->m_len = MCLBYTES;
2151 
2152 		/* Chain received mbufs. */
2153 		if (rdata->jme_rxhead == NULL) {
2154 			rdata->jme_rxhead = mp;
2155 			rdata->jme_rxtail = mp;
2156 		} else {
2157 			/*
2158 			 * The receive processor can receive a maximum
2159 			 * frame size of 65535 bytes.
2160 			 */
2161 			rdata->jme_rxtail->m_next = mp;
2162 			rdata->jme_rxtail = mp;
2163 		}
2164 
2165 		if (count == nsegs - 1) {
2166 			struct pktinfo pi0, *pi;
2167 
2168 			/* Last desc. for this frame. */
2169 			m = rdata->jme_rxhead;
2170 			m->m_pkthdr.len = rdata->jme_rxlen;
2171 			if (nsegs > 1) {
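			/*
			 * Fix up the segment lengths: the first segment
			 * holds MCLBYTES - JME_RX_PAD_BYTES bytes, middle
			 * segments MCLBYTES each, and the last segment the
			 * remainder.
			 */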
2172 				/* Set first mbuf size. */
2173 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2174 				/* Set last mbuf size. */
2175 				mp->m_len = rdata->jme_rxlen -
2176 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2177 				    (MCLBYTES * (nsegs - 2)));
2178 			} else {
2179 				m->m_len = rdata->jme_rxlen;
2180 			}
2181 			m->m_pkthdr.rcvif = ifp;
2182 
2183 			/*
2184 			 * Account for the 10 bytes of auto padding used
2185 			 * to align the IP header on a 32bit boundary.
2186 			 * Also note that the CRC bytes are automatically
2187 			 * stripped by the hardware.
2188 			 */
2189 			m->m_data += JME_RX_PAD_BYTES;
2190 
2191 			/* Set checksum information. */
2192 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2193 			    (flags & JME_RD_IPV4)) {
2194 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2195 				if (flags & JME_RD_IPCSUM)
2196 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2197 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2198 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2199 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2200 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2201 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2202 					m->m_pkthdr.csum_flags |=
2203 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2204 					m->m_pkthdr.csum_data = 0xffff;
2205 				}
2206 			}
2207 
2208 			/* Check for VLAN tagged packets. */
2209 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2210 			    (flags & JME_RD_VLAN_TAG)) {
2211 				m->m_pkthdr.ether_vlantag =
2212 				    flags & JME_RD_VLAN_MASK;
2213 				m->m_flags |= M_VLANTAG;
2214 			}
2215 
2216 			ifp->if_ipackets++;
2217 
2218 			if (ifp->if_capenable & IFCAP_RSS)
2219 				pi = jme_pktinfo(&pi0, flags);
2220 			else
2221 				pi = NULL;
2222 
2223 			if (pi != NULL &&
2224 			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
2225 				m->m_flags |= M_HASH;
2226 				m->m_pkthdr.hash = toeplitz_hash(hash);
2227 			}
2228 
2229 #ifdef JME_RSS_DEBUG
2230 			if (pi != NULL) {
2231 				JME_RSS_DPRINTF(sc, 10,
2232 				    "isr %d flags %08x, l3 %d %s\n",
2233 				    pi->pi_netisr, pi->pi_flags,
2234 				    pi->pi_l3proto,
2235 				    (m->m_flags & M_HASH) ? "hash" : "");
2236 			}
2237 #endif
2238 
2239 			/* Pass it on. */
2240 			ether_input_chain(ifp, m, pi, chain);
2241 
2242 			/* Reset mbuf chains. */
2243 			JME_RXCHAIN_RESET(sc, ring);
2244 #ifdef JME_RSS_DEBUG
2245 			sc->jme_rx_ring_pkt[ring]++;
2246 #endif
2247 		}
2248 	}
2249 
2250 	rdata->jme_rx_cons += nsegs;
2251 	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2252 }
2253 
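/*
 * Drain completed Rx descriptors on the given ring into the per-cpu
 * mbuf chains.  A negative count means no work limit (interrupt
 * path); a non-negative count bounds the work for device polling.
 */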
2254 static int
2255 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2256 		int count)
2257 {
2258 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2259 	struct jme_desc *desc;
2260 	int nsegs, prog, pktlen;
2261 
2262 	prog = 0;
2263 	for (;;) {
2264 #ifdef DEVICE_POLLING
2265 		if (count >= 0 && count-- == 0)
2266 			break;
2267 #endif
2268 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2269 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2270 			break;
2271 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2272 			break;
2273 
2274 		/*
2275 		 * Check the number of segments against the received
2276 		 * byte count.  A mismatch indicates that the hardware
2277 		 * is still updating the Rx descriptors.  It is unclear
2278 		 * whether this check is strictly necessary.
2279 		 */
2280 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2281 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2282 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2283 			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2284 				  "and packet size(%d) mismatch\n",
2285 				  nsegs, pktlen);
2286 			break;
2287 		}
2288 
2289 		/* Received a frame. */
2290 		jme_rxpkt(sc, ring, chain);
2291 		prog++;
2292 	}
2293 	return prog;
2294 }
2295 
2296 static void
2297 jme_rxeof(struct jme_softc *sc, int ring)
2298 {
2299 	struct mbuf_chain chain[MAXCPU];
2300 
2301 	ether_input_chain_init(chain);
2302 	if (jme_rxeof_chain(sc, ring, chain, -1))
2303 		ether_input_dispatch(chain);
2304 }
2305 
2306 static void
2307 jme_tick(void *xsc)
2308 {
2309 	struct jme_softc *sc = xsc;
2310 	struct ifnet *ifp = &sc->arpcom.ac_if;
2311 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2312 
2313 	ifnet_serialize_all(ifp);
2314 
2315 	mii_tick(mii);
2316 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2317 
2318 	ifnet_deserialize_all(ifp);
2319 }
2320 
2321 static void
2322 jme_reset(struct jme_softc *sc)
2323 {
2324 	uint32_t val;
2325 
2326 	/* Make sure that TX and RX are stopped */
2327 	jme_stop_tx(sc);
2328 	jme_stop_rx(sc);
2329 
2330 	/* Start reset */
2331 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2332 	DELAY(20);
2333 
2334 	/*
2335 	 * Hold the reset bit before stopping the reset.
2336 	 */
2337 
2338 	/* Disable TXMAC and TXOFL clock sources */
2339 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2340 	/* Disable RXMAC clock source */
2341 	val = CSR_READ_4(sc, JME_GPREG1);
2342 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2343 	/* Flush */
2344 	CSR_READ_4(sc, JME_GHC);
2345 
2346 	/* Stop reset */
2347 	CSR_WRITE_4(sc, JME_GHC, 0);
2348 	/* Flush */
2349 	CSR_READ_4(sc, JME_GHC);
2350 
2351 	/*
2352 	 * Clear the reset bit after stopping the reset.
2353 	 */
2354 
2355 	/* Enable TXMAC and TXOFL clock sources */
2356 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2357 	/* Enable RXMAC clock source */
2358 	val = CSR_READ_4(sc, JME_GPREG1);
2359 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2360 	/* Flush */
2361 	CSR_READ_4(sc, JME_GHC);
2362 
2363 	/* Disable TXMAC and TXOFL clock sources */
2364 	CSR_WRITE_4(sc, JME_GHC, 0);
2365 	/* Disable RXMAC clock source */
2366 	val = CSR_READ_4(sc, JME_GPREG1);
2367 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2368 	/* Flush */
2369 	CSR_READ_4(sc, JME_GHC);
2370 
2371 	/* Enable TX and RX */
2372 	val = CSR_READ_4(sc, JME_TXCSR);
2373 	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2374 	val = CSR_READ_4(sc, JME_RXCSR);
2375 	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2376 	/* Flush */
2377 	CSR_READ_4(sc, JME_TXCSR);
2378 	CSR_READ_4(sc, JME_RXCSR);
2379 
2380 	/* Enable TXMAC and TXOFL clock sources */
2381 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2382 	/* Enable RXMAC clock source */
2383 	val = CSR_READ_4(sc, JME_GPREG1);
2384 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2385 	/* Flush */
2386 	CSR_READ_4(sc, JME_GHC);
2387 
2388 	/* Stop TX and RX */
2389 	jme_stop_tx(sc);
2390 	jme_stop_rx(sc);
2391 }
2392 
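/*
 * Bring the chip up: stop and reset it, program the descriptor rings,
 * station address, DMA and coalescing parameters and the receive
 * filter, then kick media selection.  The Tx/Rx engines themselves
 * are enabled from jme_miibus_statchg once a valid link is detected.
 */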
2393 static void
2394 jme_init(void *xsc)
2395 {
2396 	struct jme_softc *sc = xsc;
2397 	struct ifnet *ifp = &sc->arpcom.ac_if;
2398 	struct mii_data *mii;
2399 	uint8_t eaddr[ETHER_ADDR_LEN];
2400 	bus_addr_t paddr;
2401 	uint32_t reg;
2402 	int error, r;
2403 
2404 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2405 
2406 	/*
2407 	 * Cancel any pending I/O.
2408 	 */
2409 	jme_stop(sc);
2410 
2411 	/*
2412 	 * Reset the chip to a known state.
2413 	 */
2414 	jme_reset(sc);
2415 
2416 	/*
2417 	 * Set up the MSI/MSI-X vector to interrupt mapping.
2418 	 */
2419 	jme_set_msinum(sc);
2420 
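	/*
	 * Worst-case number of Tx descriptors needed per frame, e.g.
	 * howmany(1518, MCLBYTES) == 1 for a standard 1500 byte MTU
	 * with 2K clusters.
	 */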
2421 	sc->jme_txd_spare =
2422 	howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2423 	KKASSERT(sc->jme_txd_spare >= 1);
2424 
2425 	/*
2426 	 * If we use 64bit address mode for transmitting, each Tx request
2427 	 * needs one more symbol descriptor.
2428 	 */
2429 	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2430 		sc->jme_txd_spare += 1;
2431 
2432 	if (ifp->if_capenable & IFCAP_RSS)
2433 		jme_enable_rss(sc);
2434 	else
2435 		jme_disable_rss(sc);
2436 
2437 	/* Init RX descriptors */
2438 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2439 		error = jme_init_rx_ring(sc, r);
2440 		if (error) {
2441 			if_printf(ifp, "initialization failed: "
2442 				  "no memory for %dth RX ring.\n", r);
2443 			jme_stop(sc);
2444 			return;
2445 		}
2446 	}
2447 
2448 	/* Init TX descriptors */
2449 	jme_init_tx_ring(sc);
2450 
2451 	/* Initialize shadow status block. */
2452 	jme_init_ssb(sc);
2453 
2454 	/* Reprogram the station address. */
2455 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2456 	CSR_WRITE_4(sc, JME_PAR0,
2457 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2458 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2459 
2460 	/*
2461 	 * Configure Tx queue.
2462 	 *  Tx priority queue weight value : 0
2463 	 *  Tx FIFO threshold for processing next packet : 16QW
2464 	 *  Maximum Tx DMA length : 512
2465 	 *  Allow Tx DMA burst.
2466 	 */
2467 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2468 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2469 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2470 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2471 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2472 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2473 
2474 	/* Set Tx descriptor counter. */
2475 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2476 
2477 	/* Set Tx ring address to the hardware. */
2478 	paddr = sc->jme_cdata.jme_tx_ring_paddr;
2479 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2480 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2481 
2482 	/* Configure TxMAC parameters. */
2483 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2484 	reg |= TXMAC_THRESH_1_PKT;
2485 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2486 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2487 
2488 	/*
2489 	 * Configure Rx queue.
2490 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2491 	 *  FIFO threshold for processing next packet : 128QW
2492 	 *  Rx queue 0 select
2493 	 *  Max Rx DMA length : 128
2494 	 *  Rx descriptor retry : 32
2495 	 *  Rx descriptor retry time gap : 256ns
2496 	 *  Don't receive runt/bad frame.
2497 	 */
2498 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2499 #if 0
2500 	/*
2501 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2502 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2503 	 * decrease FIFO threshold to reduce the FIFO overruns for
2504 	 * frames larger than 4000 bytes.
2505 	 * For best performance of standard MTU sized frames use
2506 	 * maximum allowable FIFO threshold, 128QW.
2507 	 */
2508 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2509 	    JME_RX_FIFO_SIZE)
2510 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2511 	else
2512 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2513 #else
2514 	/* Improve PCI Express compatibility */
2515 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2516 #endif
2517 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2518 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2519 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2520 	/* XXX TODO DROP_BAD */
2521 
2522 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2523 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2524 
2525 		/* Set Rx descriptor counter. */
2526 		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2527 
2528 		/* Set Rx ring address to the hardware. */
2529 		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2530 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2531 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2532 	}
2533 
2534 	/* Clear receive filter. */
2535 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2536 
2537 	/* Set up the receive filter. */
2538 	jme_set_filter(sc);
2539 	jme_set_vlan(sc);
2540 
2541 	/*
2542 	 * Disable all WOL bits as WOL can interfere with normal
2543 	 * Rx operation.  Also clear the WOL detection status bits.
2544 	 */
2545 	reg = CSR_READ_4(sc, JME_PMCS);
2546 	reg &= ~PMCS_WOL_ENB_MASK;
2547 	CSR_WRITE_4(sc, JME_PMCS, reg);
2548 
2549 	/*
2550 	 * Pad 10 bytes right before the received frame.  This greatly
2551 	 * helps Rx performance on strict-alignment architectures, as
2552 	 * the frame need not be copied to align the payload.
2553 	 */
2554 	reg = CSR_READ_4(sc, JME_RXMAC);
2555 	reg |= RXMAC_PAD_10BYTES;
2556 
2557 	if (ifp->if_capenable & IFCAP_RXCSUM)
2558 		reg |= RXMAC_CSUM_ENB;
2559 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2560 
2561 	/* Configure general purpose reg0 */
2562 	reg = CSR_READ_4(sc, JME_GPREG0);
2563 	reg &= ~GPREG0_PCC_UNIT_MASK;
2564 	/* Set PCC timer resolution to micro-seconds unit. */
2565 	reg |= GPREG0_PCC_UNIT_US;
2566 	/*
2567 	 * Disable all shadow register posting as we have to read
2568 	 * the JME_INTR_STATUS register in jme_intr.  It also seems
2569 	 * hard to synchronize the interrupt status between the
2570 	 * hardware and the software with shadow posting, due to
2571 	 * the requirements of bus_dmamap_sync(9).
2572 	 */
2573 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2574 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2575 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2576 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2577 	/* Disable posting of DW0. */
2578 	reg &= ~GPREG0_POST_DW0_ENB;
2579 	/* Clear PME message. */
2580 	reg &= ~GPREG0_PME_ENB;
2581 	/* Set PHY address. */
2582 	reg &= ~GPREG0_PHY_ADDR_MASK;
2583 	reg |= sc->jme_phyaddr;
2584 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2585 
2586 	/* Configure Tx queue 0 packet completion coalescing. */
2587 	jme_set_tx_coal(sc);
2588 
2589 	/* Configure Rx queue 0 packet completion coalescing. */
2590 	jme_set_rx_coal(sc);
2591 
2592 	/* Configure shadow status block but don't enable posting. */
2593 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2594 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2595 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2596 
2597 	/* Disable Timer 1 and Timer 2. */
2598 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2599 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2600 
2601 	/* Configure retry transmit period, retry limit value. */
2602 	CSR_WRITE_4(sc, JME_TXTRHD,
2603 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2604 	    TXTRHD_RT_PERIOD_MASK) |
2605 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2606 	    TXTRHD_RT_LIMIT_MASK));
2607 
2608 	/* Initialize the interrupt mask. */
2609 #ifdef DEVICE_POLLING
2610 	if (!(ifp->if_flags & IFF_POLLING))
2611 #endif
2612 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2613 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2614 
2615 	/*
2616 	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
2617 	 * done after a valid link is detected in jme_miibus_statchg.
2618 	 */
2619 	sc->jme_flags &= ~JME_FLAG_LINK;
2620 
2621 	/* Set the current media. */
2622 	mii = device_get_softc(sc->jme_miibus);
2623 	mii_mediachg(mii);
2624 
2625 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2626 
2627 	ifp->if_flags |= IFF_RUNNING;
2628 	ifp->if_flags &= ~IFF_OACTIVE;
2629 }
2630 
2631 static void
2632 jme_stop(struct jme_softc *sc)
2633 {
2634 	struct ifnet *ifp = &sc->arpcom.ac_if;
2635 	struct jme_txdesc *txd;
2636 	struct jme_rxdesc *rxd;
2637 	struct jme_rxdata *rdata;
2638 	int i, r;
2639 
2640 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2641 
2642 	/*
2643 	 * Mark the interface down and cancel the watchdog timer.
2644 	 */
2645 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2646 	ifp->if_timer = 0;
2647 
2648 	callout_stop(&sc->jme_tick_ch);
2649 	sc->jme_flags &= ~JME_FLAG_LINK;
2650 
2651 	/*
2652 	 * Disable interrupts.
2653 	 */
2654 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2655 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2656 
2657 	/* Disable updating shadow status block. */
2658 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2659 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2660 
2661 	/* Stop receiver, transmitter. */
2662 	jme_stop_rx(sc);
2663 	jme_stop_tx(sc);
2664 
2665 	/*
2666 	 * Free partially finished RX segments
2667 	 */
2668 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2669 		rdata = &sc->jme_cdata.jme_rx_data[r];
2670 		if (rdata->jme_rxhead != NULL)
2671 			m_freem(rdata->jme_rxhead);
2672 		JME_RXCHAIN_RESET(sc, r);
2673 	}
2674 
2675 	/*
2676 	 * Free RX and TX mbufs still in the queues.
2677 	 */
2678 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2679 		rdata = &sc->jme_cdata.jme_rx_data[r];
2680 		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2681 			rxd = &rdata->jme_rxdesc[i];
2682 			if (rxd->rx_m != NULL) {
2683 				bus_dmamap_unload(rdata->jme_rx_tag,
2684 						  rxd->rx_dmamap);
2685 				m_freem(rxd->rx_m);
2686 				rxd->rx_m = NULL;
2687 			}
2688 		}
2689 	}
2690 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2691 		txd = &sc->jme_cdata.jme_txdesc[i];
2692 		if (txd->tx_m != NULL) {
2693 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2694 			    txd->tx_dmamap);
2695 			m_freem(txd->tx_m);
2696 			txd->tx_m = NULL;
2697 			txd->tx_ndesc = 0;
2698 		}
2699 	}
2700 }
2701 
2702 static void
2703 jme_stop_tx(struct jme_softc *sc)
2704 {
2705 	uint32_t reg;
2706 	int i;
2707 
2708 	reg = CSR_READ_4(sc, JME_TXCSR);
2709 	if ((reg & TXCSR_TX_ENB) == 0)
2710 		return;
2711 	reg &= ~TXCSR_TX_ENB;
2712 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2713 	for (i = JME_TIMEOUT; i > 0; i--) {
2714 		DELAY(1);
2715 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2716 			break;
2717 	}
2718 	if (i == 0)
2719 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2720 }
2721 
2722 static void
2723 jme_stop_rx(struct jme_softc *sc)
2724 {
2725 	uint32_t reg;
2726 	int i;
2727 
2728 	reg = CSR_READ_4(sc, JME_RXCSR);
2729 	if ((reg & RXCSR_RX_ENB) == 0)
2730 		return;
2731 	reg &= ~RXCSR_RX_ENB;
2732 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2733 	for (i = JME_TIMEOUT; i > 0; i--) {
2734 		DELAY(1);
2735 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2736 			break;
2737 	}
2738 	if (i == 0)
2739 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2740 }
2741 
2742 static void
2743 jme_init_tx_ring(struct jme_softc *sc)
2744 {
2745 	struct jme_chain_data *cd;
2746 	struct jme_txdesc *txd;
2747 	int i;
2748 
2749 	sc->jme_cdata.jme_tx_prod = 0;
2750 	sc->jme_cdata.jme_tx_cons = 0;
2751 	sc->jme_cdata.jme_tx_cnt = 0;
2752 
2753 	cd = &sc->jme_cdata;
2754 	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2755 	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2756 		txd = &sc->jme_cdata.jme_txdesc[i];
2757 		txd->tx_m = NULL;
2758 		txd->tx_desc = &cd->jme_tx_ring[i];
2759 		txd->tx_ndesc = 0;
2760 	}
2761 }
2762 
2763 static void
2764 jme_init_ssb(struct jme_softc *sc)
2765 {
2766 	struct jme_chain_data *cd;
2767 
2768 	cd = &sc->jme_cdata;
2769 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2770 }
2771 
2772 static int
2773 jme_init_rx_ring(struct jme_softc *sc, int ring)
2774 {
2775 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2776 	struct jme_rxdesc *rxd;
2777 	int i;
2778 
2779 	KKASSERT(rdata->jme_rxhead == NULL &&
2780 		 rdata->jme_rxtail == NULL &&
2781 		 rdata->jme_rxlen == 0);
2782 	rdata->jme_rx_cons = 0;
2783 
2784 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2785 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2786 		int error;
2787 
2788 		rxd = &rdata->jme_rxdesc[i];
2789 		rxd->rx_m = NULL;
2790 		rxd->rx_desc = &rdata->jme_rx_ring[i];
2791 		error = jme_newbuf(sc, ring, rxd, 1);
2792 		if (error)
2793 			return error;
2794 	}
2795 	return 0;
2796 }
2797 
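/*
 * Attach a freshly allocated mbuf cluster to the given Rx descriptor.
 * The spare DMA map is loaded first, so the old buffer stays intact
 * if the allocation or the map load fails.
 */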
2798 static int
2799 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2800 {
2801 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2802 	struct jme_desc *desc;
2803 	struct mbuf *m;
2804 	bus_dma_segment_t segs;
2805 	bus_dmamap_t map;
2806 	int error, nsegs;
2807 
2808 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2809 	if (m == NULL)
2810 		return ENOBUFS;
2811 	/*
2812 	 * The JMC250 has a 64bit boundary alignment limitation, so
2813 	 * jme(4) takes advantage of the hardware's 10 byte padding
2814 	 * feature to avoid copying the entire frame just to align
2815 	 * the IP header on a 32bit boundary.
2816 	 */
2817 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2818 
2819 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2820 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2821 			BUS_DMA_NOWAIT);
2822 	if (error) {
2823 		m_freem(m);
2824 		if (init)
2825 			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2826 		return error;
2827 	}
2828 
2829 	if (rxd->rx_m != NULL) {
2830 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2831 				BUS_DMASYNC_POSTREAD);
2832 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2833 	}
2834 	map = rxd->rx_dmamap;
2835 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
2836 	rdata->jme_rx_sparemap = map;
2837 	rxd->rx_m = m;
2838 
2839 	desc = rxd->rx_desc;
2840 	desc->buflen = htole32(segs.ds_len);
2841 	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2842 	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2843 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2844 
2845 	return 0;
2846 }
2847 
2848 static void
2849 jme_set_vlan(struct jme_softc *sc)
2850 {
2851 	struct ifnet *ifp = &sc->arpcom.ac_if;
2852 	uint32_t reg;
2853 
2854 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2855 
2856 	reg = CSR_READ_4(sc, JME_RXMAC);
2857 	reg &= ~RXMAC_VLAN_ENB;
2858 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2859 		reg |= RXMAC_VLAN_ENB;
2860 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2861 }
2862 
2863 static void
2864 jme_set_filter(struct jme_softc *sc)
2865 {
2866 	struct ifnet *ifp = &sc->arpcom.ac_if;
2867 	struct ifmultiaddr *ifma;
2868 	uint32_t crc;
2869 	uint32_t mchash[2];
2870 	uint32_t rxcfg;
2871 
2872 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2873 
2874 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2875 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2876 	    RXMAC_ALLMULTI);
2877 
2878 	/*
2879 	 * Always accept frames destined to our station address.
2880 	 * Always accept broadcast frames.
2881 	 */
2882 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2883 
2884 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2885 		if (ifp->if_flags & IFF_PROMISC)
2886 			rxcfg |= RXMAC_PROMISC;
2887 		if (ifp->if_flags & IFF_ALLMULTI)
2888 			rxcfg |= RXMAC_ALLMULTI;
2889 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2890 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2891 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2892 		return;
2893 	}
2894 
2895 	/*
2896 	 * Set up the multicast address filter by passing all multicast
2897 	 * addresses through a CRC generator, and then using the low-order
2898 	 * 6 bits as an index into the 64 bit multicast hash table.  The
2899 	 * high order bits select the register, while the rest of the bits
2900 	 * select the bit within the register.
2901 	 */
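	/* E.g. a CRC of 0x25 (0b100101) sets bit 5 of MAR1 (mchash[1]). */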
2902 	rxcfg |= RXMAC_MULTICAST;
2903 	bzero(mchash, sizeof(mchash));
2904 
2905 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2906 		if (ifma->ifma_addr->sa_family != AF_LINK)
2907 			continue;
2908 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2909 		    ifma->ifma_addr), ETHER_ADDR_LEN);
2910 
2911 		/* Just want the 6 least significant bits. */
2912 		crc &= 0x3f;
2913 
2914 		/* Set the corresponding bit in the hash table. */
2915 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
2916 	}
2917 
2918 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2919 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2920 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2921 }
2922 
2923 static int
2924 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2925 {
2926 	struct jme_softc *sc = arg1;
2927 	struct ifnet *ifp = &sc->arpcom.ac_if;
2928 	int error, v;
2929 
2930 	ifnet_serialize_all(ifp);
2931 
2932 	v = sc->jme_tx_coal_to;
2933 	error = sysctl_handle_int(oidp, &v, 0, req);
2934 	if (error || req->newptr == NULL)
2935 		goto back;
2936 
2937 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2938 		error = EINVAL;
2939 		goto back;
2940 	}
2941 
2942 	if (v != sc->jme_tx_coal_to) {
2943 		sc->jme_tx_coal_to = v;
2944 		if (ifp->if_flags & IFF_RUNNING)
2945 			jme_set_tx_coal(sc);
2946 	}
2947 back:
2948 	ifnet_deserialize_all(ifp);
2949 	return error;
2950 }
2951 
2952 static int
2953 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2954 {
2955 	struct jme_softc *sc = arg1;
2956 	struct ifnet *ifp = &sc->arpcom.ac_if;
2957 	int error, v;
2958 
2959 	ifnet_serialize_all(ifp);
2960 
2961 	v = sc->jme_tx_coal_pkt;
2962 	error = sysctl_handle_int(oidp, &v, 0, req);
2963 	if (error || req->newptr == NULL)
2964 		goto back;
2965 
2966 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2967 		error = EINVAL;
2968 		goto back;
2969 	}
2970 
2971 	if (v != sc->jme_tx_coal_pkt) {
2972 		sc->jme_tx_coal_pkt = v;
2973 		if (ifp->if_flags & IFF_RUNNING)
2974 			jme_set_tx_coal(sc);
2975 	}
2976 back:
2977 	ifnet_deserialize_all(ifp);
2978 	return error;
2979 }
2980 
2981 static int
2982 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2983 {
2984 	struct jme_softc *sc = arg1;
2985 	struct ifnet *ifp = &sc->arpcom.ac_if;
2986 	int error, v;
2987 
2988 	ifnet_serialize_all(ifp);
2989 
2990 	v = sc->jme_rx_coal_to;
2991 	error = sysctl_handle_int(oidp, &v, 0, req);
2992 	if (error || req->newptr == NULL)
2993 		goto back;
2994 
2995 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2996 		error = EINVAL;
2997 		goto back;
2998 	}
2999 
3000 	if (v != sc->jme_rx_coal_to) {
3001 		sc->jme_rx_coal_to = v;
3002 		if (ifp->if_flags & IFF_RUNNING)
3003 			jme_set_rx_coal(sc);
3004 	}
3005 back:
3006 	ifnet_deserialize_all(ifp);
3007 	return error;
3008 }
3009 
3010 static int
3011 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3012 {
3013 	struct jme_softc *sc = arg1;
3014 	struct ifnet *ifp = &sc->arpcom.ac_if;
3015 	int error, v;
3016 
3017 	ifnet_serialize_all(ifp);
3018 
3019 	v = sc->jme_rx_coal_pkt;
3020 	error = sysctl_handle_int(oidp, &v, 0, req);
3021 	if (error || req->newptr == NULL)
3022 		goto back;
3023 
3024 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3025 		error = EINVAL;
3026 		goto back;
3027 	}
3028 
3029 	if (v != sc->jme_rx_coal_pkt) {
3030 		sc->jme_rx_coal_pkt = v;
3031 		if (ifp->if_flags & IFF_RUNNING)
3032 			jme_set_rx_coal(sc);
3033 	}
3034 back:
3035 	ifnet_deserialize_all(ifp);
3036 	return error;
3037 }
3038 
3039 static void
3040 jme_set_tx_coal(struct jme_softc *sc)
3041 {
3042 	uint32_t reg;
3043 
3044 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3045 	    PCCTX_COAL_TO_MASK;
3046 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3047 	    PCCTX_COAL_PKT_MASK;
3048 	reg |= PCCTX_COAL_TXQ0;
3049 	CSR_WRITE_4(sc, JME_PCCTX, reg);
3050 }
3051 
3052 static void
3053 jme_set_rx_coal(struct jme_softc *sc)
3054 {
3055 	uint32_t reg;
3056 	int r;
3057 
3058 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3059 	    PCCRX_COAL_TO_MASK;
3060 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3061 	    PCCRX_COAL_PKT_MASK;
3062 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3063 		if (r < sc->jme_rx_ring_inuse)
3064 			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3065 		else
3066 			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3067 	}
3068 }
3069 
3070 #ifdef DEVICE_POLLING
3071 
3072 static void
3073 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3074 {
3075 	struct jme_softc *sc = ifp->if_softc;
3076 	struct mbuf_chain chain[MAXCPU];
3077 	uint32_t status;
3078 	int r, prog = 0;
3079 
3080 	ASSERT_SERIALIZED(&sc->jme_serialize);
3081 
3082 	switch (cmd) {
3083 	case POLL_REGISTER:
3084 		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3085 		break;
3086 
3087 	case POLL_DEREGISTER:
3088 		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3089 		break;
3090 
3091 	case POLL_AND_CHECK_STATUS:
3092 	case POLL_ONLY:
3093 		status = CSR_READ_4(sc, JME_INTR_STATUS);
3094 
3095 		ether_input_chain_init(chain);
3096 		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3097 			struct jme_rxdata *rdata =
3098 			    &sc->jme_cdata.jme_rx_data[r];
3099 
3100 			lwkt_serialize_enter(&rdata->jme_rx_serialize);
3101 			prog += jme_rxeof_chain(sc, r, chain, count);
3102 			lwkt_serialize_exit(&rdata->jme_rx_serialize);
3103 		}
3104 		if (prog)
3105 			ether_input_dispatch(chain);
3106 
3107 		if (status & INTR_RXQ_DESC_EMPTY) {
3108 			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3109 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3110 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
3111 		}
3112 
3113 		lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3114 		jme_txeof(sc);
3115 		if (!ifq_is_empty(&ifp->if_snd))
3116 			if_devstart(ifp);
3117 		lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3118 		break;
3119 	}
3120 }
3121 
3122 #endif	/* DEVICE_POLLING */
3123 
3124 static int
3125 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3126 {
3127 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3128 	bus_dmamem_t dmem;
3129 	int error;
3130 
3131 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3132 			JME_RX_RING_ALIGN, 0,
3133 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3134 			JME_RX_RING_SIZE(sc),
3135 			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3136 	if (error) {
3137 		device_printf(sc->jme_dev,
3138 		    "could not allocate %dth Rx ring.\n", ring);
3139 		return error;
3140 	}
3141 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3142 	rdata->jme_rx_ring_map = dmem.dmem_map;
3143 	rdata->jme_rx_ring = dmem.dmem_addr;
3144 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3145 
3146 	return 0;
3147 }
3148 
3149 static int
3150 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3151 {
3152 	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3153 	int i, error;
3154 
3155 	/* Create tag for Rx buffers. */
3156 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3157 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3158 	    BUS_SPACE_MAXADDR,		/* lowaddr */
3159 	    BUS_SPACE_MAXADDR,		/* highaddr */
3160 	    NULL, NULL,			/* filter, filterarg */
3161 	    MCLBYTES,			/* maxsize */
3162 	    1,				/* nsegments */
3163 	    MCLBYTES,			/* maxsegsize */
3164 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3165 	    &rdata->jme_rx_tag);
3166 	if (error) {
3167 		device_printf(sc->jme_dev,
3168 		    "could not create %dth Rx DMA tag.\n", ring);
3169 		return error;
3170 	}
3171 
3172 	/* Create DMA maps for Rx buffers. */
3173 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3174 				  &rdata->jme_rx_sparemap);
3175 	if (error) {
3176 		device_printf(sc->jme_dev,
3177 		    "could not create %dth spare Rx dmamap.\n", ring);
3178 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3179 		rdata->jme_rx_tag = NULL;
3180 		return error;
3181 	}
3182 	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3183 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3184 
3185 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3186 					  &rxd->rx_dmamap);
3187 		if (error) {
3188 			int j;
3189 
3190 			device_printf(sc->jme_dev,
3191 			    "could not create %dth Rx dmamap "
3192 			    "for %dth RX ring.\n", i, ring);
3193 
3194 			for (j = 0; j < i; ++j) {
3195 				rxd = &rdata->jme_rxdesc[j];
3196 				bus_dmamap_destroy(rdata->jme_rx_tag,
3197 						   rxd->rx_dmamap);
3198 			}
3199 			bus_dmamap_destroy(rdata->jme_rx_tag,
3200 					   rdata->jme_rx_sparemap);
3201 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3202 			rdata->jme_rx_tag = NULL;
3203 			return error;
3204 		}
3205 	}
3206 	return 0;
3207 }
3208 
3209 static void
3210 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3211 {
3212 	struct mbuf_chain chain[MAXCPU];
3213 	int r, prog = 0;
3214 
3215 	ether_input_chain_init(chain);
3216 	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3217 		if (status & jme_rx_status[r].jme_coal) {
3218 			struct jme_rxdata *rdata =
3219 			    &sc->jme_cdata.jme_rx_data[r];
3220 
3221 			lwkt_serialize_enter(&rdata->jme_rx_serialize);
3222 			prog += jme_rxeof_chain(sc, r, chain, -1);
3223 			lwkt_serialize_exit(&rdata->jme_rx_serialize);
3224 		}
3225 	}
3226 	if (prog)
3227 		ether_input_dispatch(chain);
3228 }
3229 
3230 static void
3231 jme_enable_rss(struct jme_softc *sc)
3232 {
3233 	uint32_t rssc, ind;
3234 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3235 	int i;
3236 
3237 	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3238 
3239 	KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3240 		sc->jme_rx_ring_inuse == JME_NRXRING_4,
3241 		("%s: invalid # of RX rings (%d)\n",
3242 		 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3243 
3244 	rssc = RSSC_HASH_64_ENTRY;
3245 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3246 	rssc |= sc->jme_rx_ring_inuse >> 1;
3247 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3248 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3249 
3250 	toeplitz_get_key(key, sizeof(key));
3251 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3252 		uint32_t keyreg;
3253 
3254 		keyreg = RSSKEY_REGVAL(key, i);
3255 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3256 
3257 		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3258 	}
3259 
3260 	/*
3261 	 * Create the redirect table in the following fashion:
3262 	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3263 	 */
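	/*
	 * Each redirect register packs byte-wide ring indices, so with
	 * 2 rings in use the entries alternate 0,1,0,1,... and with
	 * 4 rings they cycle 0,1,2,3.
	 */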
3264 	ind = 0;
3265 	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3266 		int q;
3267 
3268 		q = i % sc->jme_rx_ring_inuse;
3269 		ind |= q << (i * 8);
3270 	}
3271 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3272 
3273 	for (i = 0; i < RSSTBL_NREGS; ++i)
3274 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3275 }
3276 
3277 static void
3278 jme_disable_rss(struct jme_softc *sc)
3279 {
3280 	sc->jme_rx_ring_inuse = JME_NRXRING_1;
3281 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3282 }
3283 
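/*
 * ifnet serialize hooks: IFNET_SERIALIZE_ALL takes every serializer
 * in jme_serialize_arr, while the MAIN/TX/RX(n) variants take the
 * corresponding individual serializer.
 */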
3284 static void
3285 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3286 {
3287 	struct jme_softc *sc = ifp->if_softc;
3288 
3289 	switch (slz) {
3290 	case IFNET_SERIALIZE_ALL:
3291 		lwkt_serialize_array_enter(sc->jme_serialize_arr,
3292 		    sc->jme_serialize_cnt, 0);
3293 		break;
3294 
3295 	case IFNET_SERIALIZE_MAIN:
3296 		lwkt_serialize_enter(&sc->jme_serialize);
3297 		break;
3298 
3299 	case IFNET_SERIALIZE_TX:
3300 		lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3301 		break;
3302 
3303 	case IFNET_SERIALIZE_RX(0):
3304 		lwkt_serialize_enter(
3305 		    &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3306 		break;
3307 
3308 	case IFNET_SERIALIZE_RX(1):
3309 		lwkt_serialize_enter(
3310 		    &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3311 		break;
3312 
3313 	case IFNET_SERIALIZE_RX(2):
3314 		lwkt_serialize_enter(
3315 		    &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3316 		break;
3317 
3318 	case IFNET_SERIALIZE_RX(3):
3319 		lwkt_serialize_enter(
3320 		    &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3321 		break;
3322 
3323 	default:
3324 		panic("%s unsupported serialize type\n", ifp->if_xname);
3325 	}
3326 }
3327 
3328 static void
3329 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3330 {
3331 	struct jme_softc *sc = ifp->if_softc;
3332 
3333 	switch (slz) {
3334 	case IFNET_SERIALIZE_ALL:
3335 		lwkt_serialize_array_exit(sc->jme_serialize_arr,
3336 		    sc->jme_serialize_cnt, 0);
3337 		break;
3338 
3339 	case IFNET_SERIALIZE_MAIN:
3340 		lwkt_serialize_exit(&sc->jme_serialize);
3341 		break;
3342 
3343 	case IFNET_SERIALIZE_TX:
3344 		lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3345 		break;
3346 
3347 	case IFNET_SERIALIZE_RX(0):
3348 		lwkt_serialize_exit(
3349 		    &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3350 		break;
3351 
3352 	case IFNET_SERIALIZE_RX(1):
3353 		lwkt_serialize_exit(
3354 		    &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3355 		break;
3356 
3357 	case IFNET_SERIALIZE_RX(2):
3358 		lwkt_serialize_exit(
3359 		    &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3360 		break;
3361 
3362 	case IFNET_SERIALIZE_RX(3):
3363 		lwkt_serialize_exit(
3364 		    &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3365 		break;
3366 
3367 	default:
3368 		panic("%s unsupported serialize type\n", ifp->if_xname);
3369 	}
3370 }
3371 
3372 static int
3373 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3374 {
3375 	struct jme_softc *sc = ifp->if_softc;
3376 
3377 	switch (slz) {
3378 	case IFNET_SERIALIZE_ALL:
3379 		return lwkt_serialize_array_try(sc->jme_serialize_arr,
3380 		    sc->jme_serialize_cnt, 0);
3381 
3382 	case IFNET_SERIALIZE_MAIN:
3383 		return lwkt_serialize_try(&sc->jme_serialize);
3384 
3385 	case IFNET_SERIALIZE_TX:
3386 		return lwkt_serialize_try(&sc->jme_cdata.jme_tx_serialize);
3387 
3388 	case IFNET_SERIALIZE_RX(0):
3389 		return lwkt_serialize_try(
3390 		    &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3391 
3392 	case IFNET_SERIALIZE_RX(1):
3393 		return lwkt_serialize_try(
3394 		    &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3395 
3396 	case IFNET_SERIALIZE_RX(2):
3397 		return lwkt_serialize_try(
3398 		    &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3399 
3400 	case IFNET_SERIALIZE_RX(3):
3401 		return lwkt_serialize_try(
3402 		    &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3403 
3404 	default:
3405 		panic("%s unsupported serialize type\n", ifp->if_xname);
3406 	}
3407 }
3408 
3409 #ifdef INVARIANTS
3410 
3411 static void
3412 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3413     boolean_t serialized)
3414 {
3415 	struct jme_softc *sc = ifp->if_softc;
3416 	struct jme_rxdata *rdata;
3417 	int i;
3418 
3419 	switch (slz) {
3420 	case IFNET_SERIALIZE_ALL:
3421 		if (serialized) {
3422 			for (i = 0; i < sc->jme_serialize_cnt; ++i)
3423 				ASSERT_SERIALIZED(sc->jme_serialize_arr[i]);
3424 		} else {
3425 			for (i = 0; i < sc->jme_serialize_cnt; ++i)
3426 				ASSERT_NOT_SERIALIZED(sc->jme_serialize_arr[i]);
3427 		}
3428 		break;
3429 
3430 	case IFNET_SERIALIZE_MAIN:
3431 		if (serialized)
3432 			ASSERT_SERIALIZED(&sc->jme_serialize);
3433 		else
3434 			ASSERT_NOT_SERIALIZED(&sc->jme_serialize);
3435 		break;
3436 
3437 	case IFNET_SERIALIZE_TX:
3438 		if (serialized)
3439 			ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3440 		else
3441 			ASSERT_NOT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3442 		break;
3443 
3444 	case IFNET_SERIALIZE_RX(0):
3445 		rdata = &sc->jme_cdata.jme_rx_data[0];
3446 		if (serialized)
3447 			ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3448 		else
3449 			ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3450 		break;
3451 
3452 	case IFNET_SERIALIZE_RX(1):
3453 		rdata = &sc->jme_cdata.jme_rx_data[1];
3454 		if (serialized)
3455 			ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3456 		else
3457 			ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3458 		break;
3459 
3460 	case IFNET_SERIALIZE_RX(2):
3461 		rdata = &sc->jme_cdata.jme_rx_data[2];
3462 		if (serialized)
3463 			ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3464 		else
3465 			ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3466 		break;
3467 
3468 	case IFNET_SERIALIZE_RX(3):
3469 		rdata = &sc->jme_cdata.jme_rx_data[3];
3470 		if (serialized)
3471 			ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3472 		else
3473 			ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3474 		break;
3475 
3476 	default:
3477 		panic("%s unsupported serialize type\n", ifp->if_xname);
3478 	}
3479 }
3480 
3481 #endif	/* INVARIANTS */
3482 
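/*
 * Try to use MSI-X: one vector for Tx and one per Rx ring, with
 * vector 0 left unused.  The vector assignment for each interrupt
 * source is accumulated in jme_msinum[] and later programmed into
 * the chip by jme_set_msinum().
 */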
3483 static void
3484 jme_msix_try_alloc(device_t dev)
3485 {
3486 	struct jme_softc *sc = device_get_softc(dev);
3487 	struct jme_msix_data *msix;
3488 	int error, i, r, msix_enable, msix_count;
3489 	char env[64];
3490 
3491 	msix_count = 1 + sc->jme_rx_ring_cnt;
3492 	KKASSERT(msix_count <= JME_NMSIX);
3493 
3494 	msix_enable = jme_msix_enable;
3495 	ksnprintf(env, sizeof(env), "hw.%s.msix.enable",
3496 	    device_get_nameunit(dev));
3497 	kgetenv_int(env, &msix_enable);
3498 
3499 	/*
3500 	 * We leave the 1st MSI-X vector unused, so we
3501 	 * actually need msix_count + 1 MSI-X vectors.
3502 	 */
3503 	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3504 		return;
3505 
3506 	for (i = 0; i < msix_count; ++i)
3507 		sc->jme_msix[i].jme_msix_rid = -1;
3508 
3509 	i = 0;
3510 
3511 	msix = &sc->jme_msix[i++];
3512 	msix->jme_msix_cpuid = 0;		/* XXX Put TX to cpu0 */
3513 	msix->jme_msix_arg = &sc->jme_cdata;
3514 	msix->jme_msix_func = jme_msix_tx;
3515 	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3516 	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
3517 	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3518 	    device_get_nameunit(dev));
3519 
3520 	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3521 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3522 
3523 		msix = &sc->jme_msix[i++];
3524 		msix->jme_msix_cpuid = r;	/* XXX Put RX to cpuX */
3525 		msix->jme_msix_arg = rdata;
3526 		msix->jme_msix_func = jme_msix_rx;
3527 		msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
3528 		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3529 		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3530 		    "%s rx%d", device_get_nameunit(dev), r);
3531 	}
3532 
3533 	KKASSERT(i == msix_count);
3534 
3535 	error = pci_setup_msix(dev);
3536 	if (error)
3537 		return;
3538 
3539 	/* Setup jme_msix_cnt early, so we could cleanup */
3540 	sc->jme_msix_cnt = msix_count;
3541 
3542 	for (i = 0; i < msix_count; ++i) {
3543 		msix = &sc->jme_msix[i];
3544 
3545 		msix->jme_msix_vector = i + 1;
3546 		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3547 		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
3548 		if (error)
3549 			goto back;
3550 
3551 		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3552 		    &msix->jme_msix_rid, RF_ACTIVE);
3553 		if (msix->jme_msix_res == NULL) {
3554 			error = ENOMEM;
3555 			goto back;
3556 		}
3557 	}
3558 
3559 	for (i = 0; i < JME_INTR_CNT; ++i) {
3560 		uint32_t intr_mask = (1 << i);
3561 		int x;
3562 
3563 		if ((JME_INTRS & intr_mask) == 0)
3564 			continue;
3565 
3566 		for (x = 0; x < msix_count; ++x) {
3567 			msix = &sc->jme_msix[x];
3568 			if (msix->jme_msix_intrs & intr_mask) {
3569 				int reg, shift;
3570 
3571 				reg = i / JME_MSINUM_FACTOR;
3572 				KKASSERT(reg < JME_MSINUM_CNT);
3573 
3574 				shift = (i % JME_MSINUM_FACTOR) * 4;
3575 
3576 				sc->jme_msinum[reg] |=
3577 				    (msix->jme_msix_vector << shift);
3578 
3579 				break;
3580 			}
3581 		}
3582 	}
3583 
3584 	if (bootverbose) {
3585 		for (i = 0; i < JME_MSINUM_CNT; ++i) {
3586 			device_printf(dev, "MSINUM%d: %#x\n", i,
3587 			    sc->jme_msinum[i]);
3588 		}
3589 	}
3590 
3591 	pci_enable_msix(dev);
3592 	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3593 
3594 back:
3595 	if (error)
3596 		jme_msix_free(dev);
3597 }
3598 
3599 static int
3600 jme_intr_alloc(device_t dev)
3601 {
3602 	struct jme_softc *sc = device_get_softc(dev);
3603 	u_int irq_flags;
3604 
3605 	jme_msix_try_alloc(dev);
3606 
3607 	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3608 		sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3609 		    &sc->jme_irq_rid, &irq_flags);
3610 
3611 		sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3612 		    &sc->jme_irq_rid, irq_flags);
3613 		if (sc->jme_irq_res == NULL) {
3614 			device_printf(dev, "can't allocate irq\n");
3615 			return ENXIO;
3616 		}
3617 	}
3618 	return 0;
3619 }
3620 
3621 static void
3622 jme_msix_free(device_t dev)
3623 {
3624 	struct jme_softc *sc = device_get_softc(dev);
3625 	int i;
3626 
3627 	KKASSERT(sc->jme_msix_cnt > 1);
3628 
3629 	for (i = 0; i < sc->jme_msix_cnt; ++i) {
3630 		struct jme_msix_data *msix = &sc->jme_msix[i];
3631 
3632 		if (msix->jme_msix_res != NULL) {
3633 			bus_release_resource(dev, SYS_RES_IRQ,
3634 			    msix->jme_msix_rid, msix->jme_msix_res);
3635 			msix->jme_msix_res = NULL;
3636 		}
3637 		if (msix->jme_msix_rid >= 0) {
3638 			pci_release_msix_vector(dev, msix->jme_msix_rid);
3639 			msix->jme_msix_rid = -1;
3640 		}
3641 	}
3642 	pci_teardown_msix(dev);
3643 }
3644 
3645 static void
3646 jme_intr_free(device_t dev)
3647 {
3648 	struct jme_softc *sc = device_get_softc(dev);
3649 
3650 	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3651 		if (sc->jme_irq_res != NULL) {
3652 			bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3653 					     sc->jme_irq_res);
3654 		}
3655 		if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3656 			pci_release_msi(dev);
3657 	} else {
3658 		jme_msix_free(dev);
3659 	}
3660 }
3661 
3662 static void
3663 jme_msix_tx(void *xcd)
3664 {
3665 	struct jme_chain_data *cd = xcd;
3666 	struct jme_softc *sc = cd->jme_sc;
3667 	struct ifnet *ifp = &sc->arpcom.ac_if;
3668 
3669 	ASSERT_SERIALIZED(&cd->jme_tx_serialize);
3670 
3671 	CSR_WRITE_4(sc, JME_INTR_STATUS,
3672 	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3673 
3674 	if (ifp->if_flags & IFF_RUNNING) {
3675 		jme_txeof(sc);
3676 		if (!ifq_is_empty(&ifp->if_snd))
3677 			if_devstart(ifp);
3678 	}
3679 }
3680 
3681 static void
3682 jme_msix_rx(void *xrdata)
3683 {
3684 	struct jme_rxdata *rdata = xrdata;
3685 	struct jme_softc *sc = rdata->jme_sc;
3686 	struct ifnet *ifp = &sc->arpcom.ac_if;
3687 	uint32_t status;
3688 
3689 	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3690 
3691 	status = CSR_READ_4(sc, JME_INTR_STATUS);
3692 	status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);
3693 
3694 	if (status & rdata->jme_rx_coal) {
3695 		status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
3696 		CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3697 	}
3698 
3699 	if (ifp->if_flags & IFF_RUNNING) {
3700 		if (status & rdata->jme_rx_coal) {
3701 			struct mbuf_chain chain[MAXCPU];
3702 			int prog;
3703 
3704 			ether_input_chain_init(chain);
3705 
3706 			prog = jme_rxeof_chain(sc, rdata->jme_rx_idx,
3707 			    chain, -1);
3708 			if (prog)
3709 				ether_input_dispatch(chain);
3710 		}
3711 
3712 		if (status & rdata->jme_rx_empty) {
3713 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3714 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
3715 		}
3716 	}
3717 }
3718 
3719 static void
3720 jme_set_msinum(struct jme_softc *sc)
3721 {
3722 	int i;
3723 
3724 	for (i = 0; i < JME_MSINUM_CNT; ++i)
3725 		CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3726 }
3727 
3728 static int
3729 jme_intr_setup(device_t dev)
3730 {
3731 	struct jme_softc *sc = device_get_softc(dev);
3732 	struct ifnet *ifp = &sc->arpcom.ac_if;
3733 	int error;
3734 
3735 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3736 		return jme_msix_setup(dev);
3737 
3738 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3739 	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3740 	if (error) {
3741 		device_printf(dev, "could not set up interrupt handler.\n");
3742 		return error;
3743 	}
3744 
3745 	ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3746 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3747 	return 0;
3748 }
3749 
3750 static void
3751 jme_intr_teardown(device_t dev)
3752 {
3753 	struct jme_softc *sc = device_get_softc(dev);
3754 
3755 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3756 		jme_msix_teardown(dev, sc->jme_msix_cnt);
3757 	else
3758 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3759 }
3760 
3761 static int
3762 jme_msix_setup(device_t dev)
3763 {
3764 	struct jme_softc *sc = device_get_softc(dev);
3765 	struct ifnet *ifp = &sc->arpcom.ac_if;
3766 	int x;
3767 
3768 	for (x = 0; x < sc->jme_msix_cnt; ++x) {
3769 		struct jme_msix_data *msix = &sc->jme_msix[x];
3770 		int error;
3771 
3772 		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3773 		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3774 		    &msix->jme_msix_handle, msix->jme_msix_serialize,
3775 		    msix->jme_msix_desc);
3776 		if (error) {
3777 			device_printf(dev, "could not set up %s "
3778 			    "interrupt handler.\n", msix->jme_msix_desc);
3779 			jme_msix_teardown(dev, x);
3780 			return error;
3781 		}
3782 	}
3783 	ifp->if_cpuid = 0; /* XXX */
3784 	return 0;
3785 }
3786 
3787 static void
3788 jme_msix_teardown(device_t dev, int msix_count)
3789 {
3790 	struct jme_softc *sc = device_get_softc(dev);
3791 	int x;
3792 
3793 	for (x = 0; x < msix_count; ++x) {
3794 		struct jme_msix_data *msix = &sc->jme_msix[x];
3795 
3796 		bus_teardown_intr(dev, msix->jme_msix_res,
3797 		    msix->jme_msix_handle);
3798 	}
3799 }
3800