1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29 
30 #include "opt_ifpoll.h"
31 #include "opt_jme.h"
32 
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46 
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_poll.h>
54 #include <net/ifq_var.h>
55 #include <net/if_ringmap.h>
56 #include <net/toeplitz.h>
57 #include <net/toeplitz2.h>
58 #include <net/vlan/if_vlan_var.h>
59 #include <net/vlan/if_vlan_ether.h>
60 
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
63 
64 #include <dev/netif/mii_layer/mii.h>
65 #include <dev/netif/mii_layer/miivar.h>
66 #include <dev/netif/mii_layer/jmphyreg.h>
67 
68 #include <bus/pci/pcireg.h>
69 #include <bus/pci/pcivar.h>
70 #include "pcidevs.h"
71 
72 #include <dev/netif/jme/if_jmereg.h>
73 #include <dev/netif/jme/if_jmevar.h>
74 
75 #include "miibus_if.h"
76 
77 #define JME_TICK_CPUID		0	/* DO NOT CHANGE THIS */
78 
79 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
80 
81 #ifdef JME_RSS_DEBUG
82 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
83 do { \
84 	if ((sc)->jme_rss_debug >= (lvl)) \
85 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
86 } while (0)
87 #else	/* !JME_RSS_DEBUG */
88 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
89 #endif	/* JME_RSS_DEBUG */
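
/*
 * A hypothetical use of the macro above (the names "r" and "cnt" are
 * illustrative only, not taken from this driver):
 *
 *	JME_RSS_DPRINTF(sc, 1, "ring %d: %d packets\n", r, cnt);
 *
 * When JME_RSS_DEBUG is defined this prints via if_printf() whenever the
 * "rss_debug" sysctl level is >= 1; otherwise it compiles to a no-op.
 */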
90 
91 static int	jme_probe(device_t);
92 static int	jme_attach(device_t);
93 static int	jme_detach(device_t);
94 static int	jme_shutdown(device_t);
95 static int	jme_suspend(device_t);
96 static int	jme_resume(device_t);
97 
98 static int	jme_miibus_readreg(device_t, int, int);
99 static int	jme_miibus_writereg(device_t, int, int, int);
100 static void	jme_miibus_statchg(device_t);
101 
102 static void	jme_init(void *);
103 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
104 static void	jme_start(struct ifnet *, struct ifaltq_subque *);
105 static void	jme_watchdog(struct ifnet *);
106 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
107 static int	jme_mediachange(struct ifnet *);
108 #ifdef IFPOLL_ENABLE
109 static void	jme_npoll(struct ifnet *, struct ifpoll_info *);
110 static void	jme_npoll_status(struct ifnet *);
111 static void	jme_npoll_rx(struct ifnet *, void *, int);
112 static void	jme_npoll_tx(struct ifnet *, void *, int);
113 #endif
114 static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
115 static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
116 static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
117 #ifdef INVARIANTS
118 static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
119 		    boolean_t);
120 #endif
121 
122 static void	jme_intr(void *);
123 static void	jme_msix_tx(void *);
124 static void	jme_msix_rx(void *);
125 static void	jme_msix_status(void *);
126 static void	jme_txeof(struct jme_txdata *);
127 static void	jme_rxeof(struct jme_rxdata *, int, int);
128 static void	jme_rx_intr(struct jme_softc *, uint32_t);
129 static void	jme_enable_intr(struct jme_softc *);
130 static void	jme_disable_intr(struct jme_softc *);
131 static void	jme_rx_restart(struct jme_softc *, uint32_t);
132 
133 static int	jme_msix_setup(device_t);
134 static void	jme_msix_teardown(device_t, int);
135 static int	jme_intr_setup(device_t);
136 static void	jme_intr_teardown(device_t);
137 static void	jme_msix_try_alloc(device_t);
138 static void	jme_msix_free(device_t);
139 static int	jme_intr_alloc(device_t);
140 static void	jme_intr_free(device_t);
141 static int	jme_dma_alloc(struct jme_softc *);
142 static void	jme_dma_free(struct jme_softc *);
143 static int	jme_init_rx_ring(struct jme_rxdata *);
144 static void	jme_init_tx_ring(struct jme_txdata *);
145 static void	jme_init_ssb(struct jme_softc *);
146 static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
147 static int	jme_encap(struct jme_txdata *, struct mbuf **, int *);
148 static void	jme_rxpkt(struct jme_rxdata *, int);
149 static int	jme_rxring_dma_alloc(struct jme_rxdata *);
150 static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);
151 static int	jme_rxbuf_dma_filter(void *, bus_addr_t);
152 
153 static void	jme_tick(void *);
154 static void	jme_stop(struct jme_softc *);
155 static void	jme_reset(struct jme_softc *);
156 static void	jme_set_msinum(struct jme_softc *);
157 static void	jme_set_vlan(struct jme_softc *);
158 static void	jme_set_filter(struct jme_softc *);
159 static void	jme_stop_tx(struct jme_softc *);
160 static void	jme_stop_rx(struct jme_softc *);
161 static void	jme_mac_config(struct jme_softc *);
162 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
163 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
164 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
165 #ifdef notyet
166 static void	jme_setwol(struct jme_softc *);
167 static void	jme_setlinkspeed(struct jme_softc *);
168 #endif
169 static void	jme_set_tx_coal(struct jme_softc *);
170 static void	jme_set_rx_coal(struct jme_softc *);
171 static void	jme_enable_rss(struct jme_softc *);
172 static void	jme_disable_rss(struct jme_softc *);
173 static void	jme_serialize_skipmain(struct jme_softc *);
174 static void	jme_deserialize_skipmain(struct jme_softc *);
175 static void	jme_phy_poweron(struct jme_softc *);
176 static void	jme_phy_poweroff(struct jme_softc *);
177 static int	jme_miiext_read(struct jme_softc *, int);
178 static void	jme_miiext_write(struct jme_softc *, int, int);
179 static void	jme_phy_init(struct jme_softc *);
180 
181 static void	jme_sysctl_node(struct jme_softc *);
182 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
183 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
184 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
185 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
186 
187 /*
188  * Devices supported by this driver.
189  */
190 static const struct jme_dev {
191 	uint16_t	jme_vendorid;
192 	uint16_t	jme_deviceid;
193 	uint32_t	jme_caps;
194 	const char	*jme_name;
195 } jme_devs[] = {
196 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
197 	    JME_CAP_JUMBO,
198 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
199 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
200 	    JME_CAP_FASTETH,
201 	    "JMicron Inc, JMC260 Fast Ethernet" },
202 	{ 0, 0, 0, NULL }
203 };
204 
205 static device_method_t jme_methods[] = {
206 	/* Device interface. */
207 	DEVMETHOD(device_probe,		jme_probe),
208 	DEVMETHOD(device_attach,	jme_attach),
209 	DEVMETHOD(device_detach,	jme_detach),
210 	DEVMETHOD(device_shutdown,	jme_shutdown),
211 	DEVMETHOD(device_suspend,	jme_suspend),
212 	DEVMETHOD(device_resume,	jme_resume),
213 
214 	/* Bus interface. */
215 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
216 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
217 
218 	/* MII interface. */
219 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
220 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
221 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
222 
223 	{ NULL, NULL }
224 };
225 
226 static driver_t jme_driver = {
227 	"jme",
228 	jme_methods,
229 	sizeof(struct jme_softc)
230 };
231 
232 static devclass_t jme_devclass;
233 
234 DECLARE_DUMMY_MODULE(if_jme);
235 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
236 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
237 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
238 
239 static const struct {
240 	uint32_t	jme_coal;
241 	uint32_t	jme_comp;
242 	uint32_t	jme_empty;
243 } jme_rx_status[JME_NRXRING_MAX] = {
244 	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
245 	  INTR_RXQ0_DESC_EMPTY },
246 	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
247 	  INTR_RXQ1_DESC_EMPTY },
248 	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
249 	  INTR_RXQ2_DESC_EMPTY },
250 	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
251 	  INTR_RXQ3_DESC_EMPTY }
252 };
253 
254 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
255 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
256 static int	jme_rx_ring_count = 0;
257 static int	jme_msi_enable = 1;
258 static int	jme_msix_enable = 1;
259 
260 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
261 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
262 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
263 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
264 TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
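
/*
 * The tunables above are read once at boot.  A sketch of how they might
 * be set from loader.conf(5) -- the values shown are examples only:
 *
 *	hw.jme.rx_desc_count="512"
 *	hw.jme.tx_desc_count="512"
 *	hw.jme.msix.enable="0"
 *
 * jme_attach() additionally consults per-device overrides through
 * device_getenv_int(), using these values as the defaults.
 */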
265 
266 static __inline void
267 jme_setup_rxdesc(struct jme_rxdesc *rxd)
268 {
269 	struct jme_desc *desc;
270 
271 	desc = rxd->rx_desc;
272 	desc->buflen = htole32(MCLBYTES);
273 	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
274 	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
275 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
276 }
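
/*
 * Note on jme_setup_rxdesc() above: buflen is always MCLBYTES because RX
 * buffers are standard mbuf clusters, and setting JME_RD_OWN in "flags"
 * is what hands the descriptor back to the hardware.  All fields are
 * stored little-endian, hence the htole32() conversions.
 */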
277 
278 /*
279  *	Read a PHY register on the MII of the JMC250.
280  */
281 static int
282 jme_miibus_readreg(device_t dev, int phy, int reg)
283 {
284 	struct jme_softc *sc = device_get_softc(dev);
285 	uint32_t val;
286 	int i;
287 
288 	/* For FPGA version, PHY address 0 should be ignored. */
289 	if (sc->jme_caps & JME_CAP_FPGA) {
290 		if (phy == 0)
291 			return (0);
292 	} else {
293 		if (sc->jme_phyaddr != phy)
294 			return (0);
295 	}
296 
297 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
298 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
299 
300 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
301 		DELAY(1);
302 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
303 			break;
304 	}
305 	if (i == 0) {
306 		device_printf(sc->jme_dev, "phy read timeout: "
307 			      "phy %d, reg %d\n", phy, reg);
308 		return (0);
309 	}
310 
311 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
312 }
313 
314 /*
315  *	Write a PHY register on the MII of the JMC250.
316  */
317 static int
318 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
319 {
320 	struct jme_softc *sc = device_get_softc(dev);
321 	int i;
322 
323 	/* For FPGA version, PHY address 0 should be ignored. */
324 	if (sc->jme_caps & JME_CAP_FPGA) {
325 		if (phy == 0)
326 			return (0);
327 	} else {
328 		if (sc->jme_phyaddr != phy)
329 			return (0);
330 	}
331 
332 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
333 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
334 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
335 
336 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
337 		DELAY(1);
338 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
339 			break;
340 	}
341 	if (i == 0) {
342 		device_printf(sc->jme_dev, "phy write timeout: "
343 			      "phy %d, reg %d\n", phy, reg);
344 	}
345 
346 	return (0);
347 }
348 
349 /*
350  *	Callback from MII layer when media changes.
351  */
352 static void
353 jme_miibus_statchg(device_t dev)
354 {
355 	struct jme_softc *sc = device_get_softc(dev);
356 	struct ifnet *ifp = &sc->arpcom.ac_if;
357 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
358 	struct mii_data *mii;
359 	struct jme_txdesc *txd;
360 	bus_addr_t paddr;
361 	int i, r;
362 
363 	if (sc->jme_in_tick)
364 		jme_serialize_skipmain(sc);
365 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
366 
367 	if ((ifp->if_flags & IFF_RUNNING) == 0)
368 		goto done;
369 
370 	mii = device_get_softc(sc->jme_miibus);
371 
372 	sc->jme_has_link = FALSE;
373 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
374 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
375 		case IFM_10_T:
376 		case IFM_100_TX:
377 			sc->jme_has_link = TRUE;
378 			break;
379 		case IFM_1000_T:
380 			if (sc->jme_caps & JME_CAP_FASTETH)
381 				break;
382 			sc->jme_has_link = TRUE;
383 			break;
384 		default:
385 			break;
386 		}
387 	}
388 
389 	/*
390 	 * Disabling the Rx/Tx MACs has a side-effect of resetting
391 	 * the JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
392 	 * descriptor address.  So the driver should reset its
393 	 * internal producer/consumer pointers and reclaim any
394 	 * allocated resources.  Note that just saving the values of
395 	 * the JME_TXNDA and JME_RXNDA registers before stopping the
396 	 * MAC and restoring them afterwards is not sufficient to
397 	 * ensure a correct MAC state, because stopping the MAC can
398 	 * take a while and the hardware might have updated the
399 	 * JME_TXNDA/JME_RXNDA registers during the stop
400 	 * operation.
401 	 */
402 
403 	/* Disable interrupts */
404 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
405 
406 	/* Stop driver */
407 	ifp->if_flags &= ~IFF_RUNNING;
408 	ifq_clr_oactive(&ifp->if_snd);
409 	ifp->if_timer = 0;
410 	callout_stop(&sc->jme_tick_ch);
411 
412 	/* Stop receiver/transmitter. */
413 	jme_stop_rx(sc);
414 	jme_stop_tx(sc);
415 
416 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
417 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
418 
419 		jme_rxeof(rdata, -1, -1);
420 		if (rdata->jme_rxhead != NULL)
421 			m_freem(rdata->jme_rxhead);
422 		JME_RXCHAIN_RESET(rdata);
423 
424 		/*
425 		 * Reuse configured Rx descriptors and reset
426 		 * the producer/consumer index.
427 		 */
428 		rdata->jme_rx_cons = 0;
429 	}
430 	if (JME_ENABLE_HWRSS(sc))
431 		jme_enable_rss(sc);
432 	else
433 		jme_disable_rss(sc);
434 
435 	jme_txeof(tdata);
436 	if (tdata->jme_tx_cnt != 0) {
437 		/* Remove queued packets for transmit. */
438 		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
439 			txd = &tdata->jme_txdesc[i];
440 			if (txd->tx_m != NULL) {
441 				bus_dmamap_unload(tdata->jme_tx_tag,
442 				    txd->tx_dmamap);
443 				m_freem(txd->tx_m);
444 				txd->tx_m = NULL;
445 				txd->tx_ndesc = 0;
446 				IFNET_STAT_INC(ifp, oerrors, 1);
447 			}
448 		}
449 	}
450 	jme_init_tx_ring(tdata);
451 
452 	/* Initialize shadow status block. */
453 	jme_init_ssb(sc);
454 
455 	/* Program MAC with resolved speed/duplex/flow-control. */
456 	if (sc->jme_has_link) {
457 		jme_mac_config(sc);
458 
459 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
460 
461 		/* Set Tx ring address to the hardware. */
462 		paddr = tdata->jme_tx_ring_paddr;
463 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
464 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
465 
466 		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
467 			CSR_WRITE_4(sc, JME_RXCSR,
468 			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
469 
470 			/* Set Rx ring address to the hardware. */
471 			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
472 			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
473 			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
474 		}
475 
476 		/* Restart receiver/transmitter. */
477 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
478 		    RXCSR_RXQ_START);
479 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
480 	}
481 
482 	ifp->if_flags |= IFF_RUNNING;
483 	ifq_clr_oactive(&ifp->if_snd);
484 	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
485 	    JME_TICK_CPUID);
486 
487 #ifdef IFPOLL_ENABLE
488 	if (!(ifp->if_flags & IFF_NPOLLING))
489 #endif
490 	/* Reenable interrupts. */
491 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
492 
493 done:
494 	if (sc->jme_in_tick)
495 		jme_deserialize_skipmain(sc);
496 }
497 
498 /*
499  *	Get the current interface media status.
500  */
501 static void
502 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
503 {
504 	struct jme_softc *sc = ifp->if_softc;
505 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
506 
507 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
508 
509 	mii_pollstat(mii);
510 	ifmr->ifm_status = mii->mii_media_status;
511 	ifmr->ifm_active = mii->mii_media_active;
512 }
513 
514 /*
515  *	Set hardware to newly-selected media.
516  */
517 static int
518 jme_mediachange(struct ifnet *ifp)
519 {
520 	struct jme_softc *sc = ifp->if_softc;
521 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
522 	int error;
523 
524 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
525 
526 	if (mii->mii_instance != 0) {
527 		struct mii_softc *miisc;
528 
529 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
530 			mii_phy_reset(miisc);
531 	}
532 	error = mii_mediachg(mii);
533 
534 	return (error);
535 }
536 
537 static int
538 jme_probe(device_t dev)
539 {
540 	const struct jme_dev *sp;
541 	uint16_t vid, did;
542 
543 	vid = pci_get_vendor(dev);
544 	did = pci_get_device(dev);
545 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
546 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
547 			struct jme_softc *sc = device_get_softc(dev);
548 
549 			sc->jme_caps = sp->jme_caps;
550 			device_set_desc(dev, sp->jme_name);
551 			return (0);
552 		}
553 	}
554 	return (ENXIO);
555 }
556 
557 static int
558 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
559 {
560 	uint32_t reg;
561 	int i;
562 
563 	*val = 0;
564 	for (i = JME_TIMEOUT; i > 0; i--) {
565 		reg = CSR_READ_4(sc, JME_SMBCSR);
566 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
567 			break;
568 		DELAY(1);
569 	}
570 
571 	if (i == 0) {
572 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
573 		return (ETIMEDOUT);
574 	}
575 
576 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
577 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
578 	for (i = JME_TIMEOUT; i > 0; i--) {
579 		DELAY(1);
580 		reg = CSR_READ_4(sc, JME_SMBINTF);
581 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
582 			break;
583 	}
584 
585 	if (i == 0) {
586 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
587 		return (ETIMEDOUT);
588 	}
589 
590 	reg = CSR_READ_4(sc, JME_SMBINTF);
591 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
592 
593 	return (0);
594 }
595 
596 static int
597 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
598 {
599 	uint8_t fup, reg, val;
600 	uint32_t offset;
601 	int match;
602 
603 	offset = 0;
604 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
605 	    fup != JME_EEPROM_SIG0)
606 		return (ENOENT);
607 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
608 	    fup != JME_EEPROM_SIG1)
609 		return (ENOENT);
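	/*
	 * Walk the EEPROM descriptors.  Judging from the masks used below,
	 * each descriptor is a JME_EEPROM_DESC_BYTES-sized tuple of roughly
	 * (function/page flags, register offset, value); the station address
	 * is reassembled byte-by-byte from entries that target JME_PAR0
	 * through JME_PAR0 + ETHER_ADDR_LEN - 1.
	 */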
610 	match = 0;
611 	do {
612 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
613 			break;
614 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
615 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
616 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
617 				break;
618 			if (reg >= JME_PAR0 &&
619 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
620 				if (jme_eeprom_read_byte(sc, offset + 2,
621 				    &val) != 0)
622 					break;
623 				eaddr[reg - JME_PAR0] = val;
624 				match++;
625 			}
626 		}
627 		/* Check for the end of EEPROM descriptor. */
628 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
629 			break;
630 		/* Try next eeprom descriptor. */
631 		offset += JME_EEPROM_DESC_BYTES;
632 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
633 
634 	if (match == ETHER_ADDR_LEN)
635 		return (0);
636 
637 	return (ENOENT);
638 }
639 
640 static void
641 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
642 {
643 	uint32_t par0, par1;
644 
645 	/* Read station address. */
646 	par0 = CSR_READ_4(sc, JME_PAR0);
647 	par1 = CSR_READ_4(sc, JME_PAR1);
648 	par1 &= 0xFFFF;
649 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
650 		device_printf(sc->jme_dev,
651 		    "generating fake ethernet address.\n");
652 		par0 = karc4random();
653 		/* Set OUI to JMicron. */
654 		eaddr[0] = 0x00;
655 		eaddr[1] = 0x1B;
656 		eaddr[2] = 0x8C;
657 		eaddr[3] = (par0 >> 16) & 0xff;
658 		eaddr[4] = (par0 >> 8) & 0xff;
659 		eaddr[5] = par0 & 0xff;
660 	} else {
661 		eaddr[0] = (par0 >> 0) & 0xFF;
662 		eaddr[1] = (par0 >> 8) & 0xFF;
663 		eaddr[2] = (par0 >> 16) & 0xFF;
664 		eaddr[3] = (par0 >> 24) & 0xFF;
665 		eaddr[4] = (par1 >> 0) & 0xFF;
666 		eaddr[5] = (par1 >> 8) & 0xFF;
667 	}
668 }
669 
670 static int
671 jme_attach(device_t dev)
672 {
673 	struct jme_softc *sc = device_get_softc(dev);
674 	struct ifnet *ifp = &sc->arpcom.ac_if;
675 	uint32_t reg;
676 	uint16_t did;
677 	uint8_t pcie_ptr, rev;
678 	int error = 0, i, j, rx_desc_cnt, coal_max, ring_cnt;
679 	uint8_t eaddr[ETHER_ADDR_LEN];
680 
681 	/*
682 	 * Initialize serializers
683 	 */
684 	lwkt_serialize_init(&sc->jme_serialize);
685 	lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize);
686 	for (i = 0; i < JME_NRXRING_MAX; ++i) {
687 		lwkt_serialize_init(
688 		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
689 	}
690 
691 	/*
692 	 * Get # of RX ring descriptors
693 	 */
694 	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
695 	    jme_rx_desc_count);
696 	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
697 	if (rx_desc_cnt > JME_NDESC_MAX)
698 		rx_desc_cnt = JME_NDESC_MAX;
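	/*
	 * For example, a hint of rx_desc_count=100 is rounded up to 112
	 * here (JME_NDESC_ALIGN is currently 16) and then clamped to
	 * JME_NDESC_MAX.
	 */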
699 
700 	/*
701 	 * Get # of TX ring descriptors
702 	 */
703 	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
704 	    device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count);
705 	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
706 	    roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, JME_NDESC_ALIGN);
707 	if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX)
708 		sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX;
709 
710 	/*
711 	 * Create TX/RX ring maps.
712 	 */
713 	ring_cnt = device_getenv_int(dev, "rx_ring_count", jme_rx_ring_count);
714 	/* Require power-of-2 ring count. */
715 	sc->jme_rx_rmap = if_ringmap_alloc2(dev, ring_cnt, JME_NRXRING_MAX);
716 	sc->jme_cdata.jme_rx_ring_cnt = if_ringmap_count(sc->jme_rx_rmap);
717 
718 	/* Only one TX ring is supported. */
719 	sc->jme_tx_rmap = if_ringmap_alloc(dev, 1, 1);
720 
721 	/*
722 	 * NOTE:
723 	 * There is _no_ need to align or match TX/RX ring maps,
724 	 * since TX/RX rings are completely independent in this
725 	 * driver.
726 	 */
727 
728 	/*
729 	 * Initialize serializer array
730 	 */
731 	i = 0;
732 
733 	KKASSERT(i < JME_NSERIALIZE);
734 	sc->jme_serialize_arr[i++] = &sc->jme_serialize;
735 
736 	KKASSERT(i < JME_NSERIALIZE);
737 	sc->jme_serialize_arr[i++] =
738 	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
739 
740 	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
741 		KKASSERT(i < JME_NSERIALIZE);
742 		sc->jme_serialize_arr[i++] =
743 		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
744 	}
745 
746 	KKASSERT(i <= JME_NSERIALIZE);
747 	sc->jme_serialize_cnt = i;
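	/*
	 * The layout of jme_serialize_arr appears to be significant: slot 0
	 * is the main serializer, slot 1 the TX serializer, followed by one
	 * slot per active RX ring.  The skipmain helpers presumably depend
	 * on the main serializer being first.
	 */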
748 
749 	/*
750 	 * Setup TX ring specific data
751 	 */
752 	sc->jme_cdata.jme_tx_data.jme_sc = sc;
753 
754 	/*
755 	 * Setup RX rings specific data
756 	 */
757 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
758 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
759 
760 		rdata->jme_sc = sc;
761 		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
762 		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
763 		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
764 		rdata->jme_rx_idx = i;
765 		rdata->jme_rx_desc_cnt = rx_desc_cnt;
766 	}
767 
768 	sc->jme_dev = dev;
769 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
770 
771 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
772 
773 	callout_init_mp(&sc->jme_tick_ch);
774 
775 #ifndef BURN_BRIDGES
776 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
777 		uint32_t irq, mem;
778 
779 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
780 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
781 
782 		device_printf(dev, "chip is in D%d power mode "
783 		    "-- setting to D0\n", pci_get_powerstate(dev));
784 
785 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
786 
787 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
788 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
789 	}
790 #endif	/* !BURN_BRIDGES */
791 
792 	/* Enable bus mastering */
793 	pci_enable_busmaster(dev);
794 
795 	/*
796 	 * Allocate IO memory
797 	 *
798 	 * JMC250 supports both memory-mapped and I/O register space
799 	 * access.  Because I/O register access would need different
800 	 * BARs to reach all registers, it is a waste of time to use
801 	 * I/O register space access.  JMC250 maps its entire register
802 	 * space into 16K of memory space.
803 	 */
804 	sc->jme_mem_rid = JME_PCIR_BAR;
805 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
806 						 &sc->jme_mem_rid, RF_ACTIVE);
807 	if (sc->jme_mem_res == NULL) {
808 		device_printf(dev, "can't allocate IO memory\n");
809 		return ENXIO;
810 	}
811 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
812 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
813 
814 	/*
815 	 * Allocate IRQ
816 	 */
817 	error = jme_intr_alloc(dev);
818 	if (error)
819 		goto fail;
820 
821 	/*
822 	 * Extract revisions
823 	 */
824 	reg = CSR_READ_4(sc, JME_CHIPMODE);
825 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
826 	    CHIPMODE_NOT_FPGA) {
827 		sc->jme_caps |= JME_CAP_FPGA;
828 		if (bootverbose) {
829 			device_printf(dev, "FPGA revision: 0x%04x\n",
830 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
831 				      CHIPMODE_FPGA_REV_SHIFT);
832 		}
833 	}
834 
835 	/* NOTE: FM revision is put in the upper 4 bits */
836 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
837 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
838 	if (bootverbose)
839 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
840 
841 	did = pci_get_device(dev);
842 	switch (did) {
843 	case PCI_PRODUCT_JMICRON_JMC250:
844 		if (rev == JME_REV1_A2)
845 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
846 		break;
847 
848 	case PCI_PRODUCT_JMICRON_JMC260:
849 		if (rev == JME_REV2) {
850 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
851 			sc->jme_phycom0 = 0x608a;
852 		} else if (rev == JME_REV2_2) {
853 			sc->jme_phycom0 = 0x408a;
854 		}
855 		break;
856 
857 	default:
858 		panic("unknown device id 0x%04x", did);
859 	}
860 	if (rev >= JME_REV2) {
861 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
862 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
863 				      GHC_TXMAC_CLKSRC_1000;
864 	}
865 	if (rev >= JME_REV5)
866 		sc->jme_caps |= JME_CAP_PHYPWR;
867 	if (rev >= JME_REV6 || rev == JME_REV5 || rev == JME_REV5_1 ||
868 	    rev == JME_REV5_3) {
869 		sc->jme_phycom0 = 0x008a;
870 		sc->jme_phycom1 = 0x4109;
871 	} else if (rev == JME_REV3_1 || rev == JME_REV3_2) {
872 		sc->jme_phycom0 = 0xe088;
873 	}
874 
875 	if (rev >= JME_REV2) {
876 		reg = pci_read_config(dev, JME_PCI_SSCTRL, 4);
877 		if ((reg & SSCTRL_PHYMASK) == SSCTRL_PHYEA) {
878 			sc->jme_phycom0 = 0;
879 			sc->jme_phycom1 = 0;
880 		}
881 	}
882 
883 	/* Reset the ethernet controller. */
884 	jme_reset(sc);
885 
886 	/* Map MSI/MSI-X vectors */
887 	jme_set_msinum(sc);
888 
889 	/* Get station address. */
890 	reg = CSR_READ_4(sc, JME_SMBCSR);
891 	if (reg & SMBCSR_EEPROM_PRESENT)
892 		error = jme_eeprom_macaddr(sc, eaddr);
893 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
894 		if (error != 0 && (bootverbose)) {
895 			device_printf(dev, "ethernet hardware address "
896 				      "not found in EEPROM.\n");
897 		}
898 		jme_reg_macaddr(sc, eaddr);
899 	}
900 
901 	/*
902 	 * Save PHY address.
903 	 * The integrated JR0211 has a fixed PHY address, whereas the
904 	 * FPGA version requires PHY probing to get the correct address.
905 	 */
906 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
907 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
908 		    GPREG0_PHY_ADDR_MASK;
909 		if (bootverbose) {
910 			device_printf(dev, "PHY is at address %d.\n",
911 			    sc->jme_phyaddr);
912 		}
913 	} else {
914 		sc->jme_phyaddr = 0;
915 	}
916 
917 	/* Set max allowable DMA size. */
918 	pcie_ptr = pci_get_pciecap_ptr(dev);
919 	if (pcie_ptr != 0) {
920 		uint16_t ctrl;
921 
922 		sc->jme_caps |= JME_CAP_PCIE;
923 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
924 		if (bootverbose) {
925 			device_printf(dev, "Read request size : %d bytes.\n",
926 			    128 << ((ctrl >> 12) & 0x07));
927 			device_printf(dev, "TLP payload size : %d bytes.\n",
928 			    128 << ((ctrl >> 5) & 0x07));
929 		}
930 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
931 		case PCIEM_DEVCTL_MAX_READRQ_128:
932 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
933 			break;
934 		case PCIEM_DEVCTL_MAX_READRQ_256:
935 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
936 			break;
937 		default:
938 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
939 			break;
940 		}
941 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
942 	} else {
943 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
944 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
945 	}
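	/*
	 * Summary of the selection above: the TX DMA burst size follows the
	 * PCIe Max Read Request setting (capped at 512 bytes), while the RX
	 * DMA size is always programmed as 128 bytes.
	 */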
946 
947 #ifdef notyet
948 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
949 		sc->jme_caps |= JME_CAP_PMCAP;
950 #endif
951 
952 	/*
953 	 * Set default coalescing values
954 	 */
955 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
956 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
957 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
958 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
959 
960 	/*
961 	 * Adjust coalescing values, in case the number of TX/RX
962 	 * descriptors is set to a small value by the user.
963 	 *
964 	 * NOTE: coal_max will not be zero, since the number of descs
965 	 * must be aligned to JME_NDESC_ALIGN (16 currently)
966 	 */
967 	coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2;
968 	if (coal_max < sc->jme_tx_coal_pkt)
969 		sc->jme_tx_coal_pkt = coal_max;
970 
971 	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
972 	if (coal_max < sc->jme_rx_coal_pkt)
973 		sc->jme_rx_coal_pkt = coal_max;
974 
975 	sc->jme_cdata.jme_tx_data.jme_tx_wreg = JME_TXWREG_NSEGS;
976 
977 	/*
978 	 * Create sysctl tree
979 	 */
980 	jme_sysctl_node(sc);
981 
982 	/* Allocate DMA stuffs */
983 	error = jme_dma_alloc(sc);
984 	if (error)
985 		goto fail;
986 
987 	ifp->if_softc = sc;
988 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
989 	ifp->if_init = jme_init;
990 	ifp->if_ioctl = jme_ioctl;
991 	ifp->if_start = jme_start;
992 #ifdef IFPOLL_ENABLE
993 	ifp->if_npoll = jme_npoll;
994 #endif
995 	ifp->if_watchdog = jme_watchdog;
996 	ifp->if_serialize = jme_serialize;
997 	ifp->if_deserialize = jme_deserialize;
998 	ifp->if_tryserialize = jme_tryserialize;
999 #ifdef INVARIANTS
1000 	ifp->if_serialize_assert = jme_serialize_assert;
1001 #endif
1002 	ifp->if_nmbclusters = sc->jme_cdata.jme_rx_ring_cnt *
1003 	    sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt;
1004 	ifq_set_maxlen(&ifp->if_snd,
1005 	    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD);
1006 	ifq_set_ready(&ifp->if_snd);
1007 
1008 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
1009 	ifp->if_capabilities = IFCAP_HWCSUM |
1010 			       IFCAP_TSO |
1011 			       IFCAP_VLAN_MTU |
1012 			       IFCAP_VLAN_HWTAGGING;
1013 	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
1014 		ifp->if_capabilities |= IFCAP_RSS;
1015 	ifp->if_capenable = ifp->if_capabilities;
1016 
1017 	/*
1018 	 * Disable TXCSUM by default to improve bulk data
1019 	 * transmit performance (roughly +20Mbps).
1020 	 */
1021 	ifp->if_capenable &= ~IFCAP_TXCSUM;
1022 
1023 	if (ifp->if_capenable & IFCAP_TXCSUM)
1024 		ifp->if_hwassist |= JME_CSUM_FEATURES;
1025 	ifp->if_hwassist |= CSUM_TSO;
1026 
1027 	/* Set up MII bus. */
1028 	error = mii_phy_probe(dev, &sc->jme_miibus,
1029 			      jme_mediachange, jme_mediastatus);
1030 	if (error) {
1031 		device_printf(dev, "no PHY found!\n");
1032 		goto fail;
1033 	}
1034 
1035 	/*
1036 	 * Save PHYADDR for FPGA mode PHY.
1037 	 */
1038 	if (sc->jme_caps & JME_CAP_FPGA) {
1039 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
1040 
1041 		if (mii->mii_instance != 0) {
1042 			struct mii_softc *miisc;
1043 
1044 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
1045 				if (miisc->mii_phy != 0) {
1046 					sc->jme_phyaddr = miisc->mii_phy;
1047 					break;
1048 				}
1049 			}
1050 			if (sc->jme_phyaddr != 0) {
1051 				device_printf(sc->jme_dev,
1052 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
1053 				/* vendor magic. */
1054 				jme_miibus_writereg(dev, sc->jme_phyaddr,
1055 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
1056 
1057 				/* XXX should we clear JME_WA_EXTFIFO */
1058 			}
1059 		}
1060 	}
1061 
1062 	ether_ifattach(ifp, eaddr, NULL);
1063 
1064 	/* Tell the upper layer(s) we support long frames. */
1065 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1066 
1067 	/* Setup the TX ring's CPUID */
1068 	ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
1069 	ifq_set_hw_serialize(&ifp->if_snd,
1070 	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize);
1071 
1072 	error = jme_intr_setup(dev);
1073 	if (error) {
1074 		ether_ifdetach(ifp);
1075 		goto fail;
1076 	}
1077 
1078 	return 0;
1079 fail:
1080 	jme_detach(dev);
1081 	return (error);
1082 }
1083 
1084 static int
1085 jme_detach(device_t dev)
1086 {
1087 	struct jme_softc *sc = device_get_softc(dev);
1088 
1089 	if (device_is_attached(dev)) {
1090 		struct ifnet *ifp = &sc->arpcom.ac_if;
1091 
1092 		ifnet_serialize_all(ifp);
1093 		jme_stop(sc);
1094 		jme_intr_teardown(dev);
1095 		ifnet_deserialize_all(ifp);
1096 
1097 		ether_ifdetach(ifp);
1098 	}
1099 
1100 	if (sc->jme_miibus != NULL)
1101 		device_delete_child(dev, sc->jme_miibus);
1102 	bus_generic_detach(dev);
1103 
1104 	jme_intr_free(dev);
1105 
1106 	if (sc->jme_mem_res != NULL) {
1107 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
1108 				     sc->jme_mem_res);
1109 	}
1110 
1111 	jme_dma_free(sc);
1112 
1113 	if (sc->jme_rx_rmap != NULL)
1114 		if_ringmap_free(sc->jme_rx_rmap);
1115 	if (sc->jme_tx_rmap != NULL)
1116 		if_ringmap_free(sc->jme_tx_rmap);
1117 
1118 	return (0);
1119 }
1120 
1121 static void
1122 jme_sysctl_node(struct jme_softc *sc)
1123 {
1124 	struct sysctl_ctx_list *ctx;
1125 	struct sysctl_oid *tree;
1126 #ifdef JME_RSS_DEBUG
1127 	int r;
1128 #endif
1129 
1130 	ctx = device_get_sysctl_ctx(sc->jme_dev);
1131 	tree = device_get_sysctl_tree(sc->jme_dev);
1132 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1133 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1134 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
1135 
1136 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1137 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1138 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
1139 
1140 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1141 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1142 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
1143 
1144 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1145 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1146 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
1147 
1148 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1149 		       "rx_desc_count", CTLFLAG_RD,
1150 		       &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
1151 		       0, "RX desc count");
1152 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1153 		       "tx_desc_count", CTLFLAG_RD,
1154 		       &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
1155 		       0, "TX desc count");
1156 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1157 		       "rx_ring_count", CTLFLAG_RD,
1158 		       &sc->jme_cdata.jme_rx_ring_cnt,
1159 		       0, "RX ring count");
1160 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1161 		       "tx_wreg", CTLFLAG_RW,
1162 		       &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0,
1163 		       "# of segments before writing to hardware register");
1164 
1165 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) {
1166 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1167 		    "tx_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
1168 		    sc->jme_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
1169 		    "TX ring CPU map");
1170 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1171 		    "rx_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
1172 		    sc->jme_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
1173 		    "RX ring CPU map");
1174 	} else {
1175 #ifdef IFPOLL_ENABLE
1176 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1177 		    "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
1178 		    sc->jme_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
1179 		    "TX poll CPU map");
1180 		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1181 		    "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
1182 		    sc->jme_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
1183 		    "RX poll CPU map");
1184 #endif
1185 	}
1186 
1187 #ifdef JME_RSS_DEBUG
1188 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1189 		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
1190 		       0, "RSS debug level");
1191 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1192 		char rx_ring_desc[32];
1193 
1194 		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
1195 		    "rx_ring%d_pkt", r);
1196 		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1197 		    rx_ring_desc, CTLFLAG_RW,
1198 		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
1199 
1200 		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
1201 		    "rx_ring%d_emp", r);
1202 		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1203 		    rx_ring_desc, CTLFLAG_RW,
1204 		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
1205 		    "# of times the RX ring was empty");
1206 	}
1207 #endif
1208 }
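
/*
 * A sketch of how the nodes created above might be inspected or tuned at
 * runtime (unit 0 and the usual dev.<driver>.<unit> prefix assumed):
 *
 *	sysctl dev.jme.0.rx_coal_to
 *	sysctl dev.jme.0.tx_coal_pkt=64
 *
 * The four *_coal_* nodes are backed by the jme_sysctl_*() handlers
 * declared near the top of this file.
 */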
1209 
1210 static int
1211 jme_dma_alloc(struct jme_softc *sc)
1212 {
1213 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1214 	struct jme_txdesc *txd;
1215 	bus_dmamem_t dmem;
1216 	int error, i, asize;
1217 
1218 	asize = __VM_CACHELINE_ALIGN(
1219 	    tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc));
1220 	tdata->jme_txdesc = kmalloc_cachealign(asize, M_DEVBUF,
1221 	    M_WAITOK | M_ZERO);
1222 
1223 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1224 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
1225 
1226 		asize = __VM_CACHELINE_ALIGN(
1227 		    rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc));
1228 		rdata->jme_rxdesc = kmalloc_cachealign(asize, M_DEVBUF,
1229 		    M_WAITOK | M_ZERO);
1230 	}
1231 
1232 	/* Create parent ring tag. */
1233 	error = bus_dma_tag_create(NULL,/* parent */
1234 	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
1235 	    sc->jme_lowaddr,		/* lowaddr */
1236 	    BUS_SPACE_MAXADDR,		/* highaddr */
1237 	    NULL, NULL,			/* filter, filterarg */
1238 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1239 	    0,				/* nsegments */
1240 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1241 	    0,				/* flags */
1242 	    &sc->jme_cdata.jme_ring_tag);
1243 	if (error) {
1244 		device_printf(sc->jme_dev,
1245 		    "could not create parent ring DMA tag.\n");
1246 		return error;
1247 	}
1248 
1249 	/*
1250 	 * Create DMA stuffs for TX ring
1251 	 */
1252 	asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN);
1253 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1254 			JME_TX_RING_ALIGN, 0,
1255 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1256 			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1257 	if (error) {
1258 		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1259 		return error;
1260 	}
1261 	tdata->jme_tx_ring_tag = dmem.dmem_tag;
1262 	tdata->jme_tx_ring_map = dmem.dmem_map;
1263 	tdata->jme_tx_ring = dmem.dmem_addr;
1264 	tdata->jme_tx_ring_paddr = dmem.dmem_busaddr;
1265 
1266 	/*
1267 	 * Create DMA stuffs for RX rings
1268 	 */
1269 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1270 		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
1271 		if (error)
1272 			return error;
1273 	}
1274 
1275 	/* Create parent buffer tag. */
1276 	error = bus_dma_tag_create(NULL,/* parent */
1277 	    1, 0,			/* algnmnt, boundary */
1278 	    sc->jme_lowaddr,		/* lowaddr */
1279 	    BUS_SPACE_MAXADDR,		/* highaddr */
1280 	    NULL, NULL,			/* filter, filterarg */
1281 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1282 	    0,				/* nsegments */
1283 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1284 	    0,				/* flags */
1285 	    &sc->jme_cdata.jme_buffer_tag);
1286 	if (error) {
1287 		device_printf(sc->jme_dev,
1288 		    "could not create parent buffer DMA tag.\n");
1289 		return error;
1290 	}
1291 
1292 	/*
1293 	 * Create DMA stuffs for shadow status block
1294 	 */
1295 	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
1296 	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1297 			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1298 			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1299 	if (error) {
1300 		device_printf(sc->jme_dev,
1301 		    "could not allocate shadow status block.\n");
1302 		return error;
1303 	}
1304 	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1305 	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1306 	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1307 	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1308 
1309 	/*
1310 	 * Create DMA stuffs for TX buffers
1311 	 */
1312 
1313 	/* Create tag for Tx buffers. */
1314 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1315 	    1, 0,			/* algnmnt, boundary */
1316 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1317 	    BUS_SPACE_MAXADDR,		/* highaddr */
1318 	    NULL, NULL,			/* filter, filterarg */
1319 	    JME_TSO_MAXSIZE,		/* maxsize */
1320 	    JME_MAXTXSEGS,		/* nsegments */
1321 	    JME_MAXSEGSIZE,		/* maxsegsize */
1322 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1323 	    &tdata->jme_tx_tag);
1324 	if (error != 0) {
1325 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1326 		return error;
1327 	}
1328 
1329 	/* Create DMA maps for Tx buffers. */
1330 	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
1331 		txd = &tdata->jme_txdesc[i];
1332 		error = bus_dmamap_create(tdata->jme_tx_tag,
1333 				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1334 				&txd->tx_dmamap);
1335 		if (error) {
1336 			int j;
1337 
1338 			device_printf(sc->jme_dev,
1339 			    "could not create %dth Tx dmamap.\n", i);
1340 
1341 			for (j = 0; j < i; ++j) {
1342 				txd = &tdata->jme_txdesc[j];
1343 				bus_dmamap_destroy(tdata->jme_tx_tag,
1344 						   txd->tx_dmamap);
1345 			}
1346 			bus_dma_tag_destroy(tdata->jme_tx_tag);
1347 			tdata->jme_tx_tag = NULL;
1348 			return error;
1349 		}
1350 	}
1351 
1352 	/*
1353 	 * Create DMA stuffs for RX buffers
1354 	 */
1355 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1356 		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
1357 		if (error)
1358 			return error;
1359 	}
1360 	return 0;
1361 }
1362 
1363 static void
1364 jme_dma_free(struct jme_softc *sc)
1365 {
1366 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1367 	struct jme_txdesc *txd;
1368 	struct jme_rxdesc *rxd;
1369 	struct jme_rxdata *rdata;
1370 	int i, r;
1371 
1372 	/* Tx ring */
1373 	if (tdata->jme_tx_ring_tag != NULL) {
1374 		bus_dmamap_unload(tdata->jme_tx_ring_tag,
1375 		    tdata->jme_tx_ring_map);
1376 		bus_dmamem_free(tdata->jme_tx_ring_tag,
1377 		    tdata->jme_tx_ring, tdata->jme_tx_ring_map);
1378 		bus_dma_tag_destroy(tdata->jme_tx_ring_tag);
1379 		tdata->jme_tx_ring_tag = NULL;
1380 	}
1381 
1382 	/* Rx ring */
1383 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1384 		rdata = &sc->jme_cdata.jme_rx_data[r];
1385 		if (rdata->jme_rx_ring_tag != NULL) {
1386 			bus_dmamap_unload(rdata->jme_rx_ring_tag,
1387 					  rdata->jme_rx_ring_map);
1388 			bus_dmamem_free(rdata->jme_rx_ring_tag,
1389 					rdata->jme_rx_ring,
1390 					rdata->jme_rx_ring_map);
1391 			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1392 			rdata->jme_rx_ring_tag = NULL;
1393 		}
1394 	}
1395 
1396 	/* Tx buffers */
1397 	if (tdata->jme_tx_tag != NULL) {
1398 		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
1399 			txd = &tdata->jme_txdesc[i];
1400 			bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap);
1401 		}
1402 		bus_dma_tag_destroy(tdata->jme_tx_tag);
1403 		tdata->jme_tx_tag = NULL;
1404 	}
1405 
1406 	/* Rx buffers */
1407 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1408 		rdata = &sc->jme_cdata.jme_rx_data[r];
1409 		if (rdata->jme_rx_tag != NULL) {
1410 			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
1411 				rxd = &rdata->jme_rxdesc[i];
1412 				bus_dmamap_destroy(rdata->jme_rx_tag,
1413 						   rxd->rx_dmamap);
1414 			}
1415 			bus_dmamap_destroy(rdata->jme_rx_tag,
1416 					   rdata->jme_rx_sparemap);
1417 			bus_dma_tag_destroy(rdata->jme_rx_tag);
1418 			rdata->jme_rx_tag = NULL;
1419 		}
1420 	}
1421 
1422 	/* Shadow status block. */
1423 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1424 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1425 		    sc->jme_cdata.jme_ssb_map);
1426 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1427 		    sc->jme_cdata.jme_ssb_block,
1428 		    sc->jme_cdata.jme_ssb_map);
1429 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1430 		sc->jme_cdata.jme_ssb_tag = NULL;
1431 	}
1432 
1433 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1434 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1435 		sc->jme_cdata.jme_buffer_tag = NULL;
1436 	}
1437 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1438 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1439 		sc->jme_cdata.jme_ring_tag = NULL;
1440 	}
1441 
1442 	if (tdata->jme_txdesc != NULL) {
1443 		kfree(tdata->jme_txdesc, M_DEVBUF);
1444 		tdata->jme_txdesc = NULL;
1445 	}
1446 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1447 		rdata = &sc->jme_cdata.jme_rx_data[r];
1448 		if (rdata->jme_rxdesc != NULL) {
1449 			kfree(rdata->jme_rxdesc, M_DEVBUF);
1450 			rdata->jme_rxdesc = NULL;
1451 		}
1452 	}
1453 }
1454 
1455 /*
1456  *	Make sure the interface is stopped at reboot time.
1457  */
1458 static int
1459 jme_shutdown(device_t dev)
1460 {
1461 	return jme_suspend(dev);
1462 }
1463 
1464 #ifdef notyet
1465 /*
1466  * Unlike other ethernet controllers, the JMC250 requires the
1467  * link speed to be explicitly reset to 10/100Mbps, as a gigabit
1468  * link consumes more than 375mA.
1469  * Note, we reset the link speed to 10/100Mbps with
1470  * auto-negotiation, but we don't know whether that operation
1471  * will succeed, as we have no control after powering
1472  * off. If the renegotiation fails, WOL may not work. Running
1473  * at 1Gbps draws more power than the 375mA at 3.3V specified
1474  * by the PCI specification, and that would result in power to
1475  * the ethernet controller being shut down completely.
1476  *
1477  * TODO
1478  *  Save current negotiated media speed/duplex/flow-control
1479  *  to softc and restore the same link again after resuming.
1480  *  PHY handling such as power down/resetting to 100Mbps
1481  *  may be better handled in suspend method in phy driver.
1482  */
1483 static void
1484 jme_setlinkspeed(struct jme_softc *sc)
1485 {
1486 	struct mii_data *mii;
1487 	int aneg, i;
1488 
1489 	JME_LOCK_ASSERT(sc);
1490 
1491 	mii = device_get_softc(sc->jme_miibus);
1492 	mii_pollstat(mii);
1493 	aneg = 0;
1494 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1495 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1496 		case IFM_10_T:
1497 		case IFM_100_TX:
1498 			return;
1499 		case IFM_1000_T:
1500 			aneg++;		/* FALLTHROUGH */
1501 		default:
1502 			break;
1503 		}
1504 	}
1505 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1506 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1507 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1508 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1509 	    BMCR_AUTOEN | BMCR_STARTNEG);
1510 	DELAY(1000);
1511 	if (aneg != 0) {
1512 		/* Poll link state until jme(4) gets a 10/100 link. */
1513 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1514 			mii_pollstat(mii);
1515 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1516 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1517 				case IFM_10_T:
1518 				case IFM_100_TX:
1519 					jme_mac_config(sc);
1520 					return;
1521 				default:
1522 					break;
1523 				}
1524 			}
1525 			JME_UNLOCK(sc);
1526 			pause("jmelnk", hz);
1527 			JME_LOCK(sc);
1528 		}
1529 		if (i == MII_ANEGTICKS_GIGE)
1530 			device_printf(sc->jme_dev, "establishing link failed, "
1531 			    "WOL may not work!\n");
1532 	}
1533 	/*
1534 	 * No link, force MAC to have 100Mbps, full-duplex link.
1535 	 * This is the last resort and may/may not work.
1536 	 */
1537 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1538 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1539 	jme_mac_config(sc);
1540 }
1541 
1542 static void
1543 jme_setwol(struct jme_softc *sc)
1544 {
1545 	struct ifnet *ifp = &sc->arpcom.ac_if;
1546 	uint32_t gpr, pmcs;
1547 	uint16_t pmstat;
1548 	int pmc;
1549 
1550 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1551 		/* No PME capability, PHY power down. */
1552 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1553 		    MII_BMCR, BMCR_PDOWN);
1554 		return;
1555 	}
1556 
1557 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1558 	pmcs = CSR_READ_4(sc, JME_PMCS);
1559 	pmcs &= ~PMCS_WOL_ENB_MASK;
1560 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1561 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1562 		/* Enable PME message. */
1563 		gpr |= GPREG0_PME_ENB;
1564 		/* For gigabit controllers, reset link speed to 10/100. */
1565 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1566 			jme_setlinkspeed(sc);
1567 	}
1568 
1569 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1570 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1571 
1572 	/* Request PME. */
1573 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1574 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1575 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1576 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1577 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1578 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1579 		/* No WOL, PHY power down. */
1580 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1581 		    MII_BMCR, BMCR_PDOWN);
1582 	}
1583 }
1584 #endif
1585 
1586 static int
1587 jme_suspend(device_t dev)
1588 {
1589 	struct jme_softc *sc = device_get_softc(dev);
1590 	struct ifnet *ifp = &sc->arpcom.ac_if;
1591 
1592 	ifnet_serialize_all(ifp);
1593 	jme_stop(sc);
1594 #ifdef notyet
1595 	jme_setwol(sc);
1596 #endif
1597 	ifnet_deserialize_all(ifp);
1598 
1599 	return (0);
1600 }
1601 
1602 static int
1603 jme_resume(device_t dev)
1604 {
1605 	struct jme_softc *sc = device_get_softc(dev);
1606 	struct ifnet *ifp = &sc->arpcom.ac_if;
1607 #ifdef notyet
1608 	int pmc;
1609 #endif
1610 
1611 	ifnet_serialize_all(ifp);
1612 
1613 #ifdef notyet
1614 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1615 		uint16_t pmstat;
1616 
1617 		pmstat = pci_read_config(sc->jme_dev,
1618 		    pmc + PCIR_POWER_STATUS, 2);
1619 		/* Disable PME and clear the PME status. */
1620 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1621 		pci_write_config(sc->jme_dev,
1622 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1623 	}
1624 #endif
1625 
1626 	if (ifp->if_flags & IFF_UP)
1627 		jme_init(sc);
1628 
1629 	ifnet_deserialize_all(ifp);
1630 
1631 	return (0);
1632 }
1633 
1634 static __inline int
1635 jme_tso_pullup(struct mbuf **mp)
1636 {
1637 	int hoff, iphlen, thoff;
1638 	struct mbuf *m;
1639 
1640 	m = *mp;
1641 	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
1642 
1643 	iphlen = m->m_pkthdr.csum_iphlen;
1644 	thoff = m->m_pkthdr.csum_thlen;
1645 	hoff = m->m_pkthdr.csum_lhlen;
1646 
1647 	KASSERT(iphlen > 0, ("invalid ip hlen"));
1648 	KASSERT(thoff > 0, ("invalid tcp hlen"));
1649 	KASSERT(hoff > 0, ("invalid ether hlen"));
1650 
1651 	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
1652 		m = m_pullup(m, hoff + iphlen + thoff);
1653 		if (m == NULL) {
1654 			*mp = NULL;
1655 			return ENOBUFS;
1656 		}
1657 		*mp = m;
1658 	}
1659 	return 0;
1660 }
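
/*
 * Rationale (inferred from the checks above): the TSO setup in jme_encap()
 * needs the Ethernet/IP/TCP header chain contiguous in the first mbuf, so
 * jme_tso_pullup() forces hoff + iphlen + thoff bytes together before the
 * DMA map is loaded.
 */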
1661 
1662 static int
1663 jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used)
1664 {
1665 	struct jme_txdesc *txd;
1666 	struct jme_desc *desc;
1667 	struct mbuf *m;
1668 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1669 	int maxsegs, nsegs;
1670 	int error, i, prod, symbol_desc;
1671 	uint32_t cflags, flag64, mss;
1672 
1673 	M_ASSERTPKTHDR((*m_head));
1674 
1675 	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
1676 		/* XXX Is this necessary? */
1677 		error = jme_tso_pullup(m_head);
1678 		if (error)
1679 			return error;
1680 	}
1681 
1682 	prod = tdata->jme_tx_prod;
1683 	txd = &tdata->jme_txdesc[prod];
1684 
1685 	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1686 		symbol_desc = 1;
1687 	else
1688 		symbol_desc = 0;
1689 
1690 	maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) -
1691 		  (JME_TXD_RSVD + symbol_desc);
1692 	if (maxsegs > JME_MAXTXSEGS)
1693 		maxsegs = JME_MAXTXSEGS;
1694 	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
1695 		("not enough segments %d", maxsegs));
1696 
1697 	error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag,
1698 			txd->tx_dmamap, m_head,
1699 			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1700 	if (error)
1701 		goto fail;
1702 	*segs_used += nsegs;
1703 
1704 	bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap,
1705 			BUS_DMASYNC_PREWRITE);
1706 
1707 	m = *m_head;
1708 	cflags = 0;
1709 	mss = 0;
1710 
1711 	/* Configure checksum offload. */
1712 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1713 		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
1714 		cflags |= JME_TD_TSO;
1715 	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
1716 		if (m->m_pkthdr.csum_flags & CSUM_IP)
1717 			cflags |= JME_TD_IPCSUM;
1718 		if (m->m_pkthdr.csum_flags & CSUM_TCP)
1719 			cflags |= JME_TD_TCPCSUM;
1720 		if (m->m_pkthdr.csum_flags & CSUM_UDP)
1721 			cflags |= JME_TD_UDPCSUM;
1722 	}
1723 
1724 	/* Configure VLAN. */
1725 	if (m->m_flags & M_VLANTAG) {
1726 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1727 		cflags |= JME_TD_VLAN_TAG;
1728 	}
1729 
1730 	desc = &tdata->jme_tx_ring[prod];
1731 	desc->flags = htole32(cflags);
1732 	desc->addr_hi = htole32(m->m_pkthdr.len);
1733 	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1734 		/*
1735 		 * Use 64bits TX desc chain format.
1736 		 *
1737 		 * The first TX desc of the chain, which is setup here,
1738 		 * is just a symbol TX desc carrying no payload.
1739 		 */
1740 		flag64 = JME_TD_64BIT;
1741 		desc->buflen = htole32(mss);
1742 		desc->addr_lo = 0;
1743 
1744 		*segs_used += 1;
1745 
1746 		/* No effective TX desc is consumed */
1747 		i = 0;
1748 	} else {
1749 		/*
1750 		 * Use 32bits TX desc chain format.
1751 		 *
1752 		 * The first TX desc of the chain, which is setup here,
1753 		 * is an effective TX desc carrying the first segment of
1754 		 * the mbuf chain.
1755 		 */
1756 		flag64 = 0;
1757 		desc->buflen = htole32(mss | txsegs[0].ds_len);
1758 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1759 
1760 		/* One effective TX desc is consumed */
1761 		i = 1;
1762 	}
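	/*
	 * Illustrative layout of the chain built above and below, for a
	 * packet with nsegs DMA segments:
	 *
	 *   64-bit format: 1 symbol desc (cflags/len/mss, no buffer)
	 *                  + nsegs payload descs        = nsegs + 1 descs
	 *   32-bit format: head desc carries segment 0
	 *                  + (nsegs - 1) payload descs  = nsegs descs
	 */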
1763 	tdata->jme_tx_cnt++;
1764 	KKASSERT(tdata->jme_tx_cnt - i < tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
1765 	JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
1766 
1767 	txd->tx_ndesc = 1 - i;
1768 	for (; i < nsegs; i++) {
1769 		desc = &tdata->jme_tx_ring[prod];
1770 		desc->buflen = htole32(txsegs[i].ds_len);
1771 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1772 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1773 		desc->flags = htole32(JME_TD_OWN | flag64);
1774 
1775 		tdata->jme_tx_cnt++;
1776 		KKASSERT(tdata->jme_tx_cnt <=
1777 			 tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
1778 		JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
1779 	}
1780 
1781 	/* Update producer index. */
1782 	tdata->jme_tx_prod = prod;
1783 	/*
1784 	 * Finally, request an interrupt and give ownership of the
1785 	 * first descriptor to the hardware.
1786 	 */
1787 	desc = txd->tx_desc;
1788 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1789 
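	/*
	 * tx_ndesc was primed to (1 - i) above: one extra slot for the
	 * symbol descriptor in 64bit mode, zero otherwise; adding
	 * nsegs yields the total ring slots consumed by this frame.
	 */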
1790 	txd->tx_m = m;
1791 	txd->tx_ndesc += nsegs;
1792 
1793 	return 0;
1794 fail:
1795 	m_freem(*m_head);
1796 	*m_head = NULL;
1797 	return error;
1798 }
1799 
1800 static void
1801 jme_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1802 {
1803 	struct jme_softc *sc = ifp->if_softc;
1804 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1805 	struct mbuf *m_head;
1806 	int enq = 0;
1807 
1808 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1809 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
1810 
1811 	if (!sc->jme_has_link) {
1812 		ifq_purge(&ifp->if_snd);
1813 		return;
1814 	}
1815 
1816 	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1817 		return;
1818 
1819 	if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata))
1820 		jme_txeof(tdata);
1821 
1822 	while (!ifq_is_empty(&ifp->if_snd)) {
1823 		/*
1824 		 * Check the number of available TX descs; always
1825 		 * leave JME_TXD_RSVD TX descs free.
1826 		 */
1827 		if (tdata->jme_tx_cnt + JME_TXD_SPARE >
1828 		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD) {
1829 			ifq_set_oactive(&ifp->if_snd);
1830 			break;
1831 		}
1832 
1833 		m_head = ifq_dequeue(&ifp->if_snd);
1834 		if (m_head == NULL)
1835 			break;
1836 
1837 		/*
1838 		 * Pack the data into the transmit ring. If we
1839 		 * don't have room, set the OACTIVE flag and wait
1840 		 * for the NIC to drain the ring.
1841 		 */
1842 		if (jme_encap(tdata, &m_head, &enq)) {
1843 			KKASSERT(m_head == NULL);
1844 			IFNET_STAT_INC(ifp, oerrors, 1);
1845 			ifq_set_oactive(&ifp->if_snd);
1846 			break;
1847 		}
1848 
1849 		if (enq >= tdata->jme_tx_wreg) {
1850 			CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr |
1851 			    TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0));
1852 			enq = 0;
1853 		}
1854 
1855 		/*
1856 		 * If there's a BPF listener, bounce a copy of this frame
1857 		 * to him.
1858 		 */
1859 		ETHER_BPF_MTAP(ifp, m_head);
1860 
1861 		/* Set a timeout in case the chip goes out to lunch. */
1862 		ifp->if_timer = JME_TX_TIMEOUT;
1863 	}
1864 
1865 	if (enq > 0) {
1866 		/*
1867 		 * Reading TXCSR takes a very long time under heavy load,
1868 		 * so cache the TXCSR value and write the ORed value with
1869 		 * the kick command to TXCSR.  This saves one register
1870 		 * access cycle.
1871 		 */
1872 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1873 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1874 	}
1875 }
1876 
1877 static void
1878 jme_watchdog(struct ifnet *ifp)
1879 {
1880 	struct jme_softc *sc = ifp->if_softc;
1881 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1882 
1883 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1884 
1885 	if (!sc->jme_has_link) {
1886 		if_printf(ifp, "watchdog timeout (missed link)\n");
1887 		IFNET_STAT_INC(ifp, oerrors, 1);
1888 		jme_init(sc);
1889 		return;
1890 	}
1891 
1892 	jme_txeof(tdata);
1893 	if (tdata->jme_tx_cnt == 0) {
1894 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1895 			  "-- recovering\n");
1896 		if (!ifq_is_empty(&ifp->if_snd))
1897 			if_devstart(ifp);
1898 		return;
1899 	}
1900 
1901 	if_printf(ifp, "watchdog timeout\n");
1902 	IFNET_STAT_INC(ifp, oerrors, 1);
1903 	jme_init(sc);
1904 	if (!ifq_is_empty(&ifp->if_snd))
1905 		if_devstart(ifp);
1906 }
1907 
1908 static int
1909 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1910 {
1911 	struct jme_softc *sc = ifp->if_softc;
1912 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1913 	struct ifreq *ifr = (struct ifreq *)data;
1914 	int error = 0, mask;
1915 
1916 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1917 
1918 	switch (cmd) {
1919 	case SIOCSIFMTU:
1920 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1921 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1922 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1923 			error = EINVAL;
1924 			break;
1925 		}
1926 
1927 		if (ifp->if_mtu != ifr->ifr_mtu) {
1928 			/*
1929 			 * No special configuration is required when the
1930 			 * interface MTU is changed, but the availability of
1931 			 * Tx checksum offload should be checked against the
1932 			 * new MTU size, as the FIFO size is just 2K.
1933 			 */
1934 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1935 				ifp->if_capenable &=
1936 				    ~(IFCAP_TXCSUM | IFCAP_TSO);
1937 				ifp->if_hwassist &=
1938 				    ~(JME_CSUM_FEATURES | CSUM_TSO);
1939 			}
1940 			ifp->if_mtu = ifr->ifr_mtu;
1941 			if (ifp->if_flags & IFF_RUNNING)
1942 				jme_init(sc);
1943 		}
1944 		break;
1945 
1946 	case SIOCSIFFLAGS:
1947 		if (ifp->if_flags & IFF_UP) {
1948 			if (ifp->if_flags & IFF_RUNNING) {
1949 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1950 				    (IFF_PROMISC | IFF_ALLMULTI))
1951 					jme_set_filter(sc);
1952 			} else {
1953 				jme_init(sc);
1954 			}
1955 		} else {
1956 			if (ifp->if_flags & IFF_RUNNING)
1957 				jme_stop(sc);
1958 		}
1959 		sc->jme_if_flags = ifp->if_flags;
1960 		break;
1961 
1962 	case SIOCADDMULTI:
1963 	case SIOCDELMULTI:
1964 		if (ifp->if_flags & IFF_RUNNING)
1965 			jme_set_filter(sc);
1966 		break;
1967 
1968 	case SIOCSIFMEDIA:
1969 	case SIOCGIFMEDIA:
1970 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1971 		break;
1972 
1973 	case SIOCSIFCAP:
1974 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1975 
1976 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1977 			ifp->if_capenable ^= IFCAP_TXCSUM;
1978 			if (ifp->if_capenable & IFCAP_TXCSUM)
1979 				ifp->if_hwassist |= JME_CSUM_FEATURES;
1980 			else
1981 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1982 		}
1983 		if (mask & IFCAP_RXCSUM) {
1984 			uint32_t reg;
1985 
1986 			ifp->if_capenable ^= IFCAP_RXCSUM;
1987 			reg = CSR_READ_4(sc, JME_RXMAC);
1988 			reg &= ~RXMAC_CSUM_ENB;
1989 			if (ifp->if_capenable & IFCAP_RXCSUM)
1990 				reg |= RXMAC_CSUM_ENB;
1991 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1992 		}
1993 
1994 		if (mask & IFCAP_VLAN_HWTAGGING) {
1995 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1996 			jme_set_vlan(sc);
1997 		}
1998 
1999 		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
2000 			ifp->if_capenable ^= IFCAP_TSO;
2001 			if (ifp->if_capenable & IFCAP_TSO)
2002 				ifp->if_hwassist |= CSUM_TSO;
2003 			else
2004 				ifp->if_hwassist &= ~CSUM_TSO;
2005 		}
2006 
2007 		if (mask & IFCAP_RSS)
2008 			ifp->if_capenable ^= IFCAP_RSS;
2009 		break;
2010 
2011 	default:
2012 		error = ether_ioctl(ifp, cmd, data);
2013 		break;
2014 	}
2015 	return (error);
2016 }
2017 
2018 static void
2019 jme_mac_config(struct jme_softc *sc)
2020 {
2021 	struct mii_data *mii;
2022 	uint32_t ghc, rxmac, txmac, txpause, gp1;
2023 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
2024 
2025 	mii = device_get_softc(sc->jme_miibus);
2026 
2027 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2028 	DELAY(10);
2029 	CSR_WRITE_4(sc, JME_GHC, 0);
2030 	ghc = 0;
2031 	rxmac = CSR_READ_4(sc, JME_RXMAC);
2032 	rxmac &= ~RXMAC_FC_ENB;
2033 	txmac = CSR_READ_4(sc, JME_TXMAC);
2034 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2035 	txpause = CSR_READ_4(sc, JME_TXPFC);
2036 	txpause &= ~TXPFC_PAUSE_ENB;
2037 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2038 		ghc |= GHC_FULL_DUPLEX;
2039 		rxmac &= ~RXMAC_COLL_DET_ENB;
2040 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2041 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2042 		    TXMAC_FRAME_BURST);
2043 #ifdef notyet
2044 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2045 			txpause |= TXPFC_PAUSE_ENB;
2046 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2047 			rxmac |= RXMAC_FC_ENB;
2048 #endif
2049 		/* Disable retry transmit timer/retry limit. */
2050 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2051 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2052 	} else {
2053 		rxmac |= RXMAC_COLL_DET_ENB;
2054 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2055 		/* Enable retry transmit timer/retry limit. */
2056 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2057 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2058 	}
2059 
2060 	/*
2061 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
2062 	 */
2063 	gp1 = CSR_READ_4(sc, JME_GPREG1);
2064 	gp1 &= ~GPREG1_WA_HDX;
2065 
2066 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2067 		hdx = 1;
2068 
2069 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2070 	case IFM_10_T:
2071 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
2072 		if (hdx)
2073 			gp1 |= GPREG1_WA_HDX;
2074 		break;
2075 
2076 	case IFM_100_TX:
2077 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
2078 		if (hdx)
2079 			gp1 |= GPREG1_WA_HDX;
2080 
2081 		/*
2082 		 * Use extended FIFO depth to work around CRC errors
2083 		 * emitted by chips before the JMC250B.
2084 		 */
2085 		phyconf = JMPHY_CONF_EXTFIFO;
2086 		break;
2087 
2088 	case IFM_1000_T:
2089 		if (sc->jme_caps & JME_CAP_FASTETH)
2090 			break;
2091 
2092 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
2093 		if (hdx)
2094 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2095 		break;
2096 
2097 	default:
2098 		break;
2099 	}
2100 	CSR_WRITE_4(sc, JME_GHC, ghc);
2101 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2102 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
2103 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
2104 
2105 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
2106 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2107 				    JMPHY_CONF, phyconf);
2108 	}
2109 	if (sc->jme_workaround & JME_WA_HDX)
2110 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
2111 }
2112 
2113 static void
2114 jme_intr(void *xsc)
2115 {
2116 	struct jme_softc *sc = xsc;
2117 	struct ifnet *ifp = &sc->arpcom.ac_if;
2118 	uint32_t status;
2119 	int r;
2120 
2121 	ASSERT_SERIALIZED(&sc->jme_serialize);
2122 
2123 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2124 	if (status == 0 || status == 0xFFFFFFFF)
2125 		return;
2126 
2127 	/* Disable interrupts. */
2128 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2129 
2130 	status = CSR_READ_4(sc, JME_INTR_STATUS);
2131 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2132 		goto back;
2133 
2134 	/* Reset PCC counter/timer and Ack interrupts. */
2135 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2136 
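	/*
	 * The TXQ/RXQ completion bits were cleared above; below they
	 * are added back only for the queues whose coalescing
	 * interrupts are pending, so acking them resets only those
	 * queues' PCC counters/timers.
	 */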
2137 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
2138 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2139 
2140 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2141 		if (status & jme_rx_status[r].jme_coal) {
2142 			status |= jme_rx_status[r].jme_coal |
2143 				  jme_rx_status[r].jme_comp;
2144 		}
2145 	}
2146 
2147 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2148 
2149 	if (ifp->if_flags & IFF_RUNNING) {
2150 		struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
2151 
2152 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
2153 			jme_rx_intr(sc, status);
2154 
2155 		if (status & INTR_RXQ_DESC_EMPTY) {
2156 			/*
2157 			 * Notify the hardware that new Rx buffers are
2158 			 * available.  Reading RXCSR takes a very long time
2159 			 * under heavy load, so cache the RXCSR value and
2160 			 * write the ORed value with the kick command to
2161 			 * RXCSR.  This saves one register access cycle.
2162 			 */
2163 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2164 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2165 		}
2166 
2167 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
2168 			lwkt_serialize_enter(&tdata->jme_tx_serialize);
2169 			jme_txeof(tdata);
2170 			if (!ifq_is_empty(&ifp->if_snd))
2171 				if_devstart(ifp);
2172 			lwkt_serialize_exit(&tdata->jme_tx_serialize);
2173 		}
2174 	}
2175 back:
2176 	/* Reenable interrupts. */
2177 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2178 }
2179 
2180 static void
2181 jme_txeof(struct jme_txdata *tdata)
2182 {
2183 	struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if;
2184 	int cons;
2185 
2186 	cons = tdata->jme_tx_cons;
2187 	if (cons == tdata->jme_tx_prod)
2188 		return;
2189 
2190 	/*
2191 	 * Go through our Tx list and free mbufs for those
2192 	 * frames which have been transmitted.
2193 	 */
2194 	while (cons != tdata->jme_tx_prod) {
2195 		struct jme_txdesc *txd, *next_txd;
2196 		uint32_t status, next_status;
2197 		int next_cons, nsegs;
2198 
2199 		txd = &tdata->jme_txdesc[cons];
2200 		KASSERT(txd->tx_m != NULL,
2201 			("%s: freeing NULL mbuf!", __func__));
2202 
2203 		status = le32toh(txd->tx_desc->flags);
2204 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2205 			break;
2206 
2207 		/*
2208 		 * NOTE:
2209 		 * This chip always updates the TX descriptor's
2210 		 * buflen field, and that update always happens
2211 		 * after the OWN bit is cleared.  So even if the OWN
2212 		 * bit has been cleared by the chip, we still can't be
2213 		 * sure whether the buflen field has been updated
2214 		 * by the chip or not.  To avoid this race, we wait
2215 		 * for the next TX descriptor's OWN bit to be cleared
2216 		 * by the chip before reusing this TX descriptor.
2217 		 */
2218 		next_cons = cons;
2219 		JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt);
2220 		next_txd = &tdata->jme_txdesc[next_cons];
2221 		if (next_txd->tx_m == NULL)
2222 			break;
2223 		next_status = le32toh(next_txd->tx_desc->flags);
2224 		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
2225 			break;
2226 
2227 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2228 			IFNET_STAT_INC(ifp, oerrors, 1);
2229 		} else {
2230 			IFNET_STAT_INC(ifp, opackets, 1);
2231 			if (status & JME_TD_COLLISION) {
2232 				IFNET_STAT_INC(ifp, collisions,
2233 				    le32toh(txd->tx_desc->buflen) &
2234 				    JME_TD_BUF_LEN_MASK);
2235 			}
2236 		}
2237 
2238 		/*
2239 		 * Only the first descriptor of a multi-descriptor
2240 		 * transmission is updated, so the driver has to skip the
2241 		 * entire chain of buffers for the transmitted frame.  In
2242 		 * other words, the JME_TD_OWN bit is valid only at the
2243 		 * first descriptor of a multi-descriptor transmission.
2244 		 */
2245 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2246 			tdata->jme_tx_ring[cons].flags = 0;
2247 			JME_DESC_INC(cons, tdata->jme_tx_desc_cnt);
2248 		}
2249 
2250 		/* Reclaim transferred mbufs. */
2251 		bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
2252 		m_freem(txd->tx_m);
2253 		txd->tx_m = NULL;
2254 		tdata->jme_tx_cnt -= txd->tx_ndesc;
2255 		KASSERT(tdata->jme_tx_cnt >= 0,
2256 			("%s: Active Tx desc counter was garbled", __func__));
2257 		txd->tx_ndesc = 0;
2258 	}
2259 	tdata->jme_tx_cons = cons;
2260 
2261 	/* The extra 1 accounts for the symbol TX descriptor. */
2262 	if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1)
2263 		ifp->if_timer = 0;
2264 
2265 	if (tdata->jme_tx_cnt + JME_TXD_SPARE <=
2266 	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD)
2267 		ifq_clr_oactive(&ifp->if_snd);
2268 }
2269 
2270 static __inline void
2271 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2272 {
2273 	int i;
2274 
2275 	for (i = 0; i < count; ++i) {
2276 		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2277 		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2278 	}
2279 }
2280 
2281 static __inline struct pktinfo *
2282 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2283 {
2284 	if (flags & JME_RD_IPV4)
2285 		pi->pi_netisr = NETISR_IP;
2286 	else if (flags & JME_RD_IPV6)
2287 		pi->pi_netisr = NETISR_IPV6;
2288 	else
2289 		return NULL;
2290 
2291 	pi->pi_flags = 0;
2292 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2293 
2294 	if (flags & JME_RD_MORE_FRAG)
2295 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2296 	else if (flags & JME_RD_TCP)
2297 		pi->pi_l3proto = IPPROTO_TCP;
2298 	else if (flags & JME_RD_UDP)
2299 		pi->pi_l3proto = IPPROTO_UDP;
2300 	else
2301 		pi = NULL;
2302 	return pi;
2303 }
2304 
2305 /* Receive a frame. */
2306 static void
2307 jme_rxpkt(struct jme_rxdata *rdata, int cpuid)
2308 {
2309 	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2310 	struct jme_desc *desc;
2311 	struct jme_rxdesc *rxd;
2312 	struct mbuf *mp, *m;
2313 	uint32_t flags, status, hash, hashinfo;
2314 	int cons, count, nsegs;
2315 
2316 	cons = rdata->jme_rx_cons;
2317 	desc = &rdata->jme_rx_ring[cons];
2318 
2319 	flags = le32toh(desc->flags);
2320 	status = le32toh(desc->buflen);
2321 	hash = le32toh(desc->addr_hi);
2322 	hashinfo = le32toh(desc->addr_lo);
2323 	nsegs = JME_RX_NSEGS(status);
2324 
2325 	if (nsegs > 1) {
2326 		/* Skip the first descriptor. */
2327 		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2328 
2329 		/*
2330 		 * Clear the OWN bit of the following RX descriptors;
2331 		 * the hardware will not clear the OWN bit of any
2332 		 * RX descriptor except the first.
2333 		 *
2334 		 * Since the first RX descriptor is set up, i.e. its OWN
2335 		 * bit turned on, before its following RX descriptors,
2336 		 * leaving the OWN bit on the following RX descriptors
2337 		 * would trick the hardware into thinking that they
2338 		 * are ready to be used too.
2339 		 */
2340 		for (count = 1; count < nsegs; count++,
2341 		     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2342 			rdata->jme_rx_ring[cons].flags = 0;
2343 
2344 		cons = rdata->jme_rx_cons;
2345 	}
2346 
2347 	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2348 			"hash 0x%08x, hash info 0x%08x\n",
2349 			rdata->jme_rx_idx, flags, hash, hashinfo);
2350 
2351 	if (status & JME_RX_ERR_STAT) {
2352 		IFNET_STAT_INC(ifp, ierrors, 1);
2353 		jme_discard_rxbufs(rdata, cons, nsegs);
2354 #ifdef JME_SHOW_ERRORS
2355 		if_printf(ifp, "%s : receive error = 0x%pb%i\n",
2356 		    __func__, JME_RX_ERR_BITS, JME_RX_ERR(status));
2357 #endif
2358 		rdata->jme_rx_cons += nsegs;
2359 		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2360 		return;
2361 	}
2362 
2363 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2364 	for (count = 0; count < nsegs; count++,
2365 	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2366 		rxd = &rdata->jme_rxdesc[cons];
2367 		mp = rxd->rx_m;
2368 
2369 		/* Add a new receive buffer to the ring. */
2370 		if (jme_newbuf(rdata, rxd, 0) != 0) {
2371 			IFNET_STAT_INC(ifp, iqdrops, 1);
2372 			/* Reuse buffer. */
2373 			jme_discard_rxbufs(rdata, cons, nsegs - count);
2374 			if (rdata->jme_rxhead != NULL) {
2375 				m_freem(rdata->jme_rxhead);
2376 				JME_RXCHAIN_RESET(rdata);
2377 			}
2378 			break;
2379 		}
2380 
2381 		/*
2382 		 * Assume we've received a full-sized frame.  The
2383 		 * actual size is fixed up when we encounter the end
2384 		 * of a multi-segment frame.
2385 		 */
2386 		mp->m_len = MCLBYTES;
2387 
2388 		/* Chain received mbufs. */
2389 		if (rdata->jme_rxhead == NULL) {
2390 			rdata->jme_rxhead = mp;
2391 			rdata->jme_rxtail = mp;
2392 		} else {
2393 			/*
2394 			 * The receive processor can handle a maximum
2395 			 * frame size of 65535 bytes.
2396 			 */
2397 			rdata->jme_rxtail->m_next = mp;
2398 			rdata->jme_rxtail = mp;
2399 		}
2400 
2401 		if (count == nsegs - 1) {
2402 			struct pktinfo pi0, *pi;
2403 
2404 			/* Last desc. for this frame. */
2405 			m = rdata->jme_rxhead;
2406 			m->m_pkthdr.len = rdata->jme_rxlen;
2407 			if (nsegs > 1) {
2408 				/* Set first mbuf size. */
2409 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2410 				/* Set last mbuf size. */
2411 				mp->m_len = rdata->jme_rxlen -
2412 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2413 				    (MCLBYTES * (nsegs - 2)));
2414 			} else {
2415 				m->m_len = rdata->jme_rxlen;
2416 			}
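			/*
			 * For example, with 2K clusters (MCLBYTES == 2048)
			 * and rxlen 5000 spread over 3 segments: the first
			 * mbuf holds 2038 bytes, the middle one 2048, and
			 * the last 5000 - (2038 + 2048) = 914 bytes.
			 */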
2417 			m->m_pkthdr.rcvif = ifp;
2418 
2419 			/*
2420 			 * Account for the 10 bytes of auto padding used
2421 			 * to align the IP header on a 32bit boundary.
2422 			 * Also note that the CRC bytes are automatically
2423 			 * removed by the hardware.
2424 			 */
2425 			m->m_data += JME_RX_PAD_BYTES;
2426 
2427 			/* Set checksum information. */
2428 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2429 			    (flags & JME_RD_IPV4)) {
2430 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2431 				if (flags & JME_RD_IPCSUM)
2432 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2433 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2434 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2435 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2436 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2437 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2438 					m->m_pkthdr.csum_flags |=
2439 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2440 					m->m_pkthdr.csum_data = 0xffff;
2441 				}
2442 			}
2443 
2444 			/* Check for VLAN tagged packets. */
2445 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2446 			    (flags & JME_RD_VLAN_TAG)) {
2447 				m->m_pkthdr.ether_vlantag =
2448 				    flags & JME_RD_VLAN_MASK;
2449 				m->m_flags |= M_VLANTAG;
2450 			}
2451 
2452 			IFNET_STAT_INC(ifp, ipackets, 1);
2453 
2454 			if (ifp->if_capenable & IFCAP_RSS)
2455 				pi = jme_pktinfo(&pi0, flags);
2456 			else
2457 				pi = NULL;
2458 
2459 			if (pi != NULL &&
2460 			    (hashinfo & JME_RD_HASH_FN_MASK) ==
2461 			    JME_RD_HASH_FN_TOEPLITZ) {
2462 				m_sethash(m, toeplitz_hash(hash));
2463 				m->m_flags |= M_CKHASH;
2464 			}
2465 
2466 #ifdef JME_RSS_DEBUG
2467 			if (pi != NULL) {
2468 				JME_RSS_DPRINTF(rdata->jme_sc, 10,
2469 				    "isr %d flags %08x, l3 %d %s\n",
2470 				    pi->pi_netisr, pi->pi_flags,
2471 				    pi->pi_l3proto,
2472 				    (m->m_flags & M_HASH) ? "hash" : "");
2473 			}
2474 #endif
2475 
2476 			/* Pass it on. */
2477 			ifp->if_input(ifp, m, pi, cpuid);
2478 
2479 			/* Reset mbuf chains. */
2480 			JME_RXCHAIN_RESET(rdata);
2481 #ifdef JME_RSS_DEBUG
2482 			rdata->jme_rx_pkt++;
2483 #endif
2484 		}
2485 	}
2486 
2487 	rdata->jme_rx_cons += nsegs;
2488 	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2489 }
2490 
2491 static void
2492 jme_rxeof(struct jme_rxdata *rdata, int count, int cpuid)
2493 {
2494 	struct jme_desc *desc;
2495 	int nsegs, pktlen;
2496 
2497 	for (;;) {
2498 #ifdef IFPOLL_ENABLE
2499 		if (count >= 0 && count-- == 0)
2500 			break;
2501 #endif
2502 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2503 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2504 			break;
2505 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2506 			break;
2507 
2508 		/*
2509 		 * Check the number of segments against the received
2510 		 * bytes.  A non-matching value would indicate that the
2511 		 * hardware is still trying to update the Rx descriptors.
2512 		 * I'm not sure whether this check is needed.
2513 		 */
2514 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2515 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2516 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2517 			if_printf(&rdata->jme_sc->arpcom.ac_if,
2518 			    "RX fragment count(%d) and "
2519 			    "packet size(%d) mismatch\n", nsegs, pktlen);
2520 			break;
2521 		}
2522 
2523 		/*
2524 		 * NOTE:
2525 		 * RSS hash and hash information may _not_ be set by the
2526 		 * hardware even if the OWN bit is cleared and VALID bit
2527 		 * is set.
2528 		 *
2529 		 * If the RSS information has not been delivered by the
2530 		 * hardware yet, we MUST NOT accept this packet, let alone
2531 		 * reuse its RX descriptor.  If this packet were accepted
2532 		 * and its RX descriptor reused before the hardware had
2533 		 * delivered the RSS information, the RX buffer's address
2534 		 * would be trashed by that delivery.
2535 		 */
2536 		if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2537 			struct jme_rxdesc *rxd;
2538 			uint32_t hashinfo;
2539 
2540 			hashinfo = le32toh(desc->addr_lo);
2541 			rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2542 
2543 			/*
2544 			 * This test should be enough to detect the pending
2545 			 * RSS information delivery, given:
2546 			 * - If RSS hash is not calculated, the hashinfo
2547 			 *   will be 0.  However, the lower 32bits of the RX
2548 			 *   buffers' physical address will never be 0.
2549 			 *   (see jme_rxbuf_dma_filter)
2550 			 * - If RSS hash is calculated, the lowest 4 bits
2551 			 *   of hashinfo will be set, while the RX buffers
2552 			 *   are at least 2K aligned.
2553 			 */
2554 			if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2555 #ifdef JME_SHOW_RSSWB
2556 				if_printf(&rdata->jme_sc->arpcom.ac_if,
2557 				    "RSS is not written back yet\n");
2558 #endif
2559 				break;
2560 			}
2561 		}
2562 
2563 		/* Received a frame. */
2564 		jme_rxpkt(rdata, cpuid);
2565 	}
2566 }
2567 
2568 static void
2569 jme_tick(void *xsc)
2570 {
2571 	struct jme_softc *sc = xsc;
2572 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2573 
2574 	lwkt_serialize_enter(&sc->jme_serialize);
2575 
2576 	KKASSERT(mycpuid == JME_TICK_CPUID);
2577 
2578 	sc->jme_in_tick = TRUE;
2579 	mii_tick(mii);
2580 	sc->jme_in_tick = FALSE;
2581 
2582 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2583 
2584 	lwkt_serialize_exit(&sc->jme_serialize);
2585 }
2586 
2587 static void
2588 jme_reset(struct jme_softc *sc)
2589 {
2590 	uint32_t val;
2591 
2592 	/* Make sure that TX and RX are stopped */
2593 	jme_stop_tx(sc);
2594 	jme_stop_rx(sc);
2595 
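	/*
	 * Reset dance below: assert GHC_RESET with the MAC clock
	 * sources gated, deassert it, then toggle the clock sources
	 * and briefly enable TX/RX before leaving them stopped again.
	 */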
2596 	/* Start reset */
2597 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2598 	DELAY(20);
2599 
2600 	/*
2601 	 * Hold the reset bit before stopping the reset.
2602 	 */
2603 
2604 	/* Disable TXMAC and TXOFL clock sources */
2605 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2606 	/* Disable RXMAC clock source */
2607 	val = CSR_READ_4(sc, JME_GPREG1);
2608 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2609 	/* Flush */
2610 	CSR_READ_4(sc, JME_GHC);
2611 
2612 	/* Stop reset */
2613 	CSR_WRITE_4(sc, JME_GHC, 0);
2614 	/* Flush */
2615 	CSR_READ_4(sc, JME_GHC);
2616 
2617 	/*
2618 	 * Clear the reset bit after stopping the reset.
2619 	 */
2620 
2621 	/* Enable TXMAC and TXOFL clock sources */
2622 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2623 	/* Enable RXMAC clock source */
2624 	val = CSR_READ_4(sc, JME_GPREG1);
2625 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2626 	/* Flush */
2627 	CSR_READ_4(sc, JME_GHC);
2628 
2629 	/* Disable TXMAC and TXOFL clock sources */
2630 	CSR_WRITE_4(sc, JME_GHC, 0);
2631 	/* Disable RXMAC clock source */
2632 	val = CSR_READ_4(sc, JME_GPREG1);
2633 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2634 	/* Flush */
2635 	CSR_READ_4(sc, JME_GHC);
2636 
2637 	/* Enable TX and RX */
2638 	val = CSR_READ_4(sc, JME_TXCSR);
2639 	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2640 	val = CSR_READ_4(sc, JME_RXCSR);
2641 	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2642 	/* Flush */
2643 	CSR_READ_4(sc, JME_TXCSR);
2644 	CSR_READ_4(sc, JME_RXCSR);
2645 
2646 	/* Enable TXMAC and TXOFL clock sources */
2647 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2648 	/* Disable RXMAC clock source */
2649 	val = CSR_READ_4(sc, JME_GPREG1);
2650 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2651 	/* Flush */
2652 	CSR_READ_4(sc, JME_GHC);
2653 
2654 	/* Stop TX and RX */
2655 	jme_stop_tx(sc);
2656 	jme_stop_rx(sc);
2657 }
2658 
2659 static void
2660 jme_init(void *xsc)
2661 {
2662 	struct jme_softc *sc = xsc;
2663 	struct ifnet *ifp = &sc->arpcom.ac_if;
2664 	struct mii_data *mii;
2665 	uint8_t eaddr[ETHER_ADDR_LEN];
2666 	bus_addr_t paddr;
2667 	uint32_t reg;
2668 	int error, r;
2669 
2670 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2671 
2672 	/*
2673 	 * Cancel any pending I/O.
2674 	 */
2675 	jme_stop(sc);
2676 
2677 	/*
2678 	 * Reset the chip to a known state.
2679 	 */
2680 	jme_reset(sc);
2681 
2682 	/*
2683 	 * Set up the MSI/MSI-X vector to interrupt source mapping.
2684 	 */
2685 	jme_set_msinum(sc);
2686 
2687 	if (JME_ENABLE_HWRSS(sc))
2688 		jme_enable_rss(sc);
2689 	else
2690 		jme_disable_rss(sc);
2691 
2692 	/* Init RX descriptors */
2693 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2694 		error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2695 		if (error) {
2696 			if_printf(ifp, "initialization failed: "
2697 				  "no memory for %dth RX ring.\n", r);
2698 			jme_stop(sc);
2699 			return;
2700 		}
2701 	}
2702 
2703 	/* Init TX descriptors */
2704 	jme_init_tx_ring(&sc->jme_cdata.jme_tx_data);
2705 
2706 	/* Initialize shadow status block. */
2707 	jme_init_ssb(sc);
2708 
2709 	/* Reprogram the station address. */
2710 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2711 	CSR_WRITE_4(sc, JME_PAR0,
2712 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2713 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2714 
2715 	/*
2716 	 * Configure Tx queue.
2717 	 *  Tx priority queue weight value : 0
2718 	 *  Tx FIFO threshold for processing next packet : 16QW
2719 	 *  Maximum Tx DMA length : 512
2720 	 *  Allow Tx DMA burst.
2721 	 */
2722 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2723 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2724 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2725 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2726 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2727 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2728 
2729 	/* Set Tx descriptor counter. */
2730 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt);
2731 
2732 	/* Set Tx ring address to the hardware. */
2733 	paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr;
2734 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2735 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2736 
2737 	/* Configure TxMAC parameters. */
2738 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2739 	reg |= TXMAC_THRESH_1_PKT;
2740 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2741 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2742 
2743 	/*
2744 	 * Configure Rx queue.
2745 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2746 	 *  FIFO threshold for processing next packet : 128QW
2747 	 *  Rx queue 0 select
2748 	 *  Max Rx DMA length : 128
2749 	 *  Rx descriptor retry : 32
2750 	 *  Rx descriptor retry time gap : 256ns
2751 	 *  Don't receive runt/bad frame.
2752 	 */
2753 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2754 #if 0
2755 	/*
2756 	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2757 	 * than 4K bytes will suffer from Rx FIFO overruns, so
2758 	 * decrease the FIFO threshold to reduce the overruns for
2759 	 * frames larger than 4000 bytes.
2760 	 * For best performance with standard MTU sized frames, use
2761 	 * the maximum allowable FIFO threshold, 128QW.
2762 	 */
2763 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2764 	    JME_RX_FIFO_SIZE)
2765 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2766 	else
2767 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2768 #else
2769 	/* Improve PCI Express compatibility */
2770 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2771 #endif
2772 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2773 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2774 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2775 	/* XXX TODO DROP_BAD */
2776 
2777 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2778 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2779 
2780 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2781 
2782 		/* Set Rx descriptor counter. */
2783 		CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2784 
2785 		/* Set Rx ring address to the hardware. */
2786 		paddr = rdata->jme_rx_ring_paddr;
2787 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2788 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2789 	}
2790 
2791 	/* Clear receive filter. */
2792 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2793 
2794 	/* Set up the receive filter. */
2795 	jme_set_filter(sc);
2796 	jme_set_vlan(sc);
2797 
2798 	/*
2799 	 * Disable all WOL bits, as WOL can interfere with normal Rx
2800 	 * operation.  Also clear the WOL detection status bits.
2801 	 */
2802 	reg = CSR_READ_4(sc, JME_PMCS);
2803 	reg &= ~PMCS_WOL_ENB_MASK;
2804 	CSR_WRITE_4(sc, JME_PMCS, reg);
2805 
2806 	/*
2807 	 * Pad 10 bytes right before the received frame.  This greatly
2808 	 * helps Rx performance on strict-alignment architectures, as
2809 	 * the frame does not need to be copied to align the payload.
2810 	 */
2811 	reg = CSR_READ_4(sc, JME_RXMAC);
2812 	reg |= RXMAC_PAD_10BYTES;
2813 
2814 	if (ifp->if_capenable & IFCAP_RXCSUM)
2815 		reg |= RXMAC_CSUM_ENB;
2816 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2817 
2818 	/* Configure general purpose reg0 */
2819 	reg = CSR_READ_4(sc, JME_GPREG0);
2820 	reg &= ~GPREG0_PCC_UNIT_MASK;
2821 	/* Set PCC timer resolution to micro-seconds unit. */
2822 	reg |= GPREG0_PCC_UNIT_US;
2823 	/*
2824 	 * Disable all shadow register posting, as we have to read
2825 	 * the JME_INTR_STATUS register in jme_intr.  It also seems
2826 	 * hard to synchronize the interrupt status between the
2827 	 * hardware and software with shadow posting, due to the
2828 	 * requirements of bus_dmamap_sync(9).
2829 	 */
2830 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2831 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2832 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2833 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2834 	/* Disable posting of DW0. */
2835 	reg &= ~GPREG0_POST_DW0_ENB;
2836 	/* Clear PME message. */
2837 	reg &= ~GPREG0_PME_ENB;
2838 	/* Set PHY address. */
2839 	reg &= ~GPREG0_PHY_ADDR_MASK;
2840 	reg |= sc->jme_phyaddr;
2841 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2842 
2843 	/* Configure Tx queue 0 packet completion coalescing. */
2844 	jme_set_tx_coal(sc);
2845 
2846 	/* Configure Rx queues packet completion coalescing. */
2847 	jme_set_rx_coal(sc);
2848 
2849 	/* Configure shadow status block but don't enable posting. */
2850 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2851 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2852 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2853 
2854 	/* Disable Timer 1 and Timer 2. */
2855 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2856 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2857 
2858 	/* Configure retry transmit period, retry limit value. */
2859 	CSR_WRITE_4(sc, JME_TXTRHD,
2860 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2861 	    TXTRHD_RT_PERIOD_MASK) |
2862 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2863 	    TXTRHD_RT_LIMIT_MASK));
2864 
2865 #ifdef IFPOLL_ENABLE
2866 	if (!(ifp->if_flags & IFF_NPOLLING))
2867 #endif
2868 	/* Initialize the interrupt mask. */
2869 	jme_enable_intr(sc);
2870 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2871 
2872 	/*
2873 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2874 	 * done after detection of valid link in jme_miibus_statchg.
2875 	 */
2876 	sc->jme_has_link = FALSE;
2877 
2878 	jme_phy_init(sc);
2879 
2880 	/* Set the current media. */
2881 	mii = device_get_softc(sc->jme_miibus);
2882 	mii_mediachg(mii);
2883 
2884 	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
2885 	    JME_TICK_CPUID);
2886 
2887 	ifp->if_flags |= IFF_RUNNING;
2888 	ifq_clr_oactive(&ifp->if_snd);
2889 }
2890 
2891 static void
2892 jme_stop(struct jme_softc *sc)
2893 {
2894 	struct ifnet *ifp = &sc->arpcom.ac_if;
2895 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
2896 	struct jme_txdesc *txd;
2897 	struct jme_rxdesc *rxd;
2898 	struct jme_rxdata *rdata;
2899 	int i, r;
2900 
2901 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2902 
2903 	/*
2904 	 * Mark the interface down and cancel the watchdog timer.
2905 	 */
2906 	ifp->if_flags &= ~IFF_RUNNING;
2907 	ifq_clr_oactive(&ifp->if_snd);
2908 	ifp->if_timer = 0;
2909 
2910 	callout_stop(&sc->jme_tick_ch);
2911 	sc->jme_has_link = FALSE;
2912 
2913 	/*
2914 	 * Disable interrupts.
2915 	 */
2916 	jme_disable_intr(sc);
2917 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2918 
2919 	/* Disable updating shadow status block. */
2920 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2921 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2922 
2923 	/* Stop receiver, transmitter. */
2924 	jme_stop_rx(sc);
2925 	jme_stop_tx(sc);
2926 
2927 	/*
2928 	 * Free partial finished RX segments
2929 	 */
2930 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2931 		rdata = &sc->jme_cdata.jme_rx_data[r];
2932 		if (rdata->jme_rxhead != NULL)
2933 			m_freem(rdata->jme_rxhead);
2934 		JME_RXCHAIN_RESET(rdata);
2935 	}
2936 
2937 	/*
2938 	 * Free RX and TX mbufs still in the queues.
2939 	 */
2940 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2941 		rdata = &sc->jme_cdata.jme_rx_data[r];
2942 		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2943 			rxd = &rdata->jme_rxdesc[i];
2944 			if (rxd->rx_m != NULL) {
2945 				bus_dmamap_unload(rdata->jme_rx_tag,
2946 						  rxd->rx_dmamap);
2947 				m_freem(rxd->rx_m);
2948 				rxd->rx_m = NULL;
2949 			}
2950 		}
2951 	}
2952 	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
2953 		txd = &tdata->jme_txdesc[i];
2954 		if (txd->tx_m != NULL) {
2955 			bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
2956 			m_freem(txd->tx_m);
2957 			txd->tx_m = NULL;
2958 			txd->tx_ndesc = 0;
2959 		}
2960 	}
2961 }
2962 
2963 static void
2964 jme_stop_tx(struct jme_softc *sc)
2965 {
2966 	uint32_t reg;
2967 	int i;
2968 
2969 	reg = CSR_READ_4(sc, JME_TXCSR);
2970 	if ((reg & TXCSR_TX_ENB) == 0)
2971 		return;
2972 	reg &= ~TXCSR_TX_ENB;
2973 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2974 	for (i = JME_TIMEOUT; i > 0; i--) {
2975 		DELAY(1);
2976 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2977 			break;
2978 	}
2979 	if (i == 0)
2980 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2981 }
2982 
2983 static void
2984 jme_stop_rx(struct jme_softc *sc)
2985 {
2986 	uint32_t reg;
2987 	int i;
2988 
2989 	reg = CSR_READ_4(sc, JME_RXCSR);
2990 	if ((reg & RXCSR_RX_ENB) == 0)
2991 		return;
2992 	reg &= ~RXCSR_RX_ENB;
2993 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2994 	for (i = JME_TIMEOUT; i > 0; i--) {
2995 		DELAY(1);
2996 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2997 			break;
2998 	}
2999 	if (i == 0)
3000 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3001 }
3002 
3003 static void
3004 jme_init_tx_ring(struct jme_txdata *tdata)
3005 {
3006 	struct jme_txdesc *txd;
3007 	int i;
3008 
3009 	tdata->jme_tx_prod = 0;
3010 	tdata->jme_tx_cons = 0;
3011 	tdata->jme_tx_cnt = 0;
3012 
3013 	bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata));
3014 	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
3015 		txd = &tdata->jme_txdesc[i];
3016 		txd->tx_m = NULL;
3017 		txd->tx_desc = &tdata->jme_tx_ring[i];
3018 		txd->tx_ndesc = 0;
3019 	}
3020 }
3021 
3022 static void
3023 jme_init_ssb(struct jme_softc *sc)
3024 {
3025 	struct jme_chain_data *cd;
3026 
3027 	cd = &sc->jme_cdata;
3028 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
3029 }
3030 
3031 static int
3032 jme_init_rx_ring(struct jme_rxdata *rdata)
3033 {
3034 	struct jme_rxdesc *rxd;
3035 	int i;
3036 
3037 	KKASSERT(rdata->jme_rxhead == NULL &&
3038 		 rdata->jme_rxtail == NULL &&
3039 		 rdata->jme_rxlen == 0);
3040 	rdata->jme_rx_cons = 0;
3041 
3042 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
3043 	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3044 		int error;
3045 
3046 		rxd = &rdata->jme_rxdesc[i];
3047 		rxd->rx_m = NULL;
3048 		rxd->rx_desc = &rdata->jme_rx_ring[i];
3049 		error = jme_newbuf(rdata, rxd, 1);
3050 		if (error)
3051 			return error;
3052 	}
3053 	return 0;
3054 }
3055 
3056 static int
3057 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
3058 {
3059 	struct mbuf *m;
3060 	bus_dma_segment_t segs;
3061 	bus_dmamap_t map;
3062 	int error, nsegs;
3063 
3064 	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
3065 	if (m == NULL)
3066 		return ENOBUFS;
3067 	/*
3068 	 * The JMC250 has a 64bit boundary alignment limitation, so
3069 	 * jme(4) takes advantage of the hardware's 10 byte padding
3070 	 * feature to avoid copying the entire frame just to align
3071 	 * the IP header on a 32bit boundary.
3072 	 */
3073 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3074 
3075 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
3076 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
3077 			BUS_DMA_NOWAIT);
3078 	if (error) {
3079 		m_freem(m);
3080 		if (init) {
3081 			if_printf(&rdata->jme_sc->arpcom.ac_if,
3082 			    "can't load RX mbuf\n");
3083 		}
3084 		return error;
3085 	}
3086 
3087 	if (rxd->rx_m != NULL) {
3088 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
3089 				BUS_DMASYNC_POSTREAD);
3090 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
3091 	}
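	/*
	 * Swap the just-loaded spare map into this descriptor and keep
	 * the old map as the new spare, so a load failure above never
	 * leaves the descriptor without a mapped buffer.
	 */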
3092 	map = rxd->rx_dmamap;
3093 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
3094 	rdata->jme_rx_sparemap = map;
3095 	rxd->rx_m = m;
3096 	rxd->rx_paddr = segs.ds_addr;
3097 
3098 	jme_setup_rxdesc(rxd);
3099 	return 0;
3100 }
3101 
3102 static void
3103 jme_set_vlan(struct jme_softc *sc)
3104 {
3105 	struct ifnet *ifp = &sc->arpcom.ac_if;
3106 	uint32_t reg;
3107 
3108 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3109 
3110 	reg = CSR_READ_4(sc, JME_RXMAC);
3111 	reg &= ~RXMAC_VLAN_ENB;
3112 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3113 		reg |= RXMAC_VLAN_ENB;
3114 	CSR_WRITE_4(sc, JME_RXMAC, reg);
3115 }
3116 
3117 static void
3118 jme_set_filter(struct jme_softc *sc)
3119 {
3120 	struct ifnet *ifp = &sc->arpcom.ac_if;
3121 	struct ifmultiaddr *ifma;
3122 	uint32_t crc;
3123 	uint32_t mchash[2];
3124 	uint32_t rxcfg;
3125 
3126 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3127 
3128 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
3129 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3130 	    RXMAC_ALLMULTI);
3131 
3132 	/*
3133 	 * Always accept frames destined for our station address.
3134 	 * Always accept broadcast frames.
3135 	 */
3136 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3137 
3138 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3139 		if (ifp->if_flags & IFF_PROMISC)
3140 			rxcfg |= RXMAC_PROMISC;
3141 		if (ifp->if_flags & IFF_ALLMULTI)
3142 			rxcfg |= RXMAC_ALLMULTI;
3143 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3144 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3145 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3146 		return;
3147 	}
3148 
3149 	/*
3150 	 * Set up the multicast address filter by passing all multicast
3151 	 * addresses through a CRC generator, and then using the low-order
3152 	 * 6 bits as an index into the 64 bit multicast hash table.  The
3153 	 * high order bits select the register, while the rest of the bits
3154 	 * select the bit within the register.
3155 	 */
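	/*
	 * For example, crc 0x2b selects bit 11 of MAR1:
	 * 0x2b >> 5 == 1 and 0x2b & 0x1f == 11.
	 */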
3156 	rxcfg |= RXMAC_MULTICAST;
3157 	bzero(mchash, sizeof(mchash));
3158 
3159 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3160 		if (ifma->ifma_addr->sa_family != AF_LINK)
3161 			continue;
3162 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3163 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3164 
3165 		/* Just want the 6 least significant bits. */
3166 		crc &= 0x3f;
3167 
3168 		/* Set the corresponding bit in the hash table. */
3169 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
3170 	}
3171 
3172 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3173 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3174 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3175 }
3176 
3177 static int
3178 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3179 {
3180 	struct jme_softc *sc = arg1;
3181 	struct ifnet *ifp = &sc->arpcom.ac_if;
3182 	int error, v;
3183 
3184 	ifnet_serialize_all(ifp);
3185 
3186 	v = sc->jme_tx_coal_to;
3187 	error = sysctl_handle_int(oidp, &v, 0, req);
3188 	if (error || req->newptr == NULL)
3189 		goto back;
3190 
3191 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3192 		error = EINVAL;
3193 		goto back;
3194 	}
3195 
3196 	if (v != sc->jme_tx_coal_to) {
3197 		sc->jme_tx_coal_to = v;
3198 		if (ifp->if_flags & IFF_RUNNING)
3199 			jme_set_tx_coal(sc);
3200 	}
3201 back:
3202 	ifnet_deserialize_all(ifp);
3203 	return error;
3204 }
3205 
3206 static int
3207 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3208 {
3209 	struct jme_softc *sc = arg1;
3210 	struct ifnet *ifp = &sc->arpcom.ac_if;
3211 	int error, v;
3212 
3213 	ifnet_serialize_all(ifp);
3214 
3215 	v = sc->jme_tx_coal_pkt;
3216 	error = sysctl_handle_int(oidp, &v, 0, req);
3217 	if (error || req->newptr == NULL)
3218 		goto back;
3219 
3220 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3221 		error = EINVAL;
3222 		goto back;
3223 	}
3224 
3225 	if (v != sc->jme_tx_coal_pkt) {
3226 		sc->jme_tx_coal_pkt = v;
3227 		if (ifp->if_flags & IFF_RUNNING)
3228 			jme_set_tx_coal(sc);
3229 	}
3230 back:
3231 	ifnet_deserialize_all(ifp);
3232 	return error;
3233 }
3234 
3235 static int
3236 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3237 {
3238 	struct jme_softc *sc = arg1;
3239 	struct ifnet *ifp = &sc->arpcom.ac_if;
3240 	int error, v;
3241 
3242 	ifnet_serialize_all(ifp);
3243 
3244 	v = sc->jme_rx_coal_to;
3245 	error = sysctl_handle_int(oidp, &v, 0, req);
3246 	if (error || req->newptr == NULL)
3247 		goto back;
3248 
3249 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3250 		error = EINVAL;
3251 		goto back;
3252 	}
3253 
3254 	if (v != sc->jme_rx_coal_to) {
3255 		sc->jme_rx_coal_to = v;
3256 		if (ifp->if_flags & IFF_RUNNING)
3257 			jme_set_rx_coal(sc);
3258 	}
3259 back:
3260 	ifnet_deserialize_all(ifp);
3261 	return error;
3262 }
3263 
3264 static int
3265 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3266 {
3267 	struct jme_softc *sc = arg1;
3268 	struct ifnet *ifp = &sc->arpcom.ac_if;
3269 	int error, v;
3270 
3271 	ifnet_serialize_all(ifp);
3272 
3273 	v = sc->jme_rx_coal_pkt;
3274 	error = sysctl_handle_int(oidp, &v, 0, req);
3275 	if (error || req->newptr == NULL)
3276 		goto back;
3277 
3278 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3279 		error = EINVAL;
3280 		goto back;
3281 	}
3282 
3283 	if (v != sc->jme_rx_coal_pkt) {
3284 		sc->jme_rx_coal_pkt = v;
3285 		if (ifp->if_flags & IFF_RUNNING)
3286 			jme_set_rx_coal(sc);
3287 	}
3288 back:
3289 	ifnet_deserialize_all(ifp);
3290 	return error;
3291 }
3292 
3293 static void
3294 jme_set_tx_coal(struct jme_softc *sc)
3295 {
3296 	uint32_t reg;
3297 
3298 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3299 	    PCCTX_COAL_TO_MASK;
3300 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3301 	    PCCTX_COAL_PKT_MASK;
3302 	reg |= PCCTX_COAL_TXQ0;
3303 	CSR_WRITE_4(sc, JME_PCCTX, reg);
3304 }
3305 
3306 static void
3307 jme_set_rx_coal(struct jme_softc *sc)
3308 {
3309 	uint32_t reg;
3310 	int r;
3311 
3312 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3313 	    PCCRX_COAL_TO_MASK;
3314 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3315 	    PCCRX_COAL_PKT_MASK;
3316 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3317 		CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3318 }
3319 
3320 #ifdef IFPOLL_ENABLE
3321 
3322 static void
3323 jme_npoll_status(struct ifnet *ifp)
3324 {
3325 	struct jme_softc *sc = ifp->if_softc;
3326 	uint32_t status;
3327 
3328 	ASSERT_SERIALIZED(&sc->jme_serialize);
3329 
3330 	status = CSR_READ_4(sc, JME_INTR_STATUS);
3331 	if (status & INTR_RXQ_DESC_EMPTY) {
3332 		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3333 		jme_rx_restart(sc, status);
3334 	}
3335 }
3336 
3337 static void
3338 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3339 {
3340 	struct jme_rxdata *rdata = arg;
3341 
3342 	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3343 
3344 	jme_rxeof(rdata, cycle, mycpuid);
3345 }
3346 
3347 static void
3348 jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
3349 {
3350 	struct jme_txdata *tdata = arg;
3351 
3352 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3353 
3354 	jme_txeof(tdata);
3355 	if (!ifq_is_empty(&ifp->if_snd))
3356 		if_devstart(ifp);
3357 }
3358 
3359 static void
3360 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3361 {
3362 	struct jme_softc *sc = ifp->if_softc;
3363 
3364 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3365 
3366 	if (info) {
3367 		int i, cpu;
3368 
3369 		info->ifpi_status.status_func = jme_npoll_status;
3370 		info->ifpi_status.serializer = &sc->jme_serialize;
3371 
3372 		cpu = if_ringmap_cpumap(sc->jme_tx_rmap, 0);
3373 		KKASSERT(cpu <= netisr_ncpus);
3374 		info->ifpi_tx[cpu].poll_func = jme_npoll_tx;
3375 		info->ifpi_tx[cpu].arg = &sc->jme_cdata.jme_tx_data;
3376 		info->ifpi_tx[cpu].serializer =
3377 		    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3378 		ifq_set_cpuid(&ifp->if_snd, cpu);
3379 
3380 		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3381 			struct jme_rxdata *rdata =
3382 			    &sc->jme_cdata.jme_rx_data[i];
3383 
3384 			cpu = if_ringmap_cpumap(sc->jme_rx_rmap, i);
3385 			KKASSERT(cpu <= netisr_ncpus);
3386 			info->ifpi_rx[cpu].poll_func = jme_npoll_rx;
3387 			info->ifpi_rx[cpu].arg = rdata;
3388 			info->ifpi_rx[cpu].serializer =
3389 			    &rdata->jme_rx_serialize;
3390 		}
3391 
3392 		if (ifp->if_flags & IFF_RUNNING)
3393 			jme_disable_intr(sc);
3394 	} else {
3395 		ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
3396 		if (ifp->if_flags & IFF_RUNNING)
3397 			jme_enable_intr(sc);
3398 	}
3399 }
3400 
3401 #endif	/* IFPOLL_ENABLE */
3402 
3403 static int
3404 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3405 {
3406 	bus_dmamem_t dmem;
3407 	int error, asize;
3408 
3409 	asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3410 	error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3411 			JME_RX_RING_ALIGN, 0,
3412 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3413 			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3414 	if (error) {
3415 		device_printf(rdata->jme_sc->jme_dev,
3416 		    "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3417 		return error;
3418 	}
3419 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3420 	rdata->jme_rx_ring_map = dmem.dmem_map;
3421 	rdata->jme_rx_ring = dmem.dmem_addr;
3422 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3423 
3424 	return 0;
3425 }
3426 
3427 static int
3428 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3429 {
3430 	if ((paddr & 0xffffffff) == 0) {
3431 		/*
3432 		 * Don't allow the lower 32bits of an RX buffer's
3433 		 * physical address to be 0; otherwise it would break
3434 		 * the detection of pending RSS information delivery
3435 		 * on the RX path.
3436 		 */
3437 		return 1;
3438 	}
3439 	return 0;
3440 }
3441 
3442 static int
3443 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3444 {
3445 	bus_addr_t lowaddr;
3446 	int i, error;
3447 
3448 	lowaddr = BUS_SPACE_MAXADDR;
3449 	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
3450 		/* jme_rxbuf_dma_filter will be called */
3451 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
3452 	}
3453 
3454 	/* Create tag for Rx buffers. */
3455 	error = bus_dma_tag_create(
3456 	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3457 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3458 	    lowaddr,			/* lowaddr */
3459 	    BUS_SPACE_MAXADDR,		/* highaddr */
3460 	    jme_rxbuf_dma_filter, NULL,	/* filter, filterarg */
3461 	    MCLBYTES,			/* maxsize */
3462 	    1,				/* nsegments */
3463 	    MCLBYTES,			/* maxsegsize */
3464 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3465 	    &rdata->jme_rx_tag);
3466 	if (error) {
3467 		device_printf(rdata->jme_sc->jme_dev,
3468 		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3469 		return error;
3470 	}
3471 
3472 	/* Create DMA maps for Rx buffers. */
3473 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3474 				  &rdata->jme_rx_sparemap);
3475 	if (error) {
3476 		device_printf(rdata->jme_sc->jme_dev,
3477 		    "could not create %dth spare Rx dmamap.\n",
3478 		    rdata->jme_rx_idx);
3479 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3480 		rdata->jme_rx_tag = NULL;
3481 		return error;
3482 	}
3483 	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3484 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3485 
3486 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3487 					  &rxd->rx_dmamap);
3488 		if (error) {
3489 			int j;
3490 
3491 			device_printf(rdata->jme_sc->jme_dev,
3492 			    "could not create %dth Rx dmamap "
3493 			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3494 
3495 			for (j = 0; j < i; ++j) {
3496 				rxd = &rdata->jme_rxdesc[j];
3497 				bus_dmamap_destroy(rdata->jme_rx_tag,
3498 						   rxd->rx_dmamap);
3499 			}
3500 			bus_dmamap_destroy(rdata->jme_rx_tag,
3501 					   rdata->jme_rx_sparemap);
3502 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3503 			rdata->jme_rx_tag = NULL;
3504 			return error;
3505 		}
3506 	}
3507 	return 0;
3508 }
3509 
3510 static void
3511 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3512 {
3513 	int r, cpuid = mycpuid;
3514 
3515 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3516 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3517 
3518 		if (status & rdata->jme_rx_coal) {
3519 			lwkt_serialize_enter(&rdata->jme_rx_serialize);
3520 			jme_rxeof(rdata, -1, cpuid);
3521 			lwkt_serialize_exit(&rdata->jme_rx_serialize);
3522 		}
3523 	}
3524 }
3525 
3526 static void
3527 jme_enable_rss(struct jme_softc *sc)
3528 {
3529 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3530 	uint32_t rssc;
3531 	int j, i, r;
3532 
3533 	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3534 		sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3535 		("%s: invalid # of RX rings (%d)",
3536 		 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3537 	jme_disable_rss(sc);
3538 
3539 	toeplitz_get_key(key, sizeof(key));
3540 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3541 		uint32_t keyreg;
3542 
3543 		keyreg = RSSKEY_REGVAL(key, i);
3544 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x, reg 0x%08x\n",
3545 		    i, keyreg, RSSKEY_REG(RSSKEY_NREGS - 1 - i));
3546 
3547 		CSR_WRITE_4(sc, RSSKEY_REG(RSSKEY_NREGS - 1 - i), keyreg);
3548 	}
3549 
3550 	/*
3551 	 * Fill redirect table.
3552 	 */
3553 	if_ringmap_rdrtable(sc->jme_rx_rmap, sc->jme_rdrtable,
3554 	    JME_RDRTABLE_SIZE);
3555 
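	/*
	 * Each 32bit RSSTBL register packs RSSTBL_REGSIZE 8bit ring
	 * indexes; e.g. with four entries per register, redirect
	 * entries {0, 1, 0, 1} are written as 0x01000100.
	 */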
3556 	r = 0;
3557 	for (j = 0; j < RSSTBL_NREGS; ++j) {
3558 		uint32_t ind = 0;
3559 
3560 		for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3561 			int q;
3562 
3563 			q = sc->jme_rdrtable[r];
3564 			ind |= q << (i * 8);
3565 			++r;
3566 		}
3567 		JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3568 		CSR_WRITE_4(sc, RSSTBL_REG(j), ind);
3569 	}
3570 
3571 	/*
3572 	 * Enable RSS.
3573 	 */
3574 	rssc = RSSC_HASH_128_ENTRY;
3575 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
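	/* ring_cnt is 2 or 4 (asserted above), so this encodes 1 or 2. */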
3576 	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3577 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3578 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3579 }
3580 
3581 static void
3582 jme_disable_rss(struct jme_softc *sc)
3583 {
3584 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3585 }
3586 
3587 static void
3588 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3589 {
3590 	struct jme_softc *sc = ifp->if_softc;
3591 
3592 	ifnet_serialize_array_enter(sc->jme_serialize_arr,
3593 	    sc->jme_serialize_cnt, slz);
3594 }
3595 
3596 static void
3597 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3598 {
3599 	struct jme_softc *sc = ifp->if_softc;
3600 
3601 	ifnet_serialize_array_exit(sc->jme_serialize_arr,
3602 	    sc->jme_serialize_cnt, slz);
3603 }
3604 
3605 static int
3606 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3607 {
3608 	struct jme_softc *sc = ifp->if_softc;
3609 
3610 	return ifnet_serialize_array_try(sc->jme_serialize_arr,
3611 	    sc->jme_serialize_cnt, slz);
3612 }
3613 
3614 #ifdef INVARIANTS
3615 
3616 static void
3617 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3618     boolean_t serialized)
3619 {
3620 	struct jme_softc *sc = ifp->if_softc;
3621 
3622 	ifnet_serialize_array_assert(sc->jme_serialize_arr,
3623 	    sc->jme_serialize_cnt, slz, serialized);
3624 }
3625 
3626 #endif	/* INVARIANTS */
3627 
3628 static void
3629 jme_msix_try_alloc(device_t dev)
3630 {
3631 	struct jme_softc *sc = device_get_softc(dev);
3632 	struct jme_msix_data *msix;
3633 	int error, i, r, msix_enable, msix_count;
3634 
3635 	msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
3636 	KKASSERT(msix_count <= JME_NMSIX);
3637 
3638 	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3639 
3640 	/*
3641 	 * We leave the 1st MSI-X vector unused, so we
3642 	 * actually need msix_count + 1 MSI-X vectors.
3643 	 */
3644 	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3645 		return;
3646 
3647 	for (i = 0; i < msix_count; ++i)
3648 		sc->jme_msix[i].jme_msix_rid = -1;
3649 
3650 	i = 0;
3651 
3652 	/*
3653 	 * Setup status MSI-X
3654 	 */
3655 	msix = &sc->jme_msix[i++];
3656 	msix->jme_msix_cpuid = 0;
3657 	msix->jme_msix_arg = sc;
3658 	msix->jme_msix_func = jme_msix_status;
3659 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3660 		msix->jme_msix_intrs |=
3661 		    sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
3662 	}
3663 	msix->jme_msix_serialize = &sc->jme_serialize;
3664 	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
3665 	    device_get_nameunit(dev));
3666 
3667 	/*
3668 	 * Setup TX MSI-X
3669 	 */
3670 	msix = &sc->jme_msix[i++];
3671 	msix->jme_msix_cpuid = if_ringmap_cpumap(sc->jme_tx_rmap, 0);
3672 	sc->jme_tx_cpuid = msix->jme_msix_cpuid;
3673 	msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data;
3674 	msix->jme_msix_func = jme_msix_tx;
3675 	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3676 	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3677 	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3678 	    device_get_nameunit(dev));
3679 
3680 	/*
3681 	 * Setup RX MSI-X
3682 	 */
3683 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3684 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3685 
3686 		msix = &sc->jme_msix[i++];
3687 		msix->jme_msix_cpuid = if_ringmap_cpumap(sc->jme_rx_rmap, r);
3688 		KKASSERT(msix->jme_msix_cpuid < netisr_ncpus);
3689 		msix->jme_msix_arg = rdata;
3690 		msix->jme_msix_func = jme_msix_rx;
3691 		msix->jme_msix_intrs = rdata->jme_rx_coal;
3692 		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3693 		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3694 		    "%s rx%d", device_get_nameunit(dev), r);
3695 	}
3696 
3697 	KKASSERT(i == msix_count);
3698 
3699 	error = pci_setup_msix(dev);
3700 	if (error)
3701 		return;
3702 
3703 	/* Record jme_msix_cnt early, so the error path can clean up */
3704 	sc->jme_msix_cnt = msix_count;
3705 
3706 	for (i = 0; i < msix_count; ++i) {
3707 		msix = &sc->jme_msix[i];
3708 
3709 		msix->jme_msix_vector = i + 1;
3710 		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3711 		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
3712 		if (error)
3713 			goto back;
3714 
3715 		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3716 		    &msix->jme_msix_rid, RF_ACTIVE);
3717 		if (msix->jme_msix_res == NULL) {
3718 			error = ENOMEM;
3719 			goto back;
3720 		}
3721 	}
3722 
3723 	for (i = 0; i < JME_INTR_CNT; ++i) {
3724 		uint32_t intr_mask = (1 << i);
3725 		int x;
3726 
3727 		if ((JME_INTRS & intr_mask) == 0)
3728 			continue;
3729 
3730 		for (x = 0; x < msix_count; ++x) {
3731 			msix = &sc->jme_msix[x];
3732 			if (msix->jme_msix_intrs & intr_mask) {
3733 				int reg, shift;
3734 
3735 				reg = i / JME_MSINUM_FACTOR;
3736 				KKASSERT(reg < JME_MSINUM_CNT);
3737 
3738 				shift = (i % JME_MSINUM_FACTOR) * 4;
3739 
3740 				sc->jme_msinum[reg] |=
3741 				    (msix->jme_msix_vector << shift);
3742 
3743 				break;
3744 			}
3745 		}
3746 	}
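	/*
	 * Example of the MSINUM routing computed above, assuming
	 * JME_MSINUM_FACTOR is 8 (eight 4-bit vector fields per
	 * 32-bit register): interrupt source bit 10 is steered by
	 * bits 8..11 of MSINUM register 1, since 10 / 8 == 1 and
	 * (10 % 8) * 4 == 8.
	 */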
3747 
3748 	if (bootverbose) {
3749 		for (i = 0; i < JME_MSINUM_CNT; ++i) {
3750 			device_printf(dev, "MSINUM%d: %#x\n", i,
3751 			    sc->jme_msinum[i]);
3752 		}
3753 	}
3754 
3755 	pci_enable_msix(dev);
3756 	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3757 
3758 back:
3759 	if (error)
3760 		jme_msix_free(dev);
3761 }
3762 
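/*
 * jme_intr_alloc() below tries MSI-X first via jme_msix_try_alloc();
 * if MSI-X could not be set up, pci_alloc_1intr() falls back to a
 * single MSI or legacy INTx vector.
 */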
3763 static int
3764 jme_intr_alloc(device_t dev)
3765 {
3766 	struct jme_softc *sc = device_get_softc(dev);
3767 	u_int irq_flags;
3768 
3769 	jme_msix_try_alloc(dev);
3770 
3771 	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3772 		sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3773 		    &sc->jme_irq_rid, &irq_flags);
3774 
3775 		sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3776 		    &sc->jme_irq_rid, irq_flags);
3777 		if (sc->jme_irq_res == NULL) {
3778 			device_printf(dev, "can't allocate irq\n");
3779 			return ENXIO;
3780 		}
3781 		sc->jme_tx_cpuid = rman_get_cpuid(sc->jme_irq_res);
3782 	}
3783 	return 0;
3784 }
3785 
3786 static void
3787 jme_msix_free(device_t dev)
3788 {
3789 	struct jme_softc *sc = device_get_softc(dev);
3790 	int i;
3791 
3792 	KKASSERT(sc->jme_msix_cnt > 1);
3793 
3794 	for (i = 0; i < sc->jme_msix_cnt; ++i) {
3795 		struct jme_msix_data *msix = &sc->jme_msix[i];
3796 
3797 		if (msix->jme_msix_res != NULL) {
3798 			bus_release_resource(dev, SYS_RES_IRQ,
3799 			    msix->jme_msix_rid, msix->jme_msix_res);
3800 			msix->jme_msix_res = NULL;
3801 		}
3802 		if (msix->jme_msix_rid >= 0) {
3803 			pci_release_msix_vector(dev, msix->jme_msix_rid);
3804 			msix->jme_msix_rid = -1;
3805 		}
3806 	}
3807 	pci_teardown_msix(dev);
3808 }
3809 
3810 static void
3811 jme_intr_free(device_t dev)
3812 {
3813 	struct jme_softc *sc = device_get_softc(dev);
3814 
3815 	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3816 		if (sc->jme_irq_res != NULL) {
3817 			bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3818 					     sc->jme_irq_res);
3819 		}
3820 		if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3821 			pci_release_msi(dev);
3822 	} else {
3823 		jme_msix_free(dev);
3824 	}
3825 }
3826 
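/*
 * The MSI-X handlers below share one pattern: mask this vector's
 * interrupt sources, ack the latched status bits, do the work under
 * the relevant serializer, then unmask.  Masking first keeps the
 * sources from refiring while the handler runs.
 */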
3827 static void
3828 jme_msix_tx(void *xtdata)
3829 {
3830 	struct jme_txdata *tdata = xtdata;
3831 	struct jme_softc *sc = tdata->jme_sc;
3832 	struct ifnet *ifp = &sc->arpcom.ac_if;
3833 
3834 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3835 
3836 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3837 
3838 	CSR_WRITE_4(sc, JME_INTR_STATUS,
3839 	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3840 
3841 	if (ifp->if_flags & IFF_RUNNING) {
3842 		jme_txeof(tdata);
3843 		if (!ifq_is_empty(&ifp->if_snd))
3844 			if_devstart(ifp);
3845 	}
3846 
3847 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3848 }
3849 
3850 static void
3851 jme_msix_rx(void *xrdata)
3852 {
3853 	struct jme_rxdata *rdata = xrdata;
3854 	struct jme_softc *sc = rdata->jme_sc;
3855 	struct ifnet *ifp = &sc->arpcom.ac_if;
3856 
3857 	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3858 
3859 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);
3860 
3861 	CSR_WRITE_4(sc, JME_INTR_STATUS,
3862 	    rdata->jme_rx_coal | rdata->jme_rx_comp);
3863 
3864 	if (ifp->if_flags & IFF_RUNNING)
3865 		jme_rxeof(rdata, -1, mycpuid);
3866 
3867 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
3868 }
3869 
3870 static void
3871 jme_msix_status(void *xsc)
3872 {
3873 	struct jme_softc *sc = xsc;
3874 	struct ifnet *ifp = &sc->arpcom.ac_if;
3875 	uint32_t status;
3876 
3877 	ASSERT_SERIALIZED(&sc->jme_serialize);
3878 
3879 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);
3880 
3881 	status = CSR_READ_4(sc, JME_INTR_STATUS);
3882 
3883 	if (status & INTR_RXQ_DESC_EMPTY) {
3884 		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3885 		if (ifp->if_flags & IFF_RUNNING)
3886 			jme_rx_restart(sc, status);
3887 	}
3888 
3889 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
3890 }
3891 
3892 static void
3893 jme_rx_restart(struct jme_softc *sc, uint32_t status)
3894 {
3895 	int i, cpuid = mycpuid;
3896 
3897 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3898 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
3899 
3900 		if (status & rdata->jme_rx_empty) {
3901 			lwkt_serialize_enter(&rdata->jme_rx_serialize);
3902 			jme_rxeof(rdata, -1, cpuid);
3903 #ifdef JME_RSS_DEBUG
3904 			rdata->jme_rx_emp++;
3905 #endif
3906 			lwkt_serialize_exit(&rdata->jme_rx_serialize);
3907 		}
3908 	}
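	/*
	 * With the empty rings drained and refilled above, re-enable
	 * the RX MAC and restart the RX queue.
	 */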
3909 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
3910 	    RXCSR_RXQ_START);
3911 }
3912 
3913 static void
3914 jme_set_msinum(struct jme_softc *sc)
3915 {
3916 	int i;
3917 
3918 	for (i = 0; i < JME_MSINUM_CNT; ++i)
3919 		CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3920 }
3921 
3922 static int
3923 jme_intr_setup(device_t dev)
3924 {
3925 	struct jme_softc *sc = device_get_softc(dev);
3926 	int error;
3927 
3928 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3929 		return jme_msix_setup(dev);
3930 
3931 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3932 	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3933 	if (error) {
3934 		device_printf(dev, "could not set up interrupt handler.\n");
3935 		return error;
3936 	}
3937 
3938 	return 0;
3939 }
3940 
3941 static void
3942 jme_intr_teardown(device_t dev)
3943 {
3944 	struct jme_softc *sc = device_get_softc(dev);
3945 
3946 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3947 		jme_msix_teardown(dev, sc->jme_msix_cnt);
3948 	else
3949 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3950 }
3951 
3952 static int
3953 jme_msix_setup(device_t dev)
3954 {
3955 	struct jme_softc *sc = device_get_softc(dev);
3956 	int x;
3957 
3958 	for (x = 0; x < sc->jme_msix_cnt; ++x) {
3959 		struct jme_msix_data *msix = &sc->jme_msix[x];
3960 		int error;
3961 
3962 		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3963 		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3964 		    &msix->jme_msix_handle, msix->jme_msix_serialize,
3965 		    msix->jme_msix_desc);
3966 		if (error) {
3967 			device_printf(dev, "could not set up %s "
3968 			    "interrupt handler.\n", msix->jme_msix_desc);
3969 			jme_msix_teardown(dev, x);
3970 			return error;
3971 		}
3972 	}
3973 	return 0;
3974 }
3975 
3976 static void
3977 jme_msix_teardown(device_t dev, int msix_count)
3978 {
3979 	struct jme_softc *sc = device_get_softc(dev);
3980 	int x;
3981 
3982 	for (x = 0; x < msix_count; ++x) {
3983 		struct jme_msix_data *msix = &sc->jme_msix[x];
3984 
3985 		bus_teardown_intr(dev, msix->jme_msix_res,
3986 		    msix->jme_msix_handle);
3987 	}
3988 }
3989 
3990 static void
3991 jme_serialize_skipmain(struct jme_softc *sc)
3992 {
3993 	lwkt_serialize_array_enter(sc->jme_serialize_arr,
3994 	    sc->jme_serialize_cnt, 1);
3995 }
3996 
3997 static void
3998 jme_deserialize_skipmain(struct jme_softc *sc)
3999 {
4000 	lwkt_serialize_array_exit(sc->jme_serialize_arr,
4001 	    sc->jme_serialize_cnt, 1);
4002 }
4003 
4004 static void
4005 jme_enable_intr(struct jme_softc *sc)
4006 {
4007 	int i;
4008 
4009 	for (i = 0; i < sc->jme_serialize_cnt; ++i)
4010 		lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);
4011 
4012 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
4013 }
4014 
4015 static void
4016 jme_disable_intr(struct jme_softc *sc)
4017 {
4018 	int i;
4019 
4020 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
4021 
4022 	for (i = 0; i < sc->jme_serialize_cnt; ++i)
4023 		lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);
4024 }
4025 
4026 static void
4027 jme_phy_poweron(struct jme_softc *sc)
4028 {
4029 	uint16_t bmcr;
4030 
4031 	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
4032 	bmcr &= ~BMCR_PDOWN;
4033 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
4034 
4035 	if (sc->jme_caps & JME_CAP_PHYPWR) {
4036 		uint32_t val;
4037 
4038 		val = CSR_READ_4(sc, JME_PHYPWR);
4039 		val &= ~(PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
4040 		    PHYPWR_DOWN2 | PHYPWR_CLKSEL);
4041 		CSR_WRITE_4(sc, JME_PHYPWR, val);
4042 
4043 		val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
4044 		val &= ~PE1_GPREG0_PHYBG;
4045 		val |= PE1_GPREG0_ENBG;
4046 		pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
4047 	}
4048 }
4049 
4050 static void
4051 jme_phy_poweroff(struct jme_softc *sc)
4052 {
4053 	uint16_t bmcr;
4054 
4055 	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
4056 	bmcr |= BMCR_PDOWN;
4057 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
4058 
4059 	if (sc->jme_caps & JME_CAP_PHYPWR) {
4060 		uint32_t val;
4061 
4062 		val = CSR_READ_4(sc, JME_PHYPWR);
4063 		val |= PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
4064 		    PHYPWR_DOWN2 | PHYPWR_CLKSEL;
4065 		CSR_WRITE_4(sc, JME_PHYPWR, val);
4066 
4067 		val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
4068 		val &= ~PE1_GPREG0_PHYBG;
4069 		val |= PE1_GPREG0_PDD3COLD;
4070 		pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
4071 	}
4072 }
4073 
4074 static int
4075 jme_miiext_read(struct jme_softc *sc, int reg)
4076 {
4077 	int addr;
4078 
4079 	addr = JME_MII_EXT_ADDR_RD | reg;
4080 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4081 	    JME_MII_EXT_ADDR, addr);
4082 	return jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr,
4083 	    JME_MII_EXT_DATA);
4084 }
4085 
4086 static void
4087 jme_miiext_write(struct jme_softc *sc, int reg, int val)
4088 {
4089 	int addr;
4090 
4091 	addr = JME_MII_EXT_ADDR_WR | reg;
4092 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4093 	    JME_MII_EXT_DATA, val);
4094 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4095 	    JME_MII_EXT_ADDR, addr);
4096 }
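/*
 * Note the ordering difference above: jme_miiext_read() programs the
 * address (with the RD command) before reading the data register,
 * while jme_miiext_write() loads the data register first -- the
 * write is presumably latched by the subsequent address-register
 * write carrying the WR command.
 */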
4097 
4098 static void
4099 jme_phy_init(struct jme_softc *sc)
4100 {
4101 	uint16_t gtcr;
4102 	int val;
4103 
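	/* Power-cycle the PHY so calibration starts from a known state. */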
4104 	jme_phy_poweroff(sc);
4105 	jme_phy_poweron(sc);
4106 
4107 	/* Enable PHY test 1 */
4108 	gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
4109 	gtcr &= ~GTCR_TEST_MASK;
4110 	gtcr |= GTCR_TEST_1;
4111 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);
4112 
4113 	val = jme_miiext_read(sc, JME_MII_EXT_COM2);
4114 	val &= ~JME_MII_EXT_COM2_CALIB_MODE0;
4115 	val |= JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN;
4116 	jme_miiext_write(sc, JME_MII_EXT_COM2, val);
4117 
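	/* Give the PHY self-calibration started above time to complete. */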
4118 	DELAY(20000);
4119 
4120 	val = jme_miiext_read(sc, JME_MII_EXT_COM2);
4121 	val &= ~(JME_MII_EXT_COM2_CALIB_MODE0 |
4122 	    JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN);
4123 	jme_miiext_write(sc, JME_MII_EXT_COM2, val);
4124 
4125 	/* Disable PHY test */
4126 	gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
4127 	gtcr &= ~GTCR_TEST_MASK;
4128 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);
4129 
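	/*
	 * Restore chip-specific PHY tuning values, if any were
	 * recorded earlier (presumably at attach time).
	 */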
4130 	if (sc->jme_phycom0 != 0)
4131 		jme_miiext_write(sc, JME_MII_EXT_COM0, sc->jme_phycom0);
4132 	if (sc->jme_phycom1 != 0)
4133 		jme_miiext_write(sc, JME_MII_EXT_COM1, sc->jme_phycom1);
4134 }
4135