xref: /dragonfly/sys/dev/netif/jme/if_jme.c (revision 33311965)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29 
30 #include "opt_ifpoll.h"
31 #include "opt_jme.h"
32 
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46 
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_poll.h>
54 #include <net/ifq_var.h>
55 #include <net/toeplitz.h>
56 #include <net/toeplitz2.h>
57 #include <net/vlan/if_vlan_var.h>
58 #include <net/vlan/if_vlan_ether.h>
59 
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 
63 #include <dev/netif/mii_layer/mii.h>
64 #include <dev/netif/mii_layer/miivar.h>
65 #include <dev/netif/mii_layer/jmphyreg.h>
66 
67 #include <bus/pci/pcireg.h>
68 #include <bus/pci/pcivar.h>
69 #include "pcidevs.h"
70 
71 #include <dev/netif/jme/if_jmereg.h>
72 #include <dev/netif/jme/if_jmevar.h>
73 
74 #include "miibus_if.h"
75 
76 #define JME_TICK_CPUID		0	/* DO NOT CHANGE THIS */
77 
78 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
79 
80 #ifdef JME_RSS_DEBUG
81 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
82 do { \
83 	if ((sc)->jme_rss_debug >= (lvl)) \
84 		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
85 } while (0)
86 #else	/* !JME_RSS_DEBUG */
87 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
88 #endif	/* JME_RSS_DEBUG */
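/*
 * A hypothetical usage sketch (not part of the original file): with
 * JME_RSS_DEBUG compiled in, a call such as
 *
 *	JME_RSS_DPRINTF(sc, 1, "ring %d, pkt %d\n", ring, cnt);
 *
 * prints via if_printf(9) only when the hw.jmeX.rss_debug sysctl
 * (created in jme_sysctl_node() below) is >= 1; otherwise it
 * compiles away to ((void)0).
 */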
89 
90 static int	jme_probe(device_t);
91 static int	jme_attach(device_t);
92 static int	jme_detach(device_t);
93 static int	jme_shutdown(device_t);
94 static int	jme_suspend(device_t);
95 static int	jme_resume(device_t);
96 
97 static int	jme_miibus_readreg(device_t, int, int);
98 static int	jme_miibus_writereg(device_t, int, int, int);
99 static void	jme_miibus_statchg(device_t);
100 
101 static void	jme_init(void *);
102 static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
103 static void	jme_start(struct ifnet *, struct ifaltq_subque *);
104 static void	jme_watchdog(struct ifnet *);
105 static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
106 static int	jme_mediachange(struct ifnet *);
107 #ifdef IFPOLL_ENABLE
108 static void	jme_npoll(struct ifnet *, struct ifpoll_info *);
109 static void	jme_npoll_status(struct ifnet *);
110 static void	jme_npoll_rx(struct ifnet *, void *, int);
111 static void	jme_npoll_tx(struct ifnet *, void *, int);
112 #endif
113 static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
114 static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
115 static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
116 #ifdef INVARIANTS
117 static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
118 		    boolean_t);
119 #endif
120 
121 static void	jme_intr(void *);
122 static void	jme_msix_tx(void *);
123 static void	jme_msix_rx(void *);
124 static void	jme_msix_status(void *);
125 static void	jme_txeof(struct jme_txdata *);
126 static void	jme_rxeof(struct jme_rxdata *, int, int);
127 static void	jme_rx_intr(struct jme_softc *, uint32_t);
128 static void	jme_enable_intr(struct jme_softc *);
129 static void	jme_disable_intr(struct jme_softc *);
130 static void	jme_rx_restart(struct jme_softc *, uint32_t);
131 
132 static int	jme_msix_setup(device_t);
133 static void	jme_msix_teardown(device_t, int);
134 static int	jme_intr_setup(device_t);
135 static void	jme_intr_teardown(device_t);
136 static void	jme_msix_try_alloc(device_t);
137 static void	jme_msix_free(device_t);
138 static int	jme_intr_alloc(device_t);
139 static void	jme_intr_free(device_t);
140 static int	jme_dma_alloc(struct jme_softc *);
141 static void	jme_dma_free(struct jme_softc *);
142 static int	jme_init_rx_ring(struct jme_rxdata *);
143 static void	jme_init_tx_ring(struct jme_txdata *);
144 static void	jme_init_ssb(struct jme_softc *);
145 static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
146 static int	jme_encap(struct jme_txdata *, struct mbuf **, int *);
147 static void	jme_rxpkt(struct jme_rxdata *, int);
148 static int	jme_rxring_dma_alloc(struct jme_rxdata *);
149 static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);
150 static int	jme_rxbuf_dma_filter(void *, bus_addr_t);
151 
152 static void	jme_tick(void *);
153 static void	jme_stop(struct jme_softc *);
154 static void	jme_reset(struct jme_softc *);
155 static void	jme_set_msinum(struct jme_softc *);
156 static void	jme_set_vlan(struct jme_softc *);
157 static void	jme_set_filter(struct jme_softc *);
158 static void	jme_stop_tx(struct jme_softc *);
159 static void	jme_stop_rx(struct jme_softc *);
160 static void	jme_mac_config(struct jme_softc *);
161 static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
162 static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
163 static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
164 #ifdef notyet
165 static void	jme_setwol(struct jme_softc *);
166 static void	jme_setlinkspeed(struct jme_softc *);
167 #endif
168 static void	jme_set_tx_coal(struct jme_softc *);
169 static void	jme_set_rx_coal(struct jme_softc *);
170 static void	jme_enable_rss(struct jme_softc *);
171 static void	jme_disable_rss(struct jme_softc *);
172 static void	jme_serialize_skipmain(struct jme_softc *);
173 static void	jme_deserialize_skipmain(struct jme_softc *);
174 static void	jme_phy_poweron(struct jme_softc *);
175 static void	jme_phy_poweroff(struct jme_softc *);
176 static int	jme_miiext_read(struct jme_softc *, int);
177 static void	jme_miiext_write(struct jme_softc *, int, int);
178 static void	jme_phy_init(struct jme_softc *);
179 
180 static void	jme_sysctl_node(struct jme_softc *);
181 static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
182 static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
183 static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
184 static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
185 #ifdef IFPOLL_ENABLE
186 static int	jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
187 static int	jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
188 #endif
189 
190 /*
191  * Devices supported by this driver.
192  */
193 static const struct jme_dev {
194 	uint16_t	jme_vendorid;
195 	uint16_t	jme_deviceid;
196 	uint32_t	jme_caps;
197 	const char	*jme_name;
198 } jme_devs[] = {
199 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
200 	    JME_CAP_JUMBO,
201 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
202 	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
203 	    JME_CAP_FASTETH,
204 	    "JMicron Inc, JMC260 Fast Ethernet" },
205 	{ 0, 0, 0, NULL }
206 };
207 
208 static device_method_t jme_methods[] = {
209 	/* Device interface. */
210 	DEVMETHOD(device_probe,		jme_probe),
211 	DEVMETHOD(device_attach,	jme_attach),
212 	DEVMETHOD(device_detach,	jme_detach),
213 	DEVMETHOD(device_shutdown,	jme_shutdown),
214 	DEVMETHOD(device_suspend,	jme_suspend),
215 	DEVMETHOD(device_resume,	jme_resume),
216 
217 	/* Bus interface. */
218 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
219 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
220 
221 	/* MII interface. */
222 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
223 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
224 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
225 
226 	{ NULL, NULL }
227 };
228 
229 static driver_t jme_driver = {
230 	"jme",
231 	jme_methods,
232 	sizeof(struct jme_softc)
233 };
234 
235 static devclass_t jme_devclass;
236 
237 DECLARE_DUMMY_MODULE(if_jme);
238 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
239 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
240 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
241 
242 static const struct {
243 	uint32_t	jme_coal;
244 	uint32_t	jme_comp;
245 	uint32_t	jme_empty;
246 } jme_rx_status[JME_NRXRING_MAX] = {
247 	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
248 	  INTR_RXQ0_DESC_EMPTY },
249 	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
250 	  INTR_RXQ1_DESC_EMPTY },
251 	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
252 	  INTR_RXQ2_DESC_EMPTY },
253 	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
254 	  INTR_RXQ3_DESC_EMPTY }
255 };
256 
257 static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
258 static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
259 static int	jme_rx_ring_count = 0;
260 static int	jme_msi_enable = 1;
261 static int	jme_msix_enable = 1;
262 
263 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
264 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
265 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
266 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
267 TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
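/*
 * These tunables are read from the kernel environment at boot.  A
 * sketch of how they might be set from /boot/loader.conf (the values
 * here are only examples):
 *
 *	hw.jme.msix.enable="0"		# fall back to MSI/legacy intr
 *	hw.jme.rx_ring_count="4"	# use up to 4 RX rings (RSS)
 *	hw.jme.rx_desc_count="512"	# descriptors per RX ring
 */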
268 
269 static __inline void
270 jme_setup_rxdesc(struct jme_rxdesc *rxd)
271 {
272 	struct jme_desc *desc;
273 
274 	desc = rxd->rx_desc;
275 	desc->buflen = htole32(MCLBYTES);
276 	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
277 	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
278 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
279 }
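/*
 * For reference, JME_ADDR_LO()/JME_ADDR_HI() above split a 64-bit bus
 * address into the two 32-bit halves the descriptor expects.  A minimal
 * sketch, assuming the usual definitions (the real ones live in
 * if_jmereg.h):
 *
 *	#define JME_ADDR_LO(x)	((uint32_t)((uint64_t)(x) & 0xffffffff))
 *	#define JME_ADDR_HI(x)	((uint32_t)((uint64_t)(x) >> 32))
 */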
280 
281 /*
282  *	Read a PHY register on the MII of the JMC250.
283  */
284 static int
285 jme_miibus_readreg(device_t dev, int phy, int reg)
286 {
287 	struct jme_softc *sc = device_get_softc(dev);
288 	uint32_t val;
289 	int i;
290 
291 	/* For the FPGA version, PHY address 0 should be ignored. */
292 	if (sc->jme_caps & JME_CAP_FPGA) {
293 		if (phy == 0)
294 			return (0);
295 	} else {
296 		if (sc->jme_phyaddr != phy)
297 			return (0);
298 	}
299 
300 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
301 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
302 
303 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
304 		DELAY(1);
305 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
306 			break;
307 	}
308 	if (i == 0) {
309 		device_printf(sc->jme_dev, "phy read timeout: "
310 			      "phy %d, reg %d\n", phy, reg);
311 		return (0);
312 	}
313 
314 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
315 }
316 
317 /*
318  *	Write a PHY register on the MII of the JMC250.
319  */
320 static int
321 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
322 {
323 	struct jme_softc *sc = device_get_softc(dev);
324 	int i;
325 
326 	/* For the FPGA version, PHY address 0 should be ignored. */
327 	if (sc->jme_caps & JME_CAP_FPGA) {
328 		if (phy == 0)
329 			return (0);
330 	} else {
331 		if (sc->jme_phyaddr != phy)
332 			return (0);
333 	}
334 
335 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
336 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
337 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
338 
339 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
340 		DELAY(1);
341 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
342 			break;
343 	}
344 	if (i == 0) {
345 		device_printf(sc->jme_dev, "phy write timeout: "
346 			      "phy %d, reg %d\n", phy, reg);
347 	}
348 
349 	return (0);
350 }
351 
352 /*
353  *	Callback from MII layer when media changes.
354  */
355 static void
356 jme_miibus_statchg(device_t dev)
357 {
358 	struct jme_softc *sc = device_get_softc(dev);
359 	struct ifnet *ifp = &sc->arpcom.ac_if;
360 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
361 	struct mii_data *mii;
362 	struct jme_txdesc *txd;
363 	bus_addr_t paddr;
364 	int i, r;
365 
366 	if (sc->jme_in_tick)
367 		jme_serialize_skipmain(sc);
368 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
369 
370 	if ((ifp->if_flags & IFF_RUNNING) == 0)
371 		goto done;
372 
373 	mii = device_get_softc(sc->jme_miibus);
374 
375 	sc->jme_has_link = FALSE;
376 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
377 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
378 		case IFM_10_T:
379 		case IFM_100_TX:
380 			sc->jme_has_link = TRUE;
381 			break;
382 		case IFM_1000_T:
383 			if (sc->jme_caps & JME_CAP_FASTETH)
384 				break;
385 			sc->jme_has_link = TRUE;
386 			break;
387 		default:
388 			break;
389 		}
390 	}
391 
392 	/*
393 	 * Disabling the Rx/Tx MACs has a side-effect of resetting
394 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
395 	 * the Tx/Rx descriptor rings, so the driver must reset its
396 	 * internal producer/consumer pointers and reclaim any
397 	 * allocated resources.  Note, just saving the value of the
398 	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
399 	 * and restoring them afterwards is not sufficient to
400 	 * guarantee a correct MAC state, because stopping the MAC
401 	 * can take a while and the hardware might have updated
402 	 * the JME_TXNDA/JME_RXNDA registers during the stop
403 	 * operation.
404 	 */
405 
406 	/* Disable interrupts */
407 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
408 
409 	/* Stop driver */
410 	ifp->if_flags &= ~IFF_RUNNING;
411 	ifq_clr_oactive(&ifp->if_snd);
412 	ifp->if_timer = 0;
413 	callout_stop(&sc->jme_tick_ch);
414 
415 	/* Stop receiver/transmitter. */
416 	jme_stop_rx(sc);
417 	jme_stop_tx(sc);
418 
419 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
420 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
421 
422 		jme_rxeof(rdata, -1, -1);
423 		if (rdata->jme_rxhead != NULL)
424 			m_freem(rdata->jme_rxhead);
425 		JME_RXCHAIN_RESET(rdata);
426 
427 		/*
428 		 * Reuse configured Rx descriptors and reset
429 		 * producer/consumer index.
430 		 */
431 		rdata->jme_rx_cons = 0;
432 	}
433 	if (JME_ENABLE_HWRSS(sc))
434 		jme_enable_rss(sc);
435 	else
436 		jme_disable_rss(sc);
437 
438 	jme_txeof(tdata);
439 	if (tdata->jme_tx_cnt != 0) {
440 		/* Remove queued packets for transmit. */
441 		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
442 			txd = &tdata->jme_txdesc[i];
443 			if (txd->tx_m != NULL) {
444 				bus_dmamap_unload(tdata->jme_tx_tag,
445 				    txd->tx_dmamap);
446 				m_freem(txd->tx_m);
447 				txd->tx_m = NULL;
448 				txd->tx_ndesc = 0;
449 				IFNET_STAT_INC(ifp, oerrors, 1);
450 			}
451 		}
452 	}
453 	jme_init_tx_ring(tdata);
454 
455 	/* Initialize shadow status block. */
456 	jme_init_ssb(sc);
457 
458 	/* Program MAC with resolved speed/duplex/flow-control. */
459 	if (sc->jme_has_link) {
460 		jme_mac_config(sc);
461 
462 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
463 
464 		/* Set Tx ring address to the hardware. */
465 		paddr = tdata->jme_tx_ring_paddr;
466 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
467 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
468 
469 		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
470 			CSR_WRITE_4(sc, JME_RXCSR,
471 			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
472 
473 			/* Set Rx ring address to the hardware. */
474 			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
475 			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
476 			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
477 		}
478 
479 		/* Restart receiver/transmitter. */
480 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
481 		    RXCSR_RXQ_START);
482 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
483 	}
484 
485 	ifp->if_flags |= IFF_RUNNING;
486 	ifq_clr_oactive(&ifp->if_snd);
487 	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
488 	    JME_TICK_CPUID);
489 
490 #ifdef IFPOLL_ENABLE
491 	if (!(ifp->if_flags & IFF_NPOLLING))
492 #endif
493 	/* Reenable interrupts. */
494 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
495 
496 done:
497 	if (sc->jme_in_tick)
498 		jme_deserialize_skipmain(sc);
499 }
500 
501 /*
502  *	Get the current interface media status.
503  */
504 static void
505 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
506 {
507 	struct jme_softc *sc = ifp->if_softc;
508 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
509 
510 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
511 
512 	mii_pollstat(mii);
513 	ifmr->ifm_status = mii->mii_media_status;
514 	ifmr->ifm_active = mii->mii_media_active;
515 }
516 
517 /*
518  *	Set hardware to newly-selected media.
519  */
520 static int
521 jme_mediachange(struct ifnet *ifp)
522 {
523 	struct jme_softc *sc = ifp->if_softc;
524 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
525 	int error;
526 
527 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
528 
529 	if (mii->mii_instance != 0) {
530 		struct mii_softc *miisc;
531 
532 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
533 			mii_phy_reset(miisc);
534 	}
535 	error = mii_mediachg(mii);
536 
537 	return (error);
538 }
539 
540 static int
541 jme_probe(device_t dev)
542 {
543 	const struct jme_dev *sp;
544 	uint16_t vid, did;
545 
546 	vid = pci_get_vendor(dev);
547 	did = pci_get_device(dev);
548 	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
549 		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
550 			struct jme_softc *sc = device_get_softc(dev);
551 
552 			sc->jme_caps = sp->jme_caps;
553 			device_set_desc(dev, sp->jme_name);
554 			return (0);
555 		}
556 	}
557 	return (ENXIO);
558 }
559 
560 static int
561 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
562 {
563 	uint32_t reg;
564 	int i;
565 
566 	*val = 0;
567 	for (i = JME_TIMEOUT; i > 0; i--) {
568 		reg = CSR_READ_4(sc, JME_SMBCSR);
569 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
570 			break;
571 		DELAY(1);
572 	}
573 
574 	if (i == 0) {
575 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
576 		return (ETIMEDOUT);
577 	}
578 
579 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
580 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
581 	for (i = JME_TIMEOUT; i > 0; i--) {
582 		DELAY(1);
583 		reg = CSR_READ_4(sc, JME_SMBINTF);
584 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
585 			break;
586 	}
587 
588 	if (i == 0) {
589 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
590 		return (ETIMEDOUT);
591 	}
592 
593 	reg = CSR_READ_4(sc, JME_SMBINTF);
594 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
595 
596 	return (0);
597 }
598 
599 static int
600 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
601 {
602 	uint8_t fup, reg, val;
603 	uint32_t offset;
604 	int match;
605 
606 	offset = 0;
607 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
608 	    fup != JME_EEPROM_SIG0)
609 		return (ENOENT);
610 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
611 	    fup != JME_EEPROM_SIG1)
612 		return (ENOENT);
613 	match = 0;
614 	do {
615 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
616 			break;
617 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
618 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
619 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
620 				break;
621 			if (reg >= JME_PAR0 &&
622 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
623 				if (jme_eeprom_read_byte(sc, offset + 2,
624 				    &val) != 0)
625 					break;
626 				eaddr[reg - JME_PAR0] = val;
627 				match++;
628 			}
629 		}
630 		/* Check for the end of EEPROM descriptor. */
631 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
632 			break;
633 		/* Try the next EEPROM descriptor. */
634 		offset += JME_EEPROM_DESC_BYTES;
635 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
636 
637 	if (match == ETHER_ADDR_LEN)
638 		return (0);
639 
640 	return (ENOENT);
641 }
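/*
 * For reference, the loop above walks 3-byte EEPROM descriptors.  The
 * layout, as inferred from the reads (the exact flag encodings are in
 * if_jmereg.h), is roughly:
 *
 *	offset + 0: fup - function/page selector, plus the
 *	            JME_EEPROM_DESC_END termination flag
 *	offset + 1: reg - target register (JME_PAR0..JME_PAR0 + 5
 *	            carry the MAC address)
 *	offset + 2: val - byte value to load into that register
 */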
642 
643 static void
644 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
645 {
646 	uint32_t par0, par1;
647 
648 	/* Read station address. */
649 	par0 = CSR_READ_4(sc, JME_PAR0);
650 	par1 = CSR_READ_4(sc, JME_PAR1);
651 	par1 &= 0xFFFF;
652 	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
653 		device_printf(sc->jme_dev,
654 		    "generating fake ethernet address.\n");
655 		par0 = karc4random();
656 		/* Set OUI to JMicron. */
657 		eaddr[0] = 0x00;
658 		eaddr[1] = 0x1B;
659 		eaddr[2] = 0x8C;
660 		eaddr[3] = (par0 >> 16) & 0xff;
661 		eaddr[4] = (par0 >> 8) & 0xff;
662 		eaddr[5] = par0 & 0xff;
663 	} else {
664 		eaddr[0] = (par0 >> 0) & 0xFF;
665 		eaddr[1] = (par0 >> 8) & 0xFF;
666 		eaddr[2] = (par0 >> 16) & 0xFF;
667 		eaddr[3] = (par0 >> 24) & 0xFF;
668 		eaddr[4] = (par1 >> 0) & 0xFF;
669 		eaddr[5] = (par1 >> 8) & 0xFF;
670 	}
671 }
672 
673 static int
674 jme_attach(device_t dev)
675 {
676 	struct jme_softc *sc = device_get_softc(dev);
677 	struct ifnet *ifp = &sc->arpcom.ac_if;
678 	uint32_t reg;
679 	uint16_t did;
680 	uint8_t pcie_ptr, rev;
681 	int error = 0, i, j, rx_desc_cnt, coal_max;
682 	uint8_t eaddr[ETHER_ADDR_LEN];
683 #ifdef IFPOLL_ENABLE
684 	int offset, offset_def;
685 #endif
686 
687 	/*
688 	 * Initialize serializers
689 	 */
690 	lwkt_serialize_init(&sc->jme_serialize);
691 	lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize);
692 	for (i = 0; i < JME_NRXRING_MAX; ++i) {
693 		lwkt_serialize_init(
694 		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
695 	}
696 
697 	/*
698 	 * Get # of RX ring descriptors
699 	 */
700 	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
701 	    jme_rx_desc_count);
702 	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
703 	if (rx_desc_cnt > JME_NDESC_MAX)
704 		rx_desc_cnt = JME_NDESC_MAX;
705 
706 	/*
707 	 * Get # of TX ring descriptors
708 	 */
709 	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
710 	    device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count);
711 	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
712 	    roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, JME_NDESC_ALIGN);
713 	if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX)
714 		sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX;
715 
716 	/*
717 	 * Get # of RX rings
718 	 */
719 	sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
720 	    jme_rx_ring_count);
721 	sc->jme_cdata.jme_rx_ring_cnt =
722 	    if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
723 
724 	/*
725 	 * Initialize serializer array
726 	 */
727 	i = 0;
728 
729 	KKASSERT(i < JME_NSERIALIZE);
730 	sc->jme_serialize_arr[i++] = &sc->jme_serialize;
731 
732 	KKASSERT(i < JME_NSERIALIZE);
733 	sc->jme_serialize_arr[i++] =
734 	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
735 
736 	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
737 		KKASSERT(i < JME_NSERIALIZE);
738 		sc->jme_serialize_arr[i++] =
739 		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
740 	}
741 
742 	KKASSERT(i <= JME_NSERIALIZE);
743 	sc->jme_serialize_cnt = i;
744 
745 	/*
746 	 * Setup TX ring specific data
747 	 */
748 	sc->jme_cdata.jme_tx_data.jme_sc = sc;
749 
750 	/*
751 	 * Setup RX-ring specific data
752 	 */
753 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
754 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
755 
756 		rdata->jme_sc = sc;
757 		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
758 		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
759 		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
760 		rdata->jme_rx_idx = i;
761 		rdata->jme_rx_desc_cnt = rx_desc_cnt;
762 	}
763 
764 	sc->jme_dev = dev;
765 	sc->jme_lowaddr = BUS_SPACE_MAXADDR;
766 
767 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
768 
769 	callout_init(&sc->jme_tick_ch);
770 
771 #ifndef BURN_BRIDGES
772 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
773 		uint32_t irq, mem;
774 
775 		irq = pci_read_config(dev, PCIR_INTLINE, 4);
776 		mem = pci_read_config(dev, JME_PCIR_BAR, 4);
777 
778 		device_printf(dev, "chip is in D%d power mode "
779 		    "-- setting to D0\n", pci_get_powerstate(dev));
780 
781 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
782 
783 		pci_write_config(dev, PCIR_INTLINE, irq, 4);
784 		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
785 	}
786 #endif	/* !BURN_BRIDGES */
787 
788 	/* Enable bus mastering */
789 	pci_enable_busmaster(dev);
790 
791 	/*
792 	 * Allocate IO memory
793 	 *
794 	 * The JMC250 supports both memory-mapped and I/O register space
795 	 * access.  Because I/O register access would require different
796 	 * BARs to reach the registers, it's a waste of time to use I/O
797 	 * register space access.  The JMC250 uses 16K to map its entire
798 	 * register space.
799 	 */
800 	sc->jme_mem_rid = JME_PCIR_BAR;
801 	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
802 						 &sc->jme_mem_rid, RF_ACTIVE);
803 	if (sc->jme_mem_res == NULL) {
804 		device_printf(dev, "can't allocate IO memory\n");
805 		return ENXIO;
806 	}
807 	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
808 	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
809 
810 	/*
811 	 * Allocate IRQ
812 	 */
813 	error = jme_intr_alloc(dev);
814 	if (error)
815 		goto fail;
816 
817 	/*
818 	 * Extract revisions
819 	 */
820 	reg = CSR_READ_4(sc, JME_CHIPMODE);
821 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
822 	    CHIPMODE_NOT_FPGA) {
823 		sc->jme_caps |= JME_CAP_FPGA;
824 		if (bootverbose) {
825 			device_printf(dev, "FPGA revision: 0x%04x\n",
826 				      (reg & CHIPMODE_FPGA_REV_MASK) >>
827 				      CHIPMODE_FPGA_REV_SHIFT);
828 		}
829 	}
830 
831 	/* NOTE: FM revision is put in the upper 4 bits */
832 	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
833 	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
834 	if (bootverbose)
835 		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
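	/*
	 * Illustrative example (values assumed): an FM revision of 2
	 * with an ECO revision of 1 yields rev = (2 << 4) | 1 = 0x21,
	 * which the checks below compare against the JME_REV*
	 * constants.
	 */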
836 
837 	did = pci_get_device(dev);
838 	switch (did) {
839 	case PCI_PRODUCT_JMICRON_JMC250:
840 		if (rev == JME_REV1_A2)
841 			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
842 		break;
843 
844 	case PCI_PRODUCT_JMICRON_JMC260:
845 		if (rev == JME_REV2) {
846 			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
847 			sc->jme_phycom0 = 0x608a;
848 		} else if (rev == JME_REV2_2) {
849 			sc->jme_phycom0 = 0x408a;
850 		}
851 		break;
852 
853 	default:
854 		panic("unknown device id 0x%04x", did);
855 	}
856 	if (rev >= JME_REV2) {
857 		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
858 		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
859 				      GHC_TXMAC_CLKSRC_1000;
860 	}
861 	if (rev >= JME_REV5)
862 		sc->jme_caps |= JME_CAP_PHYPWR;
863 	if (rev >= JME_REV6 || rev == JME_REV5 || rev == JME_REV5_1 ||
864 	    rev == JME_REV5_3) {
865 		sc->jme_phycom0 = 0x008a;
866 		sc->jme_phycom1 = 0x4109;
867 	} else if (rev == JME_REV3_1 || rev == JME_REV3_2) {
868 		sc->jme_phycom0 = 0xe088;
869 	}
870 
871 	if (rev >= JME_REV2) {
872 		reg = pci_read_config(dev, JME_PCI_SSCTRL, 4);
873 		if ((reg & SSCTRL_PHYMASK) == SSCTRL_PHYEA) {
874 			sc->jme_phycom0 = 0;
875 			sc->jme_phycom1 = 0;
876 		}
877 	}
878 
879 	/* Reset the ethernet controller. */
880 	jme_reset(sc);
881 
882 	/* Map MSI/MSI-X vectors */
883 	jme_set_msinum(sc);
884 
885 	/* Get station address. */
886 	reg = CSR_READ_4(sc, JME_SMBCSR);
887 	if (reg & SMBCSR_EEPROM_PRESENT)
888 		error = jme_eeprom_macaddr(sc, eaddr);
889 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
890 		if (error != 0 && (bootverbose)) {
891 			device_printf(dev, "ethernet hardware address "
892 				      "not found in EEPROM.\n");
893 		}
894 		jme_reg_macaddr(sc, eaddr);
895 	}
896 
897 	/*
898 	 * Save PHY address.
899 	 * The integrated JR0211 has a fixed PHY address, whereas the FPGA
900 	 * version requires PHY probing to find the correct PHY address.
901 	 */
902 	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
903 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
904 		    GPREG0_PHY_ADDR_MASK;
905 		if (bootverbose) {
906 			device_printf(dev, "PHY is at address %d.\n",
907 			    sc->jme_phyaddr);
908 		}
909 	} else {
910 		sc->jme_phyaddr = 0;
911 	}
912 
913 	/* Set max allowable DMA size. */
914 	pcie_ptr = pci_get_pciecap_ptr(dev);
915 	if (pcie_ptr != 0) {
916 		uint16_t ctrl;
917 
918 		sc->jme_caps |= JME_CAP_PCIE;
919 		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
920 		if (bootverbose) {
921 			device_printf(dev, "Read request size : %d bytes.\n",
922 			    128 << ((ctrl >> 12) & 0x07));
923 			device_printf(dev, "TLP payload size : %d bytes.\n",
924 			    128 << ((ctrl >> 5) & 0x07));
925 		}
926 		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
927 		case PCIEM_DEVCTL_MAX_READRQ_128:
928 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
929 			break;
930 		case PCIEM_DEVCTL_MAX_READRQ_256:
931 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
932 			break;
933 		default:
934 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
935 			break;
936 		}
937 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
938 	} else {
939 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
940 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
941 	}
942 
943 #ifdef notyet
944 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
945 		sc->jme_caps |= JME_CAP_PMCAP;
946 #endif
947 
948 #ifdef IFPOLL_ENABLE
949 	/*
950 	 * NPOLLING RX CPU offset
951 	 */
952 	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
953 		offset = 0;
954 	} else {
955 		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
956 		    device_get_unit(dev)) % ncpus2;
957 		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
958 		if (offset >= ncpus2 ||
959 		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
960 			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
961 			    offset, offset_def);
962 			offset = offset_def;
963 		}
964 	}
965 	sc->jme_npoll_rxoff = offset;
966 
967 	/*
968 	 * NPOLLING TX CPU offset
969 	 */
970 	offset_def = sc->jme_npoll_rxoff;
971 	offset = device_getenv_int(dev, "npoll.txoff", offset_def);
972 	if (offset >= ncpus2) {
973 		device_printf(dev, "invalid npoll.txoff %d, use %d\n",
974 		    offset, offset_def);
975 		offset = offset_def;
976 	}
977 	sc->jme_npoll_txoff = offset;
978 #endif
979 
980 	/*
981 	 * Set default coalesce values
982 	 */
983 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
984 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
985 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
986 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
987 
988 	/*
989 	 * Adjust coalesce values, in case the number of TX/RX
990 	 * descs is set to small values by the user.
991 	 *
992 	 * NOTE: coal_max will not be zero, since the number of descs
993 	 * must be aligned to JME_NDESC_ALIGN (16 currently).
994 	 */
995 	coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2;
996 	if (coal_max < sc->jme_tx_coal_pkt)
997 		sc->jme_tx_coal_pkt = coal_max;
998 
999 	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
1000 	if (coal_max < sc->jme_rx_coal_pkt)
1001 		sc->jme_rx_coal_pkt = coal_max;
1002 
1003 	sc->jme_cdata.jme_tx_data.jme_tx_wreg = JME_TXWREG_NSEGS;
1004 
1005 	/*
1006 	 * Create sysctl tree
1007 	 */
1008 	jme_sysctl_node(sc);
1009 
1010 	/* Allocate DMA resources */
1011 	error = jme_dma_alloc(sc);
1012 	if (error)
1013 		goto fail;
1014 
1015 	ifp->if_softc = sc;
1016 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1017 	ifp->if_init = jme_init;
1018 	ifp->if_ioctl = jme_ioctl;
1019 	ifp->if_start = jme_start;
1020 #ifdef IFPOLL_ENABLE
1021 	ifp->if_npoll = jme_npoll;
1022 #endif
1023 	ifp->if_watchdog = jme_watchdog;
1024 	ifp->if_serialize = jme_serialize;
1025 	ifp->if_deserialize = jme_deserialize;
1026 	ifp->if_tryserialize = jme_tryserialize;
1027 #ifdef INVARIANTS
1028 	ifp->if_serialize_assert = jme_serialize_assert;
1029 #endif
1030 	ifq_set_maxlen(&ifp->if_snd,
1031 	    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD);
1032 	ifq_set_ready(&ifp->if_snd);
1033 
1034 	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
1035 	ifp->if_capabilities = IFCAP_HWCSUM |
1036 			       IFCAP_TSO |
1037 			       IFCAP_VLAN_MTU |
1038 			       IFCAP_VLAN_HWTAGGING;
1039 	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
1040 		ifp->if_capabilities |= IFCAP_RSS;
1041 	ifp->if_capenable = ifp->if_capabilities;
1042 
1043 	/*
1044 	 * Disable TXCSUM by default to improve bulk data
1045 	 * transmit performance (roughly +20Mbps).
1046 	 */
1047 	ifp->if_capenable &= ~IFCAP_TXCSUM;
1048 
1049 	if (ifp->if_capenable & IFCAP_TXCSUM)
1050 		ifp->if_hwassist |= JME_CSUM_FEATURES;
1051 	ifp->if_hwassist |= CSUM_TSO;
1052 
1053 	/* Set up MII bus. */
1054 	error = mii_phy_probe(dev, &sc->jme_miibus,
1055 			      jme_mediachange, jme_mediastatus);
1056 	if (error) {
1057 		device_printf(dev, "no PHY found!\n");
1058 		goto fail;
1059 	}
1060 
1061 	/*
1062 	 * Save PHYADDR for FPGA mode PHY.
1063 	 */
1064 	if (sc->jme_caps & JME_CAP_FPGA) {
1065 		struct mii_data *mii = device_get_softc(sc->jme_miibus);
1066 
1067 		if (mii->mii_instance != 0) {
1068 			struct mii_softc *miisc;
1069 
1070 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
1071 				if (miisc->mii_phy != 0) {
1072 					sc->jme_phyaddr = miisc->mii_phy;
1073 					break;
1074 				}
1075 			}
1076 			if (sc->jme_phyaddr != 0) {
1077 				device_printf(sc->jme_dev,
1078 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
1079 				/* vendor magic. */
1080 				jme_miibus_writereg(dev, sc->jme_phyaddr,
1081 				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
1082 
1083 				/* XXX should we clear JME_WA_EXTFIFO? */
1084 			}
1085 		}
1086 	}
1087 
1088 	ether_ifattach(ifp, eaddr, NULL);
1089 
1090 	/* Tell the upper layer(s) we support long frames. */
1091 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1092 
1093 	/* Setup the TX ring's CPUID */
1094 	ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
1095 	ifq_set_hw_serialize(&ifp->if_snd,
1096 	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize);
1097 
1098 	error = jme_intr_setup(dev);
1099 	if (error) {
1100 		ether_ifdetach(ifp);
1101 		goto fail;
1102 	}
1103 
1104 	return 0;
1105 fail:
1106 	jme_detach(dev);
1107 	return (error);
1108 }
1109 
1110 static int
1111 jme_detach(device_t dev)
1112 {
1113 	struct jme_softc *sc = device_get_softc(dev);
1114 
1115 	if (device_is_attached(dev)) {
1116 		struct ifnet *ifp = &sc->arpcom.ac_if;
1117 
1118 		ifnet_serialize_all(ifp);
1119 		jme_stop(sc);
1120 		jme_intr_teardown(dev);
1121 		ifnet_deserialize_all(ifp);
1122 
1123 		ether_ifdetach(ifp);
1124 	}
1125 
1126 	if (sc->jme_sysctl_tree != NULL)
1127 		sysctl_ctx_free(&sc->jme_sysctl_ctx);
1128 
1129 	if (sc->jme_miibus != NULL)
1130 		device_delete_child(dev, sc->jme_miibus);
1131 	bus_generic_detach(dev);
1132 
1133 	jme_intr_free(dev);
1134 
1135 	if (sc->jme_mem_res != NULL) {
1136 		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
1137 				     sc->jme_mem_res);
1138 	}
1139 
1140 	jme_dma_free(sc);
1141 
1142 	return (0);
1143 }
1144 
1145 static void
1146 jme_sysctl_node(struct jme_softc *sc)
1147 {
1148 #ifdef JME_RSS_DEBUG
1149 	int r;
1150 #endif
1151 
1152 	sysctl_ctx_init(&sc->jme_sysctl_ctx);
1153 	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
1154 				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
1155 				device_get_nameunit(sc->jme_dev),
1156 				CTLFLAG_RD, 0, "");
1157 	if (sc->jme_sysctl_tree == NULL) {
1158 		device_printf(sc->jme_dev, "can't add sysctl node\n");
1159 		return;
1160 	}
1161 
1162 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1163 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1164 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1165 	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
1166 
1167 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1168 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1169 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1170 	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
1171 
1172 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1173 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1174 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1175 	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
1176 
1177 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1178 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1179 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1180 	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
1181 
1182 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1183 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1184 		       "rx_desc_count", CTLFLAG_RD,
1185 		       &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
1186 		       0, "RX desc count");
1187 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1188 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1189 		       "tx_desc_count", CTLFLAG_RD,
1190 		       &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
1191 		       0, "TX desc count");
1192 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1193 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1194 		       "rx_ring_count", CTLFLAG_RD,
1195 		       &sc->jme_cdata.jme_rx_ring_cnt,
1196 		       0, "RX ring count");
1197 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1198 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1199 		       "tx_wreg", CTLFLAG_RW,
1200 		       &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0,
1201 		       "# of segments before writing to hardware register");
1202 
1203 #ifdef JME_RSS_DEBUG
1204 	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1205 		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1206 		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
1207 		       0, "RSS debug level");
1208 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1209 		char rx_ring_desc[32];
1210 
1211 		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
1212 		    "rx_ring%d_pkt", r);
1213 		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
1214 		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1215 		    rx_ring_desc, CTLFLAG_RW,
1216 		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
1217 
1218 		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
1219 		    "rx_ring%d_emp", r);
1220 		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
1221 		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1222 		    rx_ring_desc, CTLFLAG_RW,
1223 		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
1224 		    "# of time RX ring empty");
1225 	}
1226 #endif
1227 
1228 #ifdef IFPOLL_ENABLE
1229 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1230 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1231 	    "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
1232 	    jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
1233 	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1234 	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1235 	    "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
1236 	    jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
1237 #endif
1238 }
1239 
1240 static int
1241 jme_dma_alloc(struct jme_softc *sc)
1242 {
1243 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1244 	struct jme_txdesc *txd;
1245 	bus_dmamem_t dmem;
1246 	int error, i, asize;
1247 
1248 	asize = __VM_CACHELINE_ALIGN(
1249 	    tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc));
1250 	tdata->jme_txdesc = kmalloc_cachealign(asize, M_DEVBUF,
1251 	    M_WAITOK | M_ZERO);
1252 
1253 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1254 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
1255 
1256 		asize = __VM_CACHELINE_ALIGN(
1257 		    rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc));
1258 		rdata->jme_rxdesc = kmalloc_cachealign(asize, M_DEVBUF,
1259 		    M_WAITOK | M_ZERO);
1260 	}
1261 
1262 	/* Create parent ring tag. */
1263 	error = bus_dma_tag_create(NULL,/* parent */
1264 	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
1265 	    sc->jme_lowaddr,		/* lowaddr */
1266 	    BUS_SPACE_MAXADDR,		/* highaddr */
1267 	    NULL, NULL,			/* filter, filterarg */
1268 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1269 	    0,				/* nsegments */
1270 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1271 	    0,				/* flags */
1272 	    &sc->jme_cdata.jme_ring_tag);
1273 	if (error) {
1274 		device_printf(sc->jme_dev,
1275 		    "could not create parent ring DMA tag.\n");
1276 		return error;
1277 	}
1278 
1279 	/*
1280 	 * Create DMA stuffs for TX ring
1281 	 */
1282 	asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN);
1283 	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1284 			JME_TX_RING_ALIGN, 0,
1285 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1286 			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1287 	if (error) {
1288 		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1289 		return error;
1290 	}
1291 	tdata->jme_tx_ring_tag = dmem.dmem_tag;
1292 	tdata->jme_tx_ring_map = dmem.dmem_map;
1293 	tdata->jme_tx_ring = dmem.dmem_addr;
1294 	tdata->jme_tx_ring_paddr = dmem.dmem_busaddr;
1295 
1296 	/*
1297 	 * Create DMA resources for the RX rings
1298 	 */
1299 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1300 		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
1301 		if (error)
1302 			return error;
1303 	}
1304 
1305 	/* Create parent buffer tag. */
1306 	error = bus_dma_tag_create(NULL,/* parent */
1307 	    1, 0,			/* algnmnt, boundary */
1308 	    sc->jme_lowaddr,		/* lowaddr */
1309 	    BUS_SPACE_MAXADDR,		/* highaddr */
1310 	    NULL, NULL,			/* filter, filterarg */
1311 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1312 	    0,				/* nsegments */
1313 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1314 	    0,				/* flags */
1315 	    &sc->jme_cdata.jme_buffer_tag);
1316 	if (error) {
1317 		device_printf(sc->jme_dev,
1318 		    "could not create parent buffer DMA tag.\n");
1319 		return error;
1320 	}
1321 
1322 	/*
1323 	 * Create DMA resources for the shadow status block
1324 	 */
1325 	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
1326 	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1327 			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1328 			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1329 	if (error) {
1330 		device_printf(sc->jme_dev,
1331 		    "could not create shadow status block.\n");
1332 		return error;
1333 	}
1334 	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1335 	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1336 	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1337 	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1338 
1339 	/*
1340 	 * Create DMA resources for the TX buffers
1341 	 */
1342 
1343 	/* Create tag for Tx buffers. */
1344 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1345 	    1, 0,			/* algnmnt, boundary */
1346 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1347 	    BUS_SPACE_MAXADDR,		/* highaddr */
1348 	    NULL, NULL,			/* filter, filterarg */
1349 	    JME_TSO_MAXSIZE,		/* maxsize */
1350 	    JME_MAXTXSEGS,		/* nsegments */
1351 	    JME_MAXSEGSIZE,		/* maxsegsize */
1352 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1353 	    &tdata->jme_tx_tag);
1354 	if (error != 0) {
1355 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1356 		return error;
1357 	}
1358 
1359 	/* Create DMA maps for Tx buffers. */
1360 	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
1361 		txd = &tdata->jme_txdesc[i];
1362 		error = bus_dmamap_create(tdata->jme_tx_tag,
1363 				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1364 				&txd->tx_dmamap);
1365 		if (error) {
1366 			int j;
1367 
1368 			device_printf(sc->jme_dev,
1369 			    "could not create %dth Tx dmamap.\n", i);
1370 
1371 			for (j = 0; j < i; ++j) {
1372 				txd = &tdata->jme_txdesc[j];
1373 				bus_dmamap_destroy(tdata->jme_tx_tag,
1374 						   txd->tx_dmamap);
1375 			}
1376 			bus_dma_tag_destroy(tdata->jme_tx_tag);
1377 			tdata->jme_tx_tag = NULL;
1378 			return error;
1379 		}
1380 	}
1381 
1382 	/*
1383 	 * Create DMA resources for the RX buffers
1384 	 */
1385 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1386 		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
1387 		if (error)
1388 			return error;
1389 	}
1390 	return 0;
1391 }
1392 
1393 static void
1394 jme_dma_free(struct jme_softc *sc)
1395 {
1396 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1397 	struct jme_txdesc *txd;
1398 	struct jme_rxdesc *rxd;
1399 	struct jme_rxdata *rdata;
1400 	int i, r;
1401 
1402 	/* Tx ring */
1403 	if (tdata->jme_tx_ring_tag != NULL) {
1404 		bus_dmamap_unload(tdata->jme_tx_ring_tag,
1405 		    tdata->jme_tx_ring_map);
1406 		bus_dmamem_free(tdata->jme_tx_ring_tag,
1407 		    tdata->jme_tx_ring, tdata->jme_tx_ring_map);
1408 		bus_dma_tag_destroy(tdata->jme_tx_ring_tag);
1409 		tdata->jme_tx_ring_tag = NULL;
1410 	}
1411 
1412 	/* Rx ring */
1413 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1414 		rdata = &sc->jme_cdata.jme_rx_data[r];
1415 		if (rdata->jme_rx_ring_tag != NULL) {
1416 			bus_dmamap_unload(rdata->jme_rx_ring_tag,
1417 					  rdata->jme_rx_ring_map);
1418 			bus_dmamem_free(rdata->jme_rx_ring_tag,
1419 					rdata->jme_rx_ring,
1420 					rdata->jme_rx_ring_map);
1421 			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1422 			rdata->jme_rx_ring_tag = NULL;
1423 		}
1424 	}
1425 
1426 	/* Tx buffers */
1427 	if (tdata->jme_tx_tag != NULL) {
1428 		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
1429 			txd = &tdata->jme_txdesc[i];
1430 			bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap);
1431 		}
1432 		bus_dma_tag_destroy(tdata->jme_tx_tag);
1433 		tdata->jme_tx_tag = NULL;
1434 	}
1435 
1436 	/* Rx buffers */
1437 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1438 		rdata = &sc->jme_cdata.jme_rx_data[r];
1439 		if (rdata->jme_rx_tag != NULL) {
1440 			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
1441 				rxd = &rdata->jme_rxdesc[i];
1442 				bus_dmamap_destroy(rdata->jme_rx_tag,
1443 						   rxd->rx_dmamap);
1444 			}
1445 			bus_dmamap_destroy(rdata->jme_rx_tag,
1446 					   rdata->jme_rx_sparemap);
1447 			bus_dma_tag_destroy(rdata->jme_rx_tag);
1448 			rdata->jme_rx_tag = NULL;
1449 		}
1450 	}
1451 
1452 	/* Shadow status block. */
1453 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1454 		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1455 		    sc->jme_cdata.jme_ssb_map);
1456 		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1457 		    sc->jme_cdata.jme_ssb_block,
1458 		    sc->jme_cdata.jme_ssb_map);
1459 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1460 		sc->jme_cdata.jme_ssb_tag = NULL;
1461 	}
1462 
1463 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1464 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1465 		sc->jme_cdata.jme_buffer_tag = NULL;
1466 	}
1467 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1468 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1469 		sc->jme_cdata.jme_ring_tag = NULL;
1470 	}
1471 
1472 	if (tdata->jme_txdesc != NULL) {
1473 		kfree(tdata->jme_txdesc, M_DEVBUF);
1474 		tdata->jme_txdesc = NULL;
1475 	}
1476 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1477 		rdata = &sc->jme_cdata.jme_rx_data[r];
1478 		if (rdata->jme_rxdesc != NULL) {
1479 			kfree(rdata->jme_rxdesc, M_DEVBUF);
1480 			rdata->jme_rxdesc = NULL;
1481 		}
1482 	}
1483 }
1484 
1485 /*
1486  *	Make sure the interface is stopped at reboot time.
1487  */
1488 static int
1489 jme_shutdown(device_t dev)
1490 {
1491 	return jme_suspend(dev);
1492 }
1493 
1494 #ifdef notyet
1495 /*
1496  * Unlike other ethernet controllers, the JMC250 requires
1497  * explicitly resetting the link speed to 10/100Mbps, as a
1498  * gigabit link consumes more power than 375mA.
1499  * Note, we reset the link speed to 10/100Mbps with
1500  * auto-negotiation, but we don't know whether that operation
1501  * will succeed, as we have no control after powering
1502  * off.  If the renegotiation fails, WOL may not work.  Running
1503  * at 1Gbps draws more than the 375mA at 3.3V specified by the
1504  * PCI specification, and that would result in power to the
1505  * ethernet controller being shut down completely.
1506  *
1507  * TODO
1508  *  Save current negotiated media speed/duplex/flow-control
1509  *  to softc and restore the same link again after resuming.
1510  *  PHY handling such as power down/resetting to 100Mbps
1511  *  may be better handled in suspend method in phy driver.
1512  */
1513 static void
1514 jme_setlinkspeed(struct jme_softc *sc)
1515 {
1516 	struct mii_data *mii;
1517 	int aneg, i;
1518 
1519 	JME_LOCK_ASSERT(sc);
1520 
1521 	mii = device_get_softc(sc->jme_miibus);
1522 	mii_pollstat(mii);
1523 	aneg = 0;
1524 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1525 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1526 		case IFM_10_T:
1527 		case IFM_100_TX:
1528 			return;
1529 		case IFM_1000_T:
1530 			aneg++;
			/* FALLTHROUGH */
1531 		default:
1532 			break;
1533 		}
1534 	}
1535 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1536 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1537 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1538 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1539 	    BMCR_AUTOEN | BMCR_STARTNEG);
1540 	DELAY(1000);
1541 	if (aneg != 0) {
1542 			/* Poll link state until jme(4) gets a 10/100 link. */
1543 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1544 			mii_pollstat(mii);
1545 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1546 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1547 				case IFM_10_T:
1548 				case IFM_100_TX:
1549 					jme_mac_config(sc);
1550 					return;
1551 				default:
1552 					break;
1553 				}
1554 			}
1555 			JME_UNLOCK(sc);
1556 			pause("jmelnk", hz);
1557 			JME_LOCK(sc);
1558 		}
1559 		if (i == MII_ANEGTICKS_GIGE)
1560 			device_printf(sc->jme_dev, "establishing link failed, "
1561 			    "WOL may not work!");
1562 	}
1563 	/*
1564 	 * No link, force MAC to have 100Mbps, full-duplex link.
1565 	 * This is the last resort and may or may not work.
1566 	 */
1567 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1568 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1569 	jme_mac_config(sc);
1570 }
1571 
1572 static void
1573 jme_setwol(struct jme_softc *sc)
1574 {
1575 	struct ifnet *ifp = &sc->arpcom.ac_if;
1576 	uint32_t gpr, pmcs;
1577 	uint16_t pmstat;
1578 	int pmc;
1579 
1580 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1581 		/* No PME capability, PHY power down. */
1582 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1583 		    MII_BMCR, BMCR_PDOWN);
1584 		return;
1585 	}
1586 
1587 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1588 	pmcs = CSR_READ_4(sc, JME_PMCS);
1589 	pmcs &= ~PMCS_WOL_ENB_MASK;
1590 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1591 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1592 		/* Enable PME message. */
1593 		gpr |= GPREG0_PME_ENB;
1594 		/* For gigabit controllers, reset link speed to 10/100. */
1595 		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1596 			jme_setlinkspeed(sc);
1597 	}
1598 
1599 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1600 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1601 
1602 	/* Request PME. */
1603 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1604 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1605 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1606 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1607 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1608 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1609 		/* No WOL, PHY power down. */
1610 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1611 		    MII_BMCR, BMCR_PDOWN);
1612 	}
1613 }
1614 #endif
1615 
1616 static int
1617 jme_suspend(device_t dev)
1618 {
1619 	struct jme_softc *sc = device_get_softc(dev);
1620 	struct ifnet *ifp = &sc->arpcom.ac_if;
1621 
1622 	ifnet_serialize_all(ifp);
1623 	jme_stop(sc);
1624 #ifdef notyet
1625 	jme_setwol(sc);
1626 #endif
1627 	ifnet_deserialize_all(ifp);
1628 
1629 	return (0);
1630 }
1631 
1632 static int
1633 jme_resume(device_t dev)
1634 {
1635 	struct jme_softc *sc = device_get_softc(dev);
1636 	struct ifnet *ifp = &sc->arpcom.ac_if;
1637 #ifdef notyet
1638 	int pmc;
1639 #endif
1640 
1641 	ifnet_serialize_all(ifp);
1642 
1643 #ifdef notyet
1644 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1645 		uint16_t pmstat;
1646 
1647 		pmstat = pci_read_config(sc->jme_dev,
1648 		    pmc + PCIR_POWER_STATUS, 2);
1649 		/* Disable PME and clear the PME status. */
1650 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1651 		pci_write_config(sc->jme_dev,
1652 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1653 	}
1654 #endif
1655 
1656 	if (ifp->if_flags & IFF_UP)
1657 		jme_init(sc);
1658 
1659 	ifnet_deserialize_all(ifp);
1660 
1661 	return (0);
1662 }
1663 
1664 static __inline int
1665 jme_tso_pullup(struct mbuf **mp)
1666 {
1667 	int hoff, iphlen, thoff;
1668 	struct mbuf *m;
1669 
1670 	m = *mp;
1671 	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
1672 
1673 	iphlen = m->m_pkthdr.csum_iphlen;
1674 	thoff = m->m_pkthdr.csum_thlen;
1675 	hoff = m->m_pkthdr.csum_lhlen;
1676 
1677 	KASSERT(iphlen > 0, ("invalid ip hlen"));
1678 	KASSERT(thoff > 0, ("invalid tcp hlen"));
1679 	KASSERT(hoff > 0, ("invalid ether hlen"));
1680 
1681 	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
1682 		m = m_pullup(m, hoff + iphlen + thoff);
1683 		if (m == NULL) {
1684 			*mp = NULL;
1685 			return ENOBUFS;
1686 		}
1687 		*mp = m;
1688 	}
1689 	return 0;
1690 }
1691 
1692 static int
1693 jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used)
1694 {
1695 	struct jme_txdesc *txd;
1696 	struct jme_desc *desc;
1697 	struct mbuf *m;
1698 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1699 	int maxsegs, nsegs;
1700 	int error, i, prod, symbol_desc;
1701 	uint32_t cflags, flag64, mss;
1702 
1703 	M_ASSERTPKTHDR((*m_head));
1704 
1705 	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
1706 		/* XXX Is this necessary? */
1707 		error = jme_tso_pullup(m_head);
1708 		if (error)
1709 			return error;
1710 	}
1711 
1712 	prod = tdata->jme_tx_prod;
1713 	txd = &tdata->jme_txdesc[prod];
1714 
1715 	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1716 		symbol_desc = 1;
1717 	else
1718 		symbol_desc = 0;
1719 
1720 	maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) -
1721 		  (JME_TXD_RSVD + symbol_desc);
1722 	if (maxsegs > JME_MAXTXSEGS)
1723 		maxsegs = JME_MAXTXSEGS;
1724 	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
1725 		("not enough segments %d", maxsegs));
1726 
1727 	error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag,
1728 			txd->tx_dmamap, m_head,
1729 			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1730 	if (error)
1731 		goto fail;
1732 	*segs_used += nsegs;
1733 
1734 	bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap,
1735 			BUS_DMASYNC_PREWRITE);
1736 
1737 	m = *m_head;
1738 	cflags = 0;
1739 	mss = 0;
1740 
1741 	/* Configure checksum offload. */
1742 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1743 		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
1744 		cflags |= JME_TD_TSO;
1745 	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
1746 		if (m->m_pkthdr.csum_flags & CSUM_IP)
1747 			cflags |= JME_TD_IPCSUM;
1748 		if (m->m_pkthdr.csum_flags & CSUM_TCP)
1749 			cflags |= JME_TD_TCPCSUM;
1750 		if (m->m_pkthdr.csum_flags & CSUM_UDP)
1751 			cflags |= JME_TD_UDPCSUM;
1752 	}
1753 
1754 	/* Configure VLAN. */
1755 	if (m->m_flags & M_VLANTAG) {
1756 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1757 		cflags |= JME_TD_VLAN_TAG;
1758 	}
1759 
1760 	desc = &tdata->jme_tx_ring[prod];
1761 	desc->flags = htole32(cflags);
1762 	desc->addr_hi = htole32(m->m_pkthdr.len);
1763 	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1764 		/*
1765 		 * Use the 64-bit TX desc chain format.
1766 		 *
1767 		 * The first TX desc of the chain, which is setup here,
1768 		 * is just a symbol TX desc carrying no payload.
1769 		 */
1770 		flag64 = JME_TD_64BIT;
1771 		desc->buflen = htole32(mss);
1772 		desc->addr_lo = 0;
1773 
1774 		*segs_used += 1;
1775 
1776 		/* No effective TX desc is consumed */
1777 		i = 0;
1778 	} else {
1779 		/*
1780 		 * Use the 32-bit TX desc chain format.
1781 		 *
1782 		 * The first TX desc of the chain, which is set up here,
1783 		 * is an effective TX desc carrying the first segment of
1784 		 * the mbuf chain.
1785 		 */
1786 		flag64 = 0;
1787 		desc->buflen = htole32(mss | txsegs[0].ds_len);
1788 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1789 
1790 		/* One effective TX desc is consumed */
1791 		i = 1;
1792 	}
1793 	tdata->jme_tx_cnt++;
1794 	KKASSERT(tdata->jme_tx_cnt - i < tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
1795 	JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
1796 
1797 	txd->tx_ndesc = 1 - i;
1798 	for (; i < nsegs; i++) {
1799 		desc = &tdata->jme_tx_ring[prod];
1800 		desc->buflen = htole32(txsegs[i].ds_len);
1801 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1802 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1803 		desc->flags = htole32(JME_TD_OWN | flag64);
1804 
1805 		tdata->jme_tx_cnt++;
1806 		KKASSERT(tdata->jme_tx_cnt <=
1807 			 tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
1808 		JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
1809 	}
1810 
1811 	/* Update producer index. */
1812 	tdata->jme_tx_prod = prod;
1813 	/*
1814 	 * Finally, request an interrupt and give ownership of the
1815 	 * first descriptor to the hardware.
1816 	 */
1817 	desc = txd->tx_desc;
1818 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1819 
1820 	txd->tx_m = m;
1821 	txd->tx_ndesc += nsegs;
1822 
1823 	return 0;
1824 fail:
1825 	m_freem(*m_head);
1826 	*m_head = NULL;
1827 	return error;
1828 }
1829 
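/*
 * ifnet if_start handler.  Frames are dequeued and encapsulated until
 * the ring fills up (JME_TXD_RSVD descriptors are always kept free);
 * the TX doorbell is written once every jme_tx_wreg enqueued
 * descriptors and once more at the end, to limit register accesses.
 */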
1830 static void
1831 jme_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1832 {
1833 	struct jme_softc *sc = ifp->if_softc;
1834 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1835 	struct mbuf *m_head;
1836 	int enq = 0;
1837 
1838 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1839 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
1840 
1841 	if (!sc->jme_has_link) {
1842 		ifq_purge(&ifp->if_snd);
1843 		return;
1844 	}
1845 
1846 	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1847 		return;
1848 
1849 	if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata))
1850 		jme_txeof(tdata);
1851 
1852 	while (!ifq_is_empty(&ifp->if_snd)) {
1853 		/*
1854 		 * Check the number of available TX descs; always
1855 		 * leave JME_TXD_RSVD TX descs free.
1856 		 */
1857 		if (tdata->jme_tx_cnt + JME_TXD_SPARE >
1858 		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD) {
1859 			ifq_set_oactive(&ifp->if_snd);
1860 			break;
1861 		}
1862 
1863 		m_head = ifq_dequeue(&ifp->if_snd);
1864 		if (m_head == NULL)
1865 			break;
1866 
1867 		/*
1868 		 * Pack the data into the transmit ring. If we
1869 		 * don't have room, set the OACTIVE flag and wait
1870 		 * for the NIC to drain the ring.
1871 		 */
1872 		if (jme_encap(tdata, &m_head, &enq)) {
1873 			KKASSERT(m_head == NULL);
1874 			IFNET_STAT_INC(ifp, oerrors, 1);
1875 			ifq_set_oactive(&ifp->if_snd);
1876 			break;
1877 		}
1878 
1879 		if (enq >= tdata->jme_tx_wreg) {
1880 			CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr |
1881 			    TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0));
1882 			enq = 0;
1883 		}
1884 
1885 		/*
1886 		 * If there's a BPF listener, bounce a copy of this frame
1887 		 * to him.
1888 		 */
1889 		ETHER_BPF_MTAP(ifp, m_head);
1890 
1891 		/* Set a timeout in case the chip goes out to lunch. */
1892 		ifp->if_timer = JME_TX_TIMEOUT;
1893 	}
1894 
1895 	if (enq > 0) {
1896 		/*
1897 		 * Reading TXCSR takes a very long time under heavy load,
1898 		 * so cache the TXCSR value and write it, ORed with the
1899 		 * kick command, to the TXCSR. This saves one register
1900 		 * access cycle.
1901 		 */
1902 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1903 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1904 	}
1905 }
1906 
1907 static void
1908 jme_watchdog(struct ifnet *ifp)
1909 {
1910 	struct jme_softc *sc = ifp->if_softc;
1911 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1912 
1913 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1914 
1915 	if (!sc->jme_has_link) {
1916 		if_printf(ifp, "watchdog timeout (missed link)\n");
1917 		IFNET_STAT_INC(ifp, oerrors, 1);
1918 		jme_init(sc);
1919 		return;
1920 	}
1921 
1922 	jme_txeof(tdata);
1923 	if (tdata->jme_tx_cnt == 0) {
1924 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1925 			  "-- recovering\n");
1926 		if (!ifq_is_empty(&ifp->if_snd))
1927 			if_devstart(ifp);
1928 		return;
1929 	}
1930 
1931 	if_printf(ifp, "watchdog timeout\n");
1932 	IFNET_STAT_INC(ifp, oerrors, 1);
1933 	jme_init(sc);
1934 	if (!ifq_is_empty(&ifp->if_snd))
1935 		if_devstart(ifp);
1936 }
1937 
1938 static int
1939 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1940 {
1941 	struct jme_softc *sc = ifp->if_softc;
1942 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1943 	struct ifreq *ifr = (struct ifreq *)data;
1944 	int error = 0, mask;
1945 
1946 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1947 
1948 	switch (cmd) {
1949 	case SIOCSIFMTU:
1950 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1951 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1952 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1953 			error = EINVAL;
1954 			break;
1955 		}
1956 
1957 		if (ifp->if_mtu != ifr->ifr_mtu) {
1958 			/*
1959 			 * No special configuration is required when interface
1960 			 * MTU is changed, but the availability of Tx checksum
1961 			 * offload should be checked against the new MTU size,
1962 			 * as the Tx FIFO size is just 2K.
1963 			 */
1964 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1965 				ifp->if_capenable &=
1966 				    ~(IFCAP_TXCSUM | IFCAP_TSO);
1967 				ifp->if_hwassist &=
1968 				    ~(JME_CSUM_FEATURES | CSUM_TSO);
1969 			}
1970 			ifp->if_mtu = ifr->ifr_mtu;
1971 			if (ifp->if_flags & IFF_RUNNING)
1972 				jme_init(sc);
1973 		}
1974 		break;
1975 
1976 	case SIOCSIFFLAGS:
1977 		if (ifp->if_flags & IFF_UP) {
1978 			if (ifp->if_flags & IFF_RUNNING) {
1979 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1980 				    (IFF_PROMISC | IFF_ALLMULTI))
1981 					jme_set_filter(sc);
1982 			} else {
1983 				jme_init(sc);
1984 			}
1985 		} else {
1986 			if (ifp->if_flags & IFF_RUNNING)
1987 				jme_stop(sc);
1988 		}
1989 		sc->jme_if_flags = ifp->if_flags;
1990 		break;
1991 
1992 	case SIOCADDMULTI:
1993 	case SIOCDELMULTI:
1994 		if (ifp->if_flags & IFF_RUNNING)
1995 			jme_set_filter(sc);
1996 		break;
1997 
1998 	case SIOCSIFMEDIA:
1999 	case SIOCGIFMEDIA:
2000 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2001 		break;
2002 
2003 	case SIOCSIFCAP:
2004 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2005 
2006 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
2007 			ifp->if_capenable ^= IFCAP_TXCSUM;
2008 			if (ifp->if_capenable & IFCAP_TXCSUM)
2009 				ifp->if_hwassist |= JME_CSUM_FEATURES;
2010 			else
2011 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
2012 		}
2013 		if (mask & IFCAP_RXCSUM) {
2014 			uint32_t reg;
2015 
2016 			ifp->if_capenable ^= IFCAP_RXCSUM;
2017 			reg = CSR_READ_4(sc, JME_RXMAC);
2018 			reg &= ~RXMAC_CSUM_ENB;
2019 			if (ifp->if_capenable & IFCAP_RXCSUM)
2020 				reg |= RXMAC_CSUM_ENB;
2021 			CSR_WRITE_4(sc, JME_RXMAC, reg);
2022 		}
2023 
2024 		if (mask & IFCAP_VLAN_HWTAGGING) {
2025 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2026 			jme_set_vlan(sc);
2027 		}
2028 
2029 		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
2030 			ifp->if_capenable ^= IFCAP_TSO;
2031 			if (ifp->if_capenable & IFCAP_TSO)
2032 				ifp->if_hwassist |= CSUM_TSO;
2033 			else
2034 				ifp->if_hwassist &= ~CSUM_TSO;
2035 		}
2036 
2037 		if (mask & IFCAP_RSS)
2038 			ifp->if_capenable ^= IFCAP_RSS;
2039 		break;
2040 
2041 	default:
2042 		error = ether_ioctl(ifp, cmd, data);
2043 		break;
2044 	}
2045 	return (error);
2046 }
2047 
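/*
 * Reprogram the MACs after a link state change.  The speed/duplex
 * resolved by the MII layer selects the GHC clock source, the
 * half-duplex workaround bit (GPREG1_WA_HDX), the extended-FIFO PHY
 * workaround for 100TX, and carrier extension/frame bursting for
 * half-duplex gigabit.
 */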
2048 static void
2049 jme_mac_config(struct jme_softc *sc)
2050 {
2051 	struct mii_data *mii;
2052 	uint32_t ghc, rxmac, txmac, txpause, gp1;
2053 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
2054 
2055 	mii = device_get_softc(sc->jme_miibus);
2056 
2057 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2058 	DELAY(10);
2059 	CSR_WRITE_4(sc, JME_GHC, 0);
2060 	ghc = 0;
2061 	rxmac = CSR_READ_4(sc, JME_RXMAC);
2062 	rxmac &= ~RXMAC_FC_ENB;
2063 	txmac = CSR_READ_4(sc, JME_TXMAC);
2064 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2065 	txpause = CSR_READ_4(sc, JME_TXPFC);
2066 	txpause &= ~TXPFC_PAUSE_ENB;
2067 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2068 		ghc |= GHC_FULL_DUPLEX;
2069 		rxmac &= ~RXMAC_COLL_DET_ENB;
2070 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2071 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2072 		    TXMAC_FRAME_BURST);
2073 #ifdef notyet
2074 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2075 			txpause |= TXPFC_PAUSE_ENB;
2076 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2077 			rxmac |= RXMAC_FC_ENB;
2078 #endif
2079 		/* Disable retry transmit timer/retry limit. */
2080 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2081 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2082 	} else {
2083 		rxmac |= RXMAC_COLL_DET_ENB;
2084 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2085 		/* Enable retry transmit timer/retry limit. */
2086 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2087 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2088 	}
2089 
2090 	/*
2091 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
2092 	 */
2093 	gp1 = CSR_READ_4(sc, JME_GPREG1);
2094 	gp1 &= ~GPREG1_WA_HDX;
2095 
2096 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2097 		hdx = 1;
2098 
2099 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2100 	case IFM_10_T:
2101 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
2102 		if (hdx)
2103 			gp1 |= GPREG1_WA_HDX;
2104 		break;
2105 
2106 	case IFM_100_TX:
2107 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
2108 		if (hdx)
2109 			gp1 |= GPREG1_WA_HDX;
2110 
2111 		/*
2112 		 * Use extended FIFO depth to work around CRC errors
2113 		 * emitted by chips before the JMC250B.
2114 		 */
2115 		phyconf = JMPHY_CONF_EXTFIFO;
2116 		break;
2117 
2118 	case IFM_1000_T:
2119 		if (sc->jme_caps & JME_CAP_FASTETH)
2120 			break;
2121 
2122 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
2123 		if (hdx)
2124 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2125 		break;
2126 
2127 	default:
2128 		break;
2129 	}
2130 	CSR_WRITE_4(sc, JME_GHC, ghc);
2131 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2132 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
2133 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
2134 
2135 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
2136 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2137 				    JMPHY_CONF, phyconf);
2138 	}
2139 	if (sc->jme_workaround & JME_WA_HDX)
2140 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
2141 }
2142 
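/*
 * Main interrupt handler.  Interrupts are masked while the handler
 * runs; the status is written back with the completion bits folded in,
 * which acknowledges the interrupt and resets the PCC counters/timers,
 * before the RX and TX rings are serviced.
 */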
2143 static void
2144 jme_intr(void *xsc)
2145 {
2146 	struct jme_softc *sc = xsc;
2147 	struct ifnet *ifp = &sc->arpcom.ac_if;
2148 	uint32_t status;
2149 	int r;
2150 
2151 	ASSERT_SERIALIZED(&sc->jme_serialize);
2152 
2153 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2154 	if (status == 0 || status == 0xFFFFFFFF)
2155 		return;
2156 
2157 	/* Disable interrupts. */
2158 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2159 
2160 	status = CSR_READ_4(sc, JME_INTR_STATUS);
2161 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2162 		goto back;
2163 
2164 	/* Reset PCC counter/timer and Ack interrupts. */
2165 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2166 
2167 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
2168 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2169 
2170 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2171 		if (status & jme_rx_status[r].jme_coal) {
2172 			status |= jme_rx_status[r].jme_coal |
2173 				  jme_rx_status[r].jme_comp;
2174 		}
2175 	}
2176 
2177 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2178 
2179 	if (ifp->if_flags & IFF_RUNNING) {
2180 		struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
2181 
2182 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
2183 			jme_rx_intr(sc, status);
2184 
2185 		if (status & INTR_RXQ_DESC_EMPTY) {
2186 			/*
2187 			 * Notify the hardware of the availability of new
2188 			 * Rx buffers.  Reading RXCSR takes a very long time
2189 			 * under heavy load, so cache the RXCSR value and
2190 			 * write it, ORed with the kick command, to the
2191 			 * RXCSR. This saves one register access cycle.
2192 			 */
2193 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2194 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2195 		}
2196 
2197 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
2198 			lwkt_serialize_enter(&tdata->jme_tx_serialize);
2199 			jme_txeof(tdata);
2200 			if (!ifq_is_empty(&ifp->if_snd))
2201 				if_devstart(ifp);
2202 			lwkt_serialize_exit(&tdata->jme_tx_serialize);
2203 		}
2204 	}
2205 back:
2206 	/* Reenable interrupts. */
2207 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2208 }
2209 
2210 static void
2211 jme_txeof(struct jme_txdata *tdata)
2212 {
2213 	struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if;
2214 	int cons;
2215 
2216 	cons = tdata->jme_tx_cons;
2217 	if (cons == tdata->jme_tx_prod)
2218 		return;
2219 
2220 	/*
2221 	 * Go through our Tx list and free mbufs for those
2222 	 * frames which have been transmitted.
2223 	 */
2224 	while (cons != tdata->jme_tx_prod) {
2225 		struct jme_txdesc *txd, *next_txd;
2226 		uint32_t status, next_status;
2227 		int next_cons, nsegs;
2228 
2229 		txd = &tdata->jme_txdesc[cons];
2230 		KASSERT(txd->tx_m != NULL,
2231 			("%s: freeing NULL mbuf!", __func__));
2232 
2233 		status = le32toh(txd->tx_desc->flags);
2234 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2235 			break;
2236 
2237 		/*
2238 		 * NOTE:
2239 		 * This chip will always update the TX descriptor's
2240 		 * This chip always updates the TX descriptor's
2241 		 * buflen field, and this update always happens
2242 		 * after the OWN bit is cleared, so even if the OWN
2243 		 * bit has been cleared by the chip, we still cannot
2244 		 * be sure whether the buflen field has been updated
2245 		 * yet.  To avoid this race, we wait
2246 		 * by the chip before reusing this TX descriptor.
2247 		 */
2248 		next_cons = cons;
2249 		JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt);
2250 		next_txd = &tdata->jme_txdesc[next_cons];
2251 		if (next_txd->tx_m == NULL)
2252 			break;
2253 		next_status = le32toh(next_txd->tx_desc->flags);
2254 		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
2255 			break;
2256 
2257 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2258 			IFNET_STAT_INC(ifp, oerrors, 1);
2259 		} else {
2260 			IFNET_STAT_INC(ifp, opackets, 1);
2261 			if (status & JME_TD_COLLISION) {
2262 				IFNET_STAT_INC(ifp, collisions,
2263 				    le32toh(txd->tx_desc->buflen) &
2264 				    JME_TD_BUF_LEN_MASK);
2265 			}
2266 		}
2267 
2268 		/*
2269 		 * Only the first descriptor of a multi-descriptor
2270 		 * transmission is updated, so the driver has to skip the
2271 		 * entire chain of buffers for the transmitted frame. In
2272 		 * other words, the JME_TD_OWN bit is valid only in the
2273 		 * first descriptor of a multi-descriptor transmission.
2274 		 */
2275 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2276 			tdata->jme_tx_ring[cons].flags = 0;
2277 			JME_DESC_INC(cons, tdata->jme_tx_desc_cnt);
2278 		}
2279 
2280 		/* Reclaim transferred mbufs. */
2281 		bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
2282 		m_freem(txd->tx_m);
2283 		txd->tx_m = NULL;
2284 		tdata->jme_tx_cnt -= txd->tx_ndesc;
2285 		KASSERT(tdata->jme_tx_cnt >= 0,
2286 			("%s: Active Tx desc counter was garbled", __func__));
2287 		txd->tx_ndesc = 0;
2288 	}
2289 	tdata->jme_tx_cons = cons;
2290 
2291 	/* 1 extra for the symbol TX descriptor */
2292 	if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1)
2293 		ifp->if_timer = 0;
2294 
2295 	if (tdata->jme_tx_cnt + JME_TXD_SPARE <=
2296 	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD)
2297 		ifq_clr_oactive(&ifp->if_snd);
2298 }
2299 
2300 static __inline void
2301 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2302 {
2303 	int i;
2304 
2305 	for (i = 0; i < count; ++i) {
2306 		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2307 		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2308 	}
2309 }
2310 
2311 static __inline struct pktinfo *
2312 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2313 {
2314 	if (flags & JME_RD_IPV4)
2315 		pi->pi_netisr = NETISR_IP;
2316 	else if (flags & JME_RD_IPV6)
2317 		pi->pi_netisr = NETISR_IPV6;
2318 	else
2319 		return NULL;
2320 
2321 	pi->pi_flags = 0;
2322 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2323 
2324 	if (flags & JME_RD_MORE_FRAG)
2325 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2326 	else if (flags & JME_RD_TCP)
2327 		pi->pi_l3proto = IPPROTO_TCP;
2328 	else if (flags & JME_RD_UDP)
2329 		pi->pi_l3proto = IPPROTO_UDP;
2330 	else
2331 		pi = NULL;
2332 	return pi;
2333 }
2334 
2335 /* Receive a frame. */
2336 static void
2337 jme_rxpkt(struct jme_rxdata *rdata, int cpuid)
2338 {
2339 	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2340 	struct jme_desc *desc;
2341 	struct jme_rxdesc *rxd;
2342 	struct mbuf *mp, *m;
2343 	uint32_t flags, status, hash, hashinfo;
2344 	int cons, count, nsegs;
2345 
2346 	cons = rdata->jme_rx_cons;
2347 	desc = &rdata->jme_rx_ring[cons];
2348 
2349 	flags = le32toh(desc->flags);
2350 	status = le32toh(desc->buflen);
2351 	hash = le32toh(desc->addr_hi);
2352 	hashinfo = le32toh(desc->addr_lo);
2353 	nsegs = JME_RX_NSEGS(status);
2354 
2355 	if (nsegs > 1) {
2356 		/* Skip the first descriptor. */
2357 		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2358 
2359 		/*
2360 		 * Clear the OWN bit of the following RX descriptors;
2361 		 * the hardware clears the OWN bit of only the first
2362 		 * RX descriptor.
2363 		 *
2364 		 * Since the first RX descriptor is set up, i.e. OWN bit
2365 		 * on, before its following RX descriptors, leaving the
2366 		 * OWN bit on the following RX descriptors will trick
2367 		 * the hardware into thinking that the following RX
2368 		 * descriptors are ready to be used too.
2369 		 */
2370 		for (count = 1; count < nsegs; count++,
2371 		     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2372 			rdata->jme_rx_ring[cons].flags = 0;
2373 
2374 		cons = rdata->jme_rx_cons;
2375 	}
2376 
2377 	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2378 			"hash 0x%08x, hash info 0x%08x\n",
2379 			rdata->jme_rx_idx, flags, hash, hashinfo);
2380 
2381 	if (status & JME_RX_ERR_STAT) {
2382 		IFNET_STAT_INC(ifp, ierrors, 1);
2383 		jme_discard_rxbufs(rdata, cons, nsegs);
2384 #ifdef JME_SHOW_ERRORS
2385 		if_printf(ifp, "%s : receive error = 0x%b\n",
2386 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2387 #endif
2388 		rdata->jme_rx_cons += nsegs;
2389 		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2390 		return;
2391 	}
2392 
2393 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2394 	for (count = 0; count < nsegs; count++,
2395 	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2396 		rxd = &rdata->jme_rxdesc[cons];
2397 		mp = rxd->rx_m;
2398 
2399 		/* Add a new receive buffer to the ring. */
2400 		if (jme_newbuf(rdata, rxd, 0) != 0) {
2401 			IFNET_STAT_INC(ifp, iqdrops, 1);
2402 			/* Reuse buffer. */
2403 			jme_discard_rxbufs(rdata, cons, nsegs - count);
2404 			if (rdata->jme_rxhead != NULL) {
2405 				m_freem(rdata->jme_rxhead);
2406 				JME_RXCHAIN_RESET(rdata);
2407 			}
2408 			break;
2409 		}
2410 
2411 		/*
2412 		 * Assume we've received a full-sized frame. The actual
2413 		 * size is fixed up when we encounter the end of a
2414 		 * multi-segmented frame.
2415 		 */
2416 		mp->m_len = MCLBYTES;
2417 
2418 		/* Chain received mbufs. */
2419 		if (rdata->jme_rxhead == NULL) {
2420 			rdata->jme_rxhead = mp;
2421 			rdata->jme_rxtail = mp;
2422 		} else {
2423 			/*
2424 			 * The receive processor can receive a maximum frame
2425 			 * size of 65535 bytes.
2426 			 */
2427 			rdata->jme_rxtail->m_next = mp;
2428 			rdata->jme_rxtail = mp;
2429 		}
2430 
2431 		if (count == nsegs - 1) {
2432 			struct pktinfo pi0, *pi;
2433 
2434 			/* Last desc. for this frame. */
2435 			m = rdata->jme_rxhead;
2436 			m->m_pkthdr.len = rdata->jme_rxlen;
2437 			if (nsegs > 1) {
2438 				/* Set first mbuf size. */
2439 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2440 				/* Set last mbuf size. */
2441 				mp->m_len = rdata->jme_rxlen -
2442 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2443 				    (MCLBYTES * (nsegs - 2)));
2444 			} else {
2445 				m->m_len = rdata->jme_rxlen;
2446 			}
2447 			m->m_pkthdr.rcvif = ifp;
2448 
2449 			/*
2450 			 * Account for the 10-byte auto padding which is used
2451 			 * to align the IP header on a 32-bit boundary. Also
2452 			 * note that the CRC bytes are automatically removed
2453 			 * by the hardware.
2454 			 */
2455 			m->m_data += JME_RX_PAD_BYTES;
2456 
2457 			/* Set checksum information. */
2458 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2459 			    (flags & JME_RD_IPV4)) {
2460 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2461 				if (flags & JME_RD_IPCSUM)
2462 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2463 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2464 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2465 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2466 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2467 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2468 					m->m_pkthdr.csum_flags |=
2469 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2470 					m->m_pkthdr.csum_data = 0xffff;
2471 				}
2472 			}
2473 
2474 			/* Check for VLAN tagged packets. */
2475 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2476 			    (flags & JME_RD_VLAN_TAG)) {
2477 				m->m_pkthdr.ether_vlantag =
2478 				    flags & JME_RD_VLAN_MASK;
2479 				m->m_flags |= M_VLANTAG;
2480 			}
2481 
2482 			IFNET_STAT_INC(ifp, ipackets, 1);
2483 
2484 			if (ifp->if_capenable & IFCAP_RSS)
2485 				pi = jme_pktinfo(&pi0, flags);
2486 			else
2487 				pi = NULL;
2488 
2489 			if (pi != NULL &&
2490 			    (hashinfo & JME_RD_HASH_FN_MASK) ==
2491 			    JME_RD_HASH_FN_TOEPLITZ) {
2492 				m->m_flags |= (M_HASH | M_CKHASH);
2493 				m->m_pkthdr.hash = toeplitz_hash(hash);
2494 			}
2495 
2496 #ifdef JME_RSS_DEBUG
2497 			if (pi != NULL) {
2498 				JME_RSS_DPRINTF(rdata->jme_sc, 10,
2499 				    "isr %d flags %08x, l3 %d %s\n",
2500 				    pi->pi_netisr, pi->pi_flags,
2501 				    pi->pi_l3proto,
2502 				    (m->m_flags & M_HASH) ? "hash" : "");
2503 			}
2504 #endif
2505 
2506 			/* Pass it on. */
2507 			ether_input_pkt(ifp, m, pi, cpuid);
2508 
2509 			/* Reset mbuf chains. */
2510 			JME_RXCHAIN_RESET(rdata);
2511 #ifdef JME_RSS_DEBUG
2512 			rdata->jme_rx_pkt++;
2513 #endif
2514 		}
2515 	}
2516 
2517 	rdata->jme_rx_cons += nsegs;
2518 	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2519 }
2520 
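/*
 * Drain the RX ring.  A negative count means no limit (the interrupt
 * path passes -1); a non-negative count caps the number of frames
 * processed, as used by the polling path.
 */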
2521 static void
2522 jme_rxeof(struct jme_rxdata *rdata, int count, int cpuid)
2523 {
2524 	struct jme_desc *desc;
2525 	int nsegs, pktlen;
2526 
2527 	for (;;) {
2528 #ifdef IFPOLL_ENABLE
2529 		if (count >= 0 && count-- == 0)
2530 			break;
2531 #endif
2532 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2533 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2534 			break;
2535 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2536 			break;
2537 
2538 		/*
2539 		 * Check the number of segments against the received
2540 		 * bytes. A non-matching value would indicate that the
2541 		 * hardware is still updating the Rx descriptors. I'm
2542 		 * not sure whether this check is really needed.
2543 		 */
2544 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2545 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2546 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2547 			if_printf(&rdata->jme_sc->arpcom.ac_if,
2548 			    "RX fragment count (%d) and "
2549 			    "packet size (%d) mismatch\n", nsegs, pktlen);
2550 			break;
2551 		}
2552 
2553 		/*
2554 		 * NOTE:
2555 		 * RSS hash and hash information may _not_ be set by the
2556 		 * hardware even if the OWN bit is cleared and VALID bit
2557 		 * is set.
2558 		 *
2559 		 * If the RSS information is not delivered by the hardware
2560 		 * yet, we MUST NOT accept this packet, let alone reusing
2561 		 * its RX descriptor.  If this packet were accepted and its
2562 		 * RX descriptor reused before the hardware delivered the
2563 		 * RSS information, the RX buffer's address would be trashed
2564 		 * by the RSS information later written back by the hardware.
2565 		 */
2566 		if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2567 			struct jme_rxdesc *rxd;
2568 			uint32_t hashinfo;
2569 
2570 			hashinfo = le32toh(desc->addr_lo);
2571 			rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2572 
2573 			/*
2574 			 * This test should be enough to detect the pending
2575 			 * RSS information delivery, given:
2576 			 * - If the RSS hash is not calculated, hashinfo
2577 			 *   will be 0.  However, the lower 32 bits of an RX
2578 			 *   buffer's physical address will never be 0.
2579 			 *   (see jme_rxbuf_dma_filter)
2580 			 * - If the RSS hash is calculated, the lowest 4 bits
2581 			 *   of hashinfo will be set, while the RX buffers
2582 			 *   are at least 2K aligned.
2583 			 */
2584 			if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2585 #ifdef JME_SHOW_RSSWB
2586 				if_printf(&rdata->jme_sc->arpcom.ac_if,
2587 				    "RSS is not written back yet\n");
2588 #endif
2589 				break;
2590 			}
2591 		}
2592 
2593 		/* Received a frame. */
2594 		jme_rxpkt(rdata, cpuid);
2595 	}
2596 }
2597 
2598 static void
2599 jme_tick(void *xsc)
2600 {
2601 	struct jme_softc *sc = xsc;
2602 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2603 
2604 	lwkt_serialize_enter(&sc->jme_serialize);
2605 
2606 	KKASSERT(mycpuid == JME_TICK_CPUID);
2607 
2608 	sc->jme_in_tick = TRUE;
2609 	mii_tick(mii);
2610 	sc->jme_in_tick = FALSE;
2611 
2612 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2613 
2614 	lwkt_serialize_exit(&sc->jme_serialize);
2615 }
2616 
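/*
 * Reset the chip.  Besides toggling GHC_RESET, the sequence below
 * gates the TXMAC/TXOFL and RXMAC clock sources off and on around the
 * reset and briefly enables the Tx/Rx engines; the exact ordering
 * appears to be chip-specific magic required to bring the MAC out of
 * reset cleanly.
 */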
2617 static void
2618 jme_reset(struct jme_softc *sc)
2619 {
2620 	uint32_t val;
2621 
2622 	/* Make sure that TX and RX are stopped */
2623 	jme_stop_tx(sc);
2624 	jme_stop_rx(sc);
2625 
2626 	/* Start reset */
2627 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2628 	DELAY(20);
2629 
2630 	/*
2631 	 * Hold the reset bit before stopping the reset.
2632 	 */
2633 
2634 	/* Disable TXMAC and TXOFL clock sources */
2635 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2636 	/* Disable RXMAC clock source */
2637 	val = CSR_READ_4(sc, JME_GPREG1);
2638 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2639 	/* Flush */
2640 	CSR_READ_4(sc, JME_GHC);
2641 
2642 	/* Stop reset */
2643 	CSR_WRITE_4(sc, JME_GHC, 0);
2644 	/* Flush */
2645 	CSR_READ_4(sc, JME_GHC);
2646 
2647 	/*
2648 	 * Clear the reset bit after stopping the reset.
2649 	 */
2650 
2651 	/* Enable TXMAC and TXOFL clock sources */
2652 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2653 	/* Enable RXMAC clock source */
2654 	val = CSR_READ_4(sc, JME_GPREG1);
2655 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2656 	/* Flush */
2657 	CSR_READ_4(sc, JME_GHC);
2658 
2659 	/* Disable TXMAC and TXOFL clock sources */
2660 	CSR_WRITE_4(sc, JME_GHC, 0);
2661 	/* Disable RXMAC clock source */
2662 	val = CSR_READ_4(sc, JME_GPREG1);
2663 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2664 	/* Flush */
2665 	CSR_READ_4(sc, JME_GHC);
2666 
2667 	/* Enable TX and RX */
2668 	val = CSR_READ_4(sc, JME_TXCSR);
2669 	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2670 	val = CSR_READ_4(sc, JME_RXCSR);
2671 	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2672 	/* Flush */
2673 	CSR_READ_4(sc, JME_TXCSR);
2674 	CSR_READ_4(sc, JME_RXCSR);
2675 
2676 	/* Enable TXMAC and TXOFL clock sources */
2677 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2678 	/* Disable RXMAC clock source */
2679 	val = CSR_READ_4(sc, JME_GPREG1);
2680 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2681 	/* Flush */
2682 	CSR_READ_4(sc, JME_GHC);
2683 
2684 	/* Stop TX and RX */
2685 	jme_stop_tx(sc);
2686 	jme_stop_rx(sc);
2687 }
2688 
2689 static void
2690 jme_init(void *xsc)
2691 {
2692 	struct jme_softc *sc = xsc;
2693 	struct ifnet *ifp = &sc->arpcom.ac_if;
2694 	struct mii_data *mii;
2695 	uint8_t eaddr[ETHER_ADDR_LEN];
2696 	bus_addr_t paddr;
2697 	uint32_t reg;
2698 	int error, r;
2699 
2700 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2701 
2702 	/*
2703 	 * Cancel any pending I/O.
2704 	 */
2705 	jme_stop(sc);
2706 
2707 	/*
2708 	 * Reset the chip to a known state.
2709 	 */
2710 	jme_reset(sc);
2711 
2712 	/*
2713 	 * Set up the MSI/MSI-X vector to interrupt mapping.
2714 	 */
2715 	jme_set_msinum(sc);
2716 
2717 	if (JME_ENABLE_HWRSS(sc))
2718 		jme_enable_rss(sc);
2719 	else
2720 		jme_disable_rss(sc);
2721 
2722 	/* Init RX descriptors */
2723 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2724 		error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2725 		if (error) {
2726 			if_printf(ifp, "initialization failed: "
2727 				  "no memory for %dth RX ring.\n", r);
2728 			jme_stop(sc);
2729 			return;
2730 		}
2731 	}
2732 
2733 	/* Init TX descriptors */
2734 	jme_init_tx_ring(&sc->jme_cdata.jme_tx_data);
2735 
2736 	/* Initialize shadow status block. */
2737 	jme_init_ssb(sc);
2738 
2739 	/* Reprogram the station address. */
2740 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2741 	CSR_WRITE_4(sc, JME_PAR0,
2742 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2743 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2744 
2745 	/*
2746 	 * Configure Tx queue.
2747 	 *  Tx priority queue weight value : 0
2748 	 *  Tx FIFO threshold for processing next packet : 16QW
2749 	 *  Maximum Tx DMA length : 512
2750 	 *  Allow Tx DMA burst.
2751 	 */
2752 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2753 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2754 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2755 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2756 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2757 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2758 
2759 	/* Set Tx descriptor counter. */
2760 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt);
2761 
2762 	/* Set Tx ring address to the hardware. */
2763 	paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr;
2764 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2765 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2766 
2767 	/* Configure TxMAC parameters. */
2768 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2769 	reg |= TXMAC_THRESH_1_PKT;
2770 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2771 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2772 
2773 	/*
2774 	 * Configure Rx queue.
2775 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2776 	 *  FIFO threshold for processing next packet : 128QW
2777 	 *  Rx queue 0 select
2778 	 *  Max Rx DMA length : 128
2779 	 *  Rx descriptor retry : 32
2780 	 *  Rx descriptor retry time gap : 256ns
2781 	 *  Don't receive runt/bad frame.
2782 	 */
2783 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2784 #if 0
2785 	/*
2786 	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2787 	 * than 4K bytes will suffer from Rx FIFO overruns. So decrease
2788 	 * the FIFO threshold to reduce FIFO overruns for frames larger
2789 	 * than 4000 bytes.
2790 	 * For best performance with standard-MTU-sized frames, use the
2791 	 * maximum allowable FIFO threshold, 128QW.
2792 	 */
2793 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2794 	    JME_RX_FIFO_SIZE)
2795 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2796 	else
2797 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2798 #else
2799 	/* Improve PCI Express compatibility */
2800 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2801 #endif
2802 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2803 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2804 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2805 	/* XXX TODO DROP_BAD */
2806 
2807 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2808 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2809 
2810 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2811 
2812 		/* Set Rx descriptor counter. */
2813 		CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2814 
2815 		/* Set Rx ring address to the hardware. */
2816 		paddr = rdata->jme_rx_ring_paddr;
2817 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2818 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2819 	}
2820 
2821 	/* Clear receive filter. */
2822 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2823 
2824 	/* Set up the receive filter. */
2825 	jme_set_filter(sc);
2826 	jme_set_vlan(sc);
2827 
2828 	/*
2829 	 * Disable all WOL bits, as WOL can interfere with normal Rx
2830 	 * operation. Also clear WOL detection status bits.
2831 	 */
2832 	reg = CSR_READ_4(sc, JME_PMCS);
2833 	reg &= ~PMCS_WOL_ENB_MASK;
2834 	CSR_WRITE_4(sc, JME_PMCS, reg);
2835 
2836 	/*
2837 	 * Pad 10 bytes right before the received frame. This greatly
2838 	 * helps Rx performance on strict-alignment architectures, as
2839 	 * the frame does not need to be copied to align the payload.
2840 	 */
2841 	reg = CSR_READ_4(sc, JME_RXMAC);
2842 	reg |= RXMAC_PAD_10BYTES;
2843 
2844 	if (ifp->if_capenable & IFCAP_RXCSUM)
2845 		reg |= RXMAC_CSUM_ENB;
2846 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2847 
2848 	/* Configure general purpose reg0 */
2849 	reg = CSR_READ_4(sc, JME_GPREG0);
2850 	reg &= ~GPREG0_PCC_UNIT_MASK;
2851 	/* Set the PCC timer resolution to microseconds. */
2852 	reg |= GPREG0_PCC_UNIT_US;
2853 	/*
2854 	 * Disable all shadow register posting as we have to read
2855 	 * the JME_INTR_STATUS register in jme_intr. It also seems
2856 	 * hard to synchronize the interrupt status between the
2857 	 * hardware and the software with shadow posting, due to the
2858 	 * requirements of bus_dmamap_sync(9).
2859 	 */
2860 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2861 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2862 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2863 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2864 	/* Disable posting of DW0. */
2865 	reg &= ~GPREG0_POST_DW0_ENB;
2866 	/* Clear PME message. */
2867 	reg &= ~GPREG0_PME_ENB;
2868 	/* Set PHY address. */
2869 	reg &= ~GPREG0_PHY_ADDR_MASK;
2870 	reg |= sc->jme_phyaddr;
2871 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2872 
2873 	/* Configure Tx queue 0 packet completion coalescing. */
2874 	jme_set_tx_coal(sc);
2875 
2876 	/* Configure Rx queues packet completion coalescing. */
2877 	jme_set_rx_coal(sc);
2878 
2879 	/* Configure shadow status block but don't enable posting. */
2880 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2881 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2882 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2883 
2884 	/* Disable Timer 1 and Timer 2. */
2885 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2886 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2887 
2888 	/* Configure retry transmit period, retry limit value. */
2889 	CSR_WRITE_4(sc, JME_TXTRHD,
2890 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2891 	    TXTRHD_RT_PERIOD_MASK) |
2892 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2893 	    TXTRHD_RT_LIMIT_MASK));
2894 
2895 #ifdef IFPOLL_ENABLE
2896 	if (!(ifp->if_flags & IFF_NPOLLING))
2897 #endif
2898 	/* Initialize the interrupt mask. */
2899 	jme_enable_intr(sc);
2900 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2901 
2902 	/*
2903 	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
2904 	 * done after a valid link is detected, in jme_miibus_statchg.
2905 	 */
2906 	sc->jme_has_link = FALSE;
2907 
2908 	jme_phy_init(sc);
2909 
2910 	/* Set the current media. */
2911 	mii = device_get_softc(sc->jme_miibus);
2912 	mii_mediachg(mii);
2913 
2914 	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
2915 	    JME_TICK_CPUID);
2916 
2917 	ifp->if_flags |= IFF_RUNNING;
2918 	ifq_clr_oactive(&ifp->if_snd);
2919 }
2920 
2921 static void
2922 jme_stop(struct jme_softc *sc)
2923 {
2924 	struct ifnet *ifp = &sc->arpcom.ac_if;
2925 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
2926 	struct jme_txdesc *txd;
2927 	struct jme_rxdesc *rxd;
2928 	struct jme_rxdata *rdata;
2929 	int i, r;
2930 
2931 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2932 
2933 	/*
2934 	 * Mark the interface down and cancel the watchdog timer.
2935 	 */
2936 	ifp->if_flags &= ~IFF_RUNNING;
2937 	ifq_clr_oactive(&ifp->if_snd);
2938 	ifp->if_timer = 0;
2939 
2940 	callout_stop(&sc->jme_tick_ch);
2941 	sc->jme_has_link = FALSE;
2942 
2943 	/*
2944 	 * Disable interrupts.
2945 	 */
2946 	jme_disable_intr(sc);
2947 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2948 
2949 	/* Disable updating shadow status block. */
2950 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2951 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2952 
2953 	/* Stop receiver, transmitter. */
2954 	jme_stop_rx(sc);
2955 	jme_stop_tx(sc);
2956 
2957 	/*
2958 	 * Free partially finished RX segments.
2959 	 */
2960 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2961 		rdata = &sc->jme_cdata.jme_rx_data[r];
2962 		if (rdata->jme_rxhead != NULL)
2963 			m_freem(rdata->jme_rxhead);
2964 		JME_RXCHAIN_RESET(rdata);
2965 	}
2966 
2967 	/*
2968 	 * Free RX and TX mbufs still in the queues.
2969 	 */
2970 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2971 		rdata = &sc->jme_cdata.jme_rx_data[r];
2972 		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2973 			rxd = &rdata->jme_rxdesc[i];
2974 			if (rxd->rx_m != NULL) {
2975 				bus_dmamap_unload(rdata->jme_rx_tag,
2976 						  rxd->rx_dmamap);
2977 				m_freem(rxd->rx_m);
2978 				rxd->rx_m = NULL;
2979 			}
2980 		}
2981 	}
2982 	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
2983 		txd = &tdata->jme_txdesc[i];
2984 		if (txd->tx_m != NULL) {
2985 			bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
2986 			m_freem(txd->tx_m);
2987 			txd->tx_m = NULL;
2988 			txd->tx_ndesc = 0;
2989 		}
2990 	}
2991 }
2992 
2993 static void
2994 jme_stop_tx(struct jme_softc *sc)
2995 {
2996 	uint32_t reg;
2997 	int i;
2998 
2999 	reg = CSR_READ_4(sc, JME_TXCSR);
3000 	if ((reg & TXCSR_TX_ENB) == 0)
3001 		return;
3002 	reg &= ~TXCSR_TX_ENB;
3003 	CSR_WRITE_4(sc, JME_TXCSR, reg);
3004 	for (i = JME_TIMEOUT; i > 0; i--) {
3005 		DELAY(1);
3006 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3007 			break;
3008 	}
3009 	if (i == 0)
3010 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3011 }
3012 
3013 static void
3014 jme_stop_rx(struct jme_softc *sc)
3015 {
3016 	uint32_t reg;
3017 	int i;
3018 
3019 	reg = CSR_READ_4(sc, JME_RXCSR);
3020 	if ((reg & RXCSR_RX_ENB) == 0)
3021 		return;
3022 	reg &= ~RXCSR_RX_ENB;
3023 	CSR_WRITE_4(sc, JME_RXCSR, reg);
3024 	for (i = JME_TIMEOUT; i > 0; i--) {
3025 		DELAY(1);
3026 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3027 			break;
3028 	}
3029 	if (i == 0)
3030 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3031 }
3032 
3033 static void
3034 jme_init_tx_ring(struct jme_txdata *tdata)
3035 {
3036 	struct jme_txdesc *txd;
3037 	int i;
3038 
3039 	tdata->jme_tx_prod = 0;
3040 	tdata->jme_tx_cons = 0;
3041 	tdata->jme_tx_cnt = 0;
3042 
3043 	bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata));
3044 	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
3045 		txd = &tdata->jme_txdesc[i];
3046 		txd->tx_m = NULL;
3047 		txd->tx_desc = &tdata->jme_tx_ring[i];
3048 		txd->tx_ndesc = 0;
3049 	}
3050 }
3051 
3052 static void
3053 jme_init_ssb(struct jme_softc *sc)
3054 {
3055 	struct jme_chain_data *cd;
3056 
3057 	cd = &sc->jme_cdata;
3058 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
3059 }
3060 
3061 static int
3062 jme_init_rx_ring(struct jme_rxdata *rdata)
3063 {
3064 	struct jme_rxdesc *rxd;
3065 	int i;
3066 
3067 	KKASSERT(rdata->jme_rxhead == NULL &&
3068 		 rdata->jme_rxtail == NULL &&
3069 		 rdata->jme_rxlen == 0);
3070 	rdata->jme_rx_cons = 0;
3071 
3072 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
3073 	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3074 		int error;
3075 
3076 		rxd = &rdata->jme_rxdesc[i];
3077 		rxd->rx_m = NULL;
3078 		rxd->rx_desc = &rdata->jme_rx_ring[i];
3079 		error = jme_newbuf(rdata, rxd, 1);
3080 		if (error)
3081 			return error;
3082 	}
3083 	return 0;
3084 }
3085 
3086 static int
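/*
 * Attach a fresh mbuf cluster to an RX descriptor.  The new buffer is
 * loaded into the spare DMA map first; only on success are the
 * descriptor's map and the spare map swapped, so a load failure
 * leaves the previously attached buffer intact and reusable.
 */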
3087 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
3088 {
3089 	struct mbuf *m;
3090 	bus_dma_segment_t segs;
3091 	bus_dmamap_t map;
3092 	int error, nsegs;
3093 
3094 	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3095 	if (m == NULL)
3096 		return ENOBUFS;
3097 	/*
3098 	 * The JMC250 has a 64-bit boundary alignment limitation, so
3099 	 * jme(4) takes advantage of the hardware's 10-byte padding
3100 	 * feature in order not to copy the entire frame to align the
3101 	 * IP header on a 32-bit boundary.
3102 	 */
3103 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3104 
3105 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
3106 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
3107 			BUS_DMA_NOWAIT);
3108 	if (error) {
3109 		m_freem(m);
3110 		if (init) {
3111 			if_printf(&rdata->jme_sc->arpcom.ac_if,
3112 			    "can't load RX mbuf\n");
3113 		}
3114 		return error;
3115 	}
3116 
3117 	if (rxd->rx_m != NULL) {
3118 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
3119 				BUS_DMASYNC_POSTREAD);
3120 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
3121 	}
3122 	map = rxd->rx_dmamap;
3123 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
3124 	rdata->jme_rx_sparemap = map;
3125 	rxd->rx_m = m;
3126 	rxd->rx_paddr = segs.ds_addr;
3127 
3128 	jme_setup_rxdesc(rxd);
3129 	return 0;
3130 }
3131 
3132 static void
3133 jme_set_vlan(struct jme_softc *sc)
3134 {
3135 	struct ifnet *ifp = &sc->arpcom.ac_if;
3136 	uint32_t reg;
3137 
3138 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3139 
3140 	reg = CSR_READ_4(sc, JME_RXMAC);
3141 	reg &= ~RXMAC_VLAN_ENB;
3142 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3143 		reg |= RXMAC_VLAN_ENB;
3144 	CSR_WRITE_4(sc, JME_RXMAC, reg);
3145 }
3146 
3147 static void
3148 jme_set_filter(struct jme_softc *sc)
3149 {
3150 	struct ifnet *ifp = &sc->arpcom.ac_if;
3151 	struct ifmultiaddr *ifma;
3152 	uint32_t crc;
3153 	uint32_t mchash[2];
3154 	uint32_t rxcfg;
3155 
3156 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3157 
3158 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
3159 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3160 	    RXMAC_ALLMULTI);
3161 
3162 	/*
3163 	 * Always accept frames destined to our station address.
3164 	 * Always accept broadcast frames.
3165 	 */
3166 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3167 
3168 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3169 		if (ifp->if_flags & IFF_PROMISC)
3170 			rxcfg |= RXMAC_PROMISC;
3171 		if (ifp->if_flags & IFF_ALLMULTI)
3172 			rxcfg |= RXMAC_ALLMULTI;
3173 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3174 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3175 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3176 		return;
3177 	}
3178 
3179 	/*
3180 	 * Set up the multicast address filter by passing all multicast
3181 	 * addresses through a CRC generator, and then using the low-order
3182 	 * 6 bits as an index into the 64 bit multicast hash table.  The
3183 	 * high order bits select the register, while the rest of the bits
3184 	 * select the bit within the register.
3185 	 */
3186 	rxcfg |= RXMAC_MULTICAST;
3187 	bzero(mchash, sizeof(mchash));
3188 
3189 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3190 		if (ifma->ifma_addr->sa_family != AF_LINK)
3191 			continue;
3192 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3193 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3194 
3195 		/* Just want the 6 least significant bits. */
3196 		crc &= 0x3f;
3197 
3198 		/* Set the corresponding bit in the hash table. */
3199 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
3200 	}
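	/*
	 * For example, an address whose big-endian CRC32 has low
	 * 6 bits 0x2b (43) sets bit 11 of mchash[1], i.e. bit 11
	 * of MAR1, since 43 >> 5 == 1 and 43 & 0x1f == 11.
	 */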
3201 
3202 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3203 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3204 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3205 }
3206 
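/*
 * Sysctl handlers for the Tx/Rx coalescing parameters.  A new value is
 * range-checked and, if the interface is running, pushed to the
 * hardware immediately via jme_set_tx_coal()/jme_set_rx_coal().  For
 * example (assuming the usual dev.jme.<unit> sysctl tree; the exact
 * node names depend on how the oids are attached elsewhere):
 *
 *	sysctl dev.jme.0.tx_coal_to=<timeout>
 */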
3207 static int
3208 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3209 {
3210 	struct jme_softc *sc = arg1;
3211 	struct ifnet *ifp = &sc->arpcom.ac_if;
3212 	int error, v;
3213 
3214 	ifnet_serialize_all(ifp);
3215 
3216 	v = sc->jme_tx_coal_to;
3217 	error = sysctl_handle_int(oidp, &v, 0, req);
3218 	if (error || req->newptr == NULL)
3219 		goto back;
3220 
3221 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3222 		error = EINVAL;
3223 		goto back;
3224 	}
3225 
3226 	if (v != sc->jme_tx_coal_to) {
3227 		sc->jme_tx_coal_to = v;
3228 		if (ifp->if_flags & IFF_RUNNING)
3229 			jme_set_tx_coal(sc);
3230 	}
3231 back:
3232 	ifnet_deserialize_all(ifp);
3233 	return error;
3234 }
3235 
3236 static int
3237 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3238 {
3239 	struct jme_softc *sc = arg1;
3240 	struct ifnet *ifp = &sc->arpcom.ac_if;
3241 	int error, v;
3242 
3243 	ifnet_serialize_all(ifp);
3244 
3245 	v = sc->jme_tx_coal_pkt;
3246 	error = sysctl_handle_int(oidp, &v, 0, req);
3247 	if (error || req->newptr == NULL)
3248 		goto back;
3249 
3250 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3251 		error = EINVAL;
3252 		goto back;
3253 	}
3254 
3255 	if (v != sc->jme_tx_coal_pkt) {
3256 		sc->jme_tx_coal_pkt = v;
3257 		if (ifp->if_flags & IFF_RUNNING)
3258 			jme_set_tx_coal(sc);
3259 	}
3260 back:
3261 	ifnet_deserialize_all(ifp);
3262 	return error;
3263 }
3264 
3265 static int
3266 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3267 {
3268 	struct jme_softc *sc = arg1;
3269 	struct ifnet *ifp = &sc->arpcom.ac_if;
3270 	int error, v;
3271 
3272 	ifnet_serialize_all(ifp);
3273 
3274 	v = sc->jme_rx_coal_to;
3275 	error = sysctl_handle_int(oidp, &v, 0, req);
3276 	if (error || req->newptr == NULL)
3277 		goto back;
3278 
3279 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3280 		error = EINVAL;
3281 		goto back;
3282 	}
3283 
3284 	if (v != sc->jme_rx_coal_to) {
3285 		sc->jme_rx_coal_to = v;
3286 		if (ifp->if_flags & IFF_RUNNING)
3287 			jme_set_rx_coal(sc);
3288 	}
3289 back:
3290 	ifnet_deserialize_all(ifp);
3291 	return error;
3292 }
3293 
3294 static int
3295 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3296 {
3297 	struct jme_softc *sc = arg1;
3298 	struct ifnet *ifp = &sc->arpcom.ac_if;
3299 	int error, v;
3300 
3301 	ifnet_serialize_all(ifp);
3302 
3303 	v = sc->jme_rx_coal_pkt;
3304 	error = sysctl_handle_int(oidp, &v, 0, req);
3305 	if (error || req->newptr == NULL)
3306 		goto back;
3307 
3308 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3309 		error = EINVAL;
3310 		goto back;
3311 	}
3312 
3313 	if (v != sc->jme_rx_coal_pkt) {
3314 		sc->jme_rx_coal_pkt = v;
3315 		if (ifp->if_flags & IFF_RUNNING)
3316 			jme_set_rx_coal(sc);
3317 	}
3318 back:
3319 	ifnet_deserialize_all(ifp);
3320 	return error;
3321 }
3322 
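/*
 * Program Tx packet completion coalescing.  The timeout and packet
 * count are packed into separate bit fields of the PCCTX register,
 * e.g. (with hypothetical values to = 250 and pkt = 64):
 *
 *	reg = ((250 << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK) |
 *	    ((64 << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK) |
 *	    PCCTX_COAL_TXQ0;
 *
 * so the chip presumably raises the TXQ completion interrupt when
 * either limit is hit first.
 */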
3323 static void
3324 jme_set_tx_coal(struct jme_softc *sc)
3325 {
3326 	uint32_t reg;
3327 
3328 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3329 	    PCCTX_COAL_TO_MASK;
3330 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3331 	    PCCTX_COAL_PKT_MASK;
3332 	reg |= PCCTX_COAL_TXQ0;
3333 	CSR_WRITE_4(sc, JME_PCCTX, reg);
3334 }
3335 
3336 static void
3337 jme_set_rx_coal(struct jme_softc *sc)
3338 {
3339 	uint32_t reg;
3340 	int r;
3341 
3342 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3343 	    PCCRX_COAL_TO_MASK;
3344 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3345 	    PCCRX_COAL_PKT_MASK;
3346 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3347 		CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3348 }
3349 
3350 #ifdef IFPOLL_ENABLE
3351 
3352 static void
3353 jme_npoll_status(struct ifnet *ifp)
3354 {
3355 	struct jme_softc *sc = ifp->if_softc;
3356 	uint32_t status;
3357 
3358 	ASSERT_SERIALIZED(&sc->jme_serialize);
3359 
3360 	status = CSR_READ_4(sc, JME_INTR_STATUS);
3361 	if (status & INTR_RXQ_DESC_EMPTY) {
3362 		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3363 		jme_rx_restart(sc, status);
3364 	}
3365 }
3366 
3367 static void
3368 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3369 {
3370 	struct jme_rxdata *rdata = arg;
3371 
3372 	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3373 
3374 	jme_rxeof(rdata, cycle, mycpuid);
3375 }
3376 
3377 static void
3378 jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
3379 {
3380 	struct jme_txdata *tdata = arg;
3381 
3382 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3383 
3384 	jme_txeof(tdata);
3385 	if (!ifq_is_empty(&ifp->if_snd))
3386 		if_devstart(ifp);
3387 }
3388 
3389 static void
3390 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3391 {
3392 	struct jme_softc *sc = ifp->if_softc;
3393 
3394 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3395 
3396 	if (info) {
3397 		int i, off;
3398 
3399 		info->ifpi_status.status_func = jme_npoll_status;
3400 		info->ifpi_status.serializer = &sc->jme_serialize;
3401 
3402 		off = sc->jme_npoll_txoff;
3403 		KKASSERT(off <= ncpus2);
3404 		info->ifpi_tx[off].poll_func = jme_npoll_tx;
3405 		info->ifpi_tx[off].arg = &sc->jme_cdata.jme_tx_data;
3406 		info->ifpi_tx[off].serializer =
3407 		    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3408 		ifq_set_cpuid(&ifp->if_snd, sc->jme_npoll_txoff);
3409 
3410 		off = sc->jme_npoll_rxoff;
3411 		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3412 			struct jme_rxdata *rdata =
3413 			    &sc->jme_cdata.jme_rx_data[i];
3414 			int idx = i + off;
3415 
3416 			info->ifpi_rx[idx].poll_func = jme_npoll_rx;
3417 			info->ifpi_rx[idx].arg = rdata;
3418 			info->ifpi_rx[idx].serializer =
3419 			    &rdata->jme_rx_serialize;
3420 		}
3421 
3422 		if (ifp->if_flags & IFF_RUNNING)
3423 			jme_disable_intr(sc);
3424 	} else {
3425 		ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
3426 		if (ifp->if_flags & IFF_RUNNING)
3427 			jme_enable_intr(sc);
3428 	}
3429 }
3430 
3431 static int
3432 jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3433 {
3434 	struct jme_softc *sc = (void *)arg1;
3435 	struct ifnet *ifp = &sc->arpcom.ac_if;
3436 	int error, off;
3437 
3438 	off = sc->jme_npoll_rxoff;
3439 	error = sysctl_handle_int(oidp, &off, 0, req);
3440 	if (error || req->newptr == NULL)
3441 		return error;
3442 	if (off < 0)
3443 		return EINVAL;
3444 
3445 	ifnet_serialize_all(ifp);
3446 	if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3447 		error = EINVAL;
3448 	} else {
3449 		error = 0;
3450 		sc->jme_npoll_rxoff = off;
3451 	}
3452 	ifnet_deserialize_all(ifp);
3453 
3454 	return error;
3455 }
3456 
3457 static int
3458 jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3459 {
3460 	struct jme_softc *sc = (void *)arg1;
3461 	struct ifnet *ifp = &sc->arpcom.ac_if;
3462 	int error, off;
3463 
3464 	off = sc->jme_npoll_txoff;
3465 	error = sysctl_handle_int(oidp, &off, 0, req);
3466 	if (error || req->newptr == NULL)
3467 		return error;
3468 	if (off < 0)
3469 		return EINVAL;
3470 
3471 	ifnet_serialize_all(ifp);
3472 	if (off >= ncpus2) {
3473 		error = EINVAL;
3474 	} else {
3475 		error = 0;
3476 		sc->jme_npoll_txoff = off;
3477 	}
3478 	ifnet_deserialize_all(ifp);
3479 
3480 	return error;
3481 }
3482 
3483 #endif	/* IFPOLL_ENABLE */
3484 
3485 static int
3486 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3487 {
3488 	bus_dmamem_t dmem;
3489 	int error, asize;
3490 
3491 	asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3492 	error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3493 			JME_RX_RING_ALIGN, 0,
3494 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3495 			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3496 	if (error) {
3497 		device_printf(rdata->jme_sc->jme_dev,
3498 		    "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3499 		return error;
3500 	}
3501 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3502 	rdata->jme_rx_ring_map = dmem.dmem_map;
3503 	rdata->jme_rx_ring = dmem.dmem_addr;
3504 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3505 
3506 	return 0;
3507 }
3508 
3509 static int
3510 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3511 {
3512 	if ((paddr & 0xffffffff) == 0) {
3513 		/*
3514 		 * Don't allow the lower 32 bits of the RX buffer's
3515 		 * physical address to be 0; otherwise it would break
3516 		 * the pending RSS information delivery detection on
3517 		 * the RX path.
3518 		 */
3519 		return 1;
3520 	}
3521 	return 0;
3522 }
3523 
3524 static int
3525 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3526 {
3527 	bus_addr_t lowaddr;
3528 	int i, error;
3529 
3530 	lowaddr = BUS_SPACE_MAXADDR;
3531 	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
3532 		/* jme_rxbuf_dma_filter will be called */
3533 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
3534 	}
3535 
3536 	/* Create tag for Rx buffers. */
3537 	error = bus_dma_tag_create(
3538 	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3539 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3540 	    lowaddr,			/* lowaddr */
3541 	    BUS_SPACE_MAXADDR,		/* highaddr */
3542 	    jme_rxbuf_dma_filter, NULL,	/* filter, filterarg */
3543 	    MCLBYTES,			/* maxsize */
3544 	    1,				/* nsegments */
3545 	    MCLBYTES,			/* maxsegsize */
3546 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3547 	    &rdata->jme_rx_tag);
3548 	if (error) {
3549 		device_printf(rdata->jme_sc->jme_dev,
3550 		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3551 		return error;
3552 	}
3553 
3554 	/* Create DMA maps for Rx buffers. */
3555 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3556 				  &rdata->jme_rx_sparemap);
3557 	if (error) {
3558 		device_printf(rdata->jme_sc->jme_dev,
3559 		    "could not create %dth spare Rx dmamap.\n",
3560 		    rdata->jme_rx_idx);
3561 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3562 		rdata->jme_rx_tag = NULL;
3563 		return error;
3564 	}
3565 	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3566 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3567 
3568 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3569 					  &rxd->rx_dmamap);
3570 		if (error) {
3571 			int j;
3572 
3573 			device_printf(rdata->jme_sc->jme_dev,
3574 			    "could not create %dth Rx dmamap "
3575 			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3576 
3577 			for (j = 0; j < i; ++j) {
3578 				rxd = &rdata->jme_rxdesc[j];
3579 				bus_dmamap_destroy(rdata->jme_rx_tag,
3580 						   rxd->rx_dmamap);
3581 			}
3582 			bus_dmamap_destroy(rdata->jme_rx_tag,
3583 					   rdata->jme_rx_sparemap);
3584 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3585 			rdata->jme_rx_tag = NULL;
3586 			return error;
3587 		}
3588 	}
3589 	return 0;
3590 }
3591 
3592 static void
3593 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3594 {
3595 	int r, cpuid = mycpuid;
3596 
3597 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3598 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3599 
3600 		if (status & rdata->jme_rx_coal) {
3601 			lwkt_serialize_enter(&rdata->jme_rx_serialize);
3602 			jme_rxeof(rdata, -1, cpuid);
3603 			lwkt_serialize_exit(&rdata->jme_rx_serialize);
3604 		}
3605 	}
3606 }
3607 
3608 static void
3609 jme_enable_rss(struct jme_softc *sc)
3610 {
3611 	uint32_t rssc, ind;
3612 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3613 	int i;
3614 
3615 	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3616 		sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3617 		("%s: invalid # of RX rings (%d)",
3618 		 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3619 
3620 	rssc = RSSC_HASH_64_ENTRY;
3621 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3622 	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3623 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3624 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3625 
3626 	toeplitz_get_key(key, sizeof(key));
3627 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3628 		uint32_t keyreg;
3629 
3630 		keyreg = RSSKEY_REGVAL(key, i);
3631 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x, reg 0x%08x\n",
3632 		    i, keyreg, RSSKEY_REG(RSSKEY_NREGS - 1 - i));
3633 
3634 		CSR_WRITE_4(sc, RSSKEY_REG(RSSKEY_NREGS - 1 - i), keyreg);
3635 	}
3636 
3637 	/*
3638 	 * Create the redirect table in the following fashion:
3639 	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3640 	 */
3641 	ind = 0;
3642 	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3643 		int q;
3644 
3645 		q = i % sc->jme_cdata.jme_rx_ring_cnt;
3646 		ind |= q << (i * 8);
3647 	}
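	/*
	 * For example, assuming RSSTBL_REGSIZE is 4 (one byte per
	 * entry), each 32-bit redirect table register becomes
	 * 0x03020100 with 4 RX rings and 0x01000100 with 2 RX rings.
	 */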
3648 	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3649 
3650 	for (i = 0; i < RSSTBL_NREGS; ++i)
3651 		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3652 }
3653 
3654 static void
3655 jme_disable_rss(struct jme_softc *sc)
3656 {
3657 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3658 }
3659 
3660 static void
3661 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3662 {
3663 	struct jme_softc *sc = ifp->if_softc;
3664 
3665 	ifnet_serialize_array_enter(sc->jme_serialize_arr,
3666 	    sc->jme_serialize_cnt, slz);
3667 }
3668 
3669 static void
3670 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3671 {
3672 	struct jme_softc *sc = ifp->if_softc;
3673 
3674 	ifnet_serialize_array_exit(sc->jme_serialize_arr,
3675 	    sc->jme_serialize_cnt, slz);
3676 }
3677 
3678 static int
3679 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3680 {
3681 	struct jme_softc *sc = ifp->if_softc;
3682 
3683 	return ifnet_serialize_array_try(sc->jme_serialize_arr,
3684 	    sc->jme_serialize_cnt, slz);
3685 }
3686 
3687 #ifdef INVARIANTS
3688 
3689 static void
3690 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3691     boolean_t serialized)
3692 {
3693 	struct jme_softc *sc = ifp->if_softc;
3694 
3695 	ifnet_serialize_array_assert(sc->jme_serialize_arr,
3696 	    sc->jme_serialize_cnt, slz, serialized);
3697 }
3698 
3699 #endif	/* INVARIANTS */
3700 
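/*
 * MSI-X vector layout (vector 0 is deliberately left unused): vector 1
 * carries chip status (the per-ring "RX queue empty" interrupts), vector 2
 * the TX interrupts, and vectors 3..N one RX ring each.  The jme_msinum[]
 * route table built below maps every interrupt source to its vector.
 */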
3701 static void
3702 jme_msix_try_alloc(device_t dev)
3703 {
3704 	struct jme_softc *sc = device_get_softc(dev);
3705 	struct jme_msix_data *msix;
3706 	int error, i, r, msix_enable, msix_count;
3707 	int offset, offset_def;
3708 
3709 	msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
3710 	KKASSERT(msix_count <= JME_NMSIX);
3711 
3712 	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3713 
3714 	/*
3715 	 * We leave the 1st MSI-X vector unused, so we
3716 	 * actually need msix_count + 1 MSI-X vectors.
3717 	 */
3718 	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3719 		return;
3720 
3721 	for (i = 0; i < msix_count; ++i)
3722 		sc->jme_msix[i].jme_msix_rid = -1;
3723 
3724 	i = 0;
3725 
3726 	/*
3727 	 * Set up the status MSI-X vector
3728 	 */
3729 
3730 	msix = &sc->jme_msix[i++];
3731 	msix->jme_msix_cpuid = 0;
3732 	msix->jme_msix_arg = sc;
3733 	msix->jme_msix_func = jme_msix_status;
3734 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3735 		msix->jme_msix_intrs |=
3736 		    sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
3737 	}
3738 	msix->jme_msix_serialize = &sc->jme_serialize;
3739 	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
3740 	    device_get_nameunit(dev));
3741 
3742 	/*
3743 	 * Set up the TX MSI-X vector
3744 	 */
3745 
3746 	offset_def = device_get_unit(dev) % ncpus2;
3747 	offset = device_getenv_int(dev, "msix.txoff", offset_def);
3748 	if (offset >= ncpus2) {
3749 		device_printf(dev, "invalid msix.txoff %d, use %d\n",
3750 		    offset, offset_def);
3751 		offset = offset_def;
3752 	}
3753 
3754 	msix = &sc->jme_msix[i++];
3755 	msix->jme_msix_cpuid = offset;
3756 	sc->jme_tx_cpuid = msix->jme_msix_cpuid;
3757 	msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data;
3758 	msix->jme_msix_func = jme_msix_tx;
3759 	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3760 	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3761 	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3762 	    device_get_nameunit(dev));
3763 
3764 	/*
3765 	 * Set up the RX MSI-X vectors, one per RX ring
3766 	 */
3767 
3768 	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
3769 		offset = 0;
3770 	} else {
3771 		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
3772 		    device_get_unit(dev)) % ncpus2;
3773 
3774 		offset = device_getenv_int(dev, "msix.rxoff", offset_def);
3775 		if (offset >= ncpus2 ||
3776 		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3777 			device_printf(dev, "invalid msix.rxoff %d, use %d\n",
3778 			    offset, offset_def);
3779 			offset = offset_def;
3780 		}
3781 	}
3782 
3783 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3784 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3785 
3786 		msix = &sc->jme_msix[i++];
3787 		msix->jme_msix_cpuid = r + offset;
3788 		KKASSERT(msix->jme_msix_cpuid < ncpus2);
3789 		msix->jme_msix_arg = rdata;
3790 		msix->jme_msix_func = jme_msix_rx;
3791 		msix->jme_msix_intrs = rdata->jme_rx_coal;
3792 		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3793 		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3794 		    "%s rx%d", device_get_nameunit(dev), r);
3795 	}
3796 
3797 	KKASSERT(i == msix_count);
3798 
3799 	error = pci_setup_msix(dev);
3800 	if (error)
3801 		return;
3802 
3803 	/* Set jme_msix_cnt early, so we can clean up on any failure below */
3804 	sc->jme_msix_cnt = msix_count;
3805 
3806 	for (i = 0; i < msix_count; ++i) {
3807 		msix = &sc->jme_msix[i];
3808 
3809 		msix->jme_msix_vector = i + 1;
3810 		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3811 		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
3812 		if (error)
3813 			goto back;
3814 
3815 		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3816 		    &msix->jme_msix_rid, RF_ACTIVE);
3817 		if (msix->jme_msix_res == NULL) {
3818 			error = ENOMEM;
3819 			goto back;
3820 		}
3821 	}
3822 
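	/*
	 * Build the MSI number route table: interrupt source i gets a 4-bit
	 * field holding its MSI-X vector, packed JME_MSINUM_FACTOR sources
	 * per 32-bit MSINUM register (reg = i / JME_MSINUM_FACTOR,
	 * shift = (i % JME_MSINUM_FACTOR) * 4).
	 */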
3823 	for (i = 0; i < JME_INTR_CNT; ++i) {
3824 		uint32_t intr_mask = (1 << i);
3825 		int x;
3826 
3827 		if ((JME_INTRS & intr_mask) == 0)
3828 			continue;
3829 
3830 		for (x = 0; x < msix_count; ++x) {
3831 			msix = &sc->jme_msix[x];
3832 			if (msix->jme_msix_intrs & intr_mask) {
3833 				int reg, shift;
3834 
3835 				reg = i / JME_MSINUM_FACTOR;
3836 				KKASSERT(reg < JME_MSINUM_CNT);
3837 
3838 				shift = (i % JME_MSINUM_FACTOR) * 4;
3839 
3840 				sc->jme_msinum[reg] |=
3841 				    (msix->jme_msix_vector << shift);
3842 
3843 				break;
3844 			}
3845 		}
3846 	}
3847 
3848 	if (bootverbose) {
3849 		for (i = 0; i < JME_MSINUM_CNT; ++i) {
3850 			device_printf(dev, "MSINUM%d: %#x\n", i,
3851 			    sc->jme_msinum[i]);
3852 		}
3853 	}
3854 
3855 	pci_enable_msix(dev);
3856 	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3857 
3858 back:
3859 	if (error)
3860 		jme_msix_free(dev);
3861 }
3862 
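/*
 * Interrupt allocation policy: try MSI-X first; when that is unavailable,
 * fall back to a single MSI or legacy INTx vector via pci_alloc_1intr().
 */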
3863 static int
3864 jme_intr_alloc(device_t dev)
3865 {
3866 	struct jme_softc *sc = device_get_softc(dev);
3867 	u_int irq_flags;
3868 
3869 	jme_msix_try_alloc(dev);
3870 
3871 	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3872 		sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3873 		    &sc->jme_irq_rid, &irq_flags);
3874 
3875 		sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3876 		    &sc->jme_irq_rid, irq_flags);
3877 		if (sc->jme_irq_res == NULL) {
3878 			device_printf(dev, "can't allocate irq\n");
3879 			return ENXIO;
3880 		}
3881 		sc->jme_tx_cpuid = rman_get_cpuid(sc->jme_irq_res);
3882 	}
3883 	return 0;
3884 }
3885 
3886 static void
3887 jme_msix_free(device_t dev)
3888 {
3889 	struct jme_softc *sc = device_get_softc(dev);
3890 	int i;
3891 
3892 	KKASSERT(sc->jme_msix_cnt > 1);
3893 
3894 	for (i = 0; i < sc->jme_msix_cnt; ++i) {
3895 		struct jme_msix_data *msix = &sc->jme_msix[i];
3896 
3897 		if (msix->jme_msix_res != NULL) {
3898 			bus_release_resource(dev, SYS_RES_IRQ,
3899 			    msix->jme_msix_rid, msix->jme_msix_res);
3900 			msix->jme_msix_res = NULL;
3901 		}
3902 		if (msix->jme_msix_rid >= 0) {
3903 			pci_release_msix_vector(dev, msix->jme_msix_rid);
3904 			msix->jme_msix_rid = -1;
3905 		}
3906 	}
3907 	pci_teardown_msix(dev);
3908 }
3909 
3910 static void
3911 jme_intr_free(device_t dev)
3912 {
3913 	struct jme_softc *sc = device_get_softc(dev);
3914 
3915 	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3916 		if (sc->jme_irq_res != NULL) {
3917 			bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3918 					     sc->jme_irq_res);
3919 		}
3920 		if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3921 			pci_release_msi(dev);
3922 	} else {
3923 		jme_msix_free(dev);
3924 	}
3925 }
3926 
3927 static void
3928 jme_msix_tx(void *xtdata)
3929 {
3930 	struct jme_txdata *tdata = xtdata;
3931 	struct jme_softc *sc = tdata->jme_sc;
3932 	struct ifnet *ifp = &sc->arpcom.ac_if;
3933 
3934 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3935 
3936 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3937 
3938 	CSR_WRITE_4(sc, JME_INTR_STATUS,
3939 	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3940 
3941 	if (ifp->if_flags & IFF_RUNNING) {
3942 		jme_txeof(tdata);
3943 		if (!ifq_is_empty(&ifp->if_snd))
3944 			if_devstart(ifp);
3945 	}
3946 
3947 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3948 }
3949 
3950 static void
3951 jme_msix_rx(void *xrdata)
3952 {
3953 	struct jme_rxdata *rdata = xrdata;
3954 	struct jme_softc *sc = rdata->jme_sc;
3955 	struct ifnet *ifp = &sc->arpcom.ac_if;
3956 
3957 	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3958 
3959 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);
3960 
3961 	CSR_WRITE_4(sc, JME_INTR_STATUS,
3962 	    rdata->jme_rx_coal | rdata->jme_rx_comp);
3963 
3964 	if (ifp->if_flags & IFF_RUNNING)
3965 		jme_rxeof(rdata, -1, mycpuid);
3966 
3967 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
3968 }
3969 
3970 static void
3971 jme_msix_status(void *xsc)
3972 {
3973 	struct jme_softc *sc = xsc;
3974 	struct ifnet *ifp = &sc->arpcom.ac_if;
3975 	uint32_t status;
3976 
3977 	ASSERT_SERIALIZED(&sc->jme_serialize);
3978 
3979 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);
3980 
3981 	status = CSR_READ_4(sc, JME_INTR_STATUS);
3982 
3983 	if (status & INTR_RXQ_DESC_EMPTY) {
3984 		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3985 		if (ifp->if_flags & IFF_RUNNING)
3986 			jme_rx_restart(sc, status);
3987 	}
3988 
3989 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
3990 }
3991 
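/*
 * Recover from an "RX descriptor ring empty" condition: drain and refill
 * each ring whose empty bit is set in the status word, then re-enable RX
 * and restart the RX queues.
 */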
3992 static void
3993 jme_rx_restart(struct jme_softc *sc, uint32_t status)
3994 {
3995 	int i, cpuid = mycpuid;
3996 
3997 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3998 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
3999 
4000 		if (status & rdata->jme_rx_empty) {
4001 			lwkt_serialize_enter(&rdata->jme_rx_serialize);
4002 			jme_rxeof(rdata, -1, cpuid);
4003 #ifdef JME_RSS_DEBUG
4004 			rdata->jme_rx_emp++;
4005 #endif
4006 			lwkt_serialize_exit(&rdata->jme_rx_serialize);
4007 		}
4008 	}
4009 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
4010 	    RXCSR_RXQ_START);
4011 }
4012 
4013 static void
4014 jme_set_msinum(struct jme_softc *sc)
4015 {
4016 	int i;
4017 
4018 	for (i = 0; i < JME_MSINUM_CNT; ++i)
4019 		CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
4020 }
4021 
4022 static int
4023 jme_intr_setup(device_t dev)
4024 {
4025 	struct jme_softc *sc = device_get_softc(dev);
4026 	int error;
4027 
4028 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
4029 		return jme_msix_setup(dev);
4030 
4031 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
4032 	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
4033 	if (error) {
4034 		device_printf(dev, "could not set up interrupt handler.\n");
4035 		return error;
4036 	}
4037 
4038 	return 0;
4039 }
4040 
4041 static void
4042 jme_intr_teardown(device_t dev)
4043 {
4044 	struct jme_softc *sc = device_get_softc(dev);
4045 
4046 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
4047 		jme_msix_teardown(dev, sc->jme_msix_cnt);
4048 	else
4049 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
4050 }
4051 
4052 static int
4053 jme_msix_setup(device_t dev)
4054 {
4055 	struct jme_softc *sc = device_get_softc(dev);
4056 	int x;
4057 
4058 	for (x = 0; x < sc->jme_msix_cnt; ++x) {
4059 		struct jme_msix_data *msix = &sc->jme_msix[x];
4060 		int error;
4061 
4062 		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
4063 		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
4064 		    &msix->jme_msix_handle, msix->jme_msix_serialize,
4065 		    msix->jme_msix_desc);
4066 		if (error) {
4067 			device_printf(dev, "could not set up %s "
4068 			    "interrupt handler.\n", msix->jme_msix_desc);
4069 			jme_msix_teardown(dev, x);
4070 			return error;
4071 		}
4072 	}
4073 	return 0;
4074 }
4075 
4076 static void
4077 jme_msix_teardown(device_t dev, int msix_count)
4078 {
4079 	struct jme_softc *sc = device_get_softc(dev);
4080 	int x;
4081 
4082 	for (x = 0; x < msix_count; ++x) {
4083 		struct jme_msix_data *msix = &sc->jme_msix[x];
4084 
4085 		bus_teardown_intr(dev, msix->jme_msix_res,
4086 		    msix->jme_msix_handle);
4087 	}
4088 }
4089 
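/*
 * The "skipmain" variants hold every serializer in jme_serialize_arr except
 * the first entry, presumably sc->jme_serialize itself; the trailing 1 is
 * the array index to start from.
 */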
4090 static void
4091 jme_serialize_skipmain(struct jme_softc *sc)
4092 {
4093 	lwkt_serialize_array_enter(sc->jme_serialize_arr,
4094 	    sc->jme_serialize_cnt, 1);
4095 }
4096 
4097 static void
4098 jme_deserialize_skipmain(struct jme_softc *sc)
4099 {
4100 	lwkt_serialize_array_exit(sc->jme_serialize_arr,
4101 	    sc->jme_serialize_cnt, 1);
4102 }
4103 
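/*
 * Ordering note: handlers are marked enabled before the hardware interrupt
 * mask is opened, and the mask is closed before handlers are disabled, so
 * an interrupt never fires into a disabled handler.
 */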
4104 static void
4105 jme_enable_intr(struct jme_softc *sc)
4106 {
4107 	int i;
4108 
4109 	for (i = 0; i < sc->jme_serialize_cnt; ++i)
4110 		lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);
4111 
4112 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
4113 }
4114 
4115 static void
4116 jme_disable_intr(struct jme_softc *sc)
4117 {
4118 	int i;
4119 
4120 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
4121 
4122 	for (i = 0; i < sc->jme_serialize_cnt; ++i)
4123 		lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);
4124 }
4125 
4126 static void
4127 jme_phy_poweron(struct jme_softc *sc)
4128 {
4129 	uint16_t bmcr;
4130 
4131 	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
4132 	bmcr &= ~BMCR_PDOWN;
4133 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
4134 
4135 	if (sc->jme_caps & JME_CAP_PHYPWR) {
4136 		uint32_t val;
4137 
4138 		val = CSR_READ_4(sc, JME_PHYPWR);
4139 		val &= ~(PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
4140 		    PHYPWR_DOWN2 | PHYPWR_CLKSEL);
4141 		CSR_WRITE_4(sc, JME_PHYPWR, val);
4142 
4143 		val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
4144 		val &= ~PE1_GPREG0_PHYBG;
4145 		val |= PE1_GPREG0_ENBG;
4146 		pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
4147 	}
4148 }
4149 
4150 static void
4151 jme_phy_poweroff(struct jme_softc *sc)
4152 {
4153 	uint16_t bmcr;
4154 
4155 	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
4156 	bmcr |= BMCR_PDOWN;
4157 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
4158 
4159 	if (sc->jme_caps & JME_CAP_PHYPWR) {
4160 		uint32_t val;
4161 
4162 		val = CSR_READ_4(sc, JME_PHYPWR);
4163 		val |= PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
4164 		    PHYPWR_DOWN2 | PHYPWR_CLKSEL;
4165 		CSR_WRITE_4(sc, JME_PHYPWR, val);
4166 
4167 		val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
4168 		val &= ~PE1_GPREG0_PHYBG;
4169 		val |= PE1_GPREG0_PDD3COLD;
4170 		pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
4171 	}
4172 }
4173 
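/*
 * Extended PHY registers are accessed indirectly: the register index plus
 * a read/write opcode goes into JME_MII_EXT_ADDR and the payload moves
 * through JME_MII_EXT_DATA.  For writes the data register is loaded first,
 * since the address write apparently triggers the actual operation.
 */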
4174 static int
4175 jme_miiext_read(struct jme_softc *sc, int reg)
4176 {
4177 	int addr;
4178 
4179 	addr = JME_MII_EXT_ADDR_RD | reg;
4180 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4181 	    JME_MII_EXT_ADDR, addr);
4182 	return jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr,
4183 	    JME_MII_EXT_DATA);
4184 }
4185 
4186 static void
4187 jme_miiext_write(struct jme_softc *sc, int reg, int val)
4188 {
4189 	int addr;
4190 
4191 	addr = JME_MII_EXT_ADDR_WR | reg;
4192 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4193 	    JME_MII_EXT_DATA, val);
4194 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4195 	    JME_MII_EXT_ADDR, addr);
4196 }
4197 
4198 static void
4199 jme_phy_init(struct jme_softc *sc)
4200 {
4201 	uint16_t gtcr;
4202 	int val;
4203 
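	/*
	 * PHY calibration: power-cycle the PHY, enter test mode 1, let the
	 * calibration engine run for ~20ms, then leave test mode and restore
	 * any stashed COM0/COM1 tuning values.
	 */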
4204 	jme_phy_poweroff(sc);
4205 	jme_phy_poweron(sc);
4206 
4207 	/* Enable PHY test 1 */
4208 	gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
4209 	gtcr &= ~GTCR_TEST_MASK;
4210 	gtcr |= GTCR_TEST_1;
4211 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);
4212 
4213 	val = jme_miiext_read(sc, JME_MII_EXT_COM2);
4214 	val &= ~JME_MII_EXT_COM2_CALIB_MODE0;
4215 	val |= JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN;
4216 	jme_miiext_write(sc, JME_MII_EXT_COM2, val);
4217 
4218 	DELAY(20000);
4219 
4220 	val = jme_miiext_read(sc, JME_MII_EXT_COM2);
4221 	val &= ~(JME_MII_EXT_COM2_CALIB_MODE0 |
4222 	    JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN);
4223 	jme_miiext_write(sc, JME_MII_EXT_COM2, val);
4224 
4225 	/* Disable PHY test */
4226 	gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
4227 	gtcr &= ~GTCR_TEST_MASK;
4228 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);
4229 
4230 	if (sc->jme_phycom0 != 0)
4231 		jme_miiext_write(sc, JME_MII_EXT_COM0, sc->jme_phycom0);
4232 	if (sc->jme_phycom1 != 0)
4233 		jme_miiext_write(sc, JME_MII_EXT_COM1, sc->jme_phycom1);
4234 }
4235