/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */

#include "opt_ifpoll.h"
#include "opt_jme.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include "pcidevs.h"

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"

#define JME_TICK_CPUID		0	/* DO NOT CHANGE THIS */

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */

static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *, struct ifaltq_subque *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef IFPOLL_ENABLE
static void	jme_npoll(struct ifnet *, struct ifpoll_info *);
static void	jme_npoll_status(struct ifnet *);
static void	jme_npoll_rx(struct ifnet *, void *, int);
static void	jme_npoll_tx(struct ifnet *, void *, int);
#endif
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_msix_status(void *);
static void	jme_txeof(struct jme_txdata *);
static void	jme_rxeof(struct jme_rxdata *, int, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);
static void	jme_enable_intr(struct jme_softc *);
static void	jme_disable_intr(struct jme_softc *);
static void	jme_rx_restart(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_rxdata *);
static void	jme_init_tx_ring(struct jme_txdata *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_txdata *, struct mbuf **, int *);
static void	jme_rxpkt(struct jme_rxdata *, int);
static int	jme_rxring_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);
static void	jme_serialize_skipmain(struct jme_softc *);
static void	jme_deserialize_skipmain(struct jme_softc *);
static void	jme_phy_poweron(struct jme_softc *);
static void	jme_phy_poweroff(struct jme_softc *);
static int	jme_miiext_read(struct jme_softc *, int);
static void	jme_miiext_write(struct jme_softc *, int, int);
static void	jme_phy_init(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);

static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};

static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 0;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);

static __inline void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;

	desc = rxd->rx_desc;
	desc->buflen = htole32(MCLBYTES);
	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
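	/*
	 * Hand the descriptor to the hardware: JME_RD_OWN passes
	 * ownership to the chip, JME_RD_INTR requests an interrupt on
	 * completion, and JME_RD_64BIT selects the 64-bit descriptor
	 * format.
	 */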
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
}

/*
 *	Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

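	/* Busy-wait until the chip clears SMI_OP_EXECUTE to signal completion. */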
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
			      "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
			      "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}

/*
 *	Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	if (sc->jme_in_tick)
		jme_serialize_skipmain(sc);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_has_link = FALSE;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_has_link = TRUE;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_has_link = TRUE;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has the side effect of resetting
	 * the JME_TXNDA/JME_RXNDA registers to the first address of
	 * the Tx/Rx descriptor rings, so the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note that just saving the value of
	 * the JME_TXNDA and JME_RXNDA registers before stopping the
	 * MAC and restoring them afterwards is not sufficient to
	 * guarantee correct MAC state, because stopping the MAC can
	 * take a while and the hardware might update JME_TXNDA/JME_RXNDA
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset the
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	jme_txeof(tdata);
	if (tdata->jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(tdata->jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				IFNET_STAT_INC(ifp, oerrors, 1);
			}
		}
	}
	jme_init_tx_ring(tdata);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_has_link) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = tdata->jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
	    JME_TICK_CPUID);

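	/*
	 * NOTE: when IFPOLL_ENABLE is compiled in, the CSR write below
	 * is the body of the conditional, i.e. interrupts are left
	 * masked while polling is active.
	 */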
#ifdef IFPOLL_ENABLE
	if (!(ifp->if_flags & IFF_NPOLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

done:
	if (sc->jme_in_tick)
		jme_deserialize_skipmain(sc);
}

/*
 *	Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

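	/* Trigger a single-byte read at 'addr' and wait for the command to complete. */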
	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
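	/*
	 * Walk the EEPROM descriptors; each one is JME_EEPROM_DESC_BYTES
	 * long, holding a function/page byte, a register offset and a
	 * data byte.  Collect the bytes destined for the JME_PAR0
	 * station address registers.
	 */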
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
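	/*
	 * Treat an all-zero station address, or one with the multicast
	 * bit (bit 0 of the first byte) set, as invalid.
	 */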
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt, coal_max, ring_cnt;
	uint8_t eaddr[ETHER_ADDR_LEN];

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	/*
	 * Get # of RX ring descriptors
	 */
	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Get # of TX ring descriptors
	 */
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count);
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, JME_NDESC_ALIGN);
	if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Create TX/RX ring maps.
	 */
	ring_cnt = device_getenv_int(dev, "rx_ring_count", jme_rx_ring_count);
	/* Require power-of-2 ring count. */
	sc->jme_rx_rmap = if_ringmap_alloc2(dev, ring_cnt, JME_NRXRING_MAX);
	sc->jme_cdata.jme_rx_ring_cnt = if_ringmap_count(sc->jme_rx_rmap);

	/* Only one TX ring is supported. */
	sc->jme_tx_rmap = if_ringmap_alloc(dev, 1, 1);

	/*
	 * NOTE:
	 * There is _no_ need to align or match the TX/RX ring maps,
	 * since the TX/RX rings are completely independent in this
	 * driver.
	 */

	/*
	 * Initialize serializer array
	 */
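	/*
	 * NOTE: slot 0 is assumed to hold the main serializer here, so
	 * that the skipmain variants can presumably enter the array
	 * starting at index 1.
	 */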
	i = 0;

	KKASSERT(i < JME_NSERIALIZE);
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;

	KKASSERT(i < JME_NSERIALIZE);
	sc->jme_serialize_arr[i++] =
	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;

	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		KKASSERT(i < JME_NSERIALIZE);
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}

	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	/*
	 * Setup TX ring specific data
	 */
	sc->jme_cdata.jme_tx_data.jme_sc = sc;

	/*
	 * Setup RX rings specific data
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init_mp(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access would require a
	 * different BAR, it's a waste of time to use I/O register
	 * space access.  JMC250 uses 16K to map the entire memory
	 * space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
				      (reg & CHIPMODE_FPGA_REV_MASK) >>
				      CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2) {
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
			sc->jme_phycom0 = 0x608a;
		} else if (rev == JME_REV2_2) {
			sc->jme_phycom0 = 0x408a;
		}
		break;

	default:
		panic("unknown device id 0x%04x", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
				      GHC_TXMAC_CLKSRC_1000;
	}
	if (rev >= JME_REV5)
		sc->jme_caps |= JME_CAP_PHYPWR;
	if (rev >= JME_REV6 || rev == JME_REV5 || rev == JME_REV5_1 ||
	    rev == JME_REV5_3) {
		sc->jme_phycom0 = 0x008a;
		sc->jme_phycom1 = 0x4109;
	} else if (rev == JME_REV3_1 || rev == JME_REV3_2) {
		sc->jme_phycom0 = 0xe088;
	}

	if (rev >= JME_REV2) {
		reg = pci_read_config(dev, JME_PCI_SSCTRL, 4);
		if ((reg & SSCTRL_PHYMASK) == SSCTRL_PHYEA) {
			sc->jme_phycom0 = 0;
			sc->jme_phycom1 = 0;
		}
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
				      "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to find the correct PHY
	 * address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
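		/* Match the TX DMA burst size to the PCIe max read request size. */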
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust the coalesce values, in case the number of TX/RX
	 * descs is set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned to JME_NDESC_ALIGN (16 currently).
	 */
	coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;

	sc->jme_cdata.jme_tx_data.jme_tx_wreg = JME_TXWREG_NSEGS;

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = jme_npoll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifp->if_nmbclusters = sc->jme_cdata.jme_rx_ring_cnt *
	    sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt;
	ifq_set_maxlen(&ifp->if_snd,
	    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_TSO |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= JME_CSUM_FEATURES;
	ifp->if_hwassist |= CSUM_TSO;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup the TX ring's CPUID */
	ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
	ifq_set_hw_serialize(&ifp->if_snd,
	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	jme_detach(dev);
	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	jme_dma_free(sc);

	if (sc->jme_rx_rmap != NULL)
		if_ringmap_free(sc->jme_rx_rmap);
	if (sc->jme_tx_rmap != NULL)
		if_ringmap_free(sc->jme_tx_rmap);

	return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
#ifdef JME_RSS_DEBUG
	int r;
#endif

	ctx = device_get_sysctl_ctx(sc->jme_dev);
	tree = device_get_sysctl_tree(sc->jme_dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
		       0, "TX desc count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_rx_ring_cnt,
		       0, "RX ring count");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "tx_wreg", CTLFLAG_RW,
		       &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0,
		       "# of segments before writing to hardware register");

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "tx_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->jme_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
		    "TX ring CPU map");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "rx_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->jme_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
		    "RX ring CPU map");
	} else {
#ifdef IFPOLL_ENABLE
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->jme_tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
		    "TX poll CPU map");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
		    sc->jme_rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
		    "RX poll CPU map");
#endif
	}

#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
		       0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_emp", r);
		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
		    "# of times RX ring was empty");
	}
#endif
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	asize = __VM_CACHELINE_ALIGN(
	    tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc));
	tdata->jme_txdesc = kmalloc(asize, M_DEVBUF,
				    M_WAITOK | M_ZERO | M_CACHEALIGN);

	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		asize = __VM_CACHELINE_ALIGN(
		    rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc));
		rdata->jme_rxdesc = kmalloc(asize, M_DEVBUF,
					    M_WAITOK | M_ZERO | M_CACHEALIGN);
	}

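	/*
	 * NOTE: two parent tags are created: the ring tag below honors
	 * JME_RING_BOUNDARY for the descriptor rings, while the buffer
	 * tag (created later with no boundary) parents the TX/RX packet
	 * buffer tags.
	 */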
	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	tdata->jme_tx_ring_tag = dmem.dmem_tag;
	tdata->jme_tx_ring_map = dmem.dmem_map;
	tdata->jme_tx_ring = dmem.dmem_addr;
	tdata->jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &tdata->jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		error = bus_dmamap_create(tdata->jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &tdata->jme_txdesc[j];
				bus_dmamap_destroy(tdata->jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(tdata->jme_tx_tag);
			tdata->jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (tdata->jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring_map);
		bus_dmamem_free(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring, tdata->jme_tx_ring_map);
		bus_dma_tag_destroy(tdata->jme_tx_ring_tag);
		tdata->jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (tdata->jme_tx_tag != NULL) {
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap);
		}
		bus_dma_tag_destroy(tdata->jme_tx_tag);
		tdata->jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (tdata->jme_txdesc != NULL) {
		kfree(tdata->jme_txdesc, M_DEVBUF);
		tdata->jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}

/*
 *	Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be explicitly reset to 10/100Mbps, as a gigabit link
 * consumes more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as
 * we have no control after powering off.  If the renegotiation fails,
 * WOL may not work.  Running at 1Gbps draws more power than the 375mA
 * at 3.3V specified in the PCI specification, and that would result
 * in power to the ethernet controller being shut down completely.
 *
 * TODO
 *  Save current negotiated media speed/duplex/flow-control
 *  to softc and restore the same link again after resuming.
 *  PHY handling such as power down/resetting to 100Mbps
 *  may be better handled in suspend method in phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	ifnet_serialize_all(ifp);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}

static __inline int
jme_tso_pullup(struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

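	/*
	 * The TSO path expects the Ethernet/IP/TCP headers to be
	 * contiguous in the first mbuf, so pull them up if needed.
	 */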
	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	return 0;
}

static int
jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = tdata->jme_tx_prod;
	txd = &tdata->jme_txdesc[prod];

	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

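	/*
	 * Cap the segment count so that JME_TXD_RSVD descriptors always
	 * remain free, plus one extra for the leading symbol descriptor
	 * when the 64-bit chain format is used.
	 */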
1686 	maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) -
1687 		  (JME_TXD_RSVD + symbol_desc);
1688 	if (maxsegs > JME_MAXTXSEGS)
1689 		maxsegs = JME_MAXTXSEGS;
1690 	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
1691 		("not enough segments %d", maxsegs));
1692 
1693 	error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag,
1694 			txd->tx_dmamap, m_head,
1695 			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1696 	if (error)
1697 		goto fail;
1698 	*segs_used += nsegs;
1699 
1700 	bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap,
1701 			BUS_DMASYNC_PREWRITE);
1702 
1703 	m = *m_head;
1704 	cflags = 0;
1705 	mss = 0;
1706 
1707 	/* Configure checksum offload. */
1708 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1709 		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
1710 		cflags |= JME_TD_TSO;
1711 	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
1712 		if (m->m_pkthdr.csum_flags & CSUM_IP)
1713 			cflags |= JME_TD_IPCSUM;
1714 		if (m->m_pkthdr.csum_flags & CSUM_TCP)
1715 			cflags |= JME_TD_TCPCSUM;
1716 		if (m->m_pkthdr.csum_flags & CSUM_UDP)
1717 			cflags |= JME_TD_UDPCSUM;
1718 	}
1719 
1720 	/* Configure VLAN. */
1721 	if (m->m_flags & M_VLANTAG) {
1722 		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1723 		cflags |= JME_TD_VLAN_TAG;
1724 	}
1725 
1726 	desc = &tdata->jme_tx_ring[prod];
1727 	desc->flags = htole32(cflags);
1728 	desc->addr_hi = htole32(m->m_pkthdr.len);
1729 	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1730 		/*
1731 		 * Use 64bits TX desc chain format.
1732 		 *
1733 		 * The first TX desc of the chain, which is setup here,
1734 		 * is just a symbol TX desc carrying no payload.
1735 		 */
1736 		flag64 = JME_TD_64BIT;
1737 		desc->buflen = htole32(mss);
1738 		desc->addr_lo = 0;
1739 
1740 		*segs_used += 1;
1741 
1742 		/* No effective TX desc is consumed */
1743 		i = 0;
1744 	} else {
1745 		/*
1746 		 * Use 32bits TX desc chain format.
1747 		 *
1748 		 * The first TX desc of the chain, which is setup here,
1749 		 * is an effective TX desc carrying the first segment of
1750 		 * the mbuf chain.
1751 		 */
1752 		flag64 = 0;
1753 		desc->buflen = htole32(mss | txsegs[0].ds_len);
1754 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1755 
1756 		/* One effective TX desc is consumed */
1757 		i = 1;
1758 	}
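	/*
	 * Summary of the two chain layouts set up above:
	 *   64-bit: [symbol desc: cflags/mss, no buffer][seg0]...[segN-1]
	 *   32-bit: [head desc: cflags/mss + seg0][seg1]...[segN-1]
	 * Hence the copy loop below starts at i = 0 for the 64-bit
	 * format and at i = 1 for the 32-bit format, and the 64-bit
	 * format consumes one extra descriptor per frame.
	 */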
1759 	tdata->jme_tx_cnt++;
1760 	KKASSERT(tdata->jme_tx_cnt - i < tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
1761 	JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
1762 
1763 	txd->tx_ndesc = 1 - i;
1764 	for (; i < nsegs; i++) {
1765 		desc = &tdata->jme_tx_ring[prod];
1766 		desc->buflen = htole32(txsegs[i].ds_len);
1767 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1768 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1769 		desc->flags = htole32(JME_TD_OWN | flag64);
1770 
1771 		tdata->jme_tx_cnt++;
1772 		KKASSERT(tdata->jme_tx_cnt <=
1773 			 tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
1774 		JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
1775 	}
1776 
1777 	/* Update producer index. */
1778 	tdata->jme_tx_prod = prod;
1779 	/*
1780 	 * Finally, request an interrupt and give ownership of the
1781 	 * first descriptor to the hardware.
1782 	 */
1783 	desc = txd->tx_desc;
1784 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1785 
1786 	txd->tx_m = m;
1787 	txd->tx_ndesc += nsegs;
1788 
1789 	return 0;
1790 fail:
1791 	m_freem(*m_head);
1792 	*m_head = NULL;
1793 	return error;
1794 }
1795 
1796 static void
1797 jme_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1798 {
1799 	struct jme_softc *sc = ifp->if_softc;
1800 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1801 	struct mbuf *m_head;
1802 	int enq = 0;
1803 
1804 	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1805 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
1806 
1807 	if (!sc->jme_has_link) {
1808 		ifq_purge(&ifp->if_snd);
1809 		return;
1810 	}
1811 
1812 	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1813 		return;
1814 
1815 	if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata))
1816 		jme_txeof(tdata);
1817 
1818 	while (!ifq_is_empty(&ifp->if_snd)) {
1819 		/*
1820 		 * Check number of available TX descs, always
1821 		 * leave JME_TXD_RSVD free TX descs.
1822 		 */
1823 		if (tdata->jme_tx_cnt + JME_TXD_SPARE >
1824 		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD) {
1825 			ifq_set_oactive(&ifp->if_snd);
1826 			break;
1827 		}
1828 
1829 		m_head = ifq_dequeue(&ifp->if_snd);
1830 		if (m_head == NULL)
1831 			break;
1832 
1833 		/*
1834 		 * Pack the data into the transmit ring. If we
1835 		 * don't have room, set the OACTIVE flag and wait
1836 		 * for the NIC to drain the ring.
1837 		 */
1838 		if (jme_encap(tdata, &m_head, &enq)) {
1839 			KKASSERT(m_head == NULL);
1840 			IFNET_STAT_INC(ifp, oerrors, 1);
1841 			ifq_set_oactive(&ifp->if_snd);
1842 			break;
1843 		}
1844 
1845 		if (enq >= tdata->jme_tx_wreg) {
1846 			CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr |
1847 			    TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0));
1848 			enq = 0;
1849 		}
1850 
1851 		/*
1852 		 * If there's a BPF listener, bounce a copy of this frame
1853 		 * to him.
1854 		 */
1855 		ETHER_BPF_MTAP(ifp, m_head);
1856 
1857 		/* Set a timeout in case the chip goes out to lunch. */
1858 		ifp->if_timer = JME_TX_TIMEOUT;
1859 	}
1860 
1861 	if (enq > 0) {
1862 		/*
1863 		 * Reading TXCSR takes a very long time under heavy load,
1864 		 * so cache the TXCSR value and write the ORed value, along
1865 		 * with the kick command, to TXCSR.  This saves one register
1866 		 * access cycle.
1867 		 */
1868 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1869 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1870 	}
1871 }
1872 
1873 static void
1874 jme_watchdog(struct ifnet *ifp)
1875 {
1876 	struct jme_softc *sc = ifp->if_softc;
1877 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
1878 
1879 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1880 
1881 	if (!sc->jme_has_link) {
1882 		if_printf(ifp, "watchdog timeout (missed link)\n");
1883 		IFNET_STAT_INC(ifp, oerrors, 1);
1884 		jme_init(sc);
1885 		return;
1886 	}
1887 
1888 	jme_txeof(tdata);
1889 	if (tdata->jme_tx_cnt == 0) {
1890 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1891 			  "-- recovering\n");
1892 		if (!ifq_is_empty(&ifp->if_snd))
1893 			if_devstart(ifp);
1894 		return;
1895 	}
1896 
1897 	if_printf(ifp, "watchdog timeout\n");
1898 	IFNET_STAT_INC(ifp, oerrors, 1);
1899 	jme_init(sc);
1900 	if (!ifq_is_empty(&ifp->if_snd))
1901 		if_devstart(ifp);
1902 }
1903 
1904 static int
1905 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1906 {
1907 	struct jme_softc *sc = ifp->if_softc;
1908 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
1909 	struct ifreq *ifr = (struct ifreq *)data;
1910 	int error = 0, mask;
1911 
1912 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
1913 
1914 	switch (cmd) {
1915 	case SIOCSIFMTU:
1916 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1917 		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
1918 		     ifr->ifr_mtu > JME_MAX_MTU)) {
1919 			error = EINVAL;
1920 			break;
1921 		}
1922 
1923 		if (ifp->if_mtu != ifr->ifr_mtu) {
1924 			/*
1925 			 * No special configuration is required when the
1926 			 * interface MTU is changed, but the availability of Tx
1927 			 * checksum offload should be checked against the new
1928 			 * MTU size, as the Tx FIFO is only 2K.
1929 			 */
1930 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1931 				ifp->if_capenable &=
1932 				    ~(IFCAP_TXCSUM | IFCAP_TSO);
1933 				ifp->if_hwassist &=
1934 				    ~(JME_CSUM_FEATURES | CSUM_TSO);
1935 			}
1936 			ifp->if_mtu = ifr->ifr_mtu;
1937 			if (ifp->if_flags & IFF_RUNNING)
1938 				jme_init(sc);
1939 		}
1940 		break;
1941 
1942 	case SIOCSIFFLAGS:
1943 		if (ifp->if_flags & IFF_UP) {
1944 			if (ifp->if_flags & IFF_RUNNING) {
1945 				if ((ifp->if_flags ^ sc->jme_if_flags) &
1946 				    (IFF_PROMISC | IFF_ALLMULTI))
1947 					jme_set_filter(sc);
1948 			} else {
1949 				jme_init(sc);
1950 			}
1951 		} else {
1952 			if (ifp->if_flags & IFF_RUNNING)
1953 				jme_stop(sc);
1954 		}
1955 		sc->jme_if_flags = ifp->if_flags;
1956 		break;
1957 
1958 	case SIOCADDMULTI:
1959 	case SIOCDELMULTI:
1960 		if (ifp->if_flags & IFF_RUNNING)
1961 			jme_set_filter(sc);
1962 		break;
1963 
1964 	case SIOCSIFMEDIA:
1965 	case SIOCGIFMEDIA:
1966 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1967 		break;
1968 
1969 	case SIOCSIFCAP:
1970 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1971 
1972 		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1973 			ifp->if_capenable ^= IFCAP_TXCSUM;
1974 			if (ifp->if_capenable & IFCAP_TXCSUM)
1975 				ifp->if_hwassist |= JME_CSUM_FEATURES;
1976 			else
1977 				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1978 		}
1979 		if (mask & IFCAP_RXCSUM) {
1980 			uint32_t reg;
1981 
1982 			ifp->if_capenable ^= IFCAP_RXCSUM;
1983 			reg = CSR_READ_4(sc, JME_RXMAC);
1984 			reg &= ~RXMAC_CSUM_ENB;
1985 			if (ifp->if_capenable & IFCAP_RXCSUM)
1986 				reg |= RXMAC_CSUM_ENB;
1987 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1988 		}
1989 
1990 		if (mask & IFCAP_VLAN_HWTAGGING) {
1991 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1992 			jme_set_vlan(sc);
1993 		}
1994 
1995 		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1996 			ifp->if_capenable ^= IFCAP_TSO;
1997 			if (ifp->if_capenable & IFCAP_TSO)
1998 				ifp->if_hwassist |= CSUM_TSO;
1999 			else
2000 				ifp->if_hwassist &= ~CSUM_TSO;
2001 		}
2002 
2003 		if (mask & IFCAP_RSS)
2004 			ifp->if_capenable ^= IFCAP_RSS;
2005 		break;
2006 
2007 	default:
2008 		error = ether_ioctl(ifp, cmd, data);
2009 		break;
2010 	}
2011 	return (error);
2012 }
2013 
2014 static void
2015 jme_mac_config(struct jme_softc *sc)
2016 {
2017 	struct mii_data *mii;
2018 	uint32_t ghc, rxmac, txmac, txpause, gp1;
2019 	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
2020 
2021 	mii = device_get_softc(sc->jme_miibus);
2022 
2023 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2024 	DELAY(10);
2025 	CSR_WRITE_4(sc, JME_GHC, 0);
2026 	ghc = 0;
2027 	rxmac = CSR_READ_4(sc, JME_RXMAC);
2028 	rxmac &= ~RXMAC_FC_ENB;
2029 	txmac = CSR_READ_4(sc, JME_TXMAC);
2030 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2031 	txpause = CSR_READ_4(sc, JME_TXPFC);
2032 	txpause &= ~TXPFC_PAUSE_ENB;
2033 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2034 		ghc |= GHC_FULL_DUPLEX;
2035 		rxmac &= ~RXMAC_COLL_DET_ENB;
2036 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2037 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2038 		    TXMAC_FRAME_BURST);
2039 #ifdef notyet
2040 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2041 			txpause |= TXPFC_PAUSE_ENB;
2042 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2043 			rxmac |= RXMAC_FC_ENB;
2044 #endif
2045 		/* Disable retry transmit timer/retry limit. */
2046 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2047 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2048 	} else {
2049 		rxmac |= RXMAC_COLL_DET_ENB;
2050 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2051 		/* Enable retry transmit timer/retry limit. */
2052 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2053 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2054 	}
2055 
2056 	/*
2057 	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
2058 	 */
2059 	gp1 = CSR_READ_4(sc, JME_GPREG1);
2060 	gp1 &= ~GPREG1_WA_HDX;
2061 
2062 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2063 		hdx = 1;
2064 
2065 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2066 	case IFM_10_T:
2067 		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
2068 		if (hdx)
2069 			gp1 |= GPREG1_WA_HDX;
2070 		break;
2071 
2072 	case IFM_100_TX:
2073 		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
2074 		if (hdx)
2075 			gp1 |= GPREG1_WA_HDX;
2076 
2077 		/*
2078 		 * Use extended FIFO depth to work around CRC errors
2079 		 * emitted by chips prior to the JMC250B.
2080 		 */
2081 		phyconf = JMPHY_CONF_EXTFIFO;
2082 		break;
2083 
2084 	case IFM_1000_T:
2085 		if (sc->jme_caps & JME_CAP_FASTETH)
2086 			break;
2087 
2088 		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
2089 		if (hdx)
2090 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2091 		break;
2092 
2093 	default:
2094 		break;
2095 	}
2096 	CSR_WRITE_4(sc, JME_GHC, ghc);
2097 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2098 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
2099 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
2100 
2101 	if (sc->jme_workaround & JME_WA_EXTFIFO) {
2102 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2103 				    JMPHY_CONF, phyconf);
2104 	}
2105 	if (sc->jme_workaround & JME_WA_HDX)
2106 		CSR_WRITE_4(sc, JME_GPREG1, gp1);
2107 }
2108 
2109 static void
2110 jme_intr(void *xsc)
2111 {
2112 	struct jme_softc *sc = xsc;
2113 	struct ifnet *ifp = &sc->arpcom.ac_if;
2114 	uint32_t status;
2115 	int r;
2116 
2117 	ASSERT_SERIALIZED(&sc->jme_serialize);
2118 
2119 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2120 	if (status == 0 || status == 0xFFFFFFFF)
2121 		return;
2122 
2123 	/* Disable interrupts. */
2124 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2125 
2126 	status = CSR_READ_4(sc, JME_INTR_STATUS);
2127 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2128 		goto back;
2129 
2130 	/* Reset PCC counter/timer and Ack interrupts. */
2131 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2132 
2133 	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
2134 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2135 
2136 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2137 		if (status & jme_rx_status[r].jme_coal) {
2138 			status |= jme_rx_status[r].jme_coal |
2139 				  jme_rx_status[r].jme_comp;
2140 		}
2141 	}
2142 
2143 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2144 
2145 	if (ifp->if_flags & IFF_RUNNING) {
2146 		struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
2147 
2148 		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
2149 			jme_rx_intr(sc, status);
2150 
2151 		if (status & INTR_RXQ_DESC_EMPTY) {
2152 			/*
2153 			 * Notify the hardware that new Rx buffers are
2154 			 * available.  Reading RXCSR takes a very long time
2155 			 * under heavy load, so cache the RXCSR value and
2156 			 * write the ORed value, along with the kick command,
2157 			 * to RXCSR.  This saves one register access cycle.
2158 			 */
2159 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2160 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2161 		}
2162 
2163 		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
2164 			lwkt_serialize_enter(&tdata->jme_tx_serialize);
2165 			jme_txeof(tdata);
2166 			if (!ifq_is_empty(&ifp->if_snd))
2167 				if_devstart(ifp);
2168 			lwkt_serialize_exit(&tdata->jme_tx_serialize);
2169 		}
2170 	}
2171 back:
2172 	/* Reenable interrupts. */
2173 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2174 }
2175 
2176 static void
2177 jme_txeof(struct jme_txdata *tdata)
2178 {
2179 	struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if;
2180 	int cons;
2181 
2182 	cons = tdata->jme_tx_cons;
2183 	if (cons == tdata->jme_tx_prod)
2184 		return;
2185 
2186 	/*
2187 	 * Go through our Tx list and free mbufs for those
2188 	 * frames which have been transmitted.
2189 	 */
2190 	while (cons != tdata->jme_tx_prod) {
2191 		struct jme_txdesc *txd, *next_txd;
2192 		uint32_t status, next_status;
2193 		int next_cons, nsegs;
2194 
2195 		txd = &tdata->jme_txdesc[cons];
2196 		KASSERT(txd->tx_m != NULL,
2197 			("%s: freeing NULL mbuf!", __func__));
2198 
2199 		status = le32toh(txd->tx_desc->flags);
2200 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2201 			break;
2202 
2203 		/*
2204 		 * NOTE:
2205 		 * This chip always updates the TX descriptor's
2206 		 * buflen field, and that update always happens
2207 		 * after the OWN bit is cleared.  So even if the OWN
2208 		 * bit has been cleared by the chip, we still cannot
2209 		 * be sure whether the buflen field has been updated
2210 		 * by the chip or not.  To avoid this race, we wait
2211 		 * for the next TX descriptor's OWN bit to be cleared
2212 		 * by the chip before reusing this TX descriptor.
2213 		 */
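		/*
		 * Illustration of the race: the chip may perform
		 * (1) clear OWN bit and (2) write back buflen as two
		 * separate DMA writes.  If the driver saw only (1) and
		 * recycled the descriptor, (2) could later overwrite
		 * the reused descriptor.  Once the chip has also
		 * cleared OWN on the next frame's first descriptor,
		 * this frame's write-back is known to be complete.
		 */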
2214 		next_cons = cons;
2215 		JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt);
2216 		next_txd = &tdata->jme_txdesc[next_cons];
2217 		if (next_txd->tx_m == NULL)
2218 			break;
2219 		next_status = le32toh(next_txd->tx_desc->flags);
2220 		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
2221 			break;
2222 
2223 		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2224 			IFNET_STAT_INC(ifp, oerrors, 1);
2225 		} else {
2226 			IFNET_STAT_INC(ifp, opackets, 1);
2227 			if (status & JME_TD_COLLISION) {
2228 				IFNET_STAT_INC(ifp, collisions,
2229 				    le32toh(txd->tx_desc->buflen) &
2230 				    JME_TD_BUF_LEN_MASK);
2231 			}
2232 		}
2233 
2234 		/*
2235 		 * Only the first descriptor of a multi-descriptor
2236 		 * transmission is updated, so the driver has to skip the
2237 		 * entire chain of buffers for the transmitted frame.  In
2238 		 * other words, the JME_TD_OWN bit is valid only in the
2239 		 * first descriptor of a multi-descriptor transmission.
2240 		 */
2241 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2242 			tdata->jme_tx_ring[cons].flags = 0;
2243 			JME_DESC_INC(cons, tdata->jme_tx_desc_cnt);
2244 		}
2245 
2246 		/* Reclaim transferred mbufs. */
2247 		bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
2248 		m_freem(txd->tx_m);
2249 		txd->tx_m = NULL;
2250 		tdata->jme_tx_cnt -= txd->tx_ndesc;
2251 		KASSERT(tdata->jme_tx_cnt >= 0,
2252 			("%s: Active Tx desc counter was garbled", __func__));
2253 		txd->tx_ndesc = 0;
2254 	}
2255 	tdata->jme_tx_cons = cons;
2256 
2257 	/* 1 for symbol TX descriptor */
2258 	if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1)
2259 		ifp->if_timer = 0;
2260 
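	/*
	 * Mirror of the admission check in jme_start(): once enough
	 * descriptors have been reclaimed that another maximally
	 * fragmented frame (JME_TXD_SPARE descriptors) fits while
	 * still leaving JME_TXD_RSVD descriptors free, transmission
	 * may resume.
	 */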
2261 	if (tdata->jme_tx_cnt + JME_TXD_SPARE <=
2262 	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD)
2263 		ifq_clr_oactive(&ifp->if_snd);
2264 }
2265 
2266 static __inline void
2267 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2268 {
2269 	int i;
2270 
2271 	for (i = 0; i < count; ++i) {
2272 		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2273 		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2274 	}
2275 }
2276 
2277 static __inline struct pktinfo *
2278 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2279 {
2280 	if (flags & JME_RD_IPV4)
2281 		pi->pi_netisr = NETISR_IP;
2282 	else if (flags & JME_RD_IPV6)
2283 		pi->pi_netisr = NETISR_IPV6;
2284 	else
2285 		return NULL;
2286 
2287 	pi->pi_flags = 0;
2288 	pi->pi_l3proto = IPPROTO_UNKNOWN;
2289 
2290 	if (flags & JME_RD_MORE_FRAG)
2291 		pi->pi_flags |= PKTINFO_FLAG_FRAG;
2292 	else if (flags & JME_RD_TCP)
2293 		pi->pi_l3proto = IPPROTO_TCP;
2294 	else if (flags & JME_RD_UDP)
2295 		pi->pi_l3proto = IPPROTO_UDP;
2296 	else
2297 		pi = NULL;
2298 	return pi;
2299 }
2300 
2301 /* Receive a frame. */
2302 static void
2303 jme_rxpkt(struct jme_rxdata *rdata, int cpuid)
2304 {
2305 	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2306 	struct jme_desc *desc;
2307 	struct jme_rxdesc *rxd;
2308 	struct mbuf *mp, *m;
2309 	uint32_t flags, status, hash, hashinfo;
2310 	int cons, count, nsegs;
2311 
2312 	cons = rdata->jme_rx_cons;
2313 	desc = &rdata->jme_rx_ring[cons];
2314 
2315 	flags = le32toh(desc->flags);
2316 	status = le32toh(desc->buflen);
2317 	hash = le32toh(desc->addr_hi);
2318 	hashinfo = le32toh(desc->addr_lo);
2319 	nsegs = JME_RX_NSEGS(status);
2320 
2321 	if (nsegs > 1) {
2322 		/* Skip the first descriptor. */
2323 		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2324 
2325 		/*
2326 		 * Clear the OWN bit of the following RX descriptors;
2327 		 * hardware will not clear the OWN bit except the first
2328 		 * RX descriptor.
2329 		 *
2330 		 * Since the first RX descriptor is set up, i.e. OWN bit
2331 		 * on, before its following RX descriptors, leaving the
2332 		 * OWN bit on the following RX descriptors would trick
2333 		 * the hardware into thinking that they are ready to be
2334 		 * used too.
2335 		 */
2336 		for (count = 1; count < nsegs; count++,
2337 		     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2338 			rdata->jme_rx_ring[cons].flags = 0;
2339 
2340 		cons = rdata->jme_rx_cons;
2341 	}
2342 
2343 	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2344 			"hash 0x%08x, hash info 0x%08x\n",
2345 			rdata->jme_rx_idx, flags, hash, hashinfo);
2346 
2347 	if (status & JME_RX_ERR_STAT) {
2348 		IFNET_STAT_INC(ifp, ierrors, 1);
2349 		jme_discard_rxbufs(rdata, cons, nsegs);
2350 #ifdef JME_SHOW_ERRORS
2351 		if_printf(ifp, "%s : receive error = 0x%pb%i\n",
2352 		    __func__, JME_RX_ERR_BITS, JME_RX_ERR(status));
2353 #endif
2354 		rdata->jme_rx_cons += nsegs;
2355 		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2356 		return;
2357 	}
2358 
2359 	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2360 	for (count = 0; count < nsegs; count++,
2361 	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2362 		rxd = &rdata->jme_rxdesc[cons];
2363 		mp = rxd->rx_m;
2364 
2365 		/* Add a new receive buffer to the ring. */
2366 		if (jme_newbuf(rdata, rxd, 0) != 0) {
2367 			IFNET_STAT_INC(ifp, iqdrops, 1);
2368 			/* Reuse buffer. */
2369 			jme_discard_rxbufs(rdata, cons, nsegs - count);
2370 			if (rdata->jme_rxhead != NULL) {
2371 				m_freem(rdata->jme_rxhead);
2372 				JME_RXCHAIN_RESET(rdata);
2373 			}
2374 			break;
2375 		}
2376 
2377 		/*
2378 		 * Assume we've received a full-sized frame.
2379 		 * The actual size is fixed up when we encounter the
2380 		 * end of a multi-segment frame.
2381 		 */
2382 		mp->m_len = MCLBYTES;
2383 
2384 		/* Chain received mbufs. */
2385 		if (rdata->jme_rxhead == NULL) {
2386 			rdata->jme_rxhead = mp;
2387 			rdata->jme_rxtail = mp;
2388 		} else {
2389 			/*
2390 			 * Receive processor can receive a maximum frame
2391 			 * size of 65535 bytes.
2392 			 */
2393 			rdata->jme_rxtail->m_next = mp;
2394 			rdata->jme_rxtail = mp;
2395 		}
2396 
2397 		if (count == nsegs - 1) {
2398 			struct pktinfo pi0, *pi;
2399 
2400 			/* Last desc. for this frame. */
2401 			m = rdata->jme_rxhead;
2402 			m->m_pkthdr.len = rdata->jme_rxlen;
2403 			if (nsegs > 1) {
2404 				/* Set first mbuf size. */
2405 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2406 				/* Set last mbuf size. */
2407 				mp->m_len = rdata->jme_rxlen -
2408 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2409 				    (MCLBYTES * (nsegs - 2)));
2410 			} else {
2411 				m->m_len = rdata->jme_rxlen;
2412 			}
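			/*
			 * Worked example, assuming MCLBYTES == 2048 and
			 * JME_RX_PAD_BYTES == 10: a 5000-byte frame plus
			 * the 10-byte pad occupies 5010 bytes and arrives
			 * in nsegs = 3 clusters, so jme_rxlen = 5000;
			 * the first mbuf holds 2048 - 10 = 2038 bytes,
			 * the middle one 2048, and the last one
			 * 5000 - (2038 + 2048) = 914.
			 */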
2413 			m->m_pkthdr.rcvif = ifp;
2414 
2415 			/*
2416 			 * Account for the 10 bytes of auto padding used to
2417 			 * align the IP header on a 32-bit boundary.  Also
2418 			 * note that the CRC bytes are automatically removed
2419 			 * by the hardware.
2420 			 */
2421 			m->m_data += JME_RX_PAD_BYTES;
2422 
2423 			/* Set checksum information. */
2424 			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2425 			    (flags & JME_RD_IPV4)) {
2426 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2427 				if (flags & JME_RD_IPCSUM)
2428 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2429 				if ((flags & JME_RD_MORE_FRAG) == 0 &&
2430 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2431 				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
2432 				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2433 				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
2434 					m->m_pkthdr.csum_flags |=
2435 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2436 					m->m_pkthdr.csum_data = 0xffff;
2437 				}
2438 			}
2439 
2440 			/* Check for VLAN tagged packets. */
2441 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2442 			    (flags & JME_RD_VLAN_TAG)) {
2443 				m->m_pkthdr.ether_vlantag =
2444 				    flags & JME_RD_VLAN_MASK;
2445 				m->m_flags |= M_VLANTAG;
2446 			}
2447 
2448 			IFNET_STAT_INC(ifp, ipackets, 1);
2449 
2450 			if (ifp->if_capenable & IFCAP_RSS)
2451 				pi = jme_pktinfo(&pi0, flags);
2452 			else
2453 				pi = NULL;
2454 
2455 			if (pi != NULL &&
2456 			    (hashinfo & JME_RD_HASH_FN_MASK) ==
2457 			    JME_RD_HASH_FN_TOEPLITZ) {
2458 				m_sethash(m, toeplitz_hash(hash));
2459 				m->m_flags |= M_CKHASH;
2460 			}
2461 
2462 #ifdef JME_RSS_DEBUG
2463 			if (pi != NULL) {
2464 				JME_RSS_DPRINTF(rdata->jme_sc, 10,
2465 				    "isr %d flags %08x, l3 %d %s\n",
2466 				    pi->pi_netisr, pi->pi_flags,
2467 				    pi->pi_l3proto,
2468 				    (m->m_flags & M_HASH) ? "hash" : "");
2469 			}
2470 #endif
2471 
2472 			/* Pass it on. */
2473 			ifp->if_input(ifp, m, pi, cpuid);
2474 
2475 			/* Reset mbuf chains. */
2476 			JME_RXCHAIN_RESET(rdata);
2477 #ifdef JME_RSS_DEBUG
2478 			rdata->jme_rx_pkt++;
2479 #endif
2480 		}
2481 	}
2482 
2483 	rdata->jme_rx_cons += nsegs;
2484 	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2485 }
2486 
2487 static void
2488 jme_rxeof(struct jme_rxdata *rdata, int count, int cpuid)
2489 {
2490 	struct jme_desc *desc;
2491 	int nsegs, pktlen;
2492 
2493 	for (;;) {
2494 #ifdef IFPOLL_ENABLE
2495 		if (count >= 0 && count-- == 0)
2496 			break;
2497 #endif
2498 		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2499 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2500 			break;
2501 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2502 			break;
2503 
2504 		/*
2505 		 * Check the number of segments against the received
2506 		 * bytes.  A non-matching value would indicate that the
2507 		 * hardware is still trying to update the Rx descriptors.
2508 		 * I'm not sure whether this check is needed.
2509 		 */
2510 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2511 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2512 		if (nsegs != howmany(pktlen, MCLBYTES)) {
2513 			if_printf(&rdata->jme_sc->arpcom.ac_if,
2514 			    "RX fragment count(%d) and "
2515 			    "packet size(%d) mismatch\n", nsegs, pktlen);
2516 			break;
2517 		}
2518 
2519 		/*
2520 		 * NOTE:
2521 		 * RSS hash and hash information may _not_ be set by the
2522 		 * hardware even if the OWN bit is cleared and VALID bit
2523 		 * is set.
2524 		 *
2525 		 * If the RSS information is not delivered by the hardware
2526 		 * yet, we MUST NOT accept this packet, let alone reusing
2527 		 * its RX descriptor.  If this packet was accepted and its
2528 		 * RX descriptor was reused before hardware delivering the
2529 		 * RSS information, the RX buffer's address would be trashed
2530 		 * by the RSS information delivered by the hardware.
2531 		 */
2532 		if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2533 			struct jme_rxdesc *rxd;
2534 			uint32_t hashinfo;
2535 
2536 			hashinfo = le32toh(desc->addr_lo);
2537 			rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2538 
2539 			/*
2540 			 * This test should be enough to detect the pending
2541 			 * RSS information delivery, given:
2542 			 * - If RSS hash is not calculated, the hashinfo
2543 			 *   will be 0.  However, the lower 32bits of RX
2544 			 *   buffers' physical address will never be 0.
2545 			 *   (see jme_rxbuf_dma_filter)
2546 			 * - If RSS hash is calculated, the lowest 4 bits
2547 			 *   of hashinfo will be set, while the RX buffers
2548 			 *   are at least 2K aligned.
2549 			 */
2550 			if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2551 #ifdef JME_SHOW_RSSWB
2552 				if_printf(&rdata->jme_sc->arpcom.ac_if,
2553 				    "RSS is not written back yet\n");
2554 #endif
2555 				break;
2556 			}
2557 		}
2558 
2559 		/* Received a frame. */
2560 		jme_rxpkt(rdata, cpuid);
2561 	}
2562 }
2563 
2564 static void
2565 jme_tick(void *xsc)
2566 {
2567 	struct jme_softc *sc = xsc;
2568 	struct mii_data *mii = device_get_softc(sc->jme_miibus);
2569 
2570 	lwkt_serialize_enter(&sc->jme_serialize);
2571 
2572 	KKASSERT(mycpuid == JME_TICK_CPUID);
2573 
2574 	sc->jme_in_tick = TRUE;
2575 	mii_tick(mii);
2576 	sc->jme_in_tick = FALSE;
2577 
2578 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2579 
2580 	lwkt_serialize_exit(&sc->jme_serialize);
2581 }
2582 
2583 static void
2584 jme_reset(struct jme_softc *sc)
2585 {
2586 	uint32_t val;
2587 
2588 	/* Make sure that TX and RX are stopped */
2589 	jme_stop_tx(sc);
2590 	jme_stop_rx(sc);
2591 
2592 	/* Start reset */
2593 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2594 	DELAY(20);
2595 
2596 	/*
2597 	 * Hold the reset bit before stopping the reset
2598 	 */
2599 
2600 	/* Disable TXMAC and TXOFL clock sources */
2601 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2602 	/* Disable RXMAC clock source */
2603 	val = CSR_READ_4(sc, JME_GPREG1);
2604 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2605 	/* Flush */
2606 	CSR_READ_4(sc, JME_GHC);
2607 
2608 	/* Stop reset */
2609 	CSR_WRITE_4(sc, JME_GHC, 0);
2610 	/* Flush */
2611 	CSR_READ_4(sc, JME_GHC);
2612 
2613 	/*
2614 	 * Clear the reset bit after stopping the reset
2615 	 */
2616 
2617 	/* Enable TXMAC and TXOFL clock sources */
2618 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2619 	/* Enable RXMAC clock source */
2620 	val = CSR_READ_4(sc, JME_GPREG1);
2621 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2622 	/* Flush */
2623 	CSR_READ_4(sc, JME_GHC);
2624 
2625 	/* Disable TXMAC and TXOFL clock sources */
2626 	CSR_WRITE_4(sc, JME_GHC, 0);
2627 	/* Disable RXMAC clock source */
2628 	val = CSR_READ_4(sc, JME_GPREG1);
2629 	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2630 	/* Flush */
2631 	CSR_READ_4(sc, JME_GHC);
2632 
2633 	/* Enable TX and RX */
2634 	val = CSR_READ_4(sc, JME_TXCSR);
2635 	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2636 	val = CSR_READ_4(sc, JME_RXCSR);
2637 	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2638 	/* Flush */
2639 	CSR_READ_4(sc, JME_TXCSR);
2640 	CSR_READ_4(sc, JME_RXCSR);
2641 
2642 	/* Enable TXMAC and TXOFL clock sources */
2643 	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2644 	/* Disable RXMAC clock source */
2645 	val = CSR_READ_4(sc, JME_GPREG1);
2646 	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2647 	/* Flush */
2648 	CSR_READ_4(sc, JME_GHC);
2649 
2650 	/* Stop TX and RX */
2651 	jme_stop_tx(sc);
2652 	jme_stop_rx(sc);
2653 }
2654 
2655 static void
2656 jme_init(void *xsc)
2657 {
2658 	struct jme_softc *sc = xsc;
2659 	struct ifnet *ifp = &sc->arpcom.ac_if;
2660 	struct mii_data *mii;
2661 	uint8_t eaddr[ETHER_ADDR_LEN];
2662 	bus_addr_t paddr;
2663 	uint32_t reg;
2664 	int error, r;
2665 
2666 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2667 
2668 	/*
2669 	 * Cancel any pending I/O.
2670 	 */
2671 	jme_stop(sc);
2672 
2673 	/*
2674 	 * Reset the chip to a known state.
2675 	 */
2676 	jme_reset(sc);
2677 
2678 	/*
2679 	 * Set up the MSI/MSI-X vector to interrupt mapping.
2680 	 */
2681 	jme_set_msinum(sc);
2682 
2683 	if (JME_ENABLE_HWRSS(sc))
2684 		jme_enable_rss(sc);
2685 	else
2686 		jme_disable_rss(sc);
2687 
2688 	/* Init RX descriptors */
2689 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2690 		error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2691 		if (error) {
2692 			if_printf(ifp, "initialization failed: "
2693 				  "no memory for %dth RX ring.\n", r);
2694 			jme_stop(sc);
2695 			return;
2696 		}
2697 	}
2698 
2699 	/* Init TX descriptors */
2700 	jme_init_tx_ring(&sc->jme_cdata.jme_tx_data);
2701 
2702 	/* Initialize shadow status block. */
2703 	jme_init_ssb(sc);
2704 
2705 	/* Reprogram the station address. */
2706 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2707 	CSR_WRITE_4(sc, JME_PAR0,
2708 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2709 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
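	/*
	 * Byte-packing example with a hypothetical station address
	 * 00:11:22:33:44:55: PAR0 is written as 0x33221100 and PAR1
	 * as 0x00005544, i.e. the address is laid out little-endian
	 * across the two registers.
	 */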
2710 
2711 	/*
2712 	 * Configure Tx queue.
2713 	 *  Tx priority queue weight value : 0
2714 	 *  Tx FIFO threshold for processing next packet : 16QW
2715 	 *  Maximum Tx DMA length : 512
2716 	 *  Allow Tx DMA burst.
2717 	 */
2718 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2719 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2720 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2721 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2722 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2723 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2724 
2725 	/* Set Tx descriptor counter. */
2726 	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt);
2727 
2728 	/* Set Tx ring address to the hardware. */
2729 	paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr;
2730 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2731 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2732 
2733 	/* Configure TxMAC parameters. */
2734 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2735 	reg |= TXMAC_THRESH_1_PKT;
2736 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2737 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2738 
2739 	/*
2740 	 * Configure Rx queue.
2741 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2742 	 *  FIFO threshold for processing next packet : 128QW
2743 	 *  Rx queue 0 select
2744 	 *  Max Rx DMA length : 128
2745 	 *  Rx descriptor retry : 32
2746 	 *  Rx descriptor retry time gap : 256ns
2747 	 *  Don't receive runt/bad frame.
2748 	 */
2749 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2750 #if 0
2751 	/*
2752 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2753 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2754 	 * decrease FIFO threshold to reduce the FIFO overruns for
2755 	 * frames larger than 4000 bytes.
2756 	 * For best performance of standard MTU sized frames use
2757 	 * maximum allowable FIFO threshold, 128QW.
2758 	 */
2759 	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2760 	    JME_RX_FIFO_SIZE)
2761 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2762 	else
2763 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2764 #else
2765 	/* Improve PCI Express compatibility */
2766 	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2767 #endif
2768 	sc->jme_rxcsr |= sc->jme_rx_dma_size;
2769 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2770 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2771 	/* XXX TODO DROP_BAD */
2772 
2773 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2774 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2775 
2776 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2777 
2778 		/* Set Rx descriptor counter. */
2779 		CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2780 
2781 		/* Set Rx ring address to the hardware. */
2782 		paddr = rdata->jme_rx_ring_paddr;
2783 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2784 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2785 	}
2786 
2787 	/* Clear receive filter. */
2788 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2789 
2790 	/* Set up the receive filter. */
2791 	jme_set_filter(sc);
2792 	jme_set_vlan(sc);
2793 
2794 	/*
2795 	 * Disable all WOL bits, as WOL can interfere with normal Rx
2796 	 * operation. Also clear WOL detection status bits.
2797 	 */
2798 	reg = CSR_READ_4(sc, JME_PMCS);
2799 	reg &= ~PMCS_WOL_ENB_MASK;
2800 	CSR_WRITE_4(sc, JME_PMCS, reg);
2801 
2802 	/*
2803 	 * Pad 10 bytes right before the received frame.  This greatly
2804 	 * helps Rx performance on strict-alignment architectures, as
2805 	 * the driver does not need to copy the frame to align the payload.
2806 	 */
2807 	reg = CSR_READ_4(sc, JME_RXMAC);
2808 	reg |= RXMAC_PAD_10BYTES;
2809 
2810 	if (ifp->if_capenable & IFCAP_RXCSUM)
2811 		reg |= RXMAC_CSUM_ENB;
2812 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2813 
2814 	/* Configure general purpose reg0 */
2815 	reg = CSR_READ_4(sc, JME_GPREG0);
2816 	reg &= ~GPREG0_PCC_UNIT_MASK;
2817 	/* Set PCC timer resolution to micro-seconds unit. */
2818 	reg |= GPREG0_PCC_UNIT_US;
2819 	/*
2820 	 * Disable all shadow register posting, as we have to read
2821 	 * the JME_INTR_STATUS register in jme_intr.  It also seems
2822 	 * hard to synchronize the interrupt status between the
2823 	 * hardware and the software with shadow posting, due to the
2824 	 * requirements of bus_dmamap_sync(9).
2825 	 */
2826 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2827 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2828 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2829 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2830 	/* Disable posting of DW0. */
2831 	reg &= ~GPREG0_POST_DW0_ENB;
2832 	/* Clear PME message. */
2833 	reg &= ~GPREG0_PME_ENB;
2834 	/* Set PHY address. */
2835 	reg &= ~GPREG0_PHY_ADDR_MASK;
2836 	reg |= sc->jme_phyaddr;
2837 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2838 
2839 	/* Configure Tx queue 0 packet completion coalescing. */
2840 	jme_set_tx_coal(sc);
2841 
2842 	/* Configure Rx queues packet completion coalescing. */
2843 	jme_set_rx_coal(sc);
2844 
2845 	/* Configure shadow status block but don't enable posting. */
2846 	paddr = sc->jme_cdata.jme_ssb_block_paddr;
2847 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2848 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2849 
2850 	/* Disable Timer 1 and Timer 2. */
2851 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2852 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2853 
2854 	/* Configure retry transmit period, retry limit value. */
2855 	CSR_WRITE_4(sc, JME_TXTRHD,
2856 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2857 	    TXTRHD_RT_PERIOD_MASK) |
2858 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2859 	    TXTRHD_RT_LIMIT_MASK));
2860 
2861 #ifdef IFPOLL_ENABLE
2862 	if (!(ifp->if_flags & IFF_NPOLLING))
2863 #endif
2864 	/* Initialize the interrupt mask. */
2865 	jme_enable_intr(sc);
2866 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2867 
2868 	/*
2869 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2870 	 * done after detection of valid link in jme_miibus_statchg.
2871 	 */
2872 	sc->jme_has_link = FALSE;
2873 
2874 	jme_phy_init(sc);
2875 
2876 	/* Set the current media. */
2877 	mii = device_get_softc(sc->jme_miibus);
2878 	mii_mediachg(mii);
2879 
2880 	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
2881 	    JME_TICK_CPUID);
2882 
2883 	ifp->if_flags |= IFF_RUNNING;
2884 	ifq_clr_oactive(&ifp->if_snd);
2885 }
2886 
2887 static void
2888 jme_stop(struct jme_softc *sc)
2889 {
2890 	struct ifnet *ifp = &sc->arpcom.ac_if;
2891 	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
2892 	struct jme_txdesc *txd;
2893 	struct jme_rxdesc *rxd;
2894 	struct jme_rxdata *rdata;
2895 	int i, r;
2896 
2897 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
2898 
2899 	/*
2900 	 * Mark the interface down and cancel the watchdog timer.
2901 	 */
2902 	ifp->if_flags &= ~IFF_RUNNING;
2903 	ifq_clr_oactive(&ifp->if_snd);
2904 	ifp->if_timer = 0;
2905 
2906 	callout_stop(&sc->jme_tick_ch);
2907 	sc->jme_has_link = FALSE;
2908 
2909 	/*
2910 	 * Disable interrupts.
2911 	 */
2912 	jme_disable_intr(sc);
2913 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2914 
2915 	/* Disable updating shadow status block. */
2916 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2917 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2918 
2919 	/* Stop receiver, transmitter. */
2920 	jme_stop_rx(sc);
2921 	jme_stop_tx(sc);
2922 
2923 	/*
2924 	 * Free partially finished RX segments.
2925 	 */
2926 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2927 		rdata = &sc->jme_cdata.jme_rx_data[r];
2928 		if (rdata->jme_rxhead != NULL)
2929 			m_freem(rdata->jme_rxhead);
2930 		JME_RXCHAIN_RESET(rdata);
2931 	}
2932 
2933 	/*
2934 	 * Free RX and TX mbufs still in the queues.
2935 	 */
2936 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2937 		rdata = &sc->jme_cdata.jme_rx_data[r];
2938 		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2939 			rxd = &rdata->jme_rxdesc[i];
2940 			if (rxd->rx_m != NULL) {
2941 				bus_dmamap_unload(rdata->jme_rx_tag,
2942 						  rxd->rx_dmamap);
2943 				m_freem(rxd->rx_m);
2944 				rxd->rx_m = NULL;
2945 			}
2946 		}
2947 	}
2948 	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
2949 		txd = &tdata->jme_txdesc[i];
2950 		if (txd->tx_m != NULL) {
2951 			bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
2952 			m_freem(txd->tx_m);
2953 			txd->tx_m = NULL;
2954 			txd->tx_ndesc = 0;
2955 		}
2956 	}
2957 }
2958 
2959 static void
2960 jme_stop_tx(struct jme_softc *sc)
2961 {
2962 	uint32_t reg;
2963 	int i;
2964 
2965 	reg = CSR_READ_4(sc, JME_TXCSR);
2966 	if ((reg & TXCSR_TX_ENB) == 0)
2967 		return;
2968 	reg &= ~TXCSR_TX_ENB;
2969 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2970 	for (i = JME_TIMEOUT; i > 0; i--) {
2971 		DELAY(1);
2972 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2973 			break;
2974 	}
2975 	if (i == 0)
2976 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2977 }
2978 
2979 static void
2980 jme_stop_rx(struct jme_softc *sc)
2981 {
2982 	uint32_t reg;
2983 	int i;
2984 
2985 	reg = CSR_READ_4(sc, JME_RXCSR);
2986 	if ((reg & RXCSR_RX_ENB) == 0)
2987 		return;
2988 	reg &= ~RXCSR_RX_ENB;
2989 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2990 	for (i = JME_TIMEOUT; i > 0; i--) {
2991 		DELAY(1);
2992 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2993 			break;
2994 	}
2995 	if (i == 0)
2996 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2997 }
2998 
2999 static void
3000 jme_init_tx_ring(struct jme_txdata *tdata)
3001 {
3002 	struct jme_txdesc *txd;
3003 	int i;
3004 
3005 	tdata->jme_tx_prod = 0;
3006 	tdata->jme_tx_cons = 0;
3007 	tdata->jme_tx_cnt = 0;
3008 
3009 	bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata));
3010 	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
3011 		txd = &tdata->jme_txdesc[i];
3012 		txd->tx_m = NULL;
3013 		txd->tx_desc = &tdata->jme_tx_ring[i];
3014 		txd->tx_ndesc = 0;
3015 	}
3016 }
3017 
3018 static void
3019 jme_init_ssb(struct jme_softc *sc)
3020 {
3021 	struct jme_chain_data *cd;
3022 
3023 	cd = &sc->jme_cdata;
3024 	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
3025 }
3026 
3027 static int
3028 jme_init_rx_ring(struct jme_rxdata *rdata)
3029 {
3030 	struct jme_rxdesc *rxd;
3031 	int i;
3032 
3033 	KKASSERT(rdata->jme_rxhead == NULL &&
3034 		 rdata->jme_rxtail == NULL &&
3035 		 rdata->jme_rxlen == 0);
3036 	rdata->jme_rx_cons = 0;
3037 
3038 	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
3039 	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3040 		int error;
3041 
3042 		rxd = &rdata->jme_rxdesc[i];
3043 		rxd->rx_m = NULL;
3044 		rxd->rx_desc = &rdata->jme_rx_ring[i];
3045 		error = jme_newbuf(rdata, rxd, 1);
3046 		if (error)
3047 			return error;
3048 	}
3049 	return 0;
3050 }
3051 
3052 static int
3053 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
3054 {
3055 	struct mbuf *m;
3056 	bus_dma_segment_t segs;
3057 	bus_dmamap_t map;
3058 	int error, nsegs;
3059 
3060 	m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
3061 	if (m == NULL)
3062 		return ENOBUFS;
3063 	/*
3064 	 * The JMC250 has a 64-bit boundary alignment limitation, so
3065 	 * jme(4) takes advantage of the hardware's 10-byte padding
3066 	 * feature to avoid copying the entire frame just to align the
3067 	 * IP header on a 32-bit boundary.
3068 	 */
3069 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3070 
3071 	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
3072 			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
3073 			BUS_DMA_NOWAIT);
3074 	if (error) {
3075 		m_freem(m);
3076 		if (init) {
3077 			if_printf(&rdata->jme_sc->arpcom.ac_if,
3078 			    "can't load RX mbuf\n");
3079 		}
3080 		return error;
3081 	}
3082 
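	/*
	 * Spare-map technique: the new mbuf was loaded into the spare
	 * dmamap above, so after unloading the old map below, the
	 * descriptor's old map becomes the new spare and the loaded
	 * spare becomes the descriptor's map.  A load failure thus
	 * never leaves the descriptor without a usable map or mbuf.
	 */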
3083 	if (rxd->rx_m != NULL) {
3084 		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
3085 				BUS_DMASYNC_POSTREAD);
3086 		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
3087 	}
3088 	map = rxd->rx_dmamap;
3089 	rxd->rx_dmamap = rdata->jme_rx_sparemap;
3090 	rdata->jme_rx_sparemap = map;
3091 	rxd->rx_m = m;
3092 	rxd->rx_paddr = segs.ds_addr;
3093 
3094 	jme_setup_rxdesc(rxd);
3095 	return 0;
3096 }
3097 
3098 static void
3099 jme_set_vlan(struct jme_softc *sc)
3100 {
3101 	struct ifnet *ifp = &sc->arpcom.ac_if;
3102 	uint32_t reg;
3103 
3104 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3105 
3106 	reg = CSR_READ_4(sc, JME_RXMAC);
3107 	reg &= ~RXMAC_VLAN_ENB;
3108 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3109 		reg |= RXMAC_VLAN_ENB;
3110 	CSR_WRITE_4(sc, JME_RXMAC, reg);
3111 }
3112 
3113 static void
3114 jme_set_filter(struct jme_softc *sc)
3115 {
3116 	struct ifnet *ifp = &sc->arpcom.ac_if;
3117 	struct ifmultiaddr *ifma;
3118 	uint32_t crc;
3119 	uint32_t mchash[2];
3120 	uint32_t rxcfg;
3121 
3122 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3123 
3124 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
3125 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3126 	    RXMAC_ALLMULTI);
3127 
3128 	/*
3129 	 * Always accept frames destined to our station address.
3130 	 * Always accept broadcast frames.
3131 	 */
3132 	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3133 
3134 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3135 		if (ifp->if_flags & IFF_PROMISC)
3136 			rxcfg |= RXMAC_PROMISC;
3137 		if (ifp->if_flags & IFF_ALLMULTI)
3138 			rxcfg |= RXMAC_ALLMULTI;
3139 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3140 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3141 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3142 		return;
3143 	}
3144 
3145 	/*
3146 	 * Set up the multicast address filter by passing all multicast
3147 	 * addresses through a CRC generator, and then using the low-order
3148 	 * 6 bits as an index into the 64-bit multicast hash table.  The
3149 	 * high-order bit selects the register, while the remaining bits
3150 	 * select the bit within the register.
3151 	 */
3152 	rxcfg |= RXMAC_MULTICAST;
3153 	bzero(mchash, sizeof(mchash));
3154 
3155 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3156 		if (ifma->ifma_addr->sa_family != AF_LINK)
3157 			continue;
3158 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3159 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3160 
3161 		/* Just want the 6 least significant bits. */
3162 		crc &= 0x3f;
3163 
3164 		/* Set the corresponding bit in the hash table. */
3165 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
3166 	}
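	/*
	 * Index example: a CRC whose low 6 bits are 43 (0x2b) selects
	 * mchash[43 >> 5] = mchash[1] and bit (43 & 0x1f) = 11, i.e.
	 * mchash[1] |= 1 << 11.
	 */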
3167 
3168 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3169 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3170 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3171 }
3172 
3173 static int
3174 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3175 {
3176 	struct jme_softc *sc = arg1;
3177 	struct ifnet *ifp = &sc->arpcom.ac_if;
3178 	int error, v;
3179 
3180 	ifnet_serialize_all(ifp);
3181 
3182 	v = sc->jme_tx_coal_to;
3183 	error = sysctl_handle_int(oidp, &v, 0, req);
3184 	if (error || req->newptr == NULL)
3185 		goto back;
3186 
3187 	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3188 		error = EINVAL;
3189 		goto back;
3190 	}
3191 
3192 	if (v != sc->jme_tx_coal_to) {
3193 		sc->jme_tx_coal_to = v;
3194 		if (ifp->if_flags & IFF_RUNNING)
3195 			jme_set_tx_coal(sc);
3196 	}
3197 back:
3198 	ifnet_deserialize_all(ifp);
3199 	return error;
3200 }
3201 
3202 static int
3203 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3204 {
3205 	struct jme_softc *sc = arg1;
3206 	struct ifnet *ifp = &sc->arpcom.ac_if;
3207 	int error, v;
3208 
3209 	ifnet_serialize_all(ifp);
3210 
3211 	v = sc->jme_tx_coal_pkt;
3212 	error = sysctl_handle_int(oidp, &v, 0, req);
3213 	if (error || req->newptr == NULL)
3214 		goto back;
3215 
3216 	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3217 		error = EINVAL;
3218 		goto back;
3219 	}
3220 
3221 	if (v != sc->jme_tx_coal_pkt) {
3222 		sc->jme_tx_coal_pkt = v;
3223 		if (ifp->if_flags & IFF_RUNNING)
3224 			jme_set_tx_coal(sc);
3225 	}
3226 back:
3227 	ifnet_deserialize_all(ifp);
3228 	return error;
3229 }
3230 
3231 static int
3232 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3233 {
3234 	struct jme_softc *sc = arg1;
3235 	struct ifnet *ifp = &sc->arpcom.ac_if;
3236 	int error, v;
3237 
3238 	ifnet_serialize_all(ifp);
3239 
3240 	v = sc->jme_rx_coal_to;
3241 	error = sysctl_handle_int(oidp, &v, 0, req);
3242 	if (error || req->newptr == NULL)
3243 		goto back;
3244 
3245 	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3246 		error = EINVAL;
3247 		goto back;
3248 	}
3249 
3250 	if (v != sc->jme_rx_coal_to) {
3251 		sc->jme_rx_coal_to = v;
3252 		if (ifp->if_flags & IFF_RUNNING)
3253 			jme_set_rx_coal(sc);
3254 	}
3255 back:
3256 	ifnet_deserialize_all(ifp);
3257 	return error;
3258 }
3259 
3260 static int
3261 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3262 {
3263 	struct jme_softc *sc = arg1;
3264 	struct ifnet *ifp = &sc->arpcom.ac_if;
3265 	int error, v;
3266 
3267 	ifnet_serialize_all(ifp);
3268 
3269 	v = sc->jme_rx_coal_pkt;
3270 	error = sysctl_handle_int(oidp, &v, 0, req);
3271 	if (error || req->newptr == NULL)
3272 		goto back;
3273 
3274 	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3275 		error = EINVAL;
3276 		goto back;
3277 	}
3278 
3279 	if (v != sc->jme_rx_coal_pkt) {
3280 		sc->jme_rx_coal_pkt = v;
3281 		if (ifp->if_flags & IFF_RUNNING)
3282 			jme_set_rx_coal(sc);
3283 	}
3284 back:
3285 	ifnet_deserialize_all(ifp);
3286 	return error;
3287 }
3288 
3289 static void
3290 jme_set_tx_coal(struct jme_softc *sc)
3291 {
3292 	uint32_t reg;
3293 
3294 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3295 	    PCCTX_COAL_TO_MASK;
3296 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3297 	    PCCTX_COAL_PKT_MASK;
3298 	reg |= PCCTX_COAL_TXQ0;
3299 	CSR_WRITE_4(sc, JME_PCCTX, reg);
3300 }
3301 
3302 static void
3303 jme_set_rx_coal(struct jme_softc *sc)
3304 {
3305 	uint32_t reg;
3306 	int r;
3307 
3308 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3309 	    PCCRX_COAL_TO_MASK;
3310 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3311 	    PCCRX_COAL_PKT_MASK;
3312 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3313 		CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3314 }
3315 
3316 #ifdef IFPOLL_ENABLE
3317 
3318 static void
3319 jme_npoll_status(struct ifnet *ifp)
3320 {
3321 	struct jme_softc *sc = ifp->if_softc;
3322 	uint32_t status;
3323 
3324 	ASSERT_SERIALIZED(&sc->jme_serialize);
3325 
3326 	status = CSR_READ_4(sc, JME_INTR_STATUS);
3327 	if (status & INTR_RXQ_DESC_EMPTY) {
3328 		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3329 		jme_rx_restart(sc, status);
3330 	}
3331 }
3332 
3333 static void
3334 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3335 {
3336 	struct jme_rxdata *rdata = arg;
3337 
3338 	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3339 
3340 	jme_rxeof(rdata, cycle, mycpuid);
3341 }
3342 
3343 static void
3344 jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
3345 {
3346 	struct jme_txdata *tdata = arg;
3347 
3348 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3349 
3350 	jme_txeof(tdata);
3351 	if (!ifq_is_empty(&ifp->if_snd))
3352 		if_devstart(ifp);
3353 }
3354 
3355 static void
3356 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3357 {
3358 	struct jme_softc *sc = ifp->if_softc;
3359 
3360 	ASSERT_IFNET_SERIALIZED_ALL(ifp);
3361 
3362 	if (info) {
3363 		int i, cpu;
3364 
3365 		info->ifpi_status.status_func = jme_npoll_status;
3366 		info->ifpi_status.serializer = &sc->jme_serialize;
3367 
3368 		cpu = if_ringmap_cpumap(sc->jme_tx_rmap, 0);
3369 		KKASSERT(cpu <= netisr_ncpus);
3370 		info->ifpi_tx[cpu].poll_func = jme_npoll_tx;
3371 		info->ifpi_tx[cpu].arg = &sc->jme_cdata.jme_tx_data;
3372 		info->ifpi_tx[cpu].serializer =
3373 		    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3374 		ifq_set_cpuid(&ifp->if_snd, cpu);
3375 
3376 		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3377 			struct jme_rxdata *rdata =
3378 			    &sc->jme_cdata.jme_rx_data[i];
3379 
3380 			cpu = if_ringmap_cpumap(sc->jme_rx_rmap, i);
3381 			KKASSERT(cpu <= netisr_ncpus);
3382 			info->ifpi_rx[cpu].poll_func = jme_npoll_rx;
3383 			info->ifpi_rx[cpu].arg = rdata;
3384 			info->ifpi_rx[cpu].serializer =
3385 			    &rdata->jme_rx_serialize;
3386 		}
3387 
3388 		if (ifp->if_flags & IFF_RUNNING)
3389 			jme_disable_intr(sc);
3390 	} else {
3391 		ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
3392 		if (ifp->if_flags & IFF_RUNNING)
3393 			jme_enable_intr(sc);
3394 	}
3395 }
3396 
3397 #endif	/* IFPOLL_ENABLE */
3398 
3399 static int
3400 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3401 {
3402 	bus_dmamem_t dmem;
3403 	int error, asize;
3404 
3405 	asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3406 	error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3407 			JME_RX_RING_ALIGN, 0,
3408 			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3409 			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3410 	if (error) {
3411 		device_printf(rdata->jme_sc->jme_dev,
3412 		    "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3413 		return error;
3414 	}
3415 	rdata->jme_rx_ring_tag = dmem.dmem_tag;
3416 	rdata->jme_rx_ring_map = dmem.dmem_map;
3417 	rdata->jme_rx_ring = dmem.dmem_addr;
3418 	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3419 
3420 	return 0;
3421 }
3422 
3423 static int
3424 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3425 {
3426 	bus_addr_t lowaddr;
3427 	int i, error;
3428 
3429 	lowaddr = BUS_SPACE_MAXADDR;
3430 	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
3431 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
3432 	}
3433 
3434 	/* Create tag for Rx buffers. */
3435 	error = bus_dma_tag_create(
3436 	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3437 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
3438 	    lowaddr,			/* lowaddr */
3439 	    BUS_SPACE_MAXADDR,		/* highaddr */
3440 	    MCLBYTES,			/* maxsize */
3441 	    1,				/* nsegments */
3442 	    MCLBYTES,			/* maxsegsize */
3443 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3444 	    &rdata->jme_rx_tag);
3445 	if (error) {
3446 		device_printf(rdata->jme_sc->jme_dev,
3447 		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3448 		return error;
3449 	}
3450 
3451 	/* Create DMA maps for Rx buffers. */
3452 	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3453 				  &rdata->jme_rx_sparemap);
3454 	if (error) {
3455 		device_printf(rdata->jme_sc->jme_dev,
3456 		    "could not create %dth spare Rx dmamap.\n",
3457 		    rdata->jme_rx_idx);
3458 		bus_dma_tag_destroy(rdata->jme_rx_tag);
3459 		rdata->jme_rx_tag = NULL;
3460 		return error;
3461 	}
3462 	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3463 		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3464 
3465 		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3466 					  &rxd->rx_dmamap);
3467 		if (error) {
3468 			int j;
3469 
3470 			device_printf(rdata->jme_sc->jme_dev,
3471 			    "could not create %dth Rx dmamap "
3472 			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3473 
3474 			for (j = 0; j < i; ++j) {
3475 				rxd = &rdata->jme_rxdesc[j];
3476 				bus_dmamap_destroy(rdata->jme_rx_tag,
3477 						   rxd->rx_dmamap);
3478 			}
3479 			bus_dmamap_destroy(rdata->jme_rx_tag,
3480 					   rdata->jme_rx_sparemap);
3481 			bus_dma_tag_destroy(rdata->jme_rx_tag);
3482 			rdata->jme_rx_tag = NULL;
3483 			return error;
3484 		}
3485 	}
3486 	return 0;
3487 }
3488 
3489 static void
3490 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3491 {
3492 	int r, cpuid = mycpuid;
3493 
3494 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3495 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3496 
3497 		if (status & rdata->jme_rx_coal) {
3498 			lwkt_serialize_enter(&rdata->jme_rx_serialize);
3499 			jme_rxeof(rdata, -1, cpuid);
3500 			lwkt_serialize_exit(&rdata->jme_rx_serialize);
3501 		}
3502 	}
3503 }
3504 
3505 static void
3506 jme_enable_rss(struct jme_softc *sc)
3507 {
3508 	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3509 	uint32_t rssc;
3510 	int j, i, r;
3511 
3512 	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3513 		sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3514 		("%s: invalid # of RX rings (%d)",
3515 		 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3516 	jme_disable_rss(sc);
3517 
3518 	toeplitz_get_key(key, sizeof(key));
3519 	for (i = 0; i < RSSKEY_NREGS; ++i) {
3520 		uint32_t keyreg;
3521 
3522 		keyreg = RSSKEY_REGVAL(key, i);
3523 		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x, reg 0x%08x\n",
3524 		    i, keyreg, RSSKEY_REG(RSSKEY_NREGS - 1 - i));
3525 
3526 		CSR_WRITE_4(sc, RSSKEY_REG(RSSKEY_NREGS - 1 - i), keyreg);
3527 	}
3528 
3529 	/*
3530 	 * Fill redirect table.
3531 	 */
3532 	if_ringmap_rdrtable(sc->jme_rx_rmap, sc->jme_rdrtable,
3533 	    JME_RDRTABLE_SIZE);
3534 
3535 	r = 0;
3536 	for (j = 0; j < RSSTBL_NREGS; ++j) {
3537 		uint32_t ind = 0;
3538 
3539 		for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3540 			int q;
3541 
3542 			q = sc->jme_rdrtable[r];
3543 			ind |= q << (i * 8);
3544 			++r;
3545 		}
3546 		JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3547 		CSR_WRITE_4(sc, RSSTBL_REG(j), ind);
3548 	}
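	/*
	 * Packing example, assuming RSSTBL_REGSIZE == 4: four 8-bit
	 * queue indices are packed into each 32-bit register, so a
	 * redirect table slice {0, 1, 2, 3} is written as
	 * ind = 0x03020100.
	 */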
3549 
3550 	/*
3551 	 * Enable RSS.
3552 	 */
3553 	rssc = RSSC_HASH_128_ENTRY;
3554 	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3555 	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3556 	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3557 	CSR_WRITE_4(sc, JME_RSSC, rssc);
3558 }
3559 
3560 static void
3561 jme_disable_rss(struct jme_softc *sc)
3562 {
3563 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3564 }
3565 
3566 static void
3567 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3568 {
3569 	struct jme_softc *sc = ifp->if_softc;
3570 
3571 	ifnet_serialize_array_enter(sc->jme_serialize_arr,
3572 	    sc->jme_serialize_cnt, slz);
3573 }
3574 
3575 static void
3576 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3577 {
3578 	struct jme_softc *sc = ifp->if_softc;
3579 
3580 	ifnet_serialize_array_exit(sc->jme_serialize_arr,
3581 	    sc->jme_serialize_cnt, slz);
3582 }
3583 
3584 static int
3585 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3586 {
3587 	struct jme_softc *sc = ifp->if_softc;
3588 
3589 	return ifnet_serialize_array_try(sc->jme_serialize_arr,
3590 	    sc->jme_serialize_cnt, slz);
3591 }
3592 
3593 #ifdef INVARIANTS
3594 
3595 static void
3596 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3597     boolean_t serialized)
3598 {
3599 	struct jme_softc *sc = ifp->if_softc;
3600 
3601 	ifnet_serialize_array_assert(sc->jme_serialize_arr,
3602 	    sc->jme_serialize_cnt, slz, serialized);
3603 }
3604 
3605 #endif	/* INVARIANTS */
3606 
3607 static void
3608 jme_msix_try_alloc(device_t dev)
3609 {
3610 	struct jme_softc *sc = device_get_softc(dev);
3611 	struct jme_msix_data *msix;
3612 	int error, i, r, msix_enable, msix_count;
3613 
3614 	msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
3615 	KKASSERT(msix_count <= JME_NMSIX);
3616 
3617 	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3618 
3619 	/*
3620 	 * We leave the 1st MSI-X vector unused, so we
3621 	 * actually need msix_count + 1 MSI-X vectors.
3622 	 */
3623 	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3624 		return;
3625 
3626 	for (i = 0; i < msix_count; ++i)
3627 		sc->jme_msix[i].jme_msix_rid = -1;
3628 
3629 	i = 0;
3630 
3631 	/*
3632 	 * Setup status MSI-X
3633 	 */
3634 	msix = &sc->jme_msix[i++];
3635 	msix->jme_msix_cpuid = 0;
3636 	msix->jme_msix_arg = sc;
3637 	msix->jme_msix_func = jme_msix_status;
3638 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3639 		msix->jme_msix_intrs |=
3640 		    sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
3641 	}
3642 	msix->jme_msix_serialize = &sc->jme_serialize;
3643 	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
3644 	    device_get_nameunit(dev));
3645 
3646 	/*
3647 	 * Setup TX MSI-X
3648 	 */
3649 	msix = &sc->jme_msix[i++];
3650 	msix->jme_msix_cpuid = if_ringmap_cpumap(sc->jme_tx_rmap, 0);
3651 	sc->jme_tx_cpuid = msix->jme_msix_cpuid;
3652 	msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data;
3653 	msix->jme_msix_func = jme_msix_tx;
3654 	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3655 	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3656 	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3657 	    device_get_nameunit(dev));
3658 
3659 	/*
3660 	 * Setup RX MSI-X
3661 	 */
3662 	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3663 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3664 
3665 		msix = &sc->jme_msix[i++];
3666 		msix->jme_msix_cpuid = if_ringmap_cpumap(sc->jme_rx_rmap, r);
3667 		KKASSERT(msix->jme_msix_cpuid < netisr_ncpus);
3668 		msix->jme_msix_arg = rdata;
3669 		msix->jme_msix_func = jme_msix_rx;
3670 		msix->jme_msix_intrs = rdata->jme_rx_coal;
3671 		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3672 		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3673 		    "%s rx%d", device_get_nameunit(dev), r);
3674 	}
3675 
3676 	KKASSERT(i == msix_count);
3677 
3678 	error = pci_setup_msix(dev);
3679 	if (error)
3680 		return;
3681 
3682 	/* Set jme_msix_cnt early, so we can clean up on error */
3683 	sc->jme_msix_cnt = msix_count;
3684 
3685 	for (i = 0; i < msix_count; ++i) {
3686 		msix = &sc->jme_msix[i];
3687 
3688 		msix->jme_msix_vector = i + 1;
3689 		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3690 		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
3691 		if (error)
3692 			goto back;
3693 
3694 		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3695 		    &msix->jme_msix_rid, RF_ACTIVE);
3696 		if (msix->jme_msix_res == NULL) {
3697 			error = ENOMEM;
3698 			goto back;
3699 		}
3700 	}
3701 
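	/*
	 * Build the interrupt-source to MSI-X vector routing: each of
	 * the JME_INTR_CNT sources owns a 4-bit nibble in the MSINUM
	 * registers (JME_MSINUM_FACTOR sources per register).  The
	 * accumulated values are written to hardware later by
	 * jme_set_msinum().
	 */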
3702 	for (i = 0; i < JME_INTR_CNT; ++i) {
3703 		uint32_t intr_mask = (1 << i);
3704 		int x;
3705 
3706 		if ((JME_INTRS & intr_mask) == 0)
3707 			continue;
3708 
3709 		for (x = 0; x < msix_count; ++x) {
3710 			msix = &sc->jme_msix[x];
3711 			if (msix->jme_msix_intrs & intr_mask) {
3712 				int reg, shift;
3713 
3714 				reg = i / JME_MSINUM_FACTOR;
3715 				KKASSERT(reg < JME_MSINUM_CNT);
3716 
3717 				shift = (i % JME_MSINUM_FACTOR) * 4;
3718 
3719 				sc->jme_msinum[reg] |=
3720 				    (msix->jme_msix_vector << shift);
3721 
3722 				break;
3723 			}
3724 		}
3725 	}
3726 
3727 	if (bootverbose) {
3728 		for (i = 0; i < JME_MSINUM_CNT; ++i) {
3729 			device_printf(dev, "MSINUM%d: %#x\n", i,
3730 			    sc->jme_msinum[i]);
3731 		}
3732 	}
3733 
3734 	pci_enable_msix(dev);
3735 	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3736 
3737 back:
3738 	if (error)
3739 		jme_msix_free(dev);
3740 }
3741 
3742 static int
3743 jme_intr_alloc(device_t dev)
3744 {
3745 	struct jme_softc *sc = device_get_softc(dev);
3746 	u_int irq_flags;
3747 
3748 	jme_msix_try_alloc(dev);
3749 
3750 	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3751 		sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3752 		    &sc->jme_irq_rid, &irq_flags);
3753 
3754 		sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3755 		    &sc->jme_irq_rid, irq_flags);
3756 		if (sc->jme_irq_res == NULL) {
3757 			device_printf(dev, "can't allocate irq\n");
3758 			return ENXIO;
3759 		}
3760 		sc->jme_tx_cpuid = rman_get_cpuid(sc->jme_irq_res);
3761 	}
3762 	return 0;
3763 }
3764 
3765 static void
3766 jme_msix_free(device_t dev)
3767 {
3768 	struct jme_softc *sc = device_get_softc(dev);
3769 	int i;
3770 
3771 	KKASSERT(sc->jme_msix_cnt > 1);
3772 
3773 	for (i = 0; i < sc->jme_msix_cnt; ++i) {
3774 		struct jme_msix_data *msix = &sc->jme_msix[i];
3775 
3776 		if (msix->jme_msix_res != NULL) {
3777 			bus_release_resource(dev, SYS_RES_IRQ,
3778 			    msix->jme_msix_rid, msix->jme_msix_res);
3779 			msix->jme_msix_res = NULL;
3780 		}
3781 		if (msix->jme_msix_rid >= 0) {
3782 			pci_release_msix_vector(dev, msix->jme_msix_rid);
3783 			msix->jme_msix_rid = -1;
3784 		}
3785 	}
3786 	pci_teardown_msix(dev);
3787 }
3788 
3789 static void
3790 jme_intr_free(device_t dev)
3791 {
3792 	struct jme_softc *sc = device_get_softc(dev);
3793 
3794 	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3795 		if (sc->jme_irq_res != NULL) {
3796 			bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3797 					     sc->jme_irq_res);
3798 		}
3799 		if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3800 			pci_release_msi(dev);
3801 	} else {
3802 		jme_msix_free(dev);
3803 	}
3804 }
3805 
3806 static void
3807 jme_msix_tx(void *xtdata)
3808 {
3809 	struct jme_txdata *tdata = xtdata;
3810 	struct jme_softc *sc = tdata->jme_sc;
3811 	struct ifnet *ifp = &sc->arpcom.ac_if;
3812 
3813 	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3814 
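	/*
	 * Mask the TX coalescing interrupts, ack the latched status,
	 * process completed transmissions, then unmask; jme_msix_rx()
	 * follows the same pattern for its RX sources.
	 */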
3815 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3816 
3817 	CSR_WRITE_4(sc, JME_INTR_STATUS,
3818 	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3819 
3820 	if (ifp->if_flags & IFF_RUNNING) {
3821 		jme_txeof(tdata);
3822 		if (!ifq_is_empty(&ifp->if_snd))
3823 			if_devstart(ifp);
3824 	}
3825 
3826 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3827 }
3828 
3829 static void
3830 jme_msix_rx(void *xrdata)
3831 {
3832 	struct jme_rxdata *rdata = xrdata;
3833 	struct jme_softc *sc = rdata->jme_sc;
3834 	struct ifnet *ifp = &sc->arpcom.ac_if;
3835 
3836 	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3837 
3838 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);
3839 
3840 	CSR_WRITE_4(sc, JME_INTR_STATUS,
3841 	    rdata->jme_rx_coal | rdata->jme_rx_comp);
3842 
3843 	if (ifp->if_flags & IFF_RUNNING)
3844 		jme_rxeof(rdata, -1, mycpuid);
3845 
3846 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
3847 }
3848 
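/*
 * Status vector handler: only the RX-queue-empty conditions are
 * serviced here, by refilling the rings and restarting the RX MAC
 * via jme_rx_restart().
 */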
3849 static void
3850 jme_msix_status(void *xsc)
3851 {
3852 	struct jme_softc *sc = xsc;
3853 	struct ifnet *ifp = &sc->arpcom.ac_if;
3854 	uint32_t status;
3855 
3856 	ASSERT_SERIALIZED(&sc->jme_serialize);
3857 
3858 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);
3859 
3860 	status = CSR_READ_4(sc, JME_INTR_STATUS);
3861 
3862 	if (status & INTR_RXQ_DESC_EMPTY) {
3863 		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3864 		if (ifp->if_flags & IFF_RUNNING)
3865 			jme_rx_restart(sc, status);
3866 	}
3867 
3868 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
3869 }
3870 
3871 static void
3872 jme_rx_restart(struct jme_softc *sc, uint32_t status)
3873 {
3874 	int i, cpuid = mycpuid;
3875 
3876 	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3877 		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
3878 
3879 		if (status & rdata->jme_rx_empty) {
3880 			lwkt_serialize_enter(&rdata->jme_rx_serialize);
3881 			jme_rxeof(rdata, -1, cpuid);
3882 #ifdef JME_RSS_DEBUG
3883 			rdata->jme_rx_emp++;
3884 #endif
3885 			lwkt_serialize_exit(&rdata->jme_rx_serialize);
3886 		}
3887 	}
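	/* All empty rings refilled; re-enable and restart the RX queue. */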
3888 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
3889 	    RXCSR_RXQ_START);
3890 }
3891 
3892 static void
3893 jme_set_msinum(struct jme_softc *sc)
3894 {
3895 	int i;
3896 
3897 	for (i = 0; i < JME_MSINUM_CNT; ++i)
3898 		CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3899 }
3900 
3901 static int
3902 jme_intr_setup(device_t dev)
3903 {
3904 	struct jme_softc *sc = device_get_softc(dev);
3905 	int error;
3906 
3907 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3908 		return jme_msix_setup(dev);
3909 
3910 	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3911 	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3912 	if (error) {
3913 		device_printf(dev, "could not set up interrupt handler.\n");
3914 		return error;
3915 	}
3916 
3917 	return 0;
3918 }
3919 
3920 static void
3921 jme_intr_teardown(device_t dev)
3922 {
3923 	struct jme_softc *sc = device_get_softc(dev);
3924 
3925 	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3926 		jme_msix_teardown(dev, sc->jme_msix_cnt);
3927 	else
3928 		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3929 }
3930 
3931 static int
3932 jme_msix_setup(device_t dev)
3933 {
3934 	struct jme_softc *sc = device_get_softc(dev);
3935 	int x;
3936 
3937 	for (x = 0; x < sc->jme_msix_cnt; ++x) {
3938 		struct jme_msix_data *msix = &sc->jme_msix[x];
3939 		int error;
3940 
3941 		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3942 		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3943 		    &msix->jme_msix_handle, msix->jme_msix_serialize,
3944 		    msix->jme_msix_desc);
3945 		if (error) {
3946 			device_printf(dev, "could not set up %s "
3947 			    "interrupt handler.\n", msix->jme_msix_desc);
3948 			jme_msix_teardown(dev, x);
3949 			return error;
3950 		}
3951 	}
3952 	return 0;
3953 }
3954 
3955 static void
3956 jme_msix_teardown(device_t dev, int msix_count)
3957 {
3958 	struct jme_softc *sc = device_get_softc(dev);
3959 	int x;
3960 
3961 	for (x = 0; x < msix_count; ++x) {
3962 		struct jme_msix_data *msix = &sc->jme_msix[x];
3963 
3964 		bus_teardown_intr(dev, msix->jme_msix_res,
3965 		    msix->jme_msix_handle);
3966 	}
3967 }
3968 
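/*
 * Enter/exit every serializer except the first (main) one; the
 * trailing "1" passed to lwkt_serialize_array_enter/exit is the
 * starting array offset.
 */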
3969 static void
3970 jme_serialize_skipmain(struct jme_softc *sc)
3971 {
3972 	lwkt_serialize_array_enter(sc->jme_serialize_arr,
3973 	    sc->jme_serialize_cnt, 1);
3974 }
3975 
3976 static void
3977 jme_deserialize_skipmain(struct jme_softc *sc)
3978 {
3979 	lwkt_serialize_array_exit(sc->jme_serialize_arr,
3980 	    sc->jme_serialize_cnt, 1);
3981 }
3982 
3983 static void
3984 jme_enable_intr(struct jme_softc *sc)
3985 {
3986 	int i;
3987 
3988 	for (i = 0; i < sc->jme_serialize_cnt; ++i)
3989 		lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);
3990 
3991 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3992 }
3993 
3994 static void
3995 jme_disable_intr(struct jme_softc *sc)
3996 {
3997 	int i;
3998 
3999 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
4000 
4001 	for (i = 0; i < sc->jme_serialize_cnt; ++i)
4002 		lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);
4003 }
4004 
4005 static void
4006 jme_phy_poweron(struct jme_softc *sc)
4007 {
4008 	uint16_t bmcr;
4009 
4010 	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
4011 	bmcr &= ~BMCR_PDOWN;
4012 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
4013 
4014 	if (sc->jme_caps & JME_CAP_PHYPWR) {
4015 		uint32_t val;
4016 
4017 		val = CSR_READ_4(sc, JME_PHYPWR);
4018 		val &= ~(PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
4019 		    PHYPWR_DOWN2 | PHYPWR_CLKSEL);
4020 		CSR_WRITE_4(sc, JME_PHYPWR, val);
4021 
4022 		val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
4023 		val &= ~PE1_GPREG0_PHYBG;
4024 		val |= PE1_GPREG0_ENBG;
4025 		pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
4026 	}
4027 }
4028 
4029 static void
4030 jme_phy_poweroff(struct jme_softc *sc)
4031 {
4032 	uint16_t bmcr;
4033 
4034 	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
4035 	bmcr |= BMCR_PDOWN;
4036 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
4037 
4038 	if (sc->jme_caps & JME_CAP_PHYPWR) {
4039 		uint32_t val;
4040 
4041 		val = CSR_READ_4(sc, JME_PHYPWR);
4042 		val |= PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
4043 		    PHYPWR_DOWN2 | PHYPWR_CLKSEL;
4044 		CSR_WRITE_4(sc, JME_PHYPWR, val);
4045 
4046 		val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
4047 		val &= ~PE1_GPREG0_PHYBG;
4048 		val |= PE1_GPREG0_PDD3COLD;
4049 		pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
4050 	}
4051 }
4052 
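/*
 * The extended (vendor) PHY registers are reached indirectly: an
 * RD/WR opcode plus register number goes into JME_MII_EXT_ADDR and
 * the payload moves through JME_MII_EXT_DATA.  For writes the data
 * register is loaded before the address/opcode triggers the access.
 */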
4053 static int
4054 jme_miiext_read(struct jme_softc *sc, int reg)
4055 {
4056 	int addr;
4057 
4058 	addr = JME_MII_EXT_ADDR_RD | reg;
4059 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4060 	    JME_MII_EXT_ADDR, addr);
4061 	return jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr,
4062 	    JME_MII_EXT_DATA);
4063 }
4064 
4065 static void
4066 jme_miiext_write(struct jme_softc *sc, int reg, int val)
4067 {
4068 	int addr;
4069 
4070 	addr = JME_MII_EXT_ADDR_WR | reg;
4071 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4072 	    JME_MII_EXT_DATA, val);
4073 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4074 	    JME_MII_EXT_ADDR, addr);
4075 }
4076 
4077 static void
4078 jme_phy_init(struct jme_softc *sc)
4079 {
4080 	uint16_t gtcr;
4081 	int val;
4082 
4083 	jme_phy_poweroff(sc);
4084 	jme_phy_poweron(sc);
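	/* Power-cycle the PHY before running the calibration sequence below. */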
4085 
4086 	/* Enable PHY test 1 */
4087 	gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
4088 	gtcr &= ~GTCR_TEST_MASK;
4089 	gtcr |= GTCR_TEST_1;
4090 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);
4091 
4092 	val = jme_miiext_read(sc, JME_MII_EXT_COM2);
4093 	val &= ~JME_MII_EXT_COM2_CALIB_MODE0;
4094 	val |= JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN;
4095 	jme_miiext_write(sc, JME_MII_EXT_COM2, val);
4096 
4097 	DELAY(20000);
4098 
4099 	val = jme_miiext_read(sc, JME_MII_EXT_COM2);
4100 	val &= ~(JME_MII_EXT_COM2_CALIB_MODE0 |
4101 	    JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN);
4102 	jme_miiext_write(sc, JME_MII_EXT_COM2, val);
4103 
4104 	/* Disable PHY test */
4105 	gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
4106 	gtcr &= ~GTCR_TEST_MASK;
4107 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);
4108 
4109 	if (sc->jme_phycom0 != 0)
4110 		jme_miiext_write(sc, JME_MII_EXT_COM0, sc->jme_phycom0);
4111 	if (sc->jme_phycom1 != 0)
4112 		jme_miiext_write(sc, JME_MII_EXT_COM1, sc->jme_phycom1);
4113 }
4114