xref: /openbsd/sys/dev/fdt/if_mvneta.c (revision d415bd75)
1 /*	$OpenBSD: if_mvneta.c,v 1.31 2023/11/10 15:51:19 bluhm Exp $	*/
2 /*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
3 /*
4  * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "bpfilter.h"
30 #include "kstat.h"
31 
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/systm.h>
35 #include <sys/endian.h>
36 #include <sys/errno.h>
37 #include <sys/kernel.h>
38 #include <sys/mutex.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <uvm/uvm_extern.h>
42 #include <sys/mbuf.h>
43 #include <sys/kstat.h>
44 
45 #include <machine/bus.h>
46 #include <machine/cpufunc.h>
47 #include <machine/fdt.h>
48 
49 #include <dev/ofw/openfirm.h>
50 #include <dev/ofw/ofw_clock.h>
51 #include <dev/ofw/ofw_misc.h>
52 #include <dev/ofw/ofw_pinctrl.h>
53 #include <dev/ofw/fdt.h>
54 
55 #include <dev/fdt/if_mvnetareg.h>
56 
57 #ifdef __armv7__
58 #include <armv7/marvell/mvmbusvar.h>
59 #endif
60 
61 #include <net/if.h>
62 #include <net/if_media.h>
63 #include <net/if_types.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/if_ether.h>
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #endif
74 
75 #ifdef MVNETA_DEBUG
76 #define DPRINTF(x)	if (mvneta_debug) printf x
77 #define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
78 int mvneta_debug = MVNETA_DEBUG;
79 #else
80 #define DPRINTF(x)
81 #define DPRINTFN(n,x)
82 #endif
83 
84 #define MVNETA_READ(sc, reg) \
85 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
86 #define MVNETA_WRITE(sc, reg, val) \
87 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
88 #define MVNETA_READ_FILTER(sc, reg, val, c) \
89 	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
90 #define MVNETA_WRITE_FILTER(sc, reg, val, c) \
91 	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
92 
93 #define MVNETA_LINKUP_READ(sc) \
94 	MVNETA_READ(sc, MVNETA_PS0)
95 #define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)
96 
97 #define MVNETA_TX_RING_CNT	256
98 #define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
99 #define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
100 #define MVNETA_TX_QUEUE_CNT	1
101 #define MVNETA_RX_RING_CNT	256
102 #define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
103 #define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
104 #define MVNETA_RX_QUEUE_CNT	1
105 
106 CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
107 	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
108 CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
109 	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);
110 
111 #define MVNETA_NTXSEG		30
112 
/*
 * A single contiguous DMA allocation (used for the TX/RX descriptor
 * rings): the load map, its one backing segment, size and KVA mapping.
 */
struct mvneta_dmamem {
	bus_dmamap_t		mdm_map;	/* map the device DMAs through */
	bus_dma_segment_t	mdm_seg;	/* single physical segment */
	size_t			mdm_size;	/* allocation size in bytes */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
119 #define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
120 #define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
121 #define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
122 #define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
123 
/* Per-descriptor software state: the DMA map and the mbuf it carries. */
struct mvneta_buf {
	bus_dmamap_t	tb_map;		/* map for tb_m's data */
	struct mbuf	*tb_m;		/* mbuf owned by this slot, or NULL */
};
128 
/*
 * Per-port driver state for one Marvell NETA gigabit ethernet port.
 */
struct mvneta_softc {
	struct device sc_dev;
	struct mii_bus *sc_mdio;	/* external MDIO bus, looked up in
					 * the deferred attach */

	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;	/* mapped NETA register window */
	bus_dma_tag_t sc_dmat;
	void *sc_ih;			/* established interrupt handle */

	uint64_t		sc_clk_freq;	/* clock index 0, from FDT */

	struct arpcom sc_ac;
#define sc_enaddr sc_ac.ac_enaddr
	struct mii_data sc_mii;
#define sc_media sc_mii.mii_media

	struct timeout sc_tick_ch;	/* 1 Hz mii_tick() callout */

	/* TX descriptor ring plus per-slot mbuf/map bookkeeping. */
	struct mvneta_dmamem	*sc_txring;
	struct mvneta_buf	*sc_txbuf;
	struct mvneta_tx_desc	*sc_txdesc;
	unsigned int		 sc_tx_prod;	/* next free tx desc */
	unsigned int		 sc_tx_cons;	/* first tx desc sent */

	/* RX descriptor ring plus per-slot mbuf/map bookkeeping. */
	struct mvneta_dmamem	*sc_rxring;
	struct mvneta_buf	*sc_rxbuf;
	struct mvneta_rx_desc	*sc_rxdesc;
	unsigned int		 sc_rx_prod;	/* next rx desc to fill */
	unsigned int		 sc_rx_cons;	/* next rx desc recvd */
	struct if_rxring	 sc_rx_ring;

	/* PHY connection type, parsed from the FDT "phy-mode" property. */
	enum {
		PHY_MODE_QSGMII,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_1000BASEX,
		PHY_MODE_2500BASEX,
	}			 sc_phy_mode;
	int			 sc_fixed_link;		/* fixed-link/no PHY */
	int			 sc_inband_status;	/* in-band link status */
	int			 sc_phy;		/* PHY node phandle */
	int			 sc_phyloc;		/* PHY bus address */
	int			 sc_link;		/* cached link-up state */
	int			 sc_sfp;		/* SFP node phandle */
	int			 sc_node;		/* our FDT node */

	struct if_device	 sc_ifd;

#if NKSTAT > 0
	struct mutex		 sc_kstat_lock;
	struct timeout		 sc_kstat_tick;
	struct kstat		*sc_kstat;
#endif
};
184 
185 
186 int mvneta_miibus_readreg(struct device *, int, int);
187 void mvneta_miibus_writereg(struct device *, int, int, int);
188 void mvneta_miibus_statchg(struct device *);
189 
190 void mvneta_wininit(struct mvneta_softc *);
191 
192 /* Gigabit Ethernet Port part functions */
193 int mvneta_match(struct device *, void *, void *);
194 void mvneta_attach(struct device *, struct device *, void *);
195 void mvneta_attach_deferred(struct device *);
196 
197 void mvneta_tick(void *);
198 int mvneta_intr(void *);
199 
200 void mvneta_start(struct ifqueue *);
201 int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
202 void mvneta_inband_statchg(struct mvneta_softc *);
203 void mvneta_port_change(struct mvneta_softc *);
204 void mvneta_port_up(struct mvneta_softc *);
205 int mvneta_up(struct mvneta_softc *);
206 void mvneta_down(struct mvneta_softc *);
207 void mvneta_watchdog(struct ifnet *);
208 
209 int mvneta_mediachange(struct ifnet *);
210 void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
211 
212 void mvneta_rx_proc(struct mvneta_softc *);
213 void mvneta_tx_proc(struct mvneta_softc *);
214 uint8_t mvneta_crc8(const uint8_t *, size_t);
215 void mvneta_iff(struct mvneta_softc *);
216 
217 struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
218     bus_size_t, bus_size_t);
219 void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
220 void mvneta_fill_rx_ring(struct mvneta_softc *);
221 
222 #if NKSTAT > 0
223 void		mvneta_kstat_attach(struct mvneta_softc *);
224 #endif
225 
226 static struct rwlock mvneta_sff_lock = RWLOCK_INITIALIZER("mvnetasff");
227 
228 struct cfdriver mvneta_cd = {
229 	NULL, "mvneta", DV_IFNET
230 };
231 
232 const struct cfattach mvneta_ca = {
233 	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
234 };
235 
236 int
237 mvneta_miibus_readreg(struct device *dev, int phy, int reg)
238 {
239 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
240 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
241 }
242 
243 void
244 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val)
245 {
246 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
247 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
248 }
249 
250 void
251 mvneta_miibus_statchg(struct device *self)
252 {
253 	struct mvneta_softc *sc = (struct mvneta_softc *)self;
254 
255 	if (sc->sc_mii.mii_media_status & IFM_ACTIVE) {
256 		uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
257 
258 		panc &= ~(MVNETA_PANC_SETMIISPEED |
259 			  MVNETA_PANC_SETGMIISPEED |
260 			  MVNETA_PANC_SETFULLDX);
261 
262 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
263 		case IFM_1000_SX:
264 		case IFM_1000_LX:
265 		case IFM_1000_CX:
266 		case IFM_1000_T:
267 			panc |= MVNETA_PANC_SETGMIISPEED;
268 			break;
269 		case IFM_100_TX:
270 			panc |= MVNETA_PANC_SETMIISPEED;
271 			break;
272 		case IFM_10_T:
273 			break;
274 		}
275 
276 		if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
277 			panc |= MVNETA_PANC_SETFULLDX;
278 
279 		MVNETA_WRITE(sc, MVNETA_PANC, panc);
280 	}
281 
282 	mvneta_port_change(sc);
283 }
284 
285 void
286 mvneta_inband_statchg(struct mvneta_softc *sc)
287 {
288 	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
289 	uint32_t reg;
290 
291 	sc->sc_mii.mii_media_status = IFM_AVALID;
292 	sc->sc_mii.mii_media_active = IFM_ETHER;
293 
294 	reg = MVNETA_READ(sc, MVNETA_PS0);
295 	if (reg & MVNETA_PS0_LINKUP)
296 		sc->sc_mii.mii_media_status |= IFM_ACTIVE;
297 	if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
298 		sc->sc_mii.mii_media_active |= subtype;
299 	else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
300 		sc->sc_mii.mii_media_active |= subtype;
301 	else if (reg & MVNETA_PS0_GMIISPEED)
302 		sc->sc_mii.mii_media_active |= IFM_1000_T;
303 	else if (reg & MVNETA_PS0_MIISPEED)
304 		sc->sc_mii.mii_media_active |= IFM_100_TX;
305 	else
306 		sc->sc_mii.mii_media_active |= IFM_10_T;
307 	if (reg & MVNETA_PS0_FULLDX)
308 		sc->sc_mii.mii_media_active |= IFM_FDX;
309 
310 	mvneta_port_change(sc);
311 }
312 
313 void
314 mvneta_enaddr_write(struct mvneta_softc *sc)
315 {
316 	uint32_t maddrh, maddrl;
317 	maddrh  = sc->sc_enaddr[0] << 24;
318 	maddrh |= sc->sc_enaddr[1] << 16;
319 	maddrh |= sc->sc_enaddr[2] << 8;
320 	maddrh |= sc->sc_enaddr[3];
321 	maddrl  = sc->sc_enaddr[4] << 8;
322 	maddrl |= sc->sc_enaddr[5];
323 	MVNETA_WRITE(sc, MVNETA_MACAH, maddrh);
324 	MVNETA_WRITE(sc, MVNETA_MACAL, maddrl);
325 }
326 
/*
 * Program the MBUS address-decoding windows so the controller's DMA
 * engine can reach DRAM.  All windows are cleared first; then one
 * window per DRAM chip-select (armv7) or a single catch-all window
 * (other platforms) is configured, and only those windows are left
 * enabled in the Base Address Registration Enable register.
 */
void
mvneta_wininit(struct mvneta_softc *sc)
{
	uint32_t en;
	int i;

#ifdef __armv7__
	/* The mbus glue must have discovered the DRAM layout by now. */
	if (mvmbus_dram_info == NULL)
		panic("%s: mbus dram information not set up",
		    sc->sc_dev.dv_xname);
#endif

	/* Clear every window: base, size and (where present) remap. */
	for (i = 0; i < MVNETA_NWINDOW; i++) {
		MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0);
		MVNETA_WRITE(sc, MVNETA_S(i), 0);

		if (i < MVNETA_NREMAP)
			MVNETA_WRITE(sc, MVNETA_HA(i), 0);
	}

	/* Start from the full mask; bits for used windows are cleared. */
	en = MVNETA_BARE_EN_MASK;

#ifdef __armv7__
	/* One window per DRAM chip-select from the mbus description. */
	for (i = 0; i < mvmbus_dram_info->numcs; i++) {
		struct mbus_dram_window *win = &mvmbus_dram_info->cs[i];

		MVNETA_WRITE(sc, MVNETA_BASEADDR(i),
		    MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) |
		    MVNETA_BASEADDR_ATTR(win->attr)	|
		    MVNETA_BASEADDR_BASE(win->base));
		MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size));

		en &= ~(1 << i);
	}
#else
	/* Single window 0; size field 0 presumably means maximum span. */
	MVNETA_WRITE(sc, MVNETA_S(0), MVNETA_S_SIZE(0));
	en &= ~(1 << 0);
#endif

	MVNETA_WRITE(sc, MVNETA_BARE, en);
}
368 
369 #define COMPHY_SIP_POWER_ON	0x82000001
370 #define COMPHY_SIP_POWER_OFF	0x82000002
371 #define COMPHY_SPEED(x)		((x) << 2)
372 #define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
373 #define  COMPHY_SPEED_2_5G		1
374 #define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
375 #define  COMPHY_SPEED_5G		3
376 #define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
377 #define  COMPHY_SPEED_6G		5
378 #define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
379 #define COMPHY_UNIT(x)		((x) << 8)
380 #define COMPHY_MODE(x)		((x) << 12)
381 #define  COMPHY_MODE_SATA		1
382 #define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
383 #define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
384 #define  COMPHY_MODE_USB3H		4
385 #define  COMPHY_MODE_USB3D		5
386 #define  COMPHY_MODE_PCIE		6
387 #define  COMPHY_MODE_RXAUI		7
388 #define  COMPHY_MODE_XFI		8
389 #define  COMPHY_MODE_SFI		9
390 #define  COMPHY_MODE_USB3		10
391 
392 void
393 mvneta_comphy_init(struct mvneta_softc *sc)
394 {
395 	int node, phys[2], lane, unit;
396 	uint32_t mode;
397 
398 	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
399 	    sizeof(phys))
400 		return;
401 	node = OF_getnodebyphandle(phys[0]);
402 	if (!node)
403 		return;
404 
405 	lane = OF_getpropint(node, "reg", 0);
406 	unit = phys[1];
407 
408 	switch (sc->sc_phy_mode) {
409 	case PHY_MODE_1000BASEX:
410 	case PHY_MODE_SGMII:
411 		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
412 		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
413 		    COMPHY_UNIT(unit);
414 		break;
415 	case PHY_MODE_2500BASEX:
416 		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
417 		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
418 		    COMPHY_UNIT(unit);
419 		break;
420 	default:
421 		return;
422 	}
423 
424 	smc_call(COMPHY_SIP_POWER_ON, lane, mode, 0);
425 }
426 
427 int
428 mvneta_match(struct device *parent, void *cfdata, void *aux)
429 {
430 	struct fdt_attach_args *faa = aux;
431 
432 	return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta") ||
433 	    OF_is_compatible(faa->fa_node, "marvell,armada-3700-neta");
434 }
435 
/*
 * Attach one NETA port: map registers, parse the FDT configuration
 * (phy-mode, fixed-link, MAC address), quiesce and reset the hardware,
 * program the MAC/serdes for the selected PHY mode, and set up the
 * ifnet.  MII attachment is deferred until all devices have attached
 * so the external MDIO bus driver is available.
 */
void
mvneta_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvneta_softc *sc = (struct mvneta_softc *) self;
	struct fdt_attach_args *faa = aux;
	uint32_t ctl0, ctl2, ctl4, panc;
	struct ifnet *ifp;
	int i, len, node;
	char *phy_mode;
	char *managed;

	sc->sc_iot = faa->fa_iot;
	timeout_set(&sc->sc_tick_ch, mvneta_tick, sc);
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}
	sc->sc_dmat = faa->fa_dmat;
	sc->sc_node = faa->fa_node;

	clock_enable(faa->fa_node, NULL);
	sc->sc_clk_freq = clock_get_frequency_idx(faa->fa_node, 0);

	pinctrl_byname(faa->fa_node, "default");

	len = OF_getproplen(faa->fa_node, "phy-mode");
	if (len <= 0) {
		printf(": cannot extract phy-mode\n");
		return;
	}

	/*
	 * Map the phy-mode string to our enum.  "rgmii-id" must be
	 * matched before the plain "rgmii" prefix would swallow it.
	 */
	phy_mode = malloc(len, M_TEMP, M_WAITOK);
	OF_getprop(faa->fa_node, "phy-mode", phy_mode, len);
	if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii")))
		sc->sc_phy_mode = PHY_MODE_QSGMII;
	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
		sc->sc_phy_mode = PHY_MODE_SGMII;
	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
		sc->sc_phy_mode = PHY_MODE_RGMII;
	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
		sc->sc_phy_mode = PHY_MODE_1000BASEX;
	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
		sc->sc_phy_mode = PHY_MODE_2500BASEX;
	else {
		printf(": cannot use phy-mode %s\n", phy_mode);
		/* NOTE(review): this error return leaks phy_mode. */
		return;
	}
	free(phy_mode, M_TEMP, len);

	/* TODO: check child's name to be "fixed-link" */
	if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 ||
	    OF_child(faa->fa_node))
		sc->sc_fixed_link = 1;

	/* "managed = in-band-status" implies fixed-link operation too. */
	if ((len = OF_getproplen(faa->fa_node, "managed")) >= 0) {
		managed = malloc(len, M_TEMP, M_WAITOK);
		OF_getprop(faa->fa_node, "managed", managed, len);
		if (!strncmp(managed, "in-band-status",
		    strlen("in-band-status"))) {
			sc->sc_fixed_link = 1;
			sc->sc_inband_status = 1;
		}
		free(managed, M_TEMP, len);
	}

	/* With a real PHY, resolve its node and bus address now. */
	if (!sc->sc_fixed_link) {
		sc->sc_phy = OF_getpropint(faa->fa_node, "phy", 0);
		node = OF_getnodebyphandle(sc->sc_phy);
		if (!node) {
			printf(": cannot find phy in fdt\n");
			return;
		}

		if ((sc->sc_phyloc = OF_getpropint(node, "reg", -1)) == -1) {
			printf(": cannot extract phy addr\n");
			return;
		}
	}

	mvneta_wininit(sc);

	/*
	 * MAC address: prefer the FDT property; otherwise keep whatever
	 * the bootloader programmed into MACAH/MACAL; last resort is a
	 * random locally-administered address.
	 */
	if (OF_getproplen(faa->fa_node, "local-mac-address") ==
	    ETHER_ADDR_LEN) {
		OF_getprop(faa->fa_node, "local-mac-address",
		    sc->sc_enaddr, ETHER_ADDR_LEN);
		mvneta_enaddr_write(sc);
	} else {
		uint32_t maddrh, maddrl;
		maddrh = MVNETA_READ(sc, MVNETA_MACAH);
		maddrl = MVNETA_READ(sc, MVNETA_MACAL);
		if (maddrh || maddrl) {
			sc->sc_enaddr[0] = maddrh >> 24;
			sc->sc_enaddr[1] = maddrh >> 16;
			sc->sc_enaddr[2] = maddrh >> 8;
			sc->sc_enaddr[3] = maddrh >> 0;
			sc->sc_enaddr[4] = maddrl >> 8;
			sc->sc_enaddr[5] = maddrl >> 0;
		} else
			ether_fakeaddr(&sc->sc_ac.ac_if);
	}

	sc->sc_sfp = OF_getpropint(faa->fa_node, "sfp", 0);

	printf(": address %s\n", ether_sprintf(sc->sc_enaddr));

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
	MVNETA_WRITE(sc, MVNETA_PMIC, 0);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);
	MVNETA_WRITE(sc, MVNETA_PIE, 0);

	/* enable MBUS Retry bit16 */
	MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20);

	/* enable access for CPU0 */
	MVNETA_WRITE(sc, MVNETA_PCP2Q(0),
	    MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);

	/* disable legacy WRR, disable EJP, release from reset */
	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
	for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) {
		MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0);
		MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0);
	}

	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	/* set port acceleration mode */
	MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);

	MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS);
	MVNETA_WRITE(sc, MVNETA_PXCX, 0);
	MVNETA_WRITE(sc, MVNETA_PMFS, 64);

	/* Set SDC register except IPGINT bits */
	MVNETA_WRITE(sc, MVNETA_SDC,
	    MVNETA_SDC_RXBSZ_16_64BITWORDS |
	    MVNETA_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
	    MVNETA_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
	    MVNETA_SDC_TXBSZ_16_64BITWORDS);

	/* XXX: Disable PHY polling in hardware */
	MVNETA_WRITE(sc, MVNETA_EUC,
	    MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING);

	/* clear uni-/multicast tables */
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	memset(dfut, 0, sizeof(dfut));
	/*
	 * NOTE(review): the two memsets below clear only sizeof(dfut)
	 * bytes of dfsmt/dfomt, and the DFSMT/DFOMT filter writes below
	 * pass dfut with the NDFSMT/NDFOMT counts.  If MVNETA_NDFSMT or
	 * MVNETA_NDFOMT exceeds MVNETA_NDFUT this reads past the end of
	 * dfut and programs stack garbage into the tables -- verify the
	 * table sizes in if_mvnetareg.h; the intent is presumably
	 * sizeof(dfsmt)/sizeof(dfomt) and dfsmt/dfomt respectively.
	 */
	memset(dfsmt, 0, sizeof(dfut));
	memset(dfomt, 0, sizeof(dfut));
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT);

	MVNETA_WRITE(sc, MVNETA_PIE,
	    MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL);

	MVNETA_WRITE(sc, MVNETA_EUIC, 0);

	/* Setup phy. */
	ctl0 = MVNETA_READ(sc, MVNETA_PMACC0);
	ctl2 = MVNETA_READ(sc, MVNETA_PMACC2);
	ctl4 = MVNETA_READ(sc, MVNETA_PMACC4);
	panc = MVNETA_READ(sc, MVNETA_PANC);

	/* Force link down to change in-band settings. */
	panc &= ~MVNETA_PANC_FORCELINKPASS;
	panc |= MVNETA_PANC_FORCELINKFAIL;
	MVNETA_WRITE(sc, MVNETA_PANC, panc);

	mvneta_comphy_init(sc);

	/* Start from a clean slate in all four control registers. */
	ctl0 &= ~MVNETA_PMACC0_PORTTYPE;
	ctl2 &= ~(MVNETA_PMACC2_PORTMACRESET | MVNETA_PMACC2_INBANDAN);
	ctl4 &= ~(MVNETA_PMACC4_SHORT_PREAMBLE);
	panc &= ~(MVNETA_PANC_INBANDANEN | MVNETA_PANC_INBANDRESTARTAN |
	    MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETGMIISPEED |
	    MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_SETFCEN |
	    MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN |
	    MVNETA_PANC_SETFULLDX | MVNETA_PANC_ANDUPLEXEN);

	/* Select the serdes protocol matching the PHY mode. */
	ctl2 |= MVNETA_PMACC2_RGMIIEN;
	switch (sc->sc_phy_mode) {
	case PHY_MODE_QSGMII:
		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
		    MVNETA_SERDESCFG_QSGMII_PROTO);
		ctl2 |= MVNETA_PMACC2_PCSEN;
		break;
	case PHY_MODE_SGMII:
		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
		    MVNETA_SERDESCFG_SGMII_PROTO);
		ctl2 |= MVNETA_PMACC2_PCSEN;
		break;
	case PHY_MODE_1000BASEX:
		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
		    MVNETA_SERDESCFG_SGMII_PROTO);
		ctl2 |= MVNETA_PMACC2_PCSEN;
		break;
	case PHY_MODE_2500BASEX:
		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
		    MVNETA_SERDESCFG_HSGMII_PROTO);
		ctl2 |= MVNETA_PMACC2_PCSEN;
		ctl4 |= MVNETA_PMACC4_SHORT_PREAMBLE;
		break;
	default:
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~(MVNETA_PANC_FORCELINKFAIL |
		    MVNETA_PANC_FORCELINKPASS);
		/* TODO: read mode from SFP */
		if (1) {
			/* 802.3z */
			ctl0 |= MVNETA_PMACC0_PORTTYPE;
			panc |= (MVNETA_PANC_INBANDANEN |
			    MVNETA_PANC_SETGMIISPEED |
			    MVNETA_PANC_SETFULLDX);
		} else {
			/* SGMII */
			ctl2 |= MVNETA_PMACC2_INBANDAN;
			panc |= (MVNETA_PANC_INBANDANEN |
			    MVNETA_PANC_ANSPEEDEN |
			    MVNETA_PANC_ANDUPLEXEN);
		}
		MVNETA_WRITE(sc, MVNETA_OMSCD,
		    MVNETA_READ(sc, MVNETA_OMSCD) | MVNETA_OMSCD_1MS_CLOCK_ENABLE);
	} else {
		MVNETA_WRITE(sc, MVNETA_OMSCD,
		    MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE);
	}

	MVNETA_WRITE(sc, MVNETA_PMACC0, ctl0);
	MVNETA_WRITE(sc, MVNETA_PMACC2, ctl2);
	MVNETA_WRITE(sc, MVNETA_PMACC4, ctl4);
	MVNETA_WRITE(sc, MVNETA_PANC, panc);

	/* Port reset */
	/* Busy-wait (no timeout) for the MAC to come out of reset. */
	while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET)
		;

	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
	    mvneta_intr, sc, sc->sc_dev.dv_xname);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_qstart = mvneta_start;
	ifp->if_ioctl = mvneta_ioctl;
	ifp->if_watchdog = mvneta_watchdog;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if notyet
	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
				IFCAP_CSUM_UDPv4;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	/*
	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
	 */
	ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4;
#endif

	ifq_init_maxlen(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN));
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = mvneta_miibus_readreg;
	sc->sc_mii.mii_writereg = mvneta_miibus_writereg;
	sc->sc_mii.mii_statchg = mvneta_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0,
	    mvneta_mediachange, mvneta_mediastatus);

	/* Finish (mii_attach, if_attach) once all devices have attached. */
	config_defer(self, mvneta_attach_deferred);
}
739 
/*
 * Deferred half of attach, run once autoconf has settled so the
 * external MDIO bus driver is guaranteed to exist.  Attaches the MII
 * PHY (or fakes media for fixed-link/in-band ports), then registers
 * the ifnet with the network stack.
 */
void
mvneta_attach_deferred(struct device *self)
{
	struct mvneta_softc *sc = (struct mvneta_softc *) self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int mii_flags = 0;

	if (!sc->sc_fixed_link) {
		sc->sc_mdio = mii_byphandle(sc->sc_phy);
		if (sc->sc_mdio == NULL) {
			printf("%s: mdio bus not yet attached\n", self->dv_xname);
			return;
		}

		/* Pass the wire format down to the PHY driver. */
		switch (sc->sc_phy_mode) {
		case PHY_MODE_1000BASEX:
			mii_flags |= MIIF_IS_1000X;
			break;
		case PHY_MODE_SGMII:
			mii_flags |= MIIF_SGMII;
			break;
		case PHY_MODE_RGMII_ID:
			mii_flags |= MIIF_RXID | MIIF_TXID;
			break;
		default:
			break;
		}

		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
		    MII_OFFSET_ANY, mii_flags);
		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", self->dv_xname);
			ifmedia_add(&sc->sc_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	} else {
		/* Fixed link: no PHY to probe; synthesize the media. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

		if (sc->sc_inband_status) {
			/* Seed the subtype; the port reports the rest. */
			switch (sc->sc_phy_mode) {
			case PHY_MODE_1000BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
				break;
			case PHY_MODE_2500BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
				break;
			default:
				break;
			}
			mvneta_inband_statchg(sc);
		} else {
			/* No status source at all: assume 1000baseT FDX up. */
			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
			mvneta_miibus_statchg(self);
		}

		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* Make the interface findable by FDT node for other drivers. */
	sc->sc_ifd.if_node = sc->sc_node;
	sc->sc_ifd.if_ifp = ifp;
	if_register(&sc->sc_ifd);

#if NKSTAT > 0
	mvneta_kstat_attach(sc);
#endif
}
819 
820 void
821 mvneta_tick(void *arg)
822 {
823 	struct mvneta_softc *sc = arg;
824 	struct mii_data *mii = &sc->sc_mii;
825 	int s;
826 
827 	s = splnet();
828 	mii_tick(mii);
829 	splx(s);
830 
831 	timeout_add_sec(&sc->sc_tick_ch, 1);
832 }
833 
/*
 * Interrupt handler.  Reads the summary cause register and dispatches:
 * miscellaneous port events (link change etc.) under the kernel lock,
 * then TX completion and RX processing for queue 0 -- but only while
 * the interface is running.
 */
int
mvneta_intr(void *arg)
{
	struct mvneta_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t ic, misc;

	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);

	if (ic & MVNETA_PRXTXTI_PMISCICSUMMARY) {
		/* Link-state machinery below is not MP-safe. */
		KERNEL_LOCK();
		misc = MVNETA_READ(sc, MVNETA_PMIC);
		MVNETA_WRITE(sc, MVNETA_PMIC, 0);
		if (sc->sc_inband_status && (misc &
		    (MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE |
		    MVNETA_PMI_PSCSYNCCHNG))) {
			mvneta_inband_statchg(sc);
		}
		KERNEL_UNLOCK();
	}

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 1;

	if (ic & MVNETA_PRXTXTI_TBTCQ(0))
		mvneta_tx_proc(sc);

	if (ISSET(ic, MVNETA_PRXTXTI_RBICTAPQ(0) | MVNETA_PRXTXTI_RDTAQ(0)))
		mvneta_rx_proc(sc);

	return 1;
}
867 
868 static inline int
869 mvneta_load_mbuf(struct mvneta_softc *sc, bus_dmamap_t map, struct mbuf *m)
870 {
871 	int error;
872 
873 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
874 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
875 	switch (error) {
876 	case EFBIG:
877 		error = m_defrag(m, M_DONTWAIT);
878 		if (error != 0)
879 			break;
880 
881 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
882 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
883 		if (error != 0)
884 			break;
885 
886 		/* FALLTHROUGH */
887 	case 0:
888 		return (0);
889 
890 	default:
891 		break;
892 	}
893 
894         return (error);
895 }
896 
/*
 * Write one TX descriptor per DMA segment of a loaded map, starting at
 * ring slot 'prod'.  The first descriptor carries the FIRST/padding/
 * checksum command bits (cmdsts is zeroed after the first iteration so
 * later segments get plain descriptors); the LAST bit is OR'd into the
 * final descriptor after the loop.  Assumes map->dm_nsegs >= 1 -- with
 * dm_nsegs == 0, txd would be used uninitialized.  The caller syncs
 * the ring and tells the hardware about the new descriptors.
 */
static inline void
mvneta_encap(struct mvneta_softc *sc, bus_dmamap_t map, struct mbuf *m,
    unsigned int prod)
{
	struct mvneta_tx_desc *txd;
	uint32_t cmdsts;
	unsigned int i;

	cmdsts = MVNETA_TX_FIRST_DESC | MVNETA_TX_ZERO_PADDING |
	    MVNETA_TX_L4_CSUM_NOT;
#if notyet
	int m_csumflags;
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVNETA_TX_IP_NO_FRAG |
		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &sc->sc_txdesc[prod];
		txd->bytecnt = map->dm_segs[i].ds_len;
		txd->l4ichk = 0;
		txd->cmdsts = cmdsts;
		txd->nextdescptr = 0;
		txd->bufptr = map->dm_segs[i].ds_addr;
		txd->_padding[0] = 0;
		txd->_padding[1] = 0;
		txd->_padding[2] = 0;
		txd->_padding[3] = 0;

		prod = MVNETA_TX_RING_NEXT(prod);
		/* Only the first descriptor carries the command bits. */
		cmdsts = 0;
	}
	/* txd still points at the last descriptor written above. */
	txd->cmdsts |= MVNETA_TX_LAST_DESC;
}
942 
943 static inline void
944 mvneta_sync_txring(struct mvneta_softc *sc, int ops)
945 {
946 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
947 	    MVNETA_DMA_LEN(sc->sc_txring), ops);
948 }
949 
/*
 * ifq start routine: dequeue packets, DMA-map them, write descriptors
 * into the TX ring and hand them to the hardware in batches.  Multi-
 * segment packets rotate tb_map pointers so that the map ends up on
 * the slot of the packet's LAST descriptor, where mvneta_tx_proc()
 * will find it together with the mbuf.
 */
void
mvneta_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct mvneta_softc *sc = ifp->if_softc;
	unsigned int prod, nprod, free, used = 0, nused;
	struct mbuf *m;
	bus_dmamap_t map;

	/* If Link is DOWN, can't start TX */
	if (!MVNETA_IS_LINKUP(sc)) {
		ifq_purge(ifq);
		return;
	}

	mvneta_sync_txring(sc, BUS_DMASYNC_POSTWRITE);

	prod = sc->sc_tx_prod;
	/* prod - cons is valid under unsigned wraparound arithmetic. */
	free = MVNETA_TX_RING_CNT - (prod - sc->sc_tx_cons);

	for (;;) {
		/* Stop while a worst-case packet might not fit. */
		if (free < MVNETA_NTXSEG - 1) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		map = sc->sc_txbuf[prod].tb_map;
		if (mvneta_load_mbuf(sc, map, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++; /* XXX atomic */
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		mvneta_encap(sc, map, m, prod);

		/*
		 * Multi-segment: swap the loaded map onto the packet's
		 * last-descriptor slot so tx_proc can unload it there.
		 */
		if (map->dm_nsegs > 1) {
			nprod = (prod + (map->dm_nsegs - 1)) %
			    MVNETA_TX_RING_CNT;
			sc->sc_txbuf[prod].tb_map = sc->sc_txbuf[nprod].tb_map;
			prod = nprod;
			sc->sc_txbuf[prod].tb_map = map;
		}
		sc->sc_txbuf[prod].tb_m = m;
		prod = MVNETA_TX_RING_NEXT(prod);

		free -= map->dm_nsegs;

		/*
		 * The "number of written descriptors" field is narrow;
		 * flush a batch to the hardware before it overflows.
		 */
		nused = used + map->dm_nsegs;
		if (nused > MVNETA_PTXSU_MAX) {
			mvneta_sync_txring(sc,
			    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE);
			MVNETA_WRITE(sc, MVNETA_PTXSU(0),
			    MVNETA_PTXSU_NOWD(used));
			used = map->dm_nsegs;
		} else
			used = nused;
	}

	mvneta_sync_txring(sc, BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = prod;
	/* Kick the hardware for any remaining unannounced descriptors. */
	if (used)
		MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NOWD(used));
}
1026 
/*
 * Interface ioctl handler: bring-up/down, media, RX ring info and SFP
 * EEPROM access; everything else goes to ether_ioctl().  ENETRESET
 * from any path is translated into a multicast-filter reprogram.
 */
int
mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvneta_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvneta_down(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;
	case SIOCGIFSFFPAGE:
		/* Serialize SFP EEPROM access; interruptible sleep. */
		error = rw_enter(&mvneta_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvneta_sff_lock);
		break;
	default:
		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	/* Reprogram the RX filter if anything requested it. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvneta_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}
1084 
1085 void
1086 mvneta_port_change(struct mvneta_softc *sc)
1087 {
1088 	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
1089 		sc->sc_link = !sc->sc_link;
1090 
1091 		if (sc->sc_link) {
1092 			if (!sc->sc_inband_status) {
1093 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
1094 				panc &= ~MVNETA_PANC_FORCELINKFAIL;
1095 				panc |= MVNETA_PANC_FORCELINKPASS;
1096 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
1097 			}
1098 			mvneta_port_up(sc);
1099 		} else {
1100 			if (!sc->sc_inband_status) {
1101 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
1102 				panc &= ~MVNETA_PANC_FORCELINKPASS;
1103 				panc |= MVNETA_PANC_FORCELINKFAIL;
1104 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
1105 			}
1106 		}
1107 	}
1108 }
1109 
/*
 * Enable the (single) receive and transmit queues so the port starts
 * moving packets.  Called on link-up and at the end of mvneta_up().
 */
void
mvneta_port_up(struct mvneta_softc *sc)
{
	/* Enable port RX/TX. */
	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
}
1117 
/*
 * Bring the interface up: allocate the Tx/Rx descriptor rings and
 * per-slot DMA maps, program the ring addresses/sizes and Rx
 * interrupt moderation into the device, set the frame size limit,
 * enable the MAC, program the address filters, and unmask the
 * interrupts this driver handles.  Always returns 0; all allocations
 * here use M_WAITOK/BUS_DMA_WAITOK and so do not fail.
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* One map per Tx slot; mbufs are attached at transmit time. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	/* Rx buffers are single clusters: one segment per map. */
	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));

	if (sc->sc_clk_freq != 0) {
		/*
		 * Use the Non Occupied Descriptors Threshold to
		 * interrupt when the descriptors granted by rxr are
		 * used up, otherwise wait until the RX Interrupt
		 * Time Threshold is reached.
		 */
		MVNETA_WRITE(sc, MVNETA_PRXDQTH(0),
		    MVNETA_PRXDQTH_ODT(MVNETA_RX_RING_CNT) |
		    MVNETA_PRXDQTH_NODT(2));
		MVNETA_WRITE(sc, MVNETA_PRXITTH(0), sc->sc_clk_freq / 4000);
	} else {
		/* Time based moderation is hard without a clock */
		MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
		MVNETA_WRITE(sc, MVNETA_PRXITTH(0), 0);
	}

	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT) |
	    MVNETA_PTXDQS_TBT(MIN(MVNETA_TX_RING_CNT / 2, ifp->if_txmit)));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	/* Hand descriptors to the rxr accounting, then fill the ring. */
	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    (MVNETA_READ(sc, MVNETA_PMACC0) & MVNETA_PMACC0_PORTTYPE) |
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	/* If link is already up (e.g. fixed link), start the queues now. */
	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0) | MVNETA_PRXTXTI_RDTAQ(0) |
	    MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1237 
/*
 * Take the interface down: stop the Rx and Tx queues and poll until
 * the hardware has drained, disable the MAC, mask and acknowledge the
 * interrupts this driver uses, release all ring buffers and DMA
 * resources, then reset the Rx/Tx DMA engines.
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	/* Stop the tick timer and wait out any interrupt in flight. */
	timeout_del(&sc->sc_tick_ch);
	ifp->if_flags &= ~IFF_RUNNING;
	intr_barrier(sc->sc_ih);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		/* Spin until the FIFO drains and no transmit is in progress. */
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifq_clr_oactive(&ifp->if_snd);
}
1363 
1364 void
1365 mvneta_watchdog(struct ifnet *ifp)
1366 {
1367 	struct mvneta_softc *sc = ifp->if_softc;
1368 
1369 	/*
1370 	 * Reclaim first as there is a possibility of losing Tx completion
1371 	 * interrupts.
1372 	 */
1373 	mvneta_tx_proc(sc);
1374 	if (sc->sc_tx_prod != sc->sc_tx_cons) {
1375 		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1376 
1377 		ifp->if_oerrors++;
1378 	}
1379 }
1380 
1381 /*
1382  * Set media options.
1383  */
1384 int
1385 mvneta_mediachange(struct ifnet *ifp)
1386 {
1387 	struct mvneta_softc *sc = ifp->if_softc;
1388 
1389 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1390 		mii_mediachg(&sc->sc_mii);
1391 
1392 	return (0);
1393 }
1394 
1395 /*
1396  * Report current media status.
1397  */
1398 void
1399 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1400 {
1401 	struct mvneta_softc *sc = ifp->if_softc;
1402 
1403 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
1404 		mii_pollstat(&sc->sc_mii);
1405 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1406 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1407 	}
1408 
1409 	if (sc->sc_fixed_link) {
1410 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1411 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1412 	}
1413 }
1414 
/*
 * Rx completion processing.  Harvest the descriptors the hardware has
 * marked done, hand good frames up the stack in a single batch via
 * ifiq_input(), and return the processed descriptors to the device.
 * The PRXSU status-update register accepts at most MVNETA_PRXSU_MAX
 * descriptors per write, hence the mid-loop update for large batches.
 */
void
mvneta_rx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t rxstat;
	unsigned int i, done, cons;

	/* Number of occupied descriptors according to the hardware. */
	done = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
	if (done == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_POSTREAD);

	cons = sc->sc_rx_cons;

	for (i = 0; i < done; i++) {
		rxd = &sc->sc_rxdesc[cons];
		rxb = &sc->sc_rxbuf[cons];

		/* Detach the mbuf from the ring slot. */
		m = rxb->tb_m;
		rxb->tb_m = NULL;

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		rxstat = rxd->cmdsts;
		if (rxstat & MVNETA_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;

			if (err == MVNETA_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
			m_freem(m);
		} else {
			/* Trim the Marvell hardware header off the front. */
			m->m_pkthdr.len = m->m_len = rxd->bytecnt;
			m_adj(m, MVNETA_HWHEADER_SIZE);

			ml_enqueue(&ml, m);
		}

#if notyet
		if (rxstat & MVNETA_RX_IP_FRAME_TYPE) {
			int flgs = 0;

			/* Check IPv4 header checksum */
			flgs |= M_CSUM_IPv4;
			if (!(rxstat & MVNETA_RX_IP_HEADER_OK))
				flgs |= M_CSUM_IPv4_BAD;
			else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) {
				/*
				 * Check TCPv4/UDPv4 checksum for
				 * non-fragmented packet only.
				 *
				 * It seemd that sometimes
				 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0
				 * even if the checksum is correct and the
				 * packet was not fragmented. So we don't set
				 * M_CSUM_TCP_UDP_BAD even if csum bit is 0.
				 */

				if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
					MVNETA_RX_L4_TYPE_TCP) &&
				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_TCPv4;
				else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
					MVNETA_RX_L4_TYPE_UDP) &&
				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_UDPv4;
			}
			m->m_pkthdr.csum_flags = flgs;
		}
#endif

		if_rxr_put(&sc->sc_rx_ring, 1);

		cons = MVNETA_RX_RING_NEXT(cons);

		if (i == MVNETA_PRXSU_MAX) {
			MVNETA_WRITE(sc, MVNETA_PRXSU(0),
			    MVNETA_PRXSU_NOPD(MVNETA_PRXSU_MAX));

			/* tweaking the iterator inside the loop is fun */
			done -= MVNETA_PRXSU_MAX;
			i = 0;
		}
	}

	sc->sc_rx_cons = cons;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREREAD);

	/* Report the remaining processed descriptors to the device. */
	if (i > 0) {
		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
		    MVNETA_PRXSU_NOPD(i));
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	mvneta_fill_rx_ring(sc);
}
1532 
/*
 * Tx completion processing.  Unload and free the mbufs of descriptors
 * the hardware has finished transmitting, account transmit errors,
 * and report the number of released buffers back through MVNETA_PTXSU
 * (at most MVNETA_PTXSU_MAX per write).  Restarts the send queue if
 * it had been marked full.
 */
void
mvneta_tx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ifqueue *ifq = &ifp->if_snd;
	struct mvneta_tx_desc *txd;
	struct mvneta_buf *txb;
	unsigned int i, cons, done;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	/* Number of descriptors the hardware has transmitted. */
	done = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0)));
	if (done == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD);

	cons = sc->sc_tx_cons;

	for (i = 0; i < done; i++) {
		txd = &sc->sc_txdesc[cons];
		txb = &sc->sc_txbuf[cons];

		/* Only the last slot of a multi-segment packet holds an mbuf. */
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		if (txd->cmdsts & MVNETA_ERROR_SUMMARY) {
			int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK;

			if (err == MVNETA_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVNETA_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}

		cons = MVNETA_TX_RING_NEXT(cons);

		if (i == MVNETA_PTXSU_MAX) {
			MVNETA_WRITE(sc, MVNETA_PTXSU(0),
			    MVNETA_PTXSU_NORB(MVNETA_PTXSU_MAX));

			/* tweaking the iterator inside the loop is fun */
			done -= MVNETA_PTXSU_MAX;
			i = 0;
		}
	}

	sc->sc_tx_cons = cons;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_PREREAD);

	/* Report the remaining reclaimed buffers. */
	if (i > 0) {
		MVNETA_WRITE(sc, MVNETA_PTXSU(0),
		    MVNETA_PTXSU_NORB(i));
	}
	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);
}
1604 
1605 uint8_t
1606 mvneta_crc8(const uint8_t *data, size_t size)
1607 {
1608 	int bit;
1609 	uint8_t byte;
1610 	uint8_t crc = 0;
1611 	const uint8_t poly = 0x07;
1612 
1613 	while(size--)
1614 	  for (byte = *data++, bit = NBBY-1; bit >= 0; bit--)
1615 	    crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? poly : 0);
1616 
1617 	return crc;
1618 }
1619 
1620 CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);
1621 
/*
 * Program the hardware receive filter: the promiscuous bit in PXC
 * plus the unicast (DFUT), special multicast (DFSMT) and other
 * multicast (DFOMT) destination-address filter tables.  With
 * IFF_PROMISC set or multicast address ranges present, all multicast
 * is accepted (IFF_ALLMULTI).
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	/* 01:00:5e:00:00:xx prefix routed to the "special" table below */
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	/* Start from empty tables; entries are OR-ed in below. */
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Accept all multicast; promisc also passes all unicast. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				/* special table indexed by the last address byte */
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				/* other table indexed by a CRC-8 of the address */
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}
1681 
/*
 * Allocate a physically contiguous, mapped-and-loaded chunk of DMA
 * memory of the given size and alignment (used for the descriptor
 * rings).  On failure the partially acquired resources are unwound
 * via the goto chain and NULL is returned.
 */
struct mvneta_dmamem *
mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvneta_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	/* Single segment: the ring must be contiguous for the device. */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	/* Request a cache-coherent mapping for descriptor access. */
	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}
1722 
/*
 * Release a chunk obtained from mvneta_dmamem_alloc(): unmap the KVA,
 * free the segment, destroy the map, then free the bookkeeping
 * structure — the reverse of the allocation order.
 */
void
mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}
1731 
1732 static inline struct mbuf *
1733 mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
1734 {
1735 	struct mbuf *m = NULL;
1736 
1737 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1738 	if (m == NULL)
1739 		return (NULL);
1740 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1741 
1742 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1743 		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
1744 		m_freem(m);
1745 		return (NULL);
1746 	}
1747 
1748 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1749 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1750 
1751 	return (m);
1752 }
1753 
/*
 * Refill the Rx ring with fresh cluster mbufs, up to the number of
 * slots if_rxr grants (capped at MVNETA_PRXSU_MAX, the most one PRXSU
 * write can report).  Newly initialized descriptors are handed to the
 * hardware with a single non-occupied-descriptor count update at the
 * end.
 */
void
mvneta_fill_rx_ring(struct mvneta_softc *sc)
{
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	unsigned int slots, used = 0;
	unsigned int prod;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_POSTWRITE);

	prod = sc->sc_rx_prod;

	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_PRXSU_MAX);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[prod];
		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
		/* Stop refilling on mbuf shortage; unused slots go back. */
		if (rxb->tb_m == NULL)
			break;

		/* Clear the descriptor and point it at the new buffer. */
		rxd = &sc->sc_rxdesc[prod];
		rxd->cmdsts = 0;
		rxd->bufsize = 0;
		rxd->bytecnt = 0;
		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;
		rxd->nextdescptr = 0;
		rxd->_padding[0] = 0;
		rxd->_padding[1] = 0;
		rxd->_padding[2] = 0;
		rxd->_padding[3] = 0;

		prod = MVNETA_RX_RING_NEXT(prod);
		used++;
	}
	/* Return the slots we did not manage to fill. */
	if_rxr_put(&sc->sc_rx_ring, slots);

	sc->sc_rx_prod = prod;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREWRITE);

	/* Tell the device how many descriptors became available. */
	if (used > 0)
		MVNETA_WRITE(sc, MVNETA_PRXSU(0), MVNETA_PRXSU_NOND(used));
}
1798 
1799 #if NKSTAT > 0
1800 
/* this is used to sort and look up the array of kstats quickly */
enum mvneta_stat {
	/* MIB counters; register offsets live in mvneta_counters[] */
	mvneta_stat_good_octets_received,
	mvneta_stat_bad_octets_received,
	mvneta_stat_good_frames_received,
	mvneta_stat_mac_trans_error,
	mvneta_stat_bad_frames_received,
	mvneta_stat_broadcast_frames_received,
	mvneta_stat_multicast_frames_received,
	mvneta_stat_frames_64_octets,
	mvneta_stat_frames_65_to_127_octets,
	mvneta_stat_frames_128_to_255_octets,
	mvneta_stat_frames_256_to_511_octets,
	mvneta_stat_frames_512_to_1023_octets,
	mvneta_stat_frames_1024_to_max_octets,
	mvneta_stat_good_octets_sent,
	mvneta_stat_good_frames_sent,
	mvneta_stat_excessive_collision,
	mvneta_stat_multicast_frames_sent,
	mvneta_stat_broadcast_frames_sent,
	mvneta_stat_unrecog_mac_control_received,
	mvneta_stat_good_fc_received,
	mvneta_stat_bad_fc_received,
	mvneta_stat_undersize,
	mvneta_stat_fc_sent,
	mvneta_stat_fragments,
	mvneta_stat_oversize,
	mvneta_stat_jabber,
	mvneta_stat_mac_rcv_error,
	mvneta_stat_bad_crc,
	mvneta_stat_collisions,
	mvneta_stat_late_collisions,

	/* port-level counters outside the MIB register block */
	mvneta_stat_port_discard,
	mvneta_stat_port_overrun,

	/* number of entries; must match nitems(mvneta_counters) */
	mvnet_stat_count
};
1839 
/* Description of one hardware counter exported via kstat. */
struct mvneta_counter {
	const char		 *name;	/* kstat key name */
	enum kstat_kv_unit	 unit;	/* bytes/packets/none */
	bus_size_t		 reg;	/* register offset; 0 = 64-bit pair */
};
1845 
/*
 * Counter table, indexed by enum mvneta_stat.  Offsets in the 0x3xxx
 * range are MIB counters; reg == 0 marks the two 64-bit octet
 * counters that mvneta_kstat_read() assembles from lo/hi register
 * pairs itself.
 */
static const struct mvneta_counter mvneta_counters[] = {
	[mvneta_stat_good_octets_received] =
	    { "rx good",	KSTAT_KV_U_BYTES,	0x0 /* 64bit */ },
	[mvneta_stat_bad_octets_received] =
	    { "rx bad",		KSTAT_KV_U_BYTES,	0x3008 },
	[mvneta_stat_good_frames_received] =
	    { "rx good",	KSTAT_KV_U_PACKETS,	0x3010 },
	[mvneta_stat_mac_trans_error] =
	    { "tx mac error",	KSTAT_KV_U_PACKETS,	0x300c },
	[mvneta_stat_bad_frames_received] =
	    { "rx bad",		KSTAT_KV_U_PACKETS,	0x3014 },
	[mvneta_stat_broadcast_frames_received] =
	    { "rx bcast",	KSTAT_KV_U_PACKETS,	0x3018 },
	[mvneta_stat_multicast_frames_received] =
	    { "rx mcast",	KSTAT_KV_U_PACKETS,	0x301c },
	[mvneta_stat_frames_64_octets] =
	    { "64B",		KSTAT_KV_U_PACKETS,	0x3020 },
	[mvneta_stat_frames_65_to_127_octets] =
	    { "65-127B",	KSTAT_KV_U_PACKETS,	0x3024 },
	[mvneta_stat_frames_128_to_255_octets] =
	    { "128-255B",	KSTAT_KV_U_PACKETS,	0x3028 },
	[mvneta_stat_frames_256_to_511_octets] =
	    { "256-511B",	KSTAT_KV_U_PACKETS,	0x302c },
	[mvneta_stat_frames_512_to_1023_octets] =
	    { "512-1023B",	KSTAT_KV_U_PACKETS,	0x3030 },
	[mvneta_stat_frames_1024_to_max_octets] =
	    { "1024-maxB",	KSTAT_KV_U_PACKETS,	0x3034 },
	[mvneta_stat_good_octets_sent] =
	    { "tx good",	KSTAT_KV_U_BYTES,	0x0 /* 64bit */ },
	[mvneta_stat_good_frames_sent] =
	    { "tx good",	KSTAT_KV_U_PACKETS,	0x3040 },
	[mvneta_stat_excessive_collision] =
	    { "tx excess coll",	KSTAT_KV_U_PACKETS,	0x3044 },
	[mvneta_stat_multicast_frames_sent] =
	    { "tx mcast",	KSTAT_KV_U_PACKETS,	0x3048 },
	[mvneta_stat_broadcast_frames_sent] =
	    { "tx bcast",	KSTAT_KV_U_PACKETS,	0x304c },
	[mvneta_stat_unrecog_mac_control_received] =
	    { "rx unknown fc",	KSTAT_KV_U_PACKETS,	0x3050 },
	[mvneta_stat_good_fc_received] =
	    { "rx fc good",	KSTAT_KV_U_PACKETS,	0x3058 },
	[mvneta_stat_bad_fc_received] =
	    { "rx fc bad",	KSTAT_KV_U_PACKETS,	0x305c },
	[mvneta_stat_undersize] =
	    { "rx undersize",	KSTAT_KV_U_PACKETS,	0x3060 },
	[mvneta_stat_fc_sent] =
	    { "tx fc",		KSTAT_KV_U_PACKETS,	0x3054 },
	[mvneta_stat_fragments] =
	    { "rx fragments",	KSTAT_KV_U_NONE,	0x3064 },
	[mvneta_stat_oversize] =
	    { "rx oversize",	KSTAT_KV_U_PACKETS,	0x3068 },
	[mvneta_stat_jabber] =
	    { "rx jabber",	KSTAT_KV_U_PACKETS,	0x306c },
	[mvneta_stat_mac_rcv_error] =
	    { "rx mac errors",	KSTAT_KV_U_PACKETS,	0x3070 },
	[mvneta_stat_bad_crc] =
	    { "rx bad crc",	KSTAT_KV_U_PACKETS,	0x3074 },
	[mvneta_stat_collisions] =
	    { "rx colls",	KSTAT_KV_U_PACKETS,	0x3078 },
	[mvneta_stat_late_collisions] =
	    { "rx late colls",	KSTAT_KV_U_PACKETS,	0x307c },

	/* port counters read from dedicated registers, not the MIB block */
	[mvneta_stat_port_discard] =
	    { "rx discard",	KSTAT_KV_U_PACKETS,	MVNETA_PXDFC },
	[mvneta_stat_port_overrun] =
	    { "rx overrun",	KSTAT_KV_U_PACKETS,	MVNETA_POFC },
};
1913 
1914 CTASSERT(nitems(mvneta_counters) == mvnet_stat_count);
1915 
1916 int
1917 mvneta_kstat_read(struct kstat *ks)
1918 {
1919 	struct mvneta_softc *sc = ks->ks_softc;
1920 	struct kstat_kv *kvs = ks->ks_data;
1921 	unsigned int i;
1922 	uint32_t hi, lo;
1923 
1924 	for (i = 0; i < nitems(mvneta_counters); i++) {
1925 		const struct mvneta_counter *c = &mvneta_counters[i];
1926 		if (c->reg == 0)
1927 			continue;
1928 
1929 		kstat_kv_u64(&kvs[i]) += (uint64_t)MVNETA_READ(sc, c->reg);
1930 	}
1931 
1932 	/* handle the exceptions */
1933 
1934 	lo = MVNETA_READ(sc, 0x3000);
1935 	hi = MVNETA_READ(sc, 0x3004);
1936 	kstat_kv_u64(&kvs[mvneta_stat_good_octets_received]) +=
1937 	    (uint64_t)hi << 32 | (uint64_t)lo;
1938 
1939 	lo = MVNETA_READ(sc, 0x3038);
1940 	hi = MVNETA_READ(sc, 0x303c);
1941 	kstat_kv_u64(&kvs[mvneta_stat_good_octets_sent]) +=
1942 	    (uint64_t)hi << 32 | (uint64_t)lo;
1943 
1944 	nanouptime(&ks->ks_updated);
1945 
1946 	return (0);
1947 }
1948 
/*
 * Periodic kstat refresh.  Reschedules itself every 37 seconds and,
 * if the kstat mutex is uncontended, folds the current hardware
 * counters into the accumulated values via mvneta_kstat_read()
 * (presumably to beat 32-bit hardware counter wrap — the values are
 * read-and-accumulated).  A contended lock simply skips this round.
 */
void
mvneta_kstat_tick(void *arg)
{
	struct mvneta_softc *sc = arg;

	timeout_add_sec(&sc->sc_kstat_tick, 37);

	if (mtx_enter_try(&sc->sc_kstat_lock)) {
		mvneta_kstat_read(sc->sc_kstat);
		mtx_leave(&sc->sc_kstat_lock);
	}
}
1961 
/*
 * Create and install the "mvneta-stats" kstat.  The counter values
 * live in an array of kstat_kv indexed identically to
 * mvneta_counters[], refreshed on demand (ks_read) and periodically
 * by mvneta_kstat_tick().  Silently does nothing if kstat_create()
 * fails.
 */
void
mvneta_kstat_attach(struct mvneta_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	unsigned int i;

	/* IPL_SOFTCLOCK: the tick timeout also takes this mutex. */
	mtx_init(&sc->sc_kstat_lock, IPL_SOFTCLOCK);
	timeout_set(&sc->sc_kstat_tick, mvneta_kstat_tick, sc);

	ks = kstat_create(sc->sc_dev.dv_xname, 0, "mvneta-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(mvneta_counters), sizeof(*kvs),
	    M_DEVBUF, M_WAITOK|M_ZERO);
	for (i = 0; i < nitems(mvneta_counters); i++) {
		const struct mvneta_counter *c = &mvneta_counters[i];
		kstat_kv_unit_init(&kvs[i], c->name,
		    KSTAT_KV_T_COUNTER64, c->unit);
	}

	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(mvneta_counters) * sizeof(*kvs);
	ks->ks_read = mvneta_kstat_read;
	kstat_set_mutex(ks, &sc->sc_kstat_lock);

	kstat_install(ks);

	sc->sc_kstat = ks;

	/* First refresh in 37s; the tick reschedules itself after that. */
	timeout_add_sec(&sc->sc_kstat_tick, 37);
}
1997 
1998 #endif
1999