xref: /openbsd/sys/dev/fdt/if_mvneta.c (revision c5c83bc1)
1 /*	$OpenBSD: if_mvneta.c,v 1.32 2024/03/21 23:12:33 patrick Exp $	*/
2 /*	$NetBSD: if_mvneta.c,v 1.41 2015/04/15 10:15:40 hsuenaga Exp $	*/
3 /*
4  * Copyright (c) 2007, 2008, 2013 KIYOHARA Takashi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include "bpfilter.h"
30 #include "kstat.h"
31 
32 #include <sys/param.h>
33 #include <sys/device.h>
34 #include <sys/systm.h>
35 #include <sys/endian.h>
36 #include <sys/errno.h>
37 #include <sys/kernel.h>
38 #include <sys/mutex.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <uvm/uvm_extern.h>
42 #include <sys/mbuf.h>
43 #include <sys/kstat.h>
44 
45 #include <machine/bus.h>
46 #include <machine/cpufunc.h>
47 #include <machine/fdt.h>
48 
49 #include <dev/ofw/openfirm.h>
50 #include <dev/ofw/ofw_clock.h>
51 #include <dev/ofw/ofw_misc.h>
52 #include <dev/ofw/ofw_pinctrl.h>
53 #include <dev/ofw/fdt.h>
54 
55 #include <dev/fdt/if_mvnetareg.h>
56 
57 #ifdef __armv7__
58 #include <armv7/marvell/mvmbusvar.h>
59 #endif
60 
61 #include <net/if.h>
62 #include <net/if_media.h>
63 #include <net/if_types.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/if_ether.h>
67 
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70 
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #endif
74 
75 #ifdef MVNETA_DEBUG
76 #define DPRINTF(x)	if (mvneta_debug) printf x
77 #define DPRINTFN(n,x)	if (mvneta_debug >= (n)) printf x
78 int mvneta_debug = MVNETA_DEBUG;
79 #else
80 #define DPRINTF(x)
81 #define DPRINTFN(n,x)
82 #endif
83 
84 #define MVNETA_READ(sc, reg) \
85 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
86 #define MVNETA_WRITE(sc, reg, val) \
87 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
88 #define MVNETA_READ_FILTER(sc, reg, val, c) \
89 	bus_space_read_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
90 #define MVNETA_WRITE_FILTER(sc, reg, val, c) \
91 	bus_space_write_region_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val), (c))
92 
93 #define MVNETA_LINKUP_READ(sc) \
94 	MVNETA_READ(sc, MVNETA_PS0)
95 #define MVNETA_IS_LINKUP(sc)	(MVNETA_LINKUP_READ(sc) & MVNETA_PS0_LINKUP)
96 
97 #define MVNETA_TX_RING_CNT	256
98 #define MVNETA_TX_RING_MSK	(MVNETA_TX_RING_CNT - 1)
99 #define MVNETA_TX_RING_NEXT(x)	(((x) + 1) & MVNETA_TX_RING_MSK)
100 #define MVNETA_TX_QUEUE_CNT	1
101 #define MVNETA_RX_RING_CNT	256
102 #define MVNETA_RX_RING_MSK	(MVNETA_RX_RING_CNT - 1)
103 #define MVNETA_RX_RING_NEXT(x)	(((x) + 1) & MVNETA_RX_RING_MSK)
104 #define MVNETA_RX_QUEUE_CNT	1
105 
106 CTASSERT(MVNETA_TX_RING_CNT > 1 && MVNETA_TX_RING_NEXT(MVNETA_TX_RING_CNT) ==
107 	(MVNETA_TX_RING_CNT + 1) % MVNETA_TX_RING_CNT);
108 CTASSERT(MVNETA_RX_RING_CNT > 1 && MVNETA_RX_RING_NEXT(MVNETA_RX_RING_CNT) ==
109 	(MVNETA_RX_RING_CNT + 1) % MVNETA_RX_RING_CNT);
110 
111 #define MVNETA_NTXSEG		30
112 
113 struct mvneta_dmamem {
114 	bus_dmamap_t		mdm_map;
115 	bus_dma_segment_t	mdm_seg;
116 	size_t			mdm_size;
117 	caddr_t			mdm_kva;
118 };
119 #define MVNETA_DMA_MAP(_mdm)	((_mdm)->mdm_map)
120 #define MVNETA_DMA_LEN(_mdm)	((_mdm)->mdm_size)
121 #define MVNETA_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
122 #define MVNETA_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
123 
124 struct mvneta_buf {
125 	bus_dmamap_t	tb_map;
126 	struct mbuf	*tb_m;
127 };
128 
129 struct mvneta_softc {
130 	struct device sc_dev;
131 	struct mii_bus *sc_mdio;
132 
133 	bus_space_tag_t sc_iot;
134 	bus_space_handle_t sc_ioh;
135 	bus_dma_tag_t sc_dmat;
136 	void *sc_ih;
137 
138 	uint64_t		sc_clk_freq;
139 
140 	struct arpcom sc_ac;
141 #define sc_enaddr sc_ac.ac_enaddr
142 	struct mii_data sc_mii;
143 #define sc_media sc_mii.mii_media
144 
145 	struct timeout sc_tick_ch;
146 
147 	struct mvneta_dmamem	*sc_txring;
148 	struct mvneta_buf	*sc_txbuf;
149 	struct mvneta_tx_desc	*sc_txdesc;
150 	unsigned int		 sc_tx_prod;	/* next free tx desc */
151 	unsigned int		 sc_tx_cons;	/* first tx desc sent */
152 
153 	struct mvneta_dmamem	*sc_rxring;
154 	struct mvneta_buf	*sc_rxbuf;
155 	struct mvneta_rx_desc	*sc_rxdesc;
156 	unsigned int		 sc_rx_prod;	/* next rx desc to fill */
157 	unsigned int		 sc_rx_cons;	/* next rx desc recvd */
158 	struct if_rxring	 sc_rx_ring;
159 
160 	enum {
161 		PHY_MODE_QSGMII,
162 		PHY_MODE_SGMII,
163 		PHY_MODE_RGMII,
164 		PHY_MODE_RGMII_ID,
165 		PHY_MODE_1000BASEX,
166 		PHY_MODE_2500BASEX,
167 	}			 sc_phy_mode;
168 	int			 sc_fixed_link;
169 	int			 sc_inband_status;
170 	int			 sc_phy;
171 	int			 sc_phyloc;
172 	int			 sc_link;
173 	int			 sc_sfp;
174 	int			 sc_node;
175 
176 	struct if_device	 sc_ifd;
177 
178 #if NKSTAT > 0
179 	struct mutex		 sc_kstat_lock;
180 	struct timeout		 sc_kstat_tick;
181 	struct kstat		*sc_kstat;
182 #endif
183 };
184 
185 
186 int mvneta_miibus_readreg(struct device *, int, int);
187 void mvneta_miibus_writereg(struct device *, int, int, int);
188 void mvneta_miibus_statchg(struct device *);
189 
190 void mvneta_wininit(struct mvneta_softc *);
191 
192 /* Gigabit Ethernet Port part functions */
193 int mvneta_match(struct device *, void *, void *);
194 void mvneta_attach(struct device *, struct device *, void *);
195 void mvneta_attach_deferred(struct device *);
196 
197 void mvneta_tick(void *);
198 int mvneta_intr(void *);
199 
200 void mvneta_start(struct ifqueue *);
201 int mvneta_ioctl(struct ifnet *, u_long, caddr_t);
202 void mvneta_inband_statchg(struct mvneta_softc *);
203 void mvneta_port_change(struct mvneta_softc *);
204 void mvneta_port_up(struct mvneta_softc *);
205 int mvneta_up(struct mvneta_softc *);
206 void mvneta_down(struct mvneta_softc *);
207 void mvneta_watchdog(struct ifnet *);
208 
209 int mvneta_mediachange(struct ifnet *);
210 void mvneta_mediastatus(struct ifnet *, struct ifmediareq *);
211 
212 void mvneta_rx_proc(struct mvneta_softc *);
213 void mvneta_tx_proc(struct mvneta_softc *);
214 uint8_t mvneta_crc8(const uint8_t *, size_t);
215 void mvneta_iff(struct mvneta_softc *);
216 
217 struct mvneta_dmamem *mvneta_dmamem_alloc(struct mvneta_softc *,
218     bus_size_t, bus_size_t);
219 void mvneta_dmamem_free(struct mvneta_softc *, struct mvneta_dmamem *);
220 void mvneta_fill_rx_ring(struct mvneta_softc *);
221 
222 #if NKSTAT > 0
223 void		mvneta_kstat_attach(struct mvneta_softc *);
224 #endif
225 
226 static struct rwlock mvneta_sff_lock = RWLOCK_INITIALIZER("mvnetasff");
227 
228 struct cfdriver mvneta_cd = {
229 	NULL, "mvneta", DV_IFNET
230 };
231 
232 const struct cfattach mvneta_ca = {
233 	sizeof (struct mvneta_softc), mvneta_match, mvneta_attach,
234 };
235 
236 int
mvneta_miibus_readreg(struct device * dev,int phy,int reg)237 mvneta_miibus_readreg(struct device *dev, int phy, int reg)
238 {
239 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
240 	return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
241 }
242 
243 void
mvneta_miibus_writereg(struct device * dev,int phy,int reg,int val)244 mvneta_miibus_writereg(struct device *dev, int phy, int reg, int val)
245 {
246 	struct mvneta_softc *sc = (struct mvneta_softc *) dev;
247 	return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
248 }
249 
250 void
mvneta_miibus_statchg(struct device * self)251 mvneta_miibus_statchg(struct device *self)
252 {
253 	struct mvneta_softc *sc = (struct mvneta_softc *)self;
254 
255 	if (sc->sc_mii.mii_media_status & IFM_ACTIVE) {
256 		uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
257 
258 		panc &= ~(MVNETA_PANC_SETMIISPEED |
259 			  MVNETA_PANC_SETGMIISPEED |
260 			  MVNETA_PANC_SETFULLDX);
261 
262 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
263 		case IFM_1000_SX:
264 		case IFM_1000_LX:
265 		case IFM_1000_CX:
266 		case IFM_1000_T:
267 			panc |= MVNETA_PANC_SETGMIISPEED;
268 			break;
269 		case IFM_100_TX:
270 			panc |= MVNETA_PANC_SETMIISPEED;
271 			break;
272 		case IFM_10_T:
273 			break;
274 		}
275 
276 		if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
277 			panc |= MVNETA_PANC_SETFULLDX;
278 
279 		MVNETA_WRITE(sc, MVNETA_PANC, panc);
280 	}
281 
282 	mvneta_port_change(sc);
283 }
284 
/*
 * Derive link state and active media from the port status register when
 * link management is in-band (no attached PHY driver), then push the
 * result to the interface layer via mvneta_port_change().
 */
void
mvneta_inband_statchg(struct mvneta_softc *sc)
{
	/* Remember the previous subtype: fixed-speed 802.3z modes keep it. */
	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	reg = MVNETA_READ(sc, MVNETA_PS0);
	if (reg & MVNETA_PS0_LINKUP)
		sc->sc_mii.mii_media_status |= IFM_ACTIVE;
	/* 1000baseX/2500baseX run at a fixed speed; otherwise read it out. */
	if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
		sc->sc_mii.mii_media_active |= subtype;
	else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
		sc->sc_mii.mii_media_active |= subtype;
	else if (reg & MVNETA_PS0_GMIISPEED)
		sc->sc_mii.mii_media_active |= IFM_1000_T;
	else if (reg & MVNETA_PS0_MIISPEED)
		sc->sc_mii.mii_media_active |= IFM_100_TX;
	else
		sc->sc_mii.mii_media_active |= IFM_10_T;
	if (reg & MVNETA_PS0_FULLDX)
		sc->sc_mii.mii_media_active |= IFM_FDX;

	mvneta_port_change(sc);
}
312 
313 void
mvneta_enaddr_write(struct mvneta_softc * sc)314 mvneta_enaddr_write(struct mvneta_softc *sc)
315 {
316 	uint32_t maddrh, maddrl;
317 	maddrh  = sc->sc_enaddr[0] << 24;
318 	maddrh |= sc->sc_enaddr[1] << 16;
319 	maddrh |= sc->sc_enaddr[2] << 8;
320 	maddrh |= sc->sc_enaddr[3];
321 	maddrl  = sc->sc_enaddr[4] << 8;
322 	maddrl |= sc->sc_enaddr[5];
323 	MVNETA_WRITE(sc, MVNETA_MACAH, maddrh);
324 	MVNETA_WRITE(sc, MVNETA_MACAL, maddrl);
325 }
326 
/*
 * Program the address decoding windows so the NETA DMA engine can reach
 * DRAM.  Every window is first cleared, then either populated from the
 * armv7 mbus DRAM description or, on other platforms, window 0 is opened
 * with a full-size mapping.  Finally MVNETA_BARE unmasks the windows
 * that were configured.
 */
void
mvneta_wininit(struct mvneta_softc *sc)
{
	uint32_t en;
	int i;

#ifdef __armv7__
	/* armv7 requires the mbus DRAM layout discovered at boot. */
	if (mvmbus_dram_info == NULL)
		panic("%s: mbus dram information not set up",
		    sc->sc_dev.dv_xname);
#endif

	/* Clear every window (and remap register, where one exists). */
	for (i = 0; i < MVNETA_NWINDOW; i++) {
		MVNETA_WRITE(sc, MVNETA_BASEADDR(i), 0);
		MVNETA_WRITE(sc, MVNETA_S(i), 0);

		if (i < MVNETA_NREMAP)
			MVNETA_WRITE(sc, MVNETA_HA(i), 0);
	}

	/* Start with all windows disabled; clear a bit per window in use. */
	en = MVNETA_BARE_EN_MASK;

#ifdef __armv7__
	for (i = 0; i < mvmbus_dram_info->numcs; i++) {
		struct mbus_dram_window *win = &mvmbus_dram_info->cs[i];

		MVNETA_WRITE(sc, MVNETA_BASEADDR(i),
		    MVNETA_BASEADDR_TARGET(mvmbus_dram_info->targetid) |
		    MVNETA_BASEADDR_ATTR(win->attr)	|
		    MVNETA_BASEADDR_BASE(win->base));
		MVNETA_WRITE(sc, MVNETA_S(i), MVNETA_S_SIZE(win->size));

		en &= ~(1 << i);
	}
#else
	/* No mbus description: enable window 0 only. */
	MVNETA_WRITE(sc, MVNETA_S(0), MVNETA_S_SIZE(0));
	en &= ~(1 << 0);
#endif

	MVNETA_WRITE(sc, MVNETA_BARE, en);
}
368 
369 #define COMPHY_SIP_POWER_ON	0x82000001
370 #define COMPHY_SIP_POWER_OFF	0x82000002
371 #define COMPHY_SPEED(x)		((x) << 2)
372 #define  COMPHY_SPEED_1_25G		0 /* SGMII 1G */
373 #define  COMPHY_SPEED_2_5G		1
374 #define  COMPHY_SPEED_3_125G		2 /* SGMII 2.5G */
375 #define  COMPHY_SPEED_5G		3
376 #define  COMPHY_SPEED_5_15625G		4 /* XFI 5G */
377 #define  COMPHY_SPEED_6G		5
378 #define  COMPHY_SPEED_10_3125G		6 /* XFI 10G */
379 #define COMPHY_UNIT(x)		((x) << 8)
380 #define COMPHY_MODE(x)		((x) << 12)
381 #define  COMPHY_MODE_SATA		1
382 #define  COMPHY_MODE_SGMII		2 /* SGMII 1G */
383 #define  COMPHY_MODE_HS_SGMII		3 /* SGMII 2.5G */
384 #define  COMPHY_MODE_USB3H		4
385 #define  COMPHY_MODE_USB3D		5
386 #define  COMPHY_MODE_PCIE		6
387 #define  COMPHY_MODE_RXAUI		7
388 #define  COMPHY_MODE_XFI		8
389 #define  COMPHY_MODE_SFI		9
390 #define  COMPHY_MODE_USB3		10
391 
/*
 * Power on the shared COMPHY serdes lane used by this port through the
 * secure-firmware SiP call, for SGMII/1000base-X (1.25G) and 2500base-X
 * (3.125G).  Nodes without a "phys" property, or ports in other phy
 * modes, are left untouched.
 */
void
mvneta_comphy_init(struct mvneta_softc *sc)
{
	int node, phys[2], lane, unit;
	uint32_t mode;

	/* "phys" is a <phandle unit> pair; bail if absent or malformed. */
	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
	    sizeof(phys))
		return;
	node = OF_getnodebyphandle(phys[0]);
	if (!node)
		return;

	/* The lane number comes from the comphy node's "reg" property. */
	lane = OF_getpropint(node, "reg", 0);
	unit = phys[1];

	switch (sc->sc_phy_mode) {
	case PHY_MODE_1000BASEX:
	case PHY_MODE_SGMII:
		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_2500BASEX:
		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
		    COMPHY_UNIT(unit);
		break;
	default:
		/* Remaining modes do not go through the COMPHY. */
		return;
	}

	smc_call(COMPHY_SIP_POWER_ON, lane, mode, 0);
}
426 
427 int
mvneta_match(struct device * parent,void * cfdata,void * aux)428 mvneta_match(struct device *parent, void *cfdata, void *aux)
429 {
430 	struct fdt_attach_args *faa = aux;
431 
432 	return OF_is_compatible(faa->fa_node, "marvell,armada-370-neta") ||
433 	    OF_is_compatible(faa->fa_node, "marvell,armada-3700-neta");
434 }
435 
436 void
mvneta_attach(struct device * parent,struct device * self,void * aux)437 mvneta_attach(struct device *parent, struct device *self, void *aux)
438 {
439 	struct mvneta_softc *sc = (struct mvneta_softc *) self;
440 	struct fdt_attach_args *faa = aux;
441 	uint32_t ctl0, ctl2, ctl4, panc;
442 	struct ifnet *ifp;
443 	int i, len, node;
444 	char *phy_mode;
445 	char *managed;
446 
447 	sc->sc_iot = faa->fa_iot;
448 	timeout_set(&sc->sc_tick_ch, mvneta_tick, sc);
449 	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
450 	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
451 		printf("%s: cannot map registers\n", self->dv_xname);
452 		return;
453 	}
454 	sc->sc_dmat = faa->fa_dmat;
455 	sc->sc_node = faa->fa_node;
456 
457 	clock_enable(faa->fa_node, NULL);
458 	sc->sc_clk_freq = clock_get_frequency_idx(faa->fa_node, 0);
459 
460 	pinctrl_byname(faa->fa_node, "default");
461 
462 	len = OF_getproplen(faa->fa_node, "phy-mode");
463 	if (len <= 0) {
464 		printf(": cannot extract phy-mode\n");
465 		return;
466 	}
467 
468 	phy_mode = malloc(len, M_TEMP, M_WAITOK);
469 	OF_getprop(faa->fa_node, "phy-mode", phy_mode, len);
470 	if (!strncmp(phy_mode, "qsgmii", strlen("qsgmii")))
471 		sc->sc_phy_mode = PHY_MODE_QSGMII;
472 	else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
473 		sc->sc_phy_mode = PHY_MODE_SGMII;
474 	else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
475 		sc->sc_phy_mode = PHY_MODE_RGMII_ID;
476 	else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
477 		sc->sc_phy_mode = PHY_MODE_RGMII;
478 	else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
479 		sc->sc_phy_mode = PHY_MODE_1000BASEX;
480 	else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
481 		sc->sc_phy_mode = PHY_MODE_2500BASEX;
482 	else {
483 		printf(": cannot use phy-mode %s\n", phy_mode);
484 		return;
485 	}
486 	free(phy_mode, M_TEMP, len);
487 
488 	/* TODO: check child's name to be "fixed-link" */
489 	if (OF_getproplen(faa->fa_node, "fixed-link") >= 0 ||
490 	    OF_child(faa->fa_node))
491 		sc->sc_fixed_link = 1;
492 
493 	if ((len = OF_getproplen(faa->fa_node, "managed")) >= 0) {
494 		managed = malloc(len, M_TEMP, M_WAITOK);
495 		OF_getprop(faa->fa_node, "managed", managed, len);
496 		if (!strncmp(managed, "in-band-status",
497 		    strlen("in-band-status"))) {
498 			sc->sc_fixed_link = 1;
499 			sc->sc_inband_status = 1;
500 		}
501 		free(managed, M_TEMP, len);
502 	}
503 
504 	if (!sc->sc_fixed_link) {
505 		sc->sc_phy = OF_getpropint(faa->fa_node, "phy-handle", 0);
506 		if (!sc->sc_phy)
507 			sc->sc_phy = OF_getpropint(faa->fa_node, "phy", 0);
508 		node = OF_getnodebyphandle(sc->sc_phy);
509 		if (!node) {
510 			printf(": cannot find phy in fdt\n");
511 			return;
512 		}
513 
514 		if ((sc->sc_phyloc = OF_getpropint(node, "reg", -1)) == -1) {
515 			printf(": cannot extract phy addr\n");
516 			return;
517 		}
518 	}
519 
520 	mvneta_wininit(sc);
521 
522 	if (OF_getproplen(faa->fa_node, "local-mac-address") ==
523 	    ETHER_ADDR_LEN) {
524 		OF_getprop(faa->fa_node, "local-mac-address",
525 		    sc->sc_enaddr, ETHER_ADDR_LEN);
526 		mvneta_enaddr_write(sc);
527 	} else {
528 		uint32_t maddrh, maddrl;
529 		maddrh = MVNETA_READ(sc, MVNETA_MACAH);
530 		maddrl = MVNETA_READ(sc, MVNETA_MACAL);
531 		if (maddrh || maddrl) {
532 			sc->sc_enaddr[0] = maddrh >> 24;
533 			sc->sc_enaddr[1] = maddrh >> 16;
534 			sc->sc_enaddr[2] = maddrh >> 8;
535 			sc->sc_enaddr[3] = maddrh >> 0;
536 			sc->sc_enaddr[4] = maddrl >> 8;
537 			sc->sc_enaddr[5] = maddrl >> 0;
538 		} else
539 			ether_fakeaddr(&sc->sc_ac.ac_if);
540 	}
541 
542 	sc->sc_sfp = OF_getpropint(faa->fa_node, "sfp", 0);
543 
544 	printf(": address %s\n", ether_sprintf(sc->sc_enaddr));
545 
546 	/* disable port */
547 	MVNETA_WRITE(sc, MVNETA_PMACC0,
548 	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
549 	delay(200);
550 
551 	/* clear all cause registers */
552 	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
553 	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
554 	MVNETA_WRITE(sc, MVNETA_PMIC, 0);
555 
556 	/* mask all interrupts */
557 	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
558 	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
559 	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
560 	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);
561 	MVNETA_WRITE(sc, MVNETA_PIE, 0);
562 
563 	/* enable MBUS Retry bit16 */
564 	MVNETA_WRITE(sc, MVNETA_ERETRY, 0x20);
565 
566 	/* enable access for CPU0 */
567 	MVNETA_WRITE(sc, MVNETA_PCP2Q(0),
568 	    MVNETA_PCP2Q_RXQAE_ALL | MVNETA_PCP2Q_TXQAE_ALL);
569 
570 	/* reset RX and TX DMAs */
571 	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
572 	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
573 
574 	/* disable legacy WRR, disable EJP, release from reset */
575 	MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
576 	for (i = 0; i < MVNETA_TX_QUEUE_CNT; i++) {
577 		MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(i), 0);
578 		MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(i), 0);
579 	}
580 
581 	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
582 	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);
583 
584 	/* set port acceleration mode */
585 	MVNETA_WRITE(sc, MVNETA_PACC, MVGVE_PACC_ACCELERATIONMODE_EDM);
586 
587 	MVNETA_WRITE(sc, MVNETA_PXC, MVNETA_PXC_AMNOTXES | MVNETA_PXC_RXCS);
588 	MVNETA_WRITE(sc, MVNETA_PXCX, 0);
589 	MVNETA_WRITE(sc, MVNETA_PMFS, 64);
590 
591 	/* Set SDC register except IPGINT bits */
592 	MVNETA_WRITE(sc, MVNETA_SDC,
593 	    MVNETA_SDC_RXBSZ_16_64BITWORDS |
594 	    MVNETA_SDC_BLMR |	/* Big/Little Endian Receive Mode: No swap */
595 	    MVNETA_SDC_BLMT |	/* Big/Little Endian Transmit Mode: No swap */
596 	    MVNETA_SDC_TXBSZ_16_64BITWORDS);
597 
598 	/* XXX: Disable PHY polling in hardware */
599 	MVNETA_WRITE(sc, MVNETA_EUC,
600 	    MVNETA_READ(sc, MVNETA_EUC) & ~MVNETA_EUC_POLLING);
601 
602 	/* clear uni-/multicast tables */
603 	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
604 	memset(dfut, 0, sizeof(dfut));
605 	memset(dfsmt, 0, sizeof(dfut));
606 	memset(dfomt, 0, sizeof(dfut));
607 	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);
608 	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfut, MVNETA_NDFSMT);
609 	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfut, MVNETA_NDFOMT);
610 
611 	MVNETA_WRITE(sc, MVNETA_PIE,
612 	    MVNETA_PIE_RXPKTINTRPTENB_ALL | MVNETA_PIE_TXPKTINTRPTENB_ALL);
613 
614 	MVNETA_WRITE(sc, MVNETA_EUIC, 0);
615 
616 	/* Setup phy. */
617 	ctl0 = MVNETA_READ(sc, MVNETA_PMACC0);
618 	ctl2 = MVNETA_READ(sc, MVNETA_PMACC2);
619 	ctl4 = MVNETA_READ(sc, MVNETA_PMACC4);
620 	panc = MVNETA_READ(sc, MVNETA_PANC);
621 
622 	/* Force link down to change in-band settings. */
623 	panc &= ~MVNETA_PANC_FORCELINKPASS;
624 	panc |= MVNETA_PANC_FORCELINKFAIL;
625 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
626 
627 	mvneta_comphy_init(sc);
628 
629 	ctl0 &= ~MVNETA_PMACC0_PORTTYPE;
630 	ctl2 &= ~(MVNETA_PMACC2_PORTMACRESET | MVNETA_PMACC2_INBANDAN);
631 	ctl4 &= ~(MVNETA_PMACC4_SHORT_PREAMBLE);
632 	panc &= ~(MVNETA_PANC_INBANDANEN | MVNETA_PANC_INBANDRESTARTAN |
633 	    MVNETA_PANC_SETMIISPEED | MVNETA_PANC_SETGMIISPEED |
634 	    MVNETA_PANC_ANSPEEDEN | MVNETA_PANC_SETFCEN |
635 	    MVNETA_PANC_PAUSEADV | MVNETA_PANC_ANFCEN |
636 	    MVNETA_PANC_SETFULLDX | MVNETA_PANC_ANDUPLEXEN);
637 
638 	ctl2 |= MVNETA_PMACC2_RGMIIEN;
639 	switch (sc->sc_phy_mode) {
640 	case PHY_MODE_QSGMII:
641 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
642 		    MVNETA_SERDESCFG_QSGMII_PROTO);
643 		ctl2 |= MVNETA_PMACC2_PCSEN;
644 		break;
645 	case PHY_MODE_SGMII:
646 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
647 		    MVNETA_SERDESCFG_SGMII_PROTO);
648 		ctl2 |= MVNETA_PMACC2_PCSEN;
649 		break;
650 	case PHY_MODE_1000BASEX:
651 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
652 		    MVNETA_SERDESCFG_SGMII_PROTO);
653 		ctl2 |= MVNETA_PMACC2_PCSEN;
654 		break;
655 	case PHY_MODE_2500BASEX:
656 		MVNETA_WRITE(sc, MVNETA_SERDESCFG,
657 		    MVNETA_SERDESCFG_HSGMII_PROTO);
658 		ctl2 |= MVNETA_PMACC2_PCSEN;
659 		ctl4 |= MVNETA_PMACC4_SHORT_PREAMBLE;
660 		break;
661 	default:
662 		break;
663 	}
664 
665 	/* Use Auto-Negotiation for Inband Status only */
666 	if (sc->sc_inband_status) {
667 		panc &= ~(MVNETA_PANC_FORCELINKFAIL |
668 		    MVNETA_PANC_FORCELINKPASS);
669 		/* TODO: read mode from SFP */
670 		if (1) {
671 			/* 802.3z */
672 			ctl0 |= MVNETA_PMACC0_PORTTYPE;
673 			panc |= (MVNETA_PANC_INBANDANEN |
674 			    MVNETA_PANC_SETGMIISPEED |
675 			    MVNETA_PANC_SETFULLDX);
676 		} else {
677 			/* SGMII */
678 			ctl2 |= MVNETA_PMACC2_INBANDAN;
679 			panc |= (MVNETA_PANC_INBANDANEN |
680 			    MVNETA_PANC_ANSPEEDEN |
681 			    MVNETA_PANC_ANDUPLEXEN);
682 		}
683 		MVNETA_WRITE(sc, MVNETA_OMSCD,
684 		    MVNETA_READ(sc, MVNETA_OMSCD) | MVNETA_OMSCD_1MS_CLOCK_ENABLE);
685 	} else {
686 		MVNETA_WRITE(sc, MVNETA_OMSCD,
687 		    MVNETA_READ(sc, MVNETA_OMSCD) & ~MVNETA_OMSCD_1MS_CLOCK_ENABLE);
688 	}
689 
690 	MVNETA_WRITE(sc, MVNETA_PMACC0, ctl0);
691 	MVNETA_WRITE(sc, MVNETA_PMACC2, ctl2);
692 	MVNETA_WRITE(sc, MVNETA_PMACC4, ctl4);
693 	MVNETA_WRITE(sc, MVNETA_PANC, panc);
694 
695 	/* Port reset */
696 	while (MVNETA_READ(sc, MVNETA_PMACC2) & MVNETA_PMACC2_PORTMACRESET)
697 		;
698 
699 	sc->sc_ih = fdt_intr_establish(faa->fa_node, IPL_NET | IPL_MPSAFE,
700 	    mvneta_intr, sc, sc->sc_dev.dv_xname);
701 
702 	ifp = &sc->sc_ac.ac_if;
703 	ifp->if_softc = sc;
704 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
705 	ifp->if_xflags = IFXF_MPSAFE;
706 	ifp->if_qstart = mvneta_start;
707 	ifp->if_ioctl = mvneta_ioctl;
708 	ifp->if_watchdog = mvneta_watchdog;
709 	ifp->if_capabilities = IFCAP_VLAN_MTU;
710 
711 #if notyet
712 	/*
713 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
714 	 */
715 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
716 				IFCAP_CSUM_UDPv4;
717 
718 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
719 	/*
720 	 * But, IPv6 packets in the stream can cause incorrect TCPv4 Tx sums.
721 	 */
722 	ifp->if_capabilities &= ~IFCAP_CSUM_TCPv4;
723 #endif
724 
725 	ifq_init_maxlen(&ifp->if_snd, max(MVNETA_TX_RING_CNT - 1, IFQ_MAXLEN));
726 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
727 
728 	/*
729 	 * Do MII setup.
730 	 */
731 	sc->sc_mii.mii_ifp = ifp;
732 	sc->sc_mii.mii_readreg = mvneta_miibus_readreg;
733 	sc->sc_mii.mii_writereg = mvneta_miibus_writereg;
734 	sc->sc_mii.mii_statchg = mvneta_miibus_statchg;
735 
736 	ifmedia_init(&sc->sc_mii.mii_media, 0,
737 	    mvneta_mediachange, mvneta_mediastatus);
738 
739 	config_defer(self, mvneta_attach_deferred);
740 }
741 
/*
 * Deferred part of attach, run once the MDIO bus (if any) has attached.
 * Probes the PHY, or for fixed-link/in-band setups synthesizes media
 * state, then performs the MI ifnet attachment and kstat setup.
 */
void
mvneta_attach_deferred(struct device *self)
{
	struct mvneta_softc *sc = (struct mvneta_softc *) self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int mii_flags = 0;

	if (!sc->sc_fixed_link) {
		sc->sc_mdio = mii_byphandle(sc->sc_phy);
		if (sc->sc_mdio == NULL) {
			printf("%s: mdio bus not yet attached\n", self->dv_xname);
			return;
		}

		/* Tell the PHY driver how the MAC is wired to it. */
		switch (sc->sc_phy_mode) {
		case PHY_MODE_1000BASEX:
			mii_flags |= MIIF_IS_1000X;
			break;
		case PHY_MODE_SGMII:
			mii_flags |= MIIF_SGMII;
			break;
		case PHY_MODE_RGMII_ID:
			mii_flags |= MIIF_RXID | MIIF_TXID;
			break;
		default:
			break;
		}

		mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
		    MII_OFFSET_ANY, mii_flags);
		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			/* No PHY answered: fall back to manual media. */
			printf("%s: no PHY found!\n", self->dv_xname);
			ifmedia_add(&sc->sc_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	} else {
		/* Fixed link: a single "auto" media entry. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

		if (sc->sc_inband_status) {
			/* Seed media for 802.3z modes before first statchg. */
			switch (sc->sc_phy_mode) {
			case PHY_MODE_1000BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_1000_KX|IFM_FDX;
				break;
			case PHY_MODE_2500BASEX:
				sc->sc_mii.mii_media_active =
				    IFM_ETHER|IFM_2500_KX|IFM_FDX;
				break;
			default:
				break;
			}
			mvneta_inband_statchg(sc);
		} else {
			/* No status source: assume 1000baseT full duplex. */
			sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
			sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
			mvneta_miibus_statchg(self);
		}

		ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
		ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* Register the fdt node so this ifp can be found by phandle. */
	sc->sc_ifd.if_node = sc->sc_node;
	sc->sc_ifd.if_ifp = ifp;
	if_register(&sc->sc_ifd);

#if NKSTAT > 0
	mvneta_kstat_attach(sc);
#endif
}
821 
822 void
mvneta_tick(void * arg)823 mvneta_tick(void *arg)
824 {
825 	struct mvneta_softc *sc = arg;
826 	struct mii_data *mii = &sc->sc_mii;
827 	int s;
828 
829 	s = splnet();
830 	mii_tick(mii);
831 	splx(s);
832 
833 	timeout_add_sec(&sc->sc_tick_ch, 1);
834 }
835 
/*
 * Interrupt handler.  Reads the rx/tx threshold summary cause register;
 * miscellaneous causes (PHY/link status changes) are acknowledged and
 * handled under the kernel lock, while queue-0 tx completions and rx
 * packets are processed lock-free when the interface is running.
 */
int
mvneta_intr(void *arg)
{
	struct mvneta_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t ic, misc;

	ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);

	if (ic & MVNETA_PRXTXTI_PMISCICSUMMARY) {
		/* Link-change handling runs KERNEL_LOCKed. */
		KERNEL_LOCK();
		misc = MVNETA_READ(sc, MVNETA_PMIC);
		MVNETA_WRITE(sc, MVNETA_PMIC, 0);
		if (sc->sc_inband_status && (misc &
		    (MVNETA_PMI_PHYSTATUSCHNG |
		    MVNETA_PMI_LINKCHANGE |
		    MVNETA_PMI_PSCSYNCCHNG))) {
			mvneta_inband_statchg(sc);
		}
		KERNEL_UNLOCK();
	}

	/* Skip queue processing while the interface is down. */
	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return 1;

	if (ic & MVNETA_PRXTXTI_TBTCQ(0))
		mvneta_tx_proc(sc);

	if (ISSET(ic, MVNETA_PRXTXTI_RBICTAPQ(0) | MVNETA_PRXTXTI_RDTAQ(0)))
		mvneta_rx_proc(sc);

	return 1;
}
869 
870 static inline int
mvneta_load_mbuf(struct mvneta_softc * sc,bus_dmamap_t map,struct mbuf * m)871 mvneta_load_mbuf(struct mvneta_softc *sc, bus_dmamap_t map, struct mbuf *m)
872 {
873 	int error;
874 
875 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
876 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
877 	switch (error) {
878 	case EFBIG:
879 		error = m_defrag(m, M_DONTWAIT);
880 		if (error != 0)
881 			break;
882 
883 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
884 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
885 		if (error != 0)
886 			break;
887 
888 		/* FALLTHROUGH */
889 	case 0:
890 		return (0);
891 
892 	default:
893 		break;
894 	}
895 
896         return (error);
897 }
898 
/*
 * Fill tx descriptors for an already-DMA-loaded mbuf, starting at ring
 * slot "prod".  The first descriptor carries FIRST_DESC plus the
 * padding/checksum command bits; the last descriptor written gets
 * LAST_DESC.  The caller guarantees map->dm_nsegs >= 1 (a successful
 * bus_dmamap_load_mbuf), so txd is always valid after the loop.
 */
static inline void
mvneta_encap(struct mvneta_softc *sc, bus_dmamap_t map, struct mbuf *m,
    unsigned int prod)
{
	struct mvneta_tx_desc *txd;
	uint32_t cmdsts;
	unsigned int i;

	cmdsts = MVNETA_TX_FIRST_DESC | MVNETA_TX_ZERO_PADDING |
	    MVNETA_TX_L4_CSUM_NOT;
#if notyet
	int m_csumflags;
	if (m_csumflags & M_CSUM_IPv4)
		cmdsts |= MVNETA_TX_GENERATE_IP_CHKSUM;
	if (m_csumflags & M_CSUM_TCPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_TCP;
	if (m_csumflags & M_CSUM_UDPv4)
		cmdsts |=
		    MVNETA_TX_GENERATE_L4_CHKSUM | MVNETA_TX_L4_TYPE_UDP;
	if (m_csumflags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		const int iphdr_unitlen = sizeof(struct ip) / sizeof(uint32_t);

		cmdsts |= MVNETA_TX_IP_NO_FRAG |
		    MVNETA_TX_IP_HEADER_LEN(iphdr_unitlen);	/* unit is 4B */
	}
#endif

	/* One descriptor per DMA segment; only the first carries cmdsts. */
	for (i = 0; i < map->dm_nsegs; i++) {
		txd = &sc->sc_txdesc[prod];
		txd->bytecnt = map->dm_segs[i].ds_len;
		txd->l4ichk = 0;
		txd->cmdsts = cmdsts;
		txd->nextdescptr = 0;
		txd->bufptr = map->dm_segs[i].ds_addr;
		txd->_padding[0] = 0;
		txd->_padding[1] = 0;
		txd->_padding[2] = 0;
		txd->_padding[3] = 0;

		prod = MVNETA_TX_RING_NEXT(prod);
		cmdsts = 0;
	}
	/* txd still points at the final segment's descriptor. */
	txd->cmdsts |= MVNETA_TX_LAST_DESC;
}
944 
945 static inline void
mvneta_sync_txring(struct mvneta_softc * sc,int ops)946 mvneta_sync_txring(struct mvneta_softc *sc, int ops)
947 {
948 	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
949 	    MVNETA_DMA_LEN(sc->sc_txring), ops);
950 }
951 
/*
 * Transmit start routine (MPSAFE, called per ifqueue).  Dequeues
 * packets, DMA-loads and encapsulates them into the tx ring, and hands
 * new descriptors to the hardware via PTXSU, never exceeding the
 * per-write PTXSU maximum.
 */
void
mvneta_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct mvneta_softc *sc = ifp->if_softc;
	unsigned int prod, nprod, free, used = 0, nused;
	struct mbuf *m;
	bus_dmamap_t map;

	/* If Link is DOWN, can't start TX */
	if (!MVNETA_IS_LINKUP(sc)) {
		ifq_purge(ifq);
		return;
	}

	mvneta_sync_txring(sc, BUS_DMASYNC_POSTWRITE);

	prod = sc->sc_tx_prod;
	free = MVNETA_TX_RING_CNT - (prod - sc->sc_tx_cons);

	for (;;) {
		/* Stop while a maximally fragmented packet may not fit. */
		if (free < MVNETA_NTXSEG - 1) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		map = sc->sc_txbuf[prod].tb_map;
		if (mvneta_load_mbuf(sc, map, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++; /* XXX atomic */
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		mvneta_encap(sc, map, m, prod);

		/*
		 * For multi-segment packets, move the loaded map to the
		 * packet's last slot (swapping with the map parked there)
		 * so tx completion finds it alongside tb_m.
		 */
		if (map->dm_nsegs > 1) {
			nprod = (prod + (map->dm_nsegs - 1)) %
			    MVNETA_TX_RING_CNT;
			sc->sc_txbuf[prod].tb_map = sc->sc_txbuf[nprod].tb_map;
			prod = nprod;
			sc->sc_txbuf[prod].tb_map = map;
		}
		sc->sc_txbuf[prod].tb_m = m;
		prod = MVNETA_TX_RING_NEXT(prod);

		free -= map->dm_nsegs;

		/*
		 * PTXSU can only report a bounded number of new
		 * descriptors per write; flush early when exceeded.
		 */
		nused = used + map->dm_nsegs;
		if (nused > MVNETA_PTXSU_MAX) {
			mvneta_sync_txring(sc,
			    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTWRITE);
			MVNETA_WRITE(sc, MVNETA_PTXSU(0),
			    MVNETA_PTXSU_NOWD(used));
			used = map->dm_nsegs;
		} else
			used = nused;
	}

	mvneta_sync_txring(sc, BUS_DMASYNC_PREWRITE);

	sc->sc_tx_prod = prod;
	/* Tell the hardware about any remaining new descriptors. */
	if (used)
		MVNETA_WRITE(sc, MVNETA_PTXSU(0), MVNETA_PTXSU_NOWD(used));
}
1028 
/*
 * Interface ioctl handler, run at splnet().  An ENETRESET result from
 * any case (including ether_ioctl()) is resolved at the bottom by
 * reprogramming the Rx filter while the interface keeps running.
 */
int
mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvneta_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvneta_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvneta_down(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		DPRINTFN(2, ("mvneta_ioctl MEDIA\n"));
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCGIFRXR:
		/* Export Rx ring accounting (e.g. for systat). */
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;
	case SIOCGIFSFFPAGE:
		/* Serialize SFP EEPROM page reads. */
		error = rw_enter(&mvneta_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvneta_sff_lock);
		break;
	default:
		DPRINTFN(2, ("mvneta_ioctl ETHER\n"));
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvneta_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}
1086 
1087 void
mvneta_port_change(struct mvneta_softc * sc)1088 mvneta_port_change(struct mvneta_softc *sc)
1089 {
1090 	if (!!(sc->sc_mii.mii_media_status & IFM_ACTIVE) != sc->sc_link) {
1091 		sc->sc_link = !sc->sc_link;
1092 
1093 		if (sc->sc_link) {
1094 			if (!sc->sc_inband_status) {
1095 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
1096 				panc &= ~MVNETA_PANC_FORCELINKFAIL;
1097 				panc |= MVNETA_PANC_FORCELINKPASS;
1098 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
1099 			}
1100 			mvneta_port_up(sc);
1101 		} else {
1102 			if (!sc->sc_inband_status) {
1103 				uint32_t panc = MVNETA_READ(sc, MVNETA_PANC);
1104 				panc &= ~MVNETA_PANC_FORCELINKPASS;
1105 				panc |= MVNETA_PANC_FORCELINKFAIL;
1106 				MVNETA_WRITE(sc, MVNETA_PANC, panc);
1107 			}
1108 		}
1109 	}
1110 }
1111 
/*
 * Enable the Rx and Tx queues so the port starts moving packets.
 */
void
mvneta_port_up(struct mvneta_softc *sc)
{
	/* Enable port RX/TX. */
	MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_ENQ(0));
	MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(0));
}
1119 
/*
 * Bring the interface up: allocate and program the Tx/Rx descriptor
 * rings and their DMA maps, configure the MAC, program the address
 * filters, enable the port and unmask interrupts.  Always returns 0
 * (allocation uses M_WAITOK/BUS_DMA_WAITOK and cannot fail here).
 */
int
mvneta_up(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_buf *txb, *rxb;
	int i;

	DPRINTFN(2, ("mvneta_up\n"));

	/* Allocate Tx descriptor ring. */
	sc->sc_txring = mvneta_dmamem_alloc(sc,
	    MVNETA_TX_RING_CNT * sizeof(struct mvneta_tx_desc), 32);
	sc->sc_txdesc = MVNETA_DMA_KVA(sc->sc_txring);

	sc->sc_txbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_TX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVNETA_NTXSEG,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->tb_map);
		txb->tb_m = NULL;
	}

	sc->sc_tx_prod = sc->sc_tx_cons = 0;

	/* Allocate Rx descriptor ring. */
	sc->sc_rxring = mvneta_dmamem_alloc(sc,
	    MVNETA_RX_RING_CNT * sizeof(struct mvneta_rx_desc), 32);
	sc->sc_rxdesc = MVNETA_DMA_KVA(sc->sc_rxring);

	sc->sc_rxbuf = malloc(sizeof(struct mvneta_buf) * MVNETA_RX_RING_CNT,
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		/* Rx buffers are single-segment mbuf clusters. */
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->tb_map);
		rxb->tb_m = NULL;
	}

	/* Set Rx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PRXDQA(0), MVNETA_DMA_DVA(sc->sc_rxring));
	MVNETA_WRITE(sc, MVNETA_PRXDQS(0), MVNETA_RX_RING_CNT |
	    ((MCLBYTES >> 3) << 19));

	if (sc->sc_clk_freq != 0) {
		/*
		 * Use the Non Occupied Descriptors Threshold to
		 * interrupt when the descriptors granted by rxr are
		 * used up, otherwise wait until the RX Interrupt
		 * Time Threshold is reached.
		 */
		MVNETA_WRITE(sc, MVNETA_PRXDQTH(0),
		    MVNETA_PRXDQTH_ODT(MVNETA_RX_RING_CNT) |
		    MVNETA_PRXDQTH_NODT(2));
		MVNETA_WRITE(sc, MVNETA_PRXITTH(0), sc->sc_clk_freq / 4000);
	} else {
		/* Time based moderation is hard without a clock */
		MVNETA_WRITE(sc, MVNETA_PRXDQTH(0), 0);
		MVNETA_WRITE(sc, MVNETA_PRXITTH(0), 0);
	}

	MVNETA_WRITE(sc, MVNETA_PRXC(0), 0);

	/* Set Tx queue bandwidth. */
	MVNETA_WRITE(sc, MVNETA_TQTBCOUNT(0), 0x03ffffff);
	MVNETA_WRITE(sc, MVNETA_TQTBCONFIG(0), 0x03ffffff);

	/* Set Tx descriptor ring data. */
	MVNETA_WRITE(sc, MVNETA_PTXDQA(0), MVNETA_DMA_DVA(sc->sc_txring));
	MVNETA_WRITE(sc, MVNETA_PTXDQS(0),
	    MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT) |
	    MVNETA_PTXDQS_TBT(MIN(MVNETA_TX_RING_CNT / 2, ifp->if_txmit)));

	sc->sc_rx_prod = sc->sc_rx_cons = 0;

	if_rxr_init(&sc->sc_rx_ring, 2, MVNETA_RX_RING_CNT);
	mvneta_fill_rx_ring(sc);

	/* TODO: correct frame size */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    (MVNETA_READ(sc, MVNETA_PMACC0) & MVNETA_PMACC0_PORTTYPE) |
	    MVNETA_PMACC0_FRAMESIZELIMIT(MCLBYTES - MVNETA_HWHEADER_SIZE));

	/* set max MTU */
	MVNETA_WRITE(sc, MVNETA_TXMTU, MVNETA_TXMTU_MAX);
	MVNETA_WRITE(sc, MVNETA_TXTKSIZE, 0xffffffff);
	MVNETA_WRITE(sc, MVNETA_TXQTKSIZE(0), 0x7fffffff);

	/* enable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) | MVNETA_PMACC0_PORTEN);

	mvneta_enaddr_write(sc);

	/* Program promiscuous mode and multicast filters. */
	mvneta_iff(sc);

	if (!sc->sc_fixed_link)
		mii_mediachg(&sc->sc_mii);

	if (sc->sc_link)
		mvneta_port_up(sc);

	/* Enable interrupt masks */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_RBICTAPQ(0) |
	    MVNETA_PRXTXTI_TBTCQ(0) | MVNETA_PRXTXTI_RDTAQ(0) |
	    MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
	    MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHNG);

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
1239 
/*
 * Bring the interface down: stop the Rx/Tx queues, wait (bounded busy
 * loops) for DMA activity to drain, disable the port, mask and clear
 * interrupts, release all ring buffers/maps, and reset both DMA
 * engines.  Mirrors the allocations done in mvneta_up().
 */
void
mvneta_down(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t reg, txinprog, txfifoemp;
	struct mvneta_buf *txb, *rxb;
	int i, cnt;

	DPRINTFN(2, ("mvneta_down\n"));

	timeout_del(&sc->sc_tick_ch);
	ifp->if_flags &= ~IFF_RUNNING;
	/* Wait for a possibly in-flight interrupt handler to finish. */
	intr_barrier(sc->sc_ih);

	/* Stop Rx port activity. Check port Rx activity. */
	reg = MVNETA_READ(sc, MVNETA_RQC);
	if (reg & MVNETA_RQC_ENQ_MASK)
		/* Issue stop command for active channels only */
		MVNETA_WRITE(sc, MVNETA_RQC, MVNETA_RQC_DISQ_DISABLE(reg));

	/* Stop Tx port activity. Check port Tx activity. */
	if (MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_ENQ(0))
		MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_DISQ(0));

	txinprog = MVNETA_PS_TXINPROG_(0);
	txfifoemp = MVNETA_PS_TXFIFOEMP_(0);

#define RX_DISABLE_TIMEOUT		0x1000000
#define TX_FIFO_EMPTY_TIMEOUT		0x1000000
	/* Wait for all Rx activity to terminate. */
	cnt = 0;
	do {
		if (cnt >= RX_DISABLE_TIMEOUT) {
			printf("%s: timeout for RX stopped. rqc 0x%x\n",
			    sc->sc_dev.dv_xname, reg);
			break;
		}
		cnt++;

		/*
		 * Check Receive Queue Command register that all Rx queues
		 * are stopped
		 */
		reg = MVNETA_READ(sc, MVNETA_RQC);
	} while (reg & 0xff);

	/* Double check to verify that TX FIFO is empty */
	cnt = 0;
	while (1) {
		do {
			if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
				printf("%s: timeout for TX FIFO empty. status "
				    "0x%x\n", sc->sc_dev.dv_xname, reg);
				break;
			}
			cnt++;

			reg = MVNETA_READ(sc, MVNETA_PS);
		} while (!(reg & txfifoemp) || reg & txinprog);

		if (cnt >= TX_FIFO_EMPTY_TIMEOUT)
			break;

		/* Double check */
		reg = MVNETA_READ(sc, MVNETA_PS);
		if (reg & txfifoemp && !(reg & txinprog))
			break;
		else
			printf("%s: TX FIFO empty double check failed."
			    " %d loops, status 0x%x\n", sc->sc_dev.dv_xname,
			    cnt, reg);
	}

	delay(200);

	/* disable port */
	MVNETA_WRITE(sc, MVNETA_PMACC0,
	    MVNETA_READ(sc, MVNETA_PMACC0) & ~MVNETA_PMACC0_PORTEN);
	delay(200);

	/* mask all interrupts */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIM, MVNETA_PRXTXTI_PMISCICSUMMARY);
	MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);

	/* clear all cause registers */
	MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
	MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
		txb = &sc->sc_txbuf[i];
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);
			m_freem(txb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, txb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_txring);
	free(sc->sc_txbuf, M_DEVBUF, 0);

	for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
		rxb = &sc->sc_rxbuf[i];
		if (rxb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
			    rxb->tb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);
			m_freem(rxb->tb_m);
		}
		bus_dmamap_destroy(sc->sc_dmat, rxb->tb_map);
	}

	mvneta_dmamem_free(sc, sc->sc_rxring);
	free(sc->sc_rxbuf, M_DEVBUF, 0);

	/* reset RX and TX DMAs */
	MVNETA_WRITE(sc, MVNETA_PRXINIT, MVNETA_PRXINIT_RXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, MVNETA_PTXINIT_TXDMAINIT);
	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0);
	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0);

	ifq_clr_oactive(&ifp->if_snd);
}
1365 
1366 void
mvneta_watchdog(struct ifnet * ifp)1367 mvneta_watchdog(struct ifnet *ifp)
1368 {
1369 	struct mvneta_softc *sc = ifp->if_softc;
1370 
1371 	/*
1372 	 * Reclaim first as there is a possibility of losing Tx completion
1373 	 * interrupts.
1374 	 */
1375 	mvneta_tx_proc(sc);
1376 	if (sc->sc_tx_prod != sc->sc_tx_cons) {
1377 		printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1378 
1379 		ifp->if_oerrors++;
1380 	}
1381 }
1382 
1383 /*
1384  * Set media options.
1385  */
1386 int
mvneta_mediachange(struct ifnet * ifp)1387 mvneta_mediachange(struct ifnet *ifp)
1388 {
1389 	struct mvneta_softc *sc = ifp->if_softc;
1390 
1391 	if (LIST_FIRST(&sc->sc_mii.mii_phys))
1392 		mii_mediachg(&sc->sc_mii);
1393 
1394 	return (0);
1395 }
1396 
1397 /*
1398  * Report current media status.
1399  */
1400 void
mvneta_mediastatus(struct ifnet * ifp,struct ifmediareq * ifmr)1401 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1402 {
1403 	struct mvneta_softc *sc = ifp->if_softc;
1404 
1405 	if (LIST_FIRST(&sc->sc_mii.mii_phys)) {
1406 		mii_pollstat(&sc->sc_mii);
1407 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1408 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1409 	}
1410 
1411 	if (sc->sc_fixed_link) {
1412 		ifmr->ifm_active = sc->sc_mii.mii_media_active;
1413 		ifmr->ifm_status = sc->sc_mii.mii_media_status;
1414 	}
1415 }
1416 
/*
 * Rx completion: hand finished packets to the stack and return the
 * consumed descriptors to the hardware through PRXSU, which can only
 * take MVNETA_PRXSU_MAX slots per write.
 */
void
mvneta_rx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t rxstat;
	unsigned int i, done, cons;

	/* Number of descriptors the hardware has finished with. */
	done = MVNETA_PRXS_ODC(MVNETA_READ(sc, MVNETA_PRXS(0)));
	if (done == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_POSTREAD);

	cons = sc->sc_rx_cons;

	for (i = 0; i < done; i++) {
		rxd = &sc->sc_rxdesc[cons];
		rxb = &sc->sc_rxbuf[cons];

		m = rxb->tb_m;
		rxb->tb_m = NULL;

		bus_dmamap_sync(sc->sc_dmat, rxb->tb_map, 0,
		    m->m_pkthdr.len, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->tb_map);

		rxstat = rxd->cmdsts;
		if (rxstat & MVNETA_ERROR_SUMMARY) {
#if 0
			int err = rxstat & MVNETA_RX_ERROR_CODE_MASK;

			if (err == MVNETA_RX_CRC_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_OVERRUN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_MAX_FRAME_LEN_ERROR)
				ifp->if_ierrors++;
			if (err == MVNETA_RX_RESOURCE_ERROR)
				ifp->if_ierrors++;
#else
			ifp->if_ierrors++;
#endif
			m_freem(m);
		} else {
			/* Strip the Marvell hardware header. */
			m->m_pkthdr.len = m->m_len = rxd->bytecnt;
			m_adj(m, MVNETA_HWHEADER_SIZE);

			ml_enqueue(&ml, m);
		}

#if notyet
		if (rxstat & MVNETA_RX_IP_FRAME_TYPE) {
			int flgs = 0;

			/* Check IPv4 header checksum */
			flgs |= M_CSUM_IPv4;
			if (!(rxstat & MVNETA_RX_IP_HEADER_OK))
				flgs |= M_CSUM_IPv4_BAD;
			else if ((bufsize & MVNETA_RX_IP_FRAGMENT) == 0) {
				/*
				 * Check TCPv4/UDPv4 checksum for
				 * non-fragmented packet only.
				 *
				 * It seemd that sometimes
				 * MVNETA_RX_L4_CHECKSUM_OK bit was set to 0
				 * even if the checksum is correct and the
				 * packet was not fragmented. So we don't set
				 * M_CSUM_TCP_UDP_BAD even if csum bit is 0.
				 */

				if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
					MVNETA_RX_L4_TYPE_TCP) &&
				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_TCPv4;
				else if (((rxstat & MVNETA_RX_L4_TYPE_MASK) ==
					MVNETA_RX_L4_TYPE_UDP) &&
				    ((rxstat & MVNETA_RX_L4_CHECKSUM_OK) != 0))
					flgs |= M_CSUM_UDPv4;
			}
			m->m_pkthdr.csum_flags = flgs;
		}
#endif

		if_rxr_put(&sc->sc_rx_ring, 1);

		cons = MVNETA_RX_RING_NEXT(cons);

		/* Flush a full PRXSU batch and restart the count. */
		if (i == MVNETA_PRXSU_MAX) {
			MVNETA_WRITE(sc, MVNETA_PRXSU(0),
			    MVNETA_PRXSU_NOPD(MVNETA_PRXSU_MAX));

			/* tweaking the iterator inside the loop is fun */
			done -= MVNETA_PRXSU_MAX;
			i = 0;
		}
	}

	sc->sc_rx_cons = cons;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREREAD);

	/* Report the remaining processed descriptors. */
	if (i > 0) {
		MVNETA_WRITE(sc, MVNETA_PRXSU(0),
		    MVNETA_PRXSU_NOPD(i));
	}

	if (ifiq_input(&ifp->if_rcv, &ml))
		if_rxr_livelocked(&sc->sc_rx_ring);

	mvneta_fill_rx_ring(sc);
}
1534 
/*
 * Tx completion: reclaim transmitted descriptors, free their mbufs
 * and DMA maps, account errors, and report the released buffers back
 * through PTXSU (at most MVNETA_PTXSU_MAX per write).
 */
void
mvneta_tx_proc(struct mvneta_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ifqueue *ifq = &ifp->if_snd;
	struct mvneta_tx_desc *txd;
	struct mvneta_buf *txb;
	unsigned int i, cons, done;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	/* Number of descriptors the hardware has transmitted. */
	done = MVNETA_PTXS_TBC(MVNETA_READ(sc, MVNETA_PTXS(0)));
	if (done == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_POSTREAD);

	cons = sc->sc_tx_cons;

	for (i = 0; i < done; i++) {
		txd = &sc->sc_txdesc[cons];
		txb = &sc->sc_txbuf[cons];

		/* Only the slot holding the mbuf has a loaded map. */
		if (txb->tb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->tb_map, 0,
			    txb->tb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->tb_map);

			m_freem(txb->tb_m);
			txb->tb_m = NULL;
		}

		if (txd->cmdsts & MVNETA_ERROR_SUMMARY) {
			int err = txd->cmdsts & MVNETA_TX_ERROR_CODE_MASK;

			if (err == MVNETA_TX_LATE_COLLISION_ERROR)
				ifp->if_collisions++;
			if (err == MVNETA_TX_UNDERRUN_ERROR)
				ifp->if_oerrors++;
			if (err == MVNETA_TX_EXCESSIVE_COLLISION_ERRO)
				ifp->if_collisions++;
		}

		cons = MVNETA_TX_RING_NEXT(cons);

		/* Flush a full PTXSU batch and restart the count. */
		if (i == MVNETA_PTXSU_MAX) {
			MVNETA_WRITE(sc, MVNETA_PTXSU(0),
			    MVNETA_PTXSU_NORB(MVNETA_PTXSU_MAX));

			/* tweaking the iterator inside the loop is fun */
			done -= MVNETA_PTXSU_MAX;
			i = 0;
		}
	}

	sc->sc_tx_cons = cons;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_txring), 0,
	    MVNETA_DMA_LEN(sc->sc_txring),
	    BUS_DMASYNC_PREREAD);

	/* Report the remaining released buffers. */
	if (i > 0) {
		MVNETA_WRITE(sc, MVNETA_PTXSU(0),
		    MVNETA_PTXSU_NORB(i));
	}
	/* Freed descriptors may allow transmission to resume. */
	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);
}
1606 
/*
 * Compute a CRC-8 (polynomial 0x07, MSB first, initial value 0) over
 * the given buffer; used to hash multicast addresses into the filter
 * table.
 */
uint8_t
mvneta_crc8(const uint8_t *data, size_t size)
{
	const uint8_t poly = 0x07;
	uint8_t crc = 0;
	size_t n;
	int bit;

	for (n = 0; n < size; n++) {
		uint8_t byte = data[n];

		for (bit = NBBY - 1; bit >= 0; bit--) {
			uint8_t mix = ((crc >> 7) ^ (byte >> bit)) & 1;

			crc <<= 1;
			if (mix)
				crc ^= poly;
		}
	}

	return crc;
}
1621 
1622 CTASSERT(MVNETA_NDFSMT == MVNETA_NDFOMT);
1623 
/*
 * Program promiscuous mode and the unicast/multicast destination
 * address filter tables from the current interface state.
 */
void
mvneta_iff(struct mvneta_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
	uint32_t pxc;
	int i;
	/* IPv4 multicast prefix 01:00:5e:00:00:xx, see below. */
	const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00};

	pxc = MVNETA_READ(sc, MVNETA_PXC);
	pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP | MVNETA_PXC_UPM);
	ifp->if_flags &= ~IFF_ALLMULTI;
	memset(dfut, 0, sizeof(dfut));
	memset(dfsmt, 0, sizeof(dfsmt));
	memset(dfomt, 0, sizeof(dfomt));

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Accept all multicast (and, if PROMISC, all unicast). */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			pxc |= MVNETA_PXC_UPM;
		for (i = 0; i < MVNETA_NDFSMT; i++) {
			dfsmt[i] = dfomt[i] =
			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
		}
	} else {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* chip handles some IPv4 multicast specially */
			if (memcmp(enm->enm_addrlo, special, 5) == 0) {
				/* Indexed by the last address byte. */
				i = enm->enm_addrlo[5];
				dfsmt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			} else {
				/* Other multicast hashed by CRC-8. */
				i = mvneta_crc8(enm->enm_addrlo, ETHER_ADDR_LEN);
				dfomt[i>>2] |=
				    MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	MVNETA_WRITE(sc, MVNETA_PXC, pxc);

	/* Set Destination Address Filter Unicast Table */
	i = sc->sc_enaddr[5] & 0xf;		/* last nibble */
	dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFUT, dfut, MVNETA_NDFUT);

	/* Set Destination Address Filter Multicast Tables */
	MVNETA_WRITE_FILTER(sc, MVNETA_DFSMT, dfsmt, MVNETA_NDFSMT);
	MVNETA_WRITE_FILTER(sc, MVNETA_DFOMT, dfomt, MVNETA_NDFOMT);
}
1683 
/*
 * Allocate, map and DMA-load a single contiguous, zeroed buffer (used
 * for the descriptor rings).  Returns NULL on failure; partially
 * acquired resources are released via the goto cleanup chain.
 */
struct mvneta_dmamem *
mvneta_dmamem_alloc(struct mvneta_softc *sc, bus_size_t size, bus_size_t align)
{
	struct mvneta_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_WAITOK|BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(mdm->mdm_kva, size);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, 0);

	return (NULL);
}
1724 
/*
 * Release a buffer obtained from mvneta_dmamem_alloc(): unmap, free
 * the DMA memory, destroy the map, then free the bookkeeping struct.
 */
void
mvneta_dmamem_free(struct mvneta_softc *sc, struct mvneta_dmamem *mdm)
{
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, 0);
}
1733 
1734 static inline struct mbuf *
mvneta_alloc_mbuf(struct mvneta_softc * sc,bus_dmamap_t map)1735 mvneta_alloc_mbuf(struct mvneta_softc *sc, bus_dmamap_t map)
1736 {
1737 	struct mbuf *m = NULL;
1738 
1739 	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1740 	if (m == NULL)
1741 		return (NULL);
1742 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1743 
1744 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
1745 		printf("%s: could not load mbuf DMA map", sc->sc_dev.dv_xname);
1746 		m_freem(m);
1747 		return (NULL);
1748 	}
1749 
1750 	bus_dmamap_sync(sc->sc_dmat, map, 0,
1751 	    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
1752 
1753 	return (m);
1754 }
1755 
/*
 * Refill the Rx ring with fresh mbuf clusters, limited by the if_rxr
 * accounting, and report the number of newly usable descriptors to
 * the hardware through PRXSU.
 */
void
mvneta_fill_rx_ring(struct mvneta_softc *sc)
{
	struct mvneta_rx_desc *rxd;
	struct mvneta_buf *rxb;
	unsigned int slots, used = 0;
	unsigned int prod;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_POSTWRITE);

	prod = sc->sc_rx_prod;

	for (slots = if_rxr_get(&sc->sc_rx_ring, MVNETA_PRXSU_MAX);
	    slots > 0; slots--) {
		rxb = &sc->sc_rxbuf[prod];
		rxb->tb_m = mvneta_alloc_mbuf(sc, rxb->tb_map);
		if (rxb->tb_m == NULL)
			break;

		/* Reset the descriptor; only bufptr is meaningful. */
		rxd = &sc->sc_rxdesc[prod];
		rxd->cmdsts = 0;
		rxd->bufsize = 0;
		rxd->bytecnt = 0;
		rxd->bufptr = rxb->tb_map->dm_segs[0].ds_addr;
		rxd->nextdescptr = 0;
		rxd->_padding[0] = 0;
		rxd->_padding[1] = 0;
		rxd->_padding[2] = 0;
		rxd->_padding[3] = 0;

		prod = MVNETA_RX_RING_NEXT(prod);
		used++;
	}
	/* Return the slots we could not fill. */
	if_rxr_put(&sc->sc_rx_ring, slots);

	sc->sc_rx_prod = prod;

	bus_dmamap_sync(sc->sc_dmat, MVNETA_DMA_MAP(sc->sc_rxring),
	    0, MVNETA_DMA_LEN(sc->sc_rxring), BUS_DMASYNC_PREWRITE);

	if (used > 0)
		MVNETA_WRITE(sc, MVNETA_PRXSU(0), MVNETA_PRXSU_NOND(used));
}
1800 
1801 #if NKSTAT > 0
1802 
/* this is used to sort and look up the array of kstats quickly */
/* Indexes into mvneta_counters[]; keep both in sync. */
enum mvneta_stat {
	mvneta_stat_good_octets_received,
	mvneta_stat_bad_octets_received,
	mvneta_stat_good_frames_received,
	mvneta_stat_mac_trans_error,
	mvneta_stat_bad_frames_received,
	mvneta_stat_broadcast_frames_received,
	mvneta_stat_multicast_frames_received,
	mvneta_stat_frames_64_octets,
	mvneta_stat_frames_65_to_127_octets,
	mvneta_stat_frames_128_to_255_octets,
	mvneta_stat_frames_256_to_511_octets,
	mvneta_stat_frames_512_to_1023_octets,
	mvneta_stat_frames_1024_to_max_octets,
	mvneta_stat_good_octets_sent,
	mvneta_stat_good_frames_sent,
	mvneta_stat_excessive_collision,
	mvneta_stat_multicast_frames_sent,
	mvneta_stat_broadcast_frames_sent,
	mvneta_stat_unrecog_mac_control_received,
	mvneta_stat_good_fc_received,
	mvneta_stat_bad_fc_received,
	mvneta_stat_undersize,
	mvneta_stat_fc_sent,
	mvneta_stat_fragments,
	mvneta_stat_oversize,
	mvneta_stat_jabber,
	mvneta_stat_mac_rcv_error,
	mvneta_stat_bad_crc,
	mvneta_stat_collisions,
	mvneta_stat_late_collisions,

	mvneta_stat_port_discard,
	mvneta_stat_port_overrun,

	mvnet_stat_count	/* number of counters, not a counter */
};
1841 
/* One hardware counter exported as a kstat key/value pair. */
struct mvneta_counter {
	const char		 *name;	/* kstat kv name */
	enum kstat_kv_unit	 unit;	/* bytes/packets/none */
	bus_size_t		 reg;	/* register offset; 0 = special */
};
1847 
/*
 * Counter table indexed by enum mvneta_stat.  reg == 0x0 marks the
 * two 64-bit octet counters that mvneta_kstat_read() assembles from
 * lo/hi register pairs.
 */
static const struct mvneta_counter mvneta_counters[] = {
	[mvneta_stat_good_octets_received] =
	    { "rx good",	KSTAT_KV_U_BYTES,	0x0 /* 64bit */ },
	[mvneta_stat_bad_octets_received] =
	    { "rx bad",		KSTAT_KV_U_BYTES,	0x3008 },
	[mvneta_stat_good_frames_received] =
	    { "rx good",	KSTAT_KV_U_PACKETS,	0x3010 },
	[mvneta_stat_mac_trans_error] =
	    { "tx mac error",	KSTAT_KV_U_PACKETS,	0x300c },
	[mvneta_stat_bad_frames_received] =
	    { "rx bad",		KSTAT_KV_U_PACKETS,	0x3014 },
	[mvneta_stat_broadcast_frames_received] =
	    { "rx bcast",	KSTAT_KV_U_PACKETS,	0x3018 },
	[mvneta_stat_multicast_frames_received] =
	    { "rx mcast",	KSTAT_KV_U_PACKETS,	0x301c },
	[mvneta_stat_frames_64_octets] =
	    { "64B",		KSTAT_KV_U_PACKETS,	0x3020 },
	[mvneta_stat_frames_65_to_127_octets] =
	    { "65-127B",	KSTAT_KV_U_PACKETS,	0x3024 },
	[mvneta_stat_frames_128_to_255_octets] =
	    { "128-255B",	KSTAT_KV_U_PACKETS,	0x3028 },
	[mvneta_stat_frames_256_to_511_octets] =
	    { "256-511B",	KSTAT_KV_U_PACKETS,	0x302c },
	[mvneta_stat_frames_512_to_1023_octets] =
	    { "512-1023B",	KSTAT_KV_U_PACKETS,	0x3030 },
	[mvneta_stat_frames_1024_to_max_octets] =
	    { "1024-maxB",	KSTAT_KV_U_PACKETS,	0x3034 },
	[mvneta_stat_good_octets_sent] =
	    { "tx good",	KSTAT_KV_U_BYTES,	0x0 /* 64bit */ },
	[mvneta_stat_good_frames_sent] =
	    { "tx good",	KSTAT_KV_U_PACKETS,	0x3040 },
	[mvneta_stat_excessive_collision] =
	    { "tx excess coll",	KSTAT_KV_U_PACKETS,	0x3044 },
	[mvneta_stat_multicast_frames_sent] =
	    { "tx mcast",	KSTAT_KV_U_PACKETS,	0x3048 },
	[mvneta_stat_broadcast_frames_sent] =
	    { "tx bcast",	KSTAT_KV_U_PACKETS,	0x304c },
	[mvneta_stat_unrecog_mac_control_received] =
	    { "rx unknown fc",	KSTAT_KV_U_PACKETS,	0x3050 },
	[mvneta_stat_good_fc_received] =
	    { "rx fc good",	KSTAT_KV_U_PACKETS,	0x3058 },
	[mvneta_stat_bad_fc_received] =
	    { "rx fc bad",	KSTAT_KV_U_PACKETS,	0x305c },
	[mvneta_stat_undersize] =
	    { "rx undersize",	KSTAT_KV_U_PACKETS,	0x3060 },
	[mvneta_stat_fc_sent] =
	    { "tx fc",		KSTAT_KV_U_PACKETS,	0x3054 },
	[mvneta_stat_fragments] =
	    { "rx fragments",	KSTAT_KV_U_NONE,	0x3064 },
	[mvneta_stat_oversize] =
	    { "rx oversize",	KSTAT_KV_U_PACKETS,	0x3068 },
	[mvneta_stat_jabber] =
	    { "rx jabber",	KSTAT_KV_U_PACKETS,	0x306c },
	[mvneta_stat_mac_rcv_error] =
	    { "rx mac errors",	KSTAT_KV_U_PACKETS,	0x3070 },
	[mvneta_stat_bad_crc] =
	    { "rx bad crc",	KSTAT_KV_U_PACKETS,	0x3074 },
	[mvneta_stat_collisions] =
	    { "rx colls",	KSTAT_KV_U_PACKETS,	0x3078 },
	[mvneta_stat_late_collisions] =
	    { "rx late colls",	KSTAT_KV_U_PACKETS,	0x307c },

	[mvneta_stat_port_discard] =
	    { "rx discard",	KSTAT_KV_U_PACKETS,	MVNETA_PXDFC },
	[mvneta_stat_port_overrun] =
	    { "rx overrun",	KSTAT_KV_U_PACKETS,	MVNETA_POFC },
};
1915 
1916 CTASSERT(nitems(mvneta_counters) == mvnet_stat_count);
1917 
/*
 * kstat read callback: fold the hardware MIB counters into the
 * kstat_kv array.  Most are 32-bit registers; the good-octet Rx/Tx
 * counters are 64-bit lo/hi pairs handled separately below.
 * NOTE(review): the += accumulation suggests the counters clear on
 * read — confirm against the NETA datasheet.
 */
int
mvneta_kstat_read(struct kstat *ks)
{
	struct mvneta_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;
	unsigned int i;
	uint32_t hi, lo;

	for (i = 0; i < nitems(mvneta_counters); i++) {
		const struct mvneta_counter *c = &mvneta_counters[i];
		if (c->reg == 0)
			continue;	/* 64-bit counters, see below */

		kstat_kv_u64(&kvs[i]) += (uint64_t)MVNETA_READ(sc, c->reg);
	}

	/* handle the exceptions */

	lo = MVNETA_READ(sc, 0x3000);
	hi = MVNETA_READ(sc, 0x3004);
	kstat_kv_u64(&kvs[mvneta_stat_good_octets_received]) +=
	    (uint64_t)hi << 32 | (uint64_t)lo;

	lo = MVNETA_READ(sc, 0x3038);
	hi = MVNETA_READ(sc, 0x303c);
	kstat_kv_u64(&kvs[mvneta_stat_good_octets_sent]) +=
	    (uint64_t)hi << 32 | (uint64_t)lo;

	nanouptime(&ks->ks_updated);

	return (0);
}
1950 
/*
 * Self-rearming 37-second timeout that periodically pulls the MIB
 * counters into the kstat.  mtx_enter_try() makes this round a no-op
 * if a kstat read already holds the lock.
 */
void
mvneta_kstat_tick(void *arg)
{
	struct mvneta_softc *sc = arg;

	timeout_add_sec(&sc->sc_kstat_tick, 37);

	if (mtx_enter_try(&sc->sc_kstat_lock)) {
		mvneta_kstat_read(sc->sc_kstat);
		mtx_leave(&sc->sc_kstat_lock);
	}
}
1963 
/*
 * Create and install the "mvneta-stats" kstat backed by the counter
 * table, guarded by a mutex shared with the periodic tick, and start
 * the tick.  Silently does nothing if kstat creation fails.
 */
void
mvneta_kstat_attach(struct mvneta_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	unsigned int i;

	mtx_init(&sc->sc_kstat_lock, IPL_SOFTCLOCK);
	timeout_set(&sc->sc_kstat_tick, mvneta_kstat_tick, sc);

	ks = kstat_create(sc->sc_dev.dv_xname, 0, "mvneta-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(mvneta_counters), sizeof(*kvs),
	    M_DEVBUF, M_WAITOK|M_ZERO);
	for (i = 0; i < nitems(mvneta_counters); i++) {
		const struct mvneta_counter *c = &mvneta_counters[i];
		kstat_kv_unit_init(&kvs[i], c->name,
		    KSTAT_KV_T_COUNTER64, c->unit);
	}

	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(mvneta_counters) * sizeof(*kvs);
	ks->ks_read = mvneta_kstat_read;
	kstat_set_mutex(ks, &sc->sc_kstat_lock);

	kstat_install(ks);

	sc->sc_kstat = ks;

	timeout_add_sec(&sc->sc_kstat_tick, 37);
}
1999 
2000 #endif
2001