1 /*	$OpenBSD: if_ogx.c,v 1.7 2024/05/20 23:13:33 jsg Exp $	*/
2 
3 /*
4  * Copyright (c) 2019-2020 Visa Hankala
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for OCTEON III network processor.
21  */
22 
23 #include "bpfilter.h"
24 #include "kstat.h"
25 
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <sys/atomic.h>
29 #include <sys/mutex.h>
30 #include <sys/rwlock.h>
31 #include <sys/device.h>
32 #include <sys/ioctl.h>
33 #include <sys/kstat.h>
34 #include <sys/socket.h>
35 #include <sys/stdint.h>
36 
37 #include <net/if.h>
38 #include <net/if_media.h>
39 #include <netinet/in.h>
40 #include <netinet/ip.h>
41 #include <netinet/if_ether.h>
42 
43 #if NBPFILTER > 0
44 #include <net/bpf.h>
45 #endif
46 
47 #ifdef INET6
48 #include <netinet/ip6.h>
49 #endif
50 
51 #include <dev/mii/mii.h>
52 #include <dev/mii/miivar.h>
53 
54 #include <dev/ofw/fdt.h>
55 #include <dev/ofw/openfirm.h>
56 
57 #include <machine/bus.h>
58 #include <machine/fdt.h>
59 #include <machine/octeonvar.h>
60 #include <machine/octeon_model.h>
61 
62 #include <octeon/dev/cn30xxsmivar.h>
63 #include <octeon/dev/ogxreg.h>
64 #include <octeon/dev/ogxvar.h>
65 
66 struct ogx_link_ops;
67 
68 struct ogx_softc {
69 	struct device		 sc_dev;
70 	struct arpcom		 sc_ac;
71 	unsigned int		 sc_bgxid;
72 	unsigned int		 sc_lmacid;
73 	unsigned int		 sc_ipdport;
74 	unsigned int		 sc_pkomac;
75 	unsigned int		 sc_rxused;
76 	unsigned int		 sc_txfree;
77 
78 	struct ogx_node		*sc_node;
79 	unsigned int		 sc_unit;	/* logical unit within node */
80 
81 	struct mii_data		 sc_mii;
82 #define sc_media	sc_mii.mii_media
83 	struct timeout		 sc_tick;
84 	struct cn30xxsmi_softc	*sc_smi;
85 
86 	struct timeout		 sc_rxrefill;
87 	void			*sc_rx_ih;
88 	void			*sc_tx_ih;
89 
90 	bus_space_tag_t		 sc_iot;
91 	bus_space_handle_t	 sc_port_ioh;
92 	bus_space_handle_t	 sc_nexus_ioh;
93 
94 	struct fpa3aura		 sc_pkt_aura;
95 	const struct ogx_link_ops *sc_link_ops;
96 	uint8_t			 sc_link_duplex;
97 
98 	struct mutex		 sc_kstat_mtx;
99 	struct timeout		 sc_kstat_tmo;
100 	struct kstat		*sc_kstat;
101 	uint64_t		*sc_counter_vals;
102 	bus_space_handle_t	 sc_pki_stat_ioh;
103 };
104 
105 #define DEVNAME(sc)		((sc)->sc_dev.dv_xname)
106 
107 #define L1_QUEUE(sc)		((sc)->sc_unit)
108 #define L2_QUEUE(sc)		((sc)->sc_unit)
109 #define L3_QUEUE(sc)		((sc)->sc_unit)
110 #define L4_QUEUE(sc)		((sc)->sc_unit)
111 #define L5_QUEUE(sc)		((sc)->sc_unit)
112 #define DESC_QUEUE(sc)		((sc)->sc_unit)
113 
114 #define PORT_FIFO(sc)		((sc)->sc_unit)		/* PKO FIFO */
115 #define PORT_GROUP_RX(sc)	((sc)->sc_unit * 2)	/* SSO group for Rx */
116 #define PORT_GROUP_TX(sc)	((sc)->sc_unit * 2 + 1)	/* SSO group for Tx */
117 #define PORT_MAC(sc)		((sc)->sc_pkomac)
118 #define PORT_PKIND(sc)		((sc)->sc_unit)
119 #define PORT_QPG(sc)		((sc)->sc_unit)
120 #define PORT_STYLE(sc)		((sc)->sc_unit)
121 
122 struct ogx_link_ops {
123 	const char	*link_type;
124 	unsigned int	 link_fifo_speed;	/* in Mbps */
125 	/* Initialize link. */
126 	int		(*link_init)(struct ogx_softc *);
127 	/* Deinitialize link. */
128 	void		(*link_down)(struct ogx_softc *);
129 	/* Change link parameters. */
130 	void		(*link_change)(struct ogx_softc *);
131 	/* Query link status. Returns non-zero if status has changed. */
132 	int		(*link_status)(struct ogx_softc *);
133 };
134 
135 struct ogx_fifo_group {
136 	unsigned int		fg_inited;
137 	unsigned int		fg_speed;
138 };
139 
140 struct ogx_config {
141 	unsigned int		cfg_nclusters;	/* number of parsing clusters */
142 	unsigned int		cfg_nfifogrps;	/* number of FIFO groups */
143 	unsigned int		cfg_nmacs;	/* number of MACs */
144 	unsigned int		cfg_npqs;	/* number of port queues */
145 	unsigned int		cfg_npkolvl;	/* number of PKO Lx levels */
146 	unsigned int		cfg_nullmac;	/* index of NULL MAC */
147 };
148 
149 struct ogx_node {
150 	bus_dma_tag_t		 node_dmat;
151 	bus_space_tag_t		 node_iot;
152 	bus_space_handle_t	 node_fpa3;
153 	bus_space_handle_t	 node_pki;
154 	bus_space_handle_t	 node_pko3;
155 	bus_space_handle_t	 node_sso;
156 
157 	struct fpa3pool		 node_pko_pool;
158 	struct fpa3pool		 node_pkt_pool;
159 	struct fpa3pool		 node_sso_pool;
160 	struct fpa3aura		 node_pko_aura;
161 	struct fpa3aura		 node_sso_aura;
162 
163 	uint64_t		 node_id;
164 	unsigned int		 node_nclusters;
165 	unsigned int		 node_nunits;
166 	struct ogx_fifo_group	 node_fifogrp[8];
167 	const struct ogx_config	*node_cfg;
168 
169 	struct rwlock		 node_lock;
170 	unsigned int		 node_flags;
171 #define NODE_INITED			0x01	/* node initialized */
172 #define NODE_FWREADY			0x02	/* node firmware ready */
173 };
174 
175 struct ogx_fwhdr {
176 	char		fw_version[8];
177 	uint64_t	fw_size;
178 };
179 
180 #define BGX_PORT_SIZE	0x100000
181 
182 #define PORT_RD_8(sc, reg) \
183 	bus_space_read_8((sc)->sc_iot, (sc)->sc_port_ioh, (reg))
184 #define PORT_WR_8(sc, reg, val) \
185 	bus_space_write_8((sc)->sc_iot, (sc)->sc_port_ioh, (reg), (val))
186 
187 #define NEXUS_RD_8(sc, reg) \
188 	bus_space_read_8((sc)->sc_iot, (sc)->sc_nexus_ioh, (reg))
189 #define NEXUS_WR_8(sc, reg, val) \
190 	bus_space_write_8((sc)->sc_iot, (sc)->sc_nexus_ioh, (reg), (val))
191 
192 #define FPA3_RD_8(node, reg) \
193 	bus_space_read_8((node)->node_iot, (node)->node_fpa3, (reg))
194 #define FPA3_WR_8(node, reg, val) \
195 	bus_space_write_8((node)->node_iot, (node)->node_fpa3, (reg), (val))
196 #define PKI_RD_8(node, reg) \
197 	bus_space_read_8((node)->node_iot, (node)->node_pki, (reg))
198 #define PKI_WR_8(node, reg, val) \
199 	bus_space_write_8((node)->node_iot, (node)->node_pki, (reg), (val))
200 #define PKO3_RD_8(node, reg) \
201 	bus_space_read_8((node)->node_iot, (node)->node_pko3, (reg))
202 #define PKO3_WR_8(node, reg, val) \
203 	bus_space_write_8((node)->node_iot, (node)->node_pko3, (reg), (val))
204 #define SSO_RD_8(node, reg) \
205 	bus_space_read_8((node)->node_iot, (node)->node_sso, (reg))
206 #define SSO_WR_8(node, reg, val) \
207 	bus_space_write_8((node)->node_iot, (node)->node_sso, (reg), (val))
208 
209 int	ogx_match(struct device *, void *, void *);
210 void	ogx_attach(struct device *, struct device *, void *);
211 void	ogx_defer(struct device *);
212 
213 int	ogx_ioctl(struct ifnet *, u_long, caddr_t);
214 void	ogx_start(struct ifqueue *);
215 int	ogx_send_mbuf(struct ogx_softc *, struct mbuf *);
216 u_int	ogx_load_mbufs(struct ogx_softc *, unsigned int);
217 u_int	ogx_unload_mbufs(struct ogx_softc *);
218 
219 void	ogx_media_status(struct ifnet *, struct ifmediareq *);
220 int	ogx_media_change(struct ifnet *);
221 int	ogx_mii_readreg(struct device *, int, int);
222 void	ogx_mii_writereg(struct device *, int, int, int);
223 void	ogx_mii_statchg(struct device *);
224 
225 int	ogx_init(struct ogx_softc *);
226 void	ogx_down(struct ogx_softc *);
227 void	ogx_iff(struct ogx_softc *);
228 void	ogx_rxrefill(void *);
229 int	ogx_rxintr(void *);
230 int	ogx_txintr(void *);
231 void	ogx_tick(void *);
232 
233 #if NKSTAT > 0
234 #define OGX_KSTAT_TICK_SECS	600
235 void	ogx_kstat_attach(struct ogx_softc *);
236 int	ogx_kstat_read(struct kstat *);
237 void	ogx_kstat_start(struct ogx_softc *);
238 void	ogx_kstat_stop(struct ogx_softc *);
239 void	ogx_kstat_tick(void *);
240 #endif
241 
242 int	ogx_node_init(struct ogx_node **, bus_dma_tag_t, bus_space_tag_t);
243 int	ogx_node_load_firmware(struct ogx_node *);
244 void	ogx_fpa3_aura_init(struct ogx_node *, struct fpa3aura *, uint32_t,
245 	    struct fpa3pool *);
246 void	ogx_fpa3_aura_load(struct ogx_node *, struct fpa3aura *, size_t,
247 	    size_t);
248 paddr_t	ogx_fpa3_alloc(struct fpa3aura *);
249 void	ogx_fpa3_free(struct fpa3aura *, paddr_t);
250 void	ogx_fpa3_pool_init(struct ogx_node *, struct fpa3pool *, uint32_t,
251 	    uint32_t);
252 
253 int	ogx_sgmii_link_init(struct ogx_softc *);
254 void	ogx_sgmii_link_down(struct ogx_softc *);
255 void	ogx_sgmii_link_change(struct ogx_softc *);
256 
257 static inline paddr_t
258 ogx_kvtophys(vaddr_t kva)
259 {
260 	KASSERT(IS_XKPHYS(kva));
261 	return XKPHYS_TO_PHYS(kva);
262 }
263 #define KVTOPHYS(addr)	ogx_kvtophys((vaddr_t)(addr))
264 
265 const struct cfattach ogx_ca = {
266 	sizeof(struct ogx_softc), ogx_match, ogx_attach
267 };
268 
269 struct cfdriver ogx_cd = {
270 	NULL, "ogx", DV_IFNET
271 };
272 
273 const struct ogx_config ogx_cn73xx_config = {
274 	.cfg_nclusters		= 2,
275 	.cfg_nfifogrps		= 4,
276 	.cfg_nmacs		= 14,
277 	.cfg_npqs		= 16,
278 	.cfg_npkolvl		= 3,
279 	.cfg_nullmac		= 15,
280 };
281 
282 const struct ogx_config ogx_cn78xx_config = {
283 	.cfg_nclusters		= 4,
284 	.cfg_nfifogrps		= 8,
285 	.cfg_nmacs		= 28,
286 	.cfg_npqs		= 32,
287 	.cfg_npkolvl		= 5,
288 	.cfg_nullmac		= 28,
289 };
290 
291 const struct ogx_link_ops ogx_sgmii_link_ops = {
292 	.link_type		= "SGMII",
293 	.link_fifo_speed	= 1000,
294 	.link_init		= ogx_sgmii_link_init,
295 	.link_down		= ogx_sgmii_link_down,
296 	.link_change		= ogx_sgmii_link_change,
297 };
298 
299 const struct ogx_link_ops ogx_xfi_link_ops = {
300 	.link_type		= "XFI",
301 	.link_fifo_speed	= 10000,
302 };
303 
304 #define BELTYPE_NONE	0x00
305 #define BELTYPE_MISC	0x01
306 #define BELTYPE_IPv4	0x02
307 #define BELTYPE_IPv6	0x03
308 #define BELTYPE_TCP	0x04
309 #define BELTYPE_UDP	0x05
310 
311 static const unsigned int ogx_ltypes[] = {
312 	BELTYPE_NONE,	/* 0x00 */
313 	BELTYPE_MISC,	/* 0x01 Ethernet */
314 	BELTYPE_MISC,	/* 0x02 VLAN */
315 	BELTYPE_NONE,	/* 0x03 */
316 	BELTYPE_NONE,	/* 0x04 */
317 	BELTYPE_MISC,	/* 0x05 SNAP */
318 	BELTYPE_MISC,	/* 0x06 ARP */
319 	BELTYPE_MISC,	/* 0x07 RARP */
320 	BELTYPE_IPv4,	/* 0x08 IPv4 */
321 	BELTYPE_IPv4,	/* 0x09 IPv4 options */
322 	BELTYPE_IPv6,	/* 0x0a IPv6 */
323 	BELTYPE_IPv6,	/* 0x0b IPv6 options */
324 	BELTYPE_MISC,	/* 0x0c ESP */
325 	BELTYPE_MISC,	/* 0x0d IP fragment */
326 	BELTYPE_MISC,	/* 0x0e IPcomp */
327 	BELTYPE_NONE,	/* 0x0f */
328 	BELTYPE_TCP,	/* 0x10 TCP */
329 	BELTYPE_UDP,	/* 0x11 UDP */
330 	BELTYPE_MISC,	/* 0x12 SCTP */
331 	BELTYPE_UDP,	/* 0x13 UDP VXLAN */
332 	BELTYPE_MISC,	/* 0x14 GRE */
333 	BELTYPE_MISC,	/* 0x15 NVGRE */
334 	BELTYPE_MISC,	/* 0x16 GTP */
335 	BELTYPE_UDP,	/* 0x17 UDP Geneve */
336 	BELTYPE_NONE,	/* 0x18 */
337 	BELTYPE_NONE,	/* 0x19 */
338 	BELTYPE_NONE,	/* 0x1a */
339 	BELTYPE_NONE,	/* 0x1b */
340 	BELTYPE_MISC,	/* 0x1c software */
341 	BELTYPE_MISC,	/* 0x1d software */
342 	BELTYPE_MISC,	/* 0x1e software */
343 	BELTYPE_MISC	/* 0x1f software */
344 };
345 
346 #define OGX_POOL_SSO		0
347 #define OGX_POOL_PKO		1
348 #define OGX_POOL_PKT		2
349 
350 #define OGX_AURA_SSO		0
351 #define OGX_AURA_PKO		1
352 #define OGX_AURA_PKT(sc)	((sc)->sc_unit + 2)
353 
354 struct ogx_node	ogx_node;
355 
356 int
357 ogx_match(struct device *parent, void *match, void *aux)
358 {
359 	return 1;
360 }
361 
362 void
363 ogx_attach(struct device *parent, struct device *self, void *aux)
364 {
365 	const struct ogx_config *cfg;
366 	struct ogx_fifo_group *fifogrp;
367 	struct ogx_node *node;
368 	struct ogx_attach_args *oaa = aux;
369 	struct ogx_softc *sc = (struct ogx_softc *)self;
370 	struct ifnet *ifp = &sc->sc_ac.ac_if;
371 	uint64_t lmac_type, lut_index, val;
372 	uint32_t lmac;
373 	int fgindex = PORT_FIFO(sc) >> 2;
374 	int cl, phy_addr, phy_handle;
375 
376 	if (ogx_node_init(&node, oaa->oaa_dmat, oaa->oaa_iot)) {
377 		printf(": node init failed\n");
378 		return;
379 	}
380 	cfg = node->node_cfg;
381 
382 	sc->sc_node = node;
383 	sc->sc_unit = node->node_nunits++;
384 
385 	phy_handle = OF_getpropint(oaa->oaa_node, "phy-handle", 0);
386 	if (phy_handle == 0) {
387 		printf(": no phy-handle\n");
388 		return;
389 	}
390 	if (cn30xxsmi_get_phy(phy_handle, 0, &sc->sc_smi, &phy_addr)) {
391 		printf(": no phy found\n");
392 		return;
393 	}
394 
395 	lmac = OF_getpropint(oaa->oaa_node, "reg", UINT32_MAX);
396 	if (lmac == UINT32_MAX) {
397 		printf(": no reg property\n");
398 		return;
399 	}
400 
401 	sc->sc_bgxid = oaa->oaa_bgxid;
402 	sc->sc_lmacid = lmac;
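	/* The IPD port and PKO MAC indices derive from the BGX/LMAC ids. */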
403 	sc->sc_ipdport = sc->sc_bgxid * 0x100 + lmac * 0x10 + 0x800;
404 	sc->sc_pkomac = sc->sc_bgxid * 4 + lmac + 2;
405 
406 	if (OF_getproplen(oaa->oaa_node, "local-mac-address") !=
407 	    ETHER_ADDR_LEN) {
408 		printf(": no MAC address\n");
409 		return;
410 	}
411 	OF_getprop(oaa->oaa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
412 	    ETHER_ADDR_LEN);
413 
414 	sc->sc_iot = oaa->oaa_iot;
415 	sc->sc_nexus_ioh = oaa->oaa_ioh;
416 	if (bus_space_subregion(sc->sc_iot, oaa->oaa_ioh,
417 	    sc->sc_lmacid * BGX_PORT_SIZE, BGX_PORT_SIZE, &sc->sc_port_ioh)) {
418 		printf(": can't map IO subregion\n");
419 		return;
420 	}
421 
422 	val = PORT_RD_8(sc, BGX_CMR_RX_ID_MAP);
423 	val &= ~BGX_CMR_RX_ID_MAP_RID_M;
424 	val &= ~BGX_CMR_RX_ID_MAP_PKND_M;
425 	val |= (uint64_t)(sc->sc_bgxid * 4 + 2 + sc->sc_lmacid) <<
426 	    BGX_CMR_RX_ID_MAP_RID_S;
427 	val |= (uint64_t)PORT_PKIND(sc) << BGX_CMR_RX_ID_MAP_PKND_S;
428 	PORT_WR_8(sc, BGX_CMR_RX_ID_MAP, val);
429 
430 	val = PORT_RD_8(sc, BGX_CMR_CHAN_MSK_AND);
431 	val |= 0xffffULL << (sc->sc_lmacid * 16);
432 	PORT_WR_8(sc, BGX_CMR_CHAN_MSK_AND, val);
433 
434 	val = PORT_RD_8(sc, BGX_CMR_CHAN_MSK_OR);
435 	val |= 0xffffULL << (sc->sc_lmacid * 16);
436 	PORT_WR_8(sc, BGX_CMR_CHAN_MSK_OR, val);
437 
438 	sc->sc_rx_ih = octeon_intr_establish(0x61000 | PORT_GROUP_RX(sc),
439 	    IPL_NET | IPL_MPSAFE, ogx_rxintr, sc, DEVNAME(sc));
440 	if (sc->sc_rx_ih == NULL) {
441 		printf(": could not establish Rx interrupt\n");
442 		return;
443 	}
444 	sc->sc_tx_ih = octeon_intr_establish(0x61000 | PORT_GROUP_TX(sc),
445 	    IPL_NET | IPL_MPSAFE, ogx_txintr, sc, DEVNAME(sc));
446 	if (sc->sc_tx_ih == NULL) {
447 		printf(": could not establish Tx interrupt\n");
448 		return;
449 	}
450 
451 	val = PORT_RD_8(sc, BGX_CMR_CONFIG);
452 	lmac_type = (val & BGX_CMR_CONFIG_LMAC_TYPE_M) >>
453 	    BGX_CMR_CONFIG_LMAC_TYPE_S;
454 	switch (lmac_type) {
455 	case 0:
456 		sc->sc_link_ops = &ogx_sgmii_link_ops;
457 		break;
458 	default:
459 		printf(": unhandled LMAC type %llu\n", lmac_type);
460 		return;
461 	}
462 	printf(": %s", sc->sc_link_ops->link_type);
463 
464 	printf(", address %s", ether_sprintf(sc->sc_ac.ac_enaddr));
465 
466 	ogx_fpa3_aura_init(node, &sc->sc_pkt_aura, OGX_AURA_PKT(sc),
467 	    &node->node_pkt_pool);
468 
469 	sc->sc_rxused = 128;
470 	sc->sc_txfree = 128;
471 
472 	timeout_set(&sc->sc_rxrefill, ogx_rxrefill, sc);
473 	timeout_set(&sc->sc_tick, ogx_tick, sc);
474 
475 	printf("\n");
476 
477 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
478 	ifp->if_softc = sc;
479 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
480 	ifp->if_xflags |= IFXF_MPSAFE;
481 	ifp->if_ioctl = ogx_ioctl;
482 	ifp->if_qstart = ogx_start;
483 	ifp->if_capabilities = IFCAP_CSUM_IPv4 |
484 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
485 	    IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
486 
487 	sc->sc_mii.mii_ifp = ifp;
488 	sc->sc_mii.mii_readreg = ogx_mii_readreg;
489 	sc->sc_mii.mii_writereg = ogx_mii_writereg;
490 	sc->sc_mii.mii_statchg = ogx_mii_statchg;
491 	ifmedia_init(&sc->sc_media, 0, ogx_media_change, ogx_media_status);
492 
493 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phy_addr,
494 	    MII_OFFSET_ANY, MIIF_NOISOLATE);
495 	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
496 		printf("%s: no PHY found\n", DEVNAME(sc));
497 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
498 		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
499 	} else {
500 		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
501 
502 		timeout_add_sec(&sc->sc_tick, 1);
503 	}
504 
505 	/*
506 	 * Set up the PKI for this port.
507 	 */
508 
509 	val = (uint64_t)PORT_GROUP_RX(sc) << PKI_QPG_TBL_GRP_OK_S;
510 	val |= (uint64_t)PORT_GROUP_RX(sc) << PKI_QPG_TBL_GRP_BAD_S;
511 	val |= OGX_AURA_PKT(sc) << PKI_QPG_TBL_LAURA_S;
512 	PKI_WR_8(node, PKI_QPG_TBL(PORT_QPG(sc)), val);
513 
514 	for (cl = 0; cl < cfg->cfg_nclusters; cl++) {
515 		val = (uint64_t)PORT_QPG(sc) << PKI_CL_STYLE_CFG_QPG_BASE_S;
516 		PKI_WR_8(node, PKI_CL_STYLE_CFG(cl, PORT_STYLE(sc)), val);
517 		PKI_WR_8(node, PKI_CL_STYLE_CFG2(cl, PORT_STYLE(sc)), 0);
518 		PKI_WR_8(node, PKI_CL_STYLE_ALG(cl, PORT_STYLE(sc)), 1u << 31);
519 
520 		val = PKI_RD_8(node, PKI_CL_PKIND_STYLE(cl, PORT_PKIND(sc)));
521 		val &= ~PKI_CL_PKIND_STYLE_PM_M;
522 		val &= ~PKI_CL_PKIND_STYLE_STYLE_M;
523 		val |= PORT_STYLE(sc) << PKI_CL_PKIND_STYLE_STYLE_S;
524 		PKI_WR_8(node, PKI_CL_PKIND_STYLE(cl, PORT_PKIND(sc)), val);
525 	}
526 
527 	val = 5ULL << PKI_STYLE_BUF_FIRST_SKIP_S;
528 	val |= ((MCLBYTES - CACHELINESIZE) / sizeof(uint64_t)) <<
529 	    PKI_STYLE_BUF_MB_SIZE_S;
530 	PKI_WR_8(node, PKI_STYLE_BUF(PORT_STYLE(sc)), val);
531 
532 	/*
533 	 * Set up output queues from the descriptor queue to the port queue.
534 	 *
535 	 * The hardware implements a multilevel hierarchy of queues
536 	 * with configurable priorities.
537 	 * This driver uses a simple topology where there is one queue
538 	 * on each level.
539 	 *
540 	 * CN73xx: DQ ->             L3 -> L2 -> port
541 	 * CN78xx: DQ -> L5 -> L4 -> L3 -> L2 -> port
542 	 */
543 
544 	/* Map channel to queue L2. */
545 	val = PKO3_RD_8(node, PKO3_L3_L2_SQ_CHANNEL(L2_QUEUE(sc)));
546 	val &= ~PKO3_L3_L2_SQ_CHANNEL_CC_ENABLE;
547 	val &= ~PKO3_L3_L2_SQ_CHANNEL_M;
548 	val |= (uint64_t)sc->sc_ipdport << PKO3_L3_L2_SQ_CHANNEL_S;
549 	PKO3_WR_8(node, PKO3_L3_L2_SQ_CHANNEL(L2_QUEUE(sc)), val);
550 
551 	val = PKO3_RD_8(node, PKO3_MAC_CFG(PORT_MAC(sc)));
552 	val &= ~PKO3_MAC_CFG_MIN_PAD_ENA;
553 	val &= ~PKO3_MAC_CFG_FCS_ENA;
554 	val &= ~PKO3_MAC_CFG_FCS_SOP_OFF_M;
555 	val &= ~PKO3_MAC_CFG_FIFO_NUM_M;
556 	val |= PORT_FIFO(sc) << PKO3_MAC_CFG_FIFO_NUM_S;
557 	PKO3_WR_8(node, PKO3_MAC_CFG(PORT_MAC(sc)), val);
558 
559 	val = PKO3_RD_8(node, PKO3_MAC_CFG(PORT_MAC(sc)));
560 	val &= ~PKO3_MAC_CFG_SKID_MAX_CNT_M;
561 	PKO3_WR_8(node, PKO3_MAC_CFG(PORT_MAC(sc)), val);
562 
563 	PKO3_WR_8(node, PKO3_MCI0_MAX_CRED(PORT_MAC(sc)), 0);
564 	PKO3_WR_8(node, PKO3_MCI1_MAX_CRED(PORT_MAC(sc)), 2560 / 16);
565 
566 	/* Map the port queue to the MAC. */
567 
568 	val = (uint64_t)PORT_MAC(sc) << PKO3_L1_SQ_TOPOLOGY_LINK_S;
569 	PKO3_WR_8(node, PKO3_L1_SQ_TOPOLOGY(L1_QUEUE(sc)), val);
570 
571 	val = (uint64_t)PORT_MAC(sc) << PKO3_L1_SQ_SHAPE_LINK_S;
572 	PKO3_WR_8(node, PKO3_L1_SQ_SHAPE(L1_QUEUE(sc)), val);
573 
574 	val = (uint64_t)PORT_MAC(sc) << PKO3_L1_SQ_LINK_LINK_S;
575 	PKO3_WR_8(node, PKO3_L1_SQ_LINK(L1_QUEUE(sc)), val);
576 
577 	/* L1 / port queue */
578 
579 	val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
580 	PKO3_WR_8(node, PKO3_L1_SQ_SCHEDULE(L1_QUEUE(sc)), val);
581 
582 	val = PKO3_RD_8(node, PKO3_L1_SQ_TOPOLOGY(L1_QUEUE(sc)));
583 	val &= ~PKO3_L1_SQ_TOPOLOGY_PRIO_ANCHOR_M;
584 	val &= ~PKO3_L1_SQ_TOPOLOGY_RR_PRIO_M;
585 	val |= (uint64_t)L2_QUEUE(sc) << PKO3_L1_SQ_TOPOLOGY_PRIO_ANCHOR_S;
586 	val |= (uint64_t)0xf << PKO3_L1_SQ_TOPOLOGY_RR_PRIO_S;
587 	PKO3_WR_8(node, PKO3_L1_SQ_TOPOLOGY(L1_QUEUE(sc)), val);
588 
589 	/* L2 */
590 
591 	val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
592 	PKO3_WR_8(node, PKO3_L2_SQ_SCHEDULE(L2_QUEUE(sc)), val);
593 
594 	val = PKO3_RD_8(node, PKO3_L2_SQ_TOPOLOGY(L2_QUEUE(sc)));
595 	val &= ~PKO3_L2_SQ_TOPOLOGY_PRIO_ANCHOR_M;
596 	val &= ~PKO3_L2_SQ_TOPOLOGY_PARENT_M;
597 	val &= ~PKO3_L2_SQ_TOPOLOGY_RR_PRIO_M;
598 	val |= (uint64_t)L3_QUEUE(sc) << PKO3_L2_SQ_TOPOLOGY_PRIO_ANCHOR_S;
599 	val |= (uint64_t)L1_QUEUE(sc) << PKO3_L2_SQ_TOPOLOGY_PARENT_S;
600 	val |= (uint64_t)0xf << PKO3_L2_SQ_TOPOLOGY_RR_PRIO_S;
601 	PKO3_WR_8(node, PKO3_L2_SQ_TOPOLOGY(L2_QUEUE(sc)), val);
602 
603 	switch (cfg->cfg_npkolvl) {
604 	case 3:
605 		/* L3 */
606 
607 		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
608 		PKO3_WR_8(node, PKO3_L3_SQ_SCHEDULE(L3_QUEUE(sc)), val);
609 
610 		val = PKO3_RD_8(node, PKO3_L3_SQ_TOPOLOGY(L3_QUEUE(sc)));
611 		val &= ~PKO3_L3_SQ_TOPOLOGY_PRIO_ANCHOR_M;
612 		val &= ~PKO3_L3_SQ_TOPOLOGY_PARENT_M;
613 		val &= ~PKO3_L3_SQ_TOPOLOGY_RR_PRIO_M;
614 		val |= (uint64_t)DESC_QUEUE(sc) <<
615 		    PKO3_L3_SQ_TOPOLOGY_PRIO_ANCHOR_S;
616 		val |= (uint64_t)L2_QUEUE(sc) << PKO3_L3_SQ_TOPOLOGY_PARENT_S;
617 		val |= (uint64_t)0xf << PKO3_L3_SQ_TOPOLOGY_RR_PRIO_S;
618 		PKO3_WR_8(node, PKO3_L3_SQ_TOPOLOGY(L3_QUEUE(sc)), val);
619 
620 		/* Descriptor queue */
621 
622 		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
623 		PKO3_WR_8(node, PKO3_DQ_SCHEDULE(DESC_QUEUE(sc)), val);
624 
625 		val = (uint64_t)L3_QUEUE(sc) << PKO3_DQ_TOPOLOGY_PARENT_S;
626 		PKO3_WR_8(node, PKO3_DQ_TOPOLOGY(DESC_QUEUE(sc)), val);
627 
628 		break;
629 
630 	case 5:
631 		/* L3 */
632 
633 		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
634 		PKO3_WR_8(node, PKO3_L3_SQ_SCHEDULE(L3_QUEUE(sc)), val);
635 
636 		val = PKO3_RD_8(node, PKO3_L3_SQ_TOPOLOGY(L3_QUEUE(sc)));
637 		val &= ~PKO3_L3_SQ_TOPOLOGY_PRIO_ANCHOR_M;
638 		val &= ~PKO3_L3_SQ_TOPOLOGY_PARENT_M;
639 		val &= ~PKO3_L3_SQ_TOPOLOGY_RR_PRIO_M;
640 		val |= (uint64_t)L4_QUEUE(sc) <<
641 		    PKO3_L3_SQ_TOPOLOGY_PRIO_ANCHOR_S;
642 		val |= (uint64_t)L2_QUEUE(sc) << PKO3_L3_SQ_TOPOLOGY_PARENT_S;
643 		val |= (uint64_t)0xf << PKO3_L3_SQ_TOPOLOGY_RR_PRIO_S;
644 		PKO3_WR_8(node, PKO3_L3_SQ_TOPOLOGY(L3_QUEUE(sc)), val);
645 
646 		/* L4 */
647 
648 		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
649 		PKO3_WR_8(node, PKO3_L4_SQ_SCHEDULE(L4_QUEUE(sc)), val);
650 
651 		val = PKO3_RD_8(node, PKO3_L4_SQ_TOPOLOGY(L4_QUEUE(sc)));
652 		val &= ~PKO3_L4_SQ_TOPOLOGY_PRIO_ANCHOR_M;
653 		val &= ~PKO3_L4_SQ_TOPOLOGY_PARENT_M;
654 		val &= ~PKO3_L4_SQ_TOPOLOGY_RR_PRIO_M;
655 		val |= (uint64_t)L5_QUEUE(sc) <<
656 		    PKO3_L4_SQ_TOPOLOGY_PRIO_ANCHOR_S;
657 		val |= (uint64_t)L3_QUEUE(sc) << PKO3_L4_SQ_TOPOLOGY_PARENT_S;
658 		val |= (uint64_t)0xf << PKO3_L4_SQ_TOPOLOGY_RR_PRIO_S;
659 		PKO3_WR_8(node, PKO3_L4_SQ_TOPOLOGY(L4_QUEUE(sc)), val);
660 
661 		/* L5 */
662 
663 		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
664 		PKO3_WR_8(node, PKO3_L5_SQ_SCHEDULE(L5_QUEUE(sc)), val);
665 
666 		val = PKO3_RD_8(node, PKO3_L5_SQ_TOPOLOGY(L5_QUEUE(sc)));
667 		val &= ~PKO3_L5_SQ_TOPOLOGY_PRIO_ANCHOR_M;
668 		val &= ~PKO3_L5_SQ_TOPOLOGY_PARENT_M;
669 		val &= ~PKO3_L5_SQ_TOPOLOGY_RR_PRIO_M;
670 		val |= (uint64_t)DESC_QUEUE(sc) <<
671 		    PKO3_L5_SQ_TOPOLOGY_PRIO_ANCHOR_S;
672 		val |= (uint64_t)L4_QUEUE(sc) << PKO3_L5_SQ_TOPOLOGY_PARENT_S;
673 		val |= (uint64_t)0xf << PKO3_L5_SQ_TOPOLOGY_RR_PRIO_S;
674 		PKO3_WR_8(node, PKO3_L5_SQ_TOPOLOGY(L5_QUEUE(sc)), val);
675 
676 		/* Descriptor queue */
677 
678 		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
679 		PKO3_WR_8(node, PKO3_DQ_SCHEDULE(DESC_QUEUE(sc)), val);
680 
681 		val = (uint64_t)L5_QUEUE(sc) << PKO3_DQ_TOPOLOGY_PARENT_S;
682 		PKO3_WR_8(node, PKO3_DQ_TOPOLOGY(DESC_QUEUE(sc)), val);
683 
684 		break;
685 
686 	default:
687 		printf(": unhandled number of PKO levels (%u)\n",
688 		    cfg->cfg_npkolvl);
689 		return;
690 	}
691 
692 	/* Descriptor queue, common part */
693 
694 	PKO3_WR_8(node, PKO3_DQ_WM_CTL(DESC_QUEUE(sc)), PKO3_DQ_WM_CTL_KIND);
695 
696 	val = PKO3_RD_8(node, PKO3_PDM_DQ_MINPAD(DESC_QUEUE(sc)));
697 	val &= ~PKO3_PDM_DQ_MINPAD_MINPAD;
698 	PKO3_WR_8(node, PKO3_PDM_DQ_MINPAD(DESC_QUEUE(sc)), val);
699 
700 	lut_index = sc->sc_bgxid * 0x40 + lmac * 0x10;
701 	val = PKO3_LUT_VALID | (L1_QUEUE(sc) << PKO3_LUT_PQ_IDX_S) |
702 	    (L2_QUEUE(sc) << PKO3_LUT_QUEUE_NUM_S);
703 	PKO3_WR_8(node, PKO3_LUT(lut_index), val);
704 
705 #if NKSTAT > 0
706 	ogx_kstat_attach(sc);
707 #endif
708 
709 	fifogrp = &node->node_fifogrp[fgindex];
710 	fifogrp->fg_speed += sc->sc_link_ops->link_fifo_speed;
711 
712 	/*
713 	 * Defer the rest of the initialization so that FIFO groups
714 	 * can be configured properly.
715 	 */
716 	config_defer(&sc->sc_dev, ogx_defer);
717 }
718 
719 void
720 ogx_defer(struct device *dev)
721 {
722 	struct ogx_fifo_group *fifogrp;
723 	struct ogx_softc *sc = (struct ogx_softc *)dev;
724 	struct ogx_node *node = sc->sc_node;
725 	struct ifnet *ifp = &sc->sc_ac.ac_if;
726 	uint64_t grprate, val;
727 	int fgindex = PORT_FIFO(sc) >> 2;
728 
729 	fifogrp = &node->node_fifogrp[fgindex];
730 	if (fifogrp->fg_inited == 0) {
731 		/* Adjust the total rate of the fifo group. */
732 		grprate = 0;
733 		while (fifogrp->fg_speed > (6250 << grprate))
734 			grprate++;
735 		if (grprate > 5)
736 			grprate = 5;
737 
738 		val = PKO3_RD_8(node, PKO3_PTGF_CFG(fgindex));
739 		val &= ~PKO3_PTGF_CFG_RATE_M;
740 		val |= grprate << PKO3_PTGF_CFG_RATE_S;
741 		PKO3_WR_8(node, PKO3_PTGF_CFG(fgindex), val);
742 
743 		fifogrp->fg_inited = 1;
744 	}
745 
746 	if_attach(ifp);
747 	ether_ifattach(ifp);
748 }
749 
750 int
751 ogx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
752 {
753 	struct ogx_softc *sc = ifp->if_softc;
754 	struct ifreq *ifr = (struct ifreq *)data;
755 	int error = 0;
756 	int s;
757 
758 	s = splnet();
759 
760 	switch (cmd) {
761 	case SIOCSIFADDR:
762 		ifp->if_flags |= IFF_UP;
763 		/* FALLTHROUGH */
764 
765 	case SIOCSIFFLAGS:
766 		if (ISSET(ifp->if_flags, IFF_UP)) {
767 			if (ISSET(ifp->if_flags, IFF_RUNNING))
768 				error = ENETRESET;
769 			else
770 				error = ogx_init(sc);
771 		} else {
772 			if (ISSET(ifp->if_flags, IFF_RUNNING))
773 				ogx_down(sc);
774 		}
775 		break;
776 
777 	case SIOCGIFMEDIA:
778 	case SIOCSIFMEDIA:
779 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
780 		break;
781 
782 	default:
783 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
784 		break;
785 	}
786 
787 	if (error == ENETRESET) {
788 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
789 		    (IFF_UP | IFF_RUNNING))
790 			ogx_iff(sc);
791 		error = 0;
792 	}
793 
794 	splx(s);
795 
796 	return error;
797 }
798 
799 int
800 ogx_init(struct ogx_softc *sc)
801 {
802 	struct ogx_node *node = sc->sc_node;
803 	struct ifnet *ifp = &sc->sc_ac.ac_if;
804 	uint64_t op;
805 	int error;
806 
807 	error = ogx_node_load_firmware(node);
808 	if (error != 0)
809 		return error;
810 
811 #if NKSTAT > 0
812 	ogx_kstat_start(sc);
813 #endif
814 
815 	ogx_iff(sc);
816 
817 	SSO_WR_8(sc->sc_node, SSO_GRP_INT_THR(PORT_GROUP_RX(sc)), 1);
818 	SSO_WR_8(sc->sc_node, SSO_GRP_INT_THR(PORT_GROUP_TX(sc)), 1);
819 
820 	sc->sc_link_ops->link_init(sc);
821 	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
822 		mii_mediachg(&sc->sc_mii);
823 
824 	/* Open the descriptor queue. */
825 	op = PKO3_LD_IO | PKO3_LD_DID;
826 	op |= node->node_id << PKO3_LD_NODE_S;
827 	op |= PKO3_DQOP_OPEN << PKO3_LD_OP_S;
828 	op |= DESC_QUEUE(sc) << PKO3_LD_DQ_S;
829 	(void)octeon_xkphys_read_8(op);
830 
831 	ifp->if_flags |= IFF_RUNNING;
832 	ifq_restart(&ifp->if_snd);
833 
834 	timeout_add(&sc->sc_rxrefill, 1);
835 	timeout_add_sec(&sc->sc_tick, 1);
836 
837 	return 0;
838 }
839 
840 void
841 ogx_down(struct ogx_softc *sc)
842 {
843 	struct ifnet *ifp = &sc->sc_ac.ac_if;
844 	struct ogx_node *node = sc->sc_node;
845 	uint64_t op, val;
846 	unsigned int nused;
847 
848 	CLR(ifp->if_flags, IFF_RUNNING);
849 
850 	/* Drain the descriptor queue. */
851 	val = PKO3_LX_SQ_SW_XOFF_DRAIN;
852 	val |= PKO3_LX_SQ_SW_XOFF_DRAIN_NULL_LINK;
853 	PKO3_WR_8(node, PKO3_DQ_SW_XOFF(DESC_QUEUE(sc)), val);
854 	(void)PKO3_RD_8(node, PKO3_DQ_SW_XOFF(DESC_QUEUE(sc)));
855 
856 	delay(1000);
857 
858 	/* Finish the drain operation. */
859 	PKO3_WR_8(node, PKO3_DQ_SW_XOFF(DESC_QUEUE(sc)), 0);
860 	(void)PKO3_RD_8(node, PKO3_DQ_SW_XOFF(DESC_QUEUE(sc)));
861 
862 	/* Close the descriptor queue. */
863 	op = PKO3_LD_IO | PKO3_LD_DID;
864 	op |= node->node_id << PKO3_LD_NODE_S;
865 	op |= PKO3_DQOP_CLOSE << PKO3_LD_OP_S;
866 	op |= DESC_QUEUE(sc) << PKO3_LD_DQ_S;
867 	(void)octeon_xkphys_read_8(op);
868 
869 	/* Disable data transfer. */
870 	val = PORT_RD_8(sc, BGX_CMR_CONFIG);
871 	val &= ~BGX_CMR_CONFIG_DATA_PKT_RX_EN;
872 	val &= ~BGX_CMR_CONFIG_DATA_PKT_TX_EN;
873 	PORT_WR_8(sc, BGX_CMR_CONFIG, val);
874 	(void)PORT_RD_8(sc, BGX_CMR_CONFIG);
875 
876 	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
877 		mii_down(&sc->sc_mii);
878 	sc->sc_link_ops->link_down(sc);
879 
880 	ifq_clr_oactive(&ifp->if_snd);
881 	ifq_barrier(&ifp->if_snd);
882 
883 	timeout_del_barrier(&sc->sc_rxrefill);
884 	timeout_del_barrier(&sc->sc_tick);
885 
886 #if NKSTAT > 0
887 	ogx_kstat_stop(sc);
888 #endif
889 
890 	nused = ogx_unload_mbufs(sc);
891 	atomic_add_int(&sc->sc_rxused, nused);
892 }
893 
894 void
895 ogx_iff(struct ogx_softc *sc)
896 {
897 	struct arpcom *ac = &sc->sc_ac;
898 	struct ifnet *ifp = &sc->sc_ac.ac_if;
899 	struct ether_multi *enm;
900 	struct ether_multistep step;
901 	uint64_t rx_adr_ctl;
902 	uint64_t val;
903 	int cidx, clast, i;
904 
905 	rx_adr_ctl = PORT_RD_8(sc, BGX_CMR_RX_ADR_CTL);
906 	rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_BCST_ACCEPT;
907 	rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_CAM_ACCEPT;
908 	rx_adr_ctl &= ~BGX_CMR_RX_ADR_CTL_MCST_MODE_ALL;
909 	ifp->if_flags &= ~IFF_ALLMULTI;
910 
911 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
912 		ifp->if_flags |= IFF_ALLMULTI;
913 		rx_adr_ctl &= ~BGX_CMR_RX_ADR_CTL_CAM_ACCEPT;
914 		rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_MCST_MODE_ALL;
915 	} else if (ac->ac_multirangecnt > 0 || ac->ac_multicnt >= OGX_NCAM) {
916 		ifp->if_flags |= IFF_ALLMULTI;
917 		rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_MCST_MODE_ALL;
918 	} else {
919 		rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_MCST_MODE_CAM;
920 	}
921 
922 	PORT_WR_8(sc, BGX_CMR_RX_ADR_CTL, rx_adr_ctl);
923 
924 	cidx = sc->sc_lmacid * OGX_NCAM;
925 	clast = (sc->sc_lmacid + 1) * OGX_NCAM;
926 
927 	if (!ISSET(ifp->if_flags, IFF_PROMISC)) {
928 		val = BGX_CMR_RX_ADR_CAM_EN | ((uint64_t)sc->sc_lmacid
929 		    << BGX_CMR_RX_ADR_CAM_ID_S);
930 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
931 			val |= (uint64_t)ac->ac_enaddr[i] <<
932 			    ((ETHER_ADDR_LEN - 1 - i) * 8);
933 		}
934 		NEXUS_WR_8(sc, BGX_CMR_RX_ADR_CAM(cidx++), val);
935 	}
936 
937 	if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
938 		ETHER_FIRST_MULTI(step, ac, enm);
939 		while (enm != NULL) {
940 			val = BGX_CMR_RX_ADR_CAM_EN | ((uint64_t)sc->sc_lmacid
941 			    << BGX_CMR_RX_ADR_CAM_ID_S);
942 			for (i = 0; i < ETHER_ADDR_LEN; i++)
943 				val |= (uint64_t)enm->enm_addrlo[i] << (i * 8);
944 			KASSERT(cidx < clast);
945 			NEXUS_WR_8(sc, BGX_CMR_RX_ADR_CAM(cidx++), val);
946 
947 			ETHER_NEXT_MULTI(step, enm);
948 		}
949 	}
950 
951 	/* Disable any remaining address CAM entries. */
952 	while (cidx < clast)
953 		NEXUS_WR_8(sc, BGX_CMR_RX_ADR_CAM(cidx++), 0);
954 }
955 
956 static inline uint64_t *
957 ogx_get_work(struct ogx_node *node, uint32_t group)
958 {
959 	uint64_t op, resp;
960 
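	/*
	 * Fetch one work queue entry from the SSO group with a single
	 * I/O load.  The response indicates if no work was available.
	 */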
961 	op = SSO_LD_IO | SSO_LD_DID;
962 	op |= node->node_id << SSO_LD_NODE_S;
963 	op |= SSO_LD_GROUPED | (group << SSO_LD_INDEX_S);
964 	resp = octeon_xkphys_read_8(op);
965 
966 	if (resp & SSO_LD_RTN_NO_WORK)
967 		return NULL;
968 
969 	return (uint64_t *)PHYS_TO_XKPHYS(resp & SSO_LD_RTN_ADDR_M, CCA_CACHED);
970 }
971 
972 static inline struct mbuf *
973 ogx_extract_mbuf(struct ogx_softc *sc, paddr_t pktbuf)
974 {
975 	struct mbuf *m, **pm;
976 
977 	pm = (struct mbuf **)PHYS_TO_XKPHYS(pktbuf, CCA_CACHED) - 1;
978 	m = *pm;
979 	*pm = NULL;
980 	KASSERTMSG((paddr_t)m->m_pkthdr.ph_cookie == pktbuf,
981 	    "%s: corrupt packet pool, mbuf cookie %p != pktbuf %p",
982 	    DEVNAME(sc), m->m_pkthdr.ph_cookie, (void *)pktbuf);
983 	m->m_pkthdr.ph_cookie = NULL;
984 	return m;
985 }
986 
987 void
988 ogx_rxrefill(void *arg)
989 {
990 	struct ogx_softc *sc = arg;
991 	unsigned int to_alloc;
992 
993 	if (sc->sc_rxused > 0) {
994 		to_alloc = atomic_swap_uint(&sc->sc_rxused, 0);
995 		to_alloc = ogx_load_mbufs(sc, to_alloc);
996 		if (to_alloc > 0) {
997 			atomic_add_int(&sc->sc_rxused, to_alloc);
998 			timeout_add(&sc->sc_rxrefill, 1);
999 		}
1000 	}
1001 }
1002 
1003 void
1004 ogx_tick(void *arg)
1005 {
1006 	struct ogx_softc *sc = arg;
1007 	int s;
1008 
1009 	s = splnet();
1010 	if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1011 		mii_tick(&sc->sc_mii);
1012 	} else {
1013 		if (sc->sc_link_ops->link_status(sc))
1014 			sc->sc_link_ops->link_change(sc);
1015 	}
1016 	splx(s);
1017 
1018 	timeout_add_sec(&sc->sc_tick, 1);
1019 }
1020 
1021 int
1022 ogx_rxintr(void *arg)
1023 {
1024 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1025 	struct mbuf *m, *m0, *mprev;
1026 	struct ogx_softc *sc = arg;
1027 	struct ogx_node *node = sc->sc_node;
1028 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1029 	paddr_t pktbuf, pktdata;
1030 	uint64_t *work;
1031 	uint64_t nsegs;
1032 	unsigned int rxused = 0;
1033 
1034 	/* Acknowledge the interrupt. */
1035 	SSO_WR_8(node, SSO_GRP_INT(PORT_GROUP_RX(sc)), SSO_GRP_INT_EXE_INT);
1036 
1037 	for (;;) {
1038 		uint64_t errcode, errlevel;
1039 		uint64_t word3;
1040 		size_t pktlen, left;
1041 #ifdef DIAGNOSTIC
1042 		unsigned int pkind;
1043 #endif
1044 
1045 		work = ogx_get_work(sc->sc_node, PORT_GROUP_RX(sc));
1046 		if (work == NULL)
1047 			break;
1048 
1049 #ifdef DIAGNOSTIC
1050 		pkind = (work[0] & PKI_WORD0_PKIND_M) >> PKI_WORD0_PKIND_S;
1051 		if (__predict_false(pkind != PORT_PKIND(sc))) {
1052 			printf("%s: unexpected pkind %u, should be %u\n",
1053 			    DEVNAME(sc), pkind, PORT_PKIND(sc));
1054 			goto wqe_error;
1055 		}
1056 #endif
1057 
1058 		nsegs = (work[0] & PKI_WORD0_BUFS_M) >> PKI_WORD0_BUFS_S;
1059 		word3 = work[3];
1060 
1061 		errlevel = (work[2] & PKI_WORD2_ERR_LEVEL_M) >>
1062 		    PKI_WORD2_ERR_LEVEL_S;
1063 		errcode = (work[2] & PKI_WORD2_ERR_CODE_M) >>
1064 		    PKI_WORD2_ERR_CODE_S;
1065 		if (__predict_false(errlevel <= 1 && errcode != 0)) {
1066 			ifp->if_ierrors++;
1067 			goto drop;
1068 		}
1069 
1070 		KASSERT(nsegs > 0);
1071 		rxused += nsegs;
1072 
1073 		pktlen = (work[1] & PKI_WORD1_LEN_M) >> PKI_WORD1_LEN_S;
1074 		left = pktlen;
1075 
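		/*
		 * work[3] describes the first buffer.  Each buffer's link
		 * word, stored just before the packet data, describes the
		 * next segment.
		 */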
1076 		m0 = NULL;
1077 		mprev = NULL;
1078 		while (nsegs-- > 0) {
1079 			size_t size;
1080 
1081 			pktdata = (word3 & PKI_WORD3_ADDR_M) >>
1082 			    PKI_WORD3_ADDR_S;
1083 			pktbuf = pktdata & ~(CACHELINESIZE - 1);
1084 			size = (word3 & PKI_WORD3_SIZE_M) >> PKI_WORD3_SIZE_S;
1085 			if (size > left)
1086 				size = left;
1087 
1088 			m = ogx_extract_mbuf(sc, pktbuf);
1089 			m->m_data += (pktdata - pktbuf) & (CACHELINESIZE - 1);
1090 			m->m_len = size;
1091 			left -= size;
1092 
1093 			/* pktdata can be unaligned. */
1094 			memcpy(&word3, (void *)PHYS_TO_XKPHYS(pktdata -
1095 			    sizeof(uint64_t), CCA_CACHED), sizeof(uint64_t));
1096 
1097 			if (m0 == NULL) {
1098 				m0 = m;
1099 			} else {
1100 				m->m_flags &= ~M_PKTHDR;
1101 				mprev->m_next = m;
1102 			}
1103 			mprev = m;
1104 		}
1105 
1106 		m0->m_pkthdr.len = pktlen;
1107 		ml_enqueue(&ml, m0);
1108 
1109 		continue;
1110 
1111 drop:
1112 		/* Return the buffers back to the pool. */
1113 		while (nsegs-- > 0) {
1114 			pktdata = (word3 & PKI_WORD3_ADDR_M) >>
1115 			    PKI_WORD3_ADDR_S;
1116 			pktbuf = pktdata & ~(CACHELINESIZE - 1);
1117 			/* pktdata can be unaligned. */
1118 			memcpy(&word3, (void *)PHYS_TO_XKPHYS(pktdata -
1119 			    sizeof(uint64_t), CCA_CACHED), sizeof(uint64_t));
1120 			ogx_fpa3_free(&sc->sc_pkt_aura, pktbuf);
1121 		}
1122 	}
1123 
1124 	if_input(ifp, &ml);
1125 
1126 	rxused = ogx_load_mbufs(sc, rxused);
1127 	if (rxused != 0) {
1128 		atomic_add_int(&sc->sc_rxused, rxused);
1129 		timeout_add(&sc->sc_rxrefill, 1);
1130 	}
1131 
1132 	return 1;
1133 
1134 #ifdef DIAGNOSTIC
1135 wqe_error:
1136 	printf("work0: %016llx\n", work[0]);
1137 	printf("work1: %016llx\n", work[1]);
1138 	printf("work2: %016llx\n", work[2]);
1139 	printf("work3: %016llx\n", work[3]);
1140 	printf("work4: %016llx\n", work[4]);
1141 	panic("%s: %s: wqe error", DEVNAME(sc), __func__);
1142 #endif
1143 }
1144 
1145 int
1146 ogx_txintr(void *arg)
1147 {
1148 	struct ogx_softc *sc = arg;
1149 	struct ogx_node *node = sc->sc_node;
1150 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1151 	struct mbuf *m;
1152 	uint64_t *work;
1153 	unsigned int nfreed = 0;
1154 
1155 	/* Acknowledge the interrupt. */
1156 	SSO_WR_8(node, SSO_GRP_INT(PORT_GROUP_TX(sc)), SSO_GRP_INT_EXE_INT);
1157 
1158 	for (;;) {
1159 		work = ogx_get_work(node, PORT_GROUP_TX(sc));
1160 		if (work == NULL)
1161 			break;
1162 
1163 		/*
1164 		 * work points to ph_cookie via the xkphys segment.
1165 		 * ph_cookie contains the original mbuf pointer.
1166 		 */
1167 		m = *(struct mbuf **)work;
1168 		KASSERT(m->m_pkthdr.ph_ifidx == (u_int)(uintptr_t)sc);
1169 		m->m_pkthdr.ph_ifidx = 0;
1170 		m_freem(m);
1171 		nfreed++;
1172 	}
1173 
1174 	if (nfreed > 0 && atomic_add_int_nv(&sc->sc_txfree, nfreed) == nfreed)
1175 		ifq_restart(&ifp->if_snd);
1176 
1177 	return 1;
1178 }
1179 
1180 unsigned int
1181 ogx_load_mbufs(struct ogx_softc *sc, unsigned int n)
1182 {
1183 	struct mbuf *m;
1184 	paddr_t pktbuf;
1185 
1186 	for ( ; n > 0; n--) {
1187 		m = MCLGETL(NULL, M_NOWAIT, MCLBYTES);
1188 		if (m == NULL)
1189 			break;
1190 
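		/*
		 * Align the packet data to a cache line and stash the
		 * owning mbuf pointer in the word just before it so that
		 * ogx_extract_mbuf() can recover the mbuf later.
		 */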
1191 		m->m_data = (void *)(((vaddr_t)m->m_data + CACHELINESIZE) &
1192 		    ~(CACHELINESIZE - 1));
1193 		((struct mbuf **)m->m_data)[-1] = m;
1194 
1195 		pktbuf = KVTOPHYS(m->m_data);
1196 		m->m_pkthdr.ph_cookie = (void *)pktbuf;
1197 		ogx_fpa3_free(&sc->sc_pkt_aura, pktbuf);
1198 	}
1199 	return n;
1200 }
1201 
1202 unsigned int
1203 ogx_unload_mbufs(struct ogx_softc *sc)
1204 {
1205 	struct mbuf *m;
1206 	paddr_t pktbuf;
1207 	unsigned int n = 0;
1208 
1209 	for (;;) {
1210 		pktbuf = ogx_fpa3_alloc(&sc->sc_pkt_aura);
1211 		if (pktbuf == 0)
1212 			break;
1213 		m = ogx_extract_mbuf(sc, pktbuf);
1214 		m_freem(m);
1215 		n++;
1216 	}
1217 	return n;
1218 }
1219 
1220 void
1221 ogx_start(struct ifqueue *ifq)
1222 {
1223 	struct ifnet *ifp = ifq->ifq_if;
1224 	struct ogx_softc *sc = ifp->if_softc;
1225 	struct mbuf *m;
1226 	unsigned int txfree, txused;
1227 
1228 	txfree = READ_ONCE(sc->sc_txfree);
1229 	txused = 0;
1230 
1231 	while (txused < txfree) {
1232 		m = ifq_dequeue(ifq);
1233 		if (m == NULL)
1234 			break;
1235 
1236 #if NBPFILTER > 0
1237 		if (ifp->if_bpf != NULL)
1238 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1239 #endif
1240 
1241 		if (ogx_send_mbuf(sc, m) != 0) {
1242 			m_freem(m);
1243 			ifp->if_oerrors++;
1244 			continue;
1245 		}
1246 		txused++;
1247 	}
1248 
1249 	if (atomic_sub_int_nv(&sc->sc_txfree, txused) == 0)
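	/*
	 * Mark the queue oactive when the last free Tx slot is used;
	 * ogx_txintr() restarts it once completions arrive.
	 */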
1250 		ifq_set_oactive(ifq);
1251 }
1252 
1253 int
1254 ogx_send_mbuf(struct ogx_softc *sc, struct mbuf *m0)
1255 {
1256 	struct ether_header *eh;
1257 	struct mbuf *m;
1258 	uint64_t ehdrlen, hdr, scroff, word;
1259 	unsigned int nfrags;
1260 
1261 	/* Save original pointer for freeing after transmission. */
1262 	m0->m_pkthdr.ph_cookie = m0;
1263 	/* Add a tag for sanity checking. */
1264 	m0->m_pkthdr.ph_ifidx = (u_int)(uintptr_t)sc;
1265 
1266 	hdr = PKO3_SEND_HDR_DF;
1267 	hdr |= m0->m_pkthdr.len << PKO3_SEND_HDR_TOTAL_S;
1268 
1269 	if (m0->m_pkthdr.csum_flags &
1270 	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) {
1271 		eh = mtod(m0, struct ether_header *);
1272 		ehdrlen = ETHER_HDR_LEN;
1273 
1274 		switch (ntohs(eh->ether_type)) {
1275 		case ETHERTYPE_IP:
1276 			hdr |= ehdrlen << PKO3_SEND_HDR_L3PTR_S;
1277 			hdr |= (ehdrlen + sizeof(struct ip)) <<
1278 			    PKO3_SEND_HDR_L4PTR_S;
1279 			break;
1280 		case ETHERTYPE_IPV6:
1281 			hdr |= ehdrlen << PKO3_SEND_HDR_L3PTR_S;
1282 			hdr |= (ehdrlen + sizeof(struct ip6_hdr)) <<
1283 			    PKO3_SEND_HDR_L4PTR_S;
1284 			break;
1285 		default:
1286 			break;
1287 		}
1288 
1289 		if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1290 			hdr |= PKO3_SEND_HDR_CKL3;
1291 		if (m0->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1292 			hdr |= PKO3_SEND_HDR_CKL4_TCP;
1293 		if (m0->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1294 			hdr |= PKO3_SEND_HDR_CKL4_UDP;
1295 	}
1296 
1297 	/* Flush pending writes before packet submission. */
1298 	octeon_syncw();
1299 
1300 	/* Block until any previous LMTDMA request has been processed. */
1301 	octeon_synciobdma();
1302 
1303 	/* Get the LMTDMA region offset in the scratchpad. */
1304 	scroff = 2 * 0x80;
1305 
1306 	octeon_cvmseg_write_8(scroff, hdr);
1307 	scroff += sizeof(hdr);
1308 
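	/*
	 * Emit a gather descriptor for each mbuf fragment.  The command
	 * is limited to 13 gather fragments here; longer chains are
	 * defragmented into a single buffer below.
	 */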
1309 	for (m = m0, nfrags = 0; m != NULL && nfrags < 13;
1310 	    m = m->m_next, nfrags++) {
1311 		word = PKO3_SUBDC3_SEND_GATHER << PKO3_SUBC_BUF_PTR_SUBDC3_S;
1312 		word |= KVTOPHYS(m->m_data) << PKO3_SUBC_BUF_PTR_ADDR_S;
1313 		word |= (uint64_t)m->m_len << PKO3_SUBC_BUF_PTR_SIZE_S;
1314 		octeon_cvmseg_write_8(scroff, word);
1315 		scroff += sizeof(word);
1316 	}
1317 
1318 	if (m != NULL) {
1319 		if (m_defrag(m0, M_DONTWAIT) != 0)
1320 			return ENOMEM;
1321 
1322 		/* Discard previously set fragments. */
1323 		scroff -= sizeof(word) * nfrags;
1324 
1325 		word = PKO3_SUBDC3_SEND_GATHER << PKO3_SUBC_BUF_PTR_SUBDC3_S;
1326 		word |= KVTOPHYS(m0->m_data) << PKO3_SUBC_BUF_PTR_ADDR_S;
1327 		word |= (uint64_t)m0->m_len << PKO3_SUBC_BUF_PTR_SIZE_S;
1328 		octeon_cvmseg_write_8(scroff, word);
1329 		scroff += sizeof(word);
1330 	}
1331 
1332 	/* Send work when ready to free the mbuf. */
1333 	word = PKO3_SEND_WORK_CODE << PKO3_SEND_SUBDC4_CODE_S;
1334 	word |= KVTOPHYS(&m0->m_pkthdr.ph_cookie) << PKO3_SEND_WORK_ADDR_S;
1335 	word |= (uint64_t)PORT_GROUP_TX(sc) << PKO3_SEND_WORK_GRP_S;
1336 	word |= 2ULL << PKO3_SEND_WORK_TT_S;
1337 	octeon_cvmseg_write_8(scroff, word);
1338 	scroff += sizeof(word);
1339 
1340 	/* Submit the command. */
1341 	word = PKO3_LMTDMA_DID;
1342 	word |= ((2ULL * 0x80) >> 3) << PKO3_LMTDMA_SCRADDR_S;
1343 	word |= 1ULL << PKO3_LMTDMA_RTNLEN_S;
1344 	word |= DESC_QUEUE(sc) << PKO3_LMTDMA_DQ_S;
1345 	octeon_lmtdma_write_8((scroff - 8) & 0x78, word);
1346 
1347 	return 0;
1348 }
1349 
1350 int
1351 ogx_media_change(struct ifnet *ifp)
1352 {
1353 	struct ogx_softc *sc = ifp->if_softc;
1354 
1355 	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
1356 		mii_mediachg(&sc->sc_mii);
1357 
1358 	return 0;
1359 }
1360 
1361 void
1362 ogx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1363 {
1364 	struct ogx_softc *sc = ifp->if_softc;
1365 
1366 	if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) {
1367 		mii_pollstat(&sc->sc_mii);
1368 		imr->ifm_status = sc->sc_mii.mii_media_status;
1369 		imr->ifm_active = sc->sc_mii.mii_media_active;
1370 	}
1371 }
1372 
1373 int
1374 ogx_mii_readreg(struct device *self, int phy_no, int reg)
1375 {
1376 	struct ogx_softc *sc = (struct ogx_softc *)self;
1377 
1378 	return cn30xxsmi_read(sc->sc_smi, phy_no, reg);
1379 }
1380 
1381 void
1382 ogx_mii_writereg(struct device *self, int phy_no, int reg, int value)
1383 {
1384 	struct ogx_softc *sc = (struct ogx_softc *)self;
1385 
1386 	cn30xxsmi_write(sc->sc_smi, phy_no, reg, value);
1387 }
1388 
1389 void
1390 ogx_mii_statchg(struct device *self)
1391 {
1392 	struct ogx_softc *sc = (struct ogx_softc *)self;
1393 
1394 	if (ISSET(sc->sc_mii.mii_media_active, IFM_FDX))
1395 		sc->sc_link_duplex = 1;
1396 	else
1397 		sc->sc_link_duplex = 0;
1398 	sc->sc_link_ops->link_change(sc);
1399 }
1400 
1401 int
1402 ogx_sgmii_link_init(struct ogx_softc *sc)
1403 {
1404 	uint64_t cpu_freq = octeon_boot_info->eclock / 1000000;
1405 	uint64_t val;
1406 	int align = 1;
1407 
1408 	val = PORT_RD_8(sc, BGX_GMP_GMI_TX_APPEND);
1409 	val |= BGX_GMP_GMI_TX_APPEND_FCS;
1410 	val |= BGX_GMP_GMI_TX_APPEND_PAD;
1411 	if (ISSET(val, BGX_GMP_GMI_TX_APPEND_PREAMBLE))
1412 		align = 0;
1413 	PORT_WR_8(sc, BGX_GMP_GMI_TX_APPEND, val);
1414 	PORT_WR_8(sc, BGX_GMP_GMI_TX_MIN_PKT, 59);
1415 	PORT_WR_8(sc, BGX_GMP_GMI_TX_THRESH, 0x20);
1416 
1417 	val = PORT_RD_8(sc, BGX_GMP_GMI_TX_SGMII_CTL);
1418 	if (align)
1419 		val |= BGX_GMP_GMI_TX_SGMII_CTL_ALIGN;
1420 	else
1421 		val &= ~BGX_GMP_GMI_TX_SGMII_CTL_ALIGN;
1422 	PORT_WR_8(sc, BGX_GMP_GMI_TX_SGMII_CTL, val);
1423 
1424 	/* Set timing for SGMII. */
1425 	val = PORT_RD_8(sc, BGX_GMP_PCS_LINK_TIMER);
1426 	val &= ~BGX_GMP_PCS_LINK_TIMER_COUNT_M;
1427 	val |= (1600 * cpu_freq) >> 10;
1428 	PORT_WR_8(sc, BGX_GMP_PCS_LINK_TIMER, val);
1429 
1430 	return 0;
1431 }
1432 
1433 void
1434 ogx_sgmii_link_down(struct ogx_softc *sc)
1435 {
1436 	uint64_t val;
1437 	int timeout;
1438 
1439 	/* Wait until the port is idle. */
1440 	for (timeout = 1000; timeout > 0; timeout--) {
1441 		const uint64_t idlemask = BGX_GMP_GMI_PRT_CFG_RX_IDLE |
1442 		    BGX_GMP_GMI_PRT_CFG_TX_IDLE;
1443 		val = PORT_RD_8(sc, BGX_GMP_GMI_PRT_CFG);
1444 		if ((val & idlemask) == idlemask)
1445 			break;
1446 		delay(1000);
1447 	}
1448 	if (timeout == 0)
1449 		printf("%s: port idle timeout\n", DEVNAME(sc));
1450 
1451 	/* Disable autonegotiation and power down the link. */
1452 	val = PORT_RD_8(sc, BGX_GMP_PCS_MR_CONTROL);
1453 	val &= ~BGX_GMP_PCS_MR_CONTROL_AN_EN;
1454 	val |= BGX_GMP_PCS_MR_CONTROL_PWR_DN;
1455 	PORT_WR_8(sc, BGX_GMP_PCS_MR_CONTROL, val);
1456 }
1457 
1458 void
1459 ogx_sgmii_link_change(struct ogx_softc *sc)
1460 {
1461 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1462 	uint64_t config;
1463 	uint64_t misc_ctl;
1464 	uint64_t prt_cfg = 0;
1465 	uint64_t samp_pt;
1466 	uint64_t tx_burst, tx_slot;
1467 	uint64_t val;
1468 	int timeout;
1469 
1470 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
1471 		misc_ctl = PORT_RD_8(sc, BGX_GMP_PCS_MISC_CTL);
1472 		misc_ctl |= BGX_GMP_PCS_MISC_CTL_GMXENO;
1473 		PORT_WR_8(sc, BGX_GMP_PCS_MISC_CTL, misc_ctl);
1474 		return;
1475 	}
1476 
1477 	val = PORT_RD_8(sc, BGX_CMR_CONFIG);
1478 	val |= BGX_CMR_CONFIG_ENABLE;
1479 	PORT_WR_8(sc, BGX_CMR_CONFIG, val);
1480 
1481 	/* Reset the PCS. */
1482 	val = PORT_RD_8(sc, BGX_GMP_PCS_MR_CONTROL);
1483 	val |= BGX_GMP_PCS_MR_CONTROL_RESET;
1484 	PORT_WR_8(sc, BGX_GMP_PCS_MR_CONTROL, val);
1485 
1486 	/* Wait for the reset to complete. */
1487 	timeout = 100000;
1488 	while (timeout-- > 0) {
1489 		val = PORT_RD_8(sc, BGX_GMP_PCS_MR_CONTROL);
1490 		if (!ISSET(val, BGX_GMP_PCS_MR_CONTROL_RESET))
1491 			break;
1492 		delay(10);
1493 	}
1494 	if (timeout == 0)
1495 		printf("%s: SGMII reset timeout\n", DEVNAME(sc));
1496 
1497 	/* Use MAC mode. */
1498 	val = PORT_RD_8(sc, BGX_GMP_PCS_MISC_CTL);
1499 	val &= ~BGX_GMP_PCS_MISC_CTL_MAC_PHY;
1500 	val &= ~BGX_GMP_PCS_MISC_CTL_MODE;
1501 	PORT_WR_8(sc, BGX_GMP_PCS_MISC_CTL, val);
1502 
1503 	/* Start autonegotiation between the SoC and the PHY. */
1504 	val = PORT_RD_8(sc, BGX_GMP_PCS_MR_CONTROL);
1505 	val |= BGX_GMP_PCS_MR_CONTROL_AN_EN;
1506 	val |= BGX_GMP_PCS_MR_CONTROL_RST_AN;
1507 	val &= ~BGX_GMP_PCS_MR_CONTROL_PWR_DN;
1508 	PORT_WR_8(sc, BGX_GMP_PCS_MR_CONTROL, val);
1509 
1510 	/* Wait for the autonegotiation to complete. */
1511 	timeout = 100000;
1512 	while (timeout-- > 0) {
1513 		val = PORT_RD_8(sc, BGX_GMP_PCS_MR_STATUS);
1514 		if (ISSET(val, BGX_GMP_PCS_MR_STATUS_AN_CPT))
1515 			break;
1516 		delay(10);
1517 	}
1518 	if (timeout == 0)
1519 		printf("%s: SGMII autonegotiation timeout\n", DEVNAME(sc));
1520 
1521 	/* Stop Rx and Tx engines. */
1522 	config = PORT_RD_8(sc, BGX_CMR_CONFIG);
1523 	config &= ~BGX_CMR_CONFIG_DATA_PKT_RX_EN;
1524 	config &= ~BGX_CMR_CONFIG_DATA_PKT_TX_EN;
1525 	PORT_WR_8(sc, BGX_CMR_CONFIG, config);
1526 	(void)PORT_RD_8(sc, BGX_CMR_CONFIG);
1527 
1528 	/* Wait until the engines are idle. */
1529 	for (timeout = 1000000; timeout > 0; timeout--) {
1530 		const uint64_t idlemask = BGX_GMP_GMI_PRT_CFG_RX_IDLE |
1531 		    BGX_GMP_GMI_PRT_CFG_TX_IDLE;
1532 		prt_cfg = PORT_RD_8(sc, BGX_GMP_GMI_PRT_CFG);
1533 		if ((prt_cfg & idlemask) == idlemask)
1534 			break;
1535 		delay(1);
1536 	}
1537 	if (timeout == 0)
1538 		printf("%s: port idle timeout\n", DEVNAME(sc));
1539 
1540 	if (sc->sc_link_duplex)
1541 		prt_cfg |= BGX_GMP_GMI_PRT_CFG_DUPLEX;
1542 	else
1543 		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_DUPLEX;
1544 
1545 	switch (ifp->if_baudrate) {
1546 	case IF_Mbps(10):
1547 		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SPEED;
1548 		prt_cfg |= BGX_GMP_GMI_PRT_CFG_SPEED_MSB;
1549 		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SLOTTIME;
1550 		samp_pt = 25;
1551 		tx_slot = 0x40;
1552 		tx_burst = 0;
1553 		break;
1554 	case IF_Mbps(100):
1555 		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SPEED;
1556 		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SPEED_MSB;
1557 		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SLOTTIME;
1558 		samp_pt = 5;
1559 		tx_slot = 0x40;
1560 		tx_burst = 0;
1561 		break;
1562 	case IF_Gbps(1):
1563 	default:
1564 		prt_cfg |= BGX_GMP_GMI_PRT_CFG_SPEED;
1565 		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SPEED_MSB;
1566 		prt_cfg |= BGX_GMP_GMI_PRT_CFG_SLOTTIME;
1567 		samp_pt = 1;
1568 		tx_slot = 0x200;
1569 		if (sc->sc_link_duplex)
1570 			tx_burst = 0;
1571 		else
1572 			tx_burst = 0x2000;
1573 		break;
1574 	}
1575 
1576 	PORT_WR_8(sc, BGX_GMP_GMI_TX_SLOT, tx_slot);
1577 	PORT_WR_8(sc, BGX_GMP_GMI_TX_BURST, tx_burst);
1578 
1579 	misc_ctl = PORT_RD_8(sc, BGX_GMP_PCS_MISC_CTL);
1580 	misc_ctl &= ~BGX_GMP_PCS_MISC_CTL_GMXENO;
1581 	misc_ctl &= ~BGX_GMP_PCS_MISC_CTL_SAMP_PT_M;
1582 	misc_ctl |= samp_pt << BGX_GMP_PCS_MISC_CTL_SAMP_PT_S;
1583 	PORT_WR_8(sc, BGX_GMP_PCS_MISC_CTL, misc_ctl);
1584 	(void)PORT_RD_8(sc, BGX_GMP_PCS_MISC_CTL);
1585 
1586 	PORT_WR_8(sc, BGX_GMP_GMI_PRT_CFG, prt_cfg);
1587 	(void)PORT_RD_8(sc, BGX_GMP_GMI_PRT_CFG);
1588 
1589 	config = PORT_RD_8(sc, BGX_CMR_CONFIG);
1590 	config |= BGX_CMR_CONFIG_ENABLE |
1591 	    BGX_CMR_CONFIG_DATA_PKT_RX_EN |
1592 	    BGX_CMR_CONFIG_DATA_PKT_TX_EN;
1593 	PORT_WR_8(sc, BGX_CMR_CONFIG, config);
1594 	(void)PORT_RD_8(sc, BGX_CMR_CONFIG);
1595 }
1596 
1597 #if NKSTAT > 0
1598 enum ogx_stat {
1599 	ogx_stat_rx_hmin,
1600 	ogx_stat_rx_h64,
1601 	ogx_stat_rx_h128,
1602 	ogx_stat_rx_h256,
1603 	ogx_stat_rx_h512,
1604 	ogx_stat_rx_h1024,
1605 	ogx_stat_rx_hmax,
1606 	ogx_stat_rx_totp_pki,
1607 	ogx_stat_rx_toto_pki,
1608 	ogx_stat_rx_raw,
1609 	ogx_stat_rx_drop,
1610 	ogx_stat_rx_bcast,
1611 	ogx_stat_rx_mcast,
1612 	ogx_stat_rx_fcs_error,
1613 	ogx_stat_rx_fcs_undersz,
1614 	ogx_stat_rx_undersz,
1615 	ogx_stat_rx_fcs_oversz,
1616 	ogx_stat_rx_oversz,
1617 	ogx_stat_rx_error,
1618 	ogx_stat_rx_special,
1619 	ogx_stat_rx_bdrop,
1620 	ogx_stat_rx_mdrop,
1621 	ogx_stat_rx_ipbdrop,
1622 	ogx_stat_rx_ipmdrop,
1623 	ogx_stat_rx_sdrop,
1624 	ogx_stat_rx_totp_bgx,
1625 	ogx_stat_rx_toto_bgx,
1626 	ogx_stat_rx_pause,
1627 	ogx_stat_rx_dmac,
1628 	ogx_stat_rx_bgx_drop,
1629 	ogx_stat_rx_bgx_error,
1630 	ogx_stat_tx_hmin,
1631 	ogx_stat_tx_h64,
1632 	ogx_stat_tx_h65,
1633 	ogx_stat_tx_h128,
1634 	ogx_stat_tx_h256,
1635 	ogx_stat_tx_h512,
1636 	ogx_stat_tx_h1024,
1637 	ogx_stat_tx_hmax,
1638 	ogx_stat_tx_coll,
1639 	ogx_stat_tx_defer,
1640 	ogx_stat_tx_mcoll,
1641 	ogx_stat_tx_scoll,
1642 	ogx_stat_tx_toto_bgx,
1643 	ogx_stat_tx_totp_bgx,
1644 	ogx_stat_tx_bcast,
1645 	ogx_stat_tx_mcast,
1646 	ogx_stat_tx_uflow,
1647 	ogx_stat_tx_control,
1648 	ogx_stat_count
1649 };
1650 
1651 enum ogx_counter_type {
1652 	C_NONE = 0,
1653 	C_BGX,
1654 	C_PKI,
1655 };
1656 
1657 struct ogx_counter {
1658 	const char		*c_name;
1659 	enum kstat_kv_unit	 c_unit;
1660 	enum ogx_counter_type	 c_type;
1661 	uint32_t		 c_reg;
1662 };
1663 
1664 static const struct ogx_counter ogx_counters[ogx_stat_count] = {
1665 	[ogx_stat_rx_hmin] =
1666 	    { "rx 1-63B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST0 },
1667 	[ogx_stat_rx_h64] =
1668 	    { "rx 64-127B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST1 },
1669 	[ogx_stat_rx_h128] =
1670 	    { "rx 128-255B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST2 },
1671 	[ogx_stat_rx_h256] =
1672 	    { "rx 256-511B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST3 },
1673 	[ogx_stat_rx_h512] =
1674 	    { "rx 512-1023B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST4 },
1675 	[ogx_stat_rx_h1024] =
1676 	    { "rx 1024-1518B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST5 },
1677 	[ogx_stat_rx_hmax] =
1678 	    { "rx 1519-maxB",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST6 },
1679 	[ogx_stat_rx_totp_pki] =
1680 	    { "rx total pki",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT0 },
1681 	[ogx_stat_rx_toto_pki] =
1682 	    { "rx total pki",	KSTAT_KV_U_BYTES, C_PKI, PKI_STAT_STAT1 },
1683 	[ogx_stat_rx_raw] =
1684 	    { "rx raw",		KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT2 },
1685 	[ogx_stat_rx_drop] =
1686 	    { "rx drop",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT3 },
1687 	[ogx_stat_rx_bcast] =
1688 	    { "rx bcast",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT5 },
1689 	[ogx_stat_rx_mcast] =
1690 	    { "rx mcast",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT6 },
1691 	[ogx_stat_rx_fcs_error] =
1692 	    { "rx fcs error",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT7 },
1693 	[ogx_stat_rx_fcs_undersz] =
1694 	    { "rx fcs undersz",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT8 },
1695 	[ogx_stat_rx_undersz] =
1696 	    { "rx undersz",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT9 },
1697 	[ogx_stat_rx_fcs_oversz] =
1698 	    { "rx fcs oversz",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT10 },
1699 	[ogx_stat_rx_oversz] =
1700 	    { "rx oversize",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT11 },
1701 	[ogx_stat_rx_error] =
1702 	    { "rx error",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT12 },
1703 	[ogx_stat_rx_special] =
1704 	    { "rx special",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT13 },
1705 	[ogx_stat_rx_bdrop] =
1706 	    { "rx drop bcast",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT14 },
1707 	[ogx_stat_rx_mdrop] =
1708 	    { "rx drop mcast",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT15 },
1709 	[ogx_stat_rx_ipbdrop] =
1710 	    { "rx drop ipbcast",KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT16 },
1711 	[ogx_stat_rx_ipmdrop] =
1712 	    { "rx drop ipmcast",KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT17 },
1713 	[ogx_stat_rx_sdrop] =
1714 	    { "rx drop special",KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT18 },
1715 	[ogx_stat_rx_totp_bgx] =
1716 	    { "rx total bgx",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT0 },
1717 	[ogx_stat_rx_toto_bgx] =
1718 	    { "rx total bgx",	KSTAT_KV_U_BYTES, C_BGX, BGX_CMR_RX_STAT1 },
1719 	[ogx_stat_rx_pause] =
1720 	    { "rx bgx pause",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT2 },
1721 	[ogx_stat_rx_dmac] =
1722 	    { "rx bgx dmac",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT4 },
1723 	[ogx_stat_rx_bgx_drop] =
1724 	    { "rx bgx drop",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT6 },
1725 	[ogx_stat_rx_bgx_error] =
1726 	    { "rx bgx error",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT8 },
1727 	[ogx_stat_tx_hmin] =
1728 	    { "tx 1-63B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT6 },
1729 	[ogx_stat_tx_h64] =
1730 	    { "tx 64B",		KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT7 },
1731 	[ogx_stat_tx_h65] =
1732 	    { "tx 65-127B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT8 },
1733 	[ogx_stat_tx_h128] =
1734 	    { "tx 128-255B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT9 },
1735 	[ogx_stat_tx_h256] =
1736 	    { "tx 256-511B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT10 },
1737 	[ogx_stat_tx_h512] =
1738 	    { "tx 512-1023B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT11 },
1739 	[ogx_stat_tx_h1024] =
1740 	    { "tx 1024-1518B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT12 },
1741 	[ogx_stat_tx_hmax] =
1742 	    { "tx 1519-maxB",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT13 },
1743 	[ogx_stat_tx_coll] =
1744 	    { "tx coll",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT0 },
1745 	[ogx_stat_tx_defer] =
1746 	    { "tx defer",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT1 },
1747 	[ogx_stat_tx_mcoll] =
1748 	    { "tx mcoll",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT2 },
1749 	[ogx_stat_tx_scoll] =
1750 	    { "tx scoll",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT3 },
1751 	[ogx_stat_tx_toto_bgx] =
1752 	    { "tx total bgx",	KSTAT_KV_U_BYTES, C_BGX, BGX_CMR_TX_STAT4 },
1753 	[ogx_stat_tx_totp_bgx] =
1754 	    { "tx total bgx",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT5 },
1755 	[ogx_stat_tx_bcast] =
1756 	    { "tx bcast",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT14 },
1757 	[ogx_stat_tx_mcast] =
1758 	    { "tx mcast",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT15 },
1759 	[ogx_stat_tx_uflow] =
1760 	    { "tx underflow",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT16 },
1761 	[ogx_stat_tx_control] =
1762 	    { "tx control",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT17 },
1763 };
1764 
1765 void
1766 ogx_kstat_attach(struct ogx_softc *sc)
1767 {
1768 	const struct ogx_counter *c;
1769 	struct kstat *ks;
1770 	struct kstat_kv *kvs;
1771 	struct ogx_node *node = sc->sc_node;
1772 	uint64_t *vals;
1773 	int i;
1774 
1775 	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
1776 	timeout_set(&sc->sc_kstat_tmo, ogx_kstat_tick, sc);
1777 
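	/* Map this port's PKI statistics registers. */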
1778 	if (bus_space_subregion(node->node_iot, node->node_pki,
1779 	    PKI_STAT_BASE(PORT_PKIND(sc)), PKI_STAT_SIZE,
1780 	    &sc->sc_pki_stat_ioh) != 0)
1781 		return;
1782 
1783 	ks = kstat_create(DEVNAME(sc), 0, "ogx-stats", 0, KSTAT_T_KV, 0);
1784 	if (ks == NULL)
1785 		return;
1786 
1787 	vals = mallocarray(nitems(ogx_counters), sizeof(*vals),
1788 	    M_DEVBUF, M_WAITOK | M_ZERO);
1789 	sc->sc_counter_vals = vals;
1790 
1791 	kvs = mallocarray(nitems(ogx_counters), sizeof(*kvs),
1792 	    M_DEVBUF, M_WAITOK | M_ZERO);
1793 	for (i = 0; i < nitems(ogx_counters); i++) {
1794 		c = &ogx_counters[i];
1795 		kstat_kv_unit_init(&kvs[i], c->c_name, KSTAT_KV_T_COUNTER64,
1796 		    c->c_unit);
1797 	}
1798 
1799 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
1800 	ks->ks_softc = sc;
1801 	ks->ks_data = kvs;
1802 	ks->ks_datalen = nitems(ogx_counters) * sizeof(*kvs);
1803 	ks->ks_read = ogx_kstat_read;
1804 
1805 	sc->sc_kstat = ks;
1806 	kstat_install(ks);
1807 }
1808 
1809 int
1810 ogx_kstat_read(struct kstat *ks)
1811 {
1812 	const struct ogx_counter *c;
1813 	struct ogx_softc *sc = ks->ks_softc;
1814 	struct kstat_kv *kvs = ks->ks_data;
1815 	uint64_t *counter_vals = sc->sc_counter_vals;
1816 	uint64_t delta, val;
1817 	int i, timeout;
1818 
1819 	for (i = 0; i < nitems(ogx_counters); i++) {
1820 		c = &ogx_counters[i];
1821 		switch (c->c_type) {
1822 		case C_BGX:
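			/*
			 * BGX counters wrap at BGX_CMR_STAT_MASK, so
			 * accumulate the masked difference from the
			 * previous raw reading into the 64-bit kstat.
			 */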
1823 			val = PORT_RD_8(sc, c->c_reg);
1824 			delta = (val - counter_vals[i]) & BGX_CMR_STAT_MASK;
1825 			counter_vals[i] = val;
1826 			kstat_kv_u64(&kvs[i]) += delta;
1827 			break;
1828 		case C_PKI:
1829 			/*
1830 			 * Retry the read if the value is bogus.
1831 			 * This can happen on some hardware while
1832 			 * the counter is being updated.
1833 			 */
1834 			for (timeout = 100; timeout > 0; timeout--) {
1835 				val = bus_space_read_8(sc->sc_iot,
1836 				    sc->sc_pki_stat_ioh, c->c_reg);
1837 				if (val != ~0ULL) {
1838 					delta = (val - counter_vals[i]) &
1839 					    PKI_STAT_MASK;
1840 					counter_vals[i] = val;
1841 					kstat_kv_u64(&kvs[i]) += delta;
1842 					break;
1843 				}
1844 				CPU_BUSY_CYCLE();
1845 			}
1846 			break;
1847 		case C_NONE:
1848 			break;
1849 		}
1850 	}
1851 
1852 	getnanouptime(&ks->ks_updated);
1853 
1854 	return 0;
1855 }
1856 
1857 void
1858 ogx_kstat_start(struct ogx_softc *sc)
1859 {
1860 	const struct ogx_counter *c;
1861 	int i;
1862 
1863 	/* Zero the counters. */
1864 	for (i = 0; i < nitems(ogx_counters); i++) {
1865 		c = &ogx_counters[i];
1866 		switch (c->c_type) {
1867 		case C_BGX:
1868 			PORT_WR_8(sc, c->c_reg, 0);
1869 			break;
1870 		case C_PKI:
1871 			bus_space_write_8(sc->sc_iot, sc->sc_pki_stat_ioh,
1872 			    c->c_reg, 0);
1873 			break;
1874 		case C_NONE:
1875 			break;
1876 		}
1877 	}
1878 	memset(sc->sc_counter_vals, 0,
1879 	    nitems(ogx_counters) * sizeof(*sc->sc_counter_vals));
1880 
1881 	timeout_add_sec(&sc->sc_kstat_tmo, OGX_KSTAT_TICK_SECS);
1882 }
1883 
1884 void
1885 ogx_kstat_stop(struct ogx_softc *sc)
1886 {
1887 	timeout_del_barrier(&sc->sc_kstat_tmo);
1888 
1889 	mtx_enter(&sc->sc_kstat_mtx);
1890 	ogx_kstat_read(sc->sc_kstat);
1891 	mtx_leave(&sc->sc_kstat_mtx);
1892 }
1893 
1894 void
1895 ogx_kstat_tick(void *arg)
1896 {
1897 	struct ogx_softc *sc = arg;
1898 
1899 	timeout_add_sec(&sc->sc_kstat_tmo, OGX_KSTAT_TICK_SECS);
1900 
1901 	if (mtx_enter_try(&sc->sc_kstat_mtx)) {
1902 		ogx_kstat_read(sc->sc_kstat);
1903 		mtx_leave(&sc->sc_kstat_mtx);
1904 	}
1905 }
1906 #endif /* NKSTAT > 0 */
1907 
1908 int
1909 ogx_node_init(struct ogx_node **pnode, bus_dma_tag_t dmat, bus_space_tag_t iot)
1910 {
1911 	const struct ogx_config *cfg;
1912 	struct ogx_node *node = &ogx_node;
1913 	uint64_t val;
1914 	uint32_t chipid;
1915 	int cl, i, timeout;
1916 
1917 	if (node->node_flags & NODE_INITED) {
1918 		*pnode = node;
1919 		return 0;
1920 	}
1921 
1922 	chipid = octeon_get_chipid();
1923 	switch (octeon_model_family(chipid)) {
1924 	case OCTEON_MODEL_FAMILY_CN73XX:
1925 		node->node_cfg = cfg = &ogx_cn73xx_config;
1926 		break;
1927 	case OCTEON_MODEL_FAMILY_CN78XX:
1928 		node->node_cfg = cfg = &ogx_cn78xx_config;
1929 		break;
1930 	default:
1931 		printf(": unhandled chipid 0x%x\n", chipid);
1932 		return -1;
1933 	}
1934 
1935 	rw_init(&node->node_lock, "ogxnlk");
1936 
1937 	node->node_dmat = dmat;
1938 	node->node_iot = iot;
1939 	if (bus_space_map(node->node_iot, FPA3_BASE, FPA3_SIZE, 0,
1940 	    &node->node_fpa3)) {
1941 		printf(": can't map FPA3\n");
1942 		goto error;
1943 	}
1944 	if (bus_space_map(node->node_iot, PKI_BASE, PKI_SIZE, 0,
1945 	    &node->node_pki)) {
1946 		printf(": can't map PKI\n");
1947 		goto error;
1948 	}
1949 	if (bus_space_map(node->node_iot, PKO3_BASE, PKO3_SIZE, 0,
1950 	    &node->node_pko3)) {
1951 		printf(": can't map PKO3\n");
1952 		goto error;
1953 	}
1954 	if (bus_space_map(node->node_iot, SSO_BASE, SSO_SIZE, 0,
1955 	    &node->node_sso)) {
1956 		printf(": can't map SSO\n");
1957 		goto error;
1958 	}
1959 
1960 	/*
1961 	 * The rest of this function handles errors by panicking.
1962 	 */
1963 
1964 	node->node_flags |= NODE_INITED;
1965 
1966 	PKO3_WR_8(node, PKO3_CHANNEL_LEVEL, 0);
1967 
1968 	ogx_fpa3_pool_init(node, &node->node_pkt_pool, OGX_POOL_PKT, 1024 * 32);
1969 	ogx_fpa3_pool_init(node, &node->node_pko_pool, OGX_POOL_PKO, 1024 * 32);
1970 	ogx_fpa3_pool_init(node, &node->node_sso_pool, OGX_POOL_SSO, 1024 * 32);
1971 
1972 	ogx_fpa3_aura_init(node, &node->node_pko_aura, OGX_AURA_PKO,
1973 	    &node->node_pko_pool);
1974 	ogx_fpa3_aura_init(node, &node->node_sso_aura, OGX_AURA_SSO,
1975 	    &node->node_sso_pool);
1976 
1977 	ogx_fpa3_aura_load(node, &node->node_sso_aura, 1024, 4096);
1978 	ogx_fpa3_aura_load(node, &node->node_pko_aura, 1024, 4096);
1979 
1980 	/*
1981 	 * Initialize the Schedule/Synchronization/Order (SSO) unit.
1982 	 */
1983 
1984 	val = SSO_AW_CFG_LDWB | SSO_AW_CFG_LDT | SSO_AW_CFG_STT;
1985 	SSO_WR_8(node, SSO_AW_CFG, val);
1986 
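	/* Have the SSO draw its XAQ blocks from the SSO aura set up above. */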
1987 	val = node->node_id << SSO_XAQ_AURA_NODE_S;
1988 	val |= (uint64_t)OGX_AURA_SSO << SSO_XAQ_AURA_LAURA_S;
1989 	SSO_WR_8(node, SSO_XAQ_AURA, val);
1990 
1991 	SSO_WR_8(node, SSO_ERR0, 0);
1992 
1993 	/* Initialize the hardware's linked lists. */
1994 	for (i = 0; i < 64; i++) {
1995 		paddr_t addr;
1996 
1997 		addr = ogx_fpa3_alloc(&node->node_sso_aura);
1998 		if (addr == 0)
1999 			panic("%s: could not alloc initial XAQ block %d",
2000 			    __func__, i);
2001 		SSO_WR_8(node, SSO_XAQ_HEAD_PTR(i), addr);
2002 		SSO_WR_8(node, SSO_XAQ_TAIL_PTR(i), addr);
2003 		SSO_WR_8(node, SSO_XAQ_HEAD_NEXT(i), addr);
2004 		SSO_WR_8(node, SSO_XAQ_TAIL_NEXT(i), addr);
2005 
2006 		SSO_WR_8(node, SSO_GRP_PRI(i), SSO_GRP_PRI_WEIGHT_M);
2007 	}
2008 
2009 	val = SSO_RD_8(node, SSO_AW_CFG);
2010 	val |= SSO_AW_CFG_RWEN;
2011 	SSO_WR_8(node, SSO_AW_CFG, val);
2012 
2013 	/*
2014 	 * Initialize the Packet Input (PKI) unit.
2015 	 */
2016 
2017 	/* Clear any previous style configuration. */
2018 	for (cl = 0; cl < cfg->cfg_nclusters; cl++) {
2019 		int pkind;
2020 
2021 		for (pkind = 0; pkind < 64; pkind++)
2022 			PKI_WR_8(node, PKI_CL_PKIND_STYLE(cl, pkind), 0);
2023 	}
2024 
2025 	/* Invalidate all PCAM entries. */
2026 	for (cl = 0; cl < cfg->cfg_nclusters; cl++) {
2027 		int bank;
2028 
2029 		for (bank = 0; bank < 2; bank++) {
2030 			for (i = 0; i < 192; i++) {
2031 				PKI_WR_8(node,
2032 				    PKI_CL_PCAM_TERM(cl, bank, i), 0);
2033 			}
2034 		}
2035 	}
2036 
2037 	PKI_WR_8(node, PKI_STAT_CTL, 0);
2038 
2039 	/* Enable input backpressure. */
2040 	val = PKI_RD_8(node, PKI_BUF_CTL);
2041 	val |= PKI_BUF_CTL_PBP_EN;
2042 	PKI_WR_8(node, PKI_BUF_CTL, val);
2043 
2044 	/* Disable the parsing clusters until the firmware has been loaded. */
2045 	for (cl = 0; cl < cfg->cfg_nclusters; cl++) {
2046 		val = PKI_RD_8(node, PKI_ICG_CFG(cl));
2047 		val &= ~PKI_ICG_CFG_PENA;
2048 		PKI_WR_8(node, PKI_ICG_CFG(cl), val);
2049 	}
2050 
2051 	val = PKI_RD_8(node, PKI_GBL_PEN);
2052 	val &= ~PKI_GBL_PEN_M;
2053 	val |= PKI_GBL_PEN_L3;
2054 	val |= PKI_GBL_PEN_L4;
2055 	PKI_WR_8(node, PKI_GBL_PEN, val);
2056 
2057 	for (i = 0; i < nitems(ogx_ltypes); i++) {
2058 		val = PKI_RD_8(node, PKI_LTYPE_MAP(i));
2059 		val &= ~0x7;
2060 		val |= ogx_ltypes[i];
2061 		PKI_WR_8(node, PKI_LTYPE_MAP(i), val);
2062 	}
2063 
2064 	while (PKI_RD_8(node, PKI_SFT_RST) & PKI_SFT_RST_BUSY)
2065 		delay(1);
2066 
2067 	val = PKI_RD_8(node, PKI_BUF_CTL);
2068 	val |= PKI_BUF_CTL_PKI_EN;
2069 	PKI_WR_8(node, PKI_BUF_CTL, val);
2070 
2071 	/*
2072 	 * Initialize the Packet Output (PKO) unit.
2073 	 */
2074 
2075 	/* Detach MACs from FIFOs. */
2076 	for (i = 0; i < cfg->cfg_nmacs; i++) {
2077 		val = PKO3_RD_8(node, PKO3_MAC_CFG(i));
2078 		val |= PKO3_MAC_CFG_FIFO_NUM_M;
2079 		PKO3_WR_8(node, PKO3_MAC_CFG(i), val);
2080 	}
2081 
2082 	/* Attach port queues to the NULL FIFO. */
2083 	for (i = 0; i < cfg->cfg_npqs; i++) {
2084 		val = (uint64_t)cfg->cfg_nullmac << PKO3_L1_SQ_TOPOLOGY_LINK_S;
2085 		PKO3_WR_8(node, PKO3_L1_SQ_TOPOLOGY(i), val);
2086 		val = (uint64_t)cfg->cfg_nullmac << PKO3_L1_SQ_SHAPE_LINK_S;
2087 		PKO3_WR_8(node, PKO3_L1_SQ_SHAPE(i), val);
2088 		val = (uint64_t)cfg->cfg_nullmac << PKO3_L1_SQ_LINK_LINK_S;
2089 		PKO3_WR_8(node, PKO3_L1_SQ_LINK(i), val);
2090 	}
2091 
2092 	/* Reset the FIFO groups to use 2.5 KB per each FIFO. */
2093 	for (i = 0; i < cfg->cfg_nfifogrps; i++) {
2094 		val = PKO3_RD_8(node, PKO3_PTGF_CFG(i));
2095 		val &= ~PKO3_PTGF_CFG_SIZE_M;
2096 		val &= ~PKO3_PTGF_CFG_RATE_M;
2097 		val |= 2 << PKO3_PTGF_CFG_RATE_S;
2098 		val |= PKO3_PTGF_CFG_RESET;
2099 		PKO3_WR_8(node, PKO3_PTGF_CFG(i), val);
2100 
2101 		val = PKO3_RD_8(node, PKO3_PTGF_CFG(i));
2102 		val &= ~PKO3_PTGF_CFG_RESET;
2103 		PKO3_WR_8(node, PKO3_PTGF_CFG(i), val);
2104 	}
2105 
2106 	PKO3_WR_8(node, PKO3_DPFI_FLUSH, 0);
2107 
2108 	/* Set PKO aura. */
2109 	val = ((uint64_t)node->node_id << PKO3_DPFI_FPA_AURA_NODE_S) |
2110 	    (OGX_AURA_PKO << PKO3_DPFI_FPA_AURA_AURA_S);
2111 	PKO3_WR_8(node, PKO3_DPFI_FPA_AURA, val);
2112 
2113 	/* Allow PKO to use the FPA. */
2114 	PKO3_WR_8(node, PKO3_DPFI_FPA_ENA, PKO3_DPFI_FPA_ENA_ENABLE);
2115 
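	/* Wait for the PKO hardware to report that it is ready. */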
2116 	timeout = 1000;
2117 	while (timeout-- > 0) {
2118 		val = PKO3_RD_8(node, PKO3_STATUS);
2119 		if (ISSET(val, PKO3_STATUS_PKO_RDY))
2120 			break;
2121 		delay(1000);
2122 	}
2123 	if (timeout < 0)
2124 		panic("PKO timeout");
2125 
2126 	val = 72 << PKO3_PTF_IOBP_CFG_MAX_RD_SZ_S;
2127 	PKO3_WR_8(node, PKO3_PTF_IOBP_CFG, val);
2128 
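	/* Pad transmitted frames to a minimum of 60 bytes. */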
2129 	val = 60 << PKO3_PDM_CFG_MIN_PAD_LEN_S;
2130 	PKO3_WR_8(node, PKO3_PDM_CFG, val);
2131 
2132 	PKO3_WR_8(node, PKO3_ENABLE, PKO3_ENABLE_ENABLE);
2133 
2134 	*pnode = node;
2135 	return 0;
2136 
2137 error:
2138 	if (node->node_sso != 0)
2139 		bus_space_unmap(node->node_iot, node->node_sso, SSO_SIZE);
2140 	if (node->node_pko3 != 0)
2141 		bus_space_unmap(node->node_iot, node->node_pko3, PKO3_SIZE);
2142 	if (node->node_pki != 0)
2143 		bus_space_unmap(node->node_iot, node->node_pki, PKI_SIZE);
2144 	if (node->node_fpa3 != 0)
2145 		bus_space_unmap(node->node_iot, node->node_fpa3, FPA3_SIZE);
2146 	node->node_sso = 0;
2147 	node->node_pko3 = 0;
2148 	node->node_pki = 0;
2149 	node->node_fpa3 = 0;
2150 	return 1;
2151 }
2152 
2153 paddr_t
2154 ogx_fpa3_alloc(struct fpa3aura *aura)
2155 {
2156 	uint64_t op;
2157 
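	/*
	 * A load from this I/O address pops one free block pointer
	 * from the aura; callers treat a zero result as failure.
	 */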
2158 	op = FPA3_LD_IO | FPA3_LD_DID;
2159 	op |= (uint64_t)aura->nodeid << FPA3_LD_NODE_S;
2160 	op |= (uint64_t)aura->auraid << FPA3_LD_AURA_S;
2161 	return octeon_xkphys_read_8(op);
2162 }
2163 
2164 void
2165 ogx_fpa3_free(struct fpa3aura *aura, paddr_t addr)
2166 {
2167 	uint64_t op;
2168 
2169 	/* Flush pending writes before the block is freed. */
2170 	octeon_syncw();
2171 
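	/* A store to this I/O address pushes the block back to the aura. */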
2172 	op = FPA3_ST_IO | FPA3_ST_DID_FPA;
2173 	op |= (uint64_t)aura->nodeid << FPA3_ST_NODE_S;
2174 	op |= (uint64_t)aura->auraid << FPA3_ST_AURA_S;
2175 	octeon_xkphys_write_8(op, addr);
2176 }
2177 
2178 void
2179 ogx_fpa3_pool_init(struct ogx_node *node, struct fpa3pool *pool,
2180     uint32_t poolid, uint32_t nentries)
2181 {
2182 	size_t segsize;
2183 	int rsegs;
2184 
2185 	segsize = nentries * 16;
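	/* Reserve 16 bytes of free-pointer stack space per entry. */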
2186 
2187 	pool->nodeid = node->node_id;
2188 	pool->poolid = poolid;
2189 
2190 	if (bus_dmamap_create(node->node_dmat, segsize, 1, segsize, 0,
2191 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &pool->dmap))
2192 		panic("%s: out of memory", __func__);
2193 	if (bus_dmamem_alloc(node->node_dmat, segsize, CACHELINESIZE,
2194 	    0, &pool->dmaseg, 1, &rsegs,
2195 	    BUS_DMA_NOWAIT | BUS_DMA_ZERO))
2196 		panic("%s: out of memory", __func__);
2197 	if (bus_dmamem_map(node->node_dmat, &pool->dmaseg, 1, segsize,
2198 	    &pool->kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
2199 		panic("%s: bus_dmamem_map", __func__);
2200 	if (bus_dmamap_load(node->node_dmat, pool->dmap, pool->kva, segsize,
2201 	    NULL, BUS_DMA_NOWAIT))
2202 		panic("%s: bus_dmamap_load", __func__);
2203 
2204 	/* Disable the pool before setup. */
2205 	FPA3_WR_8(node, FPA3_POOL_CFG(poolid), 0);
2206 
2207 	/* Set permitted address range of stored pointers. */
2208 	FPA3_WR_8(node, FPA3_POOL_START_ADDR(poolid), CACHELINESIZE);
2209 	FPA3_WR_8(node, FPA3_POOL_END_ADDR(poolid), UINT32_MAX);
2210 
2211 	/* Set up the pointer stack. */
2212 	FPA3_WR_8(node, FPA3_POOL_STACK_BASE(poolid), pool->dmaseg.ds_addr);
2213 	FPA3_WR_8(node, FPA3_POOL_STACK_ADDR(poolid), pool->dmaseg.ds_addr);
2214 	FPA3_WR_8(node, FPA3_POOL_STACK_END(poolid), pool->dmaseg.ds_addr +
2215 	    pool->dmaseg.ds_len);
2216 
2217 	/* Re-enable the pool. */
2218 	FPA3_WR_8(node, FPA3_POOL_CFG(poolid), FPA3_POOL_CFG_ENA);
2219 }
2220 
2221 void
2222 ogx_fpa3_aura_init(struct ogx_node *node, struct fpa3aura *aura,
2223     uint32_t auraid, struct fpa3pool *pool)
2224 {
2225 	KASSERT(node->node_id == pool->nodeid);
2226 
2227 	aura->nodeid = pool->nodeid;
2228 	aura->poolid = pool->poolid;
2229 	aura->auraid = auraid;
2230 
2231 	/* Enable pointer counting. */
2232 	FPA3_WR_8(node, FPA3_AURA_CFG(aura->auraid), 0);
2233 	FPA3_WR_8(node, FPA3_AURA_CNT(aura->auraid), 1024);
2234 	FPA3_WR_8(node, FPA3_AURA_CNT_LIMIT(aura->auraid), 1024);
2235 
2236 	/* Set the backend pool. */
2237 	FPA3_WR_8(node, FPA3_AURA_POOL(aura->auraid), aura->poolid);
2238 }
2239 
2240 void
2241 ogx_fpa3_aura_load(struct ogx_node *node, struct fpa3aura *aura, size_t nelem,
2242     size_t size)
2243 {
2244 	paddr_t addr;
2245 	caddr_t kva;
2246 	size_t i;
2247 	size_t totsize;
2248 	int rsegs;
2249 
2250 	KASSERT(size % CACHELINESIZE == 0);
2251 
2252 	if (nelem > SIZE_MAX / size)
2253 		panic("%s: too large allocation", __func__);
2254 	totsize = nelem * size;
2255 
2256 	if (bus_dmamap_create(node->node_dmat, totsize, 1, totsize, 0,
2257 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aura->dmap))
2258 		panic("%s: out of memory", __func__);
2259 	if (bus_dmamem_alloc(node->node_dmat, totsize, CACHELINESIZE, 0,
2260 	    &aura->dmaseg, 1, &rsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO))
2261 		panic("%s: out of memory", __func__);
2262 	if (bus_dmamem_map(node->node_dmat, &aura->dmaseg, rsegs, totsize,
2263 	    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
2264 		panic("%s: bus_dmamem_map failed", __func__);
2265 	if (bus_dmamap_load(node->node_dmat, aura->dmap, kva, totsize, NULL,
2266 	    BUS_DMA_NOWAIT))
2267 		panic("%s: bus_dmamap_load failed", __func__);
2268 
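	/* Seed the aura by freeing each block of the new region into it. */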
2269 	for (i = 0, addr = aura->dmaseg.ds_addr; i < nelem; i++, addr += size)
2270 		ogx_fpa3_free(aura, addr);
2271 }
2272 
2273 int
2274 ogx_node_load_firmware(struct ogx_node *node)
2275 {
2276 	struct ogx_fwhdr *fw;
2277 	uint8_t *ucode = NULL;
2278 	size_t size = 0;
2279 	uint64_t *imem, val;
2280 	int cl, error = 0, i;
2281 
2282 	rw_enter_write(&node->node_lock);
2283 	if (node->node_flags & NODE_FWREADY)
2284 		goto out;
2285 
2286 	error = loadfirmware("ogx-pki-cluster", &ucode, &size);
2287 	if (error != 0) {
2288 		printf("ogx node%llu: could not load firmware, error %d\n",
2289 		    node->node_id, error);
2290 		goto out;
2291 	}
2292 
2293 	fw = (struct ogx_fwhdr *)ucode;
2294 	if (size < sizeof(*fw) || fw->fw_size != size - sizeof(*fw)) {
2295 		printf("ogx node%llu: invalid firmware\n", node->node_id);
2296 		error = EINVAL;
2297 		goto out;
2298 	}
2299 
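	/* Copy the microcode into the PKI instruction memory. */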
2300 	imem = (uint64_t *)(fw + 1);
2301 	for (i = 0; i < fw->fw_size / sizeof(uint64_t); i++)
2302 		PKI_WR_8(node, PKI_IMEM(i), imem[i]);
2303 
2304 	/* Enable the parsing clusters. */
2305 	for (cl = 0; cl < node->node_cfg->cfg_nclusters; cl++) {
2306 		val = PKI_RD_8(node, PKI_ICG_CFG(cl));
2307 		val |= PKI_ICG_CFG_PENA;
2308 		PKI_WR_8(node, PKI_ICG_CFG(cl), val);
2309 	}
2310 
2311 	node->node_flags |= NODE_FWREADY;
2312 
2313 out:
2314 	free(ucode, M_DEVBUF, size);
2315 	rw_exit_write(&node->node_lock);
2316 	return error;
2317 }
2318