/*	$OpenBSD: if_ogx.c,v 1.6 2021/07/29 14:11:53 visa Exp $	*/

/*
 * Copyright (c) 2019-2020 Visa Hankala
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for OCTEON III network processor.
 */

#include "bpfilter.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kstat.h>
#include <sys/socket.h>
#include <sys/stdint.h>

#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/ofw/fdt.h>
#include <dev/ofw/openfirm.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/octeonvar.h>
#include <machine/octeon_model.h>

#include <octeon/dev/cn30xxsmivar.h>
#include <octeon/dev/ogxreg.h>
#include <octeon/dev/ogxvar.h>

struct ogx_link_ops;

struct ogx_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;
	unsigned int		 sc_bgxid;
	unsigned int		 sc_lmacid;
	unsigned int		 sc_ipdport;
	unsigned int		 sc_pkomac;
	unsigned int		 sc_rxused;
	unsigned int		 sc_txfree;

	struct ogx_node		*sc_node;
	unsigned int		 sc_unit;	/* logical unit within node */

	struct mii_data		 sc_mii;
#define sc_media	sc_mii.mii_media
	struct timeout		 sc_tick;
	struct cn30xxsmi_softc	*sc_smi;

	struct timeout		 sc_rxrefill;
	void			*sc_rx_ih;
	void			*sc_tx_ih;

	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_port_ioh;
	bus_space_handle_t	 sc_nexus_ioh;

	struct fpa3aura		 sc_pkt_aura;
	const struct ogx_link_ops *sc_link_ops;
	uint8_t			 sc_link_duplex;

	struct mutex		 sc_kstat_mtx;
	struct timeout		 sc_kstat_tmo;
	struct kstat		*sc_kstat;
	uint64_t		*sc_counter_vals;
	bus_space_handle_t	 sc_pki_stat_ioh;
};

#define DEVNAME(sc)		((sc)->sc_dev.dv_xname)

#define L1_QUEUE(sc)		((sc)->sc_unit)
#define L2_QUEUE(sc)		((sc)->sc_unit)
#define L3_QUEUE(sc)		((sc)->sc_unit)
#define L4_QUEUE(sc)		((sc)->sc_unit)
#define L5_QUEUE(sc)		((sc)->sc_unit)
#define DESC_QUEUE(sc)		((sc)->sc_unit)

#define PORT_FIFO(sc)		((sc)->sc_unit)		/* PKO FIFO */
#define PORT_GROUP_RX(sc)	((sc)->sc_unit * 2)	/* SSO group for Rx */
#define PORT_GROUP_TX(sc)	((sc)->sc_unit * 2 + 1)	/* SSO group for Tx */
#define PORT_MAC(sc)		((sc)->sc_pkomac)
#define PORT_PKIND(sc)		((sc)->sc_unit)
#define PORT_QPG(sc)		((sc)->sc_unit)
#define PORT_STYLE(sc)		((sc)->sc_unit)

struct ogx_link_ops {
	const char	*link_type;
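	/*
	 * Nominal link speed in Mbps. ogx_attach() adds this to the
	 * shared FIFO group's total, which ogx_defer() later uses to
	 * pick the group's rate setting.
	 */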
	unsigned int	link_fifo_speed;	/* in Mbps */
	/* Initialize link. */
	int	(*link_init)(struct ogx_softc *);
	/* Deinitialize link. */
	void	(*link_down)(struct ogx_softc *);
	/* Change link parameters. */
	void	(*link_change)(struct ogx_softc *);
	/* Query link status. Returns non-zero if status has changed. */
	int	(*link_status)(struct ogx_softc *);
};

struct ogx_fifo_group {
	unsigned int		fg_inited;
	unsigned int		fg_speed;
};

struct ogx_config {
	unsigned int		cfg_nclusters;	/* number of parsing clusters */
	unsigned int		cfg_nfifogrps;	/* number of FIFO groups */
	unsigned int		cfg_nmacs;	/* number of MACs */
	unsigned int		cfg_npqs;	/* number of port queues */
	unsigned int		cfg_npkolvl;	/* number of PKO Lx levels */
	unsigned int		cfg_nullmac;	/* index of NULL MAC */
};

struct ogx_node {
	bus_dma_tag_t		 node_dmat;
	bus_space_tag_t		 node_iot;
	bus_space_handle_t	 node_fpa3;
	bus_space_handle_t	 node_pki;
	bus_space_handle_t	 node_pko3;
	bus_space_handle_t	 node_sso;

	struct fpa3pool		 node_pko_pool;
	struct fpa3pool		 node_pkt_pool;
	struct fpa3pool		 node_sso_pool;
	struct fpa3aura		 node_pko_aura;
	struct fpa3aura		 node_sso_aura;

	uint64_t		 node_id;
	unsigned int		 node_nclusters;
	unsigned int		 node_nunits;
	struct ogx_fifo_group	 node_fifogrp[8];
	const struct ogx_config	*node_cfg;

	struct rwlock		 node_lock;
	unsigned int		 node_flags;
#define NODE_INITED		0x01	/* node initialized */
#define NODE_FWREADY		0x02	/* node firmware ready */
};

struct ogx_fwhdr {
	char		fw_version[8];
	uint64_t	fw_size;
};

#define BGX_PORT_SIZE	0x100000
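
/*
 * The PORT_* accessors reference the attaching port's own LMAC register
 * window within the BGX block, NEXUS_* the containing BGX controller as
 * a whole, and the FPA3/PKI/PKO3/SSO accessors the node-global units.
 */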
#define PORT_RD_8(sc, reg) \
	bus_space_read_8((sc)->sc_iot, (sc)->sc_port_ioh, (reg))
#define PORT_WR_8(sc, reg, val) \
	bus_space_write_8((sc)->sc_iot, (sc)->sc_port_ioh, (reg), (val))

#define NEXUS_RD_8(sc, reg) \
	bus_space_read_8((sc)->sc_iot, (sc)->sc_nexus_ioh, (reg))
#define NEXUS_WR_8(sc, reg, val) \
	bus_space_write_8((sc)->sc_iot, (sc)->sc_nexus_ioh, (reg), (val))

#define FPA3_RD_8(node, reg) \
	bus_space_read_8((node)->node_iot, (node)->node_fpa3, (reg))
#define FPA3_WR_8(node, reg, val) \
	bus_space_write_8((node)->node_iot, (node)->node_fpa3, (reg), (val))
#define PKI_RD_8(node, reg) \
	bus_space_read_8((node)->node_iot, (node)->node_pki, (reg))
#define PKI_WR_8(node, reg, val) \
	bus_space_write_8((node)->node_iot, (node)->node_pki, (reg), (val))
#define PKO3_RD_8(node, reg) \
	bus_space_read_8((node)->node_iot, (node)->node_pko3, (reg))
#define PKO3_WR_8(node, reg, val) \
	bus_space_write_8((node)->node_iot, (node)->node_pko3, (reg), (val))
#define SSO_RD_8(node, reg) \
	bus_space_read_8((node)->node_iot, (node)->node_sso, (reg))
#define SSO_WR_8(node, reg, val) \
	bus_space_write_8((node)->node_iot, (node)->node_sso, (reg), (val))

int	ogx_match(struct device *, void *, void *);
void	ogx_attach(struct device *, struct device *, void *);
void	ogx_defer(struct device *);

int	ogx_ioctl(struct ifnet *, u_long, caddr_t);
void	ogx_start(struct ifqueue *);
int	ogx_send_mbuf(struct ogx_softc *, struct mbuf *);
u_int	ogx_load_mbufs(struct ogx_softc *, unsigned int);
u_int	ogx_unload_mbufs(struct ogx_softc *);

void	ogx_media_status(struct ifnet *, struct ifmediareq *);
int	ogx_media_change(struct ifnet *);
int	ogx_mii_readreg(struct device *, int, int);
void	ogx_mii_writereg(struct device *, int, int, int);
void	ogx_mii_statchg(struct device *);

int	ogx_init(struct ogx_softc *);
void	ogx_down(struct ogx_softc *);
void	ogx_iff(struct ogx_softc *);
void	ogx_rxrefill(void *);
int	ogx_rxintr(void *);
int	ogx_txintr(void *);
void	ogx_tick(void *);

#if NKSTAT > 0
#define OGX_KSTAT_TICK_SECS	600
void	ogx_kstat_attach(struct ogx_softc *);
int	ogx_kstat_read(struct kstat *);
void	ogx_kstat_start(struct ogx_softc *);
void	ogx_kstat_stop(struct ogx_softc *);
void	ogx_kstat_tick(void *);
#endif

int	ogx_node_init(struct ogx_node **, bus_dma_tag_t, bus_space_tag_t);
int	ogx_node_load_firmware(struct ogx_node *);
void	ogx_fpa3_aura_init(struct ogx_node *, struct fpa3aura *, uint32_t,
	    struct fpa3pool *);
void	ogx_fpa3_aura_load(struct ogx_node *, struct fpa3aura *, size_t,
	    size_t);
paddr_t	ogx_fpa3_alloc(struct fpa3aura *);
void	ogx_fpa3_free(struct fpa3aura *, paddr_t);
void	ogx_fpa3_pool_init(struct ogx_node *, struct fpa3pool *, uint32_t,
	    uint32_t);

int	ogx_sgmii_link_init(struct ogx_softc *);
void	ogx_sgmii_link_down(struct ogx_softc *);
void	ogx_sgmii_link_change(struct ogx_softc *);
int	ogx_sgmii_link_status(struct ogx_softc *);

static inline paddr_t
ogx_kvtophys(vaddr_t kva)
{
	KASSERT(IS_XKPHYS(kva));
	return XKPHYS_TO_PHYS(kva);
}
#define KVTOPHYS(addr)	ogx_kvtophys((vaddr_t)(addr))

const struct cfattach ogx_ca = {
	sizeof(struct ogx_softc), ogx_match, ogx_attach
};

struct cfdriver ogx_cd = {
	NULL, "ogx", DV_IFNET
};

const struct ogx_config ogx_cn73xx_config = {
	.cfg_nclusters	= 2,
	.cfg_nfifogrps	= 4,
	.cfg_nmacs	= 14,
	.cfg_npqs	= 16,
	.cfg_npkolvl	= 3,
	.cfg_nullmac	= 15,
};

const struct ogx_config ogx_cn78xx_config = {
	.cfg_nclusters	= 4,
	.cfg_nfifogrps	= 8,
	.cfg_nmacs	= 28,
	.cfg_npqs	= 32,
	.cfg_npkolvl	= 5,
	.cfg_nullmac	= 28,
};

const struct ogx_link_ops ogx_sgmii_link_ops = {
	.link_type	= "SGMII",
	.link_fifo_speed = 1000,
	.link_init	= ogx_sgmii_link_init,
	.link_down	= ogx_sgmii_link_down,
	.link_change	= ogx_sgmii_link_change,
};

const struct ogx_link_ops ogx_xfi_link_ops = {
	.link_type	= "XFI",
	.link_fifo_speed = 10000,
};

#define BELTYPE_NONE	0x00
#define BELTYPE_MISC	0x01
#define BELTYPE_IPv4	0x02
#define BELTYPE_IPv6	0x03
#define BELTYPE_TCP	0x04
#define BELTYPE_UDP	0x05

static const unsigned int ogx_ltypes[] = {
	BELTYPE_NONE,	/* 0x00 */
	BELTYPE_MISC,	/* 0x01 Ethernet */
	BELTYPE_MISC,	/* 0x02 VLAN */
	BELTYPE_NONE,	/* 0x03 */
	BELTYPE_NONE,	/* 0x04 */
	BELTYPE_MISC,	/* 0x05 SNAP */
	BELTYPE_MISC,	/* 0x06 ARP */
	BELTYPE_MISC,	/* 0x07 RARP */
	BELTYPE_IPv4,	/* 0x08 IPv4 */
	BELTYPE_IPv4,	/* 0x09 IPv4 options */
	BELTYPE_IPv6,	/* 0x0a IPv6 */
	BELTYPE_IPv6,	/* 0x0b IPv6 options */
	BELTYPE_MISC,	/* 0x0c ESP */
	BELTYPE_MISC,	/* 0x0d IP fragment */
	BELTYPE_MISC,	/* 0x0e IPcomp */
	BELTYPE_NONE,	/* 0x0f */
	BELTYPE_TCP,	/* 0x10 TCP */
	BELTYPE_UDP,	/* 0x11 UDP */
	BELTYPE_MISC,	/* 0x12 SCTP */
	BELTYPE_UDP,	/* 0x13 UDP VXLAN */
	BELTYPE_MISC,	/* 0x14 GRE */
	BELTYPE_MISC,	/* 0x15 NVGRE */
	BELTYPE_MISC,	/* 0x16 GTP */
	BELTYPE_UDP,	/* 0x17 UDP Geneve */
	BELTYPE_NONE,	/* 0x18 */
	BELTYPE_NONE,	/* 0x19 */
	BELTYPE_NONE,	/* 0x1a */
	BELTYPE_NONE,	/* 0x1b */
	BELTYPE_MISC,	/* 0x1c software */
	BELTYPE_MISC,	/* 0x1d software */
	BELTYPE_MISC,	/* 0x1e software */
	BELTYPE_MISC	/* 0x1f software */
};

#define OGX_POOL_SSO	0
#define OGX_POOL_PKO	1
#define OGX_POOL_PKT	2

#define OGX_AURA_SSO	0
#define OGX_AURA_PKO	1
#define OGX_AURA_PKT(sc) ((sc)->sc_unit + 2)

struct ogx_node ogx_node;

int
ogx_match(struct device *parent, void *match, void *aux)
{
	return 1;
}

void
ogx_attach(struct device *parent, struct device *self, void *aux)
{
	const struct ogx_config *cfg;
	struct ogx_fifo_group *fifogrp;
	struct ogx_node *node;
	struct ogx_attach_args *oaa = aux;
	struct ogx_softc *sc = (struct ogx_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint64_t lmac_type, lut_index, val;
	uint32_t lmac;
	int fgindex = PORT_FIFO(sc) >> 2;
	int cl, phy_addr, phy_handle;

	if (ogx_node_init(&node, oaa->oaa_dmat, oaa->oaa_iot)) {
		printf(": node init failed\n");
		return;
	}
	cfg = node->node_cfg;

	sc->sc_node = node;
	sc->sc_unit = node->node_nunits++;

	phy_handle = OF_getpropint(oaa->oaa_node, "phy-handle", 0);
	if (phy_handle == 0) {
		printf(": no phy-handle\n");
		return;
	}
	if (cn30xxsmi_get_phy(phy_handle, 0, &sc->sc_smi, &phy_addr)) {
		printf(": no phy found\n");
		return;
	}

	lmac = OF_getpropint(oaa->oaa_node, "reg", UINT32_MAX);
	if (lmac == UINT32_MAX) {
		printf(": no reg property\n");
		return;
	}

	sc->sc_bgxid = oaa->oaa_bgxid;
	sc->sc_lmacid = lmac;
	sc->sc_ipdport = sc->sc_bgxid * 0x100 + lmac * 0x10 + 0x800;
	sc->sc_pkomac = sc->sc_bgxid * 4 + lmac + 2;

	if (OF_getproplen(oaa->oaa_node, "local-mac-address") !=
	    ETHER_ADDR_LEN) {
		printf(": no MAC address\n");
		return;
	}
	OF_getprop(oaa->oaa_node, "local-mac-address", sc->sc_ac.ac_enaddr,
	    ETHER_ADDR_LEN);

	sc->sc_iot = oaa->oaa_iot;
	sc->sc_nexus_ioh = oaa->oaa_ioh;
	if (bus_space_subregion(sc->sc_iot, oaa->oaa_ioh,
	    sc->sc_lmacid * BGX_PORT_SIZE, BGX_PORT_SIZE, &sc->sc_port_ioh)) {
		printf(": can't map IO subregion\n");
		return;
	}

	val = PORT_RD_8(sc, BGX_CMR_RX_ID_MAP);
	val &= ~BGX_CMR_RX_ID_MAP_RID_M;
	val &= ~BGX_CMR_RX_ID_MAP_PKND_M;
	val |= (uint64_t)(sc->sc_bgxid * 4 + 2 + sc->sc_lmacid) <<
	    BGX_CMR_RX_ID_MAP_RID_S;
	val |= (uint64_t)PORT_PKIND(sc) << BGX_CMR_RX_ID_MAP_PKND_S;
	PORT_WR_8(sc, BGX_CMR_RX_ID_MAP, val);

	val = PORT_RD_8(sc, BGX_CMR_CHAN_MSK_AND);
	val |= 0xffffULL << (sc->sc_lmacid * 16);
	PORT_WR_8(sc, BGX_CMR_CHAN_MSK_AND, val);

	val = PORT_RD_8(sc, BGX_CMR_CHAN_MSK_OR);
	val |= 0xffffULL << (sc->sc_lmacid * 16);
	PORT_WR_8(sc, BGX_CMR_CHAN_MSK_OR, val);

	sc->sc_rx_ih = octeon_intr_establish(0x61000 | PORT_GROUP_RX(sc),
	    IPL_NET | IPL_MPSAFE, ogx_rxintr, sc, DEVNAME(sc));
	if (sc->sc_rx_ih == NULL) {
		printf(": could not establish Rx interrupt\n");
		return;
	}
	sc->sc_tx_ih = octeon_intr_establish(0x61000 | PORT_GROUP_TX(sc),
	    IPL_NET | IPL_MPSAFE, ogx_txintr, sc, DEVNAME(sc));
	if (sc->sc_tx_ih == NULL) {
		printf(": could not establish Tx interrupt\n");
		return;
	}
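
	/*
	 * Pick the link operations vector based on the LMAC type
	 * that has been configured in BGX_CMR_CONFIG.
	 */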
	val = PORT_RD_8(sc, BGX_CMR_CONFIG);
	lmac_type = (val & BGX_CMR_CONFIG_LMAC_TYPE_M) >>
	    BGX_CMR_CONFIG_LMAC_TYPE_S;
	switch (lmac_type) {
	case 0:
		sc->sc_link_ops = &ogx_sgmii_link_ops;
		break;
	default:
		printf(": unhandled LMAC type %llu\n", lmac_type);
		return;
	}
	printf(": %s", sc->sc_link_ops->link_type);

	printf(", address %s", ether_sprintf(sc->sc_ac.ac_enaddr));

	ogx_fpa3_aura_init(node, &sc->sc_pkt_aura, OGX_AURA_PKT(sc),
	    &node->node_pkt_pool);

	sc->sc_rxused = 128;
	sc->sc_txfree = 128;

	timeout_set(&sc->sc_rxrefill, ogx_rxrefill, sc);
	timeout_set(&sc->sc_tick, ogx_tick, sc);

	printf("\n");

	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_xflags |= IFXF_MPSAFE;
	ifp->if_ioctl = ogx_ioctl;
	ifp->if_qstart = ogx_start;
	ifp->if_capabilities = IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
	    IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ogx_mii_readreg;
	sc->sc_mii.mii_writereg = ogx_mii_writereg;
	sc->sc_mii.mii_statchg = ogx_mii_statchg;
	ifmedia_init(&sc->sc_media, 0, ogx_media_change, ogx_media_status);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phy_addr,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		printf("%s: no PHY found\n", DEVNAME(sc));
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

		timeout_add_sec(&sc->sc_tick, 1);
	}

	/*
	 * Set up the PKI for this port.
	 */

	val = (uint64_t)PORT_GROUP_RX(sc) << PKI_QPG_TBL_GRP_OK_S;
	val |= (uint64_t)PORT_GROUP_RX(sc) << PKI_QPG_TBL_GRP_BAD_S;
	val |= OGX_AURA_PKT(sc) << PKI_QPG_TBL_LAURA_S;
	PKI_WR_8(node, PKI_QPG_TBL(PORT_QPG(sc)), val);

	for (cl = 0; cl < cfg->cfg_nclusters; cl++) {
		val = (uint64_t)PORT_QPG(sc) << PKI_CL_STYLE_CFG_QPG_BASE_S;
		PKI_WR_8(node, PKI_CL_STYLE_CFG(cl, PORT_STYLE(sc)), val);
		PKI_WR_8(node, PKI_CL_STYLE_CFG2(cl, PORT_STYLE(sc)), 0);
		PKI_WR_8(node, PKI_CL_STYLE_ALG(cl, PORT_STYLE(sc)), 1u << 31);

		val = PKI_RD_8(node, PKI_CL_PKIND_STYLE(cl, PORT_PKIND(sc)));
		val &= ~PKI_CL_PKIND_STYLE_PM_M;
		val &= ~PKI_CL_PKIND_STYLE_STYLE_M;
		val |= PORT_STYLE(sc) << PKI_CL_PKIND_STYLE_STYLE_S;
		PKI_WR_8(node, PKI_CL_PKIND_STYLE(cl, PORT_PKIND(sc)), val);
	}
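
	/*
	 * Program the style's buffer layout. Judging by the MCLBYTES
	 * arithmetic below, FIRST_SKIP and MB_SIZE are in units of
	 * 8-byte words: skip 5 words at the start of the first buffer
	 * and let packet data fill the rest of the cluster.
	 */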
	val = 5ULL << PKI_STYLE_BUF_FIRST_SKIP_S;
	val |= ((MCLBYTES - CACHELINESIZE) / sizeof(uint64_t)) <<
	    PKI_STYLE_BUF_MB_SIZE_S;
	PKI_WR_8(node, PKI_STYLE_BUF(PORT_STYLE(sc)), val);

	/*
	 * Set up output queues from the descriptor queue to the port queue.
	 *
	 * The hardware implements a multilevel hierarchy of queues
	 * with configurable priorities.
	 * This driver uses a simple topology where there is one queue
	 * on each level.
	 *
	 * CN73xx: DQ -> L3 -> L2 -> port
	 * CN78xx: DQ -> L5 -> L4 -> L3 -> L2 -> port
	 */

	/* Map channel to queue L2. */
	val = PKO3_RD_8(node, PKO3_L3_L2_SQ_CHANNEL(L2_QUEUE(sc)));
	val &= ~PKO3_L3_L2_SQ_CHANNEL_CC_ENABLE;
	val &= ~PKO3_L3_L2_SQ_CHANNEL_M;
	val |= (uint64_t)sc->sc_ipdport << PKO3_L3_L2_SQ_CHANNEL_S;
	PKO3_WR_8(node, PKO3_L3_L2_SQ_CHANNEL(L2_QUEUE(sc)), val);

	val = PKO3_RD_8(node, PKO3_MAC_CFG(PORT_MAC(sc)));
	val &= ~PKO3_MAC_CFG_MIN_PAD_ENA;
	val &= ~PKO3_MAC_CFG_FCS_ENA;
	val &= ~PKO3_MAC_CFG_FCS_SOP_OFF_M;
	val &= ~PKO3_MAC_CFG_FIFO_NUM_M;
	val |= PORT_FIFO(sc) << PKO3_MAC_CFG_FIFO_NUM_S;
	PKO3_WR_8(node, PKO3_MAC_CFG(PORT_MAC(sc)), val);

	val = PKO3_RD_8(node, PKO3_MAC_CFG(PORT_MAC(sc)));
	val &= ~PKO3_MAC_CFG_SKID_MAX_CNT_M;
	PKO3_WR_8(node, PKO3_MAC_CFG(PORT_MAC(sc)), val);

	PKO3_WR_8(node, PKO3_MCI0_MAX_CRED(PORT_MAC(sc)), 0);
	PKO3_WR_8(node, PKO3_MCI1_MAX_CRED(PORT_MAC(sc)), 2560 / 16);

	/* Map the port queue to the MAC. */

	val = (uint64_t)PORT_MAC(sc) << PKO3_L1_SQ_TOPOLOGY_LINK_S;
	PKO3_WR_8(node, PKO3_L1_SQ_TOPOLOGY(L1_QUEUE(sc)), val);

	val = (uint64_t)PORT_MAC(sc) << PKO3_L1_SQ_SHAPE_LINK_S;
	PKO3_WR_8(node, PKO3_L1_SQ_SHAPE(L1_QUEUE(sc)), val);

	val = (uint64_t)PORT_MAC(sc) << PKO3_L1_SQ_LINK_LINK_S;
	PKO3_WR_8(node, PKO3_L1_SQ_LINK(L1_QUEUE(sc)), val);

	/* L1 / port queue */

	val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
	PKO3_WR_8(node, PKO3_L1_SQ_SCHEDULE(L1_QUEUE(sc)), val);

	val = PKO3_RD_8(node, PKO3_L1_SQ_TOPOLOGY(L1_QUEUE(sc)));
	val &= ~PKO3_L1_SQ_TOPOLOGY_PRIO_ANCHOR_M;
	val &= ~PKO3_L1_SQ_TOPOLOGY_RR_PRIO_M;
	val |= (uint64_t)L2_QUEUE(sc) << PKO3_L1_SQ_TOPOLOGY_PRIO_ANCHOR_S;
	val |= (uint64_t)0xf << PKO3_L1_SQ_TOPOLOGY_RR_PRIO_S;
	PKO3_WR_8(node, PKO3_L1_SQ_TOPOLOGY(L1_QUEUE(sc)), val);

	/* L2 */

	val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
	PKO3_WR_8(node, PKO3_L2_SQ_SCHEDULE(L2_QUEUE(sc)), val);

	val = PKO3_RD_8(node, PKO3_L2_SQ_TOPOLOGY(L2_QUEUE(sc)));
	val &= ~PKO3_L2_SQ_TOPOLOGY_PRIO_ANCHOR_M;
	val &= ~PKO3_L2_SQ_TOPOLOGY_PARENT_M;
	val &= ~PKO3_L2_SQ_TOPOLOGY_RR_PRIO_M;
	val |= (uint64_t)L3_QUEUE(sc) << PKO3_L2_SQ_TOPOLOGY_PRIO_ANCHOR_S;
	val |= (uint64_t)L1_QUEUE(sc) << PKO3_L2_SQ_TOPOLOGY_PARENT_S;
	val |= (uint64_t)0xf << PKO3_L2_SQ_TOPOLOGY_RR_PRIO_S;
	PKO3_WR_8(node, PKO3_L2_SQ_TOPOLOGY(L2_QUEUE(sc)), val);
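
	/*
	 * The levels below L2 depend on the SoC generation; hook up
	 * the remaining queues accordingly, ending with the descriptor
	 * queue that software submits commands to.
	 */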
	switch (cfg->cfg_npkolvl) {
	case 3:
		/* L3 */

		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
		PKO3_WR_8(node, PKO3_L3_SQ_SCHEDULE(L3_QUEUE(sc)), val);

		val = PKO3_RD_8(node, PKO3_L3_SQ_TOPOLOGY(L3_QUEUE(sc)));
		val &= ~PKO3_L3_SQ_TOPOLOGY_PRIO_ANCHOR_M;
		val &= ~PKO3_L3_SQ_TOPOLOGY_PARENT_M;
		val &= ~PKO3_L3_SQ_TOPOLOGY_RR_PRIO_M;
		val |= (uint64_t)DESC_QUEUE(sc) <<
		    PKO3_L3_SQ_TOPOLOGY_PRIO_ANCHOR_S;
		val |= (uint64_t)L2_QUEUE(sc) << PKO3_L3_SQ_TOPOLOGY_PARENT_S;
		val |= (uint64_t)0xf << PKO3_L3_SQ_TOPOLOGY_RR_PRIO_S;
		PKO3_WR_8(node, PKO3_L3_SQ_TOPOLOGY(L3_QUEUE(sc)), val);

		/* Descriptor queue */

		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
		PKO3_WR_8(node, PKO3_DQ_SCHEDULE(DESC_QUEUE(sc)), val);

		val = (uint64_t)L3_QUEUE(sc) << PKO3_DQ_TOPOLOGY_PARENT_S;
		PKO3_WR_8(node, PKO3_DQ_TOPOLOGY(DESC_QUEUE(sc)), val);

		break;

	case 5:
		/* L3 */

		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
		PKO3_WR_8(node, PKO3_L3_SQ_SCHEDULE(L3_QUEUE(sc)), val);

		val = PKO3_RD_8(node, PKO3_L3_SQ_TOPOLOGY(L3_QUEUE(sc)));
		val &= ~PKO3_L3_SQ_TOPOLOGY_PRIO_ANCHOR_M;
		val &= ~PKO3_L3_SQ_TOPOLOGY_PARENT_M;
		val &= ~PKO3_L3_SQ_TOPOLOGY_RR_PRIO_M;
		val |= (uint64_t)L4_QUEUE(sc) <<
		    PKO3_L3_SQ_TOPOLOGY_PRIO_ANCHOR_S;
		val |= (uint64_t)L2_QUEUE(sc) << PKO3_L3_SQ_TOPOLOGY_PARENT_S;
		val |= (uint64_t)0xf << PKO3_L3_SQ_TOPOLOGY_RR_PRIO_S;
		PKO3_WR_8(node, PKO3_L3_SQ_TOPOLOGY(L3_QUEUE(sc)), val);

		/* L4 */

		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
		PKO3_WR_8(node, PKO3_L4_SQ_SCHEDULE(L4_QUEUE(sc)), val);

		val = PKO3_RD_8(node, PKO3_L4_SQ_TOPOLOGY(L4_QUEUE(sc)));
		val &= ~PKO3_L4_SQ_TOPOLOGY_PRIO_ANCHOR_M;
		val &= ~PKO3_L4_SQ_TOPOLOGY_PARENT_M;
		val &= ~PKO3_L4_SQ_TOPOLOGY_RR_PRIO_M;
		val |= (uint64_t)L5_QUEUE(sc) <<
		    PKO3_L4_SQ_TOPOLOGY_PRIO_ANCHOR_S;
		val |= (uint64_t)L3_QUEUE(sc) << PKO3_L4_SQ_TOPOLOGY_PARENT_S;
		val |= (uint64_t)0xf << PKO3_L4_SQ_TOPOLOGY_RR_PRIO_S;
		PKO3_WR_8(node, PKO3_L4_SQ_TOPOLOGY(L4_QUEUE(sc)), val);

		/* L5 */

		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
		PKO3_WR_8(node, PKO3_L5_SQ_SCHEDULE(L5_QUEUE(sc)), val);

		val = PKO3_RD_8(node, PKO3_L5_SQ_TOPOLOGY(L5_QUEUE(sc)));
		val &= ~PKO3_L5_SQ_TOPOLOGY_PRIO_ANCHOR_M;
		val &= ~PKO3_L5_SQ_TOPOLOGY_PARENT_M;
		val &= ~PKO3_L5_SQ_TOPOLOGY_RR_PRIO_M;
		val |= (uint64_t)DESC_QUEUE(sc) <<
		    PKO3_L5_SQ_TOPOLOGY_PRIO_ANCHOR_S;
		val |= (uint64_t)L4_QUEUE(sc) << PKO3_L5_SQ_TOPOLOGY_PARENT_S;
		val |= (uint64_t)0xf << PKO3_L5_SQ_TOPOLOGY_RR_PRIO_S;
		PKO3_WR_8(node, PKO3_L5_SQ_TOPOLOGY(L5_QUEUE(sc)), val);

		/* Descriptor queue */

		val = (uint64_t)0x10 << PKO3_LX_SQ_SCHEDULE_RR_QUANTUM_S;
		PKO3_WR_8(node, PKO3_DQ_SCHEDULE(DESC_QUEUE(sc)), val);

		val = (uint64_t)L5_QUEUE(sc) << PKO3_DQ_TOPOLOGY_PARENT_S;
		PKO3_WR_8(node, PKO3_DQ_TOPOLOGY(DESC_QUEUE(sc)), val);

		break;

	default:
		printf(": unhandled number of PKO levels (%u)\n",
		    cfg->cfg_npkolvl);
		return;
	}

	/* Descriptor queue, common part */

	PKO3_WR_8(node, PKO3_DQ_WM_CTL(DESC_QUEUE(sc)), PKO3_DQ_WM_CTL_KIND);

	val = PKO3_RD_8(node, PKO3_PDM_DQ_MINPAD(DESC_QUEUE(sc)));
	val &= ~PKO3_PDM_DQ_MINPAD_MINPAD;
	PKO3_WR_8(node, PKO3_PDM_DQ_MINPAD(DESC_QUEUE(sc)), val);

	lut_index = sc->sc_bgxid * 0x40 + lmac * 0x10;
	val = PKO3_LUT_VALID | (L1_QUEUE(sc) << PKO3_LUT_PQ_IDX_S) |
	    (L2_QUEUE(sc) << PKO3_LUT_QUEUE_NUM_S);
	PKO3_WR_8(node, PKO3_LUT(lut_index), val);

#if NKSTAT > 0
	ogx_kstat_attach(sc);
#endif

	fifogrp = &node->node_fifogrp[fgindex];
	fifogrp->fg_speed += sc->sc_link_ops->link_fifo_speed;

	/*
	 * Defer the rest of the initialization so that FIFO groups
	 * can be configured properly.
	 */
	config_defer(&sc->sc_dev, ogx_defer);
}

void
ogx_defer(struct device *dev)
{
	struct ogx_fifo_group *fifogrp;
	struct ogx_softc *sc = (struct ogx_softc *)dev;
	struct ogx_node *node = sc->sc_node;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint64_t grprate, val;
	int fgindex = PORT_FIFO(sc) >> 2;

	fifogrp = &node->node_fifogrp[fgindex];
	if (fifogrp->fg_inited == 0) {
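		/*
		 * The rate field apparently encodes a power-of-two
		 * multiple of 6.25 Gbps; pick the smallest rate that
		 * covers the aggregate speed of the group's ports.
		 */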
		/* Adjust the total rate of the fifo group. */
		grprate = 0;
		while (fifogrp->fg_speed > (6250 << grprate))
			grprate++;
		if (grprate > 5)
			grprate = 5;

		val = PKO3_RD_8(node, PKO3_PTGF_CFG(fgindex));
		val &= ~PKO3_PTGF_CFG_RATE_M;
		val |= grprate << PKO3_PTGF_CFG_RATE_S;
		PKO3_WR_8(node, PKO3_PTGF_CFG(fgindex), val);

		fifogrp->fg_inited = 1;
	}

	if_attach(ifp);
	ether_ifattach(ifp);
}

int
ogx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ogx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
	int s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				error = ogx_init(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				ogx_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			ogx_iff(sc);
		error = 0;
	}

	splx(s);

	return error;
}

int
ogx_init(struct ogx_softc *sc)
{
	struct ogx_node *node = sc->sc_node;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint64_t op;
	int error;

	error = ogx_node_load_firmware(node);
	if (error != 0)
		return error;

#if NKSTAT > 0
	ogx_kstat_start(sc);
#endif

	ogx_iff(sc);

	SSO_WR_8(sc->sc_node, SSO_GRP_INT_THR(PORT_GROUP_RX(sc)), 1);
	SSO_WR_8(sc->sc_node, SSO_GRP_INT_THR(PORT_GROUP_TX(sc)), 1);

	sc->sc_link_ops->link_init(sc);
	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Open the descriptor queue. */
	op = PKO3_LD_IO | PKO3_LD_DID;
	op |= node->node_id << PKO3_LD_NODE_S;
	op |= PKO3_DQOP_OPEN << PKO3_LD_OP_S;
	op |= DESC_QUEUE(sc) << PKO3_LD_DQ_S;
	(void)octeon_xkphys_read_8(op);

	ifp->if_flags |= IFF_RUNNING;
	ifq_restart(&ifp->if_snd);

	timeout_add(&sc->sc_rxrefill, 1);
	timeout_add_sec(&sc->sc_tick, 1);

	return 0;
}
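
/*
 * Stop the interface: drain and close the Tx descriptor queue, disable
 * the MAC, and reclaim the Rx buffers that were loaded into the FPA.
 */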
void
ogx_down(struct ogx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ogx_node *node = sc->sc_node;
	uint64_t op, val;
	unsigned int nused;

	CLR(ifp->if_flags, IFF_RUNNING);

	/* Drain the descriptor queue. */
	val = PKO3_LX_SQ_SW_XOFF_DRAIN;
	val |= PKO3_LX_SQ_SW_XOFF_DRAIN_NULL_LINK;
	PKO3_WR_8(node, PKO3_DQ_SW_XOFF(DESC_QUEUE(sc)), val);
	(void)PKO3_RD_8(node, PKO3_DQ_SW_XOFF(DESC_QUEUE(sc)));

	delay(1000);

	/* Finish the drain operation. */
	PKO3_WR_8(node, PKO3_DQ_SW_XOFF(DESC_QUEUE(sc)), 0);
	(void)PKO3_RD_8(node, PKO3_DQ_SW_XOFF(DESC_QUEUE(sc)));

	/* Close the descriptor queue. */
	op = PKO3_LD_IO | PKO3_LD_DID;
	op |= node->node_id << PKO3_LD_NODE_S;
	op |= PKO3_DQOP_CLOSE << PKO3_LD_OP_S;
	op |= DESC_QUEUE(sc) << PKO3_LD_DQ_S;
	(void)octeon_xkphys_read_8(op);

	/* Disable data transfer. */
	val = PORT_RD_8(sc, BGX_CMR_CONFIG);
	val &= ~BGX_CMR_CONFIG_DATA_PKT_RX_EN;
	val &= ~BGX_CMR_CONFIG_DATA_PKT_TX_EN;
	PORT_WR_8(sc, BGX_CMR_CONFIG, val);
	(void)PORT_RD_8(sc, BGX_CMR_CONFIG);

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
		mii_down(&sc->sc_mii);
	sc->sc_link_ops->link_down(sc);

	ifq_clr_oactive(&ifp->if_snd);
	ifq_barrier(&ifp->if_snd);

	timeout_del_barrier(&sc->sc_rxrefill);
	timeout_del_barrier(&sc->sc_tick);

#if NKSTAT > 0
	ogx_kstat_stop(sc);
#endif

	nused = ogx_unload_mbufs(sc);
	atomic_add_int(&sc->sc_rxused, nused);
}

void
ogx_iff(struct ogx_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint64_t rx_adr_ctl;
	uint64_t val;
	int cidx, clast, i;

	rx_adr_ctl = PORT_RD_8(sc, BGX_CMR_RX_ADR_CTL);
	rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_BCST_ACCEPT;
	rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_CAM_ACCEPT;
	rx_adr_ctl &= ~BGX_CMR_RX_ADR_CTL_MCST_MODE_ALL;
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
		ifp->if_flags |= IFF_ALLMULTI;
		rx_adr_ctl &= ~BGX_CMR_RX_ADR_CTL_CAM_ACCEPT;
		rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_MCST_MODE_ALL;
	} else if (ac->ac_multirangecnt > 0 || ac->ac_multicnt >= OGX_NCAM) {
		ifp->if_flags |= IFF_ALLMULTI;
		rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_MCST_MODE_ALL;
	} else {
		rx_adr_ctl |= BGX_CMR_RX_ADR_CTL_MCST_MODE_CAM;
	}

	PORT_WR_8(sc, BGX_CMR_RX_ADR_CTL, rx_adr_ctl);

	cidx = sc->sc_lmacid * OGX_NCAM;
	clast = (sc->sc_lmacid + 1) * OGX_NCAM;

	if (!ISSET(ifp->if_flags, IFF_PROMISC)) {
		val = BGX_CMR_RX_ADR_CAM_EN | ((uint64_t)sc->sc_lmacid
		    << BGX_CMR_RX_ADR_CAM_ID_S);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			val |= (uint64_t)ac->ac_enaddr[i] <<
			    ((ETHER_ADDR_LEN - 1 - i) * 8);
		}
		NEXUS_WR_8(sc, BGX_CMR_RX_ADR_CAM(cidx++), val);
	}

	if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			val = BGX_CMR_RX_ADR_CAM_EN | ((uint64_t)sc->sc_lmacid
			    << BGX_CMR_RX_ADR_CAM_ID_S);
			for (i = 0; i < ETHER_ADDR_LEN; i++)
				val |= (uint64_t)enm->enm_addrlo[i] <<
				    ((ETHER_ADDR_LEN - 1 - i) * 8);
			KASSERT(cidx < clast);
			NEXUS_WR_8(sc, BGX_CMR_RX_ADR_CAM(cidx++), val);

			ETHER_NEXT_MULTI(step, enm);
		}
	}
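
	/*
	 * Each LMAC owns a fixed window of OGX_NCAM entries in the
	 * shared CAM; cidx has now advanced past the entries in use.
	 */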
	/* Disable any remaining address CAM entries. */
	while (cidx < clast)
		NEXUS_WR_8(sc, BGX_CMR_RX_ADR_CAM(cidx++), 0);
}

static inline uint64_t *
ogx_get_work(struct ogx_node *node, uint32_t group)
{
	uint64_t op, resp;

	op = SSO_LD_IO | SSO_LD_DID;
	op |= node->node_id << SSO_LD_NODE_S;
	op |= SSO_LD_GROUPED | (group << SSO_LD_INDEX_S);
	resp = octeon_xkphys_read_8(op);

	if (resp & SSO_LD_RTN_NO_WORK)
		return NULL;

	return (uint64_t *)PHYS_TO_XKPHYS(resp & SSO_LD_RTN_ADDR_M, CCA_CACHED);
}

static inline struct mbuf *
ogx_extract_mbuf(struct ogx_softc *sc, paddr_t pktbuf)
{
	struct mbuf *m, **pm;

	pm = (struct mbuf **)PHYS_TO_XKPHYS(pktbuf, CCA_CACHED) - 1;
	m = *pm;
	*pm = NULL;
	KASSERTMSG((paddr_t)m->m_pkthdr.ph_cookie == pktbuf,
	    "%s: corrupt packet pool, mbuf cookie %p != pktbuf %p",
	    DEVNAME(sc), m->m_pkthdr.ph_cookie, (void *)pktbuf);
	m->m_pkthdr.ph_cookie = NULL;
	return m;
}

void
ogx_rxrefill(void *arg)
{
	struct ogx_softc *sc = arg;
	unsigned int to_alloc;

	if (sc->sc_rxused > 0) {
		to_alloc = atomic_swap_uint(&sc->sc_rxused, 0);
		to_alloc = ogx_load_mbufs(sc, to_alloc);
		if (to_alloc > 0) {
			atomic_add_int(&sc->sc_rxused, to_alloc);
			timeout_add(&sc->sc_rxrefill, 1);
		}
	}
}

void
ogx_tick(void *arg)
{
	struct ogx_softc *sc = arg;
	int s;

	s = splnet();
	if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		mii_tick(&sc->sc_mii);
	} else {
		if (sc->sc_link_ops->link_status(sc))
			sc->sc_link_ops->link_change(sc);
	}
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}

int
ogx_rxintr(void *arg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *m0, *mprev;
	struct ogx_softc *sc = arg;
	struct ogx_node *node = sc->sc_node;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	paddr_t pktbuf, pktdata;
	uint64_t *work;
	uint64_t nsegs;
	unsigned int rxused = 0;
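
	/*
	 * Each work queue entry delivered by the SSO describes one
	 * received frame, possibly split across several packet buffers.
	 */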
	/* Acknowledge the interrupt. */
	SSO_WR_8(node, SSO_GRP_INT(PORT_GROUP_RX(sc)), SSO_GRP_INT_EXE_INT);

	for (;;) {
		uint64_t errcode, errlevel;
		uint64_t word3;
		size_t pktlen, left;
#ifdef DIAGNOSTIC
		unsigned int pkind;
#endif

		work = ogx_get_work(sc->sc_node, PORT_GROUP_RX(sc));
		if (work == NULL)
			break;

#ifdef DIAGNOSTIC
		pkind = (work[0] & PKI_WORD0_PKIND_M) >> PKI_WORD0_PKIND_S;
		if (__predict_false(pkind != PORT_PKIND(sc))) {
			printf("%s: unexpected pkind %u, should be %u\n",
			    DEVNAME(sc), pkind, PORT_PKIND(sc));
			goto wqe_error;
		}
#endif

		nsegs = (work[0] & PKI_WORD0_BUFS_M) >> PKI_WORD0_BUFS_S;
		word3 = work[3];

		errlevel = (work[2] & PKI_WORD2_ERR_LEVEL_M) >>
		    PKI_WORD2_ERR_LEVEL_S;
		errcode = (work[2] & PKI_WORD2_ERR_CODE_M) >>
		    PKI_WORD2_ERR_CODE_S;
		if (__predict_false(errlevel <= 1 && errcode != 0)) {
			ifp->if_ierrors++;
			goto drop;
		}

		KASSERT(nsegs > 0);
		rxused += nsegs;

		pktlen = (work[1] & PKI_WORD1_LEN_M) >> PKI_WORD1_LEN_S;
		left = pktlen;

		m0 = NULL;
		mprev = NULL;
		while (nsegs-- > 0) {
			size_t size;

			pktdata = (word3 & PKI_WORD3_ADDR_M) >>
			    PKI_WORD3_ADDR_S;
			pktbuf = pktdata & ~(CACHELINESIZE - 1);
			size = (word3 & PKI_WORD3_SIZE_M) >> PKI_WORD3_SIZE_S;
			if (size > left)
				size = left;

			m = ogx_extract_mbuf(sc, pktbuf);
			m->m_data += (pktdata - pktbuf) & (CACHELINESIZE - 1);
			m->m_len = size;
			left -= size;

			/* pktdata can be unaligned. */
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(pktdata -
			    sizeof(uint64_t), CCA_CACHED), sizeof(uint64_t));

			if (m0 == NULL) {
				m0 = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				mprev->m_next = m;
			}
			mprev = m;
		}

		m0->m_pkthdr.len = pktlen;
		ml_enqueue(&ml, m0);

		continue;

drop:
		/* Return the buffers back to the pool. */
		while (nsegs-- > 0) {
			pktdata = (word3 & PKI_WORD3_ADDR_M) >>
			    PKI_WORD3_ADDR_S;
			pktbuf = pktdata & ~(CACHELINESIZE - 1);
			/* pktdata can be unaligned. */
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(pktdata -
			    sizeof(uint64_t), CCA_CACHED), sizeof(uint64_t));
			ogx_fpa3_free(&sc->sc_pkt_aura, pktbuf);
		}
	}

	if_input(ifp, &ml);

	rxused = ogx_load_mbufs(sc, rxused);
	if (rxused != 0) {
		atomic_add_int(&sc->sc_rxused, rxused);
		timeout_add(&sc->sc_rxrefill, 1);
	}

	return 1;

#ifdef DIAGNOSTIC
wqe_error:
	printf("work0: %016llx\n", work[0]);
	printf("work1: %016llx\n", work[1]);
	printf("work2: %016llx\n", work[2]);
	printf("work3: %016llx\n", work[3]);
	printf("work4: %016llx\n", work[4]);
	panic("%s: %s: wqe error", DEVNAME(sc), __func__);
#endif
}
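
/*
 * Tx completion interrupt. ogx_send_mbuf() appends a SEND_WORK command
 * that makes the PKO post a work queue entry to the port's Tx SSO group
 * once the packet is out; the entry points at the mbuf to free.
 */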
int
ogx_txintr(void *arg)
{
	struct ogx_softc *sc = arg;
	struct ogx_node *node = sc->sc_node;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	uint64_t *work;
	unsigned int nfreed = 0;

	/* Acknowledge the interrupt. */
	SSO_WR_8(node, SSO_GRP_INT(PORT_GROUP_TX(sc)), SSO_GRP_INT_EXE_INT);

	for (;;) {
		work = ogx_get_work(node, PORT_GROUP_TX(sc));
		if (work == NULL)
			break;

		/*
		 * work points to ph_cookie via the xkphys segment.
		 * ph_cookie contains the original mbuf pointer.
		 */
		m = *(struct mbuf **)work;
		KASSERT(m->m_pkthdr.ph_ifidx == (u_int)(uintptr_t)sc);
		m->m_pkthdr.ph_ifidx = 0;
		m_freem(m);
		nfreed++;
	}

	if (nfreed > 0 && atomic_add_int_nv(&sc->sc_txfree, nfreed) == nfreed)
		ifq_restart(&ifp->if_snd);

	return 1;
}

unsigned int
ogx_load_mbufs(struct ogx_softc *sc, unsigned int n)
{
	struct mbuf *m;
	paddr_t pktbuf;

	for ( ; n > 0; n--) {
		m = MCLGETL(NULL, M_NOWAIT, MCLBYTES);
		if (m == NULL)
			break;

		m->m_data = (void *)(((vaddr_t)m->m_data + CACHELINESIZE) &
		    ~(CACHELINESIZE - 1));
		((struct mbuf **)m->m_data)[-1] = m;

		pktbuf = KVTOPHYS(m->m_data);
		m->m_pkthdr.ph_cookie = (void *)pktbuf;
		ogx_fpa3_free(&sc->sc_pkt_aura, pktbuf);
	}
	return n;
}

unsigned int
ogx_unload_mbufs(struct ogx_softc *sc)
{
	struct mbuf *m;
	paddr_t pktbuf;
	unsigned int n = 0;

	for (;;) {
		pktbuf = ogx_fpa3_alloc(&sc->sc_pkt_aura);
		if (pktbuf == 0)
			break;
		m = ogx_extract_mbuf(sc, pktbuf);
		m_freem(m);
		n++;
	}
	return n;
}

void
ogx_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct ogx_softc *sc = ifp->if_softc;
	struct mbuf *m;
	unsigned int txfree, txused;

	txfree = READ_ONCE(sc->sc_txfree);
	txused = 0;

	while (txused < txfree) {
		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		if (ogx_send_mbuf(sc, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		txused++;
	}

	if (atomic_sub_int_nv(&sc->sc_txfree, txused) == 0)
		ifq_set_oactive(ifq);
}

int
ogx_send_mbuf(struct ogx_softc *sc, struct mbuf *m0)
{
	struct ether_header *eh;
	struct mbuf *m;
	uint64_t ehdrlen, hdr, scroff, word;
	unsigned int nfrags;

	/* Save original pointer for freeing after transmission. */
	m0->m_pkthdr.ph_cookie = m0;
	/* Add a tag for sanity checking. */
	m0->m_pkthdr.ph_ifidx = (u_int)(uintptr_t)sc;

	hdr = PKO3_SEND_HDR_DF;
	hdr |= m0->m_pkthdr.len << PKO3_SEND_HDR_TOTAL_S;

	if (m0->m_pkthdr.csum_flags &
	    (M_IPV4_CSUM_OUT | M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) {
		eh = mtod(m0, struct ether_header *);
		ehdrlen = ETHER_HDR_LEN;

		switch (ntohs(eh->ether_type)) {
		case ETHERTYPE_IP:
			hdr |= ehdrlen << PKO3_SEND_HDR_L3PTR_S;
			hdr |= (ehdrlen + sizeof(struct ip)) <<
			    PKO3_SEND_HDR_L4PTR_S;
			break;
		case ETHERTYPE_IPV6:
			hdr |= ehdrlen << PKO3_SEND_HDR_L3PTR_S;
			hdr |= (ehdrlen + sizeof(struct ip6_hdr)) <<
			    PKO3_SEND_HDR_L4PTR_S;
			break;
		default:
			break;
		}

		if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			hdr |= PKO3_SEND_HDR_CKL3;
		if (m0->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
			hdr |= PKO3_SEND_HDR_CKL4_TCP;
		if (m0->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
			hdr |= PKO3_SEND_HDR_CKL4_UDP;
	}

	/* Flush pending writes before packet submission. */
	octeon_syncw();

	/* Block until any previous LMTDMA request has been processed. */
	octeon_synciobdma();
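
	/*
	 * Build the PKO command into the scratchpad: a SEND_HDR word,
	 * one SEND_GATHER word per fragment, and a final SEND_WORK
	 * word that requests a Tx completion notification.
	 */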
	/* Get the LMTDMA region offset in the scratchpad. */
	scroff = 2 * 0x80;

	octeon_cvmseg_write_8(scroff, hdr);
	scroff += sizeof(hdr);

	for (m = m0, nfrags = 0; m != NULL && nfrags < 13;
	    m = m->m_next, nfrags++) {
		word = PKO3_SUBDC3_SEND_GATHER << PKO3_SUBC_BUF_PTR_SUBDC3_S;
		word |= KVTOPHYS(m->m_data) << PKO3_SUBC_BUF_PTR_ADDR_S;
		word |= (uint64_t)m->m_len << PKO3_SUBC_BUF_PTR_SIZE_S;
		octeon_cvmseg_write_8(scroff, word);
		scroff += sizeof(word);
	}

	if (m != NULL) {
		if (m_defrag(m0, M_DONTWAIT) != 0)
			return ENOMEM;

		/* Discard previously set fragments. */
		scroff -= sizeof(word) * nfrags;

		word = PKO3_SUBDC3_SEND_GATHER << PKO3_SUBC_BUF_PTR_SUBDC3_S;
		word |= KVTOPHYS(m0->m_data) << PKO3_SUBC_BUF_PTR_ADDR_S;
		word |= (uint64_t)m0->m_len << PKO3_SUBC_BUF_PTR_SIZE_S;
		octeon_cvmseg_write_8(scroff, word);
		scroff += sizeof(word);
	}

	/* Send work when ready to free the mbuf. */
	word = PKO3_SEND_WORK_CODE << PKO3_SEND_SUBDC4_CODE_S;
	word |= KVTOPHYS(&m0->m_pkthdr.ph_cookie) << PKO3_SEND_WORK_ADDR_S;
	word |= (uint64_t)PORT_GROUP_TX(sc) << PKO3_SEND_WORK_GRP_S;
	word |= 2ULL << PKO3_SEND_WORK_TT_S;
	octeon_cvmseg_write_8(scroff, word);
	scroff += sizeof(word);

	/* Submit the command. */
	word = PKO3_LMTDMA_DID;
	word |= ((2ULL * 0x80) >> 3) << PKO3_LMTDMA_SCRADDR_S;
	word |= 1ULL << PKO3_LMTDMA_RTNLEN_S;
	word |= DESC_QUEUE(sc) << PKO3_LMTDMA_DQ_S;
	octeon_lmtdma_write_8((scroff - 8) & 0x78, word);

	return 0;
}

int
ogx_media_change(struct ifnet *ifp)
{
	struct ogx_softc *sc = ifp->if_softc;

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	return 0;
}

void
ogx_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct ogx_softc *sc = ifp->if_softc;

	if (!LIST_EMPTY(&sc->sc_mii.mii_phys)) {
		mii_pollstat(&sc->sc_mii);
		imr->ifm_status = sc->sc_mii.mii_media_status;
		imr->ifm_active = sc->sc_mii.mii_media_active;
	}
}

int
ogx_mii_readreg(struct device *self, int phy_no, int reg)
{
	struct ogx_softc *sc = (struct ogx_softc *)self;

	return cn30xxsmi_read(sc->sc_smi, phy_no, reg);
}

void
ogx_mii_writereg(struct device *self, int phy_no, int reg, int value)
{
	struct ogx_softc *sc = (struct ogx_softc *)self;

	cn30xxsmi_write(sc->sc_smi, phy_no, reg, value);
}

void
ogx_mii_statchg(struct device *self)
{
	struct ogx_softc *sc = (struct ogx_softc *)self;

	if (ISSET(sc->sc_mii.mii_media_active, IFM_FDX))
		sc->sc_link_duplex = 1;
	else
		sc->sc_link_duplex = 0;
	sc->sc_link_ops->link_change(sc);
}

int
ogx_sgmii_link_init(struct ogx_softc *sc)
{
	uint64_t cpu_freq = octeon_boot_info->eclock / 1000000;
	uint64_t val;
	int align = 1;

	val = PORT_RD_8(sc, BGX_GMP_GMI_TX_APPEND);
	val |= BGX_GMP_GMI_TX_APPEND_FCS;
	val |= BGX_GMP_GMI_TX_APPEND_PAD;
	if (ISSET(val, BGX_GMP_GMI_TX_APPEND_PREAMBLE))
		align = 0;
	PORT_WR_8(sc, BGX_GMP_GMI_TX_APPEND, val);
	PORT_WR_8(sc, BGX_GMP_GMI_TX_MIN_PKT, 59);
	PORT_WR_8(sc, BGX_GMP_GMI_TX_THRESH, 0x20);

	val = PORT_RD_8(sc, BGX_GMP_GMI_TX_SGMII_CTL);
	if (align)
		val |= BGX_GMP_GMI_TX_SGMII_CTL_ALIGN;
	else
		val &= ~BGX_GMP_GMI_TX_SGMII_CTL_ALIGN;
	PORT_WR_8(sc, BGX_GMP_GMI_TX_SGMII_CTL, val);
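
	/*
	 * The link timer is programmed as (1600 * cpu_freq) >> 10.
	 * With cpu_freq in MHz, this presumably counts in units of
	 * 1024 coprocessor clocks and amounts to roughly 1.6 ms.
	 */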
	/* Set timing for SGMII. */
	val = PORT_RD_8(sc, BGX_GMP_PCS_LINK_TIMER);
	val &= ~BGX_GMP_PCS_LINK_TIMER_COUNT_M;
	val |= (1600 * cpu_freq) >> 10;
	PORT_WR_8(sc, BGX_GMP_PCS_LINK_TIMER, val);

	return 0;
}

void
ogx_sgmii_link_down(struct ogx_softc *sc)
{
	uint64_t val;
	int timeout;

	/* Wait until the port is idle. */
	for (timeout = 1000; timeout > 0; timeout--) {
		const uint64_t idlemask = BGX_GMP_GMI_PRT_CFG_RX_IDLE |
		    BGX_GMP_GMI_PRT_CFG_TX_IDLE;
		val = PORT_RD_8(sc, BGX_GMP_GMI_PRT_CFG);
		if ((val & idlemask) == idlemask)
			break;
		delay(1000);
	}
	if (timeout == 0)
		printf("%s: port idle timeout\n", DEVNAME(sc));

	/* Disable autonegotiation and power down the link. */
	val = PORT_RD_8(sc, BGX_GMP_PCS_MR_CONTROL);
	val &= ~BGX_GMP_PCS_MR_CONTROL_AN_EN;
	val |= BGX_GMP_PCS_MR_CONTROL_PWR_DN;
	PORT_WR_8(sc, BGX_GMP_PCS_MR_CONTROL, val);
}

void
ogx_sgmii_link_change(struct ogx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint64_t config;
	uint64_t misc_ctl;
	uint64_t prt_cfg = 0;
	uint64_t samp_pt;
	uint64_t tx_burst, tx_slot;
	uint64_t val;
	int timeout;

	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		misc_ctl = PORT_RD_8(sc, BGX_GMP_PCS_MISC_CTL);
		misc_ctl |= BGX_GMP_PCS_MISC_CTL_GMXENO;
		PORT_WR_8(sc, BGX_GMP_PCS_MISC_CTL, misc_ctl);
		return;
	}

	val = PORT_RD_8(sc, BGX_CMR_CONFIG);
	val |= BGX_CMR_CONFIG_ENABLE;
	PORT_WR_8(sc, BGX_CMR_CONFIG, val);

	/* Reset the PCS. */
	val = PORT_RD_8(sc, BGX_GMP_PCS_MR_CONTROL);
	val |= BGX_GMP_PCS_MR_CONTROL_RESET;
	PORT_WR_8(sc, BGX_GMP_PCS_MR_CONTROL, val);

	/* Wait for the reset to complete. */
	timeout = 100000;
	while (timeout-- > 0) {
		val = PORT_RD_8(sc, BGX_GMP_PCS_MR_CONTROL);
		if (!ISSET(val, BGX_GMP_PCS_MR_CONTROL_RESET))
			break;
		delay(10);
	}
	if (timeout == 0)
		printf("%s: SGMII reset timeout\n", DEVNAME(sc));

	/* Use MAC mode. */
	val = PORT_RD_8(sc, BGX_GMP_PCS_MISC_CTL);
	val &= ~BGX_GMP_PCS_MISC_CTL_MAC_PHY;
	val &= ~BGX_GMP_PCS_MISC_CTL_MODE;
	PORT_WR_8(sc, BGX_GMP_PCS_MISC_CTL, val);

	/* Start autonegotiation between the SoC and the PHY. */
	val = PORT_RD_8(sc, BGX_GMP_PCS_MR_CONTROL);
	val |= BGX_GMP_PCS_MR_CONTROL_AN_EN;
	val |= BGX_GMP_PCS_MR_CONTROL_RST_AN;
	val &= ~BGX_GMP_PCS_MR_CONTROL_PWR_DN;
	PORT_WR_8(sc, BGX_GMP_PCS_MR_CONTROL, val);

	/* Wait for the autonegotiation to complete. */
	timeout = 100000;
	while (timeout-- > 0) {
		val = PORT_RD_8(sc, BGX_GMP_PCS_MR_STATUS);
		if (ISSET(val, BGX_GMP_PCS_MR_STATUS_AN_CPT))
			break;
		delay(10);
	}
	if (timeout == 0)
		printf("%s: SGMII autonegotiation timeout\n", DEVNAME(sc));

	/* Stop Rx and Tx engines. */
	config = PORT_RD_8(sc, BGX_CMR_CONFIG);
	config &= ~BGX_CMR_CONFIG_DATA_PKT_RX_EN;
	config &= ~BGX_CMR_CONFIG_DATA_PKT_TX_EN;
	PORT_WR_8(sc, BGX_CMR_CONFIG, config);
	(void)PORT_RD_8(sc, BGX_CMR_CONFIG);
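
	/*
	 * Once the engines are idle, the speed- and duplex-dependent
	 * parameters below can be reprogrammed safely.
	 */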
	/* Wait until the engines are idle. */
	for (timeout = 1000000; timeout > 0; timeout--) {
		const uint64_t idlemask = BGX_GMP_GMI_PRT_CFG_RX_IDLE |
		    BGX_GMP_GMI_PRT_CFG_TX_IDLE;
		prt_cfg = PORT_RD_8(sc, BGX_GMP_GMI_PRT_CFG);
		if ((prt_cfg & idlemask) == idlemask)
			break;
		delay(1);
	}
	if (timeout == 0)
		printf("%s: port idle timeout\n", DEVNAME(sc));

	if (sc->sc_link_duplex)
		prt_cfg |= BGX_GMP_GMI_PRT_CFG_DUPLEX;
	else
		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_DUPLEX;

	switch (ifp->if_baudrate) {
	case IF_Mbps(10):
		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SPEED;
		prt_cfg |= BGX_GMP_GMI_PRT_CFG_SPEED_MSB;
		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SLOTTIME;
		samp_pt = 25;
		tx_slot = 0x40;
		tx_burst = 0;
		break;
	case IF_Mbps(100):
		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SPEED;
		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SPEED_MSB;
		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SLOTTIME;
		samp_pt = 5;
		tx_slot = 0x40;
		tx_burst = 0;
		break;
	case IF_Gbps(1):
	default:
		prt_cfg |= BGX_GMP_GMI_PRT_CFG_SPEED;
		prt_cfg &= ~BGX_GMP_GMI_PRT_CFG_SPEED_MSB;
		prt_cfg |= BGX_GMP_GMI_PRT_CFG_SLOTTIME;
		samp_pt = 1;
		tx_slot = 0x200;
		if (sc->sc_link_duplex)
			tx_burst = 0;
		else
			tx_burst = 0x2000;
		break;
	}

	PORT_WR_8(sc, BGX_GMP_GMI_TX_SLOT, tx_slot);
	PORT_WR_8(sc, BGX_GMP_GMI_TX_BURST, tx_burst);

	misc_ctl = PORT_RD_8(sc, BGX_GMP_PCS_MISC_CTL);
	misc_ctl &= ~BGX_GMP_PCS_MISC_CTL_GMXENO;
	misc_ctl &= ~BGX_GMP_PCS_MISC_CTL_SAMP_PT_M;
	misc_ctl |= samp_pt << BGX_GMP_PCS_MISC_CTL_SAMP_PT_S;
	PORT_WR_8(sc, BGX_GMP_PCS_MISC_CTL, misc_ctl);
	(void)PORT_RD_8(sc, BGX_GMP_PCS_MISC_CTL);

	PORT_WR_8(sc, BGX_GMP_GMI_PRT_CFG, prt_cfg);
	(void)PORT_RD_8(sc, BGX_GMP_GMI_PRT_CFG);

	config = PORT_RD_8(sc, BGX_CMR_CONFIG);
	config |= BGX_CMR_CONFIG_ENABLE |
	    BGX_CMR_CONFIG_DATA_PKT_RX_EN |
	    BGX_CMR_CONFIG_DATA_PKT_TX_EN;
	PORT_WR_8(sc, BGX_CMR_CONFIG, config);
	(void)PORT_RD_8(sc, BGX_CMR_CONFIG);
}

#if NKSTAT > 0
enum ogx_stat {
	ogx_stat_rx_hmin,
	ogx_stat_rx_h64,
	ogx_stat_rx_h128,
	ogx_stat_rx_h256,
	ogx_stat_rx_h512,
	ogx_stat_rx_h1024,
	ogx_stat_rx_hmax,
	ogx_stat_rx_totp_pki,
	ogx_stat_rx_toto_pki,
	ogx_stat_rx_raw,
	ogx_stat_rx_drop,
	ogx_stat_rx_bcast,
	ogx_stat_rx_mcast,
	ogx_stat_rx_fcs_error,
	ogx_stat_rx_fcs_undersz,
	ogx_stat_rx_undersz,
	ogx_stat_rx_fcs_oversz,
	ogx_stat_rx_oversz,
	ogx_stat_rx_error,
	ogx_stat_rx_special,
	ogx_stat_rx_bdrop,
	ogx_stat_rx_mdrop,
	ogx_stat_rx_ipbdrop,
	ogx_stat_rx_ipmdrop,
	ogx_stat_rx_sdrop,
	ogx_stat_rx_totp_bgx,
	ogx_stat_rx_toto_bgx,
	ogx_stat_rx_pause,
	ogx_stat_rx_dmac,
	ogx_stat_rx_bgx_drop,
	ogx_stat_rx_bgx_error,
	ogx_stat_tx_hmin,
	ogx_stat_tx_h64,
	ogx_stat_tx_h65,
	ogx_stat_tx_h128,
	ogx_stat_tx_h256,
	ogx_stat_tx_h512,
	ogx_stat_tx_h1024,
	ogx_stat_tx_hmax,
	ogx_stat_tx_coll,
	ogx_stat_tx_defer,
	ogx_stat_tx_mcoll,
	ogx_stat_tx_scoll,
	ogx_stat_tx_toto_bgx,
	ogx_stat_tx_totp_bgx,
	ogx_stat_tx_bcast,
	ogx_stat_tx_mcast,
	ogx_stat_tx_uflow,
	ogx_stat_tx_control,
	ogx_stat_count
};

enum ogx_counter_type {
	C_NONE = 0,
	C_BGX,
	C_PKI,
};
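
/*
 * A counter descriptor maps one kstat entry to a hardware statistics
 * register, either in the per-port BGX block or in the PKI statistics
 * window.
 */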
struct ogx_counter {
	const char		*c_name;
	enum kstat_kv_unit	 c_unit;
	enum ogx_counter_type	 c_type;
	uint32_t		 c_reg;
};

static const struct ogx_counter ogx_counters[ogx_stat_count] = {
	[ogx_stat_rx_hmin] =
	    { "rx 1-63B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST0 },
	[ogx_stat_rx_h64] =
	    { "rx 64-127B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST1 },
	[ogx_stat_rx_h128] =
	    { "rx 128-255B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST2 },
	[ogx_stat_rx_h256] =
	    { "rx 256-511B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST3 },
	[ogx_stat_rx_h512] =
	    { "rx 512-1023B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST4 },
	[ogx_stat_rx_h1024] =
	    { "rx 1024-1518B",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST5 },
	[ogx_stat_rx_hmax] =
	    { "rx 1519-maxB",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_HIST6 },
	[ogx_stat_rx_totp_pki] =
	    { "rx total pki",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT0 },
	[ogx_stat_rx_toto_pki] =
	    { "rx total pki",	KSTAT_KV_U_BYTES, C_PKI, PKI_STAT_STAT1 },
	[ogx_stat_rx_raw] =
	    { "rx raw",		KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT2 },
	[ogx_stat_rx_drop] =
	    { "rx drop",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT3 },
	[ogx_stat_rx_bcast] =
	    { "rx bcast",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT5 },
	[ogx_stat_rx_mcast] =
	    { "rx mcast",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT6 },
	[ogx_stat_rx_fcs_error] =
	    { "rx fcs error",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT7 },
	[ogx_stat_rx_fcs_undersz] =
	    { "rx fcs undersz",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT8 },
	[ogx_stat_rx_undersz] =
	    { "rx undersz",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT9 },
	[ogx_stat_rx_fcs_oversz] =
	    { "rx fcs oversz",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT10 },
	[ogx_stat_rx_oversz] =
	    { "rx oversize",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT11 },
	[ogx_stat_rx_error] =
	    { "rx error",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT12 },
	[ogx_stat_rx_special] =
	    { "rx special",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT13 },
	[ogx_stat_rx_bdrop] =
	    { "rx drop bcast",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT14 },
	[ogx_stat_rx_mdrop] =
	    { "rx drop mcast",	KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT15 },
	[ogx_stat_rx_ipbdrop] =
	    { "rx drop ipbcast",KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT16 },
	[ogx_stat_rx_ipmdrop] =
	    { "rx drop ipmcast",KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT17 },
	[ogx_stat_rx_sdrop] =
	    { "rx drop special",KSTAT_KV_U_PACKETS, C_PKI, PKI_STAT_STAT18 },
	[ogx_stat_rx_totp_bgx] =
	    { "rx total bgx",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT0 },
	[ogx_stat_rx_toto_bgx] =
	    { "rx total bgx",	KSTAT_KV_U_BYTES, C_BGX, BGX_CMR_RX_STAT1 },
	[ogx_stat_rx_pause] =
	    { "rx bgx pause",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT2 },
	[ogx_stat_rx_dmac] =
	    { "rx bgx dmac",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT4 },
	[ogx_stat_rx_bgx_drop] =
	    { "rx bgx drop",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT6 },
	[ogx_stat_rx_bgx_error] =
	    { "rx bgx error",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_RX_STAT8 },
	[ogx_stat_tx_hmin] =
	    { "tx 1-63B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT6 },
	[ogx_stat_tx_h64] =
	    { "tx 64B",		KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT7 },
	[ogx_stat_tx_h65] =
	    { "tx 65-127B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT8 },
	[ogx_stat_tx_h128] =
	    { "tx 128-255B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT9 },
	[ogx_stat_tx_h256] =
	    { "tx 256-511B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT10 },
	[ogx_stat_tx_h512] =
	    { "tx 512-1023B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT11 },
	[ogx_stat_tx_h1024] =
	    { "tx 1024-1518B",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT12 },
	[ogx_stat_tx_hmax] =
	    { "tx 1519-maxB",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT13 },
	[ogx_stat_tx_coll] =
	    { "tx coll",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT0 },
	[ogx_stat_tx_defer] =
	    { "tx defer",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT1 },
	[ogx_stat_tx_mcoll] =
	    { "tx mcoll",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT2 },
	[ogx_stat_tx_scoll] =
	    { "tx scoll",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT3 },
	[ogx_stat_tx_toto_bgx] =
	    { "tx total bgx",	KSTAT_KV_U_BYTES, C_BGX, BGX_CMR_TX_STAT4 },
	[ogx_stat_tx_totp_bgx] =
	    { "tx total bgx",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT5 },
	[ogx_stat_tx_bcast] =
	    { "tx bcast",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT14 },
	[ogx_stat_tx_mcast] =
	    { "tx mcast",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT15 },
	[ogx_stat_tx_uflow] =
	    { "tx underflow",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT16 },
	[ogx_stat_tx_control] =
	    { "tx control",	KSTAT_KV_U_PACKETS, C_BGX, BGX_CMR_TX_STAT17 },
};

void
ogx_kstat_attach(struct ogx_softc *sc)
{
	const struct ogx_counter *c;
	struct kstat *ks;
	struct kstat_kv *kvs;
	struct ogx_node *node = sc->sc_node;
	uint64_t *vals;
	int i;

	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
	timeout_set(&sc->sc_kstat_tmo, ogx_kstat_tick, sc);

	if (bus_space_subregion(node->node_iot, node->node_pki,
	    PKI_STAT_BASE(PORT_PKIND(sc)), PKI_STAT_SIZE,
	    &sc->sc_pki_stat_ioh) != 0)
		return;

	ks = kstat_create(DEVNAME(sc), 0, "ogx-stats", 0, KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	vals = mallocarray(nitems(ogx_counters), sizeof(*vals),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->sc_counter_vals = vals;

	kvs = mallocarray(nitems(ogx_counters), sizeof(*kvs),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < nitems(ogx_counters); i++) {
		c = &ogx_counters[i];
		kstat_kv_unit_init(&kvs[i], c->c_name, KSTAT_KV_T_COUNTER64,
		    c->c_unit);
	}

	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(ogx_counters) * sizeof(*kvs);
	ks->ks_read = ogx_kstat_read;

	sc->sc_kstat = ks;
	kstat_install(ks);
}
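
/*
 * Fold the hardware counters into the kstat values. Deltas are masked
 * to the width of the hardware counter so that wraparound is accounted
 * for.
 */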
int
ogx_kstat_read(struct kstat *ks)
{
	const struct ogx_counter *c;
	struct ogx_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;
	uint64_t *counter_vals = sc->sc_counter_vals;
	uint64_t delta, val;
	int i, timeout;

	for (i = 0; i < nitems(ogx_counters); i++) {
		c = &ogx_counters[i];
		switch (c->c_type) {
		case C_BGX:
			val = PORT_RD_8(sc, c->c_reg);
			delta = (val - counter_vals[i]) & BGX_CMR_STAT_MASK;
			counter_vals[i] = val;
			kstat_kv_u64(&kvs[i]) += delta;
			break;
		case C_PKI:
			/*
			 * Retry the read if the value is bogus.
			 * This can happen on some hardware when
			 * the hardware is updating the value.
			 */
			for (timeout = 100; timeout > 0; timeout--) {
				val = bus_space_read_8(sc->sc_iot,
				    sc->sc_pki_stat_ioh, c->c_reg);
				if (val != ~0ULL) {
					delta = (val - counter_vals[i]) &
					    PKI_STAT_MASK;
					counter_vals[i] = val;
					kstat_kv_u64(&kvs[i]) += delta;
					break;
				}
				CPU_BUSY_CYCLE();
			}
			break;
		case C_NONE:
			break;
		}
	}

	getnanouptime(&ks->ks_updated);

	return 0;
}

void
ogx_kstat_start(struct ogx_softc *sc)
{
	const struct ogx_counter *c;
	int i;

	/* Zero the counters. */
	for (i = 0; i < nitems(ogx_counters); i++) {
		c = &ogx_counters[i];
		switch (c->c_type) {
		case C_BGX:
			PORT_WR_8(sc, c->c_reg, 0);
			break;
		case C_PKI:
			bus_space_write_8(sc->sc_iot, sc->sc_pki_stat_ioh,
			    c->c_reg, 0);
			break;
		case C_NONE:
			break;
		}
	}
	memset(sc->sc_counter_vals, 0,
	    nitems(ogx_counters) * sizeof(*sc->sc_counter_vals));

	timeout_add_sec(&sc->sc_kstat_tmo, OGX_KSTAT_TICK_SECS);
}

void
ogx_kstat_stop(struct ogx_softc *sc)
{
	timeout_del_barrier(&sc->sc_kstat_tmo);

	mtx_enter(&sc->sc_kstat_mtx);
	ogx_kstat_read(sc->sc_kstat);
	mtx_leave(&sc->sc_kstat_mtx);
}

void
ogx_kstat_tick(void *arg)
{
	struct ogx_softc *sc = arg;

	timeout_add_sec(&sc->sc_kstat_tmo, OGX_KSTAT_TICK_SECS);

	if (mtx_enter_try(&sc->sc_kstat_mtx)) {
		ogx_kstat_read(sc->sc_kstat);
		mtx_leave(&sc->sc_kstat_mtx);
	}
}
#endif /* NKSTAT > 0 */
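
/*
 * One-time initialization of the node-global FPA, SSO, PKI and PKO
 * units shared by all ports. The first port to attach does the work;
 * later callers return early thanks to the NODE_INITED flag.
 */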

int
ogx_node_init(struct ogx_node **pnode, bus_dma_tag_t dmat, bus_space_tag_t iot)
{
	const struct ogx_config *cfg;
	struct ogx_node *node = &ogx_node;
	uint64_t val;
	uint32_t chipid;
	int cl, i, timeout;

	if (node->node_flags & NODE_INITED) {
		*pnode = node;
		return 0;
	}

	chipid = octeon_get_chipid();
	switch (octeon_model_family(chipid)) {
	case OCTEON_MODEL_FAMILY_CN73XX:
		node->node_cfg = cfg = &ogx_cn73xx_config;
		break;
	case OCTEON_MODEL_FAMILY_CN78XX:
		node->node_cfg = cfg = &ogx_cn78xx_config;
		break;
	default:
		printf(": unhandled chipid 0x%x\n", chipid);
		return -1;
	}

	rw_init(&node->node_lock, "ogxnlk");

	node->node_dmat = dmat;
	node->node_iot = iot;
	if (bus_space_map(node->node_iot, FPA3_BASE, FPA3_SIZE, 0,
	    &node->node_fpa3)) {
		printf(": can't map FPA3\n");
		goto error;
	}
	if (bus_space_map(node->node_iot, PKI_BASE, PKI_SIZE, 0,
	    &node->node_pki)) {
		printf(": can't map PKI\n");
		goto error;
	}
	if (bus_space_map(node->node_iot, PKO3_BASE, PKO3_SIZE, 0,
	    &node->node_pko3)) {
		printf(": can't map PKO3\n");
		goto error;
	}
	if (bus_space_map(node->node_iot, SSO_BASE, SSO_SIZE, 0,
	    &node->node_sso)) {
		printf(": can't map SSO\n");
		goto error;
	}

	/*
	 * The rest of this function handles errors by panicking.
	 */

	node->node_flags |= NODE_INITED;

	PKO3_WR_8(node, PKO3_CHANNEL_LEVEL, 0);

	ogx_fpa3_pool_init(node, &node->node_pkt_pool, OGX_POOL_PKT, 1024 * 32);
	ogx_fpa3_pool_init(node, &node->node_pko_pool, OGX_POOL_PKO, 1024 * 32);
	ogx_fpa3_pool_init(node, &node->node_sso_pool, OGX_POOL_SSO, 1024 * 32);

	ogx_fpa3_aura_init(node, &node->node_pko_aura, OGX_AURA_PKO,
	    &node->node_pko_pool);
	ogx_fpa3_aura_init(node, &node->node_sso_aura, OGX_AURA_SSO,
	    &node->node_sso_pool);

	ogx_fpa3_aura_load(node, &node->node_sso_aura, 1024, 4096);
	ogx_fpa3_aura_load(node, &node->node_pko_aura, 1024, 4096);

	/*
	 * Initialize the Schedule/Synchronization/Order (SSO) unit.
	 */

	val = SSO_AW_CFG_LDWB | SSO_AW_CFG_LDT | SSO_AW_CFG_STT;
	SSO_WR_8(node, SSO_AW_CFG, val);

	val = node->node_id << SSO_XAQ_AURA_NODE_S;
	val |= (uint64_t)OGX_AURA_SSO << SSO_XAQ_AURA_LAURA_S;
	SSO_WR_8(node, SSO_XAQ_AURA, val);

	SSO_WR_8(node, SSO_ERR0, 0);

	/* Initialize the hardware's linked lists. */
	for (i = 0; i < 64; i++) {
		paddr_t addr;

		addr = ogx_fpa3_alloc(&node->node_sso_aura);
		if (addr == 0)
			panic("%s: could not alloc initial XAQ block %d",
			    __func__, i);
		SSO_WR_8(node, SSO_XAQ_HEAD_PTR(i), addr);
		SSO_WR_8(node, SSO_XAQ_TAIL_PTR(i), addr);
		SSO_WR_8(node, SSO_XAQ_HEAD_NEXT(i), addr);
		SSO_WR_8(node, SSO_XAQ_TAIL_NEXT(i), addr);

		SSO_WR_8(node, SSO_GRP_PRI(i), SSO_GRP_PRI_WEIGHT_M);
	}

	val = SSO_RD_8(node, SSO_AW_CFG);
	val |= SSO_AW_CFG_RWEN;
	SSO_WR_8(node, SSO_AW_CFG, val);

	/*
	 * Initialize the Packet Input (PKI) unit.
	 */

	/* Clear any previous style configuration. */
	for (cl = 0; cl < cfg->cfg_nclusters; cl++) {
		int pkind;

		for (pkind = 0; pkind < 64; pkind++)
			PKI_WR_8(node, PKI_CL_PKIND_STYLE(cl, pkind), 0);
	}

	/* Invalidate all PCAM entries. */
	for (cl = 0; cl < cfg->cfg_nclusters; cl++) {
		int bank;

		for (bank = 0; bank < 2; bank++) {
			for (i = 0; i < 192; i++) {
				PKI_WR_8(node,
				    PKI_CL_PCAM_TERM(cl, bank, i), 0);
			}
		}
	}

	PKI_WR_8(node, PKI_STAT_CTL, 0);

	/* Enable input backpressure. */
	val = PKI_RD_8(node, PKI_BUF_CTL);
	val |= PKI_BUF_CTL_PBP_EN;
	PKI_WR_8(node, PKI_BUF_CTL, val);

	/* Disable the parsing clusters until the firmware has been loaded. */
	for (cl = 0; cl < cfg->cfg_nclusters; cl++) {
		val = PKI_RD_8(node, PKI_ICG_CFG(cl));
		val &= ~PKI_ICG_CFG_PENA;
		PKI_WR_8(node, PKI_ICG_CFG(cl), val);
	}

	val = PKI_RD_8(node, PKI_GBL_PEN);
	val &= ~PKI_GBL_PEN_M;
	val |= PKI_GBL_PEN_L3;
	val |= PKI_GBL_PEN_L4;
	PKI_WR_8(node, PKI_GBL_PEN, val);

	for (i = 0; i < nitems(ogx_ltypes); i++) {
		val = PKI_RD_8(node, PKI_LTYPE_MAP(i));
		val &= ~0x7;
		val |= ogx_ltypes[i];
		PKI_WR_8(node, PKI_LTYPE_MAP(i), val);
	}

	while (PKI_RD_8(node, PKI_SFT_RST) & PKI_SFT_RST_BUSY)
		delay(1);

	val = PKI_RD_8(node, PKI_BUF_CTL);
	val |= PKI_BUF_CTL_PKI_EN;
	PKI_WR_8(node, PKI_BUF_CTL, val);

	/*
	 * Initialize the Packet Output (PKO) unit.
	 */

	/* Detach MACs from FIFOs. */
	for (i = 0; i < cfg->cfg_nmacs; i++) {
		val = PKO3_RD_8(node, PKO3_MAC_CFG(i));
		val |= PKO3_MAC_CFG_FIFO_NUM_M;
		PKO3_WR_8(node, PKO3_MAC_CFG(i), val);
	}

	/* Attach port queues to the NULL FIFO. */
	for (i = 0; i < cfg->cfg_npqs; i++) {
		val = (uint64_t)cfg->cfg_nullmac << PKO3_L1_SQ_TOPOLOGY_LINK_S;
		PKO3_WR_8(node, PKO3_L1_SQ_TOPOLOGY(i), val);
		val = (uint64_t)cfg->cfg_nullmac << PKO3_L1_SQ_SHAPE_LINK_S;
		PKO3_WR_8(node, PKO3_L1_SQ_SHAPE(i), val);
		val = (uint64_t)cfg->cfg_nullmac << PKO3_L1_SQ_LINK_LINK_S;
		PKO3_WR_8(node, PKO3_L1_SQ_LINK(i), val);
	}

	/* Reset the FIFO groups to use 2.5 KB per FIFO. */
	for (i = 0; i < cfg->cfg_nfifogrps; i++) {
		val = PKO3_RD_8(node, PKO3_PTGF_CFG(i));
		val &= ~PKO3_PTGF_CFG_SIZE_M;
		val &= ~PKO3_PTGF_CFG_RATE_M;
		val |= 2 << PKO3_PTGF_CFG_RATE_S;
		val |= PKO3_PTGF_CFG_RESET;
		PKO3_WR_8(node, PKO3_PTGF_CFG(i), val);

		val = PKO3_RD_8(node, PKO3_PTGF_CFG(i));
		val &= ~PKO3_PTGF_CFG_RESET;
		PKO3_WR_8(node, PKO3_PTGF_CFG(i), val);
	}

	PKO3_WR_8(node, PKO3_DPFI_FLUSH, 0);

	/* Set the PKO aura. */
	val = ((uint64_t)node->node_id << PKO3_DPFI_FPA_AURA_NODE_S) |
	    (OGX_AURA_PKO << PKO3_DPFI_FPA_AURA_AURA_S);
	PKO3_WR_8(node, PKO3_DPFI_FPA_AURA, val);

	/* Allow PKO to use the FPA. */
	PKO3_WR_8(node, PKO3_DPFI_FPA_ENA, PKO3_DPFI_FPA_ENA_ENABLE);

	/* Wait until the PKO is ready. */
	timeout = 1000;
	while (timeout-- > 0) {
		val = PKO3_RD_8(node, PKO3_STATUS);
		if (ISSET(val, PKO3_STATUS_PKO_RDY))
			break;
		delay(1000);
	}
	/*
	 * The loop leaves timeout at -1 on exhaustion; checking for 0
	 * would both miss the failure and fire on a last-pass success.
	 */
	if (timeout < 0)
		panic("PKO timeout");

	val = 72 << PKO3_PTF_IOBP_CFG_MAX_RD_SZ_S;
	PKO3_WR_8(node, PKO3_PTF_IOBP_CFG, val);

	val = 60 << PKO3_PDM_CFG_MIN_PAD_LEN_S;
	PKO3_WR_8(node, PKO3_PDM_CFG, val);

	PKO3_WR_8(node, PKO3_ENABLE, PKO3_ENABLE_ENABLE);

	*pnode = node;
	return 0;

error:
	if (node->node_sso != 0)
		bus_space_unmap(node->node_iot, node->node_sso, SSO_SIZE);
	if (node->node_pko3 != 0)
		bus_space_unmap(node->node_iot, node->node_pko3, PKO3_SIZE);
	if (node->node_pki != 0)
		bus_space_unmap(node->node_iot, node->node_pki, PKI_SIZE);
	if (node->node_fpa3 != 0)
		bus_space_unmap(node->node_iot, node->node_fpa3, FPA3_SIZE);
	node->node_sso = 0;
	node->node_pko3 = 0;
	node->node_pki = 0;
	node->node_fpa3 = 0;
	return 1;
}
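
/*
 * A minimal sketch of how a port attach path might use the function
 * above (hypothetical caller context; the tags would come from the
 * attach arguments):
 *
 *	struct ogx_node *node;
 *
 *	if (ogx_node_init(&node, dmat, iot) != 0)
 *		return;			error already printed
 *	sc->sc_node = node;
 */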

paddr_t
ogx_fpa3_alloc(struct fpa3aura *aura)
{
	uint64_t op;

	op = FPA3_LD_IO | FPA3_LD_DID;
	op |= (uint64_t)aura->nodeid << FPA3_LD_NODE_S;
	op |= (uint64_t)aura->auraid << FPA3_LD_AURA_S;
	return octeon_xkphys_read_8(op);
}

void
ogx_fpa3_free(struct fpa3aura *aura, paddr_t addr)
{
	uint64_t op;

	/* Flush pending writes before the block is freed. */
	octeon_syncw();

	op = FPA3_ST_IO | FPA3_ST_DID_FPA;
	op |= (uint64_t)aura->nodeid << FPA3_ST_NODE_S;
	op |= (uint64_t)aura->auraid << FPA3_ST_AURA_S;
	octeon_xkphys_write_8(op, addr);
}
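
/*
 * The FPA is driven with I/O-space loads and stores: a 64-bit load
 * from the aura's operation address pops a free-block pointer (0 when
 * the aura is empty) and a store pushes a pointer back.  A minimal
 * usage sketch, assuming a caller that can tolerate allocation
 * failure:
 *
 *	paddr_t blk = ogx_fpa3_alloc(&sc->sc_pkt_aura);
 *	if (blk == 0)
 *		return ENOBUFS;		aura exhausted, retry later
 *	...
 *	ogx_fpa3_free(&sc->sc_pkt_aura, blk);
 */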

void
ogx_fpa3_pool_init(struct ogx_node *node, struct fpa3pool *pool,
    uint32_t poolid, uint32_t nentries)
{
	size_t segsize;
	int rsegs;

	segsize = nentries * 16;

	pool->nodeid = node->node_id;
	pool->poolid = poolid;

	if (bus_dmamap_create(node->node_dmat, segsize, 1, segsize, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &pool->dmap))
		panic("%s: out of memory", __func__);
	if (bus_dmamem_alloc(node->node_dmat, segsize, CACHELINESIZE,
	    0, &pool->dmaseg, 1, &rsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO))
		panic("%s: out of memory", __func__);
	if (bus_dmamem_map(node->node_dmat, &pool->dmaseg, 1, segsize,
	    &pool->kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
		panic("%s: bus_dmamem_map", __func__);
	if (bus_dmamap_load(node->node_dmat, pool->dmap, pool->kva, segsize,
	    NULL, BUS_DMA_NOWAIT))
		panic("%s: bus_dmamap_load", __func__);

	/* Disable the pool before setup. */
	FPA3_WR_8(node, FPA3_POOL_CFG(poolid), 0);

	/* Set permitted address range of stored pointers. */
	FPA3_WR_8(node, FPA3_POOL_START_ADDR(poolid), CACHELINESIZE);
	FPA3_WR_8(node, FPA3_POOL_END_ADDR(poolid), UINT32_MAX);

	/* Set up the pointer stack. */
	FPA3_WR_8(node, FPA3_POOL_STACK_BASE(poolid), pool->dmaseg.ds_addr);
	FPA3_WR_8(node, FPA3_POOL_STACK_ADDR(poolid), pool->dmaseg.ds_addr);
	FPA3_WR_8(node, FPA3_POOL_STACK_END(poolid), pool->dmaseg.ds_addr +
	    pool->dmaseg.ds_len);

	/* Enable the pool now that setup is complete. */
	FPA3_WR_8(node, FPA3_POOL_CFG(poolid), FPA3_POOL_CFG_ENA);
}

void
ogx_fpa3_aura_init(struct ogx_node *node, struct fpa3aura *aura,
    uint32_t auraid, struct fpa3pool *pool)
{
	KASSERT(node->node_id == pool->nodeid);

	aura->nodeid = pool->nodeid;
	aura->poolid = pool->poolid;
	aura->auraid = auraid;

	/* Enable pointer counting. */
	FPA3_WR_8(node, FPA3_AURA_CFG(aura->auraid), 0);
	FPA3_WR_8(node, FPA3_AURA_CNT(aura->auraid), 1024);
	FPA3_WR_8(node, FPA3_AURA_CNT_LIMIT(aura->auraid), 1024);

	/* Set the backend pool. */
	FPA3_WR_8(node, FPA3_AURA_POOL(aura->auraid), aura->poolid);
}
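
/*
 * A pool owns the backing pointer stack; an aura is an accounting view
 * on top of a pool and is what the SSO, PKI and PKO reference.  The
 * setup sequence mirrors what ogx_node_init() does above:
 *
 *	ogx_fpa3_pool_init(node, &node->node_pko_pool, OGX_POOL_PKO,
 *	    1024 * 32);
 *	ogx_fpa3_aura_init(node, &node->node_pko_aura, OGX_AURA_PKO,
 *	    &node->node_pko_pool);
 *	ogx_fpa3_aura_load(node, &node->node_pko_aura, 1024, 4096);
 */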

void
ogx_fpa3_aura_load(struct ogx_node *node, struct fpa3aura *aura, size_t nelem,
    size_t size)
{
	paddr_t addr;
	caddr_t kva;
	size_t i;
	size_t totsize;
	int rsegs;

	KASSERT(size % CACHELINESIZE == 0);

	if (nelem > SIZE_MAX / size)
		panic("%s: too large allocation", __func__);
	totsize = nelem * size;

	if (bus_dmamap_create(node->node_dmat, totsize, 1, totsize, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aura->dmap))
		panic("%s: out of memory", __func__);
	if (bus_dmamem_alloc(node->node_dmat, totsize, CACHELINESIZE, 0,
	    &aura->dmaseg, 1, &rsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO))
		panic("%s: out of memory", __func__);
	if (bus_dmamem_map(node->node_dmat, &aura->dmaseg, rsegs, totsize,
	    &kva, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
		panic("%s: bus_dmamem_map failed", __func__);
	if (bus_dmamap_load(node->node_dmat, aura->dmap, kva, totsize, NULL,
	    BUS_DMA_NOWAIT))
		panic("%s: bus_dmamap_load failed", __func__);

	for (i = 0, addr = aura->dmaseg.ds_addr; i < nelem; i++, addr += size)
		ogx_fpa3_free(aura, addr);
}

int
ogx_node_load_firmware(struct ogx_node *node)
{
	struct ogx_fwhdr *fw;
	uint8_t *ucode = NULL;
	size_t size = 0;
	uint64_t *imem, val;
	int cl, error = 0, i;

	rw_enter_write(&node->node_lock);
	if (node->node_flags & NODE_FWREADY)
		goto out;

	error = loadfirmware("ogx-pki-cluster", &ucode, &size);
	if (error != 0) {
		printf("ogx node%llu: could not load firmware, error %d\n",
		    node->node_id, error);
		goto out;
	}

	fw = (struct ogx_fwhdr *)ucode;
	if (size < sizeof(*fw) || fw->fw_size != size - sizeof(*fw)) {
		printf("ogx node%llu: invalid firmware\n", node->node_id);
		error = EINVAL;
		goto out;
	}

	imem = (uint64_t *)(fw + 1);
	for (i = 0; i < fw->fw_size / sizeof(uint64_t); i++)
		PKI_WR_8(node, PKI_IMEM(i), imem[i]);

	/* Enable the parsing clusters. */
	for (cl = 0; cl < node->node_cfg->cfg_nclusters; cl++) {
		val = PKI_RD_8(node, PKI_ICG_CFG(cl));
		val |= PKI_ICG_CFG_PENA;
		PKI_WR_8(node, PKI_ICG_CFG(cl), val);
	}

	node->node_flags |= NODE_FWREADY;

out:
	free(ucode, M_DEVBUF, size);
	rw_exit_write(&node->node_lock);
	return error;
}
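
/*
 * Layout of the PKI firmware image consumed above: a small header
 * followed by 64-bit instruction words for the parsing clusters.
 *
 *	offset  0: char     fw_version[8]
 *	offset  8: uint64_t fw_size		payload size in bytes
 *	offset 16: uint64_t imem[fw_size / 8]
 *
 * ogx_node_load_firmware() rejects the image unless fw_size matches
 * the file size minus the header size.
 */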