1 /* $OpenBSD: if_cnmac.c,v 1.85 2023/11/10 15:51:19 bluhm Exp $ */ 2 3 /* 4 * Copyright (c) 2007 Internet Initiative Japan, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 #include "bpfilter.h" 29 30 /* 31 * XXXSEIL 32 * If no free send buffer is available, free all the sent buffer and bail out. 
33 */ 34 #define OCTEON_ETH_SEND_QUEUE_CHECK 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/pool.h> 39 #include <sys/proc.h> 40 #include <sys/mbuf.h> 41 #include <sys/malloc.h> 42 #include <sys/kernel.h> 43 #include <sys/socket.h> 44 #include <sys/ioctl.h> 45 #include <sys/errno.h> 46 #include <sys/device.h> 47 #include <sys/queue.h> 48 #include <sys/conf.h> 49 #include <sys/stdint.h> /* uintptr_t */ 50 #include <sys/syslog.h> 51 #include <sys/endian.h> 52 #include <sys/atomic.h> 53 54 #include <net/if.h> 55 #include <net/if_media.h> 56 #include <netinet/in.h> 57 #include <netinet/if_ether.h> 58 59 #if NBPFILTER > 0 60 #include <net/bpf.h> 61 #endif 62 63 #include <machine/bus.h> 64 #include <machine/intr.h> 65 #include <machine/octeonvar.h> 66 #include <machine/octeon_model.h> 67 68 #include <dev/mii/mii.h> 69 #include <dev/mii/miivar.h> 70 71 #include <octeon/dev/cn30xxciureg.h> 72 #include <octeon/dev/cn30xxnpireg.h> 73 #include <octeon/dev/cn30xxgmxreg.h> 74 #include <octeon/dev/cn30xxipdreg.h> 75 #include <octeon/dev/cn30xxpipreg.h> 76 #include <octeon/dev/cn30xxpowreg.h> 77 #include <octeon/dev/cn30xxfaureg.h> 78 #include <octeon/dev/cn30xxfpareg.h> 79 #include <octeon/dev/cn30xxbootbusreg.h> 80 #include <octeon/dev/cn30xxfpavar.h> 81 #include <octeon/dev/cn30xxgmxvar.h> 82 #include <octeon/dev/cn30xxfauvar.h> 83 #include <octeon/dev/cn30xxpowvar.h> 84 #include <octeon/dev/cn30xxipdvar.h> 85 #include <octeon/dev/cn30xxpipvar.h> 86 #include <octeon/dev/cn30xxpkovar.h> 87 #include <octeon/dev/cn30xxsmivar.h> 88 #include <octeon/dev/iobusvar.h> 89 #include <octeon/dev/if_cnmacvar.h> 90 91 #ifdef OCTEON_ETH_DEBUG 92 #define OCTEON_ETH_KASSERT(x) KASSERT(x) 93 #define OCTEON_ETH_KDASSERT(x) KDASSERT(x) 94 #else 95 #define OCTEON_ETH_KASSERT(x) 96 #define OCTEON_ETH_KDASSERT(x) 97 #endif 98 99 /* 100 * Set the PKO to think command buffers are an odd length. This makes it so we 101 * never have to divide a command across two buffers. 
102 */ 103 #define OCTEON_POOL_NWORDS_CMD \ 104 (((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1) 105 #define FPA_COMMAND_BUFFER_POOL_NWORDS OCTEON_POOL_NWORDS_CMD /* XXX */ 106 107 CTASSERT(MCLBYTES >= OCTEON_POOL_SIZE_PKT + CACHELINESIZE); 108 109 void cnmac_buf_init(struct cnmac_softc *); 110 111 int cnmac_match(struct device *, void *, void *); 112 void cnmac_attach(struct device *, struct device *, void *); 113 void cnmac_pip_init(struct cnmac_softc *); 114 void cnmac_ipd_init(struct cnmac_softc *); 115 void cnmac_pko_init(struct cnmac_softc *); 116 void cnmac_smi_init(struct cnmac_softc *); 117 118 void cnmac_board_mac_addr(uint8_t *); 119 120 int cnmac_mii_readreg(struct device *, int, int); 121 void cnmac_mii_writereg(struct device *, int, int, int); 122 void cnmac_mii_statchg(struct device *); 123 124 int cnmac_mediainit(struct cnmac_softc *); 125 void cnmac_mediastatus(struct ifnet *, struct ifmediareq *); 126 int cnmac_mediachange(struct ifnet *); 127 128 void cnmac_send_queue_flush_prefetch(struct cnmac_softc *); 129 void cnmac_send_queue_flush_fetch(struct cnmac_softc *); 130 void cnmac_send_queue_flush(struct cnmac_softc *); 131 int cnmac_send_queue_is_full(struct cnmac_softc *); 132 void cnmac_send_queue_add(struct cnmac_softc *, 133 struct mbuf *, uint64_t *); 134 void cnmac_send_queue_del(struct cnmac_softc *, 135 struct mbuf **, uint64_t **); 136 int cnmac_buf_free_work(struct cnmac_softc *, uint64_t *); 137 void cnmac_buf_ext_free(caddr_t, u_int, void *); 138 139 int cnmac_ioctl(struct ifnet *, u_long, caddr_t); 140 void cnmac_watchdog(struct ifnet *); 141 int cnmac_init(struct ifnet *); 142 int cnmac_stop(struct ifnet *, int); 143 void cnmac_start(struct ifqueue *); 144 145 int cnmac_send_cmd(struct cnmac_softc *, uint64_t, uint64_t); 146 uint64_t cnmac_send_makecmd_w1(int, paddr_t); 147 uint64_t cnmac_send_makecmd_w0(uint64_t, uint64_t, size_t, int, int); 148 int cnmac_send_makecmd_gbuf(struct cnmac_softc *, 149 struct mbuf *, uint64_t 
*, int *); 150 int cnmac_send_makecmd(struct cnmac_softc *, 151 struct mbuf *, uint64_t *, uint64_t *, uint64_t *); 152 int cnmac_send_buf(struct cnmac_softc *, 153 struct mbuf *, uint64_t *); 154 int cnmac_send(struct cnmac_softc *, struct mbuf *); 155 156 int cnmac_reset(struct cnmac_softc *); 157 int cnmac_configure(struct cnmac_softc *); 158 int cnmac_configure_common(struct cnmac_softc *); 159 160 void cnmac_free_task(void *); 161 void cnmac_tick_free(void *arg); 162 void cnmac_tick_misc(void *); 163 164 int cnmac_recv_mbuf(struct cnmac_softc *, 165 uint64_t *, struct mbuf **, int *); 166 int cnmac_recv_check(struct cnmac_softc *, uint64_t); 167 int cnmac_recv(struct cnmac_softc *, uint64_t *, struct mbuf_list *); 168 int cnmac_intr(void *); 169 170 int cnmac_mbuf_alloc(int); 171 172 #if NKSTAT > 0 173 void cnmac_kstat_attach(struct cnmac_softc *); 174 int cnmac_kstat_read(struct kstat *); 175 void cnmac_kstat_tick(struct cnmac_softc *); 176 #endif 177 178 /* device parameters */ 179 int cnmac_param_pko_cmd_w0_n2 = 1; 180 181 const struct cfattach cnmac_ca = { 182 sizeof(struct cnmac_softc), cnmac_match, cnmac_attach 183 }; 184 185 struct cfdriver cnmac_cd = { NULL, "cnmac", DV_IFNET }; 186 187 /* ---- buffer management */ 188 189 const struct cnmac_pool_param { 190 int poolno; 191 size_t size; 192 size_t nelems; 193 } cnmac_pool_params[] = { 194 #define _ENTRY(x) { OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x } 195 _ENTRY(WQE), 196 _ENTRY(CMD), 197 _ENTRY(SG) 198 #undef _ENTRY 199 }; 200 struct cn30xxfpa_buf *cnmac_pools[8]; 201 #define cnmac_fb_wqe cnmac_pools[OCTEON_POOL_NO_WQE] 202 #define cnmac_fb_cmd cnmac_pools[OCTEON_POOL_NO_CMD] 203 #define cnmac_fb_sg cnmac_pools[OCTEON_POOL_NO_SG] 204 205 uint64_t cnmac_mac_addr = 0; 206 uint32_t cnmac_mac_addr_offset = 0; 207 208 int cnmac_mbufs_to_alloc; 209 int cnmac_npowgroups = 0; 210 211 void 212 cnmac_buf_init(struct cnmac_softc *sc) 213 { 214 static int once; 215 int i; 216 const struct 
cnmac_pool_param *pp;
	struct cn30xxfpa_buf *fb;

	/* The FPA pools are global; create them only once for all ports. */
	if (once == 1)
		return;
	once = 1;

	for (i = 0; i < (int)nitems(cnmac_pool_params); i++) {
		pp = &cnmac_pool_params[i];
		cn30xxfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
		cnmac_pools[pp->poolno] = fb;
	}
}

/* ---- autoconf */

/*
 * Match a GMX port whose advertised name equals this driver's name.
 */
int
cnmac_match(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = (struct cfdata *)match;
	struct cn30xxgmx_attach_args *ga = aux;

	if (strcmp(cf->cf_driver->cd_name, ga->ga_name) != 0) {
		return 0;
	}
	return 1;
}

/*
 * Attach one cnmac interface: allocate a POW group and receive mbufs,
 * copy the GMX attach arguments into the softc, initialize the PIP/IPD/PKO
 * submodules and the FAU counter used for TX completion accounting, set up
 * media/MII, and register the ifnet plus the POW work-queue interrupt.
 */
void
cnmac_attach(struct device *parent, struct device *self, void *aux)
{
	struct cnmac_softc *sc = (void *)self;
	struct cn30xxgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Each port needs its own POW group; bail out when exhausted. */
	if (cnmac_npowgroups >= OCTEON_POW_GROUP_MAX) {
		printf(": out of POW groups\n");
		return;
	}

	/*
	 * cnmac_mbuf_alloc() returns how many of the requested buffers it
	 * could NOT allocate; remember that so the free tick can retry.
	 */
	atomic_add_int(&cnmac_mbufs_to_alloc,
	    cnmac_mbuf_alloc(CNMAC_MBUFS_PER_PORT));

	sc->sc_regt = ga->ga_regt;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;
	sc->sc_smi = ga->ga_smi;
	sc->sc_phy_addr = ga->ga_phy_addr;
	sc->sc_powgroup = cnmac_npowgroups++;

	sc->sc_init_flag = 0;

	/*
	 * XXX
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	cnmac_board_mac_addr(sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	ml_init(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	task_set(&sc->sc_free_task, cnmac_free_task, sc);
	timeout_set(&sc->sc_tick_misc_ch, cnmac_tick_misc, sc);
	timeout_set(&sc->sc_tick_free_ch, cnmac_tick_free, sc);

	/* Per-unit FAU register counts completed (sent) PKO commands. */
	cn30xxfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_dev.dv_unit, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_dev.dv_unit + 1))/* XXX */);
	cn30xxfau_op_set_8(&sc->sc_fau_done, 0);

	cnmac_pip_init(sc);
	cnmac_ipd_init(sc);
	cnmac_pko_init(sc);

	cnmac_configure_common(sc);

	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ac = &sc->sc_arpcom;

	/* XXX */
	sc->sc_pow = &cn30xxpow_softc;

	cnmac_mediainit(sc);

	strncpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = cnmac_ioctl;
	ifp->if_qstart = cnmac_start;
	ifp->if_watchdog = cnmac_watchdog;
	ifp->if_hardmtu = CNMAC_MAX_MTU;
	ifq_init_maxlen(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	cn30xxgmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);
	ether_ifattach(ifp);

	cnmac_buf_init(sc);

#if NKSTAT > 0
	cnmac_kstat_attach(sc);
#endif

	sc->sc_ih = octeon_intr_establish(POW_WORKQ_IRQ(sc->sc_powgroup),
	    IPL_NET | IPL_MPSAFE, cnmac_intr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL)
		panic("%s: could not set up interrupt", sc->sc_dev.dv_xname);
}

/* ----
submodules */ 339 340 void 341 cnmac_pip_init(struct cnmac_softc *sc) 342 { 343 struct cn30xxpip_attach_args pip_aa; 344 345 pip_aa.aa_port = sc->sc_port; 346 pip_aa.aa_regt = sc->sc_regt; 347 pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */; 348 pip_aa.aa_receive_group = sc->sc_powgroup; 349 pip_aa.aa_ip_offset = sc->sc_ip_offset; 350 cn30xxpip_init(&pip_aa, &sc->sc_pip); 351 cn30xxpip_port_config(sc->sc_pip); 352 } 353 354 void 355 cnmac_ipd_init(struct cnmac_softc *sc) 356 { 357 struct cn30xxipd_attach_args ipd_aa; 358 359 ipd_aa.aa_port = sc->sc_port; 360 ipd_aa.aa_regt = sc->sc_regt; 361 ipd_aa.aa_first_mbuff_skip = 0/* XXX */; 362 ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */; 363 cn30xxipd_init(&ipd_aa, &sc->sc_ipd); 364 } 365 366 void 367 cnmac_pko_init(struct cnmac_softc *sc) 368 { 369 struct cn30xxpko_attach_args pko_aa; 370 371 pko_aa.aa_port = sc->sc_port; 372 pko_aa.aa_regt = sc->sc_regt; 373 pko_aa.aa_cmdptr = &sc->sc_cmdptr; 374 pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD; 375 pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD; 376 cn30xxpko_init(&pko_aa, &sc->sc_pko); 377 } 378 379 /* ---- XXX */ 380 381 void 382 cnmac_board_mac_addr(uint8_t *enaddr) 383 { 384 int id; 385 386 /* Initialize MAC addresses from the global address base. */ 387 if (cnmac_mac_addr == 0) { 388 memcpy((uint8_t *)&cnmac_mac_addr + 2, 389 octeon_boot_info->mac_addr_base, 6); 390 391 /* 392 * Should be allowed to fail hard if couldn't read the 393 * mac_addr_base address... 394 */ 395 if (cnmac_mac_addr == 0) 396 return; 397 398 /* 399 * Calculate the offset from the mac_addr_base that will be used 400 * for the next sc->sc_port. 
401 */ 402 id = octeon_get_chipid(); 403 404 switch (octeon_model_family(id)) { 405 case OCTEON_MODEL_FAMILY_CN56XX: 406 cnmac_mac_addr_offset = 1; 407 break; 408 /* 409 case OCTEON_MODEL_FAMILY_CN52XX: 410 case OCTEON_MODEL_FAMILY_CN63XX: 411 cnmac_mac_addr_offset = 2; 412 break; 413 */ 414 default: 415 cnmac_mac_addr_offset = 0; 416 break; 417 } 418 419 enaddr += cnmac_mac_addr_offset; 420 } 421 422 /* No more MAC addresses to assign. */ 423 if (cnmac_mac_addr_offset >= octeon_boot_info->mac_addr_count) 424 return; 425 426 if (enaddr) 427 memcpy(enaddr, (uint8_t *)&cnmac_mac_addr + 2, 6); 428 429 cnmac_mac_addr++; 430 cnmac_mac_addr_offset++; 431 } 432 433 /* ---- media */ 434 435 int 436 cnmac_mii_readreg(struct device *self, int phy_no, int reg) 437 { 438 struct cnmac_softc *sc = (struct cnmac_softc *)self; 439 return cn30xxsmi_read(sc->sc_smi, phy_no, reg); 440 } 441 442 void 443 cnmac_mii_writereg(struct device *self, int phy_no, int reg, int value) 444 { 445 struct cnmac_softc *sc = (struct cnmac_softc *)self; 446 cn30xxsmi_write(sc->sc_smi, phy_no, reg, value); 447 } 448 449 void 450 cnmac_mii_statchg(struct device *self) 451 { 452 struct cnmac_softc *sc = (struct cnmac_softc *)self; 453 454 cn30xxpko_port_enable(sc->sc_pko, 0); 455 cn30xxgmx_port_enable(sc->sc_gmx_port, 0); 456 457 cnmac_reset(sc); 458 459 cn30xxpko_port_enable(sc->sc_pko, 1); 460 cn30xxgmx_port_enable(sc->sc_gmx_port, 1); 461 } 462 463 int 464 cnmac_mediainit(struct cnmac_softc *sc) 465 { 466 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 467 struct mii_softc *child; 468 469 sc->sc_mii.mii_ifp = ifp; 470 sc->sc_mii.mii_readreg = cnmac_mii_readreg; 471 sc->sc_mii.mii_writereg = cnmac_mii_writereg; 472 sc->sc_mii.mii_statchg = cnmac_mii_statchg; 473 ifmedia_init(&sc->sc_mii.mii_media, 0, cnmac_mediachange, 474 cnmac_mediastatus); 475 476 mii_attach(&sc->sc_dev, &sc->sc_mii, 477 0xffffffff, sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); 478 479 child = LIST_FIRST(&sc->sc_mii.mii_phys); 480 if 
(child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	return 0;
}

/*
 * Report current media status, merging the GMX port's negotiated
 * flow-control flags into the active media word.
 */
void
cnmac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cnmac_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
	    sc->sc_gmx_port->sc_port_flowflags;
}

/* Apply a media change via MII; no-op while the interface is down. */
int
cnmac_mediachange(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	return mii_mediachg(&sc->sc_mii);
}

/* ---- send buffer garbage collection */

/*
 * Start an asynchronous fetch of the TX-done FAU counter.  Must be
 * paired with cnmac_send_queue_flush_fetch(), which consumes the result.
 */
void
cnmac_send_queue_flush_prefetch(struct cnmac_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}

/*
 * Complete the asynchronous FAU fetch started by the prefetch above and
 * latch the (non-positive) hard-done count into the softc.
 */
void
cnmac_send_queue_flush_fetch(struct cnmac_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}

/*
 * Free the mbufs and gather buffers of all packets the hardware has
 * completed (sc_hard_done_cnt is <= 0; its magnitude is the number of
 * completed sends), then credit the FAU counter back by that amount.
 */
void
cnmac_send_queue_flush(struct cnmac_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		cnmac_send_queue_del(sc, &m, &gbuf);

		cn30xxfpa_buf_put_paddr(cnmac_fb_sg, XKPHYS_TO_PHYS(gbuf));

		m_freem(m);
	}

	cn30xxfau_op_add_8(&sc->sc_fau_done, i);
}

/*
 * Return 1 (after reclaiming completed sends) when the send queue has
 * reached its capacity, 0 otherwise.
 */
int
cnmac_send_queue_is_full(struct cnmac_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	/* queued packets minus completed ones (sc_hard_done_cnt <= 0). */
	nofree_cnt = ml_len(&sc->sc_sendq) + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		cnmac_send_queue_flush(sc);
		return 1;
	}

#endif
	return 0;
}

/*
 * Enqueue an in-flight packet, remembering its gather buffer in the
 * mbuf cookie and counting external-storage mbufs.
 */
void
cnmac_send_queue_add(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	m->m_pkthdr.ph_cookie = gbuf;
	ml_enqueue(&sc->sc_sendq, m);

	if (m->m_ext.ext_free_fn != 0)
		sc->sc_ext_callback_cnt++;
}

/*
 * Dequeue the oldest in-flight packet, returning the mbuf and its
 * gather buffer.
 */
void
cnmac_send_queue_del(struct cnmac_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct mbuf *m;
	m = ml_dequeue(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(m != NULL);

	*rm = m;
	*rgbuf = m->m_pkthdr.ph_cookie;

	if (m->m_ext.ext_free_fn != 0) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}
}

/*
 * Return all packet data buffers referenced by a work queue entry to
 * the FPA packet pool, then return the WQE itself to the WQE pool.
 * Buffer chaining: each buffer's word3-style link word sits just before
 * the data address of the next buffer.
 */
int
cnmac_buf_free_work(struct cnmac_softc *sc, uint64_t *work)
{
	paddr_t addr, pktbuf;
	uint64_t word3;
	unsigned int back, nbufs;

	nbufs = (work[2] & PIP_WQE_WORD2_IP_BUFS) >>
	    PIP_WQE_WORD2_IP_BUFS_SHIFT;
	word3 = work[3];
	while (nbufs-- > 0) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >>
		    PIP_WQE_WORD3_BACK_SHIFT;
		/* Start of the buffer is "back" cache lines before addr. */
		pktbuf = (addr & ~(CACHELINESIZE - 1)) - back * CACHELINESIZE;

		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHELINESIZE);

		if (nbufs > 0)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	cn30xxfpa_buf_put_paddr(cnmac_fb_wqe, XKPHYS_TO_PHYS(work));

	return 0;
}

/* ---- ifnet interfaces */

/*
 * Standard ifnet ioctl entry point: address/flags/media handling, with
 * everything else delegated to ether_ioctl().
 */
int
cnmac_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cnmac_softc *sc = ifp->if_softc;
	struct ifreq *ifr =
(struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			cnmac_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				cnmac_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cnmac_stop(ifp, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* ENETRESET from above or ether_ioctl(): reprogram the RX filter. */
	if (error == ENETRESET) {
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			cn30xxgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	splx(s);
	return (error);
}

/* ---- send (output) */

/*
 * Build PKO command word 0: FAU registers to decrement on completion,
 * gather/linear mode, checksum-offload IP offset, segment count and
 * total byte count.
 */
uint64_t
cnmac_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs,
    int ipoffp1)
{
	return cn30xxpko_cmd_word0(
	    OCT_FAU_OP_SIZE_64,		/* sz1 */
	    OCT_FAU_OP_SIZE_64,		/* sz0 */
	    1, fau1, 1, fau0,		/* s1, reg1, s0, reg0 */
	    0,				/* le */
	    cnmac_param_pko_cmd_w0_n2,	/* n2 */
	    1, 0,			/* q, r */
	    (segs == 1) ? 0 : 1,	/* g */
	    ipoffp1, 0, 1,		/* ipoffp1, ii, df */
	    segs, (int)len);		/* segs, totalbytes */
}

/*
 * Build PKO command word 1: buffer (or gather list) size and physical
 * address, drawn from the scatter/gather pool.
 */
uint64_t
cnmac_send_makecmd_w1(int size, paddr_t addr)
{
	return cn30xxpko_cmd_word1(
	    0, 0,			/* i, back */
	    OCTEON_POOL_NO_SG,		/* pool */
	    size, addr);		/* size, addr */
}

#define KVTOPHYS(addr)	cnmac_kvtophys((vaddr_t)(addr))

/* Translate an XKPHYS kernel virtual address to its physical address. */
static inline paddr_t
cnmac_kvtophys(vaddr_t kva)
{
	KASSERT(IS_XKPHYS(kva));
	return XKPHYS_TO_PHYS(kva);
}

/*
 * Fill the gather buffer with one word-1 descriptor per non-empty mbuf.
 * If the chain has more segments than fit in one SG buffer, defragment
 * the mbuf chain into a single buffer.  Returns 0 on success, 1 when
 * defragmentation fails.
 */
int
cnmac_send_makecmd_gbuf(struct cnmac_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		if (__predict_false(m->m_len == 0))
			continue;

		if (segs >= OCTEON_POOL_SIZE_SG / sizeof(uint64_t))
			goto defrag;
		gbuf[segs] = cnmac_send_makecmd_w1(m->m_len,
		    KVTOPHYS(m->m_data));
		segs++;
	}

	*rsegs = segs;

	return 0;

defrag:
	if (m_defrag(m0, M_DONTWAIT) != 0)
		return 1;
	gbuf[0] = cnmac_send_makecmd_w1(m0->m_len, KVTOPHYS(m0->m_data));
	*rsegs = 1;
	return 0;
}

/*
 * Build the two PKO command words for an outgoing mbuf chain.
 * Returns 0 on success, 1 (after logging) on failure.
 */
int
cnmac_send_makecmd(struct cnmac_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int ipoffp1;
	int segs;
	int result = 0;

	if (cnmac_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: large number of transmission"
		    " data segments", sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	/* Get the IP packet offset for TCP/UDP checksum offloading. */
	ipoffp1 = (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
	    ? (ETHER_HDR_LEN + 1) : 0;

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is number of bytes pointed by segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is number of segments
	 */
	pko_cmd_w0 = cnmac_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs, ipoffp1);
	pko_cmd_w1 = cnmac_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		KVTOPHYS(m->m_data) :
		XKPHYS_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}

/*
 * Write a two-word PKO command into the current command buffer and ring
 * the doorbell.  When the buffer is about to fill, chain in a fresh
 * command buffer from the FPA pool (the last word holds its address).
 * Returns 0 on success, 1 when no command buffer could be allocated.
 */
int
cnmac_send_cmd(struct cnmac_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	cmdptr = (uint64_t *)PHYS_TO_XKPHYS(sc->sc_cmdptr.cmdptr, CCA_CACHED);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <=
	    FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 ==
	    FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = cn30xxfpa_buf_get_paddr(cnmac_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}

/*
 * Build and submit the PKO command for one mbuf using the given gather
 * buffer.  Returns 0 on success, non-zero (already logged) on failure.
 */
int
cnmac_send_buf(struct cnmac_softc *sc, struct mbuf *m, uint64_t *gbuf)
{
	int result = 0, error;
	uint64_t pko_cmd_w0, pko_cmd_w1;

	error = cnmac_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
	if (error != 0) {
		/* already logging */
		result = error;
		goto done;
	}

	error = cnmac_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
	if (error != 0) {
		/* already logging */
		result = error;
	}

done:
	return result;
}

/*
 * Transmit one mbuf: grab a gather buffer from the FPA SG pool, submit
 * the PKO command, and track the packet in the send queue until the
 * hardware reports completion.  Returns 0 on success; on failure the
 * gather buffer is returned to the pool (the mbuf is NOT freed here).
 */
int
cnmac_send(struct cnmac_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	gaddr = cn30xxfpa_buf_get_paddr(cnmac_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: cannot allocate gather buffer from free pool allocator\n",
		    sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_XKPHYS(gaddr, CCA_CACHED);

	error = cnmac_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logging */
		cn30xxfpa_buf_put_paddr(cnmac_fb_sg, gaddr);
		result = error;
		goto done;
	}

	cnmac_send_queue_add(sc, m, gbuf);

done:
	return result;
}

/*
 * ifq start routine: drain the interface send queue into the PKO,
 * overlapping each transmit with an asynchronous fetch of the TX-done
 * counter (prefetch/fetch pairs) and reclaiming completed sends.
 */
void
cnmac_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct cnmac_softc *sc = ifp->if_softc;
	struct mbuf *m;

	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		ifq_purge(ifq);
		return;
	}

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	cnmac_send_queue_flush_prefetch(sc);

	for (;;) {
		cnmac_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent buffer
		 * and bail out.
		 */
		if (cnmac_send_queue_is_full(sc)) {
			ifq_set_oactive(ifq);
			timeout_add(&sc->sc_tick_free_ch, 1);
			return;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			return;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* XXX */
		if (ml_len(&sc->sc_sendq) > sc->sc_soft_req_thresh)
			cnmac_send_queue_flush(sc);
		if (cnmac_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed to transmit packet\n",
			    sc->sc_dev.dv_xname);
		}
		/* XXX */

		/*
		 * send next iobdma request
		 */
		cnmac_send_queue_flush_prefetch(sc);
	}

	/* NOTE(review): unreachable — the loop above exits only via return. */
	cnmac_send_queue_flush_fetch(sc);
}

/*
 * Watchdog: the interface stalled — reset and reconfigure the port,
 * then restart the send queue.
 */
void
cnmac_watchdog(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	cnmac_stop(ifp, 0);

	cnmac_configure(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	ifp->if_timer = 0;

	ifq_restart(&ifp->if_snd);
}

/*
 * Bring the interface up: first-time full configuration (PKO/IPD
 * enable), or just re-enable GMX on subsequent calls; then program
 * media, stats and RX filter and start the periodic ticks.
 */
int
cnmac_init(struct ifnet *ifp)
{
	struct cnmac_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		cnmac_stop(ifp, 0);

		/* Initialize the device */
		cnmac_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	cnmac_mediachange(ifp);

	cn30xxpip_stats_init(sc->sc_pip);
	cn30xxgmx_stats_init(sc->sc_gmx_port);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}

/*
 * Bring the interface down: stop ticks, take the link down, disable
 * GMX, and wait out any interrupt/ifq activity in flight.
 */
int
cnmac_stop(struct ifnet *ifp, int disable)
{
	struct cnmac_softc *sc = ifp->if_softc;

	CLR(ifp->if_flags, IFF_RUNNING);

	timeout_del(&sc->sc_tick_misc_ch);
	timeout_del(&sc->sc_tick_free_ch);

	mii_down(&sc->sc_mii);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	/* Drain any interrupt handler / start routine still running. */
	intr_barrier(sc->sc_ih);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	return 0;
}

/* ---- misc */

#define PKO_INDEX_MASK	((1ULL << 12/* XXX */) - 1)

/* Re-apply GMX speed, flow-control and timing settings. */
int
cnmac_reset(struct cnmac_softc *sc)
{
	cn30xxgmx_reset_speed(sc->sc_gmx_port);
	cn30xxgmx_reset_flowctl(sc->sc_gmx_port);
	cn30xxgmx_reset_timing(sc->sc_gmx_port);

	return 0;
}

/*
 * Full per-port configuration: reset GMX, program PKO and POW for this
 * port, then re-enable GMX.
 */
int
cnmac_configure(struct cnmac_softc *sc)
{
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	cnmac_reset(sc);

	cn30xxpko_port_config(sc->sc_pko);
	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxpow_config(sc->sc_pow, sc->sc_powgroup);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}

/*
 * One-time global configuration shared by all ports (IPD, PKO, and the
 * PIP non-IP padding).
 */
int
cnmac_configure_common(struct cnmac_softc *sc)
{
	static int once;

	uint64_t reg;

	if (once == 1)
		return 0;
	once = 1;

	cn30xxipd_config(sc->sc_ipd);
	cn30xxpko_config(sc->sc_pko);

	/* Set padding for packets that Octeon does not recognize as IP. */
	reg = octeon_xkphys_read_8(PIP_GBL_CFG);
	reg &= ~PIP_GBL_CFG_NIP_SHF_MASK;
	reg |= ETHER_ALIGN << PIP_GBL_CFG_NIP_SHF_SHIFT;
	octeon_xkphys_write_8(PIP_GBL_CFG, reg);

	return 0;
}

/*
 * Feed up to n mbuf clusters into the FPA packet pool.  The owning mbuf
 * pointer is stashed in the word just before the (cache-line aligned)
 * data area so the receive path can recover it.  Returns the number of
 * buffers that could NOT be allocated (0 on full success).
 */
int
cnmac_mbuf_alloc(int n)
{
	struct mbuf *m;
	paddr_t pktbuf;

	while (n > 0) {
		m = MCLGETL(NULL, M_NOWAIT,
		    OCTEON_POOL_SIZE_PKT + CACHELINESIZE);
		if (m == NULL || !ISSET(m->m_flags, M_EXT)) {
			m_freem(m);
			break;
		}

		/* Align the data area and remember the owning mbuf. */
		m->m_data = (void *)(((vaddr_t)m->m_data + CACHELINESIZE) &
		    ~(CACHELINESIZE - 1));
		((struct mbuf **)m->m_data)[-1] = m;

		pktbuf = KVTOPHYS(m->m_data);
		m->m_pkthdr.ph_cookie = (void *)pktbuf;
		cn30xxfpa_store(pktbuf, OCTEON_POOL_NO_PKT,
		    OCTEON_POOL_SIZE_PKT / CACHELINESIZE);

		n--;
	}
	return n;
}

/*
 * Convert a received work queue entry into an mbuf chain, recovering
 * the mbuf that owns each hardware packet buffer.  On success the WQE
 * is returned to its pool, *rm is the packet and *nmbuf the number of
 * hardware buffers consumed.
 */
int
cnmac_recv_mbuf(struct cnmac_softc *sc, uint64_t *work,
    struct mbuf **rm, int *nmbuf)
{
	struct mbuf *m, *m0, *mprev, **pm;
	paddr_t addr, pktbuf;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];
	unsigned int back, i, nbufs;
	unsigned int left, total, size;

	cn30xxfpa_buf_put_paddr(cnmac_fb_wqe, XKPHYS_TO_PHYS(work));

	nbufs = (word2 & PIP_WQE_WORD2_IP_BUFS) >> PIP_WQE_WORD2_IP_BUFS_SHIFT;
	if (nbufs == 0)
		panic("%s: dynamic short packet", __func__);

	m0 = mprev = NULL;
	total = left = (word1 & PIP_WQE_WORD1_LEN) >> 48;
	for (i = 0; i < nbufs; i++) {
		addr = word3 & PIP_WQE_WORD3_ADDR;
		back = (word3 & PIP_WQE_WORD3_BACK) >> PIP_WQE_WORD3_BACK_SHIFT;
		pktbuf = (addr & ~(CACHELINESIZE - 1)) - back * CACHELINESIZE;
		/* The owning mbuf was stashed just before the data area. */
		pm = (struct mbuf **)PHYS_TO_XKPHYS(pktbuf, CCA_CACHED) - 1;
		m = *pm;
		*pm = NULL;
		if ((paddr_t)m->m_pkthdr.ph_cookie != pktbuf)
			panic("%s: packet pool is corrupted, mbuf cookie %p != "
			    "pktbuf %p", __func__, m->m_pkthdr.ph_cookie,
			    (void *)pktbuf);

		/*
		 * Because of a hardware bug in some Octeon models the size
		 * field of word3 can be wrong (erratum PKI-100).
		 * However, the hardware uses all space in a buffer before
		 * moving to the next one so it is possible to derive
		 * the size of this data segment from the size
		 * of packet data buffers.
		 */
		size = OCTEON_POOL_SIZE_PKT - (addr - pktbuf);
		if (size > left)
			size = left;

		m->m_pkthdr.ph_cookie = NULL;
		m->m_data += addr - pktbuf;
		m->m_len = size;
		left -= size;

		if (m0 == NULL)
			m0 = m;
		else {
			m->m_flags &= ~M_PKTHDR;
			mprev->m_next = m;
		}
		mprev = m;

		/* Link word of the next buffer sits just before its data. */
		if (i + 1 < nbufs)
			memcpy(&word3, (void *)PHYS_TO_XKPHYS(addr -
			    sizeof(word3), CCA_CACHED), sizeof(word3));
	}

	m0->m_pkthdr.len = total;
	*rm = m0;
	*nmbuf = nbufs;

	return 0;
}

/*
 * Check the WQE word2 receive-error bits.  Returns 0 when the packet
 * should be accepted, non-zero when it should be dropped.
 */
int
cnmac_recv_check(struct cnmac_softc *sc, uint64_t word2)
{
	static struct timeval rxerr_log_interval = { 0, 250000 };
	uint64_t opecode;

	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
		return 0;

	opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;
	if ((sc->sc_arpcom.ac_if.if_flags & IFF_DEBUG) &&
	    ratecheck(&sc->sc_rxerr_log_last, &rxerr_log_interval))
		log(LOG_DEBUG, "%s: rx error (%lld)\n", sc->sc_dev.dv_xname,
		    opecode);

	/* XXX harmless error? */
	if (opecode == PIP_WQE_WORD2_RE_OPCODE_OVRRUN)
		return 0;

	return 1;
}

/*
 * Process one received work queue entry: validate, convert to an mbuf
 * chain, set checksum-offload results, and append to the input list.
 * Returns the number of hardware packet buffers consumed (0 on drop,
 * in which case the buffers and WQE go back to their pools).
 */
int
cnmac_recv(struct cnmac_softc *sc, uint64_t *work, struct mbuf_list *ml)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	uint64_t word2;
	int nmbuf = 0;

	word2 = work[2];

	if (!(ifp->if_flags & IFF_RUNNING))
		goto drop;

	if (__predict_false(cnmac_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* On success, this releases the work queue entry. */
	if (__predict_false(cnmac_recv_mbuf(sc, work, &m, &nmbuf) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	m->m_pkthdr.csum_flags = 0;
	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_IP_NI))) {
		/* Check IP checksum status. */
		if (!ISSET(word2, PIP_WQE_WORD2_IP_V6) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_IE))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		/* Check TCP/UDP checksum status. */
		if (ISSET(word2, PIP_WQE_WORD2_IP_TU) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_FR) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_LE))
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
	}

	ml_enqueue(ml, m);

	return nmbuf;

drop:
	cnmac_buf_free_work(sc, work);
	return 0;
}

/*
 * POW work-queue interrupt handler: pull WQEs for this port's group,
 * overlapping each receive with the next asynchronous work request,
 * hand the packets to the stack, and replenish the packet pool.
 */
int
cnmac_intr(void *arg)
{
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct cnmac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint64_t *work;
	uint64_t wqmask = 1ull << sc->sc_powgroup;
	uint32_t coreid = octeon_get_coreid();
	uint32_t port;
	int nmbuf = 0;

	/* Restrict this core to our POW group while draining it. */
	_POW_WR8(sc->sc_pow, POW_PP_GRP_MSK_OFFSET(coreid), wqmask);

	cn30xxpow_tag_sw_wait();
	cn30xxpow_work_request_async(OCTEON_CVMSEG_OFFSET(csm_pow_intr),
	    POW_NO_WAIT);

	for (;;) {
		work = (uint64_t *)cn30xxpow_work_response_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr));
		if (work == NULL)
			break;

		cn30xxpow_tag_sw_wait();
		cn30xxpow_work_request_async(
		    OCTEON_CVMSEG_OFFSET(csm_pow_intr), POW_NO_WAIT);

		port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;
		if (port != sc->sc_port) {
			printf("%s: unexpected wqe port %u, should be %u\n",
			    sc->sc_dev.dv_xname, port, sc->sc_port);
			goto wqe_error;
		}

		nmbuf += cnmac_recv(sc, work, &ml);
	}

	/* Acknowledge the work-queue interrupt for our group. */
	_POW_WR8(sc->sc_pow, POW_WQ_INT_OFFSET, wqmask);

	if_input(ifp, &ml);

	/* Refill the pool; remember any shortfall for the free tick. */
	nmbuf = cnmac_mbuf_alloc(nmbuf);
	if (nmbuf != 0)
		atomic_add_int(&cnmac_mbufs_to_alloc, nmbuf);

	return 1;

wqe_error:
	printf("word0: 0x%016llx\n", work[0]);
	printf("word1: 0x%016llx\n", work[1]);
	printf("word2: 0x%016llx\n", work[2]);
	printf("word3: 0x%016llx\n", work[3]);
	panic("wqe error");
}

/* ---- tick */

void
cnmac_free_task(void *arg)
{
	struct cnmac_softc *sc = arg;
	struct
ifnet *ifp = &sc->sc_arpcom.ac_if; 1323 struct ifqueue *ifq = &ifp->if_snd; 1324 int resched = 1; 1325 int timeout; 1326 1327 if (ml_len(&sc->sc_sendq) > 0) { 1328 cnmac_send_queue_flush_prefetch(sc); 1329 cnmac_send_queue_flush_fetch(sc); 1330 cnmac_send_queue_flush(sc); 1331 } 1332 1333 if (ifq_is_oactive(ifq)) { 1334 ifq_clr_oactive(ifq); 1335 cnmac_start(ifq); 1336 1337 if (ifq_is_oactive(ifq)) { 1338 /* The start routine did rescheduling already. */ 1339 resched = 0; 1340 } 1341 } 1342 1343 if (resched) { 1344 timeout = (sc->sc_ext_callback_cnt > 0) ? 1 : hz; 1345 timeout_add(&sc->sc_tick_free_ch, timeout); 1346 } 1347 } 1348 1349 /* 1350 * cnmac_tick_free 1351 * 1352 * => garbage collect send gather buffer / mbuf 1353 * => called at softclock 1354 */ 1355 void 1356 cnmac_tick_free(void *arg) 1357 { 1358 struct cnmac_softc *sc = arg; 1359 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1360 int to_alloc; 1361 1362 ifq_serialize(&ifp->if_snd, &sc->sc_free_task); 1363 1364 if (cnmac_mbufs_to_alloc != 0) { 1365 to_alloc = atomic_swap_uint(&cnmac_mbufs_to_alloc, 0); 1366 to_alloc = cnmac_mbuf_alloc(to_alloc); 1367 if (to_alloc != 0) 1368 atomic_add_int(&cnmac_mbufs_to_alloc, to_alloc); 1369 } 1370 } 1371 1372 /* 1373 * cnmac_tick_misc 1374 * 1375 * => collect statistics 1376 * => check link status 1377 * => called at softclock 1378 */ 1379 void 1380 cnmac_tick_misc(void *arg) 1381 { 1382 struct cnmac_softc *sc = arg; 1383 int s; 1384 1385 s = splnet(); 1386 mii_tick(&sc->sc_mii); 1387 splx(s); 1388 1389 #if NKSTAT > 0 1390 cnmac_kstat_tick(sc); 1391 #endif 1392 1393 timeout_add_sec(&sc->sc_tick_misc_ch, 1); 1394 } 1395 1396 #if NKSTAT > 0 1397 #define KVE(n, t) \ 1398 KSTAT_KV_UNIT_INITIALIZER((n), KSTAT_KV_T_COUNTER64, (t)) 1399 1400 static const struct kstat_kv cnmac_kstat_tpl[cnmac_stat_count] = { 1401 [cnmac_stat_rx_toto_gmx]= KVE("rx total gmx", KSTAT_KV_U_BYTES), 1402 [cnmac_stat_rx_totp_gmx]= KVE("rx total gmx", KSTAT_KV_U_PACKETS), 1403 [cnmac_stat_rx_toto_pip]= 
KVE("rx total pip", KSTAT_KV_U_BYTES), 1404 [cnmac_stat_rx_totp_pip]= KVE("rx total pip", KSTAT_KV_U_PACKETS), 1405 [cnmac_stat_rx_h64] = KVE("rx 64B", KSTAT_KV_U_PACKETS), 1406 [cnmac_stat_rx_h127] = KVE("rx 65-127B", KSTAT_KV_U_PACKETS), 1407 [cnmac_stat_rx_h255] = KVE("rx 128-255B", KSTAT_KV_U_PACKETS), 1408 [cnmac_stat_rx_h511] = KVE("rx 256-511B", KSTAT_KV_U_PACKETS), 1409 [cnmac_stat_rx_h1023] = KVE("rx 512-1023B", KSTAT_KV_U_PACKETS), 1410 [cnmac_stat_rx_h1518] = KVE("rx 1024-1518B", KSTAT_KV_U_PACKETS), 1411 [cnmac_stat_rx_hmax] = KVE("rx 1519-maxB", KSTAT_KV_U_PACKETS), 1412 [cnmac_stat_rx_bcast] = KVE("rx bcast", KSTAT_KV_U_PACKETS), 1413 [cnmac_stat_rx_mcast] = KVE("rx mcast", KSTAT_KV_U_PACKETS), 1414 [cnmac_stat_rx_qdpo] = KVE("rx qos drop", KSTAT_KV_U_BYTES), 1415 [cnmac_stat_rx_qdpp] = KVE("rx qos drop", KSTAT_KV_U_PACKETS), 1416 [cnmac_stat_rx_fcs] = KVE("rx fcs err", KSTAT_KV_U_PACKETS), 1417 [cnmac_stat_rx_frag] = KVE("rx fcs undersize",KSTAT_KV_U_PACKETS), 1418 [cnmac_stat_rx_undersz] = KVE("rx undersize", KSTAT_KV_U_PACKETS), 1419 [cnmac_stat_rx_jabber] = KVE("rx jabber", KSTAT_KV_U_PACKETS), 1420 [cnmac_stat_rx_oversz] = KVE("rx oversize", KSTAT_KV_U_PACKETS), 1421 [cnmac_stat_rx_raw] = KVE("rx raw", KSTAT_KV_U_PACKETS), 1422 [cnmac_stat_rx_bad] = KVE("rx bad", KSTAT_KV_U_PACKETS), 1423 [cnmac_stat_rx_drop] = KVE("rx drop", KSTAT_KV_U_PACKETS), 1424 [cnmac_stat_rx_ctl] = KVE("rx control", KSTAT_KV_U_PACKETS), 1425 [cnmac_stat_rx_dmac] = KVE("rx dmac", KSTAT_KV_U_PACKETS), 1426 [cnmac_stat_tx_toto] = KVE("tx total", KSTAT_KV_U_BYTES), 1427 [cnmac_stat_tx_totp] = KVE("tx total", KSTAT_KV_U_PACKETS), 1428 [cnmac_stat_tx_hmin] = KVE("tx min-63B", KSTAT_KV_U_PACKETS), 1429 [cnmac_stat_tx_h64] = KVE("tx 64B", KSTAT_KV_U_PACKETS), 1430 [cnmac_stat_tx_h127] = KVE("tx 65-127B", KSTAT_KV_U_PACKETS), 1431 [cnmac_stat_tx_h255] = KVE("tx 128-255B", KSTAT_KV_U_PACKETS), 1432 [cnmac_stat_tx_h511] = KVE("tx 256-511B", KSTAT_KV_U_PACKETS), 1433 
[cnmac_stat_tx_h1023] = KVE("tx 512-1023B", KSTAT_KV_U_PACKETS), 1434 [cnmac_stat_tx_h1518] = KVE("tx 1024-1518B", KSTAT_KV_U_PACKETS), 1435 [cnmac_stat_tx_hmax] = KVE("tx 1519-maxB", KSTAT_KV_U_PACKETS), 1436 [cnmac_stat_tx_bcast] = KVE("tx bcast", KSTAT_KV_U_PACKETS), 1437 [cnmac_stat_tx_mcast] = KVE("tx mcast", KSTAT_KV_U_PACKETS), 1438 [cnmac_stat_tx_coll] = KVE("tx coll", KSTAT_KV_U_PACKETS), 1439 [cnmac_stat_tx_defer] = KVE("tx defer", KSTAT_KV_U_PACKETS), 1440 [cnmac_stat_tx_scol] = KVE("tx scoll", KSTAT_KV_U_PACKETS), 1441 [cnmac_stat_tx_mcol] = KVE("tx mcoll", KSTAT_KV_U_PACKETS), 1442 [cnmac_stat_tx_ctl] = KVE("tx control", KSTAT_KV_U_PACKETS), 1443 [cnmac_stat_tx_uflow] = KVE("tx underflow", KSTAT_KV_U_PACKETS), 1444 }; 1445 1446 void 1447 cnmac_kstat_attach(struct cnmac_softc *sc) 1448 { 1449 struct kstat *ks; 1450 struct kstat_kv *kvs; 1451 1452 mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK); 1453 1454 ks = kstat_create(sc->sc_dev.dv_xname, 0, "cnmac-stats", 0, 1455 KSTAT_T_KV, 0); 1456 if (ks == NULL) 1457 return; 1458 1459 kvs = malloc(sizeof(cnmac_kstat_tpl), M_DEVBUF, M_WAITOK | M_ZERO); 1460 memcpy(kvs, cnmac_kstat_tpl, sizeof(cnmac_kstat_tpl)); 1461 1462 kstat_set_mutex(ks, &sc->sc_kstat_mtx); 1463 ks->ks_softc = sc; 1464 ks->ks_data = kvs; 1465 ks->ks_datalen = sizeof(cnmac_kstat_tpl); 1466 ks->ks_read = cnmac_kstat_read; 1467 1468 sc->sc_kstat = ks; 1469 kstat_install(ks); 1470 } 1471 1472 int 1473 cnmac_kstat_read(struct kstat *ks) 1474 { 1475 struct cnmac_softc *sc = ks->ks_softc; 1476 struct kstat_kv *kvs = ks->ks_data; 1477 1478 cn30xxpip_kstat_read(sc->sc_pip, kvs); 1479 cn30xxgmx_kstat_read(sc->sc_gmx_port, kvs); 1480 1481 getnanouptime(&ks->ks_updated); 1482 1483 return 0; 1484 } 1485 1486 void 1487 cnmac_kstat_tick(struct cnmac_softc *sc) 1488 { 1489 if (sc->sc_kstat == NULL) 1490 return; 1491 if (!mtx_enter_try(&sc->sc_kstat_mtx)) 1492 return; 1493 cnmac_kstat_read(sc->sc_kstat); 1494 mtx_leave(&sc->sc_kstat_mtx); 1495 } 1496 #endif 1497