/*	$OpenBSD: if_cnmac.c,v 1.37 2015/12/18 13:36:12 visa Exp $	*/

/*
 * Copyright (c) 2007 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "bpfilter.h"

/*
 * XXXSEIL
 * If no free send buffer is available, free all the sent buffers and bail out.
 */
#define OCTEON_ETH_SEND_QUEUE_CHECK

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/stdint.h> /* uintptr_t */
#include <sys/syslog.h>
#include <sys/endian.h>
#ifdef MBUF_TIMESTAMP
#include <sys/time.h>
#endif

#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/octeonvar.h>
#include <machine/octeon_model.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <octeon/dev/cn30xxasxreg.h>
#include <octeon/dev/cn30xxciureg.h>
#include <octeon/dev/cn30xxnpireg.h>
#include <octeon/dev/cn30xxgmxreg.h>
#include <octeon/dev/cn30xxipdreg.h>
#include <octeon/dev/cn30xxpipreg.h>
#include <octeon/dev/cn30xxpowreg.h>
#include <octeon/dev/cn30xxfaureg.h>
#include <octeon/dev/cn30xxfpareg.h>
#include <octeon/dev/cn30xxbootbusreg.h>
#include <octeon/dev/cn30xxfpavar.h>
#include <octeon/dev/cn30xxgmxvar.h>
#include <octeon/dev/cn30xxfauvar.h>
#include <octeon/dev/cn30xxpowvar.h>
#include <octeon/dev/cn30xxipdvar.h>
#include <octeon/dev/cn30xxpipvar.h>
#include <octeon/dev/cn30xxpkovar.h>
#include <octeon/dev/cn30xxasxvar.h>
#include <octeon/dev/cn30xxsmivar.h>
#include <octeon/dev/iobusvar.h>
#include <octeon/dev/if_cnmacvar.h>

#ifdef OCTEON_ETH_DEBUG
#define OCTEON_ETH_KASSERT(x)	KASSERT(x)
#define OCTEON_ETH_KDASSERT(x)	KDASSERT(x)
#else
#define OCTEON_ETH_KASSERT(x)
#define OCTEON_ETH_KDASSERT(x)
#endif

/*
 * Set the PKO to think command buffers are an odd length.  This makes it
 * so we never have to divide a command across two buffers.
 */
#define OCTEON_POOL_NWORDS_CMD	\
	(((uint32_t)OCTEON_POOL_SIZE_CMD / sizeof(uint64_t)) - 1)
#define FPA_COMMAND_BUFFER_POOL_NWORDS	OCTEON_POOL_NWORDS_CMD	/* XXX */

#if NBPFILTER > 0
#define OCTEON_ETH_TAP(ifp, m, dir)					\
	do {								\
		/* Pass this up to any BPF listeners. */		\
		if ((ifp)->if_bpf)					\
			bpf_mtap((ifp)->if_bpf, (m), (dir));		\
	} while (0/* CONSTCOND */)
#else
#define OCTEON_ETH_TAP(ifp, m, dir)
#endif /* NBPFILTER > 0 */

void	octeon_eth_buf_init(struct octeon_eth_softc *);

int	octeon_eth_match(struct device *, void *, void *);
void	octeon_eth_attach(struct device *, struct device *, void *);
void	octeon_eth_pip_init(struct octeon_eth_softc *);
void	octeon_eth_ipd_init(struct octeon_eth_softc *);
void	octeon_eth_pko_init(struct octeon_eth_softc *);
void	octeon_eth_asx_init(struct octeon_eth_softc *);
void	octeon_eth_smi_init(struct octeon_eth_softc *);

void	octeon_eth_board_mac_addr(uint8_t *);

int	octeon_eth_mii_readreg(struct device *, int, int);
void	octeon_eth_mii_writereg(struct device *, int, int, int);
void	octeon_eth_mii_statchg(struct device *);

int	octeon_eth_mediainit(struct octeon_eth_softc *);
void	octeon_eth_mediastatus(struct ifnet *, struct ifmediareq *);
int	octeon_eth_mediachange(struct ifnet *);

void	octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *);
void	octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *);
void	octeon_eth_send_queue_flush(struct octeon_eth_softc *);
int	octeon_eth_send_queue_is_full(struct octeon_eth_softc *);
void	octeon_eth_send_queue_add(struct octeon_eth_softc *,
	    struct mbuf *, uint64_t *);
void	octeon_eth_send_queue_del(struct octeon_eth_softc *,
	    struct mbuf **, uint64_t **);
int	octeon_eth_buf_free_work(struct octeon_eth_softc *,
	    uint64_t *, uint64_t);
void	octeon_eth_buf_ext_free_m(caddr_t, u_int, void *);
void	octeon_eth_buf_ext_free_ext(caddr_t, u_int, void *);

int	octeon_eth_ioctl(struct ifnet *, u_long, caddr_t);
void	octeon_eth_watchdog(struct ifnet *);
int	octeon_eth_init(struct ifnet *);
int	octeon_eth_stop(struct ifnet *, int);
void	octeon_eth_start(struct ifnet *);

int	octeon_eth_send_cmd(struct octeon_eth_softc *, uint64_t, uint64_t);
uint64_t octeon_eth_send_makecmd_w1(int, paddr_t);
uint64_t octeon_eth_send_makecmd_w0(uint64_t, uint64_t, size_t, int, int);
int	octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *,
	    struct mbuf *, uint64_t *, int *);
int	octeon_eth_send_makecmd(struct octeon_eth_softc *,
	    struct mbuf *, uint64_t *, uint64_t *, uint64_t *);
int	octeon_eth_send_buf(struct octeon_eth_softc *,
	    struct mbuf *, uint64_t *);
int	octeon_eth_send(struct octeon_eth_softc *, struct mbuf *);

int	octeon_eth_reset(struct octeon_eth_softc *);
int	octeon_eth_configure(struct octeon_eth_softc *);
int	octeon_eth_configure_common(struct octeon_eth_softc *);

void	octeon_eth_tick_free(void *arg);
void	octeon_eth_tick_misc(void *);

int	octeon_eth_recv_mbuf(struct octeon_eth_softc *,
	    uint64_t *, struct mbuf **);
int	octeon_eth_recv_check_code(struct octeon_eth_softc *, uint64_t);
#if 0 /* not used */
int	octeon_eth_recv_check_jumbo(struct octeon_eth_softc *, uint64_t);
#endif
int	octeon_eth_recv_check_link(struct octeon_eth_softc *, uint64_t);
int	octeon_eth_recv_check(struct octeon_eth_softc *, uint64_t);
int	octeon_eth_recv(struct octeon_eth_softc *, uint64_t *);
void	octeon_eth_recv_intr(void *, uint64_t *);
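
/*
 * Rough shape of the driver, as implemented below:
 *
 *	transmit:	octeon_eth_start() -> octeon_eth_send()
 *			-> octeon_eth_send_makecmd() -> octeon_eth_send_cmd()
 *	receive:	cn30xxpow interrupt -> octeon_eth_recv_intr()
 *			-> octeon_eth_recv() -> octeon_eth_recv_mbuf()
 *			-> if_input()
 */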

/* device driver context */
struct octeon_eth_softc *octeon_eth_gsc[GMX_PORT_NUNITS];
void	*octeon_eth_pow_recv_ih;

/* device parameters */
int	octeon_eth_param_pko_cmd_w0_n2 = 1;

const struct cfattach cnmac_ca =
    { sizeof(struct octeon_eth_softc), octeon_eth_match, octeon_eth_attach };

struct cfdriver cnmac_cd = { NULL, "cnmac", DV_IFNET };

/* ---- buffer management */

const struct octeon_eth_pool_param {
	int		poolno;
	size_t		size;
	size_t		nelems;
} octeon_eth_pool_params[] = {
#define	_ENTRY(x)	{ OCTEON_POOL_NO_##x, OCTEON_POOL_SIZE_##x, OCTEON_POOL_NELEMS_##x }
	_ENTRY(PKT),
	_ENTRY(WQE),
	_ENTRY(CMD),
	_ENTRY(SG)
#undef	_ENTRY
};
struct cn30xxfpa_buf	*octeon_eth_pools[8/* XXX */];
#define	octeon_eth_fb_pkt	octeon_eth_pools[OCTEON_POOL_NO_PKT]
#define	octeon_eth_fb_wqe	octeon_eth_pools[OCTEON_POOL_NO_WQE]
#define	octeon_eth_fb_cmd	octeon_eth_pools[OCTEON_POOL_NO_CMD]
#define	octeon_eth_fb_sg	octeon_eth_pools[OCTEON_POOL_NO_SG]

uint64_t octeon_eth_mac_addr = 0;
uint32_t octeon_eth_mac_addr_offset = 0;
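
/*
 * Set up the four FPA buffer pools (packet data, work queue entries, PKO
 * command buffers, transmit gather lists).  The pools are global and
 * shared by all cnmac ports, so only the first call does any work; the
 * "once" guard makes later calls no-ops.
 */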
void
octeon_eth_buf_init(struct octeon_eth_softc *sc)
{
	static int once;
	int i;
	const struct octeon_eth_pool_param *pp;
	struct cn30xxfpa_buf *fb;

	if (once == 1)
		return;
	once = 1;

	for (i = 0; i < (int)nitems(octeon_eth_pool_params); i++) {
		pp = &octeon_eth_pool_params[i];
		cn30xxfpa_buf_init(pp->poolno, pp->size, pp->nelems, &fb);
		octeon_eth_pools[i] = fb;
	}
}

/* ---- autoconf */

int
octeon_eth_match(struct device *parent, void *match, void *aux)
{
	struct cfdata *cf = (struct cfdata *)match;
	struct cn30xxgmx_attach_args *ga = aux;

	if (strcmp(cf->cf_driver->cd_name, ga->ga_name) != 0) {
		return 0;
	}
	return 1;
}

void
octeon_eth_attach(struct device *parent, struct device *self, void *aux)
{
	struct octeon_eth_softc *sc = (void *)self;
	struct cn30xxgmx_attach_args *ga = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_regt = ga->ga_regt;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_port = ga->ga_portno;
	sc->sc_port_type = ga->ga_port_type;
	sc->sc_gmx = ga->ga_gmx;
	sc->sc_gmx_port = ga->ga_gmx_port;
	sc->sc_phy_addr = ga->ga_phy_addr;

	sc->sc_init_flag = 0;

	/*
	 * XXX
	 * Setting PIP_IP_OFFSET[OFFSET] to 8 causes panic ... why???
	 */
	sc->sc_ip_offset = 0/* XXX */;

	octeon_eth_board_mac_addr(enaddr);
	printf(", address %s\n", ether_sprintf(enaddr));

	octeon_eth_gsc[sc->sc_port] = sc;

	ml_init(&sc->sc_sendq);
	sc->sc_soft_req_thresh = 15/* XXX */;
	sc->sc_ext_callback_cnt = 0;

	cn30xxgmx_stats_init(sc->sc_gmx_port);

	timeout_set(&sc->sc_tick_misc_ch, octeon_eth_tick_misc, sc);
	timeout_set(&sc->sc_tick_free_ch, octeon_eth_tick_free, sc);

	cn30xxfau_op_init(&sc->sc_fau_done,
	    OCTEON_CVMSEG_ETHER_OFFSET(sc->sc_port, csm_ether_fau_done),
	    OCT_FAU_REG_ADDR_END - (8 * (sc->sc_port + 1))/* XXX */);
	cn30xxfau_op_set_8(&sc->sc_fau_done, 0);

	octeon_eth_pip_init(sc);
	octeon_eth_ipd_init(sc);
	octeon_eth_pko_init(sc);
	octeon_eth_asx_init(sc);
	octeon_eth_smi_init(sc);

	sc->sc_gmx_port->sc_ipd = sc->sc_ipd;
	sc->sc_gmx_port->sc_port_asx = sc->sc_asx;
	sc->sc_gmx_port->sc_port_mii = &sc->sc_mii;
	sc->sc_gmx_port->sc_port_ac = &sc->sc_arpcom;

	/* XXX */
	sc->sc_pow = &cn30xxpow_softc;

	octeon_eth_mediainit(sc);

	strncpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof(ifp->if_xname));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = octeon_eth_ioctl;
	ifp->if_start = octeon_eth_start;
	ifp->if_watchdog = octeon_eth_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(GATHER_QUEUE_SIZE, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;

	cn30xxgmx_set_mac_addr(sc->sc_gmx_port, enaddr);
	cn30xxgmx_set_filter(sc->sc_gmx_port);

	if_attach(ifp);

	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ether_ifattach(ifp);

	/* XXX */
	sc->sc_rate_recv_check_link_cap.tv_sec = 1;
	sc->sc_rate_recv_check_jumbo_cap.tv_sec = 1;
	sc->sc_rate_recv_check_code_cap.tv_sec = 1;

#if 1
	octeon_eth_buf_init(sc);
#endif

	if (octeon_eth_pow_recv_ih == NULL)
		octeon_eth_pow_recv_ih = cn30xxpow_intr_establish(
		    OCTEON_POW_GROUP_PIP, IPL_NET | IPL_MPSAFE,
		    octeon_eth_recv_intr, NULL, NULL, sc->sc_dev.dv_xname);
}

/* ---- submodules */
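
/*
 * Per-port setup of the on-chip units this driver sits on: each helper
 * below just fills in a cn30xx*_attach_args for one unit (PIP, IPD, PKO,
 * ASX, SMI) and calls the corresponding cn30xx*_init() to obtain a handle
 * in the softc.
 */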

/* XXX */
void
octeon_eth_pip_init(struct octeon_eth_softc *sc)
{
	struct cn30xxpip_attach_args pip_aa;

	pip_aa.aa_port = sc->sc_port;
	pip_aa.aa_regt = sc->sc_regt;
	pip_aa.aa_tag_type = POW_TAG_TYPE_ORDERED/* XXX */;
	pip_aa.aa_receive_group = OCTEON_POW_GROUP_PIP;
	pip_aa.aa_ip_offset = sc->sc_ip_offset;
	cn30xxpip_init(&pip_aa, &sc->sc_pip);
}

/* XXX */
void
octeon_eth_ipd_init(struct octeon_eth_softc *sc)
{
	struct cn30xxipd_attach_args ipd_aa;

	ipd_aa.aa_port = sc->sc_port;
	ipd_aa.aa_regt = sc->sc_regt;
	ipd_aa.aa_first_mbuff_skip = 184/* XXX */;
	ipd_aa.aa_not_first_mbuff_skip = 0/* XXX */;
	cn30xxipd_init(&ipd_aa, &sc->sc_ipd);
}

/* XXX */
void
octeon_eth_pko_init(struct octeon_eth_softc *sc)
{
	struct cn30xxpko_attach_args pko_aa;

	pko_aa.aa_port = sc->sc_port;
	pko_aa.aa_regt = sc->sc_regt;
	pko_aa.aa_cmdptr = &sc->sc_cmdptr;
	pko_aa.aa_cmd_buf_pool = OCTEON_POOL_NO_CMD;
	pko_aa.aa_cmd_buf_size = OCTEON_POOL_NWORDS_CMD;
	cn30xxpko_init(&pko_aa, &sc->sc_pko);
}

/* XXX */
void
octeon_eth_asx_init(struct octeon_eth_softc *sc)
{
	struct cn30xxasx_attach_args asx_aa;

	asx_aa.aa_port = sc->sc_port;
	asx_aa.aa_regt = sc->sc_regt;
	cn30xxasx_init(&asx_aa, &sc->sc_asx);
}

void
octeon_eth_smi_init(struct octeon_eth_softc *sc)
{
	struct cn30xxsmi_attach_args smi_aa;

	smi_aa.aa_port = sc->sc_port;
	smi_aa.aa_regt = sc->sc_regt;
	cn30xxsmi_init(&smi_aa, &sc->sc_smi);
	cn30xxsmi_set_clock(sc->sc_smi, 0x1464ULL);	/* XXX */
}

/* ---- XXX */

void
octeon_eth_board_mac_addr(uint8_t *enaddr)
{
	int id;

	/* Initialize MAC addresses from the global address base. */
	if (octeon_eth_mac_addr == 0) {
		memcpy((uint8_t *)&octeon_eth_mac_addr + 2,
		    octeon_boot_info->mac_addr_base, 6);

		/*
		 * Should be allowed to fail hard if the mac_addr_base
		 * address couldn't be read...
		 */
		if (octeon_eth_mac_addr == 0)
			return;

		/*
		 * Calculate the offset from the mac_addr_base that will be
		 * used for the next sc->sc_port.
		 */
		id = octeon_get_chipid();

		switch (octeon_model_family(id)) {
		case OCTEON_MODEL_FAMILY_CN56XX:
			octeon_eth_mac_addr_offset = 1;
			break;
		/*
		case OCTEON_MODEL_FAMILY_CN52XX:
		case OCTEON_MODEL_FAMILY_CN63XX:
			octeon_eth_mac_addr_offset = 2;
			break;
		*/
		default:
			octeon_eth_mac_addr_offset = 0;
			break;
		}

		octeon_eth_mac_addr += octeon_eth_mac_addr_offset;
	}

	/* No more MAC addresses to assign. */
	if (octeon_eth_mac_addr_offset >= octeon_boot_info->mac_addr_count)
		return;

	if (enaddr)
		memcpy(enaddr, (uint8_t *)&octeon_eth_mac_addr + 2, 6);

	octeon_eth_mac_addr++;
	octeon_eth_mac_addr_offset++;
}

/* ---- media */

int
octeon_eth_mii_readreg(struct device *self, int phy_no, int reg)
{
	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;

	return cn30xxsmi_read(sc->sc_smi, phy_no, reg);
}

void
octeon_eth_mii_writereg(struct device *self, int phy_no, int reg, int value)
{
	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;

	cn30xxsmi_write(sc->sc_smi, phy_no, reg, value);
}

void
octeon_eth_mii_statchg(struct device *self)
{
	struct octeon_eth_softc *sc = (struct octeon_eth_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	cn30xxpko_port_enable(sc->sc_pko, 0);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	if (ISSET(ifp->if_flags, IFF_RUNNING))
		cn30xxgmx_set_filter(sc->sc_gmx_port);

	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
}

int
octeon_eth_mediainit(struct octeon_eth_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = octeon_eth_mii_readreg;
	sc->sc_mii.mii_writereg = octeon_eth_mii_writereg;
	sc->sc_mii.mii_statchg = octeon_eth_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, octeon_eth_mediachange,
	    octeon_eth_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii,
	    0xffffffff, sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	}

	return 0;
}

void
octeon_eth_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
	    sc->sc_gmx_port->sc_port_flowflags;
}

int
octeon_eth_mediachange(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	return mii_mediachg(&sc->sc_mii);
}

/* ---- send buffer garbage collection */
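
/*
 * Transmit completion is tracked through the sc_fau_done FAU register:
 * every PKO command issued below asks the hardware to update that counter
 * as the packet goes out, so it counts down from zero and its magnitude is
 * the number of completed but not yet collected transmissions.  The
 * prefetch/fetch pair hides the IOBDMA read latency: prefetch starts the
 * read, fetch later picks the result up into sc_hard_done_cnt.
 */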

void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}

void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}

void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, XKPHYS_TO_PHYS(gbuf));

		m_freem(m);
	}

	cn30xxfau_op_add_8(&sc->sc_fau_done, i);
}

int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	nofree_cnt = ml_len(&sc->sc_sendq) + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		return 1;
	}

#endif
	return 0;
}

void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	m->m_pkthdr.ph_cookie = gbuf;
	ml_enqueue(&sc->sc_sendq, m);

	if (m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;
}

void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct mbuf *m;

	m = ml_dequeue(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(m != NULL);

	*rm = m;
	*rgbuf = m->m_pkthdr.ph_cookie;

	if (m->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}
}

int
octeon_eth_buf_free_work(struct octeon_eth_softc *sc, uint64_t *work,
    uint64_t word2)
{
	/* XXX when jumbo frame */
	if (ISSET(word2, PIP_WQE_WORD2_IP_BUFS)) {
		paddr_t addr;
		paddr_t start_buffer;

		addr = XKPHYS_TO_PHYS(work[3] & PIP_WQE_WORD3_ADDR);
		start_buffer = addr & ~(2048 - 1);

		cn30xxfpa_buf_put_paddr(octeon_eth_fb_pkt, start_buffer);
	}

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, XKPHYS_TO_PHYS(work));

	return 0;
}
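
/*
 * External-storage free callbacks for received mbufs.  The receive path
 * hands packet data to the stack in place, so when the mbuf is freed the
 * borrowed memory has to go back to the FPA: just the work queue entry if
 * the packet lived inside it, or both the WQE and the packet buffer.
 */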
void
octeon_eth_buf_ext_free_m(caddr_t buf, u_int size, void *arg)
{
	uint64_t *work = (void *)arg;

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, XKPHYS_TO_PHYS(work));
}

void
octeon_eth_buf_ext_free_ext(caddr_t buf, u_int size, void *arg)
{
	uint64_t *work = (void *)arg;

	cn30xxfpa_buf_put_paddr(octeon_eth_fb_wqe, XKPHYS_TO_PHYS(work));
	cn30xxfpa_buf_put_paddr(octeon_eth_fb_pkt, XKPHYS_TO_PHYS(buf));
}

/* ---- ifnet interfaces */

int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			octeon_eth_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				octeon_eth_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				octeon_eth_stop(ifp, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			cn30xxgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	octeon_eth_start(ifp);

	splx(s);
	return (error);
}

/* ---- send (output) */

uint64_t
octeon_eth_send_makecmd_w0(uint64_t fau0, uint64_t fau1, size_t len, int segs,
    int ipoffp1)
{
	return cn30xxpko_cmd_word0(
	    OCT_FAU_OP_SIZE_64,			/* sz1 */
	    OCT_FAU_OP_SIZE_64,			/* sz0 */
	    1, fau1, 1, fau0,			/* s1, reg1, s0, reg0 */
	    0,					/* le */
	    octeon_eth_param_pko_cmd_w0_n2,	/* n2 */
	    1, 0,				/* q, r */
	    (segs == 1) ? 0 : 1,		/* g */
	    ipoffp1, 0, 1,			/* ipoffp1, ii, df */
	    segs, (int)len);			/* segs, totalbytes */
}

uint64_t
octeon_eth_send_makecmd_w1(int size, paddr_t addr)
{
	return cn30xxpko_cmd_word1(
	    0, 0,				/* i, back */
	    FPA_GATHER_BUFFER_POOL,		/* pool */
	    size, addr);			/* size, addr */
}
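
/*
 * Translate a direct-mapped kernel virtual address to a physical address
 * for the hardware.  mbuf data handed to the transmit path is expected to
 * live in XKPHYS or CKSEG0/CKSEG1; anything else cannot be translated
 * this way, hence the panic.
 */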
#define KVTOPHYS(addr)	if_cnmac_kvtophys((vaddr_t)(addr))
paddr_t	if_cnmac_kvtophys(vaddr_t);

paddr_t
if_cnmac_kvtophys(vaddr_t kva)
{
	if (IS_XKPHYS(kva))
		return XKPHYS_TO_PHYS(kva);
	else if (kva >= CKSEG0_BASE && kva < CKSEG0_BASE + CKSEG_SIZE)
		return CKSEG0_TO_PHYS(kva);
	else if (kva >= CKSEG1_BASE && kva < CKSEG1_BASE + CKSEG_SIZE)
		return CKSEG1_TO_PHYS(kva);

	panic("%s: non-direct mapped address %p", __func__, (void *)kva);
}

int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;

	for (m = m0; m != NULL; m = m->m_next) {
		if (__predict_false(m->m_len == 0))
			continue;

		if (segs >= OCTEON_POOL_SIZE_SG / sizeof(uint64_t))
			return 1;

		gbuf[segs] = octeon_eth_send_makecmd_w1(m->m_len,
		    KVTOPHYS(m->m_data));
		segs++;
	}

	*rsegs = segs;

	return 0;
}

int
octeon_eth_send_makecmd(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf, uint64_t *rpko_cmd_w0, uint64_t *rpko_cmd_w1)
{
	uint64_t pko_cmd_w0, pko_cmd_w1;
	int ipoffp1;
	int segs;
	int result = 0;

	if (octeon_eth_send_makecmd_gbuf(sc, m, gbuf, &segs)) {
		log(LOG_WARNING, "%s: too many transmission data segments\n",
		    sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	/* Get the IP packet offset for TCP/UDP checksum offloading. */
	ipoffp1 = (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
	    ? (ETHER_HDR_LEN + 1) : 0;

	/*
	 * segs == 1	-> link mode (single continuous buffer)
	 *		   WORD1[size] is the number of bytes pointed to by
	 *		   the segment
	 *
	 * segs > 1	-> gather mode (scatter-gather buffer)
	 *		   WORD1[size] is the number of segments
	 */
	pko_cmd_w0 = octeon_eth_send_makecmd_w0(sc->sc_fau_done.fd_regno,
	    0, m->m_pkthdr.len, segs, ipoffp1);
	pko_cmd_w1 = octeon_eth_send_makecmd_w1(
	    (segs == 1) ? m->m_pkthdr.len : segs,
	    (segs == 1) ?
		KVTOPHYS(m->m_data) :
		XKPHYS_TO_PHYS(gbuf));

	*rpko_cmd_w0 = pko_cmd_w0;
	*rpko_cmd_w1 = pko_cmd_w1;

done:
	return result;
}
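
/*
 * Append a two-word PKO command to the current command buffer and ring
 * the doorbell.  Command buffers come from the FPA CMD pool and are
 * chained: when the current buffer is nearly full, its last word is used
 * as a link to a freshly allocated one, which is why the buffer holds an
 * odd number of words (OCTEON_POOL_NWORDS_CMD) and a command never
 * straddles two buffers.
 */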
int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	cmdptr = (uint64_t *)PHYS_TO_XKPHYS(sc->sc_cmdptr.cmdptr, CCA_CACHED);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <=
	    FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 ==
	    FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = cn30xxfpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}

		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}

int
octeon_eth_send_buf(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	int result = 0, error;
	uint64_t pko_cmd_w0, pko_cmd_w1;

	error = octeon_eth_send_makecmd(sc, m, gbuf, &pko_cmd_w0, &pko_cmd_w1);
	if (error != 0) {
		/* already logging */
		result = error;
		goto done;
	}

	error = octeon_eth_send_cmd(sc, pko_cmd_w0, pko_cmd_w1);
	if (error != 0) {
		/* already logging */
		result = error;
	}

done:
	return result;
}

int
octeon_eth_send(struct octeon_eth_softc *sc, struct mbuf *m)
{
	paddr_t gaddr = 0;
	uint64_t *gbuf = NULL;
	int result = 0, error;

	gaddr = cn30xxfpa_buf_get_paddr(octeon_eth_fb_sg);
	if (gaddr == 0) {
		log(LOG_WARNING,
		    "%s: cannot allocate gather buffer from free pool allocator\n",
		    sc->sc_dev.dv_xname);
		result = 1;
		goto done;
	}

	gbuf = (uint64_t *)(uintptr_t)PHYS_TO_XKPHYS(gaddr, CCA_CACHED);

	error = octeon_eth_send_buf(sc, m, gbuf);
	if (error != 0) {
		/* already logging */
		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, gaddr);
		result = error;
		goto done;
	}

	octeon_eth_send_queue_add(sc, m, gbuf);

done:
	return result;
}
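
/*
 * ifnet start routine.  Note the prefetch/fetch pairing around the loop:
 * an IOBDMA read of the transmit-completion counter is always started
 * before its result is needed, so the CPU does useful work while the
 * read is in flight.
 */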
void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/*
	 * performance tuning
	 * presend iobdma request
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		goto last;

	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port)))
		goto last;

	for (;;) {
		octeon_eth_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent
		 * buffers and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			return;
		}
		/* XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

		OCTEON_ETH_TAP(ifp, m, BPF_DIRECTION_OUT);

		/* XXX */
		if (ml_len(&sc->sc_sendq) > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed to transmit packet\n",
			    sc->sc_dev.dv_xname);
		}
		/* XXX */

		/*
		 * send next iobdma request
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

last:
	octeon_eth_send_queue_flush_fetch(sc);
}

void
octeon_eth_watchdog(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	octeon_eth_configure(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	octeon_eth_start(ifp);
}

int
octeon_eth_init(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		octeon_eth_stop(ifp, 0);

		/* Initialize the device. */
		octeon_eth_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	octeon_eth_mediachange(ifp);

	cn30xxgmx_set_filter(sc->sc_gmx_port);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
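
/*
 * Stop the interface.  Only the GMX port is disabled here; the PKO, IPD
 * and POW units are shared by all ports and stay up (cf. the XXX note in
 * octeon_eth_init() above).  intr_barrier() makes sure the receive
 * interrupt handler is no longer running before we return.
 */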
int
octeon_eth_stop(struct ifnet *ifp, int disable)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_misc_ch);
	timeout_del(&sc->sc_tick_free_ch);
	timeout_del(&sc->sc_resume_ch);

	mii_down(&sc->sc_mii);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	/* Mark the interface as down and cancel the watchdog timer. */
	CLR(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	intr_barrier(octeon_eth_pow_recv_ih);

	return 0;
}

/* ---- misc */

#define PKO_INDEX_MASK	((1ULL << 12/* XXX */) - 1)

int
octeon_eth_reset(struct octeon_eth_softc *sc)
{
	cn30xxgmx_reset_speed(sc->sc_gmx_port);
	cn30xxgmx_reset_flowctl(sc->sc_gmx_port);
	cn30xxgmx_reset_timing(sc->sc_gmx_port);
	cn30xxgmx_reset_board(sc->sc_gmx_port);

	return 0;
}

int
octeon_eth_configure(struct octeon_eth_softc *sc)
{
	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	octeon_eth_reset(sc);

	octeon_eth_configure_common(sc);

	cn30xxpko_port_config(sc->sc_pko);
	cn30xxpko_port_enable(sc->sc_pko, 1);
	cn30xxpip_port_config(sc->sc_pip);

	cn30xxgmx_tx_stats_rd_clr(sc->sc_gmx_port, 1);
	cn30xxgmx_rx_stats_rd_clr(sc->sc_gmx_port, 1);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 1);

	return 0;
}

int
octeon_eth_configure_common(struct octeon_eth_softc *sc)
{
	static int once;

	uint64_t reg;

	if (once == 1)
		return 0;
	once = 1;

#if 0
	octeon_eth_buf_init(sc);
#endif

	cn30xxipd_config(sc->sc_ipd);
	cn30xxpko_config(sc->sc_pko);

	cn30xxpow_config(sc->sc_pow, OCTEON_POW_GROUP_PIP);

	/* Set padding for packets that Octeon does not recognize as IP. */
	reg = octeon_xkphys_read_8(PIP_GBL_CFG);
	reg &= ~PIP_GBL_CFG_NIP_SHF_MASK;
	reg |= ETHER_ALIGN << PIP_GBL_CFG_NIP_SHF_SHIFT;
	octeon_xkphys_write_8(PIP_GBL_CFG, reg);

	return 0;
}
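
/*
 * Turn a received work queue entry into an mbuf without copying the
 * payload.  Small packets ("dynamic short") are delivered inside the WQE
 * itself; larger ones sit in a separate 2KB packet buffer that WORD3
 * points into.  Either way the buffer is attached as external storage
 * and handed back to the FPA by the ext_free callbacks above.
 */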
int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	caddr_t data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		/*
		 * If the packet is IP, the hardware has padded it so that
		 * the IP source address starts on the next 64-bit word
		 * boundary.
		 */
		data = (caddr_t)&work[4] + ETHER_ALIGN;
		if (!ISSET(word2, PIP_WQE_WORD2_IP_NI) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_V6))
			data += 4;
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_XKPHYS(word3 & PIP_WQE_WORD3_ADDR, CCA_CACHED);
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}

int
octeon_eth_recv_check_code(struct octeon_eth_softc *sc, uint64_t word2)
{
	uint64_t opecode = word2 & PIP_WQE_WORD2_NOIP_OPECODE;

	if (__predict_true(!ISSET(word2, PIP_WQE_WORD2_NOIP_RE)))
		return 0;

	/* this error is harmless */
	if (opecode == PIP_OVER_ERR)
		return 0;

	return 1;
}

#if 0 /* not used */
int
octeon_eth_recv_check_jumbo(struct octeon_eth_softc *sc, uint64_t word2)
{
	if (__predict_false((word2 & PIP_WQE_WORD2_IP_BUFS) > (1ULL << 56)))
		return 1;
	return 0;
}
#endif

int
octeon_eth_recv_check_link(struct octeon_eth_softc *sc, uint64_t word2)
{
	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port)))
		return 1;
	return 0;
}
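
/*
 * Sanity-check a received packet before building an mbuf for it: drop
 * everything while the link is down and drop frames the PIP flagged with
 * a receive error, with rate-limited logging so a flood of bad frames
 * cannot spam the console.
 */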
int
octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
{
	if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
		if (ratecheck(&sc->sc_rate_recv_check_link_last,
		    &sc->sc_rate_recv_check_link_cap))
			log(LOG_DEBUG,
			    "%s: link is not up, the packet was dropped\n",
			    sc->sc_dev.dv_xname);
		return 1;
	}

#if 0 /* XXX Performance tuning (Jumbo-frame is not supported yet!) */
	if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
		/* XXX jumbo frame */
		if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
		    &sc->sc_rate_recv_check_jumbo_cap))
			log(LOG_DEBUG,
			    "jumbo frame was received\n");
		return 1;
	}
#endif

	if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
		if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
		    PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
			/* no logging */
			/* XXX increment special error count */
		} else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
		    PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
			/* not an error; caused by overload */
		} else {
			if (ratecheck(&sc->sc_rate_recv_check_code_last,
			    &sc->sc_rate_recv_check_code_cap))
				log(LOG_WARNING,
				    "%s: a reception error occurred, "
				    "the packet was dropped (error code = %lld)\n",
				    sc->sc_dev.dv_xname,
				    word2 & PIP_WQE_WORD2_NOIP_OPECODE);
		}
		return 1;
	}

	return 0;
}

int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (!(ifp->if_flags & IFF_RUNNING))
		goto drop;

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, &m->m_pkthdr.csum_flags);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	return 0;

drop:
	octeon_eth_buf_free_work(sc, work, word2);
	return 1;
}

void
octeon_eth_recv_intr(void *data, uint64_t *work)
{
	struct octeon_eth_softc *sc;
	int port;

	OCTEON_ETH_KASSERT(work != NULL);

	port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;

	OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);

	sc = octeon_eth_gsc[port];

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(port == sc->sc_port);

	/* XXX process all work queue entries anyway */

	(void)octeon_eth_recv(sc, work);
}

/* ---- tick */

/*
 * octeon_eth_tick_free
 *
 * => garbage collect send gather buffer / mbuf
 * => called at softclock
 */
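/*
 * The rescheduling interval below shrinks as more mbufs with external
 * storage sit on the send queue (sc_ext_callback_cnt), apparently so that
 * borrowed buffers are reclaimed sooner when many of them are in flight.
 */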
void
octeon_eth_tick_free(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	int timo;
	int s;

	s = splnet();
	/* XXX */
	if (ml_len(&sc->sc_sendq) > 0) {
		octeon_eth_send_queue_flush_prefetch(sc);
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}
	/* XXX */

	/* XXX ??? */
	timo = hz - (100 * sc->sc_ext_callback_cnt);
	if (timo < 10)
		timo = 10;
	timeout_add_msec(&sc->sc_tick_free_ch, 1000 * timo / hz);
	/* XXX */
	splx(s);
}

/*
 * octeon_eth_tick_misc
 *
 * => collect statistics
 * => check link status
 * => called at softclock
 */
void
octeon_eth_tick_misc(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	struct ifnet *ifp;
	u_quad_t iqdrops, delta;
	int s;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;

	iqdrops = ifp->if_iqdrops;
	cn30xxgmx_stats(sc->sc_gmx_port);
#ifdef OCTEON_ETH_DEBUG
	delta = ifp->if_iqdrops - iqdrops;
	printf("%s: %qu packets dropped at GMX FIFO\n",
	    ifp->if_xname, delta);
#endif
	cn30xxpip_stats(sc->sc_pip, ifp, sc->sc_port);
	delta = ifp->if_iqdrops - iqdrops;
#ifdef OCTEON_ETH_DEBUG
	printf("%s: %qu packets dropped at PIP + GMX FIFO\n",
	    ifp->if_xname, delta);
#endif

	mii_tick(&sc->sc_mii);

#ifdef OCTEON_ETH_FIXUP_ODD_NIBBLE_DYNAMIC
	if (sc->sc_gmx_port->sc_proc_nibble_by_soft &&
	    sc->sc_gmx_port->sc_even_nibble_cnt > PROC_NIBBLE_SOFT_THRESHOLD) {
#ifdef OCTEON_ETH_DEBUG
		log(LOG_DEBUG, "%s: even nibble preamble count %d\n",
		    sc->sc_dev.dv_xname, sc->sc_gmx_port->sc_even_nibble_cnt);
#endif
		if (OCTEON_ETH_FIXUP_ODD_NIBBLE_MODEL_P(sc) &&
		    OCTEON_ETH_FIXUP_ODD_NIBBLE_DYNAMIC_SPEED_P(sc->sc_gmx_port, ifp)) {
			log(LOG_NOTICE,
			    "%s: the preamble processing switched to hardware\n",
			    sc->sc_dev.dv_xname);
		}
		sc->sc_gmx_port->sc_proc_nibble_by_soft = 0;
		octeon_eth_mii_statchg((struct device *)sc);
		sc->sc_gmx_port->sc_even_nibble_cnt = 0;
	}
#endif
	splx(s);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
}