/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp_var.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

static struct protosw ddp_protosw;
static struct pr_usrreqs ddp_usrreqs;

static struct protosw ddp6_protosw;
static struct pr_usrreqs ddp6_usrreqs;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void queue_tid_release(struct adapter *, int);
static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static int add_lip(struct adapter *, struct in6_addr *);
static int delete_lip(struct adapter *, struct in6_addr *);
static struct clip_entry *search_lip(struct tom_data *, struct in6_addr *);
static void init_clip_table(struct adapter *, struct tom_data *);
static void update_clip(struct adapter *, void *);
static void t4_clip_task(void *, int);
static void update_clip_table(struct adapter *, struct tom_data *);
static void destroy_clip_table(struct adapter *, struct tom_data *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);

static int in6_ifaddr_gen;
static eventhandler_tag ifaddr_evhandler;
static struct timeout_task clip_task;

struct toepcb *
alloc_toepcb(struct port_info *pi, int txqid, int rxqid, int flags)
{
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to
	 * worry about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
	 * immediate payload, and firmware counts tx work request credits in
	 * units of 16 bytes.  Calculate the maximum number of work requests
	 * possible.
	 */
	txsd_total = tx_credits /
	    howmany((sizeof(struct fw_ofld_tx_data_wr) + 1), 16);

	if (txqid < 0)
		txqid = (arc4random() % pi->nofldtxq) + pi->first_ofld_txq;
	KASSERT(txqid >= pi->first_ofld_txq &&
	    txqid < pi->first_ofld_txq + pi->nofldtxq,
	    ("%s: txqid %d for port %p (first %d, n %d)", __func__, txqid, pi,
	    pi->first_ofld_txq, pi->nofldtxq));

	if (rxqid < 0)
		rxqid = (arc4random() % pi->nofldrxq) + pi->first_ofld_rxq;
	KASSERT(rxqid >= pi->first_ofld_rxq &&
	    rxqid < pi->first_ofld_rxq + pi->nofldrxq,
	    ("%s: rxqid %d for port %p (first %d, n %d)", __func__, rxqid, pi,
	    pi->first_ofld_rxq, pi->nofldrxq));

	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	toep->td = sc->tom_softc;
	toep->port = pi;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;

	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	free(toep, M_CXGBE);
}
/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		if (inp->inp_vflag & INP_IPV6)
			so->so_proto = &ddp6_protosw;
		else
			so->so_proto = &ddp_protosw;
	}
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		release_lip(td, toep->ce);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}
/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook
 * the toepcb hanging off of it.  If the TOE driver is also done with the
 * toepcb (no pending CPL) then it is time to release all resources tied to
 * the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS, V_TF_NAGLE(1),
		    V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1));
		break;
	default:
		break;
	}
}

/*
 * The TOE driver will not receive any more CPLs for the tid associated with
 * the toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	toep->inp = NULL;
	toep->flags &= ~TPF_CPL_PENDING;

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
}

void
insert_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = ctx;
	atomic_add_int(&t->tids_in_use, 1);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = ctx;
}

void
remove_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid] = NULL;
	atomic_subtract_int(&t->tids_in_use, 1);
}

void
release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq)
{
	struct wrqe *wr;
	struct cpl_tid_release *req;

	wr = alloc_wrqe(sizeof(*req), ctrlq);
	if (wr == NULL) {
		queue_tid_release(sc, tid);	/* defer */
		return;
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);

	t4_wrq_tx(sc, wr);
}

static void
queue_tid_release(struct adapter *sc, int tid)
{

	CXGBE_UNIMPLEMENTED("deferred tid release");
}
/*
 * What mtu_idx to use, given a 4-tuple and/or an MSS cap.
 */
int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc, int pmss)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, n;

	KASSERT(inc != NULL || pmss > 0,
	    ("%s: at least one of inc/pmss must be specified", __func__));

	mss = inc ? tcp_mssopt(inc) : pmss;
	if (pmss > 0 && mss > pmss)
		mss = pmss;

	if (inc->inc_flags & INC_ISIPV6)
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mss + n; i++)
		continue;

	return (i);
}

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return min(wnd, MAX_RCV_WND);
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}

extern int always_keepalive;
#define VIID_SMACIDX(v)	(((unsigned int)(v) & 0x7f) << 1)

/*
 * socket so could be a listening socket too.
 */
uint64_t
calc_opt0(struct socket *so, struct port_info *pi, struct l2t_entry *e,
    int mtu_idx, int rscale, int rx_credits, int ulp_mode)
{
	uint64_t opt0;

	KASSERT(rx_credits <= M_RCV_BUFSIZ,
	    ("%s: rcv_bufsiz too high", __func__));

	opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
	    V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits);

	if (so != NULL) {
		struct inpcb *inp = sotoinpcb(so);
		struct tcpcb *tp = intotcpcb(inp);
		int keepalive = always_keepalive ||
		    so_options_get(so) & SO_KEEPALIVE;

		opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
		opt0 |= V_KEEP_ALIVE(keepalive != 0);
	}

	if (e != NULL)
		opt0 |= V_L2T_IDX(e->idx);

	if (pi != NULL) {
		opt0 |= V_SMAC_SEL(VIID_SMACIDX(pi->viid));
		opt0 |= V_TX_CHAN(pi->tx_chan);
	}

	return htobe64(opt0);
}
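/*
 * Compressed filter tuple for a connection, built from the L2 entry and the
 * VI.  Only the fields enabled in the chip's TP parameters (VLAN, physical
 * port, protocol, VNIC) are filled in.  T4 uses a 32-bit tuple; later chips
 * use the 64-bit FILTER_TUPLE format.
 */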
uint64_t
select_ntuple(struct port_info *pi, struct l2t_entry *e)
{
	struct adapter *sc = pi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint16_t viid = pi->viid;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && e->vlan != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0) {
		uint32_t vf = G_FW_VIID_VIN(viid);
		uint32_t pf = G_FW_VIID_PFN(viid);
		uint32_t vld = G_FW_VIID_VIVLD(viid);

		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) |
		    V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}

void
set_tcpddp_ulp_mode(struct toepcb *toep)
{

	toep->ulp_mode = ULP_MODE_TCPDDP;
	toep->ddp_flags = DDP_OK;
	toep->ddp_score = DDP_LOW_SCORE;
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tabs(struct tid_info *t)
{
	size_t size;
	unsigned int i;

	size = t->ntids * sizeof(*t->tid_tab) +
	    t->natids * sizeof(*t->atid_tab) +
	    t->nstids * sizeof(*t->stid_tab);

	t->tid_tab = malloc(size, M_CXGBE, M_ZERO | M_NOWAIT);
	if (t->tid_tab == NULL)
		return (ENOMEM);

	mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->afree = t->atid_tab;
	t->atids_in_use = 0;
	for (i = 1; i < t->natids; i++)
		t->atid_tab[i - 1].next = &t->atid_tab[i];
	t->atid_tab[t->natids - 1].next = NULL;

	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stid_tab = (struct listen_ctx **)&t->atid_tab[t->natids];
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tabs(struct tid_info *t)
{
	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));
	KASSERT(t->atids_in_use == 0,
	    ("%s: %d atids still in use.", __func__, t->atids_in_use));
	KASSERT(t->stids_in_use == 0,
	    ("%s: %d stids still in use.", __func__, t->stids_in_use));

	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;

	if (mtx_initialized(&t->atid_lock))
		mtx_destroy(&t->atid_lock);
	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
}
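/*
 * IPv6 CLIP (local IP) table manipulation.  The firmware keeps a table of
 * the local IPv6 addresses that offloaded connections may use; add_lip and
 * delete_lip issue FW_CLIP_CMD to install or remove an entry, and the
 * driver's clip_table list mirrors it with reference counts.
 */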
static int
add_lip(struct adapter *sc, struct in6_addr *lip)
{
	struct fw_clip_cmd c;

	ASSERT_SYNCHRONIZED_OP(sc);
	/* mtx_assert(&td->clip_table_lock, MA_OWNED); */

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
	c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
	c.ip_lo = *(uint64_t *)&lip->s6_addr[8];

	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
}

static int
delete_lip(struct adapter *sc, struct in6_addr *lip)
{
	struct fw_clip_cmd c;

	ASSERT_SYNCHRONIZED_OP(sc);
	/* mtx_assert(&td->clip_table_lock, MA_OWNED); */

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_READ);
	c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
	c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
	c.ip_lo = *(uint64_t *)&lip->s6_addr[8];

	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
}

static struct clip_entry *
search_lip(struct tom_data *td, struct in6_addr *lip)
{
	struct clip_entry *ce;

	mtx_assert(&td->clip_table_lock, MA_OWNED);

	TAILQ_FOREACH(ce, &td->clip_table, link) {
		if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
			return (ce);
	}

	return (NULL);
}

struct clip_entry *
hold_lip(struct tom_data *td, struct in6_addr *lip)
{
	struct clip_entry *ce;

	mtx_lock(&td->clip_table_lock);
	ce = search_lip(td, lip);
	if (ce != NULL)
		ce->refcount++;
	mtx_unlock(&td->clip_table_lock);

	return (ce);
}

void
release_lip(struct tom_data *td, struct clip_entry *ce)
{

	mtx_lock(&td->clip_table_lock);
	KASSERT(search_lip(td, &ce->lip) == ce,
	    ("%s: CLIP entry %p not in CLIP table.", __func__, ce));
	KASSERT(ce->refcount > 0,
	    ("%s: CLIP entry %p has refcount 0", __func__, ce));
	--ce->refcount;
	mtx_unlock(&td->clip_table_lock);
}

static void
init_clip_table(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	mtx_init(&td->clip_table_lock, "CLIP table lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->clip_table);
	td->clip_gen = -1;

	update_clip_table(sc, td);
}

static void
update_clip(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomuc"))
		return;

	if (uld_active(sc, ULD_TOM))
		update_clip_table(sc, sc->tom_softc);

	end_synchronized_op(sc, LOCK_HELD);
}

static void
t4_clip_task(void *arg, int count)
{

	t4_iterate(update_clip, NULL);
}
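/*
 * Bring the driver's CLIP table (and the hardware's) in sync with the
 * kernel's list of local IPv6 addresses (V_in6_ifaddrhead).  Addresses that
 * disappeared are deleted once nothing references them; addresses still in
 * use stay in the table.
 */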
static void
update_clip_table(struct adapter *sc, struct tom_data *td)
{
	struct rm_priotracker in6_ifa_tracker;
	struct in6_ifaddr *ia;
	struct in6_addr *lip, tlip;
	struct clip_head stale;
	struct clip_entry *ce, *ce_temp;
	int rc, gen = atomic_load_acq_int(&in6_ifaddr_gen);

	ASSERT_SYNCHRONIZED_OP(sc);

	IN6_IFADDR_RLOCK(&in6_ifa_tracker);
	mtx_lock(&td->clip_table_lock);

	if (gen == td->clip_gen)
		goto done;

	TAILQ_INIT(&stale);
	TAILQ_CONCAT(&stale, &td->clip_table, link);

	TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
		lip = &ia->ia_addr.sin6_addr;

		KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
		    ("%s: mcast address in in6_ifaddr list", __func__));

		if (IN6_IS_ADDR_LOOPBACK(lip))
			continue;
		if (IN6_IS_SCOPE_EMBED(lip)) {
			/* Remove the embedded scope */
			tlip = *lip;
			lip = &tlip;
			in6_clearscope(lip);
		}
		/*
		 * XXX: how to weed out the link local address for the loopback
		 * interface?  It's fe80::1 usually (always?).
		 */

		/*
		 * If it's in the main list then we already know it's not
		 * stale.
		 */
		TAILQ_FOREACH(ce, &td->clip_table, link) {
			if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
				goto next;
		}

		/*
		 * If it's in the stale list we should move it to the main
		 * list.
		 */
		TAILQ_FOREACH(ce, &stale, link) {
			if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
				TAILQ_REMOVE(&stale, ce, link);
				TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
				goto next;
			}
		}

		/* A new IP6 address; add it to the CLIP table */
		ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT);
		if (ce == NULL)
			goto next;	/* pick it up on the next update */
		memcpy(&ce->lip, lip, sizeof(ce->lip));
		ce->refcount = 0;
		rc = add_lip(sc, lip);
		if (rc == 0)
			TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
		else {
			char ip[INET6_ADDRSTRLEN];

			inet_ntop(AF_INET6, &ce->lip, &ip[0], sizeof(ip));
			log(LOG_ERR, "%s: could not add %s (%d)\n",
			    __func__, ip, rc);
			free(ce, M_CXGBE);
		}
next:
		continue;
	}

	/*
	 * Remove stale addresses (those no longer in V_in6_ifaddrhead) that
	 * are no longer referenced by the driver.
	 */
	TAILQ_FOREACH_SAFE(ce, &stale, link, ce_temp) {
		if (ce->refcount == 0) {
			rc = delete_lip(sc, &ce->lip);
			if (rc == 0) {
				TAILQ_REMOVE(&stale, ce, link);
				free(ce, M_CXGBE);
			} else {
				char ip[INET6_ADDRSTRLEN];

				inet_ntop(AF_INET6, &ce->lip, &ip[0],
				    sizeof(ip));
				log(LOG_ERR, "%s: could not delete %s (%d)\n",
				    __func__, ip, rc);
			}
		}
	}
	/* The ones that are still referenced need to stay in the CLIP table */
	TAILQ_CONCAT(&td->clip_table, &stale, link);

	td->clip_gen = gen;
done:
	mtx_unlock(&td->clip_table_lock);
	IN6_IFADDR_RUNLOCK(&in6_ifa_tracker);
}

static void
destroy_clip_table(struct adapter *sc, struct tom_data *td)
{
	struct clip_entry *ce, *ce_temp;

	if (mtx_initialized(&td->clip_table_lock)) {
		mtx_lock(&td->clip_table_lock);
		TAILQ_FOREACH_SAFE(ce, &td->clip_table, link, ce_temp) {
			KASSERT(ce->refcount == 0,
			    ("%s: CLIP entry %p still in use (%d)", __func__,
			    ce, ce->refcount));
			TAILQ_REMOVE(&td->clip_table, ce, link);
			delete_lip(sc, &ce->lip);
			free(ce, M_CXGBE);
		}
		mtx_unlock(&td->clip_table_lock);
		mtx_destroy(&td->clip_table_lock);
	}
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	t4_uninit_l2t_cpl_handlers(sc);
	t4_uninit_cpl_io_handlers(sc);
	t4_uninit_ddp(sc, td);
	destroy_clip_table(sc, td);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}
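/*
 * Clean up work requests that could not be sent (e.g. because L2 resolution
 * failed).  Active opens are failed back via act_open_failure_cleanup();
 * anything else is logged and intentionally leaked so it can be examined
 * with a debugger.
 */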
static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid;
	struct wrqe *wr;
	struct adapter *sc;

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			sc = td_adapter(td);

			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger. */
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	int i, rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	/* DDP page pods and CPL handlers */
	t4_init_ddp(sc, td);

	/* CLIP table for IPv6 offload */
	init_clip_table(sc, td);

	/* CPL handlers */
	t4_init_connect_cpl_handlers(sc);
	t4_init_l2t_cpl_handlers(sc);
	t4_init_listen_cpl_handlers(sc);
	t4_init_cpl_io_handlers(sc);

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;

	for_each_port(sc, i)
		TOEDEV(sc->port[i]->ifp) = &td->tod;

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}
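/*
 * Reverse of t4_tom_activate.  Returns EBUSY while any port still has TOE
 * enabled, another ULD (iWARP/iSCSI) depends on the TOE, or offloaded
 * connections, listeners, or unsent work requests remain.
 */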
static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static void
t4_tom_ifaddr_event(void *arg __unused, struct ifnet *ifp)
{

	atomic_add_rel_int(&in6_ifaddr_gen, 1);
	taskqueue_enqueue_timeout(taskqueue_thread, &clip_task, -hz / 4);
}

static int
t4_tom_mod_load(void)
{
	int rc;
	struct protosw *tcp_protosw, *tcp6_protosw;

	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
	if (tcp_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp_protosw, &ddp_protosw, sizeof(ddp_protosw));
	bcopy(tcp_protosw->pr_usrreqs, &ddp_usrreqs, sizeof(ddp_usrreqs));
	ddp_usrreqs.pru_soreceive = t4_soreceive_ddp;
	ddp_protosw.pr_usrreqs = &ddp_usrreqs;

	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
	if (tcp6_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp6_protosw, &ddp6_protosw, sizeof(ddp6_protosw));
	bcopy(tcp6_protosw->pr_usrreqs, &ddp6_usrreqs, sizeof(ddp6_usrreqs));
	ddp6_usrreqs.pru_soreceive = t4_soreceive_ddp;
	ddp6_protosw.pr_usrreqs = &ddp6_usrreqs;

	TIMEOUT_TASK_INIT(taskqueue_thread, &clip_task, 0, t4_clip_task, NULL);
	ifaddr_evhandler = EVENTHANDLER_REGISTER(ifaddr_event,
	    t4_tom_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);

	rc = t4_register_uld(&tom_uld_info);
	if (rc != 0)
		t4_tom_mod_unload();

	return (rc);
}

static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	if (ifaddr_evhandler) {
		EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_evhandler);
		taskqueue_cancel_timeout(taskqueue_thread, &clip_task, NULL);
	}

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);