/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
#include "tom/t4_tls.h"

static struct protosw toe_protosw;
static struct pr_usrreqs toe_usrreqs;

static struct protosw toe6_protosw;
static struct pr_usrreqs toe6_usrreqs;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);

struct toepcb *
alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to
	 * worry about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
	 * immediate payload, and firmware counts tx work request credits in
	 * units of 16 bytes.  Calculate the maximum work requests possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);
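	/*
	 * Illustrative arithmetic only (the sizes here are assumptions, not
	 * values read from the hardware): with a 16-byte fw_ofld_tx_data_wr
	 * the shortest WR is 17 bytes and costs howmany(17, 16) = 2 credits,
	 * so a queue with 512 credits (less the ABORT_REQ reservation) would
	 * allow roughly 255 outstanding tx descriptors.
	 */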

	KASSERT(txqid >= vi->first_ofld_txq &&
	    txqid < vi->first_ofld_txq + vi->nofldtxq,
	    ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
	    vi->first_ofld_txq, vi->nofldtxq));

	KASSERT(rxqid >= vi->first_ofld_rxq &&
	    rxqid < vi->first_ofld_rxq + vi->nofldrxq,
	    ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
	    vi->first_ofld_rxq, vi->nofldrxq));

	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tc_idx = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);

	return (toep);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

	refcount_acquire(&toep->refcount);
	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_uninit_toep(toep);
	tls_uninit_toep(toep);
	free(toep, M_CXGBE);
}

/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (inp->inp_vflag & INP_IPV6)
		so->so_proto = &toe6_protosw;
	else
		so->so_proto = &toe_protosw;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	/*
	 * These queues should have been emptied at approximately the same time
	 * that a normal connection's socket's so_snd would have been purged or
	 * drained.  Do _not_ clean up here.
	 */
	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_assert_empty(toep);
#endif

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid, toep->ce ? 2 : 1);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		t4_release_lip(sc, toep->ce);

	if (toep->tc_idx != -1)
		t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->tc_idx);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}

/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook
 * the toepcb hanging off of it.  If the TOE driver is also done with the
 * toepcb (no pending CPL) then it is time to release all resources tied to
 * the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		if (tp->t_state != TCPS_ESTABLISHED)
			break;
		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1),
		    0, 0);
		break;
	default:
		break;
	}
}

static inline uint64_t
get_tcb_tflags(const uint64_t *tcb)
{

	return ((be64toh(tcb[14]) << 32) | (be64toh(tcb[15]) >> 32));
}

static inline uint32_t
get_tcb_field(const uint64_t *tcb, u_int word, uint32_t mask, u_int shift)
{
#define LAST_WORD ((TCB_SIZE / 4) - 1)
	uint64_t t1, t2;
	int flit_idx;

	MPASS(mask != 0);
	MPASS(word <= LAST_WORD);
	MPASS(shift < 32);

	flit_idx = (LAST_WORD - word) / 2;
	if (word & 0x1)
		shift += 32;
	t1 = be64toh(tcb[flit_idx]) >> shift;
	t2 = 0;
	if (fls(mask) > 64 - shift) {
		/*
		 * Will spill over into the next logical flit, which is the
		 * flit before this one.  The flit_idx before this one must be
		 * valid.
		 */
		MPASS(flit_idx > 0);
		t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
	}
	return ((t2 | t1) & mask);
#undef LAST_WORD
}
#define GET_TCB_FIELD(tcb, F) \
    get_tcb_field(tcb, W_TCB_##F, M_TCB_##F, S_TCB_##F)
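
/*
 * Worked example for the flit math above (assuming TCB_SIZE is 128, so the
 * TCB has 32 4-byte words in 16 flits and LAST_WORD is 31; the word indices
 * here are illustrative, not taken from t4_tcb.h): a field at word 4 lives
 * in flit_idx = (31 - 4) / 2 = 13, in the low 32 bits of that flit because
 * the word index is even, while word 5 lands in the high 32 bits of the same
 * flit (shift += 32).  Callers normally go through the wrapper, e.g.
 * GET_TCB_FIELD(tcb, T_STATE).
 */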

/*
 * Issues a CPL_GET_TCB to read the entire TCB for the tid.
 */
static int
send_get_tcb(struct adapter *sc, u_int tid)
{
	struct cpl_get_tcb *cpl;
	struct wrq_cookie cookie;

	MPASS(tid < sc->tids.ntids);

	cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16),
	    &cookie);
	if (__predict_false(cpl == NULL))
		return (ENOMEM);
	bzero(cpl, sizeof(*cpl));
	INIT_TP_WR(cpl, tid);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
	cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
	    V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
	cpl->cookie = 0xff;
	commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);

	return (0);
}

static struct tcb_histent *
alloc_tcb_histent(struct adapter *sc, u_int tid, int flags)
{
	struct tcb_histent *te;

	MPASS(flags == M_NOWAIT || flags == M_WAITOK);

	te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags);
	if (te == NULL)
		return (NULL);
	mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF);
	callout_init_mtx(&te->te_callout, &te->te_lock, 0);
	te->te_adapter = sc;
	te->te_tid = tid;

	return (te);
}

static void
free_tcb_histent(struct tcb_histent *te)
{

	mtx_destroy(&te->te_lock);
	free(te, M_CXGBE);
}

/*
 * Start tracking the tid in the TCB history.
 */
int
add_tid_to_history(struct adapter *sc, u_int tid)
{
	struct tcb_histent *te = NULL;
	struct tom_data *td = sc->tom_softc;
	int rc;

	MPASS(tid < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (ENXIO);

	rw_wlock(&td->tcb_history_lock);
	if (td->tcb_history[tid] != NULL) {
		rc = EEXIST;
		goto done;
	}
	te = alloc_tcb_histent(sc, tid, M_NOWAIT);
	if (te == NULL) {
		rc = ENOMEM;
		goto done;
	}
	mtx_lock(&te->te_lock);
	rc = send_get_tcb(sc, tid);
	if (rc == 0) {
		te->te_flags |= TE_RPL_PENDING;
		td->tcb_history[tid] = te;
	}
	mtx_unlock(&te->te_lock);
	if (rc != 0)
		free_tcb_histent(te);
done:
	rw_wunlock(&td->tcb_history_lock);
	return (rc);
}

static void
remove_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	rw_assert(&td->tcb_history_lock, RA_WLOCKED);
	mtx_assert(&te->te_lock, MA_OWNED);
	MPASS(td->tcb_history[te->te_tid] == te);

	td->tcb_history[te->te_tid] = NULL;
	free_tcb_histent(te);
	rw_wunlock(&td->tcb_history_lock);
}
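
/*
 * Look up the histent for a tid.  'addrem' says whether the caller intends
 * to modify the table (and so needs the write lock) or only to read the
 * entry.  On a hit this returns with the history lock and the entry's mutex
 * both held; remove_tcb_histent and release_tcb_histent are the matching
 * ways to drop them.
 */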
static inline struct tcb_histent *
lookup_tcb_histent(struct adapter *sc, u_int tid, bool addrem)
{
	struct tcb_histent *te;
	struct tom_data *td = sc->tom_softc;

	MPASS(tid < sc->tids.ntids);

	if (addrem)
		rw_wlock(&td->tcb_history_lock);
	else
		rw_rlock(&td->tcb_history_lock);
	te = td->tcb_history[tid];
	if (te != NULL) {
		mtx_lock(&te->te_lock);
		return (te);	/* with both locks held */
	}
	if (addrem)
		rw_wunlock(&td->tcb_history_lock);
	else
		rw_runlock(&td->tcb_history_lock);

	return (te);
}

static inline void
release_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	mtx_assert(&te->te_lock, MA_OWNED);
	mtx_unlock(&te->te_lock);
	rw_assert(&td->tcb_history_lock, RA_RLOCKED);
	rw_runlock(&td->tcb_history_lock);
}

static void
request_tcb(void *arg)
{
	struct tcb_histent *te = arg;

	mtx_assert(&te->te_lock, MA_OWNED);

	/* No one else is supposed to update the histent. */
	MPASS(!(te->te_flags & TE_RPL_PENDING));
	if (send_get_tcb(te->te_adapter, te->te_tid) == 0)
		te->te_flags |= TE_RPL_PENDING;
	else
		callout_schedule(&te->te_callout, hz / 100);
}
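
/*
 * Fold the interesting bits of a TCB snapshot into the entry's sample ring.
 * te_sample is a circular buffer of congestion-state flags; with the callout
 * in do_get_tcb_rpl rescheduling every hz / 10 ticks, each sample covers
 * roughly 100ms of the connection's life.
 */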
static void
update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb)
{
	struct tom_data *td = te->te_adapter->tom_softc;
	uint64_t tflags = get_tcb_tflags(tcb);
	uint8_t sample = 0;

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != GET_TCB_FIELD(tcb, SND_UNA_RAW)) {
		if (GET_TCB_FIELD(tcb, T_RXTSHIFT) != 0)
			sample |= TS_RTO;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) != 0)
			sample |= TS_DUPACKS;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold)
			sample |= TS_FASTREXMT;
	}

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != 0) {
		uint32_t snd_wnd;

		sample |= TS_SND_BACKLOGGED;	/* for whatever reason. */

		snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
		if (tflags & V_TF_RECV_SCALE(1))
			snd_wnd <<= GET_TCB_FIELD(tcb, RCV_SCALE);
		if (GET_TCB_FIELD(tcb, SND_CWND) < snd_wnd)
			sample |= TS_CWND_LIMITED;	/* maybe due to CWND */
	}

	if (tflags & V_TF_CCTRL_ECN(1)) {

		/*
		 * CE marker on incoming IP hdr, echoing ECE back in the TCP
		 * hdr.  Indicates congestion somewhere on the way from the
		 * peer to this node.
		 */
		if (tflags & V_TF_CCTRL_ECE(1))
			sample |= TS_ECN_ECE;

		/*
		 * ECE seen and CWR sent (or about to be sent).  Might indicate
		 * congestion on the way to the peer.  This node is reducing
		 * its congestion window in response.
		 */
		if (tflags & (V_TF_CCTRL_CWR(1) | V_TF_CCTRL_RFR(1)))
			sample |= TS_ECN_CWR;
	}

	te->te_sample[te->te_pidx] = sample;
	if (++te->te_pidx == nitems(te->te_sample))
		te->te_pidx = 0;
	memcpy(te->te_tcb, tcb, TCB_SIZE);
	te->te_flags |= TE_ACTIVE;
}

static int
do_get_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_get_tcb_rpl *cpl = mtod(m, const void *);
	const uint64_t *tcb = (const uint64_t *)(const void *)(cpl + 1);
	struct tcb_histent *te;
	const u_int tid = GET_TID(cpl);
	bool remove;

	remove = GET_TCB_FIELD(tcb, T_STATE) == TCPS_CLOSED;
	te = lookup_tcb_histent(sc, tid, remove);
	if (te == NULL) {
		/* Not in the history.  Who issued the GET_TCB for this? */
		device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, "
		    "srtt %u, sscale %u, rscale %u, cookie 0x%x\n", tid,
		    (uintmax_t)get_tcb_tflags(tcb), GET_TCB_FIELD(tcb, T_STATE),
		    GET_TCB_FIELD(tcb, T_SRTT), GET_TCB_FIELD(tcb, SND_SCALE),
		    GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie);
		goto done;
	}

	MPASS(te->te_flags & TE_RPL_PENDING);
	te->te_flags &= ~TE_RPL_PENDING;
	if (remove) {
		remove_tcb_histent(te);
	} else {
		update_tcb_histent(te, tcb);
		callout_reset(&te->te_callout, hz / 10, request_tcb, te);
		release_tcb_histent(te);
	}
done:
	m_freem(m);
	return (0);
}

static void
fill_tcp_info_from_tcb(struct adapter *sc, uint64_t *tcb, struct tcp_info *ti)
{
	uint32_t v;

	ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE);

	v = GET_TCB_FIELD(tcb, T_SRTT);
	ti->tcpi_rtt = tcp_ticks_to_us(sc, v);

	v = GET_TCB_FIELD(tcb, T_RTTVAR);
	ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);

	ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH);
	ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND);
	ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT);

	v = GET_TCB_FIELD(tcb, TX_MAX);
	ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW);

	/* Receive window being advertised by us. */
	ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE);	/* Yes, SND. */
	ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND);

	/* Send window */
	ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE);	/* Yes, RCV. */
	ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
	if (get_tcb_tflags(tcb) & V_TF_RECV_SCALE(1))
		ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale;
	else
		ti->tcpi_snd_wscale = 0;
}

static void
fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te,
    struct tcp_info *ti)
{

	fill_tcp_info_from_tcb(sc, te->te_tcb, ti);
}

/*
 * Reads the TCB for the given tid using a memory window and copies it to 'buf'
 * in the same format as CPL_GET_TCB_RPL.
 */
static void
read_tcb_using_memwin(struct adapter *sc, u_int tid, uint64_t *buf)
{
	int i, j, k, rc;
	uint32_t addr;
	u_char *tcb, tmp;

	MPASS(tid < sc->tids.ntids);

	addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
	rc = read_via_memwin(sc, 2, addr, (uint32_t *)buf, TCB_SIZE);
	if (rc != 0)
		return;

	tcb = (u_char *)buf;
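	/*
	 * A TCB read through the memory window comes back in raw memory
	 * order, which is the reverse (in 16-byte chunks) of the layout used
	 * by CPL_GET_TCB_RPL.  Swap the chunks end-for-end so both paths hand
	 * identical buffers to the parsing code.
	 */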
	for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
		for (k = 0; k < 16; k++) {
			tmp = tcb[i + k];
			tcb[i + k] = tcb[j + k];
			tcb[j + k] = tmp;
		}
	}
}

static void
fill_tcp_info(struct adapter *sc, u_int tid, struct tcp_info *ti)
{
	uint64_t tcb[TCB_SIZE / sizeof(uint64_t)];
	struct tcb_histent *te;

	ti->tcpi_toe_tid = tid;
	te = lookup_tcb_histent(sc, tid, false);
	if (te != NULL) {
		fill_tcp_info_from_history(sc, te, ti);
		release_tcb_histent(te);
	} else {
		if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) {
			/* XXX: tell firmware to flush TCB cache. */
		}
		read_tcb_using_memwin(sc, tid, tcb);
		fill_tcp_info_from_tcb(sc, tcb, ti);
	}
}

/*
 * Called by the kernel to allow the TOE driver to "refine" values filled in
 * the tcp_info for an offloaded connection.
 */
static void
t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	MPASS(ti != NULL);

	fill_tcp_info(sc, toep->tid, ti);
}

/*
 * The TOE driver will not receive any more CPLs for the tid associated with
 * the toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);
	toep->inp = NULL;
	toep->flags &= ~TPF_CPL_PENDING;
	mbufq_drain(&toep->ulp_pdu_reclaimq);

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	MPASS(tid >= t->tid_base);
	MPASS(tid - t->tid_base < t->ntids);

	t->tid_tab[tid - t->tid_base] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid - t->tid_base]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

/*
 * What mtu_idx to use, given a 4-tuple.  Note that both s->mss and tcp_mssopt
 * have the MSS that we should advertise in our SYN.  Advertised MSS doesn't
 * account for any TCP options so the effective MSS (only payload, no headers
 * or options) could be different.  We fill in tp->t_maxseg with the effective
 * MSS at the end of the 3-way handshake.
 */
int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
    struct offload_settings *s)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, mtu;

	MPASS(inc != NULL);

	mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
	if (inc->inc_flags & INC_ISIPV6)
		mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
		continue;

	return (i);
}
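
/*
 * Illustrative walk of the loop above (the MTU table contents are an
 * assumption; the real table comes from the firmware): with mtus[] =
 * { 88, 256, ..., 1500, 9000 } and an advertised MSS of 1460 on IPv4,
 * mtu works out to 1460 + 20 + 20 = 1500 and the scan stops at the entry
 * holding 1500, since the next entry (9000) is too large.
 */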

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return (min(wnd, MAX_RCV_WND));
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}
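
/*
 * For example (sb_max is a tunable, so the numbers are illustrative): a
 * 200000-byte window fits with wscale 2 because 65535 << 2 = 262140 covers
 * it while 65535 << 1 = 131070 does not; TCP_MAX_WINSHIFT caps the result.
 */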

/*
 * socket so could be a listening socket too.
 */
uint64_t
calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e,
    int mtu_idx, int rscale, int rx_credits, int ulp_mode,
    struct offload_settings *s)
{
	int keepalive;
	uint64_t opt0;

	MPASS(so != NULL);
	MPASS(vi != NULL);
	KASSERT(rx_credits <= M_RCV_BUFSIZ,
	    ("%s: rcv_bufsiz too high", __func__));

	opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
	    V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits) |
	    V_L2T_IDX(e->idx) | V_SMAC_SEL(vi->smt_idx) |
	    V_TX_CHAN(vi->pi->tx_chan);

	keepalive = tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE;
	opt0 |= V_KEEP_ALIVE(keepalive != 0);

	if (s->nagle < 0) {
		struct inpcb *inp = sotoinpcb(so);
		struct tcpcb *tp = intotcpcb(inp);

		opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
	} else
		opt0 |= V_NAGLE(s->nagle != 0);

	return (htobe64(opt0));
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
	struct adapter *sc = vi->pi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0 && tp->ingress_config & F_VNIC) {
		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
		    V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
		    tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}

static int
is_tls_sock(struct socket *so, struct adapter *sc)
{
	struct inpcb *inp = sotoinpcb(so);
	int i, rc;

	/* XXX: Eventually add a SO_WANT_TLS socket option perhaps? */
	rc = 0;
	ADAPTER_LOCK(sc);
	for (i = 0; i < sc->tt.num_tls_rx_ports; i++) {
		if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) ||
		    inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) {
			rc = 1;
			break;
		}
	}
	ADAPTER_UNLOCK(sc);
	return (rc);
}

int
select_ulp_mode(struct socket *so, struct adapter *sc,
    struct offload_settings *s)
{

	if (can_tls_offload(sc) &&
	    (s->tls > 0 || (s->tls < 0 && is_tls_sock(so, sc))))
		return (ULP_MODE_TLS);
	else if (s->ddp > 0 ||
	    (s->ddp < 0 && sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0))
		return (ULP_MODE_TCPDDP);
	else
		return (ULP_MODE_NONE);
}

void
set_ulp_mode(struct toepcb *toep, int ulp_mode)
{

	CTR4(KTR_CXGBE, "%s: toep %p (tid %d) ulp_mode %d",
	    __func__, toep, toep->tid, ulp_mode);
	toep->ulp_mode = ulp_mode;
	tls_init_toep(toep);
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_init_toep(toep);
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tab(struct tid_info *t, int flags)
{

	MPASS(t->ntids > 0);
	MPASS(t->tid_tab == NULL);

	t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->tid_tab == NULL)
		return (ENOMEM);
	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tab(struct tid_info *t)
{

	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));

	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;
}

static int
alloc_stid_tab(struct tid_info *t, int flags)
{

	MPASS(t->nstids > 0);
	MPASS(t->stid_tab == NULL);

	t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->stid_tab == NULL)
		return (ENOMEM);
	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	return (0);
}

static void
free_stid_tab(struct tid_info *t)
{

	KASSERT(t->stids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->stids_in_use));

	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
	free(t->stid_tab, M_CXGBE);
	t->stid_tab = NULL;
}

static void
free_tid_tabs(struct tid_info *t)
{

	free_tid_tab(t);
	free_atid_tab(t);
	free_stid_tab(t);
}

static int
alloc_tid_tabs(struct tid_info *t)
{
	int rc;

	rc = alloc_tid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_atid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_stid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	return (0);
failed:
	free_tid_tabs(t);
	return (rc);
}

static inline void
alloc_tcb_history(struct adapter *sc, struct tom_data *td)
{

	if (sc->tids.ntids == 0 || sc->tids.ntids > 1024)
		return;
	rw_init(&td->tcb_history_lock, "TCB history");
	td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history),
	    M_CXGBE, M_ZERO | M_NOWAIT);
	td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0));
}

static inline void
free_tcb_history(struct adapter *sc, struct tom_data *td)
{
#ifdef INVARIANTS
	int i;

	if (td->tcb_history != NULL) {
		for (i = 0; i < sc->tids.ntids; i++) {
			MPASS(td->tcb_history[i] == NULL);
		}
	}
#endif
	free(td->tcb_history, M_CXGBE);
	if (rw_initialized(&td->tcb_history_lock))
		rw_destroy(&td->tcb_history_lock);
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	t4_free_ppod_region(&td->pr);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tcb_history(sc, td);
	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}
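
/*
 * Build a minimal Ethernet/IP/TCP header-only packet for the given inpcb.
 * Nothing is ever transmitted from this buffer; lookup_offload_policy runs
 * the COP's BPF programs against it to classify active opens and listens,
 * which have no real packet to match on.
 */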
static char *
prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen,
    int *buflen)
{
	char *pkt;
	struct tcphdr *th;
	int ipv6, len;
	const int maxlen =
	    max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) +
	    max(sizeof(struct ip), sizeof(struct ip6_hdr)) +
	    sizeof(struct tcphdr);

	MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN);

	pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT);
	if (pkt == NULL)
		return (NULL);

	ipv6 = inp->inp_vflag & INP_IPV6;
	len = 0;

	if (EVL_VLANOFTAG(vtag) == 0xfff) {
		struct ether_header *eh = (void *)pkt;

		if (ipv6)
			eh->ether_type = htons(ETHERTYPE_IPV6);
		else
			eh->ether_type = htons(ETHERTYPE_IP);

		len += sizeof(*eh);
	} else {
		struct ether_vlan_header *evh = (void *)pkt;

		evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evh->evl_tag = htons(vtag);
		if (ipv6)
			evh->evl_proto = htons(ETHERTYPE_IPV6);
		else
			evh->evl_proto = htons(ETHERTYPE_IP);

		len += sizeof(*evh);
	}

	if (ipv6) {
		struct ip6_hdr *ip6 = (void *)&pkt[len];

		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_nxt = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = inp->in6p_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = ip6->ip6_src;
		}

		len += sizeof(*ip6);
	} else {
		struct ip *ip = (void *)&pkt[len];

		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_p = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = inp->inp_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = ip->ip_src;
		}

		len += sizeof(*ip);
	}

	th = (void *)&pkt[len];
	if (open_type == OPEN_TYPE_ACTIVE) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = inp->inp_fport;	/* ditto */
	} else if (open_type == OPEN_TYPE_LISTEN) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = th->th_sport;
	}
	len += sizeof(*th);

	*pktlen = *buflen = len;
	return (pkt);
}

const struct offload_settings *
lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m,
    uint16_t vtag, struct inpcb *inp)
{
	const struct t4_offload_policy *op;
	char *pkt;
	struct offload_rule *r;
	int i, matched, pktlen, buflen;
	static const struct offload_settings allow_offloading_settings = {
		.offload = 1,
		.rx_coalesce = -1,
		.cong_algo = -1,
		.sched_class = -1,
		.tstamp = -1,
		.sack = -1,
		.nagle = -1,
		.ecn = -1,
		.ddp = -1,
		.tls = -1,
		.txq = -1,
		.rxq = -1,
		.mss = -1,
	};
	static const struct offload_settings disallow_offloading_settings = {
		.offload = 0,
		/* rest is irrelevant when offload is off. */
	};

	rw_assert(&sc->policy_lock, RA_LOCKED);

	/*
	 * If there's no Connection Offloading Policy attached to the device
	 * then we need to return a default static policy.  If
	 * "cop_managed_offloading" is true, then we need to disallow
	 * offloading until a COP is attached to the device.  Otherwise we
	 * allow offloading ...
	 */
	op = sc->policy;
	if (op == NULL) {
		if (sc->tt.cop_managed_offloading)
			return (&disallow_offloading_settings);
		else
			return (&allow_offloading_settings);
	}

	switch (open_type) {
	case OPEN_TYPE_ACTIVE:
	case OPEN_TYPE_LISTEN:
		pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen);
		break;
	case OPEN_TYPE_PASSIVE:
		MPASS(m != NULL);
		pkt = mtod(m, char *);
		MPASS(*pkt == CPL_PASS_ACCEPT_REQ);
		pkt += sizeof(struct cpl_pass_accept_req);
		pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
		buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
		break;
	default:
		MPASS(0);
		return (&disallow_offloading_settings);
	}

	if (pkt == NULL || pktlen == 0 || buflen == 0)
		return (&disallow_offloading_settings);

	matched = 0;
	r = &op->rule[0];
	for (i = 0; i < op->nrules; i++, r++) {
		if (r->open_type != open_type &&
		    r->open_type != OPEN_TYPE_DONTCARE) {
			continue;
		}
		matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen);
		if (matched)
			break;
	}

	if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN)
		free(pkt, M_CXGBE);

	return (matched ? &r->settings : &disallow_offloading_settings);
}

static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid, tid;
	struct wrqe *wr;
	struct adapter *sc = td_adapter(td);

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		case CPL_PASS_ACCEPT_RPL:
			tid = GET_TID(cpl);
			CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
			synack_failure_cleanup(sc, tid);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger. */
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	struct vi_info *vi;
	int i, rc, v;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
	if (rc != 0)
		goto done;
	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

	alloc_tcb_history(sc, td);

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;
	tod->tod_tcp_info = t4_tcp_info;

	for_each_port(sc, i) {
		for_each_vi(sc->port[i], v, vi) {
			TOEDEV(vi->ifp) = &td->tod;
		}
	}

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}

static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error;

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		error = t4_aio_queue_ddp(so, job);
		if (error != EOPNOTSUPP)
			return (error);
	}

	return (t4_aio_queue_aiotx(so, job));
}

static int
t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
{

	if (sopt->sopt_level != IPPROTO_TCP)
		return (tcp_ctloutput(so, sopt));

	switch (sopt->sopt_name) {
	case TCP_TLSOM_SET_TLS_CONTEXT:
	case TCP_TLSOM_GET_TLS_TOM:
	case TCP_TLSOM_CLR_TLS_TOM:
	case TCP_TLSOM_CLR_QUIES:
		return (t4_ctloutput_tls(so, sopt));
	default:
		return (tcp_ctloutput(so, sopt));
	}
}

static int
t4_tom_mod_load(void)
{
	struct protosw *tcp_protosw, *tcp6_protosw;

	/* CPL handlers */
	t4_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl);
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2,
	    CPL_COOKIE_TOM);
	t4_init_connect_cpl_handlers();
	t4_init_listen_cpl_handlers();
	t4_init_cpl_io_handlers();

	t4_ddp_mod_load();
	t4_tls_mod_load();

	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
	if (tcp_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw));
	bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs));
	toe_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe_protosw.pr_usrreqs = &toe_usrreqs;

	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
	if (tcp6_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
	bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs));
	toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe6_protosw.pr_usrreqs = &toe6_usrreqs;

	return (t4_register_uld(&tom_uld_info));
}

static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	t4_tls_mod_unload();
	t4_ddp_mod_unload();

	t4_uninit_connect_cpl_handlers();
	t4_uninit_listen_cpl_handlers();
	t4_uninit_cpl_io_handlers();
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM);

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);