1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2012 Chelsio Communications, Inc. 5 * All rights reserved. 6 * Written by: Navdeep Parhar <np@FreeBSD.org> 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_inet.h" 34 #include "opt_inet6.h" 35 36 #ifdef TCP_OFFLOAD 37 #include <sys/param.h> 38 #include <sys/types.h> 39 #include <sys/kernel.h> 40 #include <sys/ktr.h> 41 #include <sys/module.h> 42 #include <sys/protosw.h> 43 #include <sys/refcount.h> 44 #include <sys/domain.h> 45 #include <sys/fnv_hash.h> 46 #include <sys/socket.h> 47 #include <sys/socketvar.h> 48 #include <sys/sysctl.h> 49 #include <net/ethernet.h> 50 #include <net/if.h> 51 #include <net/if_types.h> 52 #include <net/if_vlan_var.h> 53 #include <net/route.h> 54 #include <net/route/nhop.h> 55 #include <netinet/in.h> 56 #include <netinet/in_fib.h> 57 #include <netinet/in_pcb.h> 58 #include <netinet/ip.h> 59 #include <netinet/ip6.h> 60 #include <netinet6/in6_fib.h> 61 #include <netinet6/scope6_var.h> 62 #include <netinet/tcp_timer.h> 63 #define TCPSTATES 64 #include <netinet/tcp_fsm.h> 65 #include <netinet/tcp_var.h> 66 #include <netinet/toecore.h> 67 #include <netinet/cc/cc.h> 68 69 #include "common/common.h" 70 #include "common/t4_msg.h" 71 #include "common/t4_regs.h" 72 #include "t4_clip.h" 73 #include "tom/t4_tom_l2t.h" 74 #include "tom/t4_tom.h" 75 76 /* stid services */ 77 static int alloc_stid(struct adapter *, struct listen_ctx *, int); 78 static struct listen_ctx *lookup_stid(struct adapter *, int); 79 static void free_stid(struct adapter *, struct listen_ctx *); 80 81 /* lctx services */ 82 static struct listen_ctx *alloc_lctx(struct adapter *, struct inpcb *, 83 struct vi_info *); 84 static int free_lctx(struct adapter *, struct listen_ctx *); 85 static void hold_lctx(struct listen_ctx *); 86 static void listen_hash_add(struct adapter *, struct listen_ctx *); 87 static struct listen_ctx *listen_hash_find(struct adapter *, struct inpcb *); 88 static struct listen_ctx *listen_hash_del(struct adapter *, struct inpcb *); 89 static struct inpcb *release_lctx(struct adapter *, struct listen_ctx *); 90 91 static void send_reset_synqe(struct toedev *, struct synq_entry *); 92 93 static int 94 alloc_stid(struct adapter *sc, struct listen_ctx *lctx, int isipv6) 95 { 96 struct tid_info *t = &sc->tids; 97 u_int stid, n, f, mask; 98 struct stid_region *sr = &lctx->stid_region; 99 100 /* 101 * An IPv6 server needs 2 naturally aligned stids (1 stid = 4 cells) in 102 * the TCAM. The start of the stid region is properly aligned (the chip 103 * requires each region to be 128-cell aligned). 104 */ 105 n = isipv6 ? 2 : 1; 106 mask = n - 1; 107 KASSERT((t->stid_base & mask) == 0 && (t->nstids & mask) == 0, 108 ("%s: stid region (%u, %u) not properly aligned. n = %u", 109 __func__, t->stid_base, t->nstids, n)); 110 111 mtx_lock(&t->stid_lock); 112 if (n > t->nstids - t->stids_in_use) { 113 mtx_unlock(&t->stid_lock); 114 return (-1); 115 } 116 117 if (t->nstids_free_head >= n) { 118 /* 119 * This allocation will definitely succeed because the region 120 * starts at a good alignment and we just checked we have enough 121 * stids free. 122 */ 123 f = t->nstids_free_head & mask; 124 t->nstids_free_head -= n + f; 125 stid = t->nstids_free_head; 126 TAILQ_INSERT_HEAD(&t->stids, sr, link); 127 } else { 128 struct stid_region *s; 129 130 stid = t->nstids_free_head; 131 TAILQ_FOREACH(s, &t->stids, link) { 132 stid += s->used + s->free; 133 f = stid & mask; 134 if (s->free >= n + f) { 135 stid -= n + f; 136 s->free -= n + f; 137 TAILQ_INSERT_AFTER(&t->stids, s, sr, link); 138 goto allocated; 139 } 140 } 141 142 if (__predict_false(stid != t->nstids)) { 143 panic("%s: stids TAILQ (%p) corrupt." 144 " At %d instead of %d at the end of the queue.", 145 __func__, &t->stids, stid, t->nstids); 146 } 147 148 mtx_unlock(&t->stid_lock); 149 return (-1); 150 } 151 152 allocated: 153 sr->used = n; 154 sr->free = f; 155 t->stids_in_use += n; 156 t->stid_tab[stid] = lctx; 157 mtx_unlock(&t->stid_lock); 158 159 KASSERT(((stid + t->stid_base) & mask) == 0, 160 ("%s: EDOOFUS.", __func__)); 161 return (stid + t->stid_base); 162 } 163 164 static struct listen_ctx * 165 lookup_stid(struct adapter *sc, int stid) 166 { 167 struct tid_info *t = &sc->tids; 168 169 return (t->stid_tab[stid - t->stid_base]); 170 } 171 172 static void 173 free_stid(struct adapter *sc, struct listen_ctx *lctx) 174 { 175 struct tid_info *t = &sc->tids; 176 struct stid_region *sr = &lctx->stid_region; 177 struct stid_region *s; 178 179 KASSERT(sr->used > 0, ("%s: nonsense free (%d)", __func__, sr->used)); 180 181 mtx_lock(&t->stid_lock); 182 s = TAILQ_PREV(sr, stid_head, link); 183 if (s != NULL) 184 s->free += sr->used + sr->free; 185 else 186 t->nstids_free_head += sr->used + sr->free; 187 KASSERT(t->stids_in_use >= sr->used, 188 ("%s: stids_in_use (%u) < stids being freed (%u)", __func__, 189 t->stids_in_use, sr->used)); 190 t->stids_in_use -= sr->used; 191 TAILQ_REMOVE(&t->stids, sr, link); 192 mtx_unlock(&t->stid_lock); 193 } 194 195 static struct listen_ctx * 196 alloc_lctx(struct adapter *sc, struct inpcb *inp, struct vi_info *vi) 197 { 198 struct listen_ctx *lctx; 199 200 INP_WLOCK_ASSERT(inp); 201 202 lctx = malloc(sizeof(struct listen_ctx), M_CXGBE, M_NOWAIT | M_ZERO); 203 if (lctx == NULL) 204 return (NULL); 205 206 lctx->stid = alloc_stid(sc, lctx, inp->inp_vflag & INP_IPV6); 207 if (lctx->stid < 0) { 208 free(lctx, M_CXGBE); 209 return (NULL); 210 } 211 212 if (inp->inp_vflag & INP_IPV6 && 213 !IN6_ARE_ADDR_EQUAL(&in6addr_any, &inp->in6p_laddr)) { 214 lctx->ce = t4_hold_lip(sc, &inp->in6p_laddr, NULL); 215 if (lctx->ce == NULL) { 216 free(lctx, M_CXGBE); 217 return (NULL); 218 } 219 } 220 221 lctx->ctrlq = &sc->sge.ctrlq[vi->pi->port_id]; 222 lctx->ofld_rxq = &sc->sge.ofld_rxq[vi->first_ofld_rxq]; 223 refcount_init(&lctx->refcount, 1); 224 225 lctx->inp = inp; 226 lctx->vnet = inp->inp_socket->so_vnet; 227 in_pcbref(inp); 228 229 return (lctx); 230 } 231 232 /* Don't call this directly, use release_lctx instead */ 233 static int 234 free_lctx(struct adapter *sc, struct listen_ctx *lctx) 235 { 236 struct inpcb *inp = lctx->inp; 237 238 INP_WLOCK_ASSERT(inp); 239 KASSERT(lctx->refcount == 0, 240 ("%s: refcount %d", __func__, lctx->refcount)); 241 KASSERT(lctx->stid >= 0, ("%s: bad stid %d.", __func__, lctx->stid)); 242 243 CTR4(KTR_CXGBE, "%s: stid %u, lctx %p, inp %p", 244 __func__, lctx->stid, lctx, lctx->inp); 245 246 if (lctx->ce) 247 t4_release_lip(sc, lctx->ce); 248 free_stid(sc, lctx); 249 free(lctx, M_CXGBE); 250 251 return (in_pcbrele_wlocked(inp)); 252 } 253 254 static void 255 hold_lctx(struct listen_ctx *lctx) 256 { 257 258 refcount_acquire(&lctx->refcount); 259 } 260 261 static inline uint32_t 262 listen_hashfn(void *key, u_long mask) 263 { 264 265 return (fnv_32_buf(&key, sizeof(key), FNV1_32_INIT) & mask); 266 } 267 268 /* 269 * Add a listen_ctx entry to the listen hash table. 270 */ 271 static void 272 listen_hash_add(struct adapter *sc, struct listen_ctx *lctx) 273 { 274 struct tom_data *td = sc->tom_softc; 275 int bucket = listen_hashfn(lctx->inp, td->listen_mask); 276 277 mtx_lock(&td->lctx_hash_lock); 278 LIST_INSERT_HEAD(&td->listen_hash[bucket], lctx, link); 279 td->lctx_count++; 280 mtx_unlock(&td->lctx_hash_lock); 281 } 282 283 /* 284 * Look for the listening socket's context entry in the hash and return it. 285 */ 286 static struct listen_ctx * 287 listen_hash_find(struct adapter *sc, struct inpcb *inp) 288 { 289 struct tom_data *td = sc->tom_softc; 290 int bucket = listen_hashfn(inp, td->listen_mask); 291 struct listen_ctx *lctx; 292 293 mtx_lock(&td->lctx_hash_lock); 294 LIST_FOREACH(lctx, &td->listen_hash[bucket], link) { 295 if (lctx->inp == inp) 296 break; 297 } 298 mtx_unlock(&td->lctx_hash_lock); 299 300 return (lctx); 301 } 302 303 /* 304 * Removes the listen_ctx structure for inp from the hash and returns it. 305 */ 306 static struct listen_ctx * 307 listen_hash_del(struct adapter *sc, struct inpcb *inp) 308 { 309 struct tom_data *td = sc->tom_softc; 310 int bucket = listen_hashfn(inp, td->listen_mask); 311 struct listen_ctx *lctx, *l; 312 313 mtx_lock(&td->lctx_hash_lock); 314 LIST_FOREACH_SAFE(lctx, &td->listen_hash[bucket], link, l) { 315 if (lctx->inp == inp) { 316 LIST_REMOVE(lctx, link); 317 td->lctx_count--; 318 break; 319 } 320 } 321 mtx_unlock(&td->lctx_hash_lock); 322 323 return (lctx); 324 } 325 326 /* 327 * Releases a hold on the lctx. Must be called with the listening socket's inp 328 * locked. The inp may be freed by this function and it returns NULL to 329 * indicate this. 330 */ 331 static struct inpcb * 332 release_lctx(struct adapter *sc, struct listen_ctx *lctx) 333 { 334 struct inpcb *inp = lctx->inp; 335 int inp_freed = 0; 336 337 INP_WLOCK_ASSERT(inp); 338 if (refcount_release(&lctx->refcount)) 339 inp_freed = free_lctx(sc, lctx); 340 341 return (inp_freed ? NULL : inp); 342 } 343 344 static void 345 send_reset_synqe(struct toedev *tod, struct synq_entry *synqe) 346 { 347 struct adapter *sc = tod->tod_softc; 348 struct mbuf *m = synqe->syn; 349 struct ifnet *ifp = m->m_pkthdr.rcvif; 350 struct vi_info *vi = ifp->if_softc; 351 struct port_info *pi = vi->pi; 352 struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx]; 353 struct wrqe *wr; 354 struct fw_flowc_wr *flowc; 355 struct cpl_abort_req *req; 356 int flowclen; 357 struct sge_wrq *ofld_txq; 358 struct sge_ofld_rxq *ofld_rxq; 359 const int nparams = 6; 360 const u_int pfvf = sc->pf << S_FW_VIID_PFN; 361 362 INP_WLOCK_ASSERT(synqe->lctx->inp); 363 364 CTR5(KTR_CXGBE, "%s: synqe %p (0x%x), tid %d%s", 365 __func__, synqe, synqe->flags, synqe->tid, 366 synqe->flags & TPF_ABORT_SHUTDOWN ? 367 " (abort already in progress)" : ""); 368 if (synqe->flags & TPF_ABORT_SHUTDOWN) 369 return; /* abort already in progress */ 370 synqe->flags |= TPF_ABORT_SHUTDOWN; 371 372 ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx]; 373 ofld_rxq = &sc->sge.ofld_rxq[synqe->params.rxq_idx]; 374 375 /* The wrqe will have two WRs - a flowc followed by an abort_req */ 376 flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval); 377 378 wr = alloc_wrqe(roundup2(flowclen, EQ_ESIZE) + sizeof(*req), ofld_txq); 379 if (wr == NULL) { 380 /* XXX */ 381 panic("%s: allocation failure.", __func__); 382 } 383 flowc = wrtod(wr); 384 req = (void *)((caddr_t)flowc + roundup2(flowclen, EQ_ESIZE)); 385 386 /* First the flowc ... */ 387 memset(flowc, 0, wr->wr_len); 388 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 389 V_FW_FLOWC_WR_NPARAMS(nparams)); 390 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | 391 V_FW_WR_FLOWID(synqe->tid)); 392 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 393 flowc->mnemval[0].val = htobe32(pfvf); 394 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 395 flowc->mnemval[1].val = htobe32(pi->tx_chan); 396 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 397 flowc->mnemval[2].val = htobe32(pi->tx_chan); 398 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 399 flowc->mnemval[3].val = htobe32(ofld_rxq->iq.abs_id); 400 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF; 401 flowc->mnemval[4].val = htobe32(512); 402 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS; 403 flowc->mnemval[5].val = htobe32(512); 404 synqe->flags |= TPF_FLOWC_WR_SENT; 405 406 /* ... then ABORT request */ 407 INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, synqe->tid); 408 req->rsvd0 = 0; /* don't have a snd_nxt */ 409 req->rsvd1 = 1; /* no data sent yet */ 410 req->cmd = CPL_ABORT_SEND_RST; 411 412 t4_l2t_send(sc, wr, e); 413 } 414 415 static int 416 create_server(struct adapter *sc, struct listen_ctx *lctx) 417 { 418 struct wrqe *wr; 419 struct cpl_pass_open_req *req; 420 struct inpcb *inp = lctx->inp; 421 422 wr = alloc_wrqe(sizeof(*req), lctx->ctrlq); 423 if (wr == NULL) { 424 log(LOG_ERR, "%s: allocation failure", __func__); 425 return (ENOMEM); 426 } 427 req = wrtod(wr); 428 429 INIT_TP_WR(req, 0); 430 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, lctx->stid)); 431 req->local_port = inp->inp_lport; 432 req->peer_port = 0; 433 req->local_ip = inp->inp_laddr.s_addr; 434 req->peer_ip = 0; 435 req->opt0 = htobe64(V_TX_CHAN(lctx->ctrlq->eq.tx_chan)); 436 req->opt1 = htobe64(V_CONN_POLICY(CPL_CONN_POLICY_ASK) | 437 F_SYN_RSS_ENABLE | V_SYN_RSS_QUEUE(lctx->ofld_rxq->iq.abs_id)); 438 439 t4_wrq_tx(sc, wr); 440 return (0); 441 } 442 443 static int 444 create_server6(struct adapter *sc, struct listen_ctx *lctx) 445 { 446 struct wrqe *wr; 447 struct cpl_pass_open_req6 *req; 448 struct inpcb *inp = lctx->inp; 449 450 wr = alloc_wrqe(sizeof(*req), lctx->ctrlq); 451 if (wr == NULL) { 452 log(LOG_ERR, "%s: allocation failure", __func__); 453 return (ENOMEM); 454 } 455 req = wrtod(wr); 456 457 INIT_TP_WR(req, 0); 458 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, lctx->stid)); 459 req->local_port = inp->inp_lport; 460 req->peer_port = 0; 461 req->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0]; 462 req->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8]; 463 req->peer_ip_hi = 0; 464 req->peer_ip_lo = 0; 465 req->opt0 = htobe64(V_TX_CHAN(lctx->ctrlq->eq.tx_chan)); 466 req->opt1 = htobe64(V_CONN_POLICY(CPL_CONN_POLICY_ASK) | 467 F_SYN_RSS_ENABLE | V_SYN_RSS_QUEUE(lctx->ofld_rxq->iq.abs_id)); 468 469 t4_wrq_tx(sc, wr); 470 return (0); 471 } 472 473 static int 474 destroy_server(struct adapter *sc, struct listen_ctx *lctx) 475 { 476 struct wrqe *wr; 477 struct cpl_close_listsvr_req *req; 478 479 wr = alloc_wrqe(sizeof(*req), lctx->ctrlq); 480 if (wr == NULL) { 481 /* XXX */ 482 panic("%s: allocation failure.", __func__); 483 } 484 req = wrtod(wr); 485 486 INIT_TP_WR(req, 0); 487 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, 488 lctx->stid)); 489 req->reply_ctrl = htobe16(lctx->ofld_rxq->iq.abs_id); 490 req->rsvd = htobe16(0); 491 492 t4_wrq_tx(sc, wr); 493 return (0); 494 } 495 496 /* 497 * Start a listening server by sending a passive open request to HW. 498 * 499 * Can't take adapter lock here and access to sc->flags, 500 * sc->offload_map, if_capenable are all race prone. 501 */ 502 int 503 t4_listen_start(struct toedev *tod, struct tcpcb *tp) 504 { 505 struct adapter *sc = tod->tod_softc; 506 struct vi_info *vi; 507 struct port_info *pi; 508 struct inpcb *inp = tp->t_inpcb; 509 struct listen_ctx *lctx; 510 int i, rc, v; 511 struct offload_settings settings; 512 513 INP_WLOCK_ASSERT(inp); 514 515 rw_rlock(&sc->policy_lock); 516 settings = *lookup_offload_policy(sc, OPEN_TYPE_LISTEN, NULL, 517 EVL_MAKETAG(0xfff, 0, 0), inp); 518 rw_runlock(&sc->policy_lock); 519 if (!settings.offload) 520 return (0); 521 522 /* Don't start a hardware listener for any loopback address. */ 523 if (inp->inp_vflag & INP_IPV6 && IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr)) 524 return (0); 525 if (!(inp->inp_vflag & INP_IPV6) && 526 IN_LOOPBACK(ntohl(inp->inp_laddr.s_addr))) 527 return (0); 528 if (sc->flags & KERN_TLS_OK) 529 return (0); 530 #if 0 531 ADAPTER_LOCK(sc); 532 if (IS_BUSY(sc)) { 533 log(LOG_ERR, "%s: listen request ignored, %s is busy", 534 __func__, device_get_nameunit(sc->dev)); 535 goto done; 536 } 537 538 KASSERT(uld_active(sc, ULD_TOM), 539 ("%s: TOM not initialized", __func__)); 540 #endif 541 542 /* 543 * Find an initialized VI with IFCAP_TOE (4 or 6). We'll use the first 544 * such VI's queues to send the passive open and receive the reply to 545 * it. 546 * 547 * XXX: need a way to mark a port in use by offload. if_cxgbe should 548 * then reject any attempt to bring down such a port (and maybe reject 549 * attempts to disable IFCAP_TOE on that port too?). 550 */ 551 for_each_port(sc, i) { 552 pi = sc->port[i]; 553 for_each_vi(pi, v, vi) { 554 if (vi->flags & VI_INIT_DONE && 555 vi->ifp->if_capenable & IFCAP_TOE) 556 goto found; 557 } 558 } 559 goto done; /* no port that's UP with IFCAP_TOE enabled */ 560 found: 561 562 if (listen_hash_find(sc, inp) != NULL) 563 goto done; /* already setup */ 564 565 lctx = alloc_lctx(sc, inp, vi); 566 if (lctx == NULL) { 567 log(LOG_ERR, 568 "%s: listen request ignored, %s couldn't allocate lctx\n", 569 __func__, device_get_nameunit(sc->dev)); 570 goto done; 571 } 572 listen_hash_add(sc, lctx); 573 574 CTR6(KTR_CXGBE, "%s: stid %u (%s), lctx %p, inp %p vflag 0x%x", 575 __func__, lctx->stid, tcpstates[tp->t_state], lctx, inp, 576 inp->inp_vflag); 577 578 if (inp->inp_vflag & INP_IPV6) 579 rc = create_server6(sc, lctx); 580 else 581 rc = create_server(sc, lctx); 582 if (rc != 0) { 583 log(LOG_ERR, "%s: %s failed to create hw listener: %d.\n", 584 __func__, device_get_nameunit(sc->dev), rc); 585 (void) listen_hash_del(sc, inp); 586 inp = release_lctx(sc, lctx); 587 /* can't be freed, host stack has a reference */ 588 KASSERT(inp != NULL, ("%s: inp freed", __func__)); 589 goto done; 590 } 591 lctx->flags |= LCTX_RPL_PENDING; 592 done: 593 #if 0 594 ADAPTER_UNLOCK(sc); 595 #endif 596 return (0); 597 } 598 599 int 600 t4_listen_stop(struct toedev *tod, struct tcpcb *tp) 601 { 602 struct listen_ctx *lctx; 603 struct adapter *sc = tod->tod_softc; 604 struct inpcb *inp = tp->t_inpcb; 605 606 INP_WLOCK_ASSERT(inp); 607 608 lctx = listen_hash_del(sc, inp); 609 if (lctx == NULL) 610 return (ENOENT); /* no hardware listener for this inp */ 611 612 CTR4(KTR_CXGBE, "%s: stid %u, lctx %p, flags %x", __func__, lctx->stid, 613 lctx, lctx->flags); 614 615 /* 616 * If the reply to the PASS_OPEN is still pending we'll wait for it to 617 * arrive and clean up when it does. 618 */ 619 if (lctx->flags & LCTX_RPL_PENDING) { 620 return (EINPROGRESS); 621 } 622 623 destroy_server(sc, lctx); 624 return (0); 625 } 626 627 static inline struct synq_entry * 628 alloc_synqe(struct adapter *sc __unused, struct listen_ctx *lctx, int flags) 629 { 630 struct synq_entry *synqe; 631 632 INP_WLOCK_ASSERT(lctx->inp); 633 MPASS(flags == M_WAITOK || flags == M_NOWAIT); 634 635 synqe = malloc(sizeof(*synqe), M_CXGBE, flags); 636 if (__predict_true(synqe != NULL)) { 637 synqe->flags = TPF_SYNQE; 638 refcount_init(&synqe->refcnt, 1); 639 synqe->lctx = lctx; 640 hold_lctx(lctx); /* Every synqe has a ref on its lctx. */ 641 synqe->syn = NULL; 642 } 643 644 return (synqe); 645 } 646 647 static inline void 648 hold_synqe(struct synq_entry *synqe) 649 { 650 651 refcount_acquire(&synqe->refcnt); 652 } 653 654 static inline struct inpcb * 655 release_synqe(struct adapter *sc, struct synq_entry *synqe) 656 { 657 struct inpcb *inp; 658 659 MPASS(synqe->flags & TPF_SYNQE); 660 MPASS(synqe->lctx != NULL); 661 662 inp = synqe->lctx->inp; 663 MPASS(inp != NULL); 664 INP_WLOCK_ASSERT(inp); 665 666 if (refcount_release(&synqe->refcnt)) { 667 inp = release_lctx(sc, synqe->lctx); 668 m_freem(synqe->syn); 669 free(synqe, M_CXGBE); 670 } 671 672 return (inp); 673 } 674 675 void 676 t4_syncache_added(struct toedev *tod __unused, void *arg) 677 { 678 struct synq_entry *synqe = arg; 679 680 hold_synqe(synqe); 681 } 682 683 void 684 t4_syncache_removed(struct toedev *tod, void *arg) 685 { 686 struct adapter *sc = tod->tod_softc; 687 struct synq_entry *synqe = arg; 688 struct inpcb *inp = synqe->lctx->inp; 689 690 /* 691 * XXX: this is a LOR but harmless when running from the softclock. 692 */ 693 INP_WLOCK(inp); 694 inp = release_synqe(sc, synqe); 695 if (inp != NULL) 696 INP_WUNLOCK(inp); 697 } 698 699 int 700 t4_syncache_respond(struct toedev *tod, void *arg, struct mbuf *m) 701 { 702 struct synq_entry *synqe = arg; 703 704 if (atomic_fetchadd_int(&synqe->ok_to_respond, 1) == 0) { 705 struct tcpopt to; 706 struct ip *ip = mtod(m, struct ip *); 707 struct tcphdr *th; 708 709 if (ip->ip_v == IPVERSION) 710 th = (void *)(ip + 1); 711 else 712 th = (void *)((struct ip6_hdr *)ip + 1); 713 bzero(&to, sizeof(to)); 714 tcp_dooptions(&to, (void *)(th + 1), 715 (th->th_off << 2) - sizeof(*th), TO_SYN); 716 717 /* save these for later */ 718 synqe->iss = be32toh(th->th_seq); 719 synqe->irs = be32toh(th->th_ack) - 1; 720 synqe->ts = to.to_tsval; 721 } 722 723 m_freem(m); /* don't need this any more */ 724 return (0); 725 } 726 727 static int 728 do_pass_open_rpl(struct sge_iq *iq, const struct rss_header *rss, 729 struct mbuf *m) 730 { 731 struct adapter *sc = iq->adapter; 732 const struct cpl_pass_open_rpl *cpl = (const void *)(rss + 1); 733 int stid = GET_TID(cpl); 734 unsigned int status = cpl->status; 735 struct listen_ctx *lctx = lookup_stid(sc, stid); 736 struct inpcb *inp = lctx->inp; 737 #ifdef INVARIANTS 738 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 739 #endif 740 741 KASSERT(opcode == CPL_PASS_OPEN_RPL, 742 ("%s: unexpected opcode 0x%x", __func__, opcode)); 743 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 744 KASSERT(lctx->stid == stid, ("%s: lctx stid mismatch", __func__)); 745 746 INP_WLOCK(inp); 747 748 CTR4(KTR_CXGBE, "%s: stid %d, status %u, flags 0x%x", 749 __func__, stid, status, lctx->flags); 750 751 lctx->flags &= ~LCTX_RPL_PENDING; 752 753 if (status != CPL_ERR_NONE) 754 log(LOG_ERR, "listener (stid %u) failed: %d\n", stid, status); 755 756 #ifdef INVARIANTS 757 /* 758 * If the inp has been dropped (listening socket closed) then 759 * listen_stop must have run and taken the inp out of the hash. 760 */ 761 if (inp->inp_flags & INP_DROPPED) { 762 KASSERT(listen_hash_del(sc, inp) == NULL, 763 ("%s: inp %p still in listen hash", __func__, inp)); 764 } 765 #endif 766 767 if (inp->inp_flags & INP_DROPPED && status != CPL_ERR_NONE) { 768 if (release_lctx(sc, lctx) != NULL) 769 INP_WUNLOCK(inp); 770 return (status); 771 } 772 773 /* 774 * Listening socket stopped listening earlier and now the chip tells us 775 * it has started the hardware listener. Stop it; the lctx will be 776 * released in do_close_server_rpl. 777 */ 778 if (inp->inp_flags & INP_DROPPED) { 779 destroy_server(sc, lctx); 780 INP_WUNLOCK(inp); 781 return (status); 782 } 783 784 /* 785 * Failed to start hardware listener. Take inp out of the hash and 786 * release our reference on it. An error message has been logged 787 * already. 788 */ 789 if (status != CPL_ERR_NONE) { 790 listen_hash_del(sc, inp); 791 if (release_lctx(sc, lctx) != NULL) 792 INP_WUNLOCK(inp); 793 return (status); 794 } 795 796 /* hardware listener open for business */ 797 798 INP_WUNLOCK(inp); 799 return (status); 800 } 801 802 static int 803 do_close_server_rpl(struct sge_iq *iq, const struct rss_header *rss, 804 struct mbuf *m) 805 { 806 struct adapter *sc = iq->adapter; 807 const struct cpl_close_listsvr_rpl *cpl = (const void *)(rss + 1); 808 int stid = GET_TID(cpl); 809 unsigned int status = cpl->status; 810 struct listen_ctx *lctx = lookup_stid(sc, stid); 811 struct inpcb *inp = lctx->inp; 812 #ifdef INVARIANTS 813 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 814 #endif 815 816 KASSERT(opcode == CPL_CLOSE_LISTSRV_RPL, 817 ("%s: unexpected opcode 0x%x", __func__, opcode)); 818 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 819 KASSERT(lctx->stid == stid, ("%s: lctx stid mismatch", __func__)); 820 821 CTR3(KTR_CXGBE, "%s: stid %u, status %u", __func__, stid, status); 822 823 if (status != CPL_ERR_NONE) { 824 log(LOG_ERR, "%s: failed (%u) to close listener for stid %u\n", 825 __func__, status, stid); 826 return (status); 827 } 828 829 INP_WLOCK(inp); 830 inp = release_lctx(sc, lctx); 831 if (inp != NULL) 832 INP_WUNLOCK(inp); 833 834 return (status); 835 } 836 837 static void 838 done_with_synqe(struct adapter *sc, struct synq_entry *synqe) 839 { 840 struct listen_ctx *lctx = synqe->lctx; 841 struct inpcb *inp = lctx->inp; 842 struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx]; 843 int ntids; 844 845 INP_WLOCK_ASSERT(inp); 846 ntids = inp->inp_vflag & INP_IPV6 ? 2 : 1; 847 848 remove_tid(sc, synqe->tid, ntids); 849 release_tid(sc, synqe->tid, lctx->ctrlq); 850 t4_l2t_release(e); 851 inp = release_synqe(sc, synqe); 852 if (inp) 853 INP_WUNLOCK(inp); 854 } 855 856 void 857 synack_failure_cleanup(struct adapter *sc, int tid) 858 { 859 struct synq_entry *synqe = lookup_tid(sc, tid); 860 861 INP_WLOCK(synqe->lctx->inp); 862 done_with_synqe(sc, synqe); 863 } 864 865 int 866 do_abort_req_synqe(struct sge_iq *iq, const struct rss_header *rss, 867 struct mbuf *m) 868 { 869 struct adapter *sc = iq->adapter; 870 const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1); 871 unsigned int tid = GET_TID(cpl); 872 struct synq_entry *synqe = lookup_tid(sc, tid); 873 struct listen_ctx *lctx = synqe->lctx; 874 struct inpcb *inp = lctx->inp; 875 struct sge_wrq *ofld_txq; 876 #ifdef INVARIANTS 877 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 878 #endif 879 880 KASSERT(opcode == CPL_ABORT_REQ_RSS, 881 ("%s: unexpected opcode 0x%x", __func__, opcode)); 882 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 883 KASSERT(synqe->tid == tid, ("%s: toep tid mismatch", __func__)); 884 885 CTR6(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x), lctx %p, status %d", 886 __func__, tid, synqe, synqe->flags, synqe->lctx, cpl->status); 887 888 if (negative_advice(cpl->status)) 889 return (0); /* Ignore negative advice */ 890 891 INP_WLOCK(inp); 892 893 ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx]; 894 895 /* 896 * If we'd initiated an abort earlier the reply to it is responsible for 897 * cleaning up resources. Otherwise we tear everything down right here 898 * right now. We owe the T4 a CPL_ABORT_RPL no matter what. 899 */ 900 if (synqe->flags & TPF_ABORT_SHUTDOWN) { 901 INP_WUNLOCK(inp); 902 goto done; 903 } 904 905 done_with_synqe(sc, synqe); 906 /* inp lock released by done_with_synqe */ 907 done: 908 send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST); 909 return (0); 910 } 911 912 int 913 do_abort_rpl_synqe(struct sge_iq *iq, const struct rss_header *rss, 914 struct mbuf *m) 915 { 916 struct adapter *sc = iq->adapter; 917 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1); 918 unsigned int tid = GET_TID(cpl); 919 struct synq_entry *synqe = lookup_tid(sc, tid); 920 struct listen_ctx *lctx = synqe->lctx; 921 struct inpcb *inp = lctx->inp; 922 #ifdef INVARIANTS 923 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 924 #endif 925 926 KASSERT(opcode == CPL_ABORT_RPL_RSS, 927 ("%s: unexpected opcode 0x%x", __func__, opcode)); 928 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 929 KASSERT(synqe->tid == tid, ("%s: toep tid mismatch", __func__)); 930 931 CTR6(KTR_CXGBE, "%s: tid %u, synqe %p (0x%x), lctx %p, status %d", 932 __func__, tid, synqe, synqe->flags, synqe->lctx, cpl->status); 933 934 INP_WLOCK(inp); 935 KASSERT(synqe->flags & TPF_ABORT_SHUTDOWN, 936 ("%s: wasn't expecting abort reply for synqe %p (0x%x)", 937 __func__, synqe, synqe->flags)); 938 939 done_with_synqe(sc, synqe); 940 /* inp lock released by done_with_synqe */ 941 942 return (0); 943 } 944 945 void 946 t4_offload_socket(struct toedev *tod, void *arg, struct socket *so) 947 { 948 struct adapter *sc = tod->tod_softc; 949 struct synq_entry *synqe = arg; 950 struct inpcb *inp = sotoinpcb(so); 951 struct toepcb *toep = synqe->toep; 952 953 NET_EPOCH_ASSERT(); /* prevents bad race with accept() */ 954 INP_WLOCK_ASSERT(inp); 955 KASSERT(synqe->flags & TPF_SYNQE, 956 ("%s: %p not a synq_entry?", __func__, arg)); 957 MPASS(toep->tid == synqe->tid); 958 959 offload_socket(so, toep); 960 make_established(toep, synqe->iss, synqe->irs, synqe->tcp_opt); 961 toep->flags |= TPF_CPL_PENDING; 962 update_tid(sc, synqe->tid, toep); 963 synqe->flags |= TPF_SYNQE_EXPANDED; 964 inp->inp_flowtype = (inp->inp_vflag & INP_IPV6) ? 965 M_HASHTYPE_RSS_TCP_IPV6 : M_HASHTYPE_RSS_TCP_IPV4; 966 inp->inp_flowid = synqe->rss_hash; 967 } 968 969 static void 970 t4opt_to_tcpopt(const struct tcp_options *t4opt, struct tcpopt *to) 971 { 972 bzero(to, sizeof(*to)); 973 974 if (t4opt->mss) { 975 to->to_flags |= TOF_MSS; 976 to->to_mss = be16toh(t4opt->mss); 977 } 978 979 if (t4opt->wsf > 0 && t4opt->wsf < 15) { 980 to->to_flags |= TOF_SCALE; 981 to->to_wscale = t4opt->wsf; 982 } 983 984 if (t4opt->tstamp) 985 to->to_flags |= TOF_TS; 986 987 if (t4opt->sack) 988 to->to_flags |= TOF_SACKPERM; 989 } 990 991 static void 992 pass_accept_req_to_protohdrs(struct adapter *sc, const struct mbuf *m, 993 struct in_conninfo *inc, struct tcphdr *th, uint8_t *iptos) 994 { 995 const struct cpl_pass_accept_req *cpl = mtod(m, const void *); 996 const struct ether_header *eh; 997 unsigned int hlen = be32toh(cpl->hdr_len); 998 uintptr_t l3hdr; 999 const struct tcphdr *tcp; 1000 1001 eh = (const void *)(cpl + 1); 1002 if (chip_id(sc) >= CHELSIO_T6) { 1003 l3hdr = ((uintptr_t)eh + G_T6_ETH_HDR_LEN(hlen)); 1004 tcp = (const void *)(l3hdr + G_T6_IP_HDR_LEN(hlen)); 1005 } else { 1006 l3hdr = ((uintptr_t)eh + G_ETH_HDR_LEN(hlen)); 1007 tcp = (const void *)(l3hdr + G_IP_HDR_LEN(hlen)); 1008 } 1009 1010 /* extract TOS (DiffServ + ECN) byte for AccECN */ 1011 if (iptos) { 1012 if (((struct ip *)l3hdr)->ip_v == IPVERSION) { 1013 const struct ip *ip = (const void *)l3hdr; 1014 *iptos = ip->ip_tos; 1015 } 1016 #ifdef INET6 1017 else 1018 if (((struct ip *)l3hdr)->ip_v == (IPV6_VERSION >> 4)) { 1019 const struct ip6_hdr *ip6 = (const void *)l3hdr; 1020 *iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; 1021 } 1022 #endif /* INET */ 1023 } 1024 1025 if (inc) { 1026 bzero(inc, sizeof(*inc)); 1027 inc->inc_fport = tcp->th_sport; 1028 inc->inc_lport = tcp->th_dport; 1029 if (((struct ip *)l3hdr)->ip_v == IPVERSION) { 1030 const struct ip *ip = (const void *)l3hdr; 1031 1032 inc->inc_faddr = ip->ip_src; 1033 inc->inc_laddr = ip->ip_dst; 1034 } else { 1035 const struct ip6_hdr *ip6 = (const void *)l3hdr; 1036 1037 inc->inc_flags |= INC_ISIPV6; 1038 inc->inc6_faddr = ip6->ip6_src; 1039 inc->inc6_laddr = ip6->ip6_dst; 1040 } 1041 } 1042 1043 if (th) { 1044 bcopy(tcp, th, sizeof(*th)); 1045 tcp_fields_to_host(th); /* just like tcp_input */ 1046 } 1047 } 1048 1049 static struct l2t_entry * 1050 get_l2te_for_nexthop(struct port_info *pi, struct ifnet *ifp, 1051 struct in_conninfo *inc) 1052 { 1053 struct l2t_entry *e; 1054 struct sockaddr_in6 sin6; 1055 struct sockaddr *dst = (void *)&sin6; 1056 struct nhop_object *nh; 1057 1058 if (inc->inc_flags & INC_ISIPV6) { 1059 bzero(dst, sizeof(struct sockaddr_in6)); 1060 dst->sa_len = sizeof(struct sockaddr_in6); 1061 dst->sa_family = AF_INET6; 1062 1063 if (IN6_IS_ADDR_LINKLOCAL(&inc->inc6_laddr)) { 1064 /* no need for route lookup */ 1065 e = t4_l2t_get(pi, ifp, dst); 1066 return (e); 1067 } 1068 1069 nh = fib6_lookup(RT_DEFAULT_FIB, &inc->inc6_faddr, 0, NHR_NONE, 0); 1070 if (nh == NULL) 1071 return (NULL); 1072 if (nh->nh_ifp != ifp) 1073 return (NULL); 1074 if (nh->nh_flags & NHF_GATEWAY) 1075 ((struct sockaddr_in6 *)dst)->sin6_addr = nh->gw6_sa.sin6_addr; 1076 else 1077 ((struct sockaddr_in6 *)dst)->sin6_addr = inc->inc6_faddr; 1078 } else { 1079 dst->sa_len = sizeof(struct sockaddr_in); 1080 dst->sa_family = AF_INET; 1081 1082 nh = fib4_lookup(RT_DEFAULT_FIB, inc->inc_faddr, 0, NHR_NONE, 0); 1083 if (nh == NULL) 1084 return (NULL); 1085 if (nh->nh_ifp != ifp) 1086 return (NULL); 1087 if (nh->nh_flags & NHF_GATEWAY) 1088 ((struct sockaddr_in *)dst)->sin_addr = nh->gw4_sa.sin_addr; 1089 else 1090 ((struct sockaddr_in *)dst)->sin_addr = inc->inc_faddr; 1091 } 1092 1093 e = t4_l2t_get(pi, ifp, dst); 1094 return (e); 1095 } 1096 1097 static int 1098 send_synack(struct adapter *sc, struct synq_entry *synqe, uint64_t opt0, 1099 uint32_t opt2, int tid) 1100 { 1101 struct wrqe *wr; 1102 struct cpl_pass_accept_rpl *rpl; 1103 struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx]; 1104 1105 wr = alloc_wrqe(is_t4(sc) ? sizeof(struct cpl_pass_accept_rpl) : 1106 sizeof(struct cpl_t5_pass_accept_rpl), &sc->sge.ctrlq[0]); 1107 if (wr == NULL) 1108 return (ENOMEM); 1109 rpl = wrtod(wr); 1110 1111 if (is_t4(sc)) 1112 INIT_TP_WR_MIT_CPL(rpl, CPL_PASS_ACCEPT_RPL, tid); 1113 else { 1114 struct cpl_t5_pass_accept_rpl *rpl5 = (void *)rpl; 1115 1116 INIT_TP_WR_MIT_CPL(rpl5, CPL_PASS_ACCEPT_RPL, tid); 1117 rpl5->iss = htobe32(synqe->iss); 1118 } 1119 rpl->opt0 = opt0; 1120 rpl->opt2 = opt2; 1121 1122 return (t4_l2t_send(sc, wr, e)); 1123 } 1124 1125 #define REJECT_PASS_ACCEPT_REQ(tunnel) do { \ 1126 if (!tunnel) { \ 1127 m_freem(m); \ 1128 m = NULL; \ 1129 } \ 1130 reject_reason = __LINE__; \ 1131 goto reject; \ 1132 } while (0) 1133 1134 /* 1135 * The context associated with a tid entry via insert_tid could be a synq_entry 1136 * or a toepcb. The only way CPL handlers can tell is via a bit in these flags. 1137 */ 1138 CTASSERT(offsetof(struct toepcb, flags) == offsetof(struct synq_entry, flags)); 1139 1140 /* 1141 * Incoming SYN on a listening socket. 1142 * 1143 * XXX: Every use of ifp in this routine has a bad race with up/down, toe/-toe, 1144 * etc. 1145 */ 1146 static int 1147 do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss, 1148 struct mbuf *m) 1149 { 1150 struct adapter *sc = iq->adapter; 1151 struct toedev *tod; 1152 const struct cpl_pass_accept_req *cpl = mtod(m, const void *); 1153 unsigned int stid = G_PASS_OPEN_TID(be32toh(cpl->tos_stid)); 1154 unsigned int tid = GET_TID(cpl); 1155 struct listen_ctx *lctx = lookup_stid(sc, stid); 1156 struct inpcb *inp; 1157 struct socket *so; 1158 struct in_conninfo inc; 1159 struct tcphdr th; 1160 struct tcpopt to; 1161 struct port_info *pi; 1162 struct vi_info *vi; 1163 struct ifnet *hw_ifp, *ifp; 1164 struct l2t_entry *e = NULL; 1165 struct synq_entry *synqe = NULL; 1166 int reject_reason, v, ntids; 1167 uint16_t vid, l2info; 1168 struct epoch_tracker et; 1169 #ifdef INVARIANTS 1170 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1171 #endif 1172 struct offload_settings settings; 1173 uint8_t iptos; 1174 1175 KASSERT(opcode == CPL_PASS_ACCEPT_REQ, 1176 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1177 KASSERT(lctx->stid == stid, ("%s: lctx stid mismatch", __func__)); 1178 1179 CTR4(KTR_CXGBE, "%s: stid %u, tid %u, lctx %p", __func__, stid, tid, 1180 lctx); 1181 1182 CURVNET_SET(lctx->vnet); /* before any potential REJECT */ 1183 1184 /* 1185 * Use the MAC index to lookup the associated VI. If this SYN didn't 1186 * match a perfect MAC filter, punt. 1187 */ 1188 l2info = be16toh(cpl->l2info); 1189 pi = sc->port[G_SYN_INTF(l2info)]; 1190 if (!(l2info & F_SYN_XACT_MATCH)) { 1191 REJECT_PASS_ACCEPT_REQ(false); 1192 } 1193 for_each_vi(pi, v, vi) { 1194 if (vi->xact_addr_filt == G_SYN_MAC_IDX(l2info)) 1195 goto found; 1196 } 1197 REJECT_PASS_ACCEPT_REQ(false); 1198 found: 1199 hw_ifp = vi->ifp; /* the cxgbe ifnet */ 1200 m->m_pkthdr.rcvif = hw_ifp; 1201 tod = TOEDEV(hw_ifp); 1202 1203 /* 1204 * Don't offload if the peer requested a TCP option that's not known to 1205 * the silicon. Send the SYN to the kernel instead. 1206 */ 1207 if (__predict_false(cpl->tcpopt.unknown)) 1208 REJECT_PASS_ACCEPT_REQ(true); 1209 1210 /* 1211 * Figure out if there is a pseudo interface (vlan, lagg, etc.) 1212 * involved. Don't offload if the SYN had a VLAN tag and the vid 1213 * doesn't match anything on this interface. 1214 * 1215 * XXX: lagg support, lagg + vlan support. 1216 */ 1217 vid = EVL_VLANOFTAG(be16toh(cpl->vlan)); 1218 if (vid != 0xfff && vid != 0) { 1219 ifp = VLAN_DEVAT(hw_ifp, vid); 1220 if (ifp == NULL) 1221 REJECT_PASS_ACCEPT_REQ(true); 1222 } else 1223 ifp = hw_ifp; 1224 1225 /* 1226 * Don't offload if the ifnet that the SYN came in on is not in the same 1227 * vnet as the listening socket. 1228 */ 1229 if (lctx->vnet != ifp->if_vnet) 1230 REJECT_PASS_ACCEPT_REQ(true); 1231 1232 pass_accept_req_to_protohdrs(sc, m, &inc, &th, &iptos); 1233 if (inc.inc_flags & INC_ISIPV6) { 1234 1235 /* Don't offload if the ifcap isn't enabled */ 1236 if ((ifp->if_capenable & IFCAP_TOE6) == 0) 1237 REJECT_PASS_ACCEPT_REQ(true); 1238 1239 /* 1240 * SYN must be directed to an IP6 address on this ifnet. This 1241 * is more restrictive than in6_localip. 1242 */ 1243 NET_EPOCH_ENTER(et); 1244 if (!in6_ifhasaddr(ifp, &inc.inc6_laddr)) { 1245 NET_EPOCH_EXIT(et); 1246 REJECT_PASS_ACCEPT_REQ(true); 1247 } 1248 1249 ntids = 2; 1250 } else { 1251 1252 /* Don't offload if the ifcap isn't enabled */ 1253 if ((ifp->if_capenable & IFCAP_TOE4) == 0) 1254 REJECT_PASS_ACCEPT_REQ(true); 1255 1256 /* 1257 * SYN must be directed to an IP address on this ifnet. This 1258 * is more restrictive than in_localip. 1259 */ 1260 NET_EPOCH_ENTER(et); 1261 if (!in_ifhasaddr(ifp, inc.inc_laddr)) { 1262 NET_EPOCH_EXIT(et); 1263 REJECT_PASS_ACCEPT_REQ(true); 1264 } 1265 1266 ntids = 1; 1267 } 1268 1269 e = get_l2te_for_nexthop(pi, ifp, &inc); 1270 if (e == NULL) { 1271 NET_EPOCH_EXIT(et); 1272 REJECT_PASS_ACCEPT_REQ(true); 1273 } 1274 1275 /* Don't offload if the 4-tuple is already in use */ 1276 if (toe_4tuple_check(&inc, &th, ifp) != 0) { 1277 NET_EPOCH_EXIT(et); 1278 REJECT_PASS_ACCEPT_REQ(false); 1279 } 1280 1281 inp = lctx->inp; /* listening socket, not owned by TOE */ 1282 INP_WLOCK(inp); 1283 1284 /* Don't offload if the listening socket has closed */ 1285 if (__predict_false(inp->inp_flags & INP_DROPPED)) { 1286 INP_WUNLOCK(inp); 1287 NET_EPOCH_EXIT(et); 1288 REJECT_PASS_ACCEPT_REQ(false); 1289 } 1290 so = inp->inp_socket; 1291 rw_rlock(&sc->policy_lock); 1292 settings = *lookup_offload_policy(sc, OPEN_TYPE_PASSIVE, m, 1293 EVL_MAKETAG(0xfff, 0, 0), inp); 1294 rw_runlock(&sc->policy_lock); 1295 if (!settings.offload) { 1296 INP_WUNLOCK(inp); 1297 NET_EPOCH_EXIT(et); 1298 REJECT_PASS_ACCEPT_REQ(true); /* Rejected by COP. */ 1299 } 1300 1301 synqe = alloc_synqe(sc, lctx, M_NOWAIT); 1302 if (synqe == NULL) { 1303 INP_WUNLOCK(inp); 1304 NET_EPOCH_EXIT(et); 1305 REJECT_PASS_ACCEPT_REQ(true); 1306 } 1307 MPASS(rss->hash_type == RSS_HASH_TCP); 1308 synqe->rss_hash = be32toh(rss->hash_val); 1309 atomic_store_int(&synqe->ok_to_respond, 0); 1310 1311 init_conn_params(vi, &settings, &inc, so, &cpl->tcpopt, e->idx, 1312 &synqe->params); 1313 1314 /* 1315 * If all goes well t4_syncache_respond will get called during 1316 * syncache_add. Note that syncache_add releases the pcb lock. 1317 */ 1318 t4opt_to_tcpopt(&cpl->tcpopt, &to); 1319 toe_syncache_add(&inc, &to, &th, inp, tod, synqe, iptos); 1320 1321 if (atomic_load_int(&synqe->ok_to_respond) > 0) { 1322 uint64_t opt0; 1323 uint32_t opt2; 1324 1325 opt0 = calc_options0(vi, &synqe->params); 1326 opt2 = calc_options2(vi, &synqe->params); 1327 1328 insert_tid(sc, tid, synqe, ntids); 1329 synqe->tid = tid; 1330 synqe->syn = m; 1331 m = NULL; 1332 1333 if (send_synack(sc, synqe, opt0, opt2, tid) != 0) { 1334 remove_tid(sc, tid, ntids); 1335 m = synqe->syn; 1336 synqe->syn = NULL; 1337 NET_EPOCH_EXIT(et); 1338 REJECT_PASS_ACCEPT_REQ(true); 1339 } 1340 1341 CTR6(KTR_CXGBE, 1342 "%s: stid %u, tid %u, synqe %p, opt0 %#016lx, opt2 %#08x", 1343 __func__, stid, tid, synqe, be64toh(opt0), be32toh(opt2)); 1344 } else { 1345 NET_EPOCH_EXIT(et); 1346 REJECT_PASS_ACCEPT_REQ(false); 1347 } 1348 1349 NET_EPOCH_EXIT(et); 1350 CURVNET_RESTORE(); 1351 return (0); 1352 reject: 1353 CURVNET_RESTORE(); 1354 CTR4(KTR_CXGBE, "%s: stid %u, tid %u, REJECT (%d)", __func__, stid, tid, 1355 reject_reason); 1356 1357 if (e) 1358 t4_l2t_release(e); 1359 release_tid(sc, tid, lctx->ctrlq); 1360 if (synqe) { 1361 inp = synqe->lctx->inp; 1362 INP_WLOCK(inp); 1363 inp = release_synqe(sc, synqe); 1364 if (inp) 1365 INP_WUNLOCK(inp); 1366 } 1367 1368 if (m) { 1369 /* 1370 * The connection request hit a TOE listener but is being passed 1371 * on to the kernel sw stack instead of getting offloaded. 1372 */ 1373 m_adj(m, sizeof(*cpl)); 1374 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 1375 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1376 m->m_pkthdr.csum_data = 0xffff; 1377 hw_ifp->if_input(hw_ifp, m); 1378 } 1379 1380 return (reject_reason); 1381 } 1382 1383 static void 1384 synqe_to_protohdrs(struct adapter *sc, struct synq_entry *synqe, 1385 const struct cpl_pass_establish *cpl, struct in_conninfo *inc, 1386 struct tcphdr *th, struct tcpopt *to) 1387 { 1388 uint16_t tcp_opt = be16toh(cpl->tcp_opt); 1389 uint8_t iptos; 1390 1391 /* start off with the original SYN */ 1392 pass_accept_req_to_protohdrs(sc, synqe->syn, inc, th, &iptos); 1393 1394 /* modify parts to make it look like the ACK to our SYN|ACK */ 1395 th->th_flags = TH_ACK; 1396 th->th_ack = synqe->iss + 1; 1397 th->th_seq = be32toh(cpl->rcv_isn); 1398 bzero(to, sizeof(*to)); 1399 if (G_TCPOPT_TSTAMP(tcp_opt)) { 1400 to->to_flags |= TOF_TS; 1401 to->to_tsecr = synqe->ts; 1402 } 1403 } 1404 1405 static int 1406 do_pass_establish(struct sge_iq *iq, const struct rss_header *rss, 1407 struct mbuf *m) 1408 { 1409 struct adapter *sc = iq->adapter; 1410 struct vi_info *vi; 1411 struct ifnet *ifp; 1412 const struct cpl_pass_establish *cpl = (const void *)(rss + 1); 1413 #if defined(KTR) || defined(INVARIANTS) 1414 unsigned int stid = G_PASS_OPEN_TID(be32toh(cpl->tos_stid)); 1415 #endif 1416 unsigned int tid = GET_TID(cpl); 1417 struct synq_entry *synqe = lookup_tid(sc, tid); 1418 struct listen_ctx *lctx = synqe->lctx; 1419 struct inpcb *inp = lctx->inp, *new_inp; 1420 struct socket *so; 1421 struct tcphdr th; 1422 struct tcpopt to; 1423 struct in_conninfo inc; 1424 struct toepcb *toep; 1425 struct epoch_tracker et; 1426 #ifdef INVARIANTS 1427 unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl))); 1428 #endif 1429 1430 KASSERT(opcode == CPL_PASS_ESTABLISH, 1431 ("%s: unexpected opcode 0x%x", __func__, opcode)); 1432 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1433 KASSERT(lctx->stid == stid, ("%s: lctx stid mismatch", __func__)); 1434 KASSERT(synqe->flags & TPF_SYNQE, 1435 ("%s: tid %u (ctx %p) not a synqe", __func__, tid, synqe)); 1436 1437 CURVNET_SET(lctx->vnet); 1438 NET_EPOCH_ENTER(et); /* for syncache_expand */ 1439 INP_WLOCK(inp); 1440 1441 CTR6(KTR_CXGBE, 1442 "%s: stid %u, tid %u, synqe %p (0x%x), inp_flags 0x%x", 1443 __func__, stid, tid, synqe, synqe->flags, inp->inp_flags); 1444 1445 ifp = synqe->syn->m_pkthdr.rcvif; 1446 vi = ifp->if_softc; 1447 KASSERT(vi->adapter == sc, 1448 ("%s: vi %p, sc %p mismatch", __func__, vi, sc)); 1449 1450 if (__predict_false(inp->inp_flags & INP_DROPPED)) { 1451 reset: 1452 send_reset_synqe(TOEDEV(ifp), synqe); 1453 INP_WUNLOCK(inp); 1454 NET_EPOCH_EXIT(et); 1455 CURVNET_RESTORE(); 1456 return (0); 1457 } 1458 1459 KASSERT(synqe->params.rxq_idx == iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0], 1460 ("%s: CPL arrived on unexpected rxq. %d %d", __func__, 1461 synqe->params.rxq_idx, 1462 (int)(iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0]))); 1463 1464 toep = alloc_toepcb(vi, M_NOWAIT); 1465 if (toep == NULL) 1466 goto reset; 1467 toep->tid = tid; 1468 toep->l2te = &sc->l2t->l2tab[synqe->params.l2t_idx]; 1469 toep->vnet = lctx->vnet; 1470 bcopy(&synqe->params, &toep->params, sizeof(toep->params)); 1471 init_toepcb(vi, toep); 1472 1473 MPASS(be32toh(cpl->snd_isn) - 1 == synqe->iss); 1474 MPASS(be32toh(cpl->rcv_isn) - 1 == synqe->irs); 1475 synqe->tcp_opt = cpl->tcp_opt; 1476 synqe->toep = toep; 1477 1478 /* Come up with something that syncache_expand should be ok with. */ 1479 synqe_to_protohdrs(sc, synqe, cpl, &inc, &th, &to); 1480 if (inc.inc_flags & INC_ISIPV6) 1481 toep->ce = t4_hold_lip(sc, &inc.inc6_laddr, lctx->ce); 1482 so = inp->inp_socket; 1483 KASSERT(so != NULL, ("%s: socket is NULL", __func__)); 1484 1485 if (!toe_syncache_expand(&inc, &to, &th, &so) || so == NULL) { 1486 free_toepcb(toep); 1487 goto reset; 1488 } 1489 1490 /* New connection inpcb is already locked by syncache_expand(). */ 1491 new_inp = sotoinpcb(so); 1492 INP_WLOCK_ASSERT(new_inp); 1493 MPASS(so->so_vnet == lctx->vnet); 1494 1495 /* 1496 * This is for expansion from syncookies. 1497 * 1498 * XXX: we've held the tcbinfo lock throughout so there's no risk of 1499 * anyone accept'ing a connection before we've installed our hooks, but 1500 * this somewhat defeats the purpose of having a tod_offload_socket :-( 1501 */ 1502 if (__predict_false(!(synqe->flags & TPF_SYNQE_EXPANDED))) { 1503 tcp_timer_activate(intotcpcb(new_inp), TT_KEEP, 0); 1504 t4_offload_socket(TOEDEV(ifp), synqe, so); 1505 } 1506 1507 INP_WUNLOCK(new_inp); 1508 1509 /* Done with the synqe */ 1510 inp = release_synqe(sc, synqe); 1511 if (inp != NULL) 1512 INP_WUNLOCK(inp); 1513 NET_EPOCH_EXIT(et); 1514 CURVNET_RESTORE(); 1515 1516 return (0); 1517 } 1518 1519 void 1520 t4_init_listen_cpl_handlers(void) 1521 { 1522 1523 t4_register_cpl_handler(CPL_PASS_OPEN_RPL, do_pass_open_rpl); 1524 t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl); 1525 t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req); 1526 t4_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish); 1527 } 1528 1529 void 1530 t4_uninit_listen_cpl_handlers(void) 1531 { 1532 1533 t4_register_cpl_handler(CPL_PASS_OPEN_RPL, NULL); 1534 t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, NULL); 1535 t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, NULL); 1536 t4_register_cpl_handler(CPL_PASS_ESTABLISH, NULL); 1537 } 1538 #endif 1539