/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
#include "tom/t4_tls.h"

static struct protosw toe_protosw;
static struct protosw toe6_protosw;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);

struct toepcb *
alloc_toepcb(struct vi_info *vi, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to
	 * worry about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);
	/*
	 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte
	 * immediate payload, and the firmware counts tx work request credits
	 * in units of 16 bytes.  Calculate the maximum number of work
	 * requests possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);

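	/*
	 * The txsd array at the tail of the toepcb is sized for the worst
	 * case: one software descriptor per smallest-possible work request.
	 */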
	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tid = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);

	return (toep);
}

/*
 * Initialize a toepcb after its params have been filled out.
 */
int
init_toepcb(struct vi_info *vi, struct toepcb *toep)
{
	struct conn_params *cp = &toep->params;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tx_cl_rl_params *tc;

	if (cp->tc_idx >= 0 && cp->tc_idx < sc->params.nsched_cls) {
		tc = &pi->sched_params->cl_rl[cp->tc_idx];
		mtx_lock(&sc->tc_lock);
		if (tc->state != CS_HW_CONFIGURED) {
			CH_ERR(vi, "tid %d cannot be bound to traffic class %d "
			    "because it is not configured (its state is %d)\n",
			    toep->tid, cp->tc_idx, tc->state);
			cp->tc_idx = -1;
		} else {
			tc->refcount++;
		}
		mtx_unlock(&sc->tc_lock);
	}
	toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
	toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];

	tls_init_toep(toep);
	MPASS(ulp_mode(toep) != ULP_MODE_TCPDDP);

	toep->flags |= TPF_INITIALIZED;

	return (0);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

	refcount_acquire(&toep->refcount);
	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	if (toep->flags & TPF_INITIALIZED) {
		if (ulp_mode(toep) == ULP_MODE_TCPDDP)
			ddp_uninit_toep(toep);
		tls_uninit_toep(toep);
	}
	free(toep, M_CXGBE);
}

/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (inp->inp_vflag & INP_IPV6)
		so->so_proto = &toe6_protosw;
	else
		so->so_proto = &toe_protosw;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

void
restore_so_proto(struct socket *so, bool v6)
{
	if (v6)
		so->so_proto = &tcp6_protosw;
	else
		so->so_proto = &tcp_protosw;
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	restore_so_proto(so, inp->inp_vflag & INP_IPV6);
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	/*
	 * These queues should have been emptied at approximately the same time
	 * that a normal connection's socket's so_snd would have been purged or
	 * drained.  Do _not_ clean up here.
	 */
	MPASS(mbufq_empty(&toep->ulp_pduq));
	MPASS(mbufq_empty(&toep->ulp_pdu_reclaimq));
#ifdef INVARIANTS
	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
		ddp_assert_empty(toep);
#endif
	MPASS(TAILQ_EMPTY(&toep->aiotx_jobq));

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid, toep->ce ? 2 : 1);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		t4_release_clip_entry(sc, toep->ce);

	if (toep->params.tc_idx != -1)
		t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->params.tc_idx);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}

/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook the
 * toepcb hanging off of it.  If the TOE driver is also done with the toepcb (no
 * pending CPL) then it is time to release all resources tied to the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tptoinpcb(tp);
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		if (tp->t_state != TCPS_ESTABLISHED)
			break;
		toep->params.nagle = tp->t_flags & TF_NODELAY ? 0 : 1;
		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(toep->params.nagle), 0, 0);
		break;
	default:
		break;
	}
}

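/*
 * The 64-bit TCB t_flags straddles a flit boundary in the reply image: the
 * upper 32 bits are the low half of flit 14 and the lower 32 bits are the
 * high half of flit 15, hence the splice below.
 */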
static inline uint64_t
get_tcb_tflags(const uint64_t *tcb)
{

	return ((be64toh(tcb[14]) << 32) | (be64toh(tcb[15]) >> 32));
}

static inline uint32_t
get_tcb_field(const uint64_t *tcb, u_int word, uint32_t mask, u_int shift)
{
#define LAST_WORD ((TCB_SIZE / 4) - 1)
	uint64_t t1, t2;
	int flit_idx;

	MPASS(mask != 0);
	MPASS(word <= LAST_WORD);
	MPASS(shift < 32);

	flit_idx = (LAST_WORD - word) / 2;
	if (word & 0x1)
		shift += 32;
	t1 = be64toh(tcb[flit_idx]) >> shift;
	t2 = 0;
	if (fls(mask) > 64 - shift) {
		/*
		 * Will spill over into the next logical flit, which is the flit
		 * before this one.  The flit_idx before this one must be valid.
		 */
		MPASS(flit_idx > 0);
		t2 = be64toh(tcb[flit_idx - 1]) << (64 - shift);
	}
	return ((t2 | t1) & mask);
#undef LAST_WORD
}
#define GET_TCB_FIELD(tcb, F) \
    get_tcb_field(tcb, W_TCB_##F, M_TCB_##F, S_TCB_##F)
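/*
 * Worked example for the flit arithmetic above, assuming the usual 128-byte
 * TCB (words 0..31): word 5 lives in flit (31 - 5) / 2 = 13, and being an
 * odd-numbered word it occupies the upper half of that flit, so the shift
 * is bumped by 32.  Typical use extracts one named field from a raw image:
 *
 *	u_int state = GET_TCB_FIELD(tcb, T_STATE);
 *	u_int srtt = GET_TCB_FIELD(tcb, T_SRTT);
 */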

/*
 * Issues a CPL_GET_TCB to read the entire TCB for the tid.
 */
static int
send_get_tcb(struct adapter *sc, u_int tid)
{
	struct cpl_get_tcb *cpl;
	struct wrq_cookie cookie;

	MPASS(tid >= sc->tids.tid_base);
	MPASS(tid - sc->tids.tid_base < sc->tids.ntids);

	cpl = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*cpl), 16),
	    &cookie);
	if (__predict_false(cpl == NULL))
		return (ENOMEM);
	bzero(cpl, sizeof(*cpl));
	INIT_TP_WR(cpl, tid);
	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
	cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
	    V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
	cpl->cookie = 0xff;
	commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);

	return (0);
}

static struct tcb_histent *
alloc_tcb_histent(struct adapter *sc, u_int tid, int flags)
{
	struct tcb_histent *te;

	MPASS(flags == M_NOWAIT || flags == M_WAITOK);

	te = malloc(sizeof(*te), M_CXGBE, M_ZERO | flags);
	if (te == NULL)
		return (NULL);
	mtx_init(&te->te_lock, "TCB entry", NULL, MTX_DEF);
	callout_init_mtx(&te->te_callout, &te->te_lock, 0);
	te->te_adapter = sc;
	te->te_tid = tid;

	return (te);
}

static void
free_tcb_histent(struct tcb_histent *te)
{

	mtx_destroy(&te->te_lock);
	free(te, M_CXGBE);
}

/*
 * Start tracking the tid in the TCB history.
 */
int
add_tid_to_history(struct adapter *sc, u_int tid)
{
	struct tcb_histent *te = NULL;
	struct tom_data *td = sc->tom_softc;
	int rc;

	MPASS(tid >= sc->tids.tid_base);
	MPASS(tid - sc->tids.tid_base < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (ENXIO);

	rw_wlock(&td->tcb_history_lock);
	if (td->tcb_history[tid] != NULL) {
		rc = EEXIST;
		goto done;
	}
	te = alloc_tcb_histent(sc, tid, M_NOWAIT);
	if (te == NULL) {
		rc = ENOMEM;
		goto done;
	}
	mtx_lock(&te->te_lock);
	rc = send_get_tcb(sc, tid);
	if (rc == 0) {
		te->te_flags |= TE_RPL_PENDING;
		td->tcb_history[tid] = te;
	} else {
		free(te, M_CXGBE);
	}
	mtx_unlock(&te->te_lock);
done:
	rw_wunlock(&td->tcb_history_lock);
	return (rc);
}

static void
remove_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	rw_assert(&td->tcb_history_lock, RA_WLOCKED);
	mtx_assert(&te->te_lock, MA_OWNED);
	MPASS(td->tcb_history[te->te_tid] == te);

	td->tcb_history[te->te_tid] = NULL;
	free_tcb_histent(te);
	rw_wunlock(&td->tcb_history_lock);
}

static inline struct tcb_histent *
lookup_tcb_histent(struct adapter *sc, u_int tid, bool addrem)
{
	struct tcb_histent *te;
	struct tom_data *td = sc->tom_softc;

	MPASS(tid >= sc->tids.tid_base);
	MPASS(tid - sc->tids.tid_base < sc->tids.ntids);

	if (td->tcb_history == NULL)
		return (NULL);

	if (addrem)
		rw_wlock(&td->tcb_history_lock);
	else
		rw_rlock(&td->tcb_history_lock);
	te = td->tcb_history[tid];
	if (te != NULL) {
		mtx_lock(&te->te_lock);
		return (te);	/* with both locks held */
	}
	if (addrem)
		rw_wunlock(&td->tcb_history_lock);
	else
		rw_runlock(&td->tcb_history_lock);

	return (te);
}

static inline void
release_tcb_histent(struct tcb_histent *te)
{
	struct adapter *sc = te->te_adapter;
	struct tom_data *td = sc->tom_softc;

	mtx_assert(&te->te_lock, MA_OWNED);
	mtx_unlock(&te->te_lock);
	rw_assert(&td->tcb_history_lock, RA_RLOCKED);
	rw_runlock(&td->tcb_history_lock);
}
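/*
 * Canonical pairing of the two helpers above (a sketch; this mirrors the
 * read-only use in fill_tcp_info below).  A successful lookup returns with
 * the history lock and the entry lock held; release drops both.
 *
 *	te = lookup_tcb_histent(sc, tid, false);
 *	if (te != NULL) {
 *		... read te->te_tcb / te->te_sample ...
 *		release_tcb_histent(te);
 *	}
 */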

static void
request_tcb(void *arg)
{
	struct tcb_histent *te = arg;

	mtx_assert(&te->te_lock, MA_OWNED);

	/* No one else is supposed to update the histent. */
	MPASS(!(te->te_flags & TE_RPL_PENDING));
	if (send_get_tcb(te->te_adapter, te->te_tid) == 0)
		te->te_flags |= TE_RPL_PENDING;
	else
		callout_schedule(&te->te_callout, hz / 100);
}

static void
update_tcb_histent(struct tcb_histent *te, const uint64_t *tcb)
{
	struct tom_data *td = te->te_adapter->tom_softc;
	uint64_t tflags = get_tcb_tflags(tcb);
	uint8_t sample = 0;

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != GET_TCB_FIELD(tcb, SND_UNA_RAW)) {
		if (GET_TCB_FIELD(tcb, T_RXTSHIFT) != 0)
			sample |= TS_RTO;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) != 0)
			sample |= TS_DUPACKS;
		if (GET_TCB_FIELD(tcb, T_DUPACKS) >= td->dupack_threshold)
			sample |= TS_FASTREXMT;
	}

	if (GET_TCB_FIELD(tcb, SND_MAX_RAW) != 0) {
		uint32_t snd_wnd;

		sample |= TS_SND_BACKLOGGED;	/* for whatever reason. */

		snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
		if (tflags & V_TF_RECV_SCALE(1))
			snd_wnd <<= GET_TCB_FIELD(tcb, RCV_SCALE);
		if (GET_TCB_FIELD(tcb, SND_CWND) < snd_wnd)
			sample |= TS_CWND_LIMITED;	/* maybe due to CWND */
	}

	if (tflags & V_TF_CCTRL_ECN(1)) {

		/*
		 * CE marker on incoming IP hdr, echoing ECE back in the TCP
		 * hdr.  Indicates congestion somewhere on the way from the peer
		 * to this node.
		 */
		if (tflags & V_TF_CCTRL_ECE(1))
			sample |= TS_ECN_ECE;

		/*
		 * ECE seen and CWR sent (or about to be sent).  Might indicate
		 * congestion on the way to the peer.  This node is reducing its
		 * congestion window in response.
		 */
		if (tflags & (V_TF_CCTRL_CWR(1) | V_TF_CCTRL_RFR(1)))
			sample |= TS_ECN_CWR;
	}

	te->te_sample[te->te_pidx] = sample;
	if (++te->te_pidx == nitems(te->te_sample))
		te->te_pidx = 0;
	memcpy(te->te_tcb, tcb, TCB_SIZE);
	te->te_flags |= TE_ACTIVE;
}

static int
do_get_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_get_tcb_rpl *cpl = mtod(m, const void *);
	const uint64_t *tcb = (const uint64_t *)(const void *)(cpl + 1);
	struct tcb_histent *te;
	const u_int tid = GET_TID(cpl);
	bool remove;

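	/* A tid that has reached CLOSED is dropped from the history. */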
	remove = GET_TCB_FIELD(tcb, T_STATE) == TCPS_CLOSED;
	te = lookup_tcb_histent(sc, tid, remove);
	if (te == NULL) {
		/* Not in the history.  Who issued the GET_TCB for this? */
		device_printf(sc->dev, "tcb %u: flags 0x%016jx, state %u, "
		    "srtt %u, sscale %u, rscale %u, cookie 0x%x\n", tid,
		    (uintmax_t)get_tcb_tflags(tcb), GET_TCB_FIELD(tcb, T_STATE),
		    GET_TCB_FIELD(tcb, T_SRTT), GET_TCB_FIELD(tcb, SND_SCALE),
		    GET_TCB_FIELD(tcb, RCV_SCALE), cpl->cookie);
		goto done;
	}

	MPASS(te->te_flags & TE_RPL_PENDING);
	te->te_flags &= ~TE_RPL_PENDING;
	if (remove) {
		remove_tcb_histent(te);
	} else {
		update_tcb_histent(te, tcb);
		callout_reset(&te->te_callout, hz / 10, request_tcb, te);
		release_tcb_histent(te);
	}
done:
	m_freem(m);
	return (0);
}

static void
fill_tcp_info_from_tcb(struct adapter *sc, uint64_t *tcb, struct tcp_info *ti)
{
	uint32_t v;

	ti->tcpi_state = GET_TCB_FIELD(tcb, T_STATE);

	v = GET_TCB_FIELD(tcb, T_SRTT);
	ti->tcpi_rtt = tcp_ticks_to_us(sc, v);

	v = GET_TCB_FIELD(tcb, T_RTTVAR);
	ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);

	ti->tcpi_snd_ssthresh = GET_TCB_FIELD(tcb, SND_SSTHRESH);
	ti->tcpi_snd_cwnd = GET_TCB_FIELD(tcb, SND_CWND);
	ti->tcpi_rcv_nxt = GET_TCB_FIELD(tcb, RCV_NXT);
	ti->tcpi_rcv_adv = GET_TCB_FIELD(tcb, RCV_ADV);
	ti->tcpi_dupacks = GET_TCB_FIELD(tcb, T_DUPACKS);

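	/*
	 * The *_RAW fields are kept as offsets below TX_MAX, so subtracting
	 * them from TX_MAX recovers the absolute sequence numbers.
	 */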
	v = GET_TCB_FIELD(tcb, TX_MAX);
	ti->tcpi_snd_nxt = v - GET_TCB_FIELD(tcb, SND_NXT_RAW);
	ti->tcpi_snd_una = v - GET_TCB_FIELD(tcb, SND_UNA_RAW);
	ti->tcpi_snd_max = v - GET_TCB_FIELD(tcb, SND_MAX_RAW);

	/* Receive window being advertised by us. */
	ti->tcpi_rcv_wscale = GET_TCB_FIELD(tcb, SND_SCALE);	/* Yes, SND. */
	ti->tcpi_rcv_space = GET_TCB_FIELD(tcb, RCV_WND);

	/* Send window */
	ti->tcpi_snd_wscale = GET_TCB_FIELD(tcb, RCV_SCALE);	/* Yes, RCV. */
	ti->tcpi_snd_wnd = GET_TCB_FIELD(tcb, RCV_ADV);
	if (get_tcb_tflags(tcb) & V_TF_RECV_SCALE(1))
		ti->tcpi_snd_wnd <<= ti->tcpi_snd_wscale;
	else
		ti->tcpi_snd_wscale = 0;
}

static void
fill_tcp_info_from_history(struct adapter *sc, struct tcb_histent *te,
    struct tcp_info *ti)
{

	fill_tcp_info_from_tcb(sc, te->te_tcb, ti);
}

/*
 * Reads the TCB for the given tid using a memory window and copies it to 'buf'
 * in the same format as CPL_GET_TCB_RPL.
 */
static void
read_tcb_using_memwin(struct adapter *sc, u_int tid, uint64_t *buf)
{
	int i, j, k, rc;
	uint32_t addr;
	u_char *tcb, tmp;

	MPASS(tid >= sc->tids.tid_base);
	MPASS(tid - sc->tids.tid_base < sc->tids.ntids);

	addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
	rc = read_via_memwin(sc, 2, addr, (uint32_t *)buf, TCB_SIZE);
	if (rc != 0)
		return;

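	/*
	 * The TCB image read through the memory window is in the opposite
	 * 16-byte-chunk order from a CPL_GET_TCB_RPL payload, so reverse the
	 * buffer one 16-byte chunk at a time to match what the callers expect.
	 */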
	tcb = (u_char *)buf;
	for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
		for (k = 0; k < 16; k++) {
			tmp = tcb[i + k];
			tcb[i + k] = tcb[j + k];
			tcb[j + k] = tmp;
		}
	}
}

static void
fill_tcp_info(struct adapter *sc, u_int tid, struct tcp_info *ti)
{
	uint64_t tcb[TCB_SIZE / sizeof(uint64_t)];
	struct tcb_histent *te;

	ti->tcpi_toe_tid = tid;
	te = lookup_tcb_histent(sc, tid, false);
	if (te != NULL) {
		fill_tcp_info_from_history(sc, te, ti);
		release_tcb_histent(te);
	} else {
		if (!(sc->debug_flags & DF_DISABLE_TCB_CACHE)) {
			/* XXX: tell firmware to flush TCB cache. */
		}
		read_tcb_using_memwin(sc, tid, tcb);
		fill_tcp_info_from_tcb(sc, tcb, ti);
	}
}

/*
 * Called by the kernel to allow the TOE driver to "refine" the values filled
 * in the tcp_info for an offloaded connection.
 */
static void
t4_tcp_info(struct toedev *tod, const struct tcpcb *tp, struct tcp_info *ti)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	INP_LOCK_ASSERT(tptoinpcb(tp));
	MPASS(ti != NULL);

	fill_tcp_info(sc, toep->tid, ti);
}

#ifdef KERN_TLS
static int
t4_alloc_tls_session(struct toedev *tod, struct tcpcb *tp,
    struct ktls_session *tls, int direction)
{
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(tptoinpcb(tp));
	MPASS(tls != NULL);

	return (tls_alloc_ktls(toep, tls, direction));
}
#endif

static void
send_mss_flowc_wr(struct adapter *sc, struct toepcb *toep)
{
	struct wrq_cookie cookie;
	struct fw_flowc_wr *flowc;
	struct ofld_tx_sdesc *txsd;
	const int flowclen = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval);
	const int flowclen16 = howmany(flowclen, 16);

	if (toep->tx_credits < flowclen16 || toep->txsd_avail == 0) {
		CH_ERR(sc, "%s: tid %u out of tx credits (%d, %d).\n", __func__,
		    toep->tid, toep->tx_credits, toep->txsd_avail);
		return;
	}

	flowc = start_wrq_wr(&toep->ofld_txq->wrq, flowclen16, &cookie);
	if (__predict_false(flowc == NULL)) {
		CH_ERR(sc, "ENOMEM in %s for tid %u.\n", __func__, toep->tid);
		return;
	}
	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(1));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(flowclen16) |
	    V_FW_WR_FLOWID(toep->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[0].val = htobe32(toep->params.emss);

	txsd = &toep->txsd[toep->txsd_pidx];
	txsd->tx_credits = flowclen16;
	txsd->plen = 0;
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;
	commit_wrq_wr(&toep->ofld_txq->wrq, flowc, &cookie);
}

static void
t4_pmtu_update(struct toedev *tod, struct tcpcb *tp, tcp_seq seq, int mtu)
{
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int idx, len;
	struct wrq_cookie cookie;
	struct inpcb *inp = tptoinpcb(tp);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);
	unsigned short *mtus = &sc->params.mtus[0];

	INP_WLOCK_ASSERT(inp);
	MPASS(mtu > 0);	/* kernel is supposed to provide something usable. */

	/* tp->snd_una and snd_max are in host byte order too. */
	seq = be32toh(seq);

	CTR6(KTR_CXGBE, "%s: tid %d, seq 0x%08x, mtu %u, mtu_idx %u (%d)",
	    __func__, toep->tid, seq, mtu, toep->params.mtu_idx,
	    mtus[toep->params.mtu_idx]);

	if (ulp_mode(toep) == ULP_MODE_NONE &&	/* XXX: Read TCB otherwise? */
	    (SEQ_LT(seq, tp->snd_una) || SEQ_GEQ(seq, tp->snd_max))) {
		CTR5(KTR_CXGBE,
		    "%s: tid %d, seq 0x%08x not in range [0x%08x, 0x%08x).",
		    __func__, toep->tid, seq, tp->snd_una, tp->snd_max);
		return;
	}

	/* Find the best mtu_idx for the suggested MTU. */
	for (idx = 0; idx < NMTUS - 1 && mtus[idx + 1] <= mtu; idx++)
		continue;
	if (idx >= toep->params.mtu_idx)
		return;	/* Never increase the PMTU (just like the kernel). */

	/*
	 * We'll send a compound work request with 2 SET_TCB_FIELDs -- the first
	 * one updates the mtu_idx and the second one triggers a retransmit.
	 */
	len = sizeof(*wrh) + 2 * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	wrh = start_wrq_wr(toep->ctrlq, howmany(len, 16), &cookie);
	if (wrh == NULL) {
		CH_ERR(sc, "failed to change mtu_idx of tid %d (%u -> %u).\n",
		    toep->tid, toep->params.mtu_idx, idx);
		return;
	}
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_MAXSEG,
	    V_TCB_T_MAXSEG(M_TCB_T_MAXSEG), V_TCB_T_MAXSEG(idx));
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TIMESTAMP,
	    V_TCB_TIMESTAMP(0x7FFFFULL << 11), 0);
	commit_wrq_wr(toep->ctrlq, wrh, &cookie);

	/* Update the software toepcb and tcpcb. */
	toep->params.mtu_idx = idx;
	tp->t_maxseg = mtus[toep->params.mtu_idx];
	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		tp->t_maxseg -= sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		tp->t_maxseg -= sizeof(struct ip) + sizeof(struct tcphdr);
	toep->params.emss = tp->t_maxseg;
	if (tp->t_flags & TF_RCVD_TSTMP)
		toep->params.emss -= TCPOLEN_TSTAMP_APPA;

	/* Update the firmware flowc. */
	send_mss_flowc_wr(sc, toep);

	/* Update the MTU in the kernel's hostcache. */
	if (sc->tt.update_hc_on_pmtu_change != 0) {
		struct in_conninfo inc = {0};

		inc.inc_fibnum = inp->inp_inc.inc_fibnum;
		if (inp->inp_inc.inc_flags & INC_ISIPV6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = inp->inp_inc.inc6_faddr;
		} else {
			inc.inc_faddr = inp->inp_inc.inc_faddr;
		}
		tcp_hc_updatemtu(&inc, mtu);
	}

	CTR6(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u), t_maxseg %u, emss %u",
	    __func__, toep->tid, toep->params.mtu_idx,
	    mtus[toep->params.mtu_idx], tp->t_maxseg, toep->params.emss);
}

/*
 * The TOE driver will not receive any more CPLs for the tid associated with the
 * toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;
	bool need_wakeup;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	if (ulp_mode(toep) == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);
	toep->inp = NULL;
	need_wakeup = (toep->flags & TPF_WAITING_FOR_FINAL) != 0;
	toep->flags &= ~(TPF_CPL_PENDING | TPF_WAITING_FOR_FINAL);
	mbufq_drain(&toep->ulp_pduq);
	mbufq_drain(&toep->ulp_pdu_reclaimq);

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);

	if (need_wakeup) {
		struct mtx *lock = mtx_pool_find(mtxpool_sleep, toep);

		mtx_lock(lock);
		wakeup(toep);
		mtx_unlock(lock);
	}
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	MPASS(tid >= t->tid_base);
	MPASS(tid - t->tid_base < t->ntids);

	t->tid_tab[tid - t->tid_base] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid - t->tid_base]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

/*
 * What mtu_idx to use, given a 4-tuple.  Note that both s->mss and
 * tcp_mssopt() provide the MSS that we should advertise in our SYN.  The
 * advertised MSS doesn't account for any TCP options, so the effective MSS
 * (payload only, no headers or options) could be different.
 */
static int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
    struct offload_settings *s)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, mtu;

	MPASS(inc != NULL);

	mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
	if (inc->inc_flags & INC_ISIPV6)
		mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
		continue;

	return (i);
}
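/*
 * Worked example for the scan above (table values are illustrative only):
 * with an MTU table of { 576, 1500, 9000 }, a target mtu of 1500 stops at
 * index 1 and 1400 stops at index 0, i.e. the loop picks the largest table
 * entry that does not exceed the target.
 */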

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return (min(wnd, MAX_RCV_WND));
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}
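/*
 * Worked example: if the clamped space were 1 MiB the loop above would
 * settle on a window scale of 5, since 65535 << 4 is just under 1 MiB
 * while 65535 << 5 covers it.
 */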

__be64
calc_options0(struct vi_info *vi, struct conn_params *cp)
{
	uint64_t opt0 = 0;

	opt0 |= F_TCAM_BYPASS;

	MPASS(cp->wscale >= 0 && cp->wscale <= M_WND_SCALE);
	opt0 |= V_WND_SCALE(cp->wscale);

	MPASS(cp->mtu_idx >= 0 && cp->mtu_idx < NMTUS);
	opt0 |= V_MSS_IDX(cp->mtu_idx);

	MPASS(cp->ulp_mode >= 0 && cp->ulp_mode <= M_ULP_MODE);
	opt0 |= V_ULP_MODE(cp->ulp_mode);

	MPASS(cp->opt0_bufsize >= 0 && cp->opt0_bufsize <= M_RCV_BUFSIZ);
	opt0 |= V_RCV_BUFSIZ(cp->opt0_bufsize);

	MPASS(cp->l2t_idx >= 0 && cp->l2t_idx < vi->adapter->vres.l2t.size);
	opt0 |= V_L2T_IDX(cp->l2t_idx);

	opt0 |= V_SMAC_SEL(vi->smt_idx);
	opt0 |= V_TX_CHAN(vi->pi->tx_chan);

	MPASS(cp->keepalive == 0 || cp->keepalive == 1);
	opt0 |= V_KEEP_ALIVE(cp->keepalive);

	MPASS(cp->nagle == 0 || cp->nagle == 1);
	opt0 |= V_NAGLE(cp->nagle);

	return (htobe64(opt0));
}

__be32
calc_options2(struct vi_info *vi, struct conn_params *cp)
{
	uint32_t opt2 = 0;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	/*
	 * rx flow control, rx coalesce, congestion control, and tx pace are all
	 * explicitly set by the driver.  On T5+ the ISS is also set by the
	 * driver to the value picked by the kernel.
	 */
	if (is_t4(sc)) {
		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
	} else {
		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
	}

	MPASS(cp->sack == 0 || cp->sack == 1);
	opt2 |= V_SACK_EN(cp->sack);

	MPASS(cp->tstamp == 0 || cp->tstamp == 1);
	opt2 |= V_TSTAMPS_EN(cp->tstamp);

	if (cp->wscale > 0)
		opt2 |= F_WND_SCALE_EN;

	MPASS(cp->ecn == 0 || cp->ecn == 1);
	opt2 |= V_CCTRL_ECN(cp->ecn);

	opt2 |= V_TX_QUEUE(TX_MODQ(pi->tx_chan));
	opt2 |= V_PACE(0);
	opt2 |= F_RSS_QUEUE_VALID;
	opt2 |= V_RSS_QUEUE(sc->sge.ofld_rxq[cp->rxq_idx].iq.abs_id);
	if (chip_id(sc) <= CHELSIO_T6) {
		MPASS(pi->rx_chan == 0 || pi->rx_chan == 1);
		opt2 |= V_RX_CHANNEL(pi->rx_chan);
	}

	MPASS(cp->cong_algo >= 0 && cp->cong_algo <= M_CONG_CNTRL);
	opt2 |= V_CONG_CNTRL(cp->cong_algo);

	MPASS(cp->rx_coalesce == 0 || cp->rx_coalesce == 1);
	if (cp->rx_coalesce == 1)
		opt2 |= V_RX_COALESCE(M_RX_COALESCE);

	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);
	MPASS(cp->ulp_mode != ULP_MODE_TCPDDP);

	return (htobe32(opt2));
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
	struct adapter *sc = vi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && EVL_VLANOFTAG(e->vlan) != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0 && tp->vnic_mode == FW_VNIC_MODE_PF_VF) {
		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
		    V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
		    tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}

/*
 * Initialize various connection parameters.
 */
void
init_conn_params(struct vi_info *vi, struct offload_settings *s,
    struct in_conninfo *inc, struct socket *so,
    const struct tcp_options *tcpopt, int16_t l2t_idx, struct conn_params *cp)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct tom_tunables *tt = &sc->tt;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	u_long wnd;
	u_int q_idx;

	MPASS(s->offload != 0);

	/* Congestion control algorithm */
	if (s->cong_algo >= 0)
		cp->cong_algo = s->cong_algo & M_CONG_CNTRL;
	else if (sc->tt.cong_algorithm >= 0)
		cp->cong_algo = tt->cong_algorithm & M_CONG_CNTRL;
	else {
		struct cc_algo *cc = CC_ALGO(tp);

		if (strcasecmp(cc->name, "reno") == 0)
			cp->cong_algo = CONG_ALG_RENO;
		else if (strcasecmp(cc->name, "tahoe") == 0)
			cp->cong_algo = CONG_ALG_TAHOE;
		else if (strcasecmp(cc->name, "newreno") == 0)
			cp->cong_algo = CONG_ALG_NEWRENO;
		else if (strcasecmp(cc->name, "highspeed") == 0)
			cp->cong_algo = CONG_ALG_HIGHSPEED;
		else {
			/*
			 * Use newreno in case the algorithm selected by the
			 * host stack is not supported by the hardware.
			 */
			cp->cong_algo = CONG_ALG_NEWRENO;
		}
	}

	/* Tx traffic scheduling class. */
	if (s->sched_class >= 0 && s->sched_class < sc->params.nsched_cls)
		cp->tc_idx = s->sched_class;
	else
		cp->tc_idx = -1;

	/* Nagle's algorithm. */
	if (s->nagle >= 0)
		cp->nagle = s->nagle > 0 ? 1 : 0;
	else
		cp->nagle = tp->t_flags & TF_NODELAY ? 0 : 1;

	/* TCP Keepalive. */
	if (V_tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE)
		cp->keepalive = 1;
	else
		cp->keepalive = 0;

	/* Optimization that's specific to T5 @ 40G. */
	if (tt->tx_align >= 0)
		cp->tx_align = tt->tx_align > 0 ? 1 : 0;
	else if (chip_id(sc) == CHELSIO_T5 &&
	    (port_top_speed(pi) > 10 || sc->params.nports > 2))
		cp->tx_align = 1;
	else
		cp->tx_align = 0;

	/* ULP mode. */
	cp->ulp_mode = ULP_MODE_NONE;

	/* Rx coalescing. */
	if (s->rx_coalesce >= 0)
		cp->rx_coalesce = s->rx_coalesce > 0 ? 1 : 0;
	else if (tt->rx_coalesce >= 0)
		cp->rx_coalesce = tt->rx_coalesce > 0 ? 1 : 0;
	else
		cp->rx_coalesce = 1;	/* default */

	/*
	 * Index in the PMTU table.  This controls the MSS that we announce in
	 * our SYN initially, but after ESTABLISHED it controls the MSS that we
	 * use to send data.
	 */
	cp->mtu_idx = find_best_mtu_idx(sc, inc, s);

	/* Tx queue for this connection. */
	if (s->txq == QUEUE_RANDOM)
		q_idx = arc4random();
	else if (s->txq == QUEUE_ROUNDROBIN)
		q_idx = atomic_fetchadd_int(&vi->txq_rr, 1);
	else
		q_idx = s->txq;
	cp->txq_idx = vi->first_ofld_txq + q_idx % vi->nofldtxq;

	/* Rx queue for this connection. */
	if (s->rxq == QUEUE_RANDOM)
		q_idx = arc4random();
	else if (s->rxq == QUEUE_ROUNDROBIN)
		q_idx = atomic_fetchadd_int(&vi->rxq_rr, 1);
	else
		q_idx = s->rxq;
	cp->rxq_idx = vi->first_ofld_rxq + q_idx % vi->nofldrxq;

	if (SOLISTENING(so)) {
		/* Passive open */
		MPASS(tcpopt != NULL);

		/* TCP timestamp option */
		if (tcpopt->tstamp &&
		    (s->tstamp > 0 || (s->tstamp < 0 && V_tcp_do_rfc1323)))
			cp->tstamp = 1;
		else
			cp->tstamp = 0;

		/* SACK */
		if (tcpopt->sack &&
		    (s->sack > 0 || (s->sack < 0 && V_tcp_do_sack)))
			cp->sack = 1;
		else
			cp->sack = 0;

		/* Receive window scaling. */
		if (tcpopt->wsf > 0 && tcpopt->wsf < 15 && V_tcp_do_rfc1323)
			cp->wscale = select_rcv_wscale();
		else
			cp->wscale = 0;

		/* ECN */
		if (tcpopt->ecn &&	/* XXX: review. */
		    (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn)))
			cp->ecn = 1;
		else
			cp->ecn = 0;

		wnd = max(so->sol_sbrcv_hiwat, MIN_RCV_WND);
		cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);

		if (tt->sndbuf > 0)
			cp->sndbuf = tt->sndbuf;
		else if (so->sol_sbsnd_flags & SB_AUTOSIZE &&
		    V_tcp_do_autosndbuf)
			cp->sndbuf = 256 * 1024;
		else
			cp->sndbuf = so->sol_sbsnd_hiwat;
	} else {
		/* Active open */

		/* TCP timestamp option */
		if (s->tstamp > 0 ||
		    (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
			cp->tstamp = 1;
		else
			cp->tstamp = 0;

		/* SACK */
		if (s->sack > 0 ||
		    (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
			cp->sack = 1;
		else
			cp->sack = 0;

		/* Receive window scaling */
		if (tp->t_flags & TF_REQ_SCALE)
			cp->wscale = select_rcv_wscale();
		else
			cp->wscale = 0;

		/* ECN */
		if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
			cp->ecn = 1;
		else
			cp->ecn = 0;

		SOCKBUF_LOCK(&so->so_rcv);
		wnd = max(select_rcv_wnd(so), MIN_RCV_WND);
		SOCKBUF_UNLOCK(&so->so_rcv);
		cp->opt0_bufsize = min(wnd >> 10, M_RCV_BUFSIZ);

		if (tt->sndbuf > 0)
			cp->sndbuf = tt->sndbuf;
		else {
			SOCKBUF_LOCK(&so->so_snd);
			if (so->so_snd.sb_flags & SB_AUTOSIZE &&
			    V_tcp_do_autosndbuf)
				cp->sndbuf = 256 * 1024;
			else
				cp->sndbuf = so->so_snd.sb_hiwat;
			SOCKBUF_UNLOCK(&so->so_snd);
		}
	}

	cp->l2t_idx = l2t_idx;

	/* This will be initialized on ESTABLISHED. */
	cp->emss = 0;
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tab(struct tid_info *t, int flags)
{

	MPASS(t->ntids > 0);
	MPASS(t->tid_tab == NULL);

	t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->tid_tab == NULL)
		return (ENOMEM);
	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tab(struct tid_info *t)
{

	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));

	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;
}

static int
alloc_stid_tab(struct tid_info *t, int flags)
{

	MPASS(t->nstids > 0);
	MPASS(t->stid_tab == NULL);

	t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->stid_tab == NULL)
		return (ENOMEM);
	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	return (0);
}

static void
free_stid_tab(struct tid_info *t)
{

	KASSERT(t->stids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->stids_in_use));

	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
	free(t->stid_tab, M_CXGBE);
	t->stid_tab = NULL;
}

static void
free_tid_tabs(struct tid_info *t)
{

	free_tid_tab(t);
	free_stid_tab(t);
}

static int
alloc_tid_tabs(struct tid_info *t)
{
	int rc;

	rc = alloc_tid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_stid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	return (0);
failed:
	free_tid_tabs(t);
	return (rc);
}

static inline void
alloc_tcb_history(struct adapter *sc, struct tom_data *td)
{

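	/* The TCB history is only kept for small tid spaces (<= 1024 tids). */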
	if (sc->tids.ntids == 0 || sc->tids.ntids > 1024)
		return;
	rw_init(&td->tcb_history_lock, "TCB history");
	td->tcb_history = malloc(sc->tids.ntids * sizeof(*td->tcb_history),
	    M_CXGBE, M_ZERO | M_NOWAIT);
	td->dupack_threshold = G_DUPACKTHRESH(t4_read_reg(sc, A_TP_PARA_REG0));
}

static inline void
free_tcb_history(struct adapter *sc, struct tom_data *td)
{
#ifdef INVARIANTS
	int i;

	if (td->tcb_history != NULL) {
		for (i = 0; i < sc->tids.ntids; i++) {
			MPASS(td->tcb_history[i] == NULL);
		}
	}
#endif
	free(td->tcb_history, M_CXGBE);
	if (rw_initialized(&td->tcb_history_lock))
		rw_destroy(&td->tcb_history_lock);
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	t4_free_ppod_region(&td->pr);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tcb_history(sc, td);
	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}

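/*
 * Build a minimal Ethernet + IP/IPv6 + TCP header template for the
 * connection so that the offload policy's BPF program has a packet to match
 * against.  An active open uses the inpcb's real addresses and ports; a
 * listener only has local details, so the foreign fields are mirrored from
 * the local ones.
 */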
static char *
prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen,
    int *buflen)
{
	char *pkt;
	struct tcphdr *th;
	int ipv6, len;
	const int maxlen =
	    max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) +
	    max(sizeof(struct ip), sizeof(struct ip6_hdr)) +
	    sizeof(struct tcphdr);

	MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN);

	pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT);
	if (pkt == NULL)
		return (NULL);

	ipv6 = inp->inp_vflag & INP_IPV6;
	len = 0;

	if (EVL_VLANOFTAG(vtag) == 0xfff) {
		struct ether_header *eh = (void *)pkt;

		if (ipv6)
			eh->ether_type = htons(ETHERTYPE_IPV6);
		else
			eh->ether_type = htons(ETHERTYPE_IP);

		len += sizeof(*eh);
	} else {
		struct ether_vlan_header *evh = (void *)pkt;

		evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evh->evl_tag = htons(vtag);
		if (ipv6)
			evh->evl_proto = htons(ETHERTYPE_IPV6);
		else
			evh->evl_proto = htons(ETHERTYPE_IP);

		len += sizeof(*evh);
	}

	if (ipv6) {
		struct ip6_hdr *ip6 = (void *)&pkt[len];

		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_nxt = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = inp->in6p_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = ip6->ip6_src;
		}

		len += sizeof(*ip6);
	} else {
		struct ip *ip = (void *)&pkt[len];

		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_p = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = inp->inp_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = ip->ip_src;
		}

		len += sizeof(*ip);
	}

	th = (void *)&pkt[len];
	if (open_type == OPEN_TYPE_ACTIVE) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = inp->inp_fport;	/* ditto */
	} else if (open_type == OPEN_TYPE_LISTEN) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = th->th_sport;
	}
	len += sizeof(*th);

	*pktlen = *buflen = len;
	return (pkt);
}

const struct offload_settings *
lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m,
    uint16_t vtag, struct inpcb *inp)
{
	const struct t4_offload_policy *op;
	char *pkt;
	struct offload_rule *r;
	int i, matched, pktlen, buflen;
	static const struct offload_settings allow_offloading_settings = {
		.offload = 1,
		.rx_coalesce = -1,
		.cong_algo = -1,
		.sched_class = -1,
		.tstamp = -1,
		.sack = -1,
		.nagle = -1,
		.ecn = -1,
		.ddp = -1,
		.tls = -1,
		.txq = QUEUE_RANDOM,
		.rxq = QUEUE_RANDOM,
		.mss = -1,
	};
	static const struct offload_settings disallow_offloading_settings = {
		.offload = 0,
		/* rest is irrelevant when offload is off. */
	};

	rw_assert(&sc->policy_lock, RA_LOCKED);

	/*
	 * If there's no Connection Offloading Policy attached to the device
	 * then we need to return a default static policy.  If
	 * "cop_managed_offloading" is true, then we need to disallow
	 * offloading until a COP is attached to the device.  Otherwise we
	 * allow offloading.
	 */
	op = sc->policy;
	if (op == NULL) {
		if (sc->tt.cop_managed_offloading)
			return (&disallow_offloading_settings);
		else
			return (&allow_offloading_settings);
	}

	switch (open_type) {
	case OPEN_TYPE_ACTIVE:
	case OPEN_TYPE_LISTEN:
		pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen);
		break;
	case OPEN_TYPE_PASSIVE:
		MPASS(m != NULL);
		pkt = mtod(m, char *);
		MPASS(*pkt == CPL_PASS_ACCEPT_REQ);
		pkt += sizeof(struct cpl_pass_accept_req);
		pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
		buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
		break;
	default:
		MPASS(0);
		return (&disallow_offloading_settings);
	}

	if (pkt == NULL || pktlen == 0 || buflen == 0)
		return (&disallow_offloading_settings);

	matched = 0;
	r = &op->rule[0];
	for (i = 0; i < op->nrules; i++, r++) {
		if (r->open_type != open_type &&
		    r->open_type != OPEN_TYPE_DONTCARE) {
			continue;
		}
		matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen);
		if (matched)
			break;
	}

	if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN)
		free(pkt, M_CXGBE);

	return (matched ? &r->settings : &disallow_offloading_settings);
}

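/*
 * Clean up work requests that were never sent because L2 resolution failed.
 * Active opens are failed back to the stack and SYN-ACK replies have their
 * tid released; anything else is a leak and is left for a debugger.
 */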
static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid, tid;
	struct wrqe *wr;
	struct adapter *sc = td_adapter(td);

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		case CPL_PASS_ACCEPT_RPL:
			tid = GET_TID(cpl);
			CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
			synack_failure_cleanup(sc, tid);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger. */
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	struct vi_info *vi;
	int i, rc, v;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
	if (rc != 0)
		goto done;
	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

	alloc_tcb_history(sc, td);

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;
	tod->tod_tcp_info = t4_tcp_info;
#ifdef KERN_TLS
	tod->tod_alloc_tls_session = t4_alloc_tls_session;
#endif
	tod->tod_pmtu_update = t4_pmtu_update;

	for_each_port(sc, i) {
		for_each_vi(sc->port[i], v, vi) {
			SETTOEDEV(vi->ifp, &td->tod);
		}
	}

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}

static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static int
t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
{
	struct tcpcb *tp = sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error, optval;

	if (sopt->sopt_level == IPPROTO_TCP && sopt->sopt_name == TCP_USE_DDP) {
		if (sopt->sopt_dir != SOPT_SET)
			return (EOPNOTSUPP);

		if (sopt->sopt_td != NULL) {
			/* Only settable by the kernel. */
			return (EPERM);
		}

		error = sooptcopyin(sopt, &optval, sizeof(optval),
		    sizeof(optval));
		if (error != 0)
			return (error);

		if (optval != 0)
			return (t4_enable_ddp_rcv(so, toep));
		else
			return (EOPNOTSUPP);
	}
	return (tcp_ctloutput(so, sopt));
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error;

	/*
	 * No lock is needed as TOE sockets never change between
	 * active and passive.
	 */
	if (SOLISTENING(so))
		return (EINVAL);

	if (ulp_mode(toep) == ULP_MODE_TCPDDP ||
	    ulp_mode(toep) == ULP_MODE_NONE) {
		error = t4_aio_queue_ddp(so, job);
		if (error != EOPNOTSUPP)
			return (error);
	}

	return (t4_aio_queue_aiotx(so, job));
}

static int
t4_tom_mod_load(void)
{
	/* CPL handlers */
	t4_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl);
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2,
	    CPL_COOKIE_TOM);
	t4_init_connect_cpl_handlers();
	t4_init_listen_cpl_handlers();
	t4_init_cpl_io_handlers();

	t4_ddp_mod_load();
	t4_tls_mod_load();

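	/*
	 * The TOE protosws are copies of the stock TCP protosws with only
	 * the ctloutput and aio_queue handlers overridden.
	 */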
	bcopy(&tcp_protosw, &toe_protosw, sizeof(toe_protosw));
	toe_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe_protosw.pr_aio_queue = t4_aio_queue_tom;

	bcopy(&tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
	toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe6_protosw.pr_aio_queue = t4_aio_queue_tom;

	return (t4_register_uld(&tom_uld_info));
}

static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	t4_tls_mod_unload();
	t4_ddp_mod_unload();

	t4_uninit_connect_cpl_handlers();
	t4_uninit_listen_cpl_handlers();
	t4_uninit_cpl_io_handlers();
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM);
	t4_register_cpl_handler(CPL_GET_TCB_RPL, NULL);

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);