xref: /freebsd/sys/dev/cxgbe/tom/t4_connect.c (revision 0957b409)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
/*
 * Active open succeeded.
 */
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_establish *cpl = (const void *)(rss + 1);
	u_int tid = GET_TID(cpl);
	u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
	free_atid(sc, atid);

	CURVNET_SET(toep->vnet);
	INP_WLOCK(inp);
	toep->tid = tid;
	insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
	if (inp->inp_flags & INP_DROPPED) {

		/* socket closed by the kernel before hw told us it connected */

		send_flowc_wr(toep, NULL);
		send_reset(sc, toep, be32toh(cpl->snd_isn));
		goto done;
	}

	make_established(toep, be32toh(cpl->snd_isn) - 1,
	    be32toh(cpl->rcv_isn) - 1, cpl->tcp_opt);

	if (toep->ulp_mode == ULP_MODE_TLS)
		tls_establish(toep);

done:
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

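/*
 * Tear down an active open that could not be completed: release the atid,
 * report the failure to the kernel via toe_connect_failed(), and let
 * final_cpl_received() unwind the toepcb (it unlocks the inp).
 */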
void
act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
{
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;
	struct toedev *tod = &toep->td->tod;
	struct epoch_tracker et;

	free_atid(sc, atid);
	toep->tid = -1;

	CURVNET_SET(toep->vnet);
	if (status != EAGAIN)
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
	INP_WLOCK(inp);
	toe_connect_failed(tod, inp, status);
	final_cpl_received(toep);	/* unlocks inp */
	if (status != EAGAIN)
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
	CURVNET_RESTORE();
}

/*
 * Active open failed.
 */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct toepcb *toep = lookup_atid(sc, atid);
	int rc;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, status %u ", __func__, atid, status);

	/* Ignore negative advice */
	if (negative_advice(status))
		return (0);

	if (status && act_open_has_tid(status))
		release_tid(sc, GET_TID(cpl), toep->ctrlq);

	rc = act_open_rpl_status_to_errno(status);
	act_open_failure_cleanup(sc, atid, rc);

	return (0);
}

/*
 * Options2 for active open.
 */
static uint32_t
calc_opt2a(struct socket *so, struct toepcb *toep,
    const struct offload_settings *s)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct port_info *pi = toep->vi->pi;
	struct adapter *sc = pi->adapter;
	uint32_t opt2 = 0;

	/*
	 * rx flow control, rx coalesce, congestion control, and tx pace are all
	 * explicitly set by the driver.  On T5+ the ISS is also set by the
	 * driver to the value picked by the kernel.
	 */
	if (is_t4(sc)) {
		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
	} else {
		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
	}

	if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
		opt2 |= F_SACK_EN;

	if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
		opt2 |= F_TSTAMPS_EN;

	if (tp->t_flags & TF_REQ_SCALE)
		opt2 |= F_WND_SCALE_EN;

	if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
		opt2 |= F_CCTRL_ECN;

	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */

	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);

	/* These defaults are subject to ULP specific fixups later. */
	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);

	opt2 |= V_PACE(0);

	if (s->cong_algo >= 0)
		opt2 |= V_CONG_CNTRL(s->cong_algo);
	else if (sc->tt.cong_algorithm >= 0)
		opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL);
	else {
		struct cc_algo *cc = CC_ALGO(tp);

		if (strcasecmp(cc->name, "reno") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_RENO);
		else if (strcasecmp(cc->name, "tahoe") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
		else if (strcasecmp(cc->name, "newreno") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
		else if (strcasecmp(cc->name, "highspeed") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED);
		else {
			/*
			 * Use newreno in case the algorithm selected by the
			 * host stack is not supported by the hardware.
			 */
			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
		}
	}

	if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce))
		opt2 |= V_RX_COALESCE(M_RX_COALESCE);

	/* Note that ofld_rxq is already set according to s->rxq. */
	opt2 |= F_RSS_QUEUE_VALID;
	opt2 |= V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);

#ifdef USE_DDP_RX_FLOW_CONTROL
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		opt2 |= F_RX_FC_DDP;
#endif

	if (toep->ulp_mode == ULP_MODE_TLS) {
		opt2 &= ~V_RX_COALESCE(M_RX_COALESCE);
		opt2 |= F_RX_FC_DISABLE;
	}

	return (htobe32(opt2));
}

void
t4_init_connect_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl,
	    CPL_COOKIE_TOM);
}

void
t4_uninit_connect_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, NULL, CPL_COOKIE_TOM);
}

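/*
 * Abandon the attempt to offload this active open.  __LINE__ is recorded only
 * for the KTR trace emitted at the "failed" label in t4_connect().
 */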
#define DONT_OFFLOAD_ACTIVE_OPEN(x)	do { \
	reason = __LINE__; \
	rc = (x); \
	goto failed; \
} while (0)

static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			sizeof (struct cpl_act_open_req),
			sizeof (struct cpl_act_open_req6)
		},
		{
			sizeof (struct cpl_t5_act_open_req),
			sizeof (struct cpl_t5_act_open_req6)
		},
		{
			sizeof (struct cpl_t6_act_open_req),
			sizeof (struct cpl_t6_act_open_req6)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct vi_info *vi;
	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;
	struct offload_settings settings;
	uint16_t vid = 0xfff, pcp = 0;

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	if (rt_ifp->if_type == IFT_ETHER)
		vi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_TRUNKDEV(rt_ifp);

		vi = ifp->if_softc;
		VLAN_TAG(rt_ifp, &vid);
		VLAN_PCP(rt_ifp, &pcp);
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	rw_rlock(&sc->policy_lock);
	settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL,
	    EVL_MAKETAG(vid, pcp, 0), inp);
	rw_runlock(&sc->policy_lock);
	if (!settings.offload)
		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

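	/*
	 * Pick the offload tx and rx queues: use the queue requested by the
	 * offload policy if it is in range, otherwise choose one at random
	 * from this VI's offload queues.
	 */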
	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
		txqid = settings.txq;
	else
		txqid = arc4random() % vi->nofldtxq;
	txqid += vi->first_ofld_txq;
	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
		rxqid = settings.rxq;
	else
		rxqid = arc4random() % vi->nofldrxq;
	rxqid += vi->first_ofld_rxq;

	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->vnet = so->so_vnet;
	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;
	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
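	/*
	 * Encode the offload rx queue and the atid in the request so that the
	 * hardware's reply (act_open_rpl or act_establish) is delivered to our
	 * queue and can be matched back to this toepcb.
	 */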
	qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
	    V_TID_COOKIE(CPL_COOKIE_TOM);

	if (isipv6) {
		struct cpl_act_open_req6 *cpl = wrtod(wr);
		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

		if ((inp->inp_vflag & INP_IPV6) == 0)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

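		/*
		 * The local IPv6 address needs an entry in the hardware CLIP
		 * table; hold a reference on it for this connection (dropped
		 * in the failure path below via t4_release_lip).
		 */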
		toep->ce = t4_hold_lip(sc, &inp->in6p_laddr, NULL);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

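		/*
		 * T5 and later provide the initial send sequence number in the
		 * CPL (see F_T5_ISS in calc_opt2a); the T4 request has no iss
		 * field.
		 */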
		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);
		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	offload_socket(so, toep);
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			t4_release_lip(sc, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}
#endif