/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef __T4_TOM_H__
#define __T4_TOM_H__
#include <sys/vmem.h>
#include "common/t4_hw.h"
#include "common/t4_msg.h"
#include "tom/t4_tls.h"

#define LISTEN_HASH_SIZE 32

/*
 * Min receive window.  We want it to be large enough to accommodate receive
 * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
 */
#define MIN_RCV_WND (24 * 1024U)

/*
 * Max receive window supported by HW in bytes.  Only a small part of it can
 * be set through option0, the rest needs to be set through RX_DATA_ACK.
 */
#define MAX_RCV_WND ((1U << 27) - 1)
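/* 2^27 - 1, i.e. one byte short of 128MB. */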

#define	DDP_RSVD_WIN (16 * 1024U)
#define	SB_DDP_INDICATE	SB_IN_TOE	/* soreceive must respond to indicate */

#define USE_DDP_RX_FLOW_CONTROL

#define PPOD_SZ(n)	((n) * sizeof(struct pagepod))
#define PPOD_SIZE	(PPOD_SZ(1))

/* TOE PCB flags */
enum {
	TPF_ATTACHED	   = (1 << 0),	/* a tcpcb refers to this toepcb */
	TPF_FLOWC_WR_SENT  = (1 << 1),	/* firmware flow context WR sent */
	TPF_TX_DATA_SENT   = (1 << 2),	/* some data sent */
	TPF_TX_SUSPENDED   = (1 << 3),	/* tx suspended for lack of resources */
	TPF_SEND_FIN	   = (1 << 4),	/* send FIN after all pending data */
	TPF_FIN_SENT	   = (1 << 5),	/* FIN has been sent */
	TPF_ABORT_SHUTDOWN = (1 << 6),	/* connection abort is in progress */
	TPF_CPL_PENDING    = (1 << 7),	/* haven't received the last CPL */
	TPF_SYNQE	   = (1 << 8),	/* synq_entry, not really a toepcb */
	TPF_SYNQE_EXPANDED = (1 << 9),	/* toepcb ready, tid context updated */
	TPF_TLS_STARTING   = (1 << 10), /* starting TLS receive */
	TPF_KTLS           = (1 << 11), /* send TLS records from KTLS */
	TPF_INITIALIZED    = (1 << 12), /* init_toepcb has been called */
	TPF_TLS_RECEIVE	   = (1 << 13), /* should receive TLS records */
	TPF_TLS_RX_QUIESCING = (1 << 14), /* RX quiesced for TLS RX startup */
	TPF_TLS_RX_QUIESCED = (1 << 15), /* RX quiesced for TLS RX startup */
	TPF_WAITING_FOR_FINAL = (1 << 16), /* waiting for wakeup on final CPL */
};

enum {
	DDP_OK		= (1 << 0),	/* OK to turn on DDP */
	DDP_SC_REQ	= (1 << 1),	/* state change (on/off) requested */
	DDP_ON		= (1 << 2),	/* DDP is turned on */
	DDP_BUF0_ACTIVE	= (1 << 3),	/* buffer 0 in use (not invalidated) */
	DDP_BUF1_ACTIVE	= (1 << 4),	/* buffer 1 in use (not invalidated) */
	DDP_TASK_ACTIVE = (1 << 5),	/* requeue task is queued / running */
	DDP_DEAD	= (1 << 6),	/* toepcb is shutting down */
	DDP_AIO		= (1 << 7),	/* DDP used for AIO, not so_rcv */
	DDP_RCVBUF	= (1 << 8),	/* DDP used for so_rcv, not AIO */
};

struct bio;
struct ctl_sg_entry;
struct sockopt;
struct offload_settings;

/*
 * Connection parameters for an offloaded connection.  These are mostly (but not
 * all) hardware TOE parameters.
 */
struct conn_params {
	int8_t rx_coalesce;
	int8_t cong_algo;
	int8_t tc_idx;
	int8_t tstamp;
	int8_t sack;
	int8_t nagle;
	int8_t keepalive;
	int8_t wscale;
	int8_t ecn;
	int8_t mtu_idx;
	int8_t ulp_mode;
	int8_t tx_align;
	int16_t txq_idx;	/* ofld_txq = &sc->sge.ofld_txq[txq_idx] */
	int16_t rxq_idx;	/* ofld_rxq = &sc->sge.ofld_rxq[rxq_idx] */
	int16_t l2t_idx;
	uint16_t emss;
	uint16_t opt0_bufsize;
	u_int sndbuf;		/* controls TP tx pages */
};

struct ofld_tx_sdesc {
	uint32_t plen;		/* payload length */
	uint8_t tx_credits;	/* firmware tx credits (unit is 16B) */
};

struct ppod_region {
	u_int pr_start;
	u_int pr_len;
	u_int pr_page_shift[4];
	uint32_t pr_tag_mask;		/* hardware tagmask for this region. */
	uint32_t pr_invalid_bit;	/* OR with this to invalidate tag. */
	uint32_t pr_alias_mask;		/* AND with tag to get alias bits. */
	u_int pr_alias_shift;		/* shift this much for first alias bit. */
	vmem_t *pr_arena;
};

struct ppod_reservation {
	struct ppod_region *prsv_pr;
	uint32_t prsv_tag;		/* Full tag: pgsz, alias, tag, color */
	u_int prsv_nppods;
};

struct pageset {
	TAILQ_ENTRY(pageset) link;
	vm_page_t *pages;
	int npages;
	int flags;
	int offset;		/* offset in first page */
	int len;
	struct ppod_reservation prsv;
	struct vmspace *vm;
	vm_offset_t start;
	u_int vm_timestamp;
};

TAILQ_HEAD(pagesetq, pageset);

#define	PS_PPODS_WRITTEN	0x0001	/* Page pods written to the card. */

struct ddp_rcv_buffer {
	TAILQ_ENTRY(ddp_rcv_buffer) link;
	void	*buf;
	struct ppod_reservation prsv;
	size_t	len;
	u_int	refs;
};

struct ddp_buffer {
	union {
		/* DDP_AIO fields */
		struct {
			struct pageset *ps;
			struct kaiocb *job;
			int	cancel_pending;
		};

		/* DDP_RCVBUF fields */
		struct {
			struct ddp_rcv_buffer *drb;
			uint32_t placed;
		};
	};
};

/*
 * (a) - DDP_AIO only
 * (r) - DDP_RCVBUF only
 */
struct ddp_pcb {
	struct mtx lock;
	u_int flags;
	int active_id;	/* the currently active DDP buffer */
	struct ddp_buffer db[2];
	union {
		TAILQ_HEAD(, pageset) cached_pagesets;	/* (a) */
		TAILQ_HEAD(, ddp_rcv_buffer) cached_buffers; /* (r) */
	};
	TAILQ_HEAD(, kaiocb) aiojobq;		/* (a) */
	u_int waiting_count;			/* (a) */
	u_int active_count;
	u_int cached_count;
	struct task requeue_task;
	struct kaiocb *queueing;		/* (a) */
	struct mtx cache_lock;			/* (r) */
};

struct toepcb {
	struct tom_data *td;
	struct inpcb *inp;	/* backpointer to host stack's PCB */
	u_int flags;		/* miscellaneous flags */
	TAILQ_ENTRY(toepcb) link; /* toep_list */
	int refcount;
	struct vnet *vnet;
	struct vi_info *vi;	/* virtual interface */
	struct sge_ofld_txq *ofld_txq;
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ctrlq;
	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
	struct clip_entry *ce;	/* CLIP table entry used by this tid */
	int tid;		/* Connection identifier */

	/* tx credit handling */
	u_int tx_total;		/* total tx WR credits (in 16B units) */
	u_int tx_credits;	/* tx WR credits (in 16B units) available */
	u_int tx_nocompl;	/* tx WR credits since last compl request */
	u_int plen_nocompl;	/* payload since last compl request */

	struct conn_params params;

	void *ulpcb;
	void *ulpcb2;
	struct mbufq ulp_pduq;	/* PDUs waiting to be sent out. */
	struct mbufq ulp_pdu_reclaimq;

	struct ddp_pcb ddp;
	struct tls_ofld_info tls;

	TAILQ_HEAD(, kaiocb) aiotx_jobq;
	struct task aiotx_task;
	struct socket *aiotx_so;

	/* Tx software descriptor */
	uint8_t txsd_total;
	uint8_t txsd_pidx;
	uint8_t txsd_cidx;
	uint8_t txsd_avail;
	struct ofld_tx_sdesc txsd[];
};

static inline int
ulp_mode(struct toepcb *toep)
{

	return (toep->params.ulp_mode);
}

#define	DDP_LOCK(toep)		mtx_lock(&(toep)->ddp.lock)
#define	DDP_UNLOCK(toep)	mtx_unlock(&(toep)->ddp.lock)
#define	DDP_ASSERT_LOCKED(toep)	mtx_assert(&(toep)->ddp.lock, MA_OWNED)
#define	DDP_CACHE_LOCK(toep)	mtx_lock(&(toep)->ddp.cache_lock)
#define	DDP_CACHE_UNLOCK(toep)	mtx_unlock(&(toep)->ddp.cache_lock)
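
/*
 * Sketch of how the DDP lock macros above are meant to be used (illustrative
 * only, not taken from a specific call site): hold the per-toepcb DDP lock
 * around any inspection or update of toep->ddp, e.g.
 *
 *	DDP_LOCK(toep);
 *	if (toep->ddp.flags & DDP_ON)
 *		... examine or modify toep->ddp state ...
 *	DDP_UNLOCK(toep);
 *
 * DDP_CACHE_LOCK/DDP_CACHE_UNLOCK appear to guard the DDP_RCVBUF buffer
 * cache (cached_buffers) in the same fashion.
 */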

/*
 * Compressed state for embryonic connections for a listener.
 */
struct synq_entry {
	struct listen_ctx *lctx;	/* backpointer to listen ctx */
	struct mbuf *syn;
	int flags;			/* same as toepcb's tp_flags */
	volatile int ok_to_respond;
	volatile u_int refcnt;
	int tid;
	uint32_t iss;
	uint32_t irs;
	uint32_t ts;
	uint32_t rss_hash;
	__be16 tcp_opt; /* from cpl_pass_establish */
	struct toepcb *toep;

	struct conn_params params;
};

/* listen_ctx flags */
#define LCTX_RPL_PENDING 1	/* waiting for a CPL_PASS_OPEN_RPL */

struct listen_ctx {
	LIST_ENTRY(listen_ctx) link;	/* listen hash linkage */
	volatile int refcount;
	int stid;
	struct stid_region stid_region;
	int flags;
	struct inpcb *inp;		/* listening socket's inp */
	struct vnet *vnet;
	struct sge_wrq *ctrlq;
	struct sge_ofld_rxq *ofld_rxq;
	struct clip_entry *ce;
};

/* tcb_histent flags */
#define TE_RPL_PENDING	1
#define TE_ACTIVE	2

/* bits in one 8b tcb_histent sample. */
#define TS_RTO			(1 << 0)
#define TS_DUPACKS		(1 << 1)
#define TS_FASTREXMT		(1 << 2)
#define TS_SND_BACKLOGGED	(1 << 3)
#define TS_CWND_LIMITED		(1 << 4)
#define TS_ECN_ECE		(1 << 5)
#define TS_ECN_CWR		(1 << 6)
#define TS_RESERVED		(1 << 7)	/* Unused. */

struct tcb_histent {
	struct mtx te_lock;
	struct callout te_callout;
	uint64_t te_tcb[TCB_SIZE / sizeof(uint64_t)];
	struct adapter *te_adapter;
	u_int te_flags;
	u_int te_tid;
	uint8_t te_pidx;
	uint8_t te_sample[100];
};

struct tom_data {
	struct toedev tod;

	/* toepcb's associated with this TOE device */
	struct mtx toep_list_lock;
	TAILQ_HEAD(, toepcb) toep_list;

	struct mtx lctx_hash_lock;
	LIST_HEAD(, listen_ctx) *listen_hash;
	u_long listen_mask;
	int lctx_count;		/* # of lctx in the hash table */

	struct ppod_region pr;

	struct rwlock tcb_history_lock __aligned(CACHE_LINE_SIZE);
	struct tcb_histent **tcb_history;
	int dupack_threshold;

	/* WRs that will not be sent to the chip because L2 resolution failed */
	struct mtx unsent_wr_lock;
	STAILQ_HEAD(, wrqe) unsent_wr_list;
	struct task reclaim_wr_resources;
};

static inline struct tom_data *
tod_td(struct toedev *tod)
{

	return (__containerof(tod, struct tom_data, tod));
}

static inline struct adapter *
td_adapter(struct tom_data *td)
{

	return (td->tod.tod_softc);
}

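/*
 * The accessors below stash TOM-specific per-packet metadata in the mbuf
 * packet header's PH_per scratch space.  As used in this file: eight[0]
 * holds the ULP submode, eight[1] the iSCSI ISO flag, eight[2] the ISO
 * flags, eight[6] the raw work request flag, and sixteen[2] the ISO MSS.
 */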
static inline void
set_mbuf_raw_wr(struct mbuf *m, bool raw)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_per.eight[6] = raw;
}

static inline bool
mbuf_raw_wr(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_per.eight[6]);
}

static inline void
set_mbuf_ulp_submode(struct mbuf *m, uint8_t ulp_submode)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_per.eight[0] = ulp_submode;
}

static inline uint8_t
mbuf_ulp_submode(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_per.eight[0]);
}

static inline void
set_mbuf_iscsi_iso(struct mbuf *m, bool iso)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_per.eight[1] = iso;
}

static inline bool
mbuf_iscsi_iso(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_per.eight[1]);
}

/* Flags for iSCSI segmentation offload. */
#define	CXGBE_ISO_TYPE(flags)	((flags) & 0x3)
#define	CXGBE_ISO_F		0x4

static inline void
set_mbuf_iscsi_iso_flags(struct mbuf *m, uint8_t flags)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_per.eight[2] = flags;
}

static inline uint8_t
mbuf_iscsi_iso_flags(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_per.eight[2]);
}

static inline void
set_mbuf_iscsi_iso_mss(struct mbuf *m, uint16_t mss)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_per.sixteen[2] = mss;
}

static inline uint16_t
mbuf_iscsi_iso_mss(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_per.sixteen[2]);
}

/* t4_tom.c */
struct toepcb *alloc_toepcb(struct vi_info *, int);
int init_toepcb(struct vi_info *, struct toepcb *);
struct toepcb *hold_toepcb(struct toepcb *);
void free_toepcb(struct toepcb *);
void offload_socket(struct socket *, struct toepcb *);
void restore_so_proto(struct socket *, bool);
void undo_offload_socket(struct socket *);
void final_cpl_received(struct toepcb *);
void insert_tid(struct adapter *, int, void *, int);
void *lookup_tid(struct adapter *, int);
void update_tid(struct adapter *, int, void *);
void remove_tid(struct adapter *, int, int);
u_long select_rcv_wnd(struct socket *);
int select_rcv_wscale(void);
void init_conn_params(struct vi_info *, struct offload_settings *,
    struct in_conninfo *, struct socket *, const struct tcp_options *, int16_t,
    struct conn_params *cp);
__be64 calc_options0(struct vi_info *, struct conn_params *);
__be32 calc_options2(struct vi_info *, struct conn_params *);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
int negative_advice(int);
int add_tid_to_history(struct adapter *, u_int);

/* t4_connect.c */
void t4_init_connect_cpl_handlers(void);
void t4_uninit_connect_cpl_handlers(void);
int t4_connect(struct toedev *, struct socket *, struct nhop_object *,
    struct sockaddr *);
void act_open_failure_cleanup(struct adapter *, u_int, u_int);

/* t4_listen.c */
void t4_init_listen_cpl_handlers(void);
void t4_uninit_listen_cpl_handlers(void);
int t4_listen_start(struct toedev *, struct tcpcb *);
int t4_listen_stop(struct toedev *, struct tcpcb *);
void t4_syncache_added(struct toedev *, void *);
void t4_syncache_removed(struct toedev *, void *);
int t4_syncache_respond(struct toedev *, void *, struct mbuf *);
int do_abort_req_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
int do_abort_rpl_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
void t4_offload_socket(struct toedev *, void *, struct socket *);
void synack_failure_cleanup(struct adapter *, int);

/* t4_cpl_io.c */
void aiotx_init_toep(struct toepcb *);
int t4_aio_queue_aiotx(struct socket *, struct kaiocb *);
void t4_init_cpl_io_handlers(void);
void t4_uninit_cpl_io_handlers(void);
void send_abort_rpl(struct adapter *, struct sge_ofld_txq *, int, int);
void send_flowc_wr(struct toepcb *, struct tcpcb *);
void send_reset(struct adapter *, struct toepcb *, uint32_t);
int send_rx_credits(struct adapter *, struct toepcb *, int);
void make_established(struct toepcb *, uint32_t, uint32_t, uint16_t);
int t4_close_conn(struct adapter *, struct toepcb *);
void t4_rcvd(struct toedev *, struct tcpcb *);
void t4_rcvd_locked(struct toedev *, struct tcpcb *);
int t4_tod_output(struct toedev *, struct tcpcb *);
int t4_send_fin(struct toedev *, struct tcpcb *);
int t4_send_rst(struct toedev *, struct tcpcb *);
void t4_set_tcb_field(struct adapter *, struct sge_wrq *, struct toepcb *,
    uint16_t, uint64_t, uint64_t, int, int);
void t4_push_frames(struct adapter *, struct toepcb *, int);
void t4_push_pdus(struct adapter *, struct toepcb *, int);

/* t4_ddp.c */
int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
    const char *);
void t4_free_ppod_region(struct ppod_region *);
int t4_alloc_page_pods_for_ps(struct ppod_region *, struct pageset *);
int t4_alloc_page_pods_for_bio(struct ppod_region *, struct bio *,
    struct ppod_reservation *);
int t4_alloc_page_pods_for_buf(struct ppod_region *, vm_offset_t, int,
    struct ppod_reservation *);
int t4_alloc_page_pods_for_sgl(struct ppod_region *, struct ctl_sg_entry *, int,
    struct ppod_reservation *);
int t4_write_page_pods_for_ps(struct adapter *, struct sge_wrq *, int,
    struct pageset *);
int t4_write_page_pods_for_bio(struct adapter *, struct toepcb *,
    struct ppod_reservation *, struct bio *, struct mbufq *);
int t4_write_page_pods_for_buf(struct adapter *, struct toepcb *,
    struct ppod_reservation *, vm_offset_t, int, struct mbufq *);
int t4_write_page_pods_for_sgl(struct adapter *, struct toepcb *,
    struct ppod_reservation *, struct ctl_sg_entry *, int, int, struct mbufq *);
void t4_free_page_pods(struct ppod_reservation *);
int t4_aio_queue_ddp(struct socket *, struct kaiocb *);
int t4_enable_ddp_rcv(struct socket *, struct toepcb *);
void t4_ddp_mod_load(void);
void t4_ddp_mod_unload(void);
void ddp_assert_empty(struct toepcb *);
void ddp_uninit_toep(struct toepcb *);
void ddp_queue_toep(struct toepcb *);
void release_ddp_resources(struct toepcb *toep);
void handle_ddp_close(struct toepcb *, struct tcpcb *, uint32_t);
void handle_ddp_indicate(struct toepcb *);
void insert_ddp_data(struct toepcb *, uint32_t);
const struct offload_settings *lookup_offload_policy(struct adapter *, int,
    struct mbuf *, uint16_t, struct inpcb *);

/* t4_tls.c */
bool can_tls_offload(struct adapter *);
void do_rx_data_tls(const struct cpl_rx_data *, struct toepcb *, struct mbuf *);
void t4_push_ktls(struct adapter *, struct toepcb *, int);
void tls_received_starting_data(struct adapter *, struct toepcb *,
    struct sockbuf *, int);
void t4_tls_mod_load(void);
void t4_tls_mod_unload(void);
void tls_init_toep(struct toepcb *);
int tls_tx_key(struct toepcb *);
void tls_uninit_toep(struct toepcb *);
int tls_alloc_ktls(struct toepcb *, struct ktls_session *, int);

#endif