/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */

	struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

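/*
 * Hashfilters are tracked in hftid_tab, a flat array indexed by tid.  An
 * IPv6 hashfilter consumes two tids, which is what the ntids argument of
 * these helpers accounts for in the global tids_in_use count.
 */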
73 insert_hftid(struct adapter *sc, int tid, void *ctx, int ntids)
74 {
75 	struct tid_info *t = &sc->tids;
76 
77 	t->hftid_tab[tid] = ctx;
78 	atomic_add_int(&t->tids_in_use, ntids);
79 }
80 
81 static void *
82 lookup_hftid(struct adapter *sc, int tid)
83 {
84 	struct tid_info *t = &sc->tids;
85 
86 	return (t->hftid_tab[tid]);
87 }
88 
89 static void
90 remove_hftid(struct adapter *sc, int tid, int ntids)
91 {
92 	struct tid_info *t = &sc->tids;
93 
94 	t->hftid_tab[tid] = NULL;
95 	atomic_subtract_int(&t->tids_in_use, ntids);
96 }
97 
98 static uint32_t
99 mode_to_fconf(uint32_t mode)
100 {
101 	uint32_t fconf = 0;
102 
103 	if (mode & T4_FILTER_IP_FRAGMENT)
104 		fconf |= F_FRAGMENTATION;
105 
106 	if (mode & T4_FILTER_MPS_HIT_TYPE)
107 		fconf |= F_MPSHITTYPE;
108 
109 	if (mode & T4_FILTER_MAC_IDX)
110 		fconf |= F_MACMATCH;
111 
112 	if (mode & T4_FILTER_ETH_TYPE)
113 		fconf |= F_ETHERTYPE;
114 
115 	if (mode & T4_FILTER_IP_PROTO)
116 		fconf |= F_PROTOCOL;
117 
118 	if (mode & T4_FILTER_IP_TOS)
119 		fconf |= F_TOS;
120 
121 	if (mode & T4_FILTER_VLAN)
122 		fconf |= F_VLAN;
123 
124 	if (mode & T4_FILTER_VNIC)
125 		fconf |= F_VNIC_ID;
126 
127 	if (mode & T4_FILTER_PORT)
128 		fconf |= F_PORT;
129 
130 	if (mode & T4_FILTER_FCoE)
131 		fconf |= F_FCOE;
132 
133 	return (fconf);
134 }
135 
136 static uint32_t
137 mode_to_iconf(uint32_t mode)
138 {
139 
140 	if (mode & T4_FILTER_IC_VNIC)
141 		return (F_VNIC);
142 	return (0);
143 }
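
/*
 * For example, a mode of T4_FILTER_IP_PROTO | T4_FILTER_PORT corresponds to
 * F_PROTOCOL | F_PORT in TP_VLAN_PRI_MAP, and T4_FILTER_IC_VNIC corresponds
 * to F_VNIC in TP_INGRESS_CONFIG.
 */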
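/*
 * Verify that the requested filter specification is representable with the
 * fields compressed into the filter tuple (vlan_pri_map) and the current
 * ingress config.  EINVAL means the spec conflicts with the VNIC vs. outer
 * VLAN interpretation of the vnic_id field; E2BIG means the spec matches on
 * a field that is not part of the current filter mode.
 */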
static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		fconf |= F_VNIC_ID;
		if (tpp->ingress_config & F_VNIC)
			return (EINVAL);
	}

	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		fconf |= F_VNIC_ID;
		if ((tpp->ingress_config & F_VNIC) == 0)
			return (EINVAL);
	}

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
		return (E2BIG);

	return (0);
}

int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t mask;

	/* Non-zero incoming value in mode means "hashfilter mode". */
	mask = *mode ? tp->hash_filter_mask : UINT64_MAX;

	/* Always */
	*mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit)  do { \
	if (tp->vlan_pri_map & (fconf_bit)) { \
		MPASS(tp->field_shift >= 0); \
		if ((mask >> tp->field_shift & field_mask) == field_mask) \
			*mode |= (mode_bit); \
	} \
} while (0)

	CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT);
	CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE);
	CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX);
	CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE);
	CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO);
	CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS);
	CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN);
	CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID, T4_FILTER_VNIC);
	if (tp->ingress_config & F_VNIC)
		*mode |= T4_FILTER_IC_VNIC;
	CHECK_FIELD(F_PORT, port_shift, M_FT_PORT, T4_FILTER_PORT);
	CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE, T4_FILTER_FCoE);
#undef CHECK_FIELD

	return (0);
}

int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf, iconf;
	int rc;

	iconf = mode_to_iconf(mode);
	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
		/*
		 * For now we just complain if A_TP_INGRESS_CONFIG is not
		 * already set to the correct value for the requested filter
		 * mode.  It's not clear if it's safe to write to this register
		 * on the fly.  (And we trust the cached value of the register).
		 *
		 * check_fspec_against_fconf_iconf and other code that looks at
		 * tp->vlan_pri_map and tp->ingress_config needs to be reviewed
		 * thoroughly before allowing dynamic filter mode changes.
		 */
		return (EBUSY);
	}

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	rc = -t4_set_filter_mode(sc, fconf, true);
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

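/*
 * The filter hit count lives in the filter's TCB: T4 keeps a 64-bit count at
 * byte offset 16 of the TCB, while T5 and later keep a 32-bit count at byte
 * offset 24.  Either way the read goes through memory window 0.
 */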
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}

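/*
 * Retrieve the first valid filter at or after t->idx.  t->idx is set to
 * 0xffffffff if no such filter exists.
 */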
int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	if (t->fs.hash)
		return (get_hashfilter(sc, t));

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	mtx_lock(&sc->tids.ftid_lock);
	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			MPASS(f->tid == sc->tids.ftid_base + i);
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, f->tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (0);
}

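/*
 * Program a TCAM filter with a FW_FILTER_WR (or FW_FILTER2_WR when the
 * firmware supports it) and sleep until the firmware replies.  An IPv4
 * filter occupies one ftid; an IPv6 filter occupies four consecutive,
 * 4-aligned ftids.
 */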
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te)
{
	struct filter_entry *f;
	struct fw_filter2_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	const int ntids = t->fs.type ? 4 : 1;

	MPASS(!t->fs.hash);
	MPASS(t->idx < sc->tids.nftids);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	f = &sc->tids.ftid_tab[t->idx];
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		int len16;

		if (sc->params.filter2_wr_support)
			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
		else
			len16 = howmany(sizeof(struct fw_filter_wr), 16);
		fwr = start_wrq_wr(&sc->sge.mgmtq, len16, &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0) {
		if (l2te)
			t4_l2t_release(l2te);
		return (rc);
	}

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = sc->tids.ftid_base + t->idx;
	f->fs = t->fs;
	f->l2te = l2te;

	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	bzero(fwr, sizeof(*fwr));
	if (sc->params.filter2_wr_support)
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac) {
		/* XXX: need to use SMT idx instead */
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
	}
	if (sc->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
		    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
		    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
			ULP_MODE_TCPDDP : ULP_MODE_NONE) |
		    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
		    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
		fwr->newlport = htobe16(f->fs.nat_dport);
		fwr->newfport = htobe16(f->fs.nat_sport);
		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
	}
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

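/*
 * Build the Compressed Filter Tuple for a hashfilter from the fields that
 * the chip compresses into the tuple (indicated by the tp->*_shift values).
 * A hashfilter must match on exactly the set of fields in the configured
 * hash filter mask; anything else is rejected with EINVAL.
 */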
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t fmask;

	*ftuple = fmask = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan) {
		*ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;
		fmask |= M_FT_VLAN << tp->vlan_shift;
	}

	if (tp->port_shift >= 0 && fs->mask.iport) {
		*ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
		fmask |= M_FT_PORT << tp->port_shift;
	}

	if (tp->protocol_shift >= 0 && fs->mask.proto) {
		*ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
		fmask |= M_FT_PROTOCOL << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos) {
		*ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
		fmask |= M_FT_TOS << tp->tos_shift;
	}

	if (tp->vnic_shift >= 0 && fs->mask.vnic) {
		/* F_VNIC in ingress config was already validated. */
		if (tp->ingress_config & F_VNIC)
			MPASS(fs->mask.pfvf_vld);
		else
			MPASS(fs->mask.ovlan_vld);

		*ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
		fmask |= M_FT_VNIC_ID << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
		*ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
		fmask |= M_FT_MACMATCH << tp->macmatch_shift;
	}

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
		*ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
		fmask |= M_FT_ETHERTYPE << tp->ethertype_shift;
	}

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
		*ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
		fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift;
	}

	if (tp->frag_shift >= 0 && fs->mask.frag) {
		*ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
		fmask |= M_FT_FRAGMENTATION << tp->frag_shift;
	}

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
		*ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
		fmask |= M_FT_FCOE << tp->fcoe_shift;
	}

	/* A hashfilter must conform to the filterMask. */
	if (fmask != tp->hash_filter_mask)
		return (EINVAL);

	return (0);
}

int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te;
	uint64_t ftuple;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		/* Hardware, not user, selects a tid for hashfilters. */
		if (t->idx != (uint32_t)-1)
			return (EINVAL);
		/* T5 can't count hashfilter hits. */
		if (is_t5(sc) && t->fs.hitcnts)
			return (EINVAL);
		rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
		if (rc != 0)
			return (rc);
	} else {
		if (ti->nftids == 0)
			return (ENOTSUP);
		if (t->idx >= ti->nftids)
			return (EINVAL);
		/* IPv6 filter idx must be 4 aligned */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support VLAN tag removal or rewrite, swapmac, or NAT. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
	    t->fs.swapmac || t->fs.nat_mode))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);

	/*
	 * Basic checks passed.  Make sure the queues and tid tables are setup.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0)) {
		end_synchronized_op(sc, 0);
		return (rc);
	}
	if (t->fs.hash) {
		if (__predict_false(ti->hftid_tab == NULL)) {
			ti->hftid_tab = malloc(sizeof(*ti->hftid_tab) * ti->ntids,
			    M_CXGBE, M_NOWAIT | M_ZERO);
			if (ti->hftid_tab == NULL) {
				rc = ENOMEM;
				goto done;
			}
			mtx_init(&ti->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
			cv_init(&ti->hftid_cv, "t4hfcv");
		}
		if (__predict_false(sc->tids.atid_tab == NULL)) {
			rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use > 0", __func__));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
		cv_init(&ti->ftid_cv, "t4fcv");
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	l2te = NULL;
	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc->l2t);
		if (__predict_false(l2te == NULL))
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, l2te, t->fs.vlan, t->fs.eport,
		    t->fs.dmac)) {
			t4_l2t_release(l2te);
			return (ENOMEM);
		}
	}

	if (t->fs.newsmac) {
		/* XXX: alloc SMT */
		return (ENOTSUP);
	}

	if (t->fs.hash)
		return (set_hashfilter(sc, t, ftuple, l2te));
	else
		return (set_tcamfilter(sc, t, l2te));
}

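/*
 * Userland reaches set_filter() above through the CHELSIO_T4_SET_FILTER
 * ioctl on the adapter's nexus device.  A minimal sketch (assuming the
 * current filter mode includes the protocol field) of how cxgbetool-style
 * code would install a drop filter at index 0:
 *
 *	struct t4_filter t;
 *
 *	memset(&t, 0, sizeof(t));
 *	t.idx = 0;
 *	t.fs.val.proto = IPPROTO_TCP;
 *	t.fs.mask.proto = 0xff;
 *	t.fs.action = FILTER_DROP;
 *	ioctl(fd, CHELSIO_T4_SET_FILTER, &t);
 */
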
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc;

	MPASS(sc->tids.ftid_tab != NULL);
	MPASS(sc->tids.nftids > 0);

	if (t->idx >= sc->tids.nftids)
		return (EINVAL);

	mtx_lock(&sc->tids.ftid_lock);
	f = &sc->tids.ftid_tab[t->idx];
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == sc->tids.ftid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

	/* No filters possible if not initialized yet. */
	if (!(sc->flags & FULL_INIT_DONE))
		return (EINVAL);

	/*
	 * These tid-table checks also guarantee that the locks the del_*
	 * functions will acquire have been initialized.
	 */
	if (t->fs.hash) {
		if (sc->tids.hftid_tab != NULL)
			return (del_hashfilter(sc, t));
	} else {
		if (sc->tids.ftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	}

	return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

	if (f->l2te) {
		t4_l2t_release(f->l2te);
		f->l2te = NULL;
	}
}

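/*
 * Reply to a TCAM filter work request.  The cookie in the reply encodes the
 * outcome (added, deleted, SMT table full, ...); the ioctl thread sleeping
 * in set_tcamfilter or del_tcamfilter is woken up via ftid_cv.
 */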
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, cleanup, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));
	MPASS(is_ftid(sc, tid));

	cleanup = 0;
	idx = tid - sc->tids.ftid_base;
	f = &sc->tids.ftid_tab[idx];
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, idx));
	switch (rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		f->valid = 0;
		/* Fall through */
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional TCB
 * updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct filter_entry *f = lookup_atid(sc, atid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	mtx_lock(&sc->tids.hftid_lock);
	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
	    __func__, f, f->tid));
	if (status == CPL_ERR_NONE) {
		struct filter_entry *f2;

		f->tid = GET_TID(cpl);
		MPASS(f->tid < sc->tids.ntids);
		if (__predict_false((f2 = lookup_hftid(sc, f->tid)) != NULL)) {
			/* XXX: avoid hash collisions in the first place. */
			MPASS(f2->tid == f->tid);
			remove_hftid(sc, f2->tid, f2->fs.type ? 2 : 1);
			free_filter_resources(f2);
			free(f2, M_CXGBE);
		}
		insert_hftid(sc, f->tid, f, f->fs.type ? 2 : 1);
		/*
		 * Leave the filter pending until it is fully set up, which will
		 * be indicated by the reply to the last TCB update.  No need to
		 * unblock the ioctl thread either.
		 */
		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
			goto done;
		f->valid = 1;
		f->pending = 0;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = act_open_rpl_status_to_errno(status);
		f->valid = 0;
		if (act_open_has_tid(status))
			release_tid(sc, GET_TID(cpl), &sc->sge.mgmtq);
		free_filter_resources(f);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
done:
	mtx_unlock(&sc->tids.hftid_lock);

	free_atid(sc, atid);
	return (0);
}

int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));
	f->pending = 0;
	if (rpl->status == 0) {
		f->valid = 1;
	} else {
		f->tid = EIO;
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
		release_tid(sc, tid, &sc->sge.mgmtq);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));
	f->pending = 0;
	if (cpl->status == 0) {
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
		release_tid(sc, tid, &sc->sge.mgmtq);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.ntids;
	struct filter_entry *f;

	if (sc->tids.tids_in_use == 0 || sc->tids.hftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	mtx_lock(&sc->tids.hftid_lock);
	for (i = t->idx; i < nfilters; i++) {
		f = lookup_hftid(sc, i);
		if (f != NULL && f->valid) {
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (0);
}

static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			howmany(sizeof (struct cpl_act_open_req), 16),
			howmany(sizeof (struct cpl_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t5_act_open_req), 16),
			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t6_act_open_req), 16),
			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

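/*
 * Program a hashfilter by sending an Active Open for the tuple with
 * F_TCAM_BYPASS set; the hardware picks the tid.  The reply is handled by
 * t4_hashfilter_ao_rpl, which may issue additional TCB updates before the
 * filter is marked valid.
 */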
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te)
{
	void *wr;
	struct wrq_cookie cookie;
	struct filter_entry *f;
	int rc, atid = -1;

	MPASS(t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	mtx_lock(&sc->tids.hftid_lock);

	/*
	 * XXX: Check for hash collisions and insert in the hash based lookup
	 * table so that in-flight hashfilters are also considered when checking
	 * for collisions.
	 */

	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
	if (__predict_false(f == NULL)) {
		if (l2te)
			t4_l2t_release(l2te);
		rc = ENOMEM;
		goto done;
	}
	f->fs = t->fs;
	f->l2te = l2te;

	atid = alloc_atid(sc, f);
	if (__predict_false(atid == -1)) {
		if (l2te)
			t4_l2t_release(l2te);
		free(f, M_CXGBE);
		rc = EAGAIN;
		goto done;
	}
	MPASS(atid >= 0);

	wr = start_wrq_wr(&sc->sge.mgmtq, act_open_cpl_len16(sc, f->fs.type),
	    &cookie);
	if (wr == NULL) {
		free_atid(sc, atid);
		if (l2te)
			t4_l2t_release(l2te);
		free(f, M_CXGBE);
		rc = ENOMEM;
		goto done;
	}
	if (f->fs.type)
		mk_act_open_req6(sc, f, atid, ftuple, wr);
	else
		mk_act_open_req(sc, f, atid, ftuple, wr);

	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
	f->pending = 1;
	f->tid = -1;
	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = 0;
				f->locked = 0;
				t->idx = f->tid;
			} else {
				rc = f->tid;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
		uint64_t val, uint32_t tid, uint32_t qid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
	sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_req_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_abort_req_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(0);
	req->rsvd1 = 0;
	req->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__ABORT_REQ_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
	sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_rpl_core *rpl;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*rpl));

	rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
	OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->rsvd0 = htonl(0);
	rpl->rsvd1 = 0;
	rpl->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(rpl + 1);
	if (LEN__ABORT_RPL_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static inline int
del_hashfilter_wrlen(void)
{

	return (sizeof(struct work_request_hdr) +
	    roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__ABORT_REQ_ULP, 16) +
	    roundup2(LEN__ABORT_RPL_ULP, 16));
}

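/*
 * A hashfilter is deleted with a single ULP_TX work request that chains
 * three commands: a SET_TCB_FIELD to point the tid's RSS info at the
 * requested queue, an ABORT_REQ to tear the tid down, and an ABORT_RPL to
 * complete the abort without a RST.
 */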
static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
	struct ulp_txpkt *ulpmc;

	INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
	    V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
	ulpmc = mk_abort_req_ulp(ulpmc, tid);
	ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}

static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	void *wr;
	struct filter_entry *f;
	struct wrq_cookie cookie;
	int rc;
	const int wrlen = del_hashfilter_wrlen();

	MPASS(sc->tids.hftid_tab != NULL);
	MPASS(sc->tids.ntids > 0);

	if (t->idx >= sc->tids.ntids)
		return (EINVAL);

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, t->idx);
	if (f == NULL || f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == t->idx);
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	wr = start_wrq_wr(&sc->sge.mgmtq, howmany(wrlen, 16), &cookie);
	if (wr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
	f->locked = 1;
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				f->locked = 0;
				rc = EIO;
			} else {
				rc = 0;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}

static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	if (no_reply == 0) {
		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
		    V_NO_REPLY(0));
	} else
		req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);

	return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
	    (uint64_t)val << bit_pos, no_reply));
}

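/*
 * NAT rewrites are programmed by writing the replacement addresses and ports
 * into repurposed TCB words: SND_UNA_RAW (or RX_FRAG3_LEN_RAW for IPv4)
 * holds the new destination IP, RX_FRAG2_PTR_RAW (or
 * RX_FRAG3_START_IDX_OFFSET_RAW for IPv4) the new source IP, and
 * PDU_HDR_LEN the new ports.
 */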
#define WORD_MASK       0xffffffff
static void
set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
    const bool sip, const bool dp, const bool sp)
{

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
			    f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
			    f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 1, WORD_MASK,
			    f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
			    f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 2, WORD_MASK,
			    f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
			    f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 3, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
			    f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
			    f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
			    f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
			    f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
			    f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
			    f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);

		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
		}
	}

	set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
	    (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
}

/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
 * last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully setup.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->pending);
	MPASS(f->valid == 0);

	if (f->fs.newdmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
		updated++;
	}

	if (f->fs.newsmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
		set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
		    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smtidx),
		    1);
		updated++;
	}

	switch (f->fs.nat_mode) {
	case NAT_MODE_NONE:
		break;
	case NAT_MODE_DIP:
		set_nat_params(sc, f, true, false, false, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP:
		set_nat_params(sc, f, true, false, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SIP:
		set_nat_params(sc, f, true, true, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SP:
		set_nat_params(sc, f, true, false, true, true);
		updated++;
		break;
	case NAT_MODE_SIP_SP:
		set_nat_params(sc, f, false, true, false, true);
		updated++;
		break;
	case NAT_MODE_DIP_SIP_SP:
		set_nat_params(sc, f, true, true, false, true);
		updated++;
		break;
	case NAT_MODE_ALL:
		set_nat_params(sc, f, true, true, true, true);
		updated++;
		break;
	default:
		MPASS(0);	/* should have been validated earlier */
		break;
	}

	if (f->fs.nat_seq_chk) {
		set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
		updated++;
	}

	if (is_t5(sc) && f->fs.action == FILTER_DROP) {
		/*
		 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
		 */
		set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
		    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
		updated++;
	}

	/*
	 * Enable switching after all secondary resources (L2T entry, SMT entry,
	 * etc.) are setup so that any switched packet will use correct
	 * values.
	 */
	if (f->fs.action == FILTER_SWITCH) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	return (0);
}
1608