/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */

	struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *,
    struct l2t_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

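/*
 * Bookkeeping for hash filter tids.  The hftid table maps a tid to its
 * filter_entry and is protected by the hftid lock; the adapter-wide
 * tids_in_use count is maintained with atomics.  An IPv6 hash filter
 * consumes two tids, an IPv4 filter one.
 */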
static void
insert_hftid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->hftid_tab[tid] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

static void *
lookup_hftid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->hftid_tab[tid]);
}

static void
remove_hftid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->hftid_tab[tid] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

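/*
 * Translation between the T4_FILTER_* mode bits reported to userspace and
 * the cached values of the TP's vlan_pri_map (fconf) and ingress_config
 * (iconf) registers.
 */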
static uint32_t
fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
{
	uint32_t mode;

	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

	if (fconf & F_FRAGMENTATION)
		mode |= T4_FILTER_IP_FRAGMENT;

	if (fconf & F_MPSHITTYPE)
		mode |= T4_FILTER_MPS_HIT_TYPE;

	if (fconf & F_MACMATCH)
		mode |= T4_FILTER_MAC_IDX;

	if (fconf & F_ETHERTYPE)
		mode |= T4_FILTER_ETH_TYPE;

	if (fconf & F_PROTOCOL)
		mode |= T4_FILTER_IP_PROTO;

	if (fconf & F_TOS)
		mode |= T4_FILTER_IP_TOS;

	if (fconf & F_VLAN)
		mode |= T4_FILTER_VLAN;

	if (fconf & F_VNIC_ID) {
		mode |= T4_FILTER_VNIC;
		if (iconf & F_VNIC)
			mode |= T4_FILTER_IC_VNIC;
	}

	if (fconf & F_PORT)
		mode |= T4_FILTER_PORT;

	if (fconf & F_FCOE)
		mode |= T4_FILTER_FCoE;

	return (mode);
}

static uint32_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}

static uint32_t
mode_to_iconf(uint32_t mode)
{

	if (mode & T4_FILTER_IC_VNIC)
		return (F_VNIC);
	return (0);
}

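/*
 * Verify that the filter specification uses only match fields that are
 * enabled in the current filter mode (vlan_pri_map), and that its use of
 * the VNIC field is consistent with the current ingress configuration.
 */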
static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		fconf |= F_VNIC_ID;
		if (tpp->ingress_config & F_VNIC)
			return (EINVAL);
	}

	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		fconf |= F_VNIC_ID;
		if ((tpp->ingress_config & F_VNIC) == 0)
			return (EINVAL);
	}

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
		return (E2BIG);

	return (0);
}

int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tpp = &sc->params.tp;

	/*
	 * We trust the cached values of the relevant TP registers.  This means
	 * things work reliably only if writes to those registers are always via
	 * t4_set_filter_mode.
	 */
	*mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);

	return (0);
}

int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf, iconf;
	int rc;

	iconf = mode_to_iconf(mode);
	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
		/*
		 * For now we just complain if A_TP_INGRESS_CONFIG is not
		 * already set to the correct value for the requested filter
		 * mode.  It's not clear if it's safe to write to this register
		 * on the fly.  (And we trust the cached value of the register).
		 *
		 * check_fspec_against_fconf_iconf and other code that looks at
		 * tp->vlan_pri_map and tp->ingress_config needs to be reviewed
		 * thoroughly before allowing dynamic filter mode changes.
		 */
		return (EBUSY);
	}

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	rc = -t4_set_filter_mode(sc, fconf, true);
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

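/*
 * Read a filter's hit count from its TCB through a memory window.  T4
 * stores the count as a 64-bit value at offset 16 in the TCB; T5 and later
 * store a 32-bit count at offset 24.
 */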
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}

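/*
 * Look up the first valid filter at or after t->idx and copy its details
 * into *t, or set t->idx to 0xffffffff if there is none.
 */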
int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	if (t->fs.hash)
		return (get_hashfilter(sc, t));

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	mtx_lock(&sc->tids.ftid_lock);
	f = &sc->tids.ftid_tab[t->idx];
	MPASS(f->tid == sc->tids.ftid_base + t->idx);
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, f->tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (0);
}

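/*
 * Program a TCAM filter: claim the slot(s), send a FW_FILTER_WR on the
 * management queue, and sleep until t4_filter_rpl reports the result.  An
 * IPv6 filter occupies four consecutive slots.
 */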
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	const int ntids = t->fs.type ? 4 : 1;

	MPASS(!t->fs.hash);
	MPASS(t->idx < sc->tids.nftids);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	f = &sc->tids.ftid_tab[t->idx];
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16),
		    &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0) {
		if (l2te)
			t4_l2t_release(l2te);
		return (rc);
	}

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = sc->tids.ftid_base + t->idx;
	f->fs = t->fs;
	f->l2te = l2te;

	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	bzero(fwr, sizeof(*fwr));
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac) {
		/* XXX: need to use SMT idx instead */
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
	}
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		if (t->idx != (uint32_t)-1)
			return (EINVAL);	/* hw, not user picks the idx */
	} else {
		if (ti->nftids == 0)
			return (ENOTSUP);
		if (t->idx >= ti->nftids)
			return (EINVAL);
		/* IPv6 filter idx must be 4-aligned */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support removing VLAN tags for loopback filters. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);

	/*
	 * Basic checks passed.  Make sure the queues and tid tables are setup.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0)) {
		end_synchronized_op(sc, 0);
		return (rc);
	}
	if (t->fs.hash) {
		if (__predict_false(ti->hftid_tab == NULL)) {
			ti->hftid_tab = malloc(sizeof(*ti->hftid_tab) * ti->ntids,
			    M_CXGBE, M_NOWAIT | M_ZERO);
			if (ti->hftid_tab == NULL) {
				rc = ENOMEM;
				goto done;
			}
			mtx_init(&ti->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
			cv_init(&ti->hftid_cv, "t4hfcv");
		}
		if (__predict_false(sc->tids.atid_tab == NULL)) {
			rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use > 0", __func__));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
		cv_init(&ti->ftid_cv, "t4fcv");
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	l2te = NULL;
	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc->l2t);
		if (__predict_false(l2te == NULL))
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, l2te, t->fs.vlan, t->fs.eport,
		    t->fs.dmac)) {
			t4_l2t_release(l2te);
			return (ENOMEM);
		}
	}

	if (t->fs.newsmac) {
		/* XXX: alloc SMT */
		return (ENOTSUP);
	}

	if (t->fs.hash)
		return (set_hashfilter(sc, t, l2te));
	else
		return (set_tcamfilter(sc, t, l2te));
}

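/*
 * Remove a TCAM filter: send a delete work request for the filter's tid
 * and sleep until t4_filter_rpl confirms that the filter is gone.
 */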
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc;

	MPASS(sc->tids.ftid_tab != NULL);
	MPASS(sc->tids.nftids > 0);

	if (t->idx >= sc->tids.nftids)
		return (EINVAL);

	mtx_lock(&sc->tids.ftid_lock);
	f = &sc->tids.ftid_tab[t->idx];
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == sc->tids.ftid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

	/* No filters possible if not initialized yet. */
	if (!(sc->flags & FULL_INIT_DONE))
		return (EINVAL);

	/*
	 * The checks for tid tables ensure that the locks that del_* will reach
	 * for are initialized.
	 */
	if (t->fs.hash) {
		if (sc->tids.hftid_tab != NULL)
			return (del_hashfilter(sc, t));
	} else {
		if (sc->tids.ftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	}

	return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

	if (f->l2te) {
		t4_l2t_release(f->l2te);
		f->l2te = NULL;
	}
}

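/*
 * Reply to a FW_FILTER_WR.  Updates the state of the TCAM filter the work
 * request was for and wakes up any thread waiting in set_tcamfilter or
 * del_tcamfilter.
 */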
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));
	MPASS(is_ftid(sc, tid));

	idx = tid - sc->tids.ftid_base;
	f = &sc->tids.ftid_tab[idx];
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, idx));
	switch (rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		f->valid = 0;
		/* Fall through */
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional TCB
 * updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct filter_entry *f = lookup_atid(sc, atid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	mtx_lock(&sc->tids.hftid_lock);
	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
	    __func__, f, f->tid));
	if (status == CPL_ERR_NONE) {
		struct filter_entry *f2;

		f->tid = GET_TID(cpl);
		MPASS(f->tid < sc->tids.ntids);
		if (__predict_false((f2 = lookup_hftid(sc, f->tid)) != NULL)) {
			/* XXX: avoid hash collisions in the first place. */
			MPASS(f2->tid == f->tid);
			remove_hftid(sc, f2->tid, f2->fs.type ? 2 : 1);
			free_filter_resources(f2);
			free(f2, M_CXGBE);
		}
		insert_hftid(sc, f->tid, f, f->fs.type ? 2 : 1);
		/*
		 * Leave the filter pending until it is fully set up, which will
		 * be indicated by the reply to the last TCB update.  No need to
		 * unblock the ioctl thread either.
		 */
		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
			goto done;
		f->valid = 1;
		f->pending = 0;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = act_open_rpl_status_to_errno(status);
		f->valid = 0;
		if (act_open_has_tid(status))
			release_tid(sc, GET_TID(cpl), &sc->sge.mgmtq);
		free_filter_resources(f);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
done:
	mtx_unlock(&sc->tids.hftid_lock);

	free_atid(sc, atid);
	return (0);
}

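/*
 * Reply to a TCB update issued by configure_hashfilter_tcb.  This is the
 * last step in the creation of a hash filter.
 */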
int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));
	f->pending = 0;
	if (rpl->status == 0) {
		f->valid = 1;
	} else {
		f->tid = EIO;
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
		release_tid(sc, tid, &sc->sge.mgmtq);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

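/*
 * Reply to the abort that del_hashfilter sent to destroy a hash filter.
 * A status of 0 means the filter is gone: release its tid and free the
 * entry unless an ioctl thread still holds it locked.
 */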
int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));
	f->pending = 0;
	if (cpl->status == 0) {
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, tid, f->fs.type ? 2 : 1);
		release_tid(sc, tid, &sc->sge.mgmtq);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

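/*
 * Hash filter counterpart of get_filter: find the first valid hash filter
 * at or after t->idx, or set t->idx to 0xffffffff if there is none.
 */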
static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters = sc->tids.ntids;
	struct filter_entry *f;

	if (sc->tids.tids_in_use == 0 || sc->tids.hftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	mtx_lock(&sc->tids.hftid_lock);
	for (i = t->idx; i < nfilters; i++) {
		f = lookup_hftid(sc, i);
		if (f != NULL && f->valid) {
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (0);
}

static uint64_t
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
		    tp->vlan_shift;

	if (tp->port_shift >= 0 && fs->mask.iport)
		ntuple |= (uint64_t)fs->val.iport << tp->port_shift;

	if (tp->protocol_shift >= 0) {
		if (!fs->val.proto)
			ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
		else
			ntuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos)
		ntuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;

	if (tp->vnic_shift >= 0) {
#ifdef notyet
		if (tp->ingress_config & F_VNIC && fs->mask.pfvf_vld)
			ntuple |= (uint64_t)((fs->val.pfvf_vld << 16) |
					(fs->val.pf << 13) |
					(fs->val.vf)) << tp->vnic_shift;
		else
#endif
			ntuple |= (uint64_t)((fs->val.ovlan_vld << 16) |
					(fs->val.vnic)) << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx)
		ntuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
		ntuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
		ntuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;

	if (tp->frag_shift >= 0 && fs->mask.frag)
		ntuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
		ntuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;

	return (ntuple);
}

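/*
 * Fill in the CPL_ACT_OPEN_REQ6 (T5/T6 layout) that requests creation of
 * an IPv6 hash filter.  The compressed filter tuple comes from
 * hashfilter_ntuple; the filter's actions are encoded in opt0 and opt2.
 */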
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) | F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(hashfilter_ntuple(sc, &f->fs)));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    F_T5_OPT_2_VALID | F_RX_CHANNEL |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    struct cpl_act_open_req *cpl)
{
	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) | F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(hashfilter_ntuple(sc, &f->fs)));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    F_T5_OPT_2_VALID | F_RX_CHANNEL |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

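/*
 * Size of the ACT_OPEN request, in 16-byte units, for the given chip and
 * address family.
 */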
static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			howmany(sizeof (struct cpl_act_open_req), 16),
			howmany(sizeof (struct cpl_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t5_act_open_req), 16),
			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t6_act_open_req), 16),
			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

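/*
 * Create a hash filter: allocate a filter_entry and an atid, send the
 * ACT_OPEN request that installs the filter, and sleep until the reply
 * handlers finish setting it up (f->tid carries an errno on failure).
 */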
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te)
{
	void *wr;
	struct wrq_cookie cookie;
	struct filter_entry *f;
	int rc, atid = -1;

	MPASS(t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	mtx_lock(&sc->tids.hftid_lock);

	/*
	 * XXX: Check for hash collisions and insert in the hash based lookup
	 * table so that in-flight hashfilters are also considered when checking
	 * for collisions.
	 */

	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
	if (__predict_false(f == NULL)) {
		if (l2te)
			t4_l2t_release(l2te);
		rc = ENOMEM;
		goto done;
	}
	f->fs = t->fs;
	f->l2te = l2te;

	atid = alloc_atid(sc, f);
	if (__predict_false(atid == -1)) {
		if (l2te)
			t4_l2t_release(l2te);
		free(f, M_CXGBE);
		rc = EAGAIN;
		goto done;
	}
	MPASS(atid >= 0);

	wr = start_wrq_wr(&sc->sge.mgmtq, act_open_cpl_len16(sc, f->fs.type),
	    &cookie);
	if (wr == NULL) {
		free_atid(sc, atid);
		if (l2te)
			t4_l2t_release(l2te);
		free(f, M_CXGBE);
		rc = ENOMEM;
		goto done;
	}
	if (f->fs.type)
		mk_act_open_req6(sc, f, atid, wr);
	else
		mk_act_open_req(sc, f, atid, wr);

	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
	f->pending = 1;
	f->tid = -1;
	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = 0;
				f->locked = 0;
				t->idx = f->tid;
			} else {
				rc = f->tid;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_req_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_abort_req_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(0);
	req->rsvd1 = 0;
	req->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__ABORT_REQ_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_rpl_core *rpl;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*rpl));

	rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
	OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->rsvd0 = htonl(0);
	rpl->rsvd1 = 0;
	rpl->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(rpl + 1);
	if (LEN__ABORT_RPL_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static inline int
del_hashfilter_wrlen(void)
{

	return (sizeof(struct work_request_hdr) +
	    roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__ABORT_REQ_ULP, 16) +
	    roundup2(LEN__ABORT_RPL_ULP, 16));
}

static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
	struct ulp_txpkt *ulpmc;

	INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
	    V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
	ulpmc = mk_abort_req_ulp(ulpmc, tid);
	ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}

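/*
 * Remove a hash filter.  The work request bundles three ULP_TX commands: a
 * SET_TCB_FIELD that points the TCB's RSS info at the queue that should
 * receive the reply, an ABORT_REQ for the tid, and the matching ABORT_RPL.
 * The final reply is processed by t4_del_hashfilter_rpl.
 */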
static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	void *wr;
	struct filter_entry *f;
	struct wrq_cookie cookie;
	int rc;
	const int wrlen = del_hashfilter_wrlen();

	MPASS(sc->tids.hftid_tab != NULL);
	MPASS(sc->tids.ntids > 0);

	if (t->idx >= sc->tids.ntids)
		return (EINVAL);

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, t->idx);
	if (f == NULL || f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == t->idx);
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	wr = start_wrq_wr(&sc->sge.mgmtq, howmany(wrlen, 16), &cookie);
	if (wr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
	f->locked = 1;
	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, wr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				f->locked = 0;
				rc = EIO;
			} else {
				rc = 0;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}

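/*
 * Update one field of a hash filter's TCB with a CPL_SET_TCB_FIELD,
 * optionally requesting a reply (delivered to the firmware event queue).
 */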
static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	req = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	if (no_reply == 0) {
		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
		    V_NO_REPLY(0));
	} else
		req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	commit_wrq_wr(&sc->sge.mgmtq, req, &cookie);

	return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val)
{

	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
	    (uint64_t)val << bit_pos, 1));
}

/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
 * last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully set up.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->pending);
	MPASS(f->valid == 0);

	if (f->fs.newdmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	return (0);
}