/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/fnv_hash.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_smt.h"

struct filter_entry {
	LIST_ENTRY(filter_entry) link_4t;
	LIST_ENTRY(filter_entry) link_tid;

	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */
	struct smt_entry *smt;	/* SMT entry for SMAC rewrite */

	struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_tcamfilter(struct adapter *, struct t4_filter *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

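/*
 * T6+ keeps high priority (fs.prio) filters in their own region of the
 * TCAM; earlier chips have a single filter region.
 */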
static inline bool
separate_hpfilter_region(struct adapter *sc)
{

	return (chip_id(sc) >= CHELSIO_T6);
}

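/* FNV-1 hash of a hashfilter's 4-tuple (sip, dip, sport, dport). */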
static inline uint32_t
hf_hashfn_4t(struct t4_filter_specification *fs)
{
	struct t4_filter_tuple *ft = &fs->val;
	uint32_t hash;

	if (fs->type) {
		/* IPv6 */
		hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT);
		hash = fnv_32_buf(&ft->dip[0], 16, hash);
	} else {
		hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT);
		hash = fnv_32_buf(&ft->dip[0], 4, hash);
	}
	hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash);
	hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash);

	return (hash);
}

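/* FNV-1 hash of a filter's tid, for the tid hash table. */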
static inline uint32_t
hf_hashfn_tid(int tid)
{

	return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT));
}

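/*
 * Allocate the two hashfilter lookup tables (one keyed by 4-tuple, one by
 * tid) and the lock and cv that protect them.  Sized at roughly one bucket
 * per 1K tids, with a floor of 16 buckets.
 */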
static int
alloc_hftid_hash(struct tid_info *t, int flags)
{
	int n;

	MPASS(t->ntids > 0);
	MPASS(t->hftid_hash_4t == NULL);
	MPASS(t->hftid_hash_tid == NULL);

	n = max(t->ntids / 1024, 16);
	t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags);
	if (t->hftid_hash_4t == NULL)
		return (ENOMEM);
	t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask,
	    flags);
	if (t->hftid_hash_tid == NULL) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
		return (ENOMEM);
	}

	mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
	cv_init(&t->hftid_cv, "t4hfcv");

	return (0);
}

void
free_hftid_hash(struct tid_info *t)
{
	struct filter_entry *f, *ftmp;
	LIST_HEAD(, filter_entry) *head;
	int i;
#ifdef INVARIANTS
	int n = 0;
#endif

	if (t->tids_in_use > 0) {
		/* Remove everything from the tid hash. */
		head = t->hftid_hash_tid;
		for (i = 0; i <= t->hftid_tid_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) {
				LIST_REMOVE(f, link_tid);
			}
		}

		/* Remove and then free each filter in the 4t hash. */
		head = t->hftid_hash_4t;
		for (i = 0; i <= t->hftid_4t_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) {
#ifdef INVARIANTS
				n += f->fs.type ? 2 : 1;
#endif
				LIST_REMOVE(f, link_4t);
				free(f, M_CXGBE);
			}
		}
		MPASS(t->tids_in_use == n);
		t->tids_in_use = 0;
	}

	if (t->hftid_hash_4t) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
	}
	if (t->hftid_hash_tid) {
		hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask);
		t->hftid_hash_tid = NULL;
	}
	if (mtx_initialized(&t->hftid_lock)) {
		mtx_destroy(&t->hftid_lock);
		cv_destroy(&t->hftid_cv);
	}
}

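/* Add a hashfilter to the 4-tuple hash.  An IPv6 filter uses up 2 tids. */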
static void
insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;

	MPASS(head != NULL);
	if (hash == 0)
		hash = hf_hashfn_4t(&f->fs);
	LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t);
	atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

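/* Add a hashfilter that has been assigned a tid to the tid hash. */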
static void
insert_hftid(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	uint32_t hash;

	MPASS(f->tid >= t->tid_base);
	MPASS(f->tid - t->tid_base < t->ntids);
	mtx_assert(&t->hftid_lock, MA_OWNED);

	hash = hf_hashfn_tid(f->tid);
	LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid);
}

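/*
 * Compare two hashfilter specifications.  They are equal if their 4-tuples
 * and every field selected by their (necessarily identical) compressed
 * filter masks match.
 */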
static bool
filter_eq(struct t4_filter_specification *fs1,
    struct t4_filter_specification *fs2)
{
	int n;

	MPASS(fs1->hash && fs2->hash);

	if (fs1->type != fs2->type)
		return (false);

	n = fs1->type ? 16 : 4;
	if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) ||
	    bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) ||
	    fs1->val.sport != fs2->val.sport ||
	    fs1->val.dport != fs2->val.dport)
		return (false);

	/*
	 * We know the masks are the same because all hashfilter masks have to
	 * conform to the global tp->hash_filter_mask and the driver has
	 * verified that already.
	 */

	if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) &&
	    fs1->val.vnic != fs2->val.vnic)
		return (false);
	if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan)
		return (false);
	if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx)
		return (false);
	if (fs1->mask.frag && fs1->val.frag != fs2->val.frag)
		return (false);
	if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype)
		return (false);
	if (fs1->mask.iport && fs1->val.iport != fs2->val.iport)
		return (false);
	if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe)
		return (false);
	if (fs1->mask.proto && fs1->val.proto != fs2->val.proto)
		return (false);
	if (fs1->mask.tos && fs1->val.tos != fs2->val.tos)
		return (false);
	if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype)
		return (false);

	return (true);
}

static struct filter_entry *
lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
	struct filter_entry *f;

	mtx_assert(&t->hftid_lock, MA_OWNED);
	MPASS(head != NULL);

	if (hash == 0)
		hash = hf_hashfn_4t(fs);

	LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) {
		if (filter_eq(&f->fs, fs))
			return (f);
	}

	return (NULL);
}

static struct filter_entry *
lookup_hftid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	struct filter_entry *f;
	uint32_t hash;

	mtx_assert(&t->hftid_lock, MA_OWNED);
	MPASS(head != NULL);

	hash = hf_hashfn_tid(tid);
	LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) {
		if (f->tid == tid)
			return (f);
	}

	return (NULL);
}

static void
remove_hf(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);

	LIST_REMOVE(f, link_4t);
	atomic_subtract_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

static void
remove_hftid(struct adapter *sc, struct filter_entry *f)
{
#ifdef INVARIANTS
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);
#endif

	LIST_REMOVE(f, link_tid);
}

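/*
 * Convert T4_FILTER_* mode bits to the matching field selectors in
 * TP_VLAN_PRI_MAP (and, in mode_to_iconf below, the VNIC bit in
 * TP_INGRESS_CONFIG).
 */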
static uint32_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}

static uint32_t
mode_to_iconf(uint32_t mode)
{

	if (mode & T4_FILTER_IC_VNIC)
		return (F_VNIC);
	return (0);
}

static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		fconf |= F_VNIC_ID;
		if (tpp->ingress_config & F_VNIC)
			return (EINVAL);
	}

	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		fconf |= F_VNIC_ID;
		if ((tpp->ingress_config & F_VNIC) == 0)
			return (EINVAL);
	}

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
		return (E2BIG);

	return (0);
}

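/*
 * Report the filter mode to the caller.  A field is reported only if it is
 * part of the compressed filter tuple and, in hashfilter mode, fully
 * covered by the global hash filter mask.
 */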
int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t mask;

	/* Non-zero incoming value in mode means "hashfilter mode". */
	mask = *mode ? tp->hash_filter_mask : UINT64_MAX;

	/* Always */
	*mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit)  do { \
	if (tp->vlan_pri_map & (fconf_bit)) { \
		MPASS(tp->field_shift >= 0); \
		if ((mask >> tp->field_shift & field_mask) == field_mask) \
			*mode |= (mode_bit); \
	} \
} while (0)

	CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT);
	CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE);
	CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX);
	CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE);
	CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO);
	CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS);
	CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN);
	CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID, T4_FILTER_VNIC);
	if (tp->ingress_config & F_VNIC)
		*mode |= T4_FILTER_IC_VNIC;
	CHECK_FIELD(F_PORT, port_shift, M_FT_PORT, T4_FILTER_PORT);
	CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE, T4_FILTER_FCoE);
#undef CHECK_FIELD

	return (0);
}

int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf, iconf;
	int rc;

	iconf = mode_to_iconf(mode);
	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
		/*
		 * For now we just complain if A_TP_INGRESS_CONFIG is not
		 * already set to the correct value for the requested filter
		 * mode.  It's not clear if it's safe to write to this register
		 * on the fly.  (And we trust the cached value of the register).
		 *
		 * check_fspec_against_fconf_iconf and other code that looks at
		 * tp->vlan_pri_map and tp->ingress_config need to be reviewed
		 * thoroughly before allowing dynamic filter mode changes.
		 */
		return (EBUSY);
	}

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0 || sc->tids.hpftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	rc = -t4_set_filter_mode(sc, fconf, true);
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

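/*
 * Read a filter's hit count straight out of its TCB via a memory window.
 * T4 keeps a 64-bit count at offset 16 in the TCB; later chips keep a
 * 32-bit count at offset 24.
 */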
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}

int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	if (t->fs.hash)
		return (get_hashfilter(sc, t));
	else
		return (get_tcamfilter(sc, t));
}

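/*
 * Program a TCAM filter at the requested index with a FW_FILTER_WR (or
 * FW_FILTER2_WR when available) and wait for the firmware's reply.  An IPv6
 * filter occupies 4 consecutive slots in the TCAM.
 */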
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
	struct filter_entry *f;
	struct fw_filter2_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	u_int tid;
	const int ntids = t->fs.type ? 4 : 1;

	MPASS(!t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	if (separate_hpfilter_region(sc) && t->fs.prio) {
		MPASS(t->idx < sc->tids.nhpftids);
		f = &sc->tids.hpftid_tab[t->idx];
		tid = sc->tids.hpftid_base + t->idx;
	} else {
		MPASS(t->idx < sc->tids.nftids);
		f = &sc->tids.ftid_tab[t->idx];
		tid = sc->tids.ftid_base + t->idx;
	}
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		int len16;

		if (sc->params.filter2_wr_support)
			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
		else
			len16 = howmany(sizeof(struct fw_filter_wr), 16);
		fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			if (separate_hpfilter_region(sc) && t->fs.prio)
				sc->tids.hpftids_in_use++;
			else
				sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0)
		return (rc);

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = tid;
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	bzero(fwr, sizeof(*fwr));
	if (sc->params.filter2_wr_support)
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	/* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
	bzero(fwr->sma, sizeof (fwr->sma));
	if (sc->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
		    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
		    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
			ULP_MODE_TCPDDP : ULP_MODE_NONE) |
		    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
		    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
		fwr->newlport = htobe16(f->fs.nat_dport);
		fwr->newfport = htobe16(f->fs.nat_sport);
		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
	}
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

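/*
 * Construct the compressed filter tuple for a hashfilter from the fields
 * selected in TP_VLAN_PRI_MAP.  Fails unless the resulting mask matches the
 * global hash filter mask exactly.
 */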
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t fmask;

	*ftuple = fmask = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan) {
		*ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;
		fmask |= M_FT_VLAN << tp->vlan_shift;
	}

	if (tp->port_shift >= 0 && fs->mask.iport) {
		*ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
		fmask |= M_FT_PORT << tp->port_shift;
	}

	if (tp->protocol_shift >= 0 && fs->mask.proto) {
		*ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
		fmask |= M_FT_PROTOCOL << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos) {
		*ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
		fmask |= M_FT_TOS << tp->tos_shift;
	}

	if (tp->vnic_shift >= 0 && fs->mask.vnic) {
		/* F_VNIC in ingress config was already validated. */
		if (tp->ingress_config & F_VNIC)
			MPASS(fs->mask.pfvf_vld);
		else
			MPASS(fs->mask.ovlan_vld);

		*ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
		fmask |= M_FT_VNIC_ID << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
		*ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
		fmask |= M_FT_MACMATCH << tp->macmatch_shift;
	}

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
		*ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
		fmask |= M_FT_ETHERTYPE << tp->ethertype_shift;
	}

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
		*ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
		fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift;
	}

	if (tp->frag_shift >= 0 && fs->mask.frag) {
		*ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
		fmask |= M_FT_FRAGMENTATION << tp->frag_shift;
	}

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
		*ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
		fmask |= M_FT_FCOE << tp->fcoe_shift;
	}

	/* A hashfilter must conform to the filterMask. */
	if (fmask != tp->hash_filter_mask)
		return (EINVAL);

	return (0);
}

static bool
is_4tuple_specified(struct t4_filter_specification *fs)
{
	int i;
	const int n = fs->type ? 16 : 4;

	if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff)
		return (false);

	for (i = 0; i < n; i++) {
		if (fs->mask.sip[i] != 0xff)
			return (false);
		if (fs->mask.dip[i] != 0xff)
			return (false);
	}

	return (true);
}

int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te = NULL;
	struct smt_entry *smt = NULL;
	uint64_t ftuple;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		/* Hardware, not user, selects a tid for hashfilters. */
		if (t->idx != (uint32_t)-1)
			return (EINVAL);
		/* T5 can't count hashfilter hits. */
		if (is_t5(sc) && t->fs.hitcnts)
			return (EINVAL);
		if (!is_4tuple_specified(&t->fs))
			return (EINVAL);
		rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
		if (rc != 0)
			return (rc);
	} else {
		if (separate_hpfilter_region(sc) && t->fs.prio) {
			if (ti->nhpftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nhpftids)
				return (EINVAL);
		} else {
			if (ti->nftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nftids)
				return (EINVAL);
		}
		/* IPv6 filter idx must be 4 aligned */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
	    t->fs.swapmac || t->fs.nat_mode))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iqid/rss_info if not steering. */
	if (!t->fs.dirsteer && !t->fs.dirsteerhash && !t->fs.maskhash && t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);

	/*
	 * Basic checks passed.  Make sure the queues and tid tables are setup.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0)) {
		end_synchronized_op(sc, 0);
		return (rc);
	}
	if (t->fs.hash) {
		if (__predict_false(ti->hftid_hash_4t == NULL)) {
			rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (separate_hpfilter_region(sc) && t->fs.prio &&
	    __predict_false(ti->hpftid_tab == NULL)) {
		MPASS(ti->nhpftids != 0);
		KASSERT(ti->hpftids_in_use == 0,
		    ("%s: no memory allocated but hpftids_in_use is %u",
		    __func__, ti->hpftids_in_use));
		ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
		    ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->hpftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		MPASS(ti->nftids != 0);
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use is %u",
		    __func__, ti->ftids_in_use));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc, t->fs.vlan, t->fs.eport,
		    t->fs.dmac);
		if (__predict_false(l2te == NULL)) {
			rc = EAGAIN;
			goto error;
		}
	}

	if (t->fs.newsmac) {
		/* This filter needs an SMT entry; allocate one. */
		smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
		if (__predict_false(smt == NULL)) {
			rc = EAGAIN;
			goto error;
		}
		rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
		if (rc)
			goto error;
	}

	if (t->fs.hash)
		rc = set_hashfilter(sc, t, ftuple, l2te, smt);
	else
		rc = set_tcamfilter(sc, t, l2te, smt);

	if (rc != 0 && rc != EINPROGRESS) {
error:
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
	}
	return (rc);
}

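/*
 * Remove the TCAM filter at the given index by sending a delete-filter work
 * request and waiting for the firmware's reply.
 */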
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc, nfilters;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	mtx_lock(&sc->tids.ftid_lock);
	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}
	MPASS(f != NULL);	/* Caller checked this. */
	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}
	f += t->idx;

	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == tid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

	/* No filters possible if not initialized yet. */
	if (!(sc->flags & FULL_INIT_DONE))
		return (EINVAL);

	/*
	 * The checks for tid tables ensure that the locks that del_* will reach
	 * for are initialized.
	 */
	if (t->fs.hash) {
		if (sc->tids.hftid_hash_4t != NULL)
			return (del_hashfilter(sc, t));
	} else if (separate_hpfilter_region(sc) && t->fs.prio) {
		if (sc->tids.hpftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	} else {
		if (sc->tids.ftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	}

	return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

	if (f->l2te) {
		t4_l2t_release(f->l2te);
		f->l2te = NULL;
	}
	if (f->smt) {
		t4_smt_release(f->smt);
		f->smt = NULL;
	}
}

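/*
 * Modify a field of a TCB with a CPL_SET_TCB_FIELD sent on the control
 * queue.  Replies, if requested, carry CPL_COOKIE_HASHFILTER and are
 * delivered to the firmware event queue.
 */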
static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	if (no_reply == 0) {
		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
		    V_NO_REPLY(0));
	} else
		req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);

	return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
	    (uint64_t)val << bit_pos, no_reply));
}

int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_hpftid(sc, tid)) {
		idx = tid - sc->tids.hpftid_base;
		f = &sc->tids.hpftid_tab[idx];
	} else if (is_ftid(sc, tid)) {
		idx = tid - sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
	} else
		panic("%s: FW reply for invalid TID %d.", __func__, tid);

	MPASS(f->tid == tid);
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, tid));
	switch (rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		if (f->fs.newsmac) {
			MPASS(f->smt != NULL);
			set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
			    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
			    V_TCB_SMAC_SEL(f->smt->idx), 1);
			/* XXX: wait for reply to TCB update before !pending */
		}
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		f->valid = 0;
		/* Fall through */
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		if (separate_hpfilter_region(sc) && f->fs.prio)
			sc->tids.hpftids_in_use--;
		else
			sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional TCB
 * updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct filter_entry *f = lookup_atid(sc, atid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	mtx_lock(&sc->tids.hftid_lock);
	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
	    __func__, f, f->tid));
	if (status == CPL_ERR_NONE) {
		f->tid = GET_TID(cpl);
		MPASS(lookup_hftid(sc, f->tid) == NULL);
		insert_hftid(sc, f);
		/*
		 * Leave the filter pending until it is fully set up, which will
		 * be indicated by the reply to the last TCB update.  No need to
		 * unblock the ioctl thread either.
		 */
		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
			goto done;
		f->valid = 1;
		f->pending = 0;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = act_open_rpl_status_to_errno(status);
		f->valid = 0;
		f->pending = 0;
		if (act_open_has_tid(status))
			release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
		free_filter_resources(f);
		remove_hf(sc, f);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
done:
	mtx_unlock(&sc->tids.hftid_lock);

	free_atid(sc, atid);
	return (0);
}

int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));
	f->pending = 0;
	if (rpl->status == 0) {
		f->valid = 1;
	} else {
		f->tid = EIO;
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));
	f->pending = 0;
	if (cpl->status == 0) {
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

static int
get_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters;
	struct filter_entry *f;
	u_int in_use;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	MPASS(!t->fs.hash);

	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
		in_use = sc->tids.hpftids_in_use;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
		in_use = sc->tids.ftids_in_use;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}

	if (in_use == 0 || f == NULL || t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	f += t->idx;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			MPASS(f->tid == tid_base + i);
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, f->tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (0);
}

static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	int tid;
	struct filter_entry *f;
	const int inv_tid = ti->ntids + ti->tid_base;

	MPASS(t->fs.hash);

	if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL ||
	    t->idx >= inv_tid) {
		t->idx = 0xffffffff;
		return (0);
	}
	if (t->idx < ti->tid_base)
		t->idx = ti->tid_base;

	mtx_lock(&ti->hftid_lock);
	for (tid = t->idx; tid < inv_tid; tid++) {
		f = lookup_hftid(sc, tid);
		if (f != NULL && f->valid) {
			t->idx = tid;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&ti->hftid_lock);
	return (0);
}

static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

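/*
 * Size, in 16-byte units, of the act_open request for this chip and address
 * family.
 */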
static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			howmany(sizeof (struct cpl_act_open_req), 16),
			howmany(sizeof (struct cpl_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t5_act_open_req), 16),
			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t6_act_open_req), 16),
			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

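/*
 * Create a hashfilter by sending an act_open request carrying the
 * compressed filter tuple, then wait until the reply handlers have fully
 * configured the filter's TCB.
 */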
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te, struct smt_entry *smt)
{
	void *wr;
	struct wrq_cookie cookie;
	struct filter_entry *f;
	int rc, atid = -1;
	uint32_t hash;

	MPASS(t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	hash = hf_hashfn_4t(&t->fs);

	mtx_lock(&sc->tids.hftid_lock);
	if (lookup_hf(sc, &t->fs, hash) != NULL) {
		rc = EEXIST;
		goto done;
	}

	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
	if (__predict_false(f == NULL)) {
		rc = ENOMEM;
		goto done;
	}
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	atid = alloc_atid(sc, f);
	if (__predict_false(atid == -1)) {
		free(f, M_CXGBE);
		rc = EAGAIN;
		goto done;
	}
	MPASS(atid >= 0);

	wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
	    &cookie);
	if (wr == NULL) {
		free_atid(sc, atid);
		free(f, M_CXGBE);
		rc = ENOMEM;
		goto done;
	}
	if (f->fs.type)
		mk_act_open_req6(sc, f, atid, ftuple, wr);
	else
		mk_act_open_req(sc, f, atid, ftuple, wr);

	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
	f->pending = 1;
	f->tid = -1;
	insert_hf(sc, f, hash);
	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = 0;
				f->locked = 0;
				t->idx = f->tid;
			} else {
				rc = f->tid;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
	sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_req_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_abort_req_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(0);
	req->rsvd1 = 0;
	req->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__ABORT_REQ_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
	sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_rpl_core *rpl;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*rpl));

	rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
	OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->rsvd0 = htonl(0);
	rpl->rsvd1 = 0;
	rpl->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(rpl + 1);
	if (LEN__ABORT_RPL_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

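/*
 * A hashfilter is deleted with a composite work request: a SET_TCB_FIELD to
 * point the tid's RSS_INFO at the reply queue, followed by an ABORT_REQ and
 * an ABORT_RPL, all sent as ULP_TX packets.
 */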
static inline int
del_hashfilter_wrlen(void)
{

	return (sizeof(struct work_request_hdr) +
	    roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__ABORT_REQ_ULP, 16) +
	    roundup2(LEN__ABORT_RPL_ULP, 16));
}

static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
	struct ulp_txpkt *ulpmc;

	INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
	    V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
	ulpmc = mk_abort_req_ulp(ulpmc, tid);
	ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}

static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	void *wr;
	struct filter_entry *f;
	struct wrq_cookie cookie;
	int rc;
	const int wrlen = del_hashfilter_wrlen();
	const int inv_tid = ti->ntids + ti->tid_base;

	MPASS(sc->tids.hftid_hash_4t != NULL);
	MPASS(sc->tids.ntids > 0);

	if (t->idx < sc->tids.tid_base || t->idx >= inv_tid)
		return (EINVAL);

	mtx_lock(&ti->hftid_lock);
	f = lookup_hftid(sc, t->idx);
	if (f == NULL || f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == t->idx);
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
	if (wr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
	f->locked = 1;
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				f->locked = 0;
				rc = EIO;
			} else {
				rc = 0;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&ti->hftid_lock);
	return (rc);
}

#define WORD_MASK       0xffffffff
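/*
 * Write the NAT rewrite values (addresses and ports, as selected by the
 * dip/sip/dp/sp flags) into the TCB words that the hardware reads them from.
 */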
static void
set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
    const bool sip, const bool dp, const bool sp)
{

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
			    f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
			    f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 1, WORD_MASK,
			    f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
			    f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 2, WORD_MASK,
			    f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
			    f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 3, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
			    f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
			    f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
			    f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
			    f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
			    f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
			    f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);

		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
		}
	}

	set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
	    (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
}

/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
 * last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully setup.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->pending);
	MPASS(f->valid == 0);

	if (f->fs.newdmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
		updated++;
	}

	if (f->fs.newsmac) {
		MPASS(f->smt != NULL);
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
		set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
		    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
		    1);
		updated++;
	}

	switch (f->fs.nat_mode) {
	case NAT_MODE_NONE:
		break;
	case NAT_MODE_DIP:
		set_nat_params(sc, f, true, false, false, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP:
		set_nat_params(sc, f, true, false, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SIP:
		set_nat_params(sc, f, true, true, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SP:
		set_nat_params(sc, f, true, false, true, true);
		updated++;
		break;
	case NAT_MODE_SIP_SP:
		set_nat_params(sc, f, false, true, false, true);
		updated++;
		break;
	case NAT_MODE_DIP_SIP_SP:
		set_nat_params(sc, f, true, true, false, true);
		updated++;
		break;
	case NAT_MODE_ALL:
		set_nat_params(sc, f, true, true, true, true);
		updated++;
		break;
	default:
		MPASS(0);	/* should have been validated earlier */
		break;
	}

	if (f->fs.nat_seq_chk) {
		set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
		updated++;
	}

	if (is_t5(sc) && f->fs.action == FILTER_DROP) {
		/*
		 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
		 */
		set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
		    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
		updated++;
	}

	/*
	 * Enable switching after all secondary resources (L2T entry, SMT entry,
	 * etc.) are setup so that any switched packet will use correct
	 * values.
	 */
	if (f->fs.action == FILTER_SWITCH) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	return (0);
}