/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/fnv_hash.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_smt.h"

struct filter_entry {
	LIST_ENTRY(filter_entry) link_4t;
	LIST_ENTRY(filter_entry) link_tid;

	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */
	struct smt_entry *smt;	/* SMT entry for SMAC rewrite */

	struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_tcamfilter(struct adapter *, struct t4_filter *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

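/*
 * T6 and later chips keep high-priority filters in a separate region with its
 * own tid range (the hpftid_* fields of tid_info); earlier chips have a single
 * TCAM filter region and the ftid range covers all of it.
 */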
static inline bool
separate_hpfilter_region(struct adapter *sc)
{

	return (chip_id(sc) >= CHELSIO_T6);
}

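/*
 * 32-bit FNV hash of the filter's 4-tuple (source/destination addresses and
 * ports), used to bucket hashfilters in the hftid_hash_4t table.  IPv6 filters
 * hash all 16 address bytes, IPv4 filters just the first 4.
 */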
static inline uint32_t
hf_hashfn_4t(struct t4_filter_specification *fs)
{
	struct t4_filter_tuple *ft = &fs->val;
	uint32_t hash;

	if (fs->type) {
		/* IPv6 */
		hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT);
		hash = fnv_32_buf(&ft->dip[0], 16, hash);
	} else {
		hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT);
		hash = fnv_32_buf(&ft->dip[0], 4, hash);
	}
	hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash);
	hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash);

	return (hash);
}

static inline uint32_t
hf_hashfn_tid(int tid)
{

	return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT));
}

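/*
 * Hashfilters are tracked in two hash tables: hftid_hash_4t is keyed by the
 * filter's 4-tuple and is used to detect duplicates before the hardware has
 * assigned a tid, and hftid_hash_tid is keyed by tid so that firmware replies
 * can be matched back to their filter.  Both tables are protected by
 * hftid_lock.
 */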
static int
alloc_hftid_hash(struct tid_info *t, int flags)
{
	int n;

	MPASS(t->ntids > 0);
	MPASS(t->hftid_hash_4t == NULL);
	MPASS(t->hftid_hash_tid == NULL);

	n = max(t->ntids / 1024, 16);
	t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags);
	if (t->hftid_hash_4t == NULL)
		return (ENOMEM);
	t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask,
	    flags);
	if (t->hftid_hash_tid == NULL) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
		return (ENOMEM);
	}

	mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
	cv_init(&t->hftid_cv, "t4hfcv");

	return (0);
}

void
free_hftid_hash(struct tid_info *t)
{
	struct filter_entry *f, *ftmp;
	LIST_HEAD(, filter_entry) *head;
	int i;
#ifdef INVARIANTS
	int n = 0;
#endif

	if (t->tids_in_use > 0) {
		/* Remove everything from the tid hash. */
		head = t->hftid_hash_tid;
		for (i = 0; i <= t->hftid_tid_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) {
				LIST_REMOVE(f, link_tid);
			}
		}

		/* Remove and then free each filter in the 4t hash. */
		head = t->hftid_hash_4t;
		for (i = 0; i <= t->hftid_4t_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) {
#ifdef INVARIANTS
				n += f->fs.type ? 2 : 1;
#endif
				LIST_REMOVE(f, link_4t);
				free(f, M_CXGBE);
			}
		}
		MPASS(t->tids_in_use == n);
		t->tids_in_use = 0;
	}

	if (t->hftid_hash_4t) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
	}
	if (t->hftid_hash_tid) {
		hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask);
		t->hftid_hash_tid = NULL;
	}
	if (mtx_initialized(&t->hftid_lock)) {
		mtx_destroy(&t->hftid_lock);
		cv_destroy(&t->hftid_cv);
	}
}

static void
insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;

	MPASS(head != NULL);
	if (hash == 0)
		hash = hf_hashfn_4t(&f->fs);
	LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t);
	atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

static void
insert_hftid(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	uint32_t hash;

	MPASS(f->tid >= t->tid_base);
	MPASS(f->tid - t->tid_base < t->ntids);
	mtx_assert(&t->hftid_lock, MA_OWNED);

	hash = hf_hashfn_tid(f->tid);
	LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid);
}

static bool
filter_eq(struct t4_filter_specification *fs1,
    struct t4_filter_specification *fs2)
{
	int n;

	MPASS(fs1->hash && fs2->hash);

	if (fs1->type != fs2->type)
		return (false);

	n = fs1->type ? 16 : 4;
	if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) ||
	    bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) ||
	    fs1->val.sport != fs2->val.sport ||
	    fs1->val.dport != fs2->val.dport)
		return (false);

	/*
	 * We know the masks are the same because all hashfilter masks have to
	 * conform to the global tp->hash_filter_mask and the driver has
	 * verified that already.
	 */

	if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) &&
	    fs1->val.vnic != fs2->val.vnic)
		return (false);
	if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan)
		return (false);
	if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx)
		return (false);
	if (fs1->mask.frag && fs1->val.frag != fs2->val.frag)
		return (false);
	if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype)
		return (false);
	if (fs1->mask.iport && fs1->val.iport != fs2->val.iport)
		return (false);
	if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe)
		return (false);
	if (fs1->mask.proto && fs1->val.proto != fs2->val.proto)
		return (false);
	if (fs1->mask.tos && fs1->val.tos != fs2->val.tos)
		return (false);
	if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype)
		return (false);

	return (true);
}

static struct filter_entry *
lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
	struct filter_entry *f;

	mtx_assert(&t->hftid_lock, MA_OWNED);
	MPASS(head != NULL);

	if (hash == 0)
		hash = hf_hashfn_4t(fs);

	LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) {
		if (filter_eq(&f->fs, fs))
			return (f);
	}

	return (NULL);
}

static struct filter_entry *
lookup_hftid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	struct filter_entry *f;
	uint32_t hash;

	mtx_assert(&t->hftid_lock, MA_OWNED);
	MPASS(head != NULL);

	hash = hf_hashfn_tid(tid);
	LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) {
		if (f->tid == tid)
			return (f);
	}

	return (NULL);
}

static void
remove_hf(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);

	LIST_REMOVE(f, link_4t);
	atomic_subtract_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

static void
remove_hftid(struct adapter *sc, struct filter_entry *f)
{
#ifdef INVARIANTS
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);
#endif

	LIST_REMOVE(f, link_tid);
}

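/*
 * The user-visible filter mode (T4_FILTER_* bits) mirrors the chip's
 * compressed filter configuration (F_* bits in TP_VLAN_PRI_MAP).  For
 * example, mode_to_fconf(T4_FILTER_PORT | T4_FILTER_ETH_TYPE) is
 * F_PORT | F_ETHERTYPE.
 */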
static uint32_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}

static uint32_t
mode_to_iconf(uint32_t mode)
{

	if (mode & T4_FILTER_IC_VNIC)
		return (F_VNIC);
	return (0);
}

static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		fconf |= F_VNIC_ID;
		if (tpp->ingress_config & F_VNIC)
			return (EINVAL);
	}

	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		fconf |= F_VNIC_ID;
		if ((tpp->ingress_config & F_VNIC) == 0)
			return (EINVAL);
	}

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
		return (E2BIG);

	return (0);
}

int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t mask;

	/* Non-zero incoming value in mode means "hashfilter mode". */
	mask = *mode ? tp->hash_filter_mask : UINT64_MAX;

	/* Always */
	*mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

#define CHECK_FIELD(fconf_bit, field_shift, field_mask, mode_bit)  do { \
	if (tp->vlan_pri_map & (fconf_bit)) { \
		MPASS(tp->field_shift >= 0); \
		if ((mask >> tp->field_shift & field_mask) == field_mask) \
			*mode |= (mode_bit); \
	} \
} while (0)

	CHECK_FIELD(F_FRAGMENTATION, frag_shift, M_FT_FRAGMENTATION, T4_FILTER_IP_FRAGMENT);
	CHECK_FIELD(F_MPSHITTYPE, matchtype_shift, M_FT_MPSHITTYPE, T4_FILTER_MPS_HIT_TYPE);
	CHECK_FIELD(F_MACMATCH, macmatch_shift, M_FT_MACMATCH, T4_FILTER_MAC_IDX);
	CHECK_FIELD(F_ETHERTYPE, ethertype_shift, M_FT_ETHERTYPE, T4_FILTER_ETH_TYPE);
	CHECK_FIELD(F_PROTOCOL, protocol_shift, M_FT_PROTOCOL, T4_FILTER_IP_PROTO);
	CHECK_FIELD(F_TOS, tos_shift, M_FT_TOS, T4_FILTER_IP_TOS);
	CHECK_FIELD(F_VLAN, vlan_shift, M_FT_VLAN, T4_FILTER_VLAN);
	CHECK_FIELD(F_VNIC_ID, vnic_shift, M_FT_VNIC_ID, T4_FILTER_VNIC);
	if (tp->ingress_config & F_VNIC)
		*mode |= T4_FILTER_IC_VNIC;
	CHECK_FIELD(F_PORT, port_shift, M_FT_PORT, T4_FILTER_PORT);
	CHECK_FIELD(F_FCOE, fcoe_shift, M_FT_FCOE, T4_FILTER_FCoE);
#undef CHECK_FIELD

	return (0);
}

int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf, iconf;
	int rc;

	iconf = mode_to_iconf(mode);
	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
		/*
		 * For now we just complain if A_TP_INGRESS_CONFIG is not
		 * already set to the correct value for the requested filter
		 * mode.  It's not clear if it's safe to write to this register
		 * on the fly.  (And we trust the cached value of the register).
		 *
		 * check_fspec_against_fconf_iconf and other code that looks at
		 * tp->vlan_pri_map and tp->ingress_config need to be reviewed
		 * thoroughly before allowing dynamic filter mode changes.
		 */
		return (EBUSY);
	}

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0 || sc->tids.hpftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	rc = -t4_set_filter_mode(sc, fconf, true);
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

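/*
 * Filter hit counts are read directly out of the filter's TCB through memory
 * window 0: a 64-bit counter at TCB offset 16 on T4, a 32-bit counter at
 * offset 24 on T5 and later.
 */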
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	if (is_t4(sc)) {
		uint64_t hits;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
		return (be64toh(hits));
	} else {
		uint32_t hits;

		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
		return (be32toh(hits));
	}
}

int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	if (t->fs.hash)
		return (get_hashfilter(sc, t));
	else
		return (get_tcamfilter(sc, t));
}

static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
	struct filter_entry *f;
	struct fw_filter2_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	u_int tid;
	const int ntids = t->fs.type ? 4 : 1;

	MPASS(!t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	if (separate_hpfilter_region(sc) && t->fs.prio) {
		MPASS(t->idx < sc->tids.nhpftids);
		f = &sc->tids.hpftid_tab[t->idx];
		tid = sc->tids.hpftid_base + t->idx;
	} else {
		MPASS(t->idx < sc->tids.nftids);
		f = &sc->tids.ftid_tab[t->idx];
		tid = sc->tids.ftid_base + t->idx;
	}
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		int len16;

		if (sc->params.filter2_wr_support)
			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
		else
			len16 = howmany(sizeof(struct fw_filter_wr), 16);
		fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			if (separate_hpfilter_region(sc) && t->fs.prio)
				sc->tids.hpftids_in_use++;
			else
				sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0)
		return (rc);

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = tid;
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	bzero(fwr, sizeof(*fwr));
	if (sc->params.filter2_wr_support)
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	/* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
	bzero(fwr->sma, sizeof (fwr->sma));
	if (sc->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
		    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
		    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
			ULP_MODE_TCPDDP : ULP_MODE_NONE) |
		    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
		    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
		fwr->newlport = htobe16(f->fs.nat_dport);
		fwr->newfport = htobe16(f->fs.nat_sport);
		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
	}
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

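/*
 * Construct the compressed filter tuple for a hashfilter.  Every field the
 * chip is configured to compare (a non-negative *_shift in tp_params) is
 * packed at its shift.  As a purely illustrative example with hypothetical
 * shifts: if only port and protocol were compressed, at shifts 0 and 3, a
 * TCP (proto 6) filter on port 1 would produce ftuple = (6 << 3) | 1.
 */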
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
	struct tp_params *tp = &sc->params.tp;
	uint64_t fmask;

	*ftuple = fmask = 0;

	/*
	 * Initialize each of the fields that we care about and that are
	 * present in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan) {
		*ftuple |= (F_FT_VLAN_VLD | fs->val.vlan) << tp->vlan_shift;
		fmask |= M_FT_VLAN << tp->vlan_shift;
	}

	if (tp->port_shift >= 0 && fs->mask.iport) {
		*ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
		fmask |= M_FT_PORT << tp->port_shift;
	}

	if (tp->protocol_shift >= 0 && fs->mask.proto) {
		*ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
		fmask |= M_FT_PROTOCOL << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos) {
		*ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
		fmask |= M_FT_TOS << tp->tos_shift;
	}

	if (tp->vnic_shift >= 0 && fs->mask.vnic) {
		/* F_VNIC in ingress config was already validated. */
		if (tp->ingress_config & F_VNIC)
			MPASS(fs->mask.pfvf_vld);
		else
			MPASS(fs->mask.ovlan_vld);

		*ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
		fmask |= M_FT_VNIC_ID << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
		*ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
		fmask |= M_FT_MACMATCH << tp->macmatch_shift;
	}

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
		*ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
		fmask |= M_FT_ETHERTYPE << tp->ethertype_shift;
	}

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
		*ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
		fmask |= M_FT_MPSHITTYPE << tp->matchtype_shift;
	}

	if (tp->frag_shift >= 0 && fs->mask.frag) {
		*ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
		fmask |= M_FT_FRAGMENTATION << tp->frag_shift;
	}

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
		*ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
		fmask |= M_FT_FCOE << tp->fcoe_shift;
	}

	/* A hashfilter must conform to the filterMask. */
	if (fmask != tp->hash_filter_mask)
		return (EINVAL);

	return (0);
}

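/*
 * A hashfilter is an exact-match filter, so the complete 4-tuple must be
 * specified: all-ones masks for both addresses and both ports.  Masked
 * matching is the TCAM's job.
 */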
static bool
is_4tuple_specified(struct t4_filter_specification *fs)
{
	int i;
	const int n = fs->type ? 16 : 4;

	if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff)
		return (false);

	for (i = 0; i < n; i++) {
		if (fs->mask.sip[i] != 0xff)
			return (false);
		if (fs->mask.dip[i] != 0xff)
			return (false);
	}

	return (true);
}

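/*
 * Add a filter: validate the request against the chip's filter mode, make
 * sure the required tid tables and queues exist, allocate L2T/SMT entries for
 * any rewrites, and hand off to set_hashfilter or set_tcamfilter.  Userland
 * reaches this through an ioctl on the adapter's nexus device; a rough
 * sketch, assuming the CHELSIO_T4_SET_FILTER ioctl that cxgbetool uses:
 *
 *	struct t4_filter t = { .idx = 0 };
 *	t.fs.val.dport = 80;
 *	t.fs.mask.dport = 0xffff;
 *	t.fs.action = FILTER_DROP;
 *	ioctl(fd, CHELSIO_T4_SET_FILTER, &t);
 */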
int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te = NULL;
	struct smt_entry *smt = NULL;
	uint64_t ftuple;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		/* Hardware, not user, selects a tid for hashfilters. */
		if (t->idx != (uint32_t)-1)
			return (EINVAL);
		/* T5 can't count hashfilter hits. */
		if (is_t5(sc) && t->fs.hitcnts)
			return (EINVAL);
		if (!is_4tuple_specified(&t->fs))
			return (EINVAL);
		rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
		if (rc != 0)
			return (rc);
	} else {
		if (separate_hpfilter_region(sc) && t->fs.prio) {
			if (ti->nhpftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nhpftids)
				return (EINVAL);
		} else {
			if (ti->nftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nftids)
				return (EINVAL);
		}
		/* An IPv6 filter's idx must be 4-aligned. */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support VLAN tag removal or rewrite, swapmac, or NAT. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
	    t->fs.swapmac || t->fs.nat_mode))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);

	/*
	 * Basic checks passed.  Make sure the queues and tid tables are set
	 * up.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0)) {
		end_synchronized_op(sc, 0);
		return (rc);
	}
	if (t->fs.hash) {
		if (__predict_false(ti->hftid_hash_4t == NULL)) {
			rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT);
			if (rc != 0)
				goto done;
		}
		if (__predict_false(sc->tids.atid_tab == NULL)) {
			rc = alloc_atid_tab(&sc->tids, M_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (separate_hpfilter_region(sc) && t->fs.prio &&
	    __predict_false(ti->hpftid_tab == NULL)) {
		MPASS(ti->nhpftids != 0);
		KASSERT(ti->hpftids_in_use == 0,
		    ("%s: no memory allocated but hpftids_in_use is %u",
		    __func__, ti->hpftids_in_use));
		ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
		    ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->hpftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		MPASS(ti->nftids != 0);
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use is %u",
		    __func__, ti->ftids_in_use));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc, t->fs.vlan, t->fs.eport,
		    t->fs.dmac);
		if (__predict_false(l2te == NULL)) {
			rc = EAGAIN;
			goto error;
		}
	}

	if (t->fs.newsmac) {
		/* This filter needs an SMT entry; allocate one. */
		smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
		if (__predict_false(smt == NULL)) {
			rc = EAGAIN;
			goto error;
		}
		rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
		if (rc)
			goto error;
	}

	if (t->fs.hash)
		rc = set_hashfilter(sc, t, ftuple, l2te, smt);
	else
		rc = set_tcamfilter(sc, t, l2te, smt);

	if (rc != 0 && rc != EINPROGRESS) {
error:
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
	}
	return (rc);
}

static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc, nfilters;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	mtx_lock(&sc->tids.ftid_lock);
	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}
	MPASS(f != NULL);	/* Caller checked this. */
	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}
	f += t->idx;

	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == tid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16),
	    &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

	/* No filters possible if not initialized yet. */
	if (!(sc->flags & FULL_INIT_DONE))
		return (EINVAL);

	/*
	 * The checks on the tid tables ensure that the locks the del_*
	 * functions will acquire have been initialized.
	 */
	if (t->fs.hash) {
		if (sc->tids.hftid_hash_4t != NULL)
			return (del_hashfilter(sc, t));
	} else if (separate_hpfilter_region(sc) && t->fs.prio) {
		if (sc->tids.hpftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	} else {
		if (sc->tids.ftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	}

	return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

	if (f->l2te) {
		t4_l2t_release(f->l2te);
		f->l2te = NULL;
	}
	if (f->smt) {
		t4_smt_release(f->smt);
		f->smt = NULL;
	}
}

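/*
 * Update one 64-bit word of a TCB with a CPL_SET_TCB_FIELD:
 * new_word = (old_word & ~mask) | val.  Replies, when requested, arrive on
 * the firmware event queue tagged with CPL_COOKIE_HASHFILTER.
 */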
static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16),
	    &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	if (no_reply == 0) {
		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
		    V_NO_REPLY(0));
	} else
		req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) |
	    V_COOKIE(CPL_COOKIE_HASHFILTER));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);

	return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
	    (uint64_t)val << bit_pos, no_reply));
}

int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_hpftid(sc, tid)) {
		idx = tid - sc->tids.hpftid_base;
		f = &sc->tids.hpftid_tab[idx];
	} else if (is_ftid(sc, tid)) {
		idx = tid - sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
	} else
		panic("%s: FW reply for invalid TID %d.", __func__, tid);

	MPASS(f->tid == tid);
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, tid));
	switch (rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		if (f->fs.newsmac) {
			MPASS(f->smt != NULL);
			set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
			    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
			    V_TCB_SMAC_SEL(f->smt->idx), 1);
			/* XXX: wait for reply to TCB update before !pending */
		}
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		f->valid = 0;
		/* Fall through */
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		if (separate_hpfilter_region(sc) && f->fs.prio)
			sc->tids.hpftids_in_use--;
		else
			sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional
 * TCB updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct filter_entry *f = lookup_atid(sc, atid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	mtx_lock(&sc->tids.hftid_lock);
	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
	    __func__, f, f->tid));
	if (status == CPL_ERR_NONE) {
		f->tid = GET_TID(cpl);
		MPASS(lookup_hftid(sc, f->tid) == NULL);
		insert_hftid(sc, f);
		/*
		 * Leave the filter pending until it is fully set up, which will
		 * be indicated by the reply to the last TCB update.  No need to
		 * unblock the ioctl thread either.
		 */
		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
			goto done;
		f->valid = 1;
		f->pending = 0;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = act_open_rpl_status_to_errno(status);
		f->valid = 0;
		if (act_open_has_tid(status))
			release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
		free_filter_resources(f);
		remove_hf(sc, f);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
done:
	mtx_unlock(&sc->tids.hftid_lock);

	free_atid(sc, atid);
	return (0);
}

int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));
	f->pending = 0;
	if (rpl->status == 0) {
		f->valid = 1;
	} else {
		f->tid = EIO;
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));
	f->pending = 0;
	if (cpl->status == 0) {
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

static int
get_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters;
	struct filter_entry *f;
	u_int in_use;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	MPASS(!t->fs.hash);

	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
		in_use = sc->tids.hpftids_in_use;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
		in_use = sc->tids.ftids_in_use;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}

	if (in_use == 0 || f == NULL || t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	f += t->idx;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			MPASS(f->tid == tid_base + i);
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, f->tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (0);
}

static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	int tid;
	struct filter_entry *f;
	const int inv_tid = ti->ntids + ti->tid_base;

	MPASS(t->fs.hash);

	if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL ||
	    t->idx >= inv_tid) {
		t->idx = 0xffffffff;
		return (0);
	}
	if (t->idx < ti->tid_base)
		t->idx = ti->tid_base;

	mtx_lock(&ti->hftid_lock);
	for (tid = t->idx; tid < inv_tid; tid++) {
		f = lookup_hftid(sc, tid);
		if (f != NULL && f->valid) {
			t->idx = tid;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&ti->hftid_lock);
	return (0);
}

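/*
 * Hashfilters are installed by sending the firmware an active open
 * (CPL_ACT_OPEN_REQ/REQ6) with F_TCAM_BYPASS and F_NON_OFFLOAD set and the
 * compressed filter tuple in the request.  Several filter fields travel in
 * repurposed connection options (e.g. nat_mode in TX_QUEUE, swapmac in
 * SACK_EN); whatever doesn't fit is applied afterwards with TCB updates in
 * configure_hashfilter_tcb.
 */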
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

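/*
 * Size of the active open request, in 16-byte units.  Rows of the table are
 * the T4/T5/T6 layouts of the CPL and columns are IPv4/IPv6.
 */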
static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			howmany(sizeof (struct cpl_act_open_req), 16),
			howmany(sizeof (struct cpl_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t5_act_open_req), 16),
			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t6_act_open_req), 16),
			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te, struct smt_entry *smt)
{
	void *wr;
	struct wrq_cookie cookie;
	struct filter_entry *f;
	int rc, atid = -1;
	uint32_t hash;

	MPASS(t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	hash = hf_hashfn_4t(&t->fs);

	mtx_lock(&sc->tids.hftid_lock);
	if (lookup_hf(sc, &t->fs, hash) != NULL) {
		rc = EEXIST;
		goto done;
	}

	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
	if (__predict_false(f == NULL)) {
		rc = ENOMEM;
		goto done;
	}
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	atid = alloc_atid(sc, f);
	if (__predict_false(atid == -1)) {
		free(f, M_CXGBE);
		rc = EAGAIN;
		goto done;
	}
	MPASS(atid >= 0);

	wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
	    &cookie);
	if (wr == NULL) {
		free_atid(sc, atid);
		free(f, M_CXGBE);
		rc = ENOMEM;
		goto done;
	}
	if (f->fs.type)
		mk_act_open_req6(sc, f, atid, ftuple, wr);
	else
		mk_act_open_req(sc, f, atid, ftuple, wr);

	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
	f->pending = 1;
	f->tid = -1;
	insert_hf(sc, f, hash);
	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = 0;
				f->locked = 0;
				t->idx = f->tid;
			} else {
				remove_hf(sc, f);
				rc = f->tid;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}

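/*
 * Deleting a hashfilter takes more than one CPL.  Instead of sending them
 * separately they are packed into a single work request as ULP_TX
 * sub-packets, each a ulp_txpkt header, a ulptx_idata, and the CPL itself,
 * padded out to a 16-byte boundary with a NOOP where necessary.
 */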
/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_req_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_abort_req_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(0);
	req->rsvd1 = 0;
	req->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__ABORT_REQ_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_rpl_core *rpl;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*rpl));

	rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
	OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->rsvd0 = htonl(0);
	rpl->rsvd1 = 0;
	rpl->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(rpl + 1);
	if (LEN__ABORT_RPL_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

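/*
 * The delete WR is the three ULP commands above back to back: a TCB update
 * that points TCB_RSS_INFO at the queue that should get the abort
 * notification, then an ABORT_REQ, then a pre-cooked ABORT_RPL.
 */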
static inline int
del_hashfilter_wrlen(void)
{

	return (sizeof(struct work_request_hdr) +
	    roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__ABORT_REQ_ULP, 16) +
	    roundup2(LEN__ABORT_RPL_ULP, 16));
}

static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
	struct ulp_txpkt *ulpmc;

	INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
	    V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
	ulpmc = mk_abort_req_ulp(ulpmc, tid);
	ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}

static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	void *wr;
	struct filter_entry *f;
	struct wrq_cookie cookie;
	int rc;
	const int wrlen = del_hashfilter_wrlen();
	const int inv_tid = ti->ntids + ti->tid_base;

	MPASS(sc->tids.hftid_hash_4t != NULL);
	MPASS(sc->tids.ntids > 0);

	if (t->idx < sc->tids.tid_base || t->idx >= inv_tid)
		return (EINVAL);

	mtx_lock(&ti->hftid_lock);
	f = lookup_hftid(sc, t->idx);
	if (f == NULL || f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == t->idx);
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
	if (wr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
	f->locked = 1;
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				f->locked = 0;
				rc = EIO;
			} else {
				rc = 0;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&ti->hftid_lock);
	return (rc);
}

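/*
 * NAT rewrite values are stashed in otherwise unused TCB words, 32 bits at a
 * time: the new destination IP in TCB_SND_UNA_RAW (IPv6) or
 * TCB_RX_FRAG3_LEN_RAW (IPv4), the new source IP in TCB_RX_FRAG2_PTR_RAW
 * (IPv6) or TCB_RX_FRAG3_START_IDX_OFFSET_RAW (IPv4), and the new ports in
 * TCB_PDU_HDR_LEN.
 */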
#define WORD_MASK       0xffffffff
static void
set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
    const bool sip, const bool dp, const bool sp)
{

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
			    f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
			    f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 1, WORD_MASK,
			    f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
			    f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 2, WORD_MASK,
			    f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
			    f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 3, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
			    f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
			    f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
			    f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
			    f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
			    f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
			    f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);

		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
		}
	}

	set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
	    (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
}

/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and
 * the last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully set up.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->pending);
	MPASS(f->valid == 0);

	if (f->fs.newdmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
		updated++;
	}

	if (f->fs.newsmac) {
		MPASS(f->smt != NULL);
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
		set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
		    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
		    1);
		updated++;
	}

	switch (f->fs.nat_mode) {
	case NAT_MODE_NONE:
		break;
	case NAT_MODE_DIP:
		set_nat_params(sc, f, true, false, false, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP:
		set_nat_params(sc, f, true, false, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SIP:
		set_nat_params(sc, f, true, true, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SP:
		set_nat_params(sc, f, true, false, true, true);
		updated++;
		break;
	case NAT_MODE_SIP_SP:
		set_nat_params(sc, f, false, true, false, true);
		updated++;
		break;
	case NAT_MODE_DIP_SIP_SP:
		set_nat_params(sc, f, true, true, false, true);
		updated++;
		break;
	case NAT_MODE_ALL:
		set_nat_params(sc, f, true, true, true, true);
		updated++;
		break;
	default:
		MPASS(0);	/* should have been validated earlier */
		break;
	}

	if (f->fs.nat_seq_chk) {
		set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
		updated++;
	}

	if (is_t5(sc) && f->fs.action == FILTER_DROP) {
		/*
		 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to
		 * drop.
		 */
		set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
		    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
		updated++;
	}

	/*
	 * Enable switching after all secondary resources (L2T entry, SMT
	 * entry, etc.) are set up so that any switched packet uses the
	 * correct values.
	 */
	if (f->fs.action == FILTER_SWITCH) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	return (0);
}