xref: /freebsd/sys/dev/cxgbe/t4_filter.c (revision 06c3fb27)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/fnv_hash.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"
#include "t4_smt.h"

struct filter_entry {
	LIST_ENTRY(filter_entry) link_4t;
	LIST_ENTRY(filter_entry) link_tid;

	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked or busy */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	int tid;		/* tid of the filter TCB */
	struct l2t_entry *l2te;	/* L2 table entry for DMAC rewrite */
	struct smt_entry *smt;	/* SMT entry for SMAC rewrite */

	struct t4_filter_specification fs;
};

static void free_filter_resources(struct filter_entry *);
static int get_tcamfilter(struct adapter *, struct t4_filter *);
static int get_hashfilter(struct adapter *, struct t4_filter *);
static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
    struct l2t_entry *, struct smt_entry *);
static int del_hashfilter(struct adapter *, struct t4_filter *);
static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);

static inline bool
separate_hpfilter_region(struct adapter *sc)
{

	return (chip_id(sc) >= CHELSIO_T6);
}

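/*
 * Hash of a filter's 4-tuple, used to bucket hashfilters in the
 * hftid_hash_4t table.  A 32-bit FNV hash is computed over the source and
 * destination addresses (16 bytes each for IPv6, 4 for IPv4) and then the
 * source and destination ports.
 */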
static inline uint32_t
hf_hashfn_4t(struct t4_filter_specification *fs)
{
	struct t4_filter_tuple *ft = &fs->val;
	uint32_t hash;

	if (fs->type) {
		/* IPv6 */
		hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT);
		hash = fnv_32_buf(&ft->dip[0], 16, hash);
	} else {
		hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT);
		hash = fnv_32_buf(&ft->dip[0], 4, hash);
	}
	hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash);
	hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash);

	return (hash);
}

static inline uint32_t
hf_hashfn_tid(int tid)
{

	return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT));
}

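/*
 * Set up the two hashfilter lookup tables: one keyed by the filter's 4-tuple
 * and one keyed by its tid.  Both are sized from ntids / 1024 buckets (16 at
 * minimum; hashinit_flags picks the actual power-of-2 bucket count).  The
 * tables, the lock, and the condvar are all torn down in free_hftid_hash.
 */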
static int
alloc_hftid_hash(struct tid_info *t, int flags)
{
	int n;

	MPASS(t->ntids > 0);
	MPASS(t->hftid_hash_4t == NULL);
	MPASS(t->hftid_hash_tid == NULL);

	n = max(t->ntids / 1024, 16);
	t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags);
	if (t->hftid_hash_4t == NULL)
		return (ENOMEM);
	t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask,
	    flags);
	if (t->hftid_hash_tid == NULL) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
		return (ENOMEM);
	}

	mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
	cv_init(&t->hftid_cv, "t4hfcv");

	return (0);
}

void
free_hftid_hash(struct tid_info *t)
{
	struct filter_entry *f, *ftmp;
	LIST_HEAD(, filter_entry) *head;
	int i;
#ifdef INVARIANTS
	int n = 0;
#endif

	if (t->tids_in_use > 0) {
		/* Remove everything from the tid hash. */
		head = t->hftid_hash_tid;
		for (i = 0; i <= t->hftid_tid_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) {
				LIST_REMOVE(f, link_tid);
			}
		}

		/* Remove and then free each filter in the 4t hash. */
		head = t->hftid_hash_4t;
		for (i = 0; i <= t->hftid_4t_mask; i++) {
			LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) {
#ifdef INVARIANTS
				n += f->fs.type ? 2 : 1;
#endif
				LIST_REMOVE(f, link_4t);
				free(f, M_CXGBE);
			}
		}
		MPASS(t->tids_in_use == n);
		t->tids_in_use = 0;
	}

	if (t->hftid_hash_4t) {
		hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
		t->hftid_hash_4t = NULL;
	}
	if (t->hftid_hash_tid) {
		hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask);
		t->hftid_hash_tid = NULL;
	}
	if (mtx_initialized(&t->hftid_lock)) {
		mtx_destroy(&t->hftid_lock);
		cv_destroy(&t->hftid_cv);
	}
}

static void
insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;

	MPASS(head != NULL);
	if (hash == 0)
		hash = hf_hashfn_4t(&f->fs);
	LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t);
	atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

static void
insert_hftid(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	uint32_t hash;

	MPASS(f->tid >= t->tid_base);
	MPASS(f->tid - t->tid_base < t->ntids);
	mtx_assert(&t->hftid_lock, MA_OWNED);

	hash = hf_hashfn_tid(f->tid);
	LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid);
}

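/*
 * Compare two hashfilter specifications for equality.  The 4-tuple is
 * compared first, then the remaining fields that may appear in the
 * compressed filter tuple.  Only values need to be compared; see the comment
 * in the body for why the masks are known to be identical.
 */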
static bool
filter_eq(struct t4_filter_specification *fs1,
    struct t4_filter_specification *fs2)
{
	int n;

	MPASS(fs1->hash && fs2->hash);

	if (fs1->type != fs2->type)
		return (false);

	n = fs1->type ? 16 : 4;
	if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) ||
	    bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) ||
	    fs1->val.sport != fs2->val.sport ||
	    fs1->val.dport != fs2->val.dport)
		return (false);

	/*
	 * We know the masks are the same because all hashfilters conform to the
	 * global tp->filter_mask and the driver has verified that already.
	 */

	if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) &&
	    fs1->val.vnic != fs2->val.vnic)
		return (false);
	if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan)
		return (false);
	if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx)
		return (false);
	if (fs1->mask.frag && fs1->val.frag != fs2->val.frag)
		return (false);
	if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype)
		return (false);
	if (fs1->mask.iport && fs1->val.iport != fs2->val.iport)
		return (false);
	if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe)
		return (false);
	if (fs1->mask.proto && fs1->val.proto != fs2->val.proto)
		return (false);
	if (fs1->mask.tos && fs1->val.tos != fs2->val.tos)
		return (false);
	if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype)
		return (false);

	return (true);
}

static struct filter_entry *
lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
	struct filter_entry *f;

	mtx_assert(&t->hftid_lock, MA_OWNED);
	MPASS(head != NULL);

	if (hash == 0)
		hash = hf_hashfn_4t(fs);

	LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) {
		if (filter_eq(&f->fs, fs))
			return (f);
	}

	return (NULL);
}

static struct filter_entry *
lookup_hftid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;
	LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
	struct filter_entry *f;
	uint32_t hash;

	mtx_assert(&t->hftid_lock, MA_OWNED);
	MPASS(head != NULL);

	hash = hf_hashfn_tid(tid);
	LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) {
		if (f->tid == tid)
			return (f);
	}

	return (NULL);
}

static void
remove_hf(struct adapter *sc, struct filter_entry *f)
{
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);

	LIST_REMOVE(f, link_4t);
	atomic_subtract_int(&t->tids_in_use, f->fs.type ? 2 : 1);
}

static void
remove_hftid(struct adapter *sc, struct filter_entry *f)
{
#ifdef INVARIANTS
	struct tid_info *t = &sc->tids;

	mtx_assert(&t->hftid_lock, MA_OWNED);
#endif

	LIST_REMOVE(f, link_tid);
}

/*
 * Input: driver's 32b filter mode.
 * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
 */
static uint16_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}

/*
 * Input: driver's 32b filter mode.
 * Returns: hardware vnic mode (ingress config) matching the input.
 */
static int
mode_to_iconf(uint32_t mode)
{
	if ((mode & T4_FILTER_VNIC) == 0)
		return (-1);	/* ingress config doesn't matter. */

	if (mode & T4_FILTER_IC_VNIC)
		return (FW_VNIC_MODE_PF_VF);
	else if (mode & T4_FILTER_IC_ENCAP)
		return (FW_VNIC_MODE_ENCAP_EN);
	else
		return (FW_VNIC_MODE_OUTER_VLAN);
}

static int
check_fspec_against_fconf_iconf(struct adapter *sc,
    struct t4_filter_specification *fs)
{
	struct tp_params *tpp = &sc->params.tp;
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
		if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
			return (EINVAL);
		fconf |= F_VNIC_ID;
	}

	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
		if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
			return (EINVAL);
		fconf |= F_VNIC_ID;
	}

#ifdef notyet
	if (fs->val.encap_vld || fs->mask.encap_vld) {
		if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN)
			return (EINVAL);
		fconf |= F_VNIC_ID;
	}
#endif

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	if ((tpp->filter_mode | fconf) != tpp->filter_mode)
		return (E2BIG);

	return (0);
}

/*
 * Input: hardware filter configuration (filter mode/mask, ingress config).
 * Returns: driver's 32b filter mode matching the input.
 */
static uint32_t
fconf_to_mode(uint16_t hwmode, int vnic_mode)
{
	uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

	if (hwmode & F_FRAGMENTATION)
		mode |= T4_FILTER_IP_FRAGMENT;
	if (hwmode & F_MPSHITTYPE)
		mode |= T4_FILTER_MPS_HIT_TYPE;
	if (hwmode & F_MACMATCH)
		mode |= T4_FILTER_MAC_IDX;
	if (hwmode & F_ETHERTYPE)
		mode |= T4_FILTER_ETH_TYPE;
	if (hwmode & F_PROTOCOL)
		mode |= T4_FILTER_IP_PROTO;
	if (hwmode & F_TOS)
		mode |= T4_FILTER_IP_TOS;
	if (hwmode & F_VLAN)
		mode |= T4_FILTER_VLAN;
	if (hwmode & F_VNIC_ID)
		mode |= T4_FILTER_VNIC;	/* real meaning depends on vnic_mode. */
	if (hwmode & F_PORT)
		mode |= T4_FILTER_PORT;
	if (hwmode & F_FCOE)
		mode |= T4_FILTER_FCoE;

	switch (vnic_mode) {
	case FW_VNIC_MODE_PF_VF:
		mode |= T4_FILTER_IC_VNIC;
		break;
	case FW_VNIC_MODE_ENCAP_EN:
		mode |= T4_FILTER_IC_ENCAP;
		break;
	case FW_VNIC_MODE_OUTER_VLAN:
	default:
		break;
	}

	return (mode);
}

int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	struct tp_params *tp = &sc->params.tp;
	uint16_t filter_mode;

	/* Filter mask must comply with the global filter mode. */
	MPASS((tp->filter_mode | tp->filter_mask) == tp->filter_mode);

	/* Non-zero incoming value in mode means "hashfilter mode". */
	filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
	*mode = fconf_to_mode(filter_mode, tp->vnic_mode);

	return (0);
}

int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tp = &sc->params.tp;
	int rc, iconf;
	uint16_t fconf;

	iconf = mode_to_iconf(mode);
	fconf = mode_to_fconf(mode);
	if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
		return (0);	/* Nothing to do */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setfm");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	if (sc->tids.ftids_in_use > 0 ||	/* TCAM filters active */
	    sc->tids.hpftids_in_use > 0 ||	/* hi-pri TCAM filters active */
	    sc->tids.tids_in_use > 0) {		/* TOE or hashfilters active */
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	/* Note that filter mask will get clipped to the new filter mode. */
	rc = -t4_set_filter_cfg(sc, fconf, -1, iconf);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

int
set_filter_mask(struct adapter *sc, uint32_t mode)
{
	struct tp_params *tp = &sc->params.tp;
	int rc, iconf;
	uint16_t fmask;

	iconf = mode_to_iconf(mode);
	fmask = mode_to_fconf(mode);
	if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
		return (0);	/* Nothing to do */

	/*
	 * We aren't going to change the global filter mode or VNIC mode here.
	 * The given filter mask must conform to them.
	 */
	if ((fmask | tp->filter_mode) != tp->filter_mode)
		return (EINVAL);
	if (iconf != -1 && iconf != tp->vnic_mode)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sethfm");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	if (sc->tids.tids_in_use > 0) {		/* TOE or hashfilters active */
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif
	rc = -t4_set_filter_cfg(sc, -1, fmask, -1);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

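/*
 * Read a filter's hit count straight out of its TCB.  As the code below
 * shows, T4 keeps a 64-bit count at offset 16 within the TCB while T5 and
 * later keep a 32-bit count at offset 24.  Returns 0 if the hardware is not
 * accessible.
 */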
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t tid)
{
	uint32_t tcb_addr;
	uint64_t hits;

	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;

	mtx_lock(&sc->reg_lock);
	if (hw_off_limits(sc))
		hits = 0;
	else if (is_t4(sc)) {
		uint64_t t;

		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&t, 8);
		hits = be64toh(t);
	} else {
		uint32_t t;

		read_via_memwin(sc, 0, tcb_addr + 24, &t, 4);
		hits = be32toh(t);
	}
	mtx_unlock(&sc->reg_lock);

	return (hits);
}

int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	if (t->fs.hash)
		return (get_hashfilter(sc, t));
	else
		return (get_tcamfilter(sc, t));
}

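/*
 * Program a TCAM filter at the requested index.  An IPv4 filter occupies one
 * slot and an IPv6 filter four consecutive slots, all of which must be
 * unlocked and idle.  A FW_FILTER_WR (or FW_FILTER2_WR when the firmware
 * supports it, which adds the swapmac and NAT fields) is built on the
 * control queue and the thread sleeps until t4_filter_rpl processes the
 * firmware's reply.  EINPROGRESS means the wait was interrupted; the work
 * request was already committed and the filter will still be installed.
 */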
static int
set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
    struct smt_entry *smt)
{
	struct filter_entry *f;
	struct fw_filter2_wr *fwr;
	u_int vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;
	int i, rc, busy, locked;
	u_int tid;
	const int ntids = t->fs.type ? 4 : 1;

	MPASS(!t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	if (separate_hpfilter_region(sc) && t->fs.prio) {
		MPASS(t->idx < sc->tids.nhpftids);
		f = &sc->tids.hpftid_tab[t->idx];
		tid = sc->tids.hpftid_base + t->idx;
	} else {
		MPASS(t->idx < sc->tids.nftids);
		f = &sc->tids.ftid_tab[t->idx];
		tid = sc->tids.ftid_base + t->idx;
	}
	rc = busy = locked = 0;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = 0; i < ntids; i++) {
		busy += f[i].pending + f[i].valid;
		locked += f[i].locked;
	}
	if (locked > 0)
		rc = EPERM;
	else if (busy > 0)
		rc = EBUSY;
	else {
		int len16;

		if (sc->params.filter2_wr_support)
			len16 = howmany(sizeof(struct fw_filter2_wr), 16);
		else
			len16 = howmany(sizeof(struct fw_filter_wr), 16);
		fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
		if (__predict_false(fwr == NULL))
			rc = ENOMEM;
		else {
			f->pending = 1;
			if (separate_hpfilter_region(sc) && t->fs.prio)
				sc->tids.hpftids_in_use++;
			else
				sc->tids.ftids_in_use++;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	if (rc != 0)
		return (rc);

	/*
	 * Can't fail now.  A set-filter WR will definitely be sent.
	 */

	f->tid = tid;
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	bzero(fwr, sizeof(*fwr));
	if (sc->params.filter2_wr_support)
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(f->tid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	/* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
	bzero(fwr->sma, sizeof (fwr->sma));
	if (sc->params.filter2_wr_support) {
		fwr->filter_type_swapmac =
		    V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
		fwr->natmode_to_ulp_type =
		    V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
			ULP_MODE_TCPDDP : ULP_MODE_NONE) |
		    V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
		    V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
		fwr->newlport = htobe16(f->fs.nat_dport);
		fwr->newfport = htobe16(f->fs.nat_sport);
		fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
	}
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);

	/* Wait for response. */
	mtx_lock(&sc->tids.ftid_lock);
	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? 0 : EIO;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

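/*
 * Construct the 64-bit compressed filter tuple for a hashfilter from the
 * optional fields of the specification.  Each field is placed at the shift
 * recorded in the adapter's tp_params (a negative shift means the field is
 * absent from the compressed tuple).  The set of fields supplied must match
 * the hardware filter mask exactly or EINVAL is returned.
 */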
static int
hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
    uint64_t *ftuple)
{
	struct tp_params *tp = &sc->params.tp;
	uint16_t fmask;

	*ftuple = fmask = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.vlan) {
		*ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
		    tp->vlan_shift;
		fmask |= F_VLAN;
	}

	if (tp->port_shift >= 0 && fs->mask.iport) {
		*ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
		fmask |= F_PORT;
	}

	if (tp->protocol_shift >= 0 && fs->mask.proto) {
		*ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
		fmask |= F_PROTOCOL;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos) {
		*ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
		fmask |= F_TOS;
	}

	if (tp->vnic_shift >= 0 && fs->mask.vnic) {
		/* vnic_mode was already validated. */
		if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
			MPASS(fs->mask.pfvf_vld);
		else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
			MPASS(fs->mask.ovlan_vld);
#ifdef notyet
		else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
			MPASS(fs->mask.encap_vld);
#endif
		*ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
		fmask |= F_VNIC_ID;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
		*ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
		fmask |= F_MACMATCH;
	}

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
		*ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
		fmask |= F_ETHERTYPE;
	}

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
		*ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
		fmask |= F_MPSHITTYPE;
	}

	if (tp->frag_shift >= 0 && fs->mask.frag) {
		*ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
		fmask |= F_FRAGMENTATION;
	}

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
		*ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
		fmask |= F_FCOE;
	}

	/* A hashfilter must conform to the hardware filter mask. */
	if (fmask != tp->filter_mask)
		return (EINVAL);

	return (0);
}

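/*
 * A hashfilter is always an exact match on the 4-tuple, so the specification
 * must mask in every bit of both addresses and both ports.
 */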
static bool
is_4tuple_specified(struct t4_filter_specification *fs)
{
	int i;
	const int n = fs->type ? 16 : 4;

	if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff)
		return (false);

	for (i = 0; i < n; i++) {
		if (fs->mask.sip[i] != 0xff)
			return (false);
		if (fs->mask.dip[i] != 0xff)
			return (false);
	}

	return (true);
}

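/*
 * Validate a filter request and install it as either a hashfilter or a TCAM
 * filter (possibly in the separate high priority TCAM region), typically on
 * behalf of the set-filter ioctl.  Any L2T/SMT entries needed for DMAC/SMAC
 * rewrite are allocated here and released on failure; on success their
 * lifetime is tied to the filter.
 */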
int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	struct l2t_entry *l2te = NULL;
	struct smt_entry *smt = NULL;
	uint64_t ftuple;
	int rc;

	/*
	 * Basic filter checks first.
	 */

	if (t->fs.hash) {
		if (!is_hashfilter(sc) || ti->ntids == 0)
			return (ENOTSUP);
		/* Hardware, not user, selects a tid for hashfilters. */
		if (t->idx != (uint32_t)-1)
			return (EINVAL);
		/* T5 can't count hashfilter hits. */
		if (is_t5(sc) && t->fs.hitcnts)
			return (EINVAL);
		if (!is_4tuple_specified(&t->fs))
			return (EINVAL);
		rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
		if (rc != 0)
			return (rc);
	} else {
		if (separate_hpfilter_region(sc) && t->fs.prio) {
			if (ti->nhpftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nhpftids)
				return (EINVAL);
		} else {
			if (ti->nftids == 0)
				return (ENOTSUP);
			if (t->idx >= ti->nftids)
				return (EINVAL);
		}
		/* IPv6 filter idx must be 4 aligned */
		if (t->fs.type == 1 &&
		    ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
			return (EINVAL);
	}

	/* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
	if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
	    (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
	    t->fs.swapmac || t->fs.nat_mode))
		return (ENOTSUP);

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
		return (EINVAL);
	if (t->fs.val.iport >= sc->params.nports)
		return (EINVAL);

	/* Can't specify an iqid/rss_info if not steering. */
	if (!t->fs.dirsteer && !t->fs.dirsteerhash && !t->fs.maskhash &&
	    t->fs.iq)
		return (EINVAL);

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		return (rc);

	/*
	 * Basic checks passed.  Make sure the queues and tid tables are setup.
	 */

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	if (hw_off_limits(sc)) {
		rc = ENXIO;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
		goto done;

	if (t->fs.hash) {
		if (__predict_false(ti->hftid_hash_4t == NULL)) {
			rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT);
			if (rc != 0)
				goto done;
		}
	} else if (separate_hpfilter_region(sc) && t->fs.prio &&
	    __predict_false(ti->hpftid_tab == NULL)) {
		MPASS(ti->nhpftids != 0);
		KASSERT(ti->hpftids_in_use == 0,
		    ("%s: no memory allocated but hpftids_in_use is %u",
		    __func__, ti->hpftids_in_use));
		ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
		    ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->hpftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	} else if (__predict_false(ti->ftid_tab == NULL)) {
		MPASS(ti->nftids != 0);
		KASSERT(ti->ftids_in_use == 0,
		    ("%s: no memory allocated but ftids_in_use is %u",
		    __func__, ti->ftids_in_use));
		ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
		    M_CXGBE, M_NOWAIT | M_ZERO);
		if (ti->ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		if (!mtx_initialized(&sc->tids.ftid_lock)) {
			mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
			cv_init(&ti->ftid_cv, "t4fcv");
		}
	}
done:
	end_synchronized_op(sc, 0);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate L2T entry, SMT entry, etc.
	 */

	if (t->fs.newdmac || t->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		l2te = t4_l2t_alloc_switching(sc, t->fs.vlan, t->fs.eport,
		    t->fs.dmac);
		if (__predict_false(l2te == NULL)) {
			rc = EAGAIN;
			goto error;
		}
	}

	if (t->fs.newsmac) {
		/* This filter needs an SMT entry; allocate one. */
		smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
		if (__predict_false(smt == NULL)) {
			rc = EAGAIN;
			goto error;
		}
		rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
		if (rc)
			goto error;
	}

	if (t->fs.hash)
		rc = set_hashfilter(sc, t, ftuple, l2te, smt);
	else
		rc = set_tcamfilter(sc, t, l2te, smt);

	if (rc != 0 && rc != EINPROGRESS) {
error:
		if (l2te)
			t4_l2t_release(l2te);
		if (smt)
			t4_smt_release(smt);
	}
	return (rc);
}

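/*
 * Remove a TCAM filter.  A delete work request is sent to the firmware and
 * the thread sleeps until t4_filter_rpl clears the pending flag.  The filter
 * spec is copied back to the caller for its information.
 */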
static int
del_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	struct filter_entry *f;
	struct fw_filter_wr *fwr;
	struct wrq_cookie cookie;
	int rc, nfilters;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	mtx_lock(&sc->tids.ftid_lock);
	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}
	MPASS(f != NULL);	/* Caller checked this. */
	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}
	f += t->idx;

	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == tid_base + t->idx);
	fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16),
	    &cookie);
	if (fwr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	bzero(fwr, sizeof (*fwr));
	t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		if (f->pending == 0) {
			rc = f->valid ? EIO : 0;
			break;
		}
		if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (rc);
}

int
del_filter(struct adapter *sc, struct t4_filter *t)
{

	/* No filters possible if not initialized yet. */
	if (!(sc->flags & FULL_INIT_DONE))
		return (EINVAL);

	/*
	 * The checks for tid tables ensure that the locks that del_* will reach
	 * for are initialized.
	 */
	if (t->fs.hash) {
		if (sc->tids.hftid_hash_4t != NULL)
			return (del_hashfilter(sc, t));
	} else if (separate_hpfilter_region(sc) && t->fs.prio) {
		if (sc->tids.hpftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	} else {
		if (sc->tids.ftid_tab != NULL)
			return (del_tcamfilter(sc, t));
	}

	return (EINVAL);
}

/*
 * Release secondary resources associated with the filter.
 */
static void
free_filter_resources(struct filter_entry *f)
{

	if (f->l2te) {
		t4_l2t_release(f->l2te);
		f->l2te = NULL;
	}
	if (f->smt) {
		t4_smt_release(f->smt);
		f->smt = NULL;
	}
}

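/*
 * Update a field in a hashfilter's TCB with a CPL_SET_TCB_FIELD on the
 * control queue.  When no_reply is 0 the firmware's reply is steered to the
 * firmware event queue and carries CPL_COOKIE_HASHFILTER, which is what lets
 * the shared CPL_SET_TCB_RPL handler hand it to t4_hashfilter_tcb_rpl.
 */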
static int
set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
    uint64_t val, int no_reply)
{
	struct wrq_cookie cookie;
	struct cpl_set_tcb_field *req;

	req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16),
	    &cookie);
	if (req == NULL)
		return (ENOMEM);
	bzero(req, sizeof(*req));
	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
	if (no_reply == 0) {
		req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
		    V_NO_REPLY(0));
	} else
		req->reply_ctrl = htobe16(V_NO_REPLY(1));
	req->word_cookie = htobe16(V_WORD(word) |
	    V_COOKIE(CPL_COOKIE_HASHFILTER));
	req->mask = htobe64(mask);
	req->val = htobe64(val);
	commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);

	return (0);
}

/* Set one of the t_flags bits in the TCB. */
static inline int
set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
    u_int no_reply)
{

	return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
	    (uint64_t)val << bit_pos, no_reply));
}

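/*
 * Reply handler for TCAM filter work requests.  The cookie in the reply
 * encodes the disposition of the request; the ioctl thread sleeping on
 * ftid_cv is woken up once the filter entry has been updated.
 */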
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	u_int rc, idx;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_hpftid(sc, tid)) {
		idx = tid - sc->tids.hpftid_base;
		f = &sc->tids.hpftid_tab[idx];
	} else if (is_ftid(sc, tid)) {
		idx = tid - sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
	} else
		panic("%s: FW reply for invalid TID %d.", __func__, tid);

	MPASS(f->tid == tid);
	rc = G_COOKIE(rpl->cookie);

	mtx_lock(&sc->tids.ftid_lock);
	KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
	    __func__, rc, tid));
	switch (rc) {
	case FW_FILTER_WR_FLT_ADDED:
		/* set-filter succeeded */
		f->valid = 1;
		if (f->fs.newsmac) {
			MPASS(f->smt != NULL);
			set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
			    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
			    V_TCB_SMAC_SEL(f->smt->idx), 1);
			/* XXX: wait for reply to TCB update before !pending */
		}
		break;
	case FW_FILTER_WR_FLT_DELETED:
		/* del-filter succeeded */
		MPASS(f->valid == 1);
		f->valid = 0;
		/* Fall through */
	case FW_FILTER_WR_SMT_TBL_FULL:
		/* set-filter failed due to lack of SMT space. */
		MPASS(f->valid == 0);
		free_filter_resources(f);
		if (separate_hpfilter_region(sc) && f->fs.prio)
			sc->tids.hpftids_in_use--;
		else
			sc->tids.ftids_in_use--;
		break;
	case FW_FILTER_WR_SUCCESS:
	case FW_FILTER_WR_EINVAL:
	default:
		panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
		    idx);
	}
	f->pending = 0;
	cv_broadcast(&sc->tids.ftid_cv);
	mtx_unlock(&sc->tids.ftid_lock);

	return (0);
}

/*
 * This is the reply to the Active Open that created the filter.  Additional TCB
 * updates may be required to complete the filter configuration.
 */
int
t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct filter_entry *f = lookup_atid(sc, atid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));

	mtx_lock(&sc->tids.hftid_lock);
	KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
	KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
	    __func__, f, f->tid));
	if (status == CPL_ERR_NONE) {
		f->tid = GET_TID(cpl);
		MPASS(lookup_hftid(sc, f->tid) == NULL);
		insert_hftid(sc, f);
		/*
		 * Leave the filter pending until it is fully set up, which will
		 * be indicated by the reply to the last TCB update.  No need to
		 * unblock the ioctl thread either.
		 */
		if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
			goto done;
		f->valid = 1;
		f->pending = 0;
	} else {
		/* provide errno instead of tid to ioctl */
		f->tid = act_open_rpl_status_to_errno(status);
		f->valid = 0;
		f->pending = 0;
		if (act_open_has_tid(status))
			release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
		free_filter_resources(f);
		remove_hf(sc, f);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
done:
	mtx_unlock(&sc->tids.hftid_lock);

	free_atid(sc, atid);
	return (0);
}

int
t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	u_int tid = GET_TID(rpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
	    __func__, f, tid));
	f->pending = 0;
	if (rpl->status == 0) {
		f->valid = 1;
	} else {
		f->tid = EIO;
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

int
t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct filter_entry *f;

	mtx_lock(&sc->tids.hftid_lock);
	f = lookup_hftid(sc, tid);
	KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
	KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
	    f, tid));
	KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
	    tid));
	f->pending = 0;
	if (cpl->status == 0) {
		f->valid = 0;
		free_filter_resources(f);
		remove_hftid(sc, f);
		remove_hf(sc, f);
		release_tid(sc, tid, &sc->sge.ctrlq[0]);
		if (f->locked == 0)
			free(f, M_CXGBE);
	}
	cv_broadcast(&sc->tids.hftid_cv);
	mtx_unlock(&sc->tids.hftid_lock);

	return (0);
}

static int
get_tcamfilter(struct adapter *sc, struct t4_filter *t)
{
	int i, nfilters;
	struct filter_entry *f;
	u_int in_use;
#ifdef INVARIANTS
	u_int tid_base;
#endif

	MPASS(!t->fs.hash);

	if (separate_hpfilter_region(sc) && t->fs.prio) {
		nfilters = sc->tids.nhpftids;
		f = sc->tids.hpftid_tab;
		in_use = sc->tids.hpftids_in_use;
#ifdef INVARIANTS
		tid_base = sc->tids.hpftid_base;
#endif
	} else {
		nfilters = sc->tids.nftids;
		f = sc->tids.ftid_tab;
		in_use = sc->tids.ftids_in_use;
#ifdef INVARIANTS
		tid_base = sc->tids.ftid_base;
#endif
	}

	if (in_use == 0 || f == NULL || t->idx >= nfilters) {
		t->idx = 0xffffffff;
		return (0);
	}

	f += t->idx;
	mtx_lock(&sc->tids.ftid_lock);
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			MPASS(f->tid == tid_base + i);
			t->idx = i;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, f->tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&sc->tids.ftid_lock);
	return (0);
}

static int
get_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	int tid;
	struct filter_entry *f;
	const int inv_tid = ti->ntids + ti->tid_base;

	MPASS(t->fs.hash);

	if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL ||
	    t->idx >= inv_tid) {
		t->idx = 0xffffffff;
		return (0);
	}
	if (t->idx < ti->tid_base)
		t->idx = ti->tid_base;

	mtx_lock(&ti->hftid_lock);
	for (tid = t->idx; tid < inv_tid; tid++) {
		f = lookup_hftid(sc, tid);
		if (f != NULL && f->valid) {
			t->idx = tid;
			t->l2tidx = f->l2te ? f->l2te->idx : 0;
			t->smtidx = f->smt ? f->smt->idx : 0;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, tid);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}
	t->idx = 0xffffffff;
done:
	mtx_unlock(&ti->hftid_lock);
	return (0);
}

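/*
 * Hashfilters are created by sending the hardware a TCAM-bypass active open
 * (CPL_ACT_OPEN_REQ/REQ6) that carries the compressed filter tuple and the
 * filter's settings packed into opt0/opt2.  Several opt2 fields are reused
 * for filter purposes here, e.g. V_CONG_CNTRL carries the drop/steer action.
 */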
static void
mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req6 *cpl)
{
	struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
	cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
	cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
	cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static void
mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
    uint64_t ftuple, struct cpl_act_open_req *cpl)
{
	struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
	struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

	/* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
	MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
	MPASS(atid >= 0);

	if (chip_id(sc) == CHELSIO_T5) {
		INIT_TP_WR(cpl5, 0);
	} else {
		INIT_TP_WR(cpl6, 0);
		cpl6->rsvd2 = 0;
		cpl6->opt3 = 0;
	}

	OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
	    V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
	    V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
	cpl->local_port = htobe16(f->fs.val.dport);
	cpl->peer_port = htobe16(f->fs.val.sport);
	cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
	    f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
	cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
	    f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
	cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
	    f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
	    V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
	    V_NO_CONG(f->fs.rpttid) |
	    V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
	    F_TCAM_BYPASS | F_NON_OFFLOAD);

	cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
	cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
	    V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
	    V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
	    F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
	    V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
	    V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
}

static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			howmany(sizeof (struct cpl_act_open_req), 16),
			howmany(sizeof (struct cpl_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t5_act_open_req), 16),
			howmany(sizeof (struct cpl_t5_act_open_req6), 16)
		},
		{
			howmany(sizeof (struct cpl_t6_act_open_req), 16),
			howmany(sizeof (struct cpl_t6_act_open_req6), 16)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

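/*
 * Install a hashfilter.  A duplicate check is done under the hftid lock,
 * then an atid is allocated and the active open is sent.  The entry is
 * inserted in the 4-tuple hash immediately (while still pending) so that
 * concurrent requests for the same tuple fail with EEXIST.  The thread then
 * sleeps until the reply handlers mark the filter valid or report an error.
 */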
static int
set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
    struct l2t_entry *l2te, struct smt_entry *smt)
{
	void *wr;
	struct wrq_cookie cookie;
	struct filter_entry *f;
	int rc, atid = -1;
	uint32_t hash;

	MPASS(t->fs.hash);
	/* Already validated against fconf, iconf */
	MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
	MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);

	hash = hf_hashfn_4t(&t->fs);

	mtx_lock(&sc->tids.hftid_lock);
	if (lookup_hf(sc, &t->fs, hash) != NULL) {
		rc = EEXIST;
		goto done;
	}

	f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
	if (__predict_false(f == NULL)) {
		rc = ENOMEM;
		goto done;
	}
	f->fs = t->fs;
	f->l2te = l2te;
	f->smt = smt;

	atid = alloc_atid(sc, f);
	if (__predict_false(atid == -1)) {
		free(f, M_CXGBE);
		rc = EAGAIN;
		goto done;
	}
	MPASS(atid >= 0);

	wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
	    &cookie);
	if (wr == NULL) {
		free_atid(sc, atid);
		free(f, M_CXGBE);
		rc = ENOMEM;
		goto done;
	}
	if (f->fs.type)
		mk_act_open_req6(sc, f, atid, ftuple, wr);
	else
		mk_act_open_req(sc, f, atid, ftuple, wr);

	f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
	f->pending = 1;
	f->tid = -1;
	insert_hf(sc, f, hash);
	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				rc = 0;
				f->locked = 0;
				t->idx = f->tid;
			} else {
				rc = f->tid;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&sc->tids.hftid_lock);
	return (rc);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

static void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
    uint64_t val, uint32_t tid, uint32_t qid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
	sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))

static void *
mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_req_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_abort_req_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	req->rsvd0 = htonl(0);
	req->rsvd1 = 0;
	req->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__ABORT_REQ_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

/* ABORT_RPL sent as a ULP command looks like this */
#define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
	sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))

static void *
mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
{
	struct ulptx_idata *ulpsc;
	struct cpl_abort_rpl_core *rpl;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*rpl));

	rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
	OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	rpl->rsvd0 = htonl(0);
	rpl->rsvd1 = 0;
	rpl->cmd = CPL_ABORT_NO_RST;

	ulpsc = (struct ulptx_idata *)(rpl + 1);
	if (LEN__ABORT_RPL_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static inline int
del_hashfilter_wrlen(void)
{

	return (sizeof(struct work_request_hdr) +
	    roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__ABORT_REQ_ULP, 16) +
	    roundup2(LEN__ABORT_RPL_ULP, 16));
}

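/*
 * A hashfilter is deleted with a single work request carrying three ULP_TX
 * subcommands: a SET_TCB_FIELD that points TCB_RSS_INFO at the queue that
 * should receive the abort reply, followed by an ABORT_REQ and an ABORT_RPL
 * for the tid.  Each subcommand is padded to a multiple of 16 bytes.
 */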
static void
mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
{
	struct ulp_txpkt *ulpmc;

	INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
	ulpmc = (struct ulp_txpkt *)(wrh + 1);
	ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
	    V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
	ulpmc = mk_abort_req_ulp(ulpmc, tid);
	ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}

static int
del_hashfilter(struct adapter *sc, struct t4_filter *t)
{
	struct tid_info *ti = &sc->tids;
	void *wr;
	struct filter_entry *f;
	struct wrq_cookie cookie;
	int rc;
	const int wrlen = del_hashfilter_wrlen();
	const int inv_tid = ti->ntids + ti->tid_base;

	MPASS(sc->tids.hftid_hash_4t != NULL);
	MPASS(sc->tids.ntids > 0);

	if (t->idx < sc->tids.tid_base || t->idx >= inv_tid)
		return (EINVAL);

	mtx_lock(&ti->hftid_lock);
	f = lookup_hftid(sc, t->idx);
	if (f == NULL || f->valid == 0) {
		rc = EINVAL;
		goto done;
	}
	MPASS(f->tid == t->idx);
	if (f->locked) {
		rc = EPERM;
		goto done;
	}
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
	if (wr == NULL) {
		rc = ENOMEM;
		goto done;
	}

	mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
	f->locked = 1;
	f->pending = 1;
	commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
	t->fs = f->fs;	/* extra info for the caller */

	for (;;) {
		MPASS(f->locked);
		if (f->pending == 0) {
			if (f->valid) {
				f->locked = 0;
				rc = EIO;
			} else {
				rc = 0;
				free(f, M_CXGBE);
			}
			break;
		}
		if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) {
			f->locked = 0;
			rc = EINPROGRESS;
			break;
		}
	}
done:
	mtx_unlock(&ti->hftid_lock);
	return (rc);
}

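/*
 * Write the NAT rewrite parameters into otherwise unused words of the
 * filter's TCB.  The rewritten destination IP goes into the SND_UNA_RAW
 * words (RX_FRAG3_LEN_RAW for IPv4), the source IP into the RX_FRAG2_PTR_RAW
 * words (RX_FRAG3_START_IDX_OFFSET_RAW for IPv4), and the two new ports
 * share the PDU_HDR_LEN word.
 */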
#define WORD_MASK       0xffffffff
static void
set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
    const bool sip, const bool dp, const bool sp)
{

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
			    f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
			    f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 1, WORD_MASK,
			    f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
			    f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 2, WORD_MASK,
			    f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
			    f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_SND_UNA_RAW + 3, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
			    f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
			    f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
			    f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
			    f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
			    f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
			    f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
			    f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
			    f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);

			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);

		} else {
			set_tcb_field(sc, f->tid,
			    W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
			    f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
			    f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
		}
	}

	set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
	    (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
}

/*
 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
 * last of the series of updates requested a reply.  The reply informs the
 * driver that the filter is fully setup.
 */
static int
configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
{
	int updated = 0;

	MPASS(f->tid < sc->tids.ntids);
	MPASS(f->fs.hash);
	MPASS(f->pending);
	MPASS(f->valid == 0);

	if (f->fs.newdmac) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
		updated++;
	}

	if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
		updated++;
	}

	if (f->fs.newsmac) {
		MPASS(f->smt != NULL);
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
		set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
		    V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
		    1);
		updated++;
	}

	switch (f->fs.nat_mode) {
	case NAT_MODE_NONE:
		break;
	case NAT_MODE_DIP:
		set_nat_params(sc, f, true, false, false, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP:
		set_nat_params(sc, f, true, false, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SIP:
		set_nat_params(sc, f, true, true, true, false);
		updated++;
		break;
	case NAT_MODE_DIP_DP_SP:
		set_nat_params(sc, f, true, false, true, true);
		updated++;
		break;
	case NAT_MODE_SIP_SP:
		set_nat_params(sc, f, false, true, false, true);
		updated++;
		break;
	case NAT_MODE_DIP_SIP_SP:
		set_nat_params(sc, f, true, true, false, true);
		updated++;
		break;
	case NAT_MODE_ALL:
		set_nat_params(sc, f, true, true, true, true);
		updated++;
		break;
	default:
		MPASS(0);	/* should have been validated earlier */
		break;
	}

	if (f->fs.nat_seq_chk) {
		set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
		    V_TCB_RCV_NXT(M_TCB_RCV_NXT),
		    V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
		updated++;
	}

	if (is_t5(sc) && f->fs.action == FILTER_DROP) {
		/*
		 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
		 */
		set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
		    V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
		updated++;
	}

	/*
	 * Enable switching after all secondary resources (L2T entry, SMT entry,
	 * etc.) are setup so that any switched packet will use correct
	 * values.
	 */
	if (f->fs.action == FILTER_SWITCH) {
		set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
		updated++;
	}

	if (f->fs.hitcnts || updated > 0) {
		set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
		    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
		    V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
		    V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
		return (EINPROGRESS);
	}

	return (0);
}