xref: /freebsd/sys/dev/cxgbe/t4_netmap.c (revision 325151a3)
/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef DEV_NETMAP
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;	/* XXXNM */
extern int spg_len;	/* XXXNM */
extern int fl_pktshift;	/* XXXNM */

SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters");

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop);
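
/*
 * Example (a sketch): these are loader tunables, so they are normally set in
 * /boot/loader.conf before the driver attaches, e.g.:
 *
 *   hw.cxgbe.nm_cong_drop="0"
 *   hw.cxgbe.nm_holdoff_tmr_idx="5"
 *
 * The CTLFLAG_RWTUN entries above can also be changed at runtime with
 * sysctl(8).
 */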

/* netmap ifnet routines */
static void cxgbe_nm_init(void *);
static int cxgbe_nm_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_nm_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_nm_qflush(struct ifnet *);

static int cxgbe_nm_init_synchronized(struct port_info *);
static int cxgbe_nm_uninit_synchronized(struct port_info *);

static void
cxgbe_nm_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nminit") != 0)
		return;
	cxgbe_nm_init_synchronized(pi);
	end_synchronized_op(sc, 0);

	return;
}

static int
cxgbe_nm_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return (0);	/* already running */

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		return (rc);	/* error message displayed already */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	return (rc);
}

static int
cxgbe_nm_uninit_synchronized(struct port_info *pi)
{
#ifdef INVARIANTS
	struct adapter *sc = pi->adapter;
#endif
	struct ifnet *ifp = pi->nm_ifp;

	ASSERT_SYNCHRONIZED_OP(sc);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	return (0);
}

static int
cxgbe_nm_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	MPASS(pi->nm_ifp == ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MTU);
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nflg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->nmif_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_nm_init_synchronized(pi);
			pi->nmif_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_nm_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4nmulti");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->nm_media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_nm_transmit(struct ifnet *ifp, struct mbuf *m)
{

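	/*
	 * This ifnet exists only for netmap use; any mbuf handed to the
	 * regular kernel transmit path is simply dropped.
	 */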
	m_freem(m);
	return (0);
}

static void
cxgbe_nm_qflush(struct ifnet *ifp)
{

	return;
}

static int
alloc_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	__be32 v;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, pi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	if (pi->flags & INTR_NM_RXQ) {
		KASSERT(nm_rxq->intr_idx < sc->intr_count,
		    ("%s: invalid direct intr_idx %d", __func__,
		    nm_rxq->intr_idx));
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	} else {
		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
		    F_FW_IQ_CMD_IQANDST;
	}
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(pi->nm_viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(pi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
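
	/*
	 * A negative cong disables congestion feedback for this queue.
	 * Otherwise the freelist's congestion context is enabled with cong
	 * as the channel map (see the nm_cong_drop comment above and
	 * tnl_cong() for how that value is chosen).
	 */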
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
		F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
		(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
		(black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
		V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == pi->qsize_rxq - spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = F_DBPRIO | V_QID(nm_rxq->fl_cntxt_id) | V_PIDX(0);
	if (is_t5(sc))
		nm_rxq->fl_db_val |= F_DBTYPE;

	if (is_t5(sc) && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
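		/*
		 * Encoding note (as this code reads, not confirmed against
		 * the hardware docs): cong == 0 appears to select
		 * drop-on-congestion mode (1 << 19), while a non-zero cong
		 * selects channel backpressure (2 << 19) with one enable bit
		 * per congestion channel in the low nibbles of val.
		 */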
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}

static int
free_nm_rxq_hwq(struct port_info *pi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	return (rc);
}

static int
alloc_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = pi->adapter;
	struct netmap_adapter *na = NA(pi->nm_ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->nm_viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
		V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
		V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
		      V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(pi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
	    panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
		cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}

static int
free_nm_txq_hwq(struct port_info *pi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	return (rc);
}

static int
cxgbe_netmap_on(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;
	uint16_t *rss;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((pi->flags & PORT_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE(na))
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(pi, i, nm_rxq) {
		alloc_nm_rxq_hwq(pi, nm_rxq, tnl_cong(pi, nm_cong_drop));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
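		/*
		 * Stop 8 buffers short of the ring size: the freelist is
		 * never filled completely, so pidx catching up to cidx
		 * (which would make a full ring look empty) cannot happen.
		 */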
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    nm_rxq->fl_db_val | V_PIDX(j));
	}

	for_each_nm_txq(pi, i, nm_txq) {
		alloc_nm_txq_hwq(pi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	rss = malloc(pi->nm_rss_size * sizeof (*rss), M_CXGBE, M_ZERO |
	    M_WAITOK);
	for (i = 0; i < pi->nm_rss_size;) {
		for_each_nm_rxq(pi, j, nm_rxq) {
			rss[i++] = nm_rxq->iq_abs_id;
			if (i == pi->nm_rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->nm_viid, 0, pi->nm_rss_size,
	    rss, pi->nm_rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);
	free(rss, M_CXGBE);

	rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, true, true);
	if (rc != 0)
		if_printf(ifp, "netmap enable_vi failed: %d\n", rc);

	return (rc);
}

static int
cxgbe_netmap_off(struct adapter *sc, struct port_info *pi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	rc = -t4_enable_vi(sc, sc->mbox, pi->nm_viid, false, false);
	if (rc != 0)
		if_printf(ifp, "netmap disable_vi failed: %d\n", rc);
	nm_clear_native_flags(na);

	for_each_nm_txq(pi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		/* Wait for hw pidx to catch up ... */
		while (be16toh(nm_txq->pidx) != spg->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(pi, nm_txq);
	}
	for_each_nm_rxq(pi, i, nm_rxq) {
		free_nm_rxq_hwq(pi, nm_rxq);
	}

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, pi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, pi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}
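
/*
 * Where these formulas come from (a sketch): each hardware descriptor is
 * EQ_ESIZE bytes (64 here), i.e. four 16B units.  A type1 fw_eth_tx_pkts_wr
 * has a 16B header followed, per packet, by a 16B cpl_tx_pkt_core and a 16B
 * single-entry ulptx_sgl, so n packets occupy 2n + 1 16B units
 * (npkt_to_len16) and howmany(2n + 1, 4) = (n + 2) / 2 descriptors
 * (npkt_to_ndesc).  Inverting, n descriptors hold 4n 16B units, which fit
 * at most 2n - 1 packets (ndesc_to_npkt).  E.g. 7 packets -> 15 units ->
 * 4 descriptors, and 4 descriptors -> at most 7 packets.
 */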

#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

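	/*
	 * Pick the best available doorbell mechanism (as I read the cxgbe
	 * doorbell flavors): WCWR pushes the work request itself through a
	 * write-combined window and only works for a single descriptor;
	 * UDB/UDBWC write the producer index to the user doorbell page; KDB
	 * falls back to the kernel doorbell register.
	 */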
	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only such queues
		 * can do WCWR, and then only one descriptor at a time.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}
	nm_txq->dbidx = nm_txq->pidx;
}

int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
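 *
 * Each iteration writes one fw_eth_tx_pkts_wr carrying up to
 * MAX_NPKT_IN_TYPE1_WR frames: the WR header, then a cpl_tx_pkt_core and a
 * one-entry ulptx_sgl (pointing at the netmap buffer) per frame.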
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * So the ncxl interfaces have tx hardware checksumming
			 * disabled by default.  But you can override netmap by
			 * enabling IFCAP_TXCSUM on the interface manually.
			 */
			cpl->ctrl1 = txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}

/* How many contiguous free descriptors starting at pidx */
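/*
 * One descriptor is deliberately kept unused (the -1 in two of the cases
 * below) so that pidx == cidx can only mean an empty ring, never a full one.
 */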
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

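	/*
	 * Walk the WRs between the driver's cidx and the hardware's,
	 * summing the npkt field from each WR header to learn how many
	 * frames have completed transmission.
	 */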
	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}

static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[pi->first_nm_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining, txcsum;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to advance
	 * to kring->rhead.  Note that the driver's pidx moves independent of
	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
	 * between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);
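		/*
		 * Worked example (assuming SGE_MAX_WR_NDESC is 4, so a full
		 * WR carries 7 packets in 4 descriptors): 10 packets
		 * remaining -> one full WR (4 desc) plus a 3-packet WR
		 * ((3 + 2) / 2 = 2 desc), i.e. d = 6.
		 */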

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}

static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[pi->first_nm_rxq + kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at a multiple-of-8 boundary (fl_pidx) last time
		 * around and we must have a multiple of 8 buffers to give to
		 * the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
			    nm_rxq->fl_db_val | V_PIDX(dbinc));
		}
	}

	return (0);
}

/*
 * Create an ifnet solely for netmap use and register it with the kernel.
 */
int
create_netmap_ifnet(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct netmap_adapter na;
	struct ifnet *ifp;
	device_t dev = pi->dev;
	uint8_t mac[ETHER_ADDR_LEN];
	int rc;

	if (pi->nnmtxq <= 0 || pi->nnmrxq <= 0)
		return (0);
	MPASS(pi->nm_ifp == NULL);

	/*
	 * Allocate a virtual interface exclusively for netmap use.  Give it the
	 * MAC address normally reserved for use by a TOE interface.  (The TOE
	 * driver on FreeBSD doesn't use it).
	 */
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, &mac[0],
	    &pi->nm_rss_size, FW_VI_FUNC_OFLD, 0);
	if (rc < 0) {
		device_printf(dev, "unable to allocate netmap virtual "
		    "interface for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	pi->nm_viid = rc;
	pi->nm_xact_addr_filt = -1;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate netmap ifnet\n");
		return (ENOMEM);
	}
	pi->nm_ifp = ifp;
	ifp->if_softc = pi;

	if_initname(ifp, is_t4(pi->adapter) ? "ncxgbe" : "ncxl",
	    device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_nm_init;
	ifp->if_ioctl = cxgbe_nm_ioctl;
	ifp->if_transmit = cxgbe_nm_transmit;
	ifp->if_qflush = cxgbe_nm_qflush;

	/*
	 * netmap(4) says "netmap does not use features such as checksum
	 * offloading, TCP segmentation offloading, encryption, VLAN
	 * encapsulation/decapsulation, etc."
	 *
	 * By default we comply with the statement above.  But we do declare the
	 * ifnet capable of L3/L4 checksumming so that a user can override
	 * netmap and have the hardware do the L3/L4 checksums.
	 */
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU |
	    IFCAP_HWCSUM_IPV6;
	ifp->if_capenable = 0;
	ifp->if_hwassist = 0;

	/* nm_media has already been setup by the caller */

	ether_ifattach(ifp, mac);

	/*
	 * Register with netmap in the kernel.
	 */
	bzero(&na, sizeof(na));

	na.ifp = pi->nm_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = pi->qsize_txq - spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in the
	 * freelist, and not the number of entries in the iq.  (These two are
	 * not exactly the same due to the space taken up by the status page).
	 */
	na.num_rx_desc = (pi->qsize_rxq / 8) * 8;
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = pi->nnmtxq;
	na.num_rx_rings = pi->nnmrxq;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */
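
	/*
	 * Userspace can now open the interface with the netmap API, e.g.
	 * (a sketch using netmap_user.h's nm_open(); the unit number depends
	 * on the system):
	 *
	 *   struct nm_desc *d = nm_open("netmap:ncxl0", NULL, 0, NULL);
	 */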

	return (0);
}

int
destroy_netmap_ifnet(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;

	if (pi->nm_ifp == NULL)
		return (0);

	netmap_detach(pi->nm_ifp);
	ifmedia_removeall(&pi->nm_media);
	ether_ifdetach(pi->nm_ifp);
	if_free(pi->nm_ifp);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->nm_viid);

	return (0);
}

static void
handle_nm_fw6_msg(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_fw6_msg *cpl)
{
	const struct cpl_sge_egr_update *egr;
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	if (cpl->type != FW_TYPE_RSSCPL && cpl->type != FW6_TYPE_RSSCPL)
		panic("%s: FW_TYPE 0x%x on nm_rxq.", __func__, cpl->type);

	/* data[0] is RSS header */
	egr = (const void *)&cpl->data[1];
	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}

void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;
	struct port_info *pi = nm_rxq->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->nm_ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	uint32_t lq;
	u_int n = 0, work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;

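	/*
	 * Consume response queue entries while their generation bit matches
	 * ours; the bit is flipped each time iq_cidx wraps, which is how
	 * new entries are told apart from stale ones.
	 */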
	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:
			if (black_hole != 2) {
				/* No buffer packing so new buf every time */
				MPASS(lq & F_RSPD_NEWBUF);
			}

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				handle_nm_fw6_msg(sc, ifp,
				    (const void *)&d->cpl[0]);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) - fl_pktshift;
				ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
				fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0;
				if (__predict_false(fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++n == rx_ndesc)) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			} else if (!black_hole) {
				netmap_rx_irq(ifp, nm_rxq->nid, &work);
				MPASS(work != 0);
			}
			t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
			    V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			n = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(n) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
#endif