xref: /freebsd/sys/dev/mana/mana_en.c (revision 516b5059)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Microsoft Corp.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/smp.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/time.h>
42 #include <sys/eventhandler.h>
43 
44 #include <machine/bus.h>
45 #include <machine/resource.h>
46 #include <machine/in_cksum.h>
47 
48 #include <net/if.h>
49 #include <net/if_var.h>
50 #include <net/if_types.h>
51 #include <net/if_vlan_var.h>
52 #ifdef RSS
53 #include <net/rss_config.h>
54 #endif
55 
56 #include <netinet/in_systm.h>
57 #include <netinet/in.h>
58 #include <netinet/if_ether.h>
59 #include <netinet/ip.h>
60 #include <netinet/ip6.h>
61 #include <netinet/tcp.h>
62 #include <netinet/udp.h>
63 
64 #include "mana.h"
65 #include "mana_sysctl.h"
66 
67 static int mana_up(struct mana_port_context *apc);
68 static int mana_down(struct mana_port_context *apc);
69 
70 static void
71 mana_rss_key_fill(void *k, size_t size)
72 {
73 	static bool rss_key_generated = false;
74 	static uint8_t rss_key[MANA_HASH_KEY_SIZE];
75 
76 	KASSERT(size <= MANA_HASH_KEY_SIZE,
77 	    ("Requested more bytes than the MANA RSS key can hold"));
78 
79 	if (!rss_key_generated) {
80 		arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
81 		rss_key_generated = true;
82 	}
83 	memcpy(k, rss_key, size);
84 }
85 
86 static int
87 mana_ifmedia_change(if_t ifp __unused)
88 {
89 	return EOPNOTSUPP;
90 }
91 
92 static void
93 mana_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
94 {
95 	struct mana_port_context *apc = if_getsoftc(ifp);
96 
97 	if (!apc) {
98 		if_printf(ifp, "Port not available\n");
99 		return;
100 	}
101 
102 	MANA_APC_LOCK_LOCK(apc);
103 
104 	ifmr->ifm_status = IFM_AVALID;
105 	ifmr->ifm_active = IFM_ETHER;
106 
107 	if (!apc->port_is_up) {
108 		MANA_APC_LOCK_UNLOCK(apc);
109 		mana_dbg(NULL, "Port %u link is down\n", apc->port_idx);
110 		return;
111 	}
112 
113 	ifmr->ifm_status |= IFM_ACTIVE;
114 	ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
115 
116 	MANA_APC_LOCK_UNLOCK(apc);
117 }
118 
119 static uint64_t
120 mana_get_counter(if_t ifp, ift_counter cnt)
121 {
122 	struct mana_port_context *apc = if_getsoftc(ifp);
123 	struct mana_port_stats *stats = &apc->port_stats;
124 
125 	switch (cnt) {
126 	case IFCOUNTER_IPACKETS:
127 		return (counter_u64_fetch(stats->rx_packets));
128 	case IFCOUNTER_OPACKETS:
129 		return (counter_u64_fetch(stats->tx_packets));
130 	case IFCOUNTER_IBYTES:
131 		return (counter_u64_fetch(stats->rx_bytes));
132 	case IFCOUNTER_OBYTES:
133 		return (counter_u64_fetch(stats->tx_bytes));
134 	case IFCOUNTER_IQDROPS:
135 		return (counter_u64_fetch(stats->rx_drops));
136 	case IFCOUNTER_OQDROPS:
137 		return (counter_u64_fetch(stats->tx_drops));
138 	default:
139 		return (if_get_counter_default(ifp, cnt));
140 	}
141 }
142 
143 static void
144 mana_qflush(if_t ifp)
145 {
146 	if_qflush(ifp);
147 }
148 
149 int
150 mana_restart(struct mana_port_context *apc)
151 {
152 	int rc = 0;
153 
154 	MANA_APC_LOCK_LOCK(apc);
155 	if (apc->port_is_up)
156 		 mana_down(apc);
157 
158 	rc = mana_up(apc);
159 	MANA_APC_LOCK_UNLOCK(apc);
160 
161 	return (rc);
162 }
163 
164 static int
165 mana_ioctl(if_t ifp, u_long command, caddr_t data)
166 {
167 	struct mana_port_context *apc = if_getsoftc(ifp);
168 	struct ifrsskey *ifrk;
169 	struct ifrsshash *ifrh;
170 	struct ifreq *ifr;
171 	uint16_t new_mtu;
172 	int rc = 0, mask;
173 
174 	switch (command) {
175 	case SIOCSIFMTU:
176 		ifr = (struct ifreq *)data;
177 		new_mtu = ifr->ifr_mtu;
178 		if (if_getmtu(ifp) == new_mtu)
179 			break;
180 		if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
181 		    (new_mtu + 18 < MIN_FRAME_SIZE)) {
182 			if_printf(ifp, "Invalid MTU. new_mtu: %d, "
183 			    "max allowed: %d, min allowed: %d\n",
184 			    new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
185 			return EINVAL;
186 		}
187 		MANA_APC_LOCK_LOCK(apc);
188 		if (apc->port_is_up)
189 			mana_down(apc);
190 
191 		apc->frame_size = new_mtu + 18;
192 		if_setmtu(ifp, new_mtu);
193 		mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
194 
195 		rc = mana_up(apc);
196 		MANA_APC_LOCK_UNLOCK(apc);
197 		break;
198 
199 	case SIOCSIFFLAGS:
200 		if (if_getflags(ifp) & IFF_UP) {
201 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
202 				MANA_APC_LOCK_LOCK(apc);
203 				if (!apc->port_is_up)
204 					rc = mana_up(apc);
205 				MANA_APC_LOCK_UNLOCK(apc);
206 			}
207 		} else {
208 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
209 				MANA_APC_LOCK_LOCK(apc);
210 				if (apc->port_is_up)
211 					mana_down(apc);
212 				MANA_APC_LOCK_UNLOCK(apc);
213 			}
214 		}
215 		break;
216 
217 	case SIOCSIFCAP:
218 		MANA_APC_LOCK_LOCK(apc);
219 		ifr = (struct ifreq *)data;
220 		/*
221 		 * Fix up requested capabilities w/ supported capabilities,
222 		 * since the supported capabilities could have been changed.
223 		 */
224 		mask = (ifr->ifr_reqcap & if_getcapabilities(ifp)) ^
225 		    if_getcapenable(ifp);
226 
227 		if (mask & IFCAP_TXCSUM) {
228 			if_togglecapenable(ifp, IFCAP_TXCSUM);
229 			if_togglehwassist(ifp, (CSUM_TCP | CSUM_UDP | CSUM_IP));
230 
231 			if ((IFCAP_TSO4 & if_getcapenable(ifp)) &&
232 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
233 				mask &= ~IFCAP_TSO4;
234 				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
235 				if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
236 				mana_warn(NULL,
237 				    "Also disabled tso4 due to -txcsum.\n");
238 			}
239 		}
240 
241 		if (mask & IFCAP_TXCSUM_IPV6) {
242 			if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
243 			if_togglehwassist(ifp, (CSUM_UDP_IPV6 | CSUM_TCP_IPV6));
244 
245 			if ((IFCAP_TSO6 & if_getcapenable(ifp)) &&
246 			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
247 				mask &= ~IFCAP_TSO6;
248 				if_setcapenablebit(ifp, 0, IFCAP_TSO6);
249 				if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
250 				mana_warn(ifp,
251 				    "Also disabled tso6 due to -txcsum6.\n");
252 			}
253 		}
254 
255 		if (mask & IFCAP_RXCSUM)
256 			if_togglecapenable(ifp, IFCAP_RXCSUM);
257 		/* We can't distinguish IPv6 from IPv4 packets on the RX path. */
258 		if (mask & IFCAP_RXCSUM_IPV6)
259 			if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
260 
261 		if (mask & IFCAP_LRO)
262 			if_togglecapenable(ifp, IFCAP_LRO);
263 
264 		if (mask & IFCAP_TSO4) {
265 			if (!(IFCAP_TSO4 & if_getcapenable(ifp)) &&
266 			    !(IFCAP_TXCSUM & if_getcapenable(ifp))) {
267 				MANA_APC_LOCK_UNLOCK(apc);
268 				if_printf(ifp, "Enable txcsum first.\n");
269 				rc = EAGAIN;
270 				goto out;
271 			}
272 			if_togglecapenable(ifp, IFCAP_TSO4);
273 			if_togglehwassist(ifp, CSUM_IP_TSO);
274 		}
275 
276 		if (mask & IFCAP_TSO6) {
277 			if (!(IFCAP_TSO6 & if_getcapenable(ifp)) &&
278 			    !(IFCAP_TXCSUM_IPV6 & if_getcapenable(ifp))) {
279 				MANA_APC_LOCK_UNLOCK(apc);
280 				if_printf(ifp, "Enable txcsum6 first.\n");
281 				rc = EAGAIN;
282 				goto out;
283 			}
284 			if_togglecapenable(ifp, IFCAP_TSO6);
285 			if_togglehwassist(ifp, CSUM_IP6_TSO);
286 		}
287 
288 		MANA_APC_LOCK_UNLOCK(apc);
289 out:
290 		break;
291 
292 	case SIOCSIFMEDIA:
293 	case SIOCGIFMEDIA:
294 	case SIOCGIFXMEDIA:
295 		ifr = (struct ifreq *)data;
296 		rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
297 		break;
298 
299 	case SIOCGIFRSSKEY:
300 		ifrk = (struct ifrsskey *)data;
301 		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
302 		ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
303 		memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
304 		break;
305 
306 	case SIOCGIFRSSHASH:
307 		ifrh = (struct ifrsshash *)data;
308 		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
309 		ifrh->ifrh_types =
310 		    RSS_TYPE_TCP_IPV4 |
311 		    RSS_TYPE_UDP_IPV4 |
312 		    RSS_TYPE_TCP_IPV6 |
313 		    RSS_TYPE_UDP_IPV6;
314 		break;
315 
316 	default:
317 		rc = ether_ioctl(ifp, command, data);
318 		break;
319 	}
320 
321 	return (rc);
322 }
323 
324 static inline void
325 mana_alloc_counters(counter_u64_t *begin, int size)
326 {
327 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
328 
329 	for (; begin < end; ++begin)
330 		*begin = counter_u64_alloc(M_WAITOK);
331 }
332 
333 static inline void
334 mana_free_counters(counter_u64_t *begin, int size)
335 {
336 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
337 
338 	for (; begin < end; ++begin)
339 		counter_u64_free(*begin);
340 }
341 
342 static bool
343 mana_can_tx(struct gdma_queue *wq)
344 {
345 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
346 }
347 
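/*
 * DMA-map an outgoing mbuf chain into the tx_info slot and fill in the work
 * request SGL. If the chain has more fragments than the TX DMA tag allows
 * (EFBIG), the chain is collapsed with m_collapse() and the mapping is
 * retried once before giving up.
 */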
348 static inline int
349 mana_tx_map_mbuf(struct mana_port_context *apc,
350     struct mana_send_buf_info *tx_info,
351     struct mbuf **m_head, struct mana_tx_package *tp,
352     struct mana_stats *tx_stats)
353 {
354 	struct gdma_dev *gd = apc->ac->gdma_dev;
355 	bus_dma_segment_t segs[MAX_MBUF_FRAGS];
356 	struct mbuf *m = *m_head;
357 	int err, nsegs, i;
358 
359 	err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
360 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
361 	if (err == EFBIG) {
362 		struct mbuf *m_new;
363 
364 		counter_u64_add(tx_stats->collapse, 1);
365 		m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
366 		if (unlikely(m_new == NULL)) {
367 			counter_u64_add(tx_stats->collapse_err, 1);
368 			return ENOBUFS;
369 		} else {
370 			*m_head = m = m_new;
371 		}
372 
373 		mana_warn(NULL,
374 		    "Too many segs in orig mbuf, m_collapse called\n");
375 
376 		err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
377 		    tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
378 	}
379 	if (!err) {
380 		for (i = 0; i < nsegs; i++) {
381 			tp->wqe_req.sgl[i].address = segs[i].ds_addr;
382 			tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
383 			tp->wqe_req.sgl[i].size = segs[i].ds_len;
384 		}
385 		tp->wqe_req.num_sge = nsegs;
386 
387 		tx_info->mbuf = *m_head;
388 
389 		bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
390 		    BUS_DMASYNC_PREWRITE);
391 	}
392 
393 	return err;
394 }
395 
396 static inline void
397 mana_tx_unmap_mbuf(struct mana_port_context *apc,
398     struct mana_send_buf_info *tx_info)
399 {
400 	bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
401 	    BUS_DMASYNC_POSTWRITE);
402 	bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
403 	if (tx_info->mbuf) {
404 		m_freem(tx_info->mbuf);
405 		tx_info->mbuf = NULL;
406 	}
407 }
408 
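/*
 * Attach a receive buffer to an RX OOB slot. With alloc_mbuf set, a jumbo
 * cluster sized to rxq->datasize is preferred and a standard cluster
 * (MCLBYTES) is used as a fallback; otherwise the slot's existing mbuf is
 * reused. The buffer is then DMA-mapped and described by a one-entry SGL.
 */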
409 static inline int
410 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
411     struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
412 {
413 	bus_dma_segment_t segs[1];
414 	struct mbuf *mbuf;
415 	int nsegs, err;
416 	uint32_t mlen;
417 
418 	if (alloc_mbuf) {
419 		mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
420 		if (unlikely(mbuf == NULL)) {
421 			mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
422 			if (unlikely(mbuf == NULL)) {
423 				return ENOMEM;
424 			}
425 			mlen = MCLBYTES;
426 		} else {
427 			mlen = rxq->datasize;
428 		}
429 
430 		mbuf->m_pkthdr.len = mbuf->m_len = mlen;
431 	} else {
432 		if (rx_oob->mbuf) {
433 			mbuf = rx_oob->mbuf;
434 			mlen = rx_oob->mbuf->m_pkthdr.len;
435 		} else {
436 			return ENOMEM;
437 		}
438 	}
439 
440 	err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
441 	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
442 
443 	if (unlikely((err != 0) || (nsegs != 1))) {
444 		mana_warn(NULL, "Failed to map mbuf, error: %d, "
445 		    "nsegs: %d\n", err, nsegs);
446 		counter_u64_add(rxq->stats.dma_mapping_err, 1);
447 		goto error;
448 	}
449 
450 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
451 	    BUS_DMASYNC_PREREAD);
452 
453 	rx_oob->mbuf = mbuf;
454 	rx_oob->num_sge = 1;
455 	rx_oob->sgl[0].address = segs[0].ds_addr;
456 	rx_oob->sgl[0].size = mlen;
457 	rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
458 
459 	return 0;
460 
461 error:
462 	m_freem(mbuf);
463 	return EFAULT;
464 }
465 
466 static inline void
467 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
468     struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
469 {
470 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
471 	    BUS_DMASYNC_POSTREAD);
472 	bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
473 
474 	if (free_mbuf && rx_oob->mbuf) {
475 		m_freem(rx_oob->mbuf);
476 		rx_oob->mbuf = NULL;
477 	}
478 }
479 
480 
481 /* Use a couple of mbuf PH_loc spaces for the L3 and L4 protocol types */
482 #define MANA_L3_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
483 #define MANA_L4_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
484 
485 #define MANA_TXQ_FULL	(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
486 
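/*
 * Drain the txq buf_ring: DMA-map each mbuf, build the per-packet OOB
 * (checksum/TSO offload bits and vport offset), post a work request on the
 * GDMA send queue and ring the doorbell. When the SQ runs out of space the
 * interface is marked IFF_DRV_OACTIVE and the completion cleanup task is
 * scheduled to re-enable transmission later.
 */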
487 static void
488 mana_xmit(struct mana_txq *txq)
489 {
490 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
491 	struct mana_send_buf_info *tx_info;
492 	if_t ndev = txq->ndev;
493 	struct mbuf *mbuf;
494 	struct mana_port_context *apc = if_getsoftc(ndev);
495 	struct mana_port_stats *port_stats = &apc->port_stats;
496 	struct gdma_dev *gd = apc->ac->gdma_dev;
497 	uint64_t packets, bytes;
498 	uint16_t next_to_use;
499 	struct mana_tx_package pkg = {};
500 	struct mana_stats *tx_stats;
501 	struct gdma_queue *gdma_sq;
502 	struct mana_cq *cq;
503 	int err, len;
504 	bool is_tso;
505 
506 	gdma_sq = txq->gdma_sq;
507 	cq = &apc->tx_qp[txq->idx].tx_cq;
508 	tx_stats = &txq->stats;
509 
510 	packets = 0;
511 	bytes = 0;
512 	next_to_use = txq->next_to_use;
513 
514 	while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
515 		if (!apc->port_is_up ||
516 		    (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
517 			drbr_putback(ndev, txq->txq_br, mbuf);
518 			break;
519 		}
520 
521 		if (!mana_can_tx(gdma_sq)) {
522 			/* SQ is full. Set the IFF_DRV_OACTIVE flag */
523 			if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
524 			counter_u64_add(tx_stats->stop, 1);
525 			uint64_t stops = counter_u64_fetch(tx_stats->stop);
526 			uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
527 #define MANA_TXQ_STOP_THRESHOLD		50
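			/*
			 * If this queue has stopped far more often than it
			 * has been woken up, point alt_txq_idx at another
			 * queue so that, with tx_altq enabled, new traffic
			 * hashed here is steered away from the congested
			 * ring.
			 */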
528 			if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
529 			    stops > wakeups && txq->alt_txq_idx == txq->idx) {
530 				txq->alt_txq_idx =
531 				    (txq->idx + (stops / wakeups))
532 				    % apc->num_queues;
533 				counter_u64_add(tx_stats->alt_chg, 1);
534 			}
535 
536 			drbr_putback(ndev, txq->txq_br, mbuf);
537 
538 			taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
539 			break;
540 		}
541 
542 		tx_info = &txq->tx_buf_info[next_to_use];
543 
544 		memset(&pkg, 0, sizeof(struct mana_tx_package));
545 		pkg.wqe_req.sgl = pkg.sgl_array;
546 
547 		err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
548 		if (unlikely(err)) {
549 			mana_dbg(NULL,
550 			    "Failed to map tx mbuf, err %d\n", err);
551 
552 			counter_u64_add(tx_stats->dma_mapping_err, 1);
553 
554 			/* The mbuf is still there. Free it */
555 			m_freem(mbuf);
556 			/* Advance the drbr queue */
557 			drbr_advance(ndev, txq->txq_br);
558 			continue;
559 		}
560 
561 		pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
562 		pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
563 
564 		if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
565 			pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
566 			pkt_fmt = MANA_LONG_PKT_FMT;
567 		} else {
568 			pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
569 		}
570 
571 		pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
572 
573 		if (pkt_fmt == MANA_SHORT_PKT_FMT)
574 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
575 		else
576 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
577 
578 		pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
579 		pkg.wqe_req.flags = 0;
580 		pkg.wqe_req.client_data_unit = 0;
581 
582 		is_tso = false;
583 		if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
584 			is_tso = true;
585 
586 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
587 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
588 			else
589 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
590 
591 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
592 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
593 			pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
594 
595 			pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
596 			pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
597 		} else if (mbuf->m_pkthdr.csum_flags &
598 		    (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
599 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
600 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
601 				pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
602 			} else {
603 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
604 			}
605 
606 			if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
607 				pkg.tx_oob.s_oob.comp_tcp_csum = 1;
608 				pkg.tx_oob.s_oob.trans_off =
609 				    mbuf->m_pkthdr.l3hlen;
610 			} else {
611 				pkg.tx_oob.s_oob.comp_udp_csum = 1;
612 			}
613 		} else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
614 			pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
615 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
616 		} else {
617 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
618 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
619 			else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
620 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
621 		}
622 
623 		len = mbuf->m_pkthdr.len;
624 
625 		err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
626 		    (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
627 		if (unlikely(err)) {
628 			/* Should not happen */
629 			if_printf(ndev, "Failed to post TX OOB: %d\n", err);
630 
631 			mana_tx_unmap_mbuf(apc, tx_info);
632 
633 			drbr_advance(ndev, txq->txq_br);
634 			continue;
635 		}
636 
637 		next_to_use =
638 		    (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
639 
640 		(void)atomic_inc_return(&txq->pending_sends);
641 
642 		drbr_advance(ndev, txq->txq_br);
643 
644 		mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
645 
646 		packets++;
647 		bytes += len;
648 
649 		if (is_tso) {
650 			txq->tso_pkts++;
651 			txq->tso_bytes += len;
652 		}
653 	}
654 
655 	counter_enter();
656 	counter_u64_add_protected(tx_stats->packets, packets);
657 	counter_u64_add_protected(port_stats->tx_packets, packets);
658 	counter_u64_add_protected(tx_stats->bytes, bytes);
659 	counter_u64_add_protected(port_stats->tx_bytes, bytes);
660 	counter_exit();
661 
662 	txq->next_to_use = next_to_use;
663 }
664 
665 static void
666 mana_xmit_taskfunc(void *arg, int pending)
667 {
668 	struct mana_txq *txq = (struct mana_txq *)arg;
669 	if_t ndev = txq->ndev;
670 	struct mana_port_context *apc = if_getsoftc(ndev);
671 
672 	while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
673 	    (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
674 		mtx_lock(&txq->txq_mtx);
675 		mana_xmit(txq);
676 		mtx_unlock(&txq->txq_mtx);
677 	}
678 }
679 
680 #define PULLUP_HDR(m, len)				\
681 do {							\
682 	if (unlikely((m)->m_len < (len))) {		\
683 		(m) = m_pullup((m), (len));		\
684 		if ((m) == NULL)			\
685 			return (NULL);			\
686 	}						\
687 } while (0)
688 
689 /*
690  * If this function fails, the mbuf is freed.
691  */
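/*
 * Prepare a TSO mbuf for hardware segmentation: record the L3 header length
 * in the packet header, zero the IP(v6) payload length field and seed the
 * TCP checksum with the pseudo-header sum, leaving the rest for the offload
 * engine to complete. The Ethernet type is stashed via MANA_L3_PROTO().
 */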
692 static inline struct mbuf *
693 mana_tso_fixup(struct mbuf *mbuf)
694 {
695 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
696 	struct tcphdr *th;
697 	uint16_t etype;
698 	int ehlen;
699 
700 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
701 		etype = ntohs(eh->evl_proto);
702 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
703 	} else {
704 		etype = ntohs(eh->evl_encap_proto);
705 		ehlen = ETHER_HDR_LEN;
706 	}
707 
708 	if (etype == ETHERTYPE_IP) {
709 		struct ip *ip;
710 		int iphlen;
711 
712 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
713 		ip = mtodo(mbuf, ehlen);
714 		iphlen = ip->ip_hl << 2;
715 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
716 
717 		PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
718 		th = mtodo(mbuf, ehlen + iphlen);
719 
720 		ip->ip_len = 0;
721 		ip->ip_sum = 0;
722 		th->th_sum = in_pseudo(ip->ip_src.s_addr,
723 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
724 	} else if (etype == ETHERTYPE_IPV6) {
725 		struct ip6_hdr *ip6;
726 
727 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
728 		ip6 = mtodo(mbuf, ehlen);
729 		if (ip6->ip6_nxt != IPPROTO_TCP) {
730 			/* Something is really wrong; just return. */
731 			mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
732 			m_freem(mbuf);
733 			return NULL;
734 		}
735 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
736 
737 		th = mtodo(mbuf, ehlen + sizeof(*ip6));
738 
739 		ip6->ip6_plen = 0;
740 		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
741 	} else {
742 		/* CSUM_TSO is set but not IP protocol. */
743 		mana_warn(NULL, "TSO mbuf not right, freed.\n");
744 		m_freem(mbuf);
745 		return NULL;
746 	}
747 
748 	MANA_L3_PROTO(mbuf) = etype;
749 
750 	return (mbuf);
751 }
752 
753 /*
754  * If this function fails, the mbuf is freed.
755  */
756 static inline struct mbuf *
757 mana_mbuf_csum_check(struct mbuf *mbuf)
758 {
759 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
760 	struct mbuf *mbuf_next;
761 	uint16_t etype;
762 	int offset;
763 	int ehlen;
764 
765 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
766 		etype = ntohs(eh->evl_proto);
767 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
768 	} else {
769 		etype = ntohs(eh->evl_encap_proto);
770 		ehlen = ETHER_HDR_LEN;
771 	}
772 
773 	mbuf_next = m_getptr(mbuf, ehlen, &offset);
774 
775 	MANA_L4_PROTO(mbuf) = 0;
776 	if (etype == ETHERTYPE_IP) {
777 		const struct ip *ip;
778 		int iphlen;
779 
780 		ip = (struct ip *)(mtodo(mbuf_next, offset));
781 		iphlen = ip->ip_hl << 2;
782 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
783 
784 		MANA_L4_PROTO(mbuf) = ip->ip_p;
785 	} else if (etype == ETHERTYPE_IPV6) {
786 		const struct ip6_hdr *ip6;
787 
788 		ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
789 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
790 
791 		MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
792 	} else {
793 		MANA_L4_PROTO(mbuf) = 0;
794 	}
795 
796 	MANA_L3_PROTO(mbuf) = etype;
797 
798 	return (mbuf);
799 }
800 
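/*
 * if_transmit entry point. The target txq is picked from the mbuf's RSS
 * flowid through the port's indirection table (or a plain modulo when no
 * hash is present), optionally redirected to an alternate queue when TX
 * altq balancing is enabled. The mbuf is enqueued on the buf_ring and sent
 * inline if the ring was empty and the txq lock is uncontended; otherwise
 * the enqueue taskqueue kicks mana_xmit().
 */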
801 static int
802 mana_start_xmit(if_t ifp, struct mbuf *m)
803 {
804 	struct mana_port_context *apc = if_getsoftc(ifp);
805 	struct mana_txq *txq;
806 	int is_drbr_empty;
807 	uint16_t txq_id;
808 	int err;
809 
810 	if (unlikely((!apc->port_is_up) ||
811 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
812 		return ENODEV;
813 
814 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
815 		m = mana_tso_fixup(m);
816 		if (unlikely(m == NULL)) {
817 			counter_enter();
818 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
819 			counter_exit();
820 			return EIO;
821 		}
822 	} else {
823 		m = mana_mbuf_csum_check(m);
824 		if (unlikely(m == NULL)) {
825 			counter_enter();
826 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
827 			counter_exit();
828 			return EIO;
829 		}
830 	}
831 
832 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
833 		uint32_t hash = m->m_pkthdr.flowid;
834 		txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
835 		    apc->num_queues;
836 	} else {
837 		txq_id = m->m_pkthdr.flowid % apc->num_queues;
838 	}
839 
840 	if (apc->enable_tx_altq)
841 		txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
842 
843 	txq = &apc->tx_qp[txq_id].txq;
844 
845 	is_drbr_empty = drbr_empty(ifp, txq->txq_br);
846 	err = drbr_enqueue(ifp, txq->txq_br, m);
847 	if (unlikely(err)) {
848 		mana_warn(NULL, "txq %u failed to enqueue: %d\n",
849 		    txq_id, err);
850 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
851 		return err;
852 	}
853 
854 	if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
855 		mana_xmit(txq);
856 		mtx_unlock(&txq->txq_mtx);
857 	} else {
858 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
859 	}
860 
861 	return 0;
862 }
863 
864 static void
865 mana_cleanup_port_context(struct mana_port_context *apc)
866 {
867 	bus_dma_tag_destroy(apc->tx_buf_tag);
868 	bus_dma_tag_destroy(apc->rx_buf_tag);
869 	apc->rx_buf_tag = NULL;
870 
871 	free(apc->rxqs, M_DEVBUF);
872 	apc->rxqs = NULL;
873 
874 	mana_free_counters((counter_u64_t *)&apc->port_stats,
875 	    sizeof(struct mana_port_stats));
876 }
877 
878 static int
879 mana_init_port_context(struct mana_port_context *apc)
880 {
881 	device_t dev = apc->ac->gdma_dev->gdma_context->dev;
882 	uint32_t tso_maxsize;
883 	int err;
884 
885 	tso_maxsize = MANA_TSO_MAX_SZ;
886 
887 	/* Create DMA tag for tx bufs */
888 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
889 	    1, 0,			/* alignment, boundary	*/
890 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
891 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
892 	    NULL, NULL,			/* filter, filterarg	*/
893 	    tso_maxsize,		/* maxsize		*/
894 	    MAX_MBUF_FRAGS,		/* nsegments		*/
895 	    tso_maxsize,		/* maxsegsize		*/
896 	    0,				/* flags		*/
897 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
898 	    &apc->tx_buf_tag);
899 	if (unlikely(err)) {
900 		device_printf(dev, "Failed to create TX DMA tag\n");
901 		return err;
902 	}
903 
904 	/* Create DMA tag for rx bufs */
905 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
906 	    64, 0,			/* alignment, boundary	*/
907 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
908 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
909 	    NULL, NULL,			/* filter, filterarg	*/
910 	    MJUMPAGESIZE,		/* maxsize		*/
911 	    1,				/* nsegments		*/
912 	    MJUMPAGESIZE,		/* maxsegsize		*/
913 	    0,				/* flags		*/
914 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
915 	    &apc->rx_buf_tag);
916 	if (unlikely(err)) {
917 		device_printf(dev, "Failed to create RX DMA tag\n");
918 		return err;
919 	}
920 
921 	apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
922 	    M_DEVBUF, M_WAITOK | M_ZERO);
923 
924 	if (!apc->rxqs) {
925 		bus_dma_tag_destroy(apc->tx_buf_tag);
926 		bus_dma_tag_destroy(apc->rx_buf_tag);
927 		apc->rx_buf_tag = NULL;
928 		return ENOMEM;
929 	}
930 
931 	return 0;
932 }
933 
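/*
 * Send a management request over the GDMA channel and validate the reply:
 * the call fails if the transport errs, the response status is non-zero, or
 * the echoed dev_id/activity_id does not match the request.
 */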
934 static int
935 mana_send_request(struct mana_context *ac, void *in_buf,
936     uint32_t in_len, void *out_buf, uint32_t out_len)
937 {
938 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
939 	struct gdma_resp_hdr *resp = out_buf;
940 	struct gdma_req_hdr *req = in_buf;
941 	device_t dev = gc->dev;
942 	static atomic_t activity_id;
943 	int err;
944 
945 	req->dev_id = gc->mana.dev_id;
946 	req->activity_id = atomic_inc_return(&activity_id);
947 
948 	mana_dbg(NULL, "activity_id  = %u\n", activity_id);
949 
950 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
951 	    out_buf);
952 	if (err || resp->status) {
953 		device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
954 			err, resp->status);
955 		return err ? err : EPROTO;
956 	}
957 
958 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
959 	    req->activity_id != resp->activity_id) {
960 		device_printf(dev,
961 		    "Unexpected mana message response: %x,%x,%x,%x\n",
962 		    req->dev_id.as_uint32, resp->dev_id.as_uint32,
963 		    req->activity_id, resp->activity_id);
964 		return EPROTO;
965 	}
966 
967 	return 0;
968 }
969 
970 static int
971 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
972     const enum mana_command_code expected_code,
973     const uint32_t min_size)
974 {
975 	if (resp_hdr->response.msg_type != expected_code)
976 		return EPROTO;
977 
978 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
979 		return EPROTO;
980 
981 	if (resp_hdr->response.msg_size < min_size)
982 		return EPROTO;
983 
984 	return 0;
985 }
986 
987 static int
988 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
989     uint32_t proto_minor_ver, uint32_t proto_micro_ver,
990     uint16_t *max_num_vports)
991 {
992 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
993 	struct mana_query_device_cfg_resp resp = {};
994 	struct mana_query_device_cfg_req req = {};
995 	device_t dev = gc->dev;
996 	int err = 0;
997 
998 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
999 	    sizeof(req), sizeof(resp));
1000 	req.proto_major_ver = proto_major_ver;
1001 	req.proto_minor_ver = proto_minor_ver;
1002 	req.proto_micro_ver = proto_micro_ver;
1003 
1004 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
1005 	if (err) {
1006 		device_printf(dev, "Failed to query config: %d", err);
1007 		return err;
1008 	}
1009 
1010 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
1011 	    sizeof(resp));
1012 	if (err || resp.hdr.status) {
1013 		device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
1014 		    resp.hdr.status);
1015 		if (!err)
1016 			err = EPROTO;
1017 		return err;
1018 	}
1019 
1020 	*max_num_vports = resp.max_num_vports;
1021 
1022 	mana_dbg(NULL, "mana max_num_vports from device = %d\n",
1023 	    *max_num_vports);
1024 
1025 	return 0;
1026 }
1027 
1028 static int
1029 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
1030     uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
1031 {
1032 	struct mana_query_vport_cfg_resp resp = {};
1033 	struct mana_query_vport_cfg_req req = {};
1034 	int err;
1035 
1036 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
1037 	    sizeof(req), sizeof(resp));
1038 
1039 	req.vport_index = vport_index;
1040 
1041 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1042 	    sizeof(resp));
1043 	if (err)
1044 		return err;
1045 
1046 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
1047 	    sizeof(resp));
1048 	if (err)
1049 		return err;
1050 
1051 	if (resp.hdr.status)
1052 		return EPROTO;
1053 
1054 	*max_sq = resp.max_num_sq;
1055 	*max_rq = resp.max_num_rq;
1056 	*num_indir_entry = resp.num_indirection_ent;
1057 
1058 	apc->port_handle = resp.vport;
1059 	memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
1060 
1061 	return 0;
1062 }
1063 
1064 void
1065 mana_uncfg_vport(struct mana_port_context *apc)
1066 {
1067 	apc->vport_use_count--;
1068 	if (apc->vport_use_count < 0) {
1069 		mana_err(NULL,
1070 		    "WARNING: vport_use_count less than 0: %u\n",
1071 		    apc->vport_use_count);
1072 	}
1073 }
1074 
1075 int
1076 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
1077     uint32_t doorbell_pg_id)
1078 {
1079 	struct mana_config_vport_resp resp = {};
1080 	struct mana_config_vport_req req = {};
1081 	int err;
1082 
1083 	/* This function is used to program the Ethernet port in the hardware
1084 	 * table. It can be called from the Ethernet driver or the RDMA driver.
1085 	 *
1086 	 * For Ethernet usage, the hardware supports only one active user on a
1087 	 * physical port. The driver checks on the port usage before programming
1088 	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1089 	 * device to kernel NET layer (Ethernet driver).
1090 	 *
1091 	 * Because the RDMA driver doesn't know in advance which QP type the
1092 	 * user will create, it exposes the device with all its ports. The user
1093 	 * may not be able to create a RAW QP on a port if that port is already
1094 	 * in use by the kernel Ethernet driver.
1095 	 *
1096 	 * This physical port limitation only applies to the RAW QP. For RC QP,
1097 	 * the hardware doesn't have this limitation. The user can create RC
1098 	 * QPs on a physical port up to the hardware limits independent of the
1099 	 * Ethernet usage on the same port.
1100 	 */
1101 	if (apc->vport_use_count > 0) {
1102 		return EBUSY;
1103 	}
1104 	apc->vport_use_count++;
1105 
1106 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1107 	    sizeof(req), sizeof(resp));
1108 	req.vport = apc->port_handle;
1109 	req.pdid = protection_dom_id;
1110 	req.doorbell_pageid = doorbell_pg_id;
1111 
1112 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1113 	    sizeof(resp));
1114 	if (err) {
1115 		if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1116 		goto out;
1117 	}
1118 
1119 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1120 	    sizeof(resp));
1121 	if (err || resp.hdr.status) {
1122 		if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1123 		    err, resp.hdr.status);
1124 		if (!err)
1125 			err = EPROTO;
1126 
1127 		goto out;
1128 	}
1129 
1130 	apc->tx_shortform_allowed = resp.short_form_allowed;
1131 	apc->tx_vp_offset = resp.tx_vport_offset;
1132 
1133 	if_printf(apc->ndev, "Configured vPort %ju PD %u DB %u\n",
1134 	    apc->port_handle, protection_dom_id, doorbell_pg_id);
1135 
1136 out:
1137 	if (err)
1138 		mana_uncfg_vport(apc);
1139 
1140 	return err;
1141 }
1142 
1143 static int
1144 mana_cfg_vport_steering(struct mana_port_context *apc,
1145     enum TRI_STATE rx,
1146     bool update_default_rxobj, bool update_key,
1147     bool update_tab)
1148 {
1149 	uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1150 	struct mana_cfg_rx_steer_req *req = NULL;
1151 	struct mana_cfg_rx_steer_resp resp = {};
1152 	if_t ndev = apc->ndev;
1153 	mana_handle_t *req_indir_tab;
1154 	uint32_t req_buf_size;
1155 	int err;
1156 
1157 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1158 	req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1159 	if (!req)
1160 		return ENOMEM;
1161 
1162 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1163 	    sizeof(resp));
1164 
1165 	req->vport = apc->port_handle;
1166 	req->num_indir_entries = num_entries;
1167 	req->indir_tab_offset = sizeof(*req);
1168 	req->rx_enable = rx;
1169 	req->rss_enable = apc->rss_state;
1170 	req->update_default_rxobj = update_default_rxobj;
1171 	req->update_hashkey = update_key;
1172 	req->update_indir_tab = update_tab;
1173 	req->default_rxobj = apc->default_rxobj;
1174 
1175 	if (update_key)
1176 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1177 
1178 	if (update_tab) {
1179 		req_indir_tab = (mana_handle_t *)(req + 1);
1180 		memcpy(req_indir_tab, apc->rxobj_table,
1181 		       req->num_indir_entries * sizeof(mana_handle_t));
1182 	}
1183 
1184 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1185 	    sizeof(resp));
1186 	if (err) {
1187 		if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1188 		goto out;
1189 	}
1190 
1191 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1192 	    sizeof(resp));
1193 	if (err) {
1194 		if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1195 		goto out;
1196 	}
1197 
1198 	if (resp.hdr.status) {
1199 		if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1200 		    resp.hdr.status);
1201 		err = EPROTO;
1202 	}
1203 
1204 	if_printf(ndev, "Configured steering vPort %ju entries %u\n",
1205 	    apc->port_handle, num_entries);
1206 
1207 out:
1208 	free(req, M_DEVBUF);
1209 	return err;
1210 }
1211 
1212 int
1213 mana_create_wq_obj(struct mana_port_context *apc,
1214     mana_handle_t vport,
1215     uint32_t wq_type, struct mana_obj_spec *wq_spec,
1216     struct mana_obj_spec *cq_spec,
1217     mana_handle_t *wq_obj)
1218 {
1219 	struct mana_create_wqobj_resp resp = {};
1220 	struct mana_create_wqobj_req req = {};
1221 	if_t ndev = apc->ndev;
1222 	int err;
1223 
1224 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1225 	    sizeof(req), sizeof(resp));
1226 	req.vport = vport;
1227 	req.wq_type = wq_type;
1228 	req.wq_gdma_region = wq_spec->gdma_region;
1229 	req.cq_gdma_region = cq_spec->gdma_region;
1230 	req.wq_size = wq_spec->queue_size;
1231 	req.cq_size = cq_spec->queue_size;
1232 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1233 	req.cq_parent_qid = cq_spec->attached_eq;
1234 
1235 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1236 	    sizeof(resp));
1237 	if (err) {
1238 		if_printf(ndev, "Failed to create WQ object: %d\n", err);
1239 		goto out;
1240 	}
1241 
1242 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1243 	    sizeof(resp));
1244 	if (err || resp.hdr.status) {
1245 		if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1246 		    resp.hdr.status);
1247 		if (!err)
1248 			err = EPROTO;
1249 		goto out;
1250 	}
1251 
1252 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1253 		if_printf(ndev, "Got an invalid WQ object handle\n");
1254 		err = EPROTO;
1255 		goto out;
1256 	}
1257 
1258 	*wq_obj = resp.wq_obj;
1259 	wq_spec->queue_index = resp.wq_id;
1260 	cq_spec->queue_index = resp.cq_id;
1261 
1262 	return 0;
1263 out:
1264 	return err;
1265 }
1266 
1267 void
1268 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1269     mana_handle_t wq_obj)
1270 {
1271 	struct mana_destroy_wqobj_resp resp = {};
1272 	struct mana_destroy_wqobj_req req = {};
1273 	if_t ndev = apc->ndev;
1274 	int err;
1275 
1276 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1277 	    sizeof(req), sizeof(resp));
1278 	req.wq_type = wq_type;
1279 	req.wq_obj_handle = wq_obj;
1280 
1281 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1282 	    sizeof(resp));
1283 	if (err) {
1284 		if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1285 		return;
1286 	}
1287 
1288 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1289 	    sizeof(resp));
1290 	if (err || resp.hdr.status)
1291 		if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1292 		    err, resp.hdr.status);
1293 }
1294 
1295 static void
1296 mana_destroy_eq(struct mana_context *ac)
1297 {
1298 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1299 	struct gdma_queue *eq;
1300 	int i;
1301 
1302 	if (!ac->eqs)
1303 		return;
1304 
1305 	for (i = 0; i < gc->max_num_queues; i++) {
1306 		eq = ac->eqs[i].eq;
1307 		if (!eq)
1308 			continue;
1309 
1310 		mana_gd_destroy_queue(gc, eq);
1311 	}
1312 
1313 	free(ac->eqs, M_DEVBUF);
1314 	ac->eqs = NULL;
1315 }
1316 
1317 static int
1318 mana_create_eq(struct mana_context *ac)
1319 {
1320 	struct gdma_dev *gd = ac->gdma_dev;
1321 	struct gdma_context *gc = gd->gdma_context;
1322 	struct gdma_queue_spec spec = {};
1323 	int err;
1324 	int i;
1325 
1326 	ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
1327 	    M_DEVBUF, M_WAITOK | M_ZERO);
1328 	if (!ac->eqs)
1329 		return ENOMEM;
1330 
1331 	spec.type = GDMA_EQ;
1332 	spec.monitor_avl_buf = false;
1333 	spec.queue_size = EQ_SIZE;
1334 	spec.eq.callback = NULL;
1335 	spec.eq.context = ac->eqs;
1336 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1337 
1338 	for (i = 0; i < gc->max_num_queues; i++) {
1339 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1340 		if (err)
1341 			goto out;
1342 	}
1343 
1344 	return 0;
1345 out:
1346 	mana_destroy_eq(ac);
1347 	return err;
1348 }
1349 
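/*
 * Fence one RQ: issue a MANA_FENCE_RQ request and wait up to 10 seconds for
 * the matching CQE_RX_OBJECT_FENCE completion, so that receives already
 * posted to the RQ have drained before the queue is reconfigured or torn
 * down.
 */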
1350 static int
1351 mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1352 {
1353 	struct mana_fence_rq_resp resp = {};
1354 	struct mana_fence_rq_req req = {};
1355 	int err;
1356 
1357 	init_completion(&rxq->fence_event);
1358 
1359 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1360 	    sizeof(req), sizeof(resp));
1361 	req.wq_obj_handle = rxq->rxobj;
1362 
1363 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1364 	    sizeof(resp));
1365 	if (err) {
1366 		if_printf(apc->ndev, "Failed to fence RQ %u: %d\n",
1367 		    rxq->rxq_idx, err);
1368 		return err;
1369 	}
1370 
1371 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1372 	if (err || resp.hdr.status) {
1373 		if_printf(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1374 		    rxq->rxq_idx, err, resp.hdr.status);
1375 		if (!err)
1376 			err = EPROTO;
1377 
1378 		return err;
1379 	}
1380 
1381 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * hz)) {
1382 		if_printf(apc->ndev, "Failed to fence RQ %u: timed out\n",
1383 		    rxq->rxq_idx);
1384 		return ETIMEDOUT;
1385 	}
1386 
1387 	return 0;
1388 }
1389 
1390 static void
1391 mana_fence_rqs(struct mana_port_context *apc)
1392 {
1393 	unsigned int rxq_idx;
1394 	struct mana_rxq *rxq;
1395 	int err;
1396 
1397 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1398 		rxq = apc->rxqs[rxq_idx];
1399 		err = mana_fence_rq(apc, rxq);
1400 
1401 		/* If the fence request failed, fall back to a short sleep. */
1402 		if (err)
1403 			gdma_msleep(100);
1404 	}
1405 }
1406 
1407 static int
1408 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1409 {
1410 	uint32_t used_space_old;
1411 	uint32_t used_space_new;
1412 
1413 	used_space_old = wq->head - wq->tail;
1414 	used_space_new = wq->head - (wq->tail + num_units);
1415 
1416 	if (used_space_new > used_space_old) {
1417 		mana_err(NULL,
1418 		    "WARNING: new used space %u greater than old one %u\n",
1419 		    used_space_new, used_space_old);
1420 		return ERANGE;
1421 	}
1422 
1423 	wq->tail += num_units;
1424 	return 0;
1425 }
1426 
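/*
 * Process TX completions: unmap and free each transmitted mbuf, account the
 * consumed WQE units and advance the SQ tail. If the interface had been
 * marked full (IFF_DRV_OACTIVE) and enough SQ space is available again, the
 * flag is cleared and the enqueue task is rescheduled.
 */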
1427 static void
1428 mana_poll_tx_cq(struct mana_cq *cq)
1429 {
1430 	struct gdma_comp *completions = cq->gdma_comp_buf;
1431 	struct gdma_posted_wqe_info *wqe_info;
1432 	struct mana_send_buf_info *tx_info;
1433 	unsigned int pkt_transmitted = 0;
1434 	unsigned int wqe_unit_cnt = 0;
1435 	struct mana_txq *txq = cq->txq;
1436 	struct mana_port_context *apc;
1437 	uint16_t next_to_complete;
1438 	if_t ndev;
1439 	int comp_read;
1440 	int txq_idx = txq->idx;
1441 	int i;
1442 	int sa_drop = 0;
1443 
1444 	struct gdma_queue *gdma_wq;
1445 	unsigned int avail_space;
1446 	bool txq_full = false;
1447 
1448 	ndev = txq->ndev;
1449 	apc = if_getsoftc(ndev);
1450 
1451 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1452 	    CQE_POLLING_BUFFER);
1453 
1454 	if (comp_read < 1)
1455 		return;
1456 
1457 	next_to_complete = txq->next_to_complete;
1458 
1459 	for (i = 0; i < comp_read; i++) {
1460 		struct mana_tx_comp_oob *cqe_oob;
1461 
1462 		if (!completions[i].is_sq) {
1463 			mana_err(NULL, "WARNING: Not for SQ\n");
1464 			return;
1465 		}
1466 
1467 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1468 		if (cqe_oob->cqe_hdr.client_type !=
1469 				 MANA_CQE_COMPLETION) {
1470 			mana_err(NULL,
1471 			    "WARNING: Invalid CQE client type %u\n",
1472 			    cqe_oob->cqe_hdr.client_type);
1473 			return;
1474 		}
1475 
1476 		switch (cqe_oob->cqe_hdr.cqe_type) {
1477 		case CQE_TX_OKAY:
1478 			break;
1479 
1480 		case CQE_TX_SA_DROP:
1481 		case CQE_TX_MTU_DROP:
1482 		case CQE_TX_INVALID_OOB:
1483 		case CQE_TX_INVALID_ETH_TYPE:
1484 		case CQE_TX_HDR_PROCESSING_ERROR:
1485 		case CQE_TX_VF_DISABLED:
1486 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1487 		case CQE_TX_VPORT_DISABLED:
1488 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1489 			sa_drop++;
1490 			mana_dbg(NULL,
1491 			    "TX: txq %d CQE error %d, ntc = %d, "
1492 			    "pending sends = %d: err ignored.\n",
1493 			    txq_idx, cqe_oob->cqe_hdr.cqe_type,
1494 			    next_to_complete, txq->pending_sends);
1495 			counter_u64_add(txq->stats.cqe_err, 1);
1496 			break;
1497 
1498 		default:
1499 			/* If the CQE type is unknown, log a debug msg,
1500 			 * and still free the mbuf, etc.
1501 			 */
1502 			mana_dbg(NULL,
1503 			    "ERROR: TX: Unknown CQE type %d\n",
1504 			    cqe_oob->cqe_hdr.cqe_type);
1505 			counter_u64_add(txq->stats.cqe_unknown_type, 1);
1506 			break;
1507 		}
1508 		if (txq->gdma_txq_id != completions[i].wq_num) {
1509 			mana_dbg(NULL,
1510 			    "txq gdma id does not match completion wq num: "
1511 			    "%d != %d\n",
1512 			    txq->gdma_txq_id, completions[i].wq_num);
1513 			break;
1514 		}
1515 
1516 		tx_info = &txq->tx_buf_info[next_to_complete];
1517 		if (!tx_info->mbuf) {
1518 			mana_err(NULL,
1519 			    "WARNING: txq %d Empty mbuf on tx_info: %u, "
1520 			    "ntu = %u, pending_sends = %d, "
1521 			    "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1522 			    txq_idx, next_to_complete, txq->next_to_use,
1523 			    txq->pending_sends, pkt_transmitted, sa_drop,
1524 			    i, comp_read);
1525 			break;
1526 		}
1527 
1528 		wqe_info = &tx_info->wqe_inf;
1529 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1530 
1531 		mana_tx_unmap_mbuf(apc, tx_info);
1532 		mb();
1533 
1534 		next_to_complete =
1535 		    (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
1536 
1537 		pkt_transmitted++;
1538 	}
1539 
1540 	txq->next_to_complete = next_to_complete;
1541 
1542 	if (wqe_unit_cnt == 0) {
1543 		mana_err(NULL,
1544 		    "WARNING: TX ring not proceeding!\n");
1545 		return;
1546 	}
1547 
1548 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1549 
1550 	/* Ensure tail updated before checking q stop */
1551 	wmb();
1552 
1553 	gdma_wq = txq->gdma_sq;
1554 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1555 
1556 
1557 	if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1558 		txq_full = true;
1559 	}
1560 
1561 	/* Ensure checking txq_full before apc->port_is_up. */
1562 	rmb();
1563 
1564 	if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1565 		/* Grab the txq lock and re-test */
1566 		mtx_lock(&txq->txq_mtx);
1567 		avail_space = mana_gd_wq_avail_space(gdma_wq);
1568 
1569 		if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1570 		    apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1571 			/* Clear the Q full flag */
1572 			if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1573 			    IFF_DRV_OACTIVE);
1574 			counter_u64_add(txq->stats.wakeup, 1);
1575 			if (txq->alt_txq_idx != txq->idx) {
1576 				uint64_t stops = counter_u64_fetch(txq->stats.stop);
1577 				uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1578 				/* Reset alt_txq_idx back if it is not overloaded */
1579 				if (stops < wakeups) {
1580 					txq->alt_txq_idx = txq->idx;
1581 					counter_u64_add(txq->stats.alt_reset, 1);
1582 				}
1583 			}
1584 			rmb();
1585 			/* Schedule a tx enqueue task */
1586 			taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1587 		}
1588 		mtx_unlock(&txq->txq_mtx);
1589 	}
1590 
1591 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1592 		mana_err(NULL,
1593 		    "WARNING: TX %d pending_sends error: %d\n",
1594 		    txq->idx, txq->pending_sends);
1595 
1596 	cq->work_done = pkt_transmitted;
1597 }
1598 
1599 static void
1600 mana_post_pkt_rxq(struct mana_rxq *rxq)
1601 {
1602 	struct mana_recv_buf_oob *recv_buf_oob;
1603 	uint32_t curr_index;
1604 	int err;
1605 
1606 	curr_index = rxq->buf_index++;
1607 	if (rxq->buf_index == rxq->num_rx_buf)
1608 		rxq->buf_index = 0;
1609 
1610 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1611 
1612 	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1613 	    &recv_buf_oob->wqe_inf);
1614 	if (err) {
1615 		mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1616 		    rxq->rxq_idx, err);
1617 		return;
1618 	}
1619 
1620 	if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1621 		mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1622 		    rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1623 	}
1624 }
1625 
1626 static void
1627 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1628     struct mana_rxq *rxq)
1629 {
1630 	struct mana_stats *rx_stats = &rxq->stats;
1631 	if_t ndev = rxq->ndev;
1632 	uint32_t pkt_len = cqe->ppi[0].pkt_len;
1633 	uint16_t rxq_idx = rxq->rxq_idx;
1634 	struct mana_port_context *apc;
1635 	bool do_lro = false;
1636 	bool do_if_input;
1637 
1638 	apc = if_getsoftc(ndev);
1639 	rxq->rx_cq.work_done++;
1640 
1641 	if (!mbuf) {
1642 		return;
1643 	}
1644 
1645 	mbuf->m_flags |= M_PKTHDR;
1646 	mbuf->m_pkthdr.len = pkt_len;
1647 	mbuf->m_len = pkt_len;
1648 	mbuf->m_pkthdr.rcvif = ndev;
1649 
1650 	if ((if_getcapenable(ndev) & IFCAP_RXCSUM ||
1651 	    if_getcapenable(ndev) & IFCAP_RXCSUM_IPV6) &&
1652 	    (cqe->rx_iphdr_csum_succeed)) {
1653 		mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1654 		mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1655 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1656 			mbuf->m_pkthdr.csum_flags |=
1657 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1658 			mbuf->m_pkthdr.csum_data = 0xffff;
1659 
1660 			if (cqe->rx_tcp_csum_succeed)
1661 				do_lro = true;
1662 		}
1663 	}
1664 
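	/*
	 * Map the NDIS receive hash type reported by the device to the
	 * matching M_HASHTYPE_RSS_* value so the stack can use the flowid
	 * for RSS-aware dispatch; fall back to an opaque hash, or to the
	 * rxq index when no hash is present.
	 */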
1665 	if (cqe->rx_hashtype != 0) {
1666 		mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1667 
1668 		uint16_t hashtype = cqe->rx_hashtype;
1669 		if (hashtype & NDIS_HASH_IPV4_MASK) {
1670 			hashtype &= NDIS_HASH_IPV4_MASK;
1671 			switch (hashtype) {
1672 			case NDIS_HASH_TCP_IPV4:
1673 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1674 				break;
1675 			case NDIS_HASH_UDP_IPV4:
1676 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1677 				break;
1678 			default:
1679 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1680 			}
1681 		} else if (hashtype & NDIS_HASH_IPV6_MASK) {
1682 			hashtype &= NDIS_HASH_IPV6_MASK;
1683 			switch (hashtype) {
1684 			case NDIS_HASH_TCP_IPV6:
1685 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1686 				break;
1687 			case NDIS_HASH_TCP_IPV6_EX:
1688 				M_HASHTYPE_SET(mbuf,
1689 				    M_HASHTYPE_RSS_TCP_IPV6_EX);
1690 				break;
1691 			case NDIS_HASH_UDP_IPV6:
1692 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1693 				break;
1694 			case NDIS_HASH_UDP_IPV6_EX:
1695 				M_HASHTYPE_SET(mbuf,
1696 				    M_HASHTYPE_RSS_UDP_IPV6_EX);
1697 				break;
1698 			default:
1699 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1700 			}
1701 		} else {
1702 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1703 		}
1704 	} else {
1705 		mbuf->m_pkthdr.flowid = rxq_idx;
1706 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1707 	}
1708 
1709 	do_if_input = true;
1710 	if ((if_getcapenable(ndev) & IFCAP_LRO) && do_lro) {
1711 		rxq->lro_tried++;
1712 		if (rxq->lro.lro_cnt != 0 &&
1713 		    tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1714 			do_if_input = false;
1715 		else
1716 			rxq->lro_failed++;
1717 	}
1718 	if (do_if_input) {
1719 		if_input(ndev, mbuf);
1720 	}
1721 
1722 	counter_enter();
1723 	counter_u64_add_protected(rx_stats->packets, 1);
1724 	counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1725 	counter_u64_add_protected(rx_stats->bytes, pkt_len);
1726 	counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1727 	counter_exit();
1728 }
1729 
1730 static void
1731 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1732     struct gdma_comp *cqe)
1733 {
1734 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1735 	struct mana_recv_buf_oob *rxbuf_oob;
1736 	if_t ndev = rxq->ndev;
1737 	struct mana_port_context *apc;
1738 	struct mbuf *old_mbuf;
1739 	uint32_t curr, pktlen;
1740 	int err;
1741 
1742 	switch (oob->cqe_hdr.cqe_type) {
1743 	case CQE_RX_OKAY:
1744 		break;
1745 
1746 	case CQE_RX_TRUNCATED:
1747 		apc = if_getsoftc(ndev);
1748 		counter_u64_add(apc->port_stats.rx_drops, 1);
1749 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1750 		if_printf(ndev, "Dropped a truncated packet\n");
1751 		goto drop;
1752 
1753 	case CQE_RX_COALESCED_4:
1754 		if_printf(ndev, "RX coalescing is unsupported\n");
1755 		return;
1756 
1757 	case CQE_RX_OBJECT_FENCE:
1758 		complete(&rxq->fence_event);
1759 		return;
1760 
1761 	default:
1762 		if_printf(ndev, "Unknown RX CQE type = %d\n",
1763 		    oob->cqe_hdr.cqe_type);
1764 		return;
1765 	}
1766 
1767 	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1768 		return;
1769 
1770 	pktlen = oob->ppi[0].pkt_len;
1771 
1772 	if (pktlen == 0) {
1773 		/* Data packets should never have a packet length of zero. */
1774 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%jx\n",
1775 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1776 		return;
1777 	}
1778 
1779 	curr = rxq->buf_index;
1780 	rxbuf_oob = &rxq->rx_oobs[curr];
1781 	if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1782 		mana_err(NULL, "WARNING: RX incorrect completion "
1783 		    "WQE size %u\n",
1784 		    rxbuf_oob->wqe_inf.wqe_size_in_bu);
1785 	}
1786 
1787 	apc = if_getsoftc(ndev);
1788 
1789 	old_mbuf = rxbuf_oob->mbuf;
1790 
1791 	/* Unload DMA map for the old mbuf */
1792 	mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1793 
1794 	/* Load a new mbuf to replace the old one */
1795 	err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1796 	if (err) {
1797 		mana_dbg(NULL,
1798 		    "failed to load rx mbuf, err = %d, packet dropped.\n",
1799 		    err);
1800 		counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1801 		/*
1802 		 * Failed to load new mbuf, rxbuf_oob->mbuf is still
1803 		 * pointing to the old one. Drop the packet.
1804 		 */
1805 		 old_mbuf = NULL;
1806 		 /* Reload the existing mbuf */
1807 		 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1808 	}
1809 
1810 	mana_rx_mbuf(old_mbuf, oob, rxq);
1811 
1812 drop:
1813 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1814 
1815 	mana_post_pkt_rxq(rxq);
1816 }
1817 
1818 static void
1819 mana_poll_rx_cq(struct mana_cq *cq)
1820 {
1821 	struct gdma_comp *comp = cq->gdma_comp_buf;
1822 	int comp_read, i;
1823 
1824 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1825 	KASSERT(comp_read <= CQE_POLLING_BUFFER,
1826 	    ("comp_read %d great than buf size %d",
1827 	    comp_read, CQE_POLLING_BUFFER));
1828 
1829 	for (i = 0; i < comp_read; i++) {
1830 		if (comp[i].is_sq == true) {
1831 			mana_err(NULL,
1832 			    "WARNING: CQE not for receive queue\n");
1833 			return;
1834 		}
1835 
1836 		/* verify recv cqe references the right rxq */
1837 		if (comp[i].wq_num != cq->rxq->gdma_id) {
1838 			mana_err(NULL,
1839 			    "WARNING: Received CQE %d  not for "
1840 			    "this receive queue %d\n",
1841 			    comp[i].wq_num,  cq->rxq->gdma_id);
1842 			return;
1843 		}
1844 
1845 		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1846 	}
1847 
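	/*
	 * Ring the RQ doorbell once for the whole batch so the hardware
	 * sees the receive WQEs re-posted while the completions above were
	 * being processed.
	 */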
1848 	if (comp_read > 0) {
1849 		struct gdma_context *gc =
1850 		    cq->rxq->gdma_rq->gdma_dev->gdma_context;
1851 
1852 		mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq);
1853 	}
1854 
1855 	tcp_lro_flush_all(&cq->rxq->lro);
1856 }
1857 
1858 static void
1859 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1860 {
1861 	struct mana_cq *cq = context;
1862 	uint8_t arm_bit;
1863 
1864 	KASSERT(cq->gdma_cq == gdma_queue,
1865 	    ("cq do not match %p, %p", cq->gdma_cq, gdma_queue));
1866 
1867 	if (cq->type == MANA_CQ_TYPE_RX) {
1868 		mana_poll_rx_cq(cq);
1869 	} else {
1870 		mana_poll_tx_cq(cq);
1871 	}
1872 
1873 	if (cq->work_done < cq->budget && cq->do_not_ring_db == false)
1874 		arm_bit = SET_ARM_BIT;
1875 	else
1876 		arm_bit = 0;
1877 
1878 	mana_gd_ring_cq(gdma_queue, arm_bit);
1879 }
1880 
1881 #define MANA_POLL_BUDGET	8
1882 #define MANA_RX_BUDGET		256
1883 #define MANA_TX_BUDGET		MAX_SEND_BUFFERS_PER_QUEUE
1884 
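/*
 * Cleanup task body: poll the CQ repeatedly, up to MANA_POLL_BUDGET passes,
 * as long as each pass exhausts its RX/TX budget.  The final pass uses an
 * oversized budget so that mana_cq_handler() always re-arms the CQ before
 * the task returns.
 */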
1885 static void
1886 mana_poll(void *arg, int pending)
1887 {
1888 	struct mana_cq *cq = arg;
1889 	int i;
1890 
1891 	cq->work_done = 0;
1892 	if (cq->type == MANA_CQ_TYPE_RX) {
1893 		cq->budget = MANA_RX_BUDGET;
1894 	} else {
1895 		cq->budget = MANA_TX_BUDGET;
1896 	}
1897 
1898 	for (i = 0; i < MANA_POLL_BUDGET; i++) {
1899 		/*
1900 		 * If this is the last iteration, set the budget large enough
1901 		 * so that the CQ will be re-armed in any case.
1902 		 */
1903 		if (i == (MANA_POLL_BUDGET - 1))
1904 			cq->budget = CQE_POLLING_BUFFER + 1;
1905 
1906 		mana_cq_handler(cq, cq->gdma_cq);
1907 
1908 		if (cq->work_done < cq->budget)
1909 			break;
1910 
1911 		cq->work_done = 0;
1912 	}
1913 }
1914 
1915 static void
1916 mana_schedule_task(void *arg, struct gdma_queue *gdma_queue)
1917 {
1918 	struct mana_cq *cq = arg;
1919 
1920 	taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
1921 }
1922 
1923 static void
1924 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1925 {
1926 	struct gdma_dev *gd = apc->ac->gdma_dev;
1927 
1928 	if (!cq->gdma_cq)
1929 		return;
1930 
1931 	/* Drain cleanup taskqueue */
1932 	if (cq->cleanup_tq) {
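		/*
		 * taskqueue_cancel() returns non-zero while the task is
		 * still running; keep draining until it is neither queued
		 * nor running.
		 */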
1933 		while (taskqueue_cancel(cq->cleanup_tq,
1934 		    &cq->cleanup_task, NULL)) {
1935 			taskqueue_drain(cq->cleanup_tq,
1936 			    &cq->cleanup_task);
1937 		}
1938 
1939 		taskqueue_free(cq->cleanup_tq);
1940 	}
1941 
1942 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1943 }
1944 
1945 static void
1946 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1947 {
1948 	struct gdma_dev *gd = apc->ac->gdma_dev;
1949 	struct mana_send_buf_info *txbuf_info;
1950 	uint32_t pending_sends;
1951 	int i;
1952 
1953 	if (!txq->gdma_sq)
1954 		return;
1955 
1956 	if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
1957 		mana_err(NULL,
1958 		    "WARNING: txq pending sends not zero: %u\n",
1959 		    pending_sends);
1960 	}
1961 
1962 	if (txq->next_to_use != txq->next_to_complete) {
1963 		mana_err(NULL,
1964 		    "WARNING: txq buf not completed, "
1965 		    "next use %u, next complete %u\n",
1966 		    txq->next_to_use, txq->next_to_complete);
1967 	}
1968 
1969 	/* Flush buf ring. Grab txq mtx lock */
1970 	if (txq->txq_br) {
1971 		mtx_lock(&txq->txq_mtx);
1972 		drbr_flush(apc->ndev, txq->txq_br);
1973 		mtx_unlock(&txq->txq_mtx);
1974 		buf_ring_free(txq->txq_br, M_DEVBUF);
1975 	}
1976 
1977 	/* Drain taskqueue */
1978 	if (txq->enqueue_tq) {
1979 		while (taskqueue_cancel(txq->enqueue_tq,
1980 		    &txq->enqueue_task, NULL)) {
1981 			taskqueue_drain(txq->enqueue_tq,
1982 			    &txq->enqueue_task);
1983 		}
1984 
1985 		taskqueue_free(txq->enqueue_tq);
1986 	}
1987 
1988 	if (txq->tx_buf_info) {
1989 		/* Free all mbufs which are still in-flight */
1990 		for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
1991 			txbuf_info = &txq->tx_buf_info[i];
1992 			if (txbuf_info->mbuf) {
1993 				mana_tx_unmap_mbuf(apc, txbuf_info);
1994 			}
1995 		}
1996 
1997 		free(txq->tx_buf_info, M_DEVBUF);
1998 	}
1999 
2000 	mana_free_counters((counter_u64_t *)&txq->stats,
2001 	    sizeof(txq->stats));
2002 
2003 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
2004 
2005 	mtx_destroy(&txq->txq_mtx);
2006 }
2007 
2008 static void
2009 mana_destroy_txq(struct mana_port_context *apc)
2010 {
2011 	int i;
2012 
2013 	if (!apc->tx_qp)
2014 		return;
2015 
2016 	for (i = 0; i < apc->num_queues; i++) {
2017 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2018 
2019 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2020 
2021 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2022 	}
2023 
2024 	free(apc->tx_qp, M_DEVBUF);
2025 	apc->tx_qp = NULL;
2026 }
2027 
2028 static int
2029 mana_create_txq(struct mana_port_context *apc, if_t net)
2030 {
2031 	struct mana_context *ac = apc->ac;
2032 	struct gdma_dev *gd = ac->gdma_dev;
2033 	struct mana_obj_spec wq_spec;
2034 	struct mana_obj_spec cq_spec;
2035 	struct gdma_queue_spec spec;
2036 	struct gdma_context *gc;
2037 	struct mana_txq *txq;
2038 	struct mana_cq *cq;
2039 	uint32_t txq_size;
2040 	uint32_t cq_size;
2041 	int err;
2042 	int i;
2043 
2044 	apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
2045 	    M_DEVBUF, M_WAITOK | M_ZERO);
2046 	if (!apc->tx_qp)
2047 		return ENOMEM;
2048 
2049 	/* The minimum size of the WQE is 32 bytes, hence
2050 	 * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
2051 	 * the SQ can store. This value is then used to size other queues
2052 	 * to prevent overflow.
2053 	 */
2054 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
2055 	KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
2056 	    ("txq size not page aligned"));
2057 
2058 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
2059 	cq_size = ALIGN(cq_size, PAGE_SIZE);
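	/*
	 * The CQ is sized for one completion entry per send WQE
	 * (MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE, page aligned),
	 * so it cannot overflow.
	 */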
2060 
2061 	gc = gd->gdma_context;
2062 
2063 	for (i = 0; i < apc->num_queues; i++) {
2064 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2065 
2066 		/* Create SQ */
2067 		txq = &apc->tx_qp[i].txq;
2068 
2069 		txq->ndev = net;
2070 		txq->vp_offset = apc->tx_vp_offset;
2071 		txq->idx = i;
2072 		txq->alt_txq_idx = i;
2073 
2074 		memset(&spec, 0, sizeof(spec));
2075 		spec.type = GDMA_SQ;
2076 		spec.monitor_avl_buf = true;
2077 		spec.queue_size = txq_size;
2078 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2079 		if (err)
2080 			goto out;
2081 
2082 		/* Create SQ's CQ */
2083 		cq = &apc->tx_qp[i].tx_cq;
2084 		cq->type = MANA_CQ_TYPE_TX;
2085 
2086 		cq->txq = txq;
2087 
2088 		memset(&spec, 0, sizeof(spec));
2089 		spec.type = GDMA_CQ;
2090 		spec.monitor_avl_buf = false;
2091 		spec.queue_size = cq_size;
2092 		spec.cq.callback = mana_schedule_task;
2093 		spec.cq.parent_eq = ac->eqs[i].eq;
2094 		spec.cq.context = cq;
2095 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2096 		if (err)
2097 			goto out;
2098 
2099 		memset(&wq_spec, 0, sizeof(wq_spec));
2100 		memset(&cq_spec, 0, sizeof(cq_spec));
2101 
2102 		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2103 		wq_spec.queue_size = txq->gdma_sq->queue_size;
2104 
2105 		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2106 		cq_spec.queue_size = cq->gdma_cq->queue_size;
2107 		cq_spec.modr_ctx_id = 0;
2108 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2109 
2110 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2111 		    &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
2112 
2113 		if (err)
2114 			goto out;
2115 
2116 		txq->gdma_sq->id = wq_spec.queue_index;
2117 		cq->gdma_cq->id = cq_spec.queue_index;
2118 
2119 		txq->gdma_sq->mem_info.dma_region_handle =
2120 		    GDMA_INVALID_DMA_REGION;
2121 		cq->gdma_cq->mem_info.dma_region_handle =
2122 		    GDMA_INVALID_DMA_REGION;
2123 
2124 		txq->gdma_txq_id = txq->gdma_sq->id;
2125 
2126 		cq->gdma_id = cq->gdma_cq->id;
2127 
2128 		mana_dbg(NULL,
2129 		    "txq %d, txq gdma id %d, txq cq gdma id %d\n",
2130 		    i, txq->gdma_txq_id, cq->gdma_id);
2131 
2132 		if (cq->gdma_id >= gc->max_num_cqs) {
2133 			if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
2134 			err = EINVAL;
2135 			goto out;
2136 		}
2137 
2138 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2139 
2140 		/* Initialize tx specific data */
2141 		txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
2142 		    sizeof(struct mana_send_buf_info),
2143 		    M_DEVBUF, M_WAITOK | M_ZERO);
2144 		if (unlikely(txq->tx_buf_info == NULL)) {
2145 			if_printf(net,
2146 			    "Failed to allocate tx buf info for SQ %u\n",
2147 			    txq->gdma_sq->id);
2148 			err = ENOMEM;
2149 			goto out;
2150 		}
2151 
2152 
2153 		snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
2154 		    "mana:tx(%d)", i);
2155 		mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
2156 
2157 		txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
2158 		    M_DEVBUF, M_WAITOK, &txq->txq_mtx);
2159 		if (unlikely(txq->txq_br == NULL)) {
2160 			if_printf(net,
2161 			    "Failed to allocate buf ring for SQ %u\n",
2162 			    txq->gdma_sq->id);
2163 			err = ENOMEM;
2164 			goto out;
2165 		}
2166 
2167 		/* Allocate taskqueue for deferred send */
2168 		TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
2169 		txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
2170 		    M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
2171 		if (unlikely(txq->enqueue_tq == NULL)) {
2172 			if_printf(net,
2173 			    "Unable to create tx %d enqueue task queue\n", i);
2174 			err = ENOMEM;
2175 			goto out;
2176 		}
2177 		taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
2178 		    "mana txq p%u-tx%d", apc->port_idx, i);
2179 
2180 		mana_alloc_counters((counter_u64_t *)&txq->stats,
2181 		    sizeof(txq->stats));
2182 
2183 		/* Allocate and start the cleanup task on CQ */
2184 		cq->do_not_ring_db = false;
2185 
2186 		NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2187 		cq->cleanup_tq =
2188 		    taskqueue_create_fast("mana tx cq cleanup",
2189 		    M_WAITOK, taskqueue_thread_enqueue,
2190 		    &cq->cleanup_tq);
2191 
2192 		if (apc->last_tx_cq_bind_cpu < 0)
2193 			apc->last_tx_cq_bind_cpu = CPU_FIRST();
2194 		cq->cpu = apc->last_tx_cq_bind_cpu;
2195 		apc->last_tx_cq_bind_cpu = CPU_NEXT(apc->last_tx_cq_bind_cpu);
2196 
2197 		if (apc->bind_cleanup_thread_cpu) {
2198 			cpuset_t cpu_mask;
2199 			CPU_SETOF(cq->cpu, &cpu_mask);
2200 			taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2201 			    1, PI_NET, &cpu_mask,
2202 			    "mana cq p%u-tx%u-cpu%d",
2203 			    apc->port_idx, txq->idx, cq->cpu);
2204 		} else {
2205 			taskqueue_start_threads(&cq->cleanup_tq, 1,
2206 			    PI_NET, "mana cq p%u-tx%u",
2207 			    apc->port_idx, txq->idx);
2208 		}
2209 
2210 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2211 	}
2212 
2213 	return 0;
2214 out:
2215 	mana_destroy_txq(apc);
2216 	return err;
2217 }
2218 
2219 static void
2220 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
2221     bool validate_state)
2222 {
2223 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2224 	struct mana_recv_buf_oob *rx_oob;
2225 	int i;
2226 
2227 	if (!rxq)
2228 		return;
2229 
2230 	if (validate_state) {
2231 		/*
2232 		 * XXX Cancel and drain cleanup task queue here.
2233 		 */
2234 		;
2235 	}
2236 
2237 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2238 
2239 	mana_deinit_cq(apc, &rxq->rx_cq);
2240 
2241 	mana_free_counters((counter_u64_t *)&rxq->stats,
2242 	    sizeof(rxq->stats));
2243 
2244 	/* Free LRO resources */
2245 	tcp_lro_free(&rxq->lro);
2246 
2247 	for (i = 0; i < rxq->num_rx_buf; i++) {
2248 		rx_oob = &rxq->rx_oobs[i];
2249 
2250 		if (rx_oob->mbuf)
2251 			mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
2252 
2253 		bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2254 	}
2255 
2256 	if (rxq->gdma_rq)
2257 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2258 
2259 	free(rxq, M_DEVBUF);
2260 }
2261 
2262 #define MANA_WQE_HEADER_SIZE 16
2263 #define MANA_WQE_SGE_SIZE 16
2264 
2265 static int
2266 mana_alloc_rx_wqe(struct mana_port_context *apc,
2267     struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2268 {
2269 	struct mana_recv_buf_oob *rx_oob;
2270 	uint32_t buf_idx;
2271 	int err;
2272 
2273 	if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2274 		mana_err(NULL,
2275 		    "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2276 	}
2277 
2278 	*rxq_size = 0;
2279 	*cq_size = 0;
2280 
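	/*
	 * For every receive buffer: create a DMA map, attach an mbuf, and
	 * account for one WQE (header plus SGEs, rounded up to 32 bytes)
	 * in the RQ size and one completion entry in the CQ size.  With a
	 * single SGE this is ALIGN(16 + 16, 32) = 32 bytes per WQE.
	 */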
2281 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2282 		rx_oob = &rxq->rx_oobs[buf_idx];
2283 		memset(rx_oob, 0, sizeof(*rx_oob));
2284 
2285 		err = bus_dmamap_create(apc->rx_buf_tag, 0,
2286 		    &rx_oob->dma_map);
2287 		if (err) {
2288 			mana_err(NULL,
2289 			    "Failed to  create rx DMA map for buf %d\n",
2290 			    buf_idx);
2291 			return err;
2292 		}
2293 
2294 		err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2295 		if (err) {
2296 			mana_err(NULL,
2297 			    "Failed to  create rx DMA map for buf %d\n",
2298 			    buf_idx);
2299 			bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2300 			return err;
2301 		}
2302 
2303 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2304 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2305 		rx_oob->wqe_req.inline_oob_size = 0;
2306 		rx_oob->wqe_req.inline_oob_data = NULL;
2307 		rx_oob->wqe_req.flags = 0;
2308 		rx_oob->wqe_req.client_data_unit = 0;
2309 
2310 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2311 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2312 		*cq_size += COMP_ENTRY_SIZE;
2313 	}
2314 
2315 	return 0;
2316 }
2317 
2318 static int
2319 mana_push_wqe(struct mana_rxq *rxq)
2320 {
2321 	struct mana_recv_buf_oob *rx_oob;
2322 	uint32_t buf_idx;
2323 	int err;
2324 
2325 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2326 		rx_oob = &rxq->rx_oobs[buf_idx];
2327 
2328 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2329 		    &rx_oob->wqe_inf);
2330 		if (err)
2331 			return ENOSPC;
2332 	}
2333 
2334 	return 0;
2335 }
2336 
2337 static struct mana_rxq *
2338 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2339     struct mana_eq *eq, if_t ndev)
2340 {
2341 	struct gdma_dev *gd = apc->ac->gdma_dev;
2342 	struct mana_obj_spec wq_spec;
2343 	struct mana_obj_spec cq_spec;
2344 	struct gdma_queue_spec spec;
2345 	struct mana_cq *cq = NULL;
2346 	uint32_t cq_size, rq_size;
2347 	struct gdma_context *gc;
2348 	struct mana_rxq *rxq;
2349 	int err;
2350 
2351 	gc = gd->gdma_context;
2352 
2353 	rxq = malloc(sizeof(*rxq) +
2354 	    RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
2355 	    M_DEVBUF, M_WAITOK | M_ZERO);
2356 	if (!rxq)
2357 		return NULL;
2358 
2359 	rxq->ndev = ndev;
2360 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2361 	rxq->rxq_idx = rxq_idx;
2362 	/*
2363 	 * The minimum size is MCLBYTES (2048) bytes for an mbuf cluster.
2364 	 * For now we only allow a maximum size of 4096 bytes.
2365 	 */
2366 	rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2367 	if (rxq->datasize > MAX_FRAME_SIZE)
2368 		rxq->datasize = MAX_FRAME_SIZE;
2369 
2370 	mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2371 	    rxq_idx, rxq->datasize);
2372 
2373 	rxq->rxobj = INVALID_MANA_HANDLE;
2374 
2375 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2376 	if (err)
2377 		goto out;
2378 
2379 	/* Create LRO for the RQ */
2380 	if (if_getcapenable(ndev) & IFCAP_LRO) {
2381 		err = tcp_lro_init(&rxq->lro);
2382 		if (err) {
2383 			if_printf(ndev, "Failed to create LRO for rxq %d\n",
2384 			    rxq_idx);
2385 		} else {
2386 			rxq->lro.ifp = ndev;
2387 		}
2388 	}
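	/*
	 * An LRO setup failure is not fatal; the rxq simply delivers every
	 * packet individually through if_input().
	 */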
2389 
2390 	mana_alloc_counters((counter_u64_t *)&rxq->stats,
2391 	    sizeof(rxq->stats));
2392 
2393 	rq_size = ALIGN(rq_size, PAGE_SIZE);
2394 	cq_size = ALIGN(cq_size, PAGE_SIZE);
2395 
2396 	/* Create RQ */
2397 	memset(&spec, 0, sizeof(spec));
2398 	spec.type = GDMA_RQ;
2399 	spec.monitor_avl_buf = true;
2400 	spec.queue_size = rq_size;
2401 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2402 	if (err)
2403 		goto out;
2404 
2405 	/* Create RQ's CQ */
2406 	cq = &rxq->rx_cq;
2407 	cq->type = MANA_CQ_TYPE_RX;
2408 	cq->rxq = rxq;
2409 
2410 	memset(&spec, 0, sizeof(spec));
2411 	spec.type = GDMA_CQ;
2412 	spec.monitor_avl_buf = false;
2413 	spec.queue_size = cq_size;
2414 	spec.cq.callback = mana_schedule_task;
2415 	spec.cq.parent_eq = eq->eq;
2416 	spec.cq.context = cq;
2417 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2418 	if (err)
2419 		goto out;
2420 
2421 	memset(&wq_spec, 0, sizeof(wq_spec));
2422 	memset(&cq_spec, 0, sizeof(cq_spec));
2423 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2424 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2425 
2426 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2427 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2428 	cq_spec.modr_ctx_id = 0;
2429 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2430 
2431 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2432 	    &wq_spec, &cq_spec, &rxq->rxobj);
2433 	if (err)
2434 		goto out;
2435 
2436 	rxq->gdma_rq->id = wq_spec.queue_index;
2437 	cq->gdma_cq->id = cq_spec.queue_index;
2438 
2439 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2440 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2441 
2442 	rxq->gdma_id = rxq->gdma_rq->id;
2443 	cq->gdma_id = cq->gdma_cq->id;
2444 
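	/*
	 * Post one receive WQE for every buffer and ring the RQ doorbell so
	 * the queue is fully stocked before it is put into service.
	 */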
2445 	err = mana_push_wqe(rxq);
2446 	if (err)
2447 		goto out;
2448 
2449 	if (cq->gdma_id >= gc->max_num_cqs) {
2450 		err = EINVAL;
2451 		goto out;
2452 	}
2453 
2454 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2455 
2456 	/* Allocate and start the cleanup task on CQ */
2457 	cq->do_not_ring_db = false;
2458 
2459 	NET_TASK_INIT(&cq->cleanup_task, 0, mana_poll, cq);
2460 	cq->cleanup_tq =
2461 	    taskqueue_create_fast("mana rx cq cleanup",
2462 	    M_WAITOK, taskqueue_thread_enqueue,
2463 	    &cq->cleanup_tq);
2464 
2465 	if (apc->last_rx_cq_bind_cpu < 0)
2466 		apc->last_rx_cq_bind_cpu = CPU_FIRST();
2467 	cq->cpu = apc->last_rx_cq_bind_cpu;
2468 	apc->last_rx_cq_bind_cpu = CPU_NEXT(apc->last_rx_cq_bind_cpu);
2469 
2470 	if (apc->bind_cleanup_thread_cpu) {
2471 		cpuset_t cpu_mask;
2472 		CPU_SETOF(cq->cpu, &cpu_mask);
2473 		taskqueue_start_threads_cpuset(&cq->cleanup_tq,
2474 		    1, PI_NET, &cpu_mask,
2475 		    "mana cq p%u-rx%u-cpu%d",
2476 		    apc->port_idx, rxq->rxq_idx, cq->cpu);
2477 	} else {
2478 		taskqueue_start_threads(&cq->cleanup_tq, 1,
2479 		    PI_NET, "mana cq p%u-rx%u",
2480 		    apc->port_idx, rxq->rxq_idx);
2481 	}
2482 
2483 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2484 out:
2485 	if (!err)
2486 		return rxq;
2487 
2488 	if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2489 
2490 	mana_destroy_rxq(apc, rxq, false);
2491 
2492 	if (cq)
2493 		mana_deinit_cq(apc, cq);
2494 
2495 	return NULL;
2496 }
2497 
2498 static int
2499 mana_add_rx_queues(struct mana_port_context *apc, if_t ndev)
2500 {
2501 	struct mana_context *ac = apc->ac;
2502 	struct mana_rxq *rxq;
2503 	int err = 0;
2504 	int i;
2505 
2506 	for (i = 0; i < apc->num_queues; i++) {
2507 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2508 		if (!rxq) {
2509 			err = ENOMEM;
2510 			goto out;
2511 		}
2512 
2513 		apc->rxqs[i] = rxq;
2514 	}
2515 
2516 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2517 out:
2518 	return err;
2519 }
2520 
2521 static void
2522 mana_destroy_vport(struct mana_port_context *apc)
2523 {
2524 	struct mana_rxq *rxq;
2525 	uint32_t rxq_idx;
2526 
2527 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2528 		rxq = apc->rxqs[rxq_idx];
2529 		if (!rxq)
2530 			continue;
2531 
2532 		mana_destroy_rxq(apc, rxq, true);
2533 		apc->rxqs[rxq_idx] = NULL;
2534 	}
2535 
2536 	mana_destroy_txq(apc);
2537 
2538 	mana_uncfg_vport(apc);
2539 }
2540 
2541 static int
2542 mana_create_vport(struct mana_port_context *apc, if_t net)
2543 {
2544 	struct gdma_dev *gd = apc->ac->gdma_dev;
2545 	int err;
2546 
2547 	apc->default_rxobj = INVALID_MANA_HANDLE;
2548 
2549 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2550 	if (err)
2551 		return err;
2552 
2553 	return mana_create_txq(apc, net);
2554 }
2555 
2556 
2557 static void mana_rss_table_init(struct mana_port_context *apc)
2558 {
2559 	int i;
2560 
2561 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2562 		apc->indir_table[i] = i % apc->num_queues;
2563 }
2564 
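/*
 * Push the RSS configuration to the hardware.  When update_tab is set, the
 * indirection table entries are translated into the rxobj handles of the
 * corresponding RX queues before the vPort steering request is issued;
 * mana_fence_rqs() then issues RX object fences and waits for their
 * completions (see CQE_RX_OBJECT_FENCE handling above).
 */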
2565 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2566 		    bool update_hash, bool update_tab)
2567 {
2568 	uint32_t queue_idx;
2569 	int err;
2570 	int i;
2571 
2572 	if (update_tab) {
2573 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2574 			queue_idx = apc->indir_table[i];
2575 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2576 		}
2577 	}
2578 
2579 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2580 	if (err)
2581 		return err;
2582 
2583 	mana_fence_rqs(apc);
2584 
2585 	return 0;
2586 }
2587 
2588 static int
2589 mana_init_port(if_t ndev)
2590 {
2591 	struct mana_port_context *apc = if_getsoftc(ndev);
2592 	uint32_t max_txq, max_rxq, max_queues;
2593 	int port_idx = apc->port_idx;
2594 	uint32_t num_indirect_entries;
2595 	int err;
2596 
2597 	err = mana_init_port_context(apc);
2598 	if (err)
2599 		return err;
2600 
2601 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2602 	    &num_indirect_entries);
2603 	if (err) {
2604 		if_printf(ndev, "Failed to query info for vPort %d\n",
2605 		    port_idx);
2606 		goto reset_apc;
2607 	}
2608 
2609 	max_queues = min_t(uint32_t, max_txq, max_rxq);
2610 	if (apc->max_queues > max_queues)
2611 		apc->max_queues = max_queues;
2612 
2613 	if (apc->num_queues > apc->max_queues)
2614 		apc->num_queues = apc->max_queues;
2615 
2616 	return 0;
2617 
2618 reset_apc:
2619 	bus_dma_tag_destroy(apc->rx_buf_tag);
2620 	apc->rx_buf_tag = NULL;
2621 	free(apc->rxqs, M_DEVBUF);
2622 	apc->rxqs = NULL;
2623 	return err;
2624 }
2625 
2626 int
2627 mana_alloc_queues(if_t ndev)
2628 {
2629 	struct mana_port_context *apc = if_getsoftc(ndev);
2630 	int err;
2631 
2632 	err = mana_create_vport(apc, ndev);
2633 	if (err)
2634 		return err;
2635 
2636 	err = mana_add_rx_queues(apc, ndev);
2637 	if (err)
2638 		goto destroy_vport;
2639 
2640 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2641 
2642 	mana_rss_table_init(apc);
2643 
2644 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2645 	if (err)
2646 		goto destroy_vport;
2647 
2648 	return 0;
2649 
2650 destroy_vport:
2651 	mana_destroy_vport(apc);
2652 	return err;
2653 }
2654 
2655 static int
2656 mana_up(struct mana_port_context *apc)
2657 {
2658 	int err;
2659 
2660 	mana_dbg(NULL, "mana_up called\n");
2661 
2662 	err = mana_alloc_queues(apc->ndev);
2663 	if (err) {
2664 		mana_err(NULL, "Faile alloc mana queues: %d\n", err);
2665 		return err;
2666 	}
2667 
2668 	/* Add queue specific sysctl */
2669 	mana_sysctl_add_queues(apc);
2670 
2671 	apc->port_is_up = true;
2672 
2673 	/* Ensure port state updated before txq state */
2674 	wmb();
2675 
2676 	if_link_state_change(apc->ndev, LINK_STATE_UP);
2677 	if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2678 
2679 	return 0;
2680 }
2681 
2682 
2683 static void
2684 mana_init(void *arg)
2685 {
2686 	struct mana_port_context *apc = (struct mana_port_context *)arg;
2687 
2688 	MANA_APC_LOCK_LOCK(apc);
2689 	if (!apc->port_is_up) {
2690 		mana_up(apc);
2691 	}
2692 	MANA_APC_LOCK_UNLOCK(apc);
2693 }
2694 
2695 static int
2696 mana_dealloc_queues(if_t ndev)
2697 {
2698 	struct mana_port_context *apc = if_getsoftc(ndev);
2699 	struct mana_txq *txq;
2700 	int i, err;
2701 
2702 	if (apc->port_is_up)
2703 		return EINVAL;
2704 
2705 	/* No packet can be transmitted now since apc->port_is_up is false.
2706 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2707 	 * a txq because it may not see apc->port_is_up being cleared to
2708 	 * false in time, but that does not matter since mana_start_xmit()
2709 	 * drops any new packets while apc->port_is_up is false.
2710 	 *
2711 	 * Drain all the in-flight TX packets
2712 	 */
2713 	for (i = 0; i < apc->num_queues; i++) {
2714 		txq = &apc->tx_qp[i].txq;
2715 
2716 		struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2717 		struct mana_cq *rx_cq = &(apc->rxqs[i]->rx_cq);
2718 
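		/*
		 * With do_not_ring_db set, mana_cq_handler() will not re-arm
		 * these CQs, so once the cleanup pass scheduled below drains
		 * the pending work no further completion interrupts are
		 * generated.
		 */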
2719 		tx_cq->do_not_ring_db = true;
2720 		rx_cq->do_not_ring_db = true;
2721 
2722 		/* Schedule a cleanup task */
2723 		taskqueue_enqueue(tx_cq->cleanup_tq, &tx_cq->cleanup_task);
2724 
2725 		while (atomic_read(&txq->pending_sends) > 0)
2726 			usleep_range(1000, 2000);
2727 	}
2728 
2729 	/* At this point the queues can no longer be woken up, because
2730 	 * mana_poll_tx_cq() is guaranteed not to be running anymore.
2731 	 */
2732 
2733 	apc->rss_state = TRI_STATE_FALSE;
2734 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2735 	if (err) {
2736 		if_printf(ndev, "Failed to disable vPort: %d\n", err);
2737 		return err;
2738 	}
2739 
2740 	mana_destroy_vport(apc);
2741 
2742 	return 0;
2743 }
2744 
2745 static int
2746 mana_down(struct mana_port_context *apc)
2747 {
2748 	int err = 0;
2749 
2750 	apc->port_st_save = apc->port_is_up;
2751 	apc->port_is_up = false;
2752 
2753 	/* Ensure port state updated before txq state */
2754 	wmb();
2755 
2756 	if (apc->port_st_save) {
2757 		if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2758 		    IFF_DRV_RUNNING);
2759 		if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2760 
2761 		mana_sysctl_free_queues(apc);
2762 
2763 		err = mana_dealloc_queues(apc->ndev);
2764 		if (err) {
2765 			if_printf(apc->ndev,
2766 			    "Failed to bring down mana interface: %d\n", err);
2767 		}
2768 	}
2769 
2770 	return err;
2771 }
2772 
2773 int
2774 mana_detach(if_t ndev)
2775 {
2776 	struct mana_port_context *apc = if_getsoftc(ndev);
2777 	int err;
2778 
2779 	ether_ifdetach(ndev);
2780 
2781 	if (!apc)
2782 		return 0;
2783 
2784 	MANA_APC_LOCK_LOCK(apc);
2785 	err = mana_down(apc);
2786 	MANA_APC_LOCK_UNLOCK(apc);
2787 
2788 	mana_cleanup_port_context(apc);
2789 
2790 	MANA_APC_LOCK_DESTROY(apc);
2791 
2792 	free(apc, M_DEVBUF);
2793 
2794 	return err;
2795 }
2796 
2797 static int
2798 mana_probe_port(struct mana_context *ac, int port_idx,
2799     if_t *ndev_storage)
2800 {
2801 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2802 	struct mana_port_context *apc;
2803 	uint32_t hwassist;
2804 	if_t ndev;
2805 	int err;
2806 
2807 	ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2808 	if (!ndev) {
2809 		mana_err(NULL, "Failed to allocate ifnet struct\n");
2810 		return ENOMEM;
2811 	}
2812 
2813 	*ndev_storage = ndev;
2814 
2815 	apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2816 	if (!apc) {
2817 		mana_err(NULL, "Failed to allocate port context\n");
2818 		err = ENOMEM;
2819 		goto free_net;
2820 	}
2821 
2822 	apc->ac = ac;
2823 	apc->ndev = ndev;
2824 	apc->max_queues = gc->max_num_queues;
2825 	apc->num_queues = min_t(unsigned int,
2826 	    gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2827 	apc->port_handle = INVALID_MANA_HANDLE;
2828 	apc->port_idx = port_idx;
2829 	apc->frame_size = DEFAULT_FRAME_SIZE;
2830 	apc->last_tx_cq_bind_cpu = -1;
2831 	apc->last_rx_cq_bind_cpu = -1;
2832 	apc->vport_use_count = 0;
2833 
2834 	MANA_APC_LOCK_INIT(apc);
2835 
2836 	if_initname(ndev, device_get_name(gc->dev), port_idx);
2837 	if_setdev(ndev, gc->dev);
2838 	if_setsoftc(ndev, apc);
2839 
2840 	if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2841 	if_setinitfn(ndev, mana_init);
2842 	if_settransmitfn(ndev, mana_start_xmit);
2843 	if_setqflushfn(ndev, mana_qflush);
2844 	if_setioctlfn(ndev, mana_ioctl);
2845 	if_setgetcounterfn(ndev, mana_get_counter);
2846 
2847 	if_setmtu(ndev, ETHERMTU);
2848 	if_setbaudrate(ndev, IF_Gbps(100));
2849 
2850 	mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2851 
2852 	err = mana_init_port(ndev);
2853 	if (err)
2854 		goto reset_apc;
2855 
2856 	if_setcapabilitiesbit(ndev,
2857 	    IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 |
2858 	    IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 |
2859 	    IFCAP_TSO4 | IFCAP_TSO6 |
2860 	    IFCAP_LRO | IFCAP_LINKSTATE, 0);
2861 
2862 	/* Enable all available capabilities by default. */
2863 	if_setcapenable(ndev, if_getcapabilities(ndev));
2864 
2865 	/* TSO parameters */
2866 	if_sethwtsomax(ndev, MANA_TSO_MAX_SZ -
2867 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2868 	if_sethwtsomaxsegcount(ndev, MAX_MBUF_FRAGS);
2869 	if_sethwtsomaxsegsize(ndev, PAGE_SIZE);
2870 
2871 	hwassist = 0;
2872 	if (if_getcapenable(ndev) & (IFCAP_TSO4 | IFCAP_TSO6))
2873 		hwassist |= CSUM_TSO;
2874 	if (if_getcapenable(ndev) & IFCAP_TXCSUM)
2875 		hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
2876 	if (if_getcapenable(ndev) & IFCAP_TXCSUM_IPV6)
2877 		hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
2878 	mana_dbg(NULL, "set hwassist 0x%x\n", hwassist);
2879 	if_sethwassist(ndev, hwassist);
2880 
2881 	ifmedia_init(&apc->media, IFM_IMASK,
2882 	    mana_ifmedia_change, mana_ifmedia_status);
2883 	ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2884 	ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2885 
2886 	ether_ifattach(ndev, apc->mac_addr);
2887 
2888 	/* Initialize statistics */
2889 	mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2890 	    sizeof(struct mana_port_stats));
2891 	mana_sysctl_add_port(apc);
2892 
2893 	/* Tell the stack that the interface is not active */
2894 	if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2895 
2896 	return 0;
2897 
2898 reset_apc:
2899 	free(apc, M_DEVBUF);
2900 free_net:
2901 	*ndev_storage = NULL;
2902 	if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2903 	if_free(ndev);
2904 	return err;
2905 }
2906 
2907 int mana_probe(struct gdma_dev *gd)
2908 {
2909 	struct gdma_context *gc = gd->gdma_context;
2910 	device_t dev = gc->dev;
2911 	struct mana_context *ac;
2912 	int err;
2913 	int i;
2914 
2915 	device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
2916 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2917 
2918 	err = mana_gd_register_device(gd);
2919 	if (err)
2920 		return err;
2921 
2922 	ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
2923 	if (!ac)
2924 		return ENOMEM;
2925 
2926 	ac->gdma_dev = gd;
2927 	ac->num_ports = 1;
2928 	gd->driver_data = ac;
2929 
2930 	err = mana_create_eq(ac);
2931 	if (err)
2932 		goto out;
2933 
2934 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2935 	    MANA_MICRO_VERSION, &ac->num_ports);
2936 	if (err)
2937 		goto out;
2938 
2939 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2940 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2941 
2942 	for (i = 0; i < ac->num_ports; i++) {
2943 		err = mana_probe_port(ac, i, &ac->ports[i]);
2944 		if (err) {
2945 			device_printf(dev,
2946 			    "Failed to probe mana port %d\n", i);
2947 			break;
2948 		}
2949 	}
2950 
2951 out:
2952 	if (err)
2953 		mana_remove(gd);
2954 
2955 	return err;
2956 }
2957 
2958 void
2959 mana_remove(struct gdma_dev *gd)
2960 {
2961 	struct gdma_context *gc = gd->gdma_context;
2962 	struct mana_context *ac = gd->driver_data;
2963 	device_t dev = gc->dev;
2964 	if_t ndev;
2965 	int i;
2966 
2967 	for (i = 0; i < ac->num_ports; i++) {
2968 		ndev = ac->ports[i];
2969 		if (!ndev) {
2970 			if (i == 0)
2971 				device_printf(dev, "No net device to remove\n");
2972 			goto out;
2973 		}
2974 
2975 		mana_detach(ndev);
2976 
2977 		if_free(ndev);
2978 	}
2979 
2980 	mana_destroy_eq(ac);
2981 
2982 out:
2983 	mana_gd_deregister_device(gd);
2984 	gd->driver_data = NULL;
2985 	gd->gdma_context = NULL;
2986 	free(ac, M_DEVBUF);
2987 }
2988