xref: /freebsd/sys/dev/mana/mana_en.c (revision 10ff414c)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Microsoft Corp.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/smp.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/time.h>
44 #include <sys/eventhandler.h>
45 
46 #include <machine/bus.h>
47 #include <machine/resource.h>
48 #include <machine/in_cksum.h>
49 
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
54 #ifdef RSS
55 #include <net/rss_config.h>
56 #endif
57 
58 #include <netinet/in_systm.h>
59 #include <netinet/in.h>
60 #include <netinet/if_ether.h>
61 #include <netinet/ip.h>
62 #include <netinet/ip6.h>
63 #include <netinet/tcp.h>
64 #include <netinet/udp.h>
65 
66 #include "mana.h"
67 #include "mana_sysctl.h"
68 
69 static int mana_up(struct mana_port_context *apc);
70 static int mana_down(struct mana_port_context *apc);
71 
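/*
 * Generate the RSS hash key once (cached in a static buffer) and copy
 * up to 'size' bytes of it into the caller's buffer.
 */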
72 static void
73 mana_rss_key_fill(void *k, size_t size)
74 {
75 	static bool rss_key_generated = false;
76 	static uint8_t rss_key[MANA_HASH_KEY_SIZE];
77 
78 	KASSERT(size <= MANA_HASH_KEY_SIZE,
79 	    ("Requested more bytes than the MANA RSS key can hold"));
80 
81 	if (!rss_key_generated) {
82 		arc4random_buf(rss_key, MANA_HASH_KEY_SIZE);
83 		rss_key_generated = true;
84 	}
85 	memcpy(k, rss_key, size);
86 }
87 
88 static int
89 mana_ifmedia_change(struct ifnet *ifp __unused)
90 {
91 	return EOPNOTSUPP;
92 }
93 
94 static void
95 mana_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
96 {
97 	struct mana_port_context *apc = if_getsoftc(ifp);
98 
99 	if (!apc) {
100 		if_printf(ifp, "Port not available\n");
101 		return;
102 	}
103 
104 	MANA_APC_LOCK_LOCK(apc);
105 
106 	ifmr->ifm_status = IFM_AVALID;
107 	ifmr->ifm_active = IFM_ETHER;
108 
109 	if (!apc->port_is_up) {
110 		MANA_APC_LOCK_UNLOCK(apc);
111 		mana_info(NULL, "Port %u link is down\n", apc->port_idx);
112 		return;
113 	}
114 
115 	ifmr->ifm_status |= IFM_ACTIVE;
116 	ifmr->ifm_active |= IFM_100G_DR | IFM_FDX;
117 
118 	MANA_APC_LOCK_UNLOCK(apc);
119 }
120 
121 static uint64_t
122 mana_get_counter(struct ifnet *ifp, ift_counter cnt)
123 {
124 	struct mana_port_context *apc = if_getsoftc(ifp);
125 	struct mana_port_stats *stats = &apc->port_stats;
126 
127 	switch (cnt) {
128 	case IFCOUNTER_IPACKETS:
129 		return (counter_u64_fetch(stats->rx_packets));
130 	case IFCOUNTER_OPACKETS:
131 		return (counter_u64_fetch(stats->tx_packets));
132 	case IFCOUNTER_IBYTES:
133 		return (counter_u64_fetch(stats->rx_bytes));
134 	case IFCOUNTER_OBYTES:
135 		return (counter_u64_fetch(stats->tx_bytes));
136 	case IFCOUNTER_IQDROPS:
137 		return (counter_u64_fetch(stats->rx_drops));
138 	case IFCOUNTER_OQDROPS:
139 		return (counter_u64_fetch(stats->tx_drops));
140 	default:
141 		return (if_get_counter_default(ifp, cnt));
142 	}
143 }
144 
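/*
 * Cancel any pending EQ cleanup task and drain the taskqueue until the
 * task is no longer running.
 */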
145 static void
146 mana_drain_eq_task(struct gdma_queue *queue)
147 {
148 	if (!queue || !queue->eq.cleanup_tq)
149 		return;
150 
151 	while (taskqueue_cancel(queue->eq.cleanup_tq,
152 	    &queue->eq.cleanup_task, NULL)) {
153 		taskqueue_drain(queue->eq.cleanup_tq,
154 		    &queue->eq.cleanup_task);
155 	}
156 }
157 
158 static void
159 mana_qflush(struct ifnet *ifp)
160 {
161 	if_qflush(ifp);
162 }
163 
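/*
 * Restart the port: bring it down if it is currently up, then bring it
 * back up, all while holding the APC lock.
 */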
164 int
165 mana_restart(struct mana_port_context *apc)
166 {
167 	int rc = 0;
168 
169 	MANA_APC_LOCK_LOCK(apc);
170 	if (apc->port_is_up)
171 		 mana_down(apc);
172 
173 	rc = mana_up(apc);
174 	MANA_APC_LOCK_UNLOCK(apc);
175 
176 	return (rc);
177 }
178 
179 static int
180 mana_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
181 {
182 	struct mana_port_context *apc = if_getsoftc(ifp);
183 	struct ifrsskey *ifrk;
184 	struct ifrsshash *ifrh;
185 	struct ifreq *ifr;
186 	uint16_t new_mtu;
187 	int rc = 0;
188 
189 	switch (command) {
190 	case SIOCSIFMTU:
191 		ifr = (struct ifreq *)data;
192 		new_mtu = ifr->ifr_mtu;
193 		if (ifp->if_mtu == new_mtu)
194 			break;
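		/*
		 * The 18 bytes added to the MTU account for the Ethernet
		 * header plus a VLAN tag (14 + 4, an assumption matching the
		 * usual ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN).
		 */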
195 		if ((new_mtu + 18 > MAX_FRAME_SIZE) ||
196 		    (new_mtu + 18 < MIN_FRAME_SIZE)) {
197 			if_printf(ifp, "Invalid MTU. new_mtu: %d, "
198 			    "max allowed: %d, min allowed: %d\n",
199 			    new_mtu, MAX_FRAME_SIZE - 18, MIN_FRAME_SIZE - 18);
200 			return EINVAL;
201 		}
202 		MANA_APC_LOCK_LOCK(apc);
203 		if (apc->port_is_up)
204 			mana_down(apc);
205 
206 		apc->frame_size = new_mtu + 18;
207 		if_setmtu(ifp, new_mtu);
208 		mana_dbg(NULL, "Set MTU to %d\n", new_mtu);
209 
210 		rc = mana_up(apc);
211 		MANA_APC_LOCK_UNLOCK(apc);
212 		break;
213 
214 	case SIOCSIFFLAGS:
215 		if (ifp->if_flags & IFF_UP) {
216 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
217 				MANA_APC_LOCK_LOCK(apc);
218 				if (!apc->port_is_up)
219 					rc = mana_up(apc);
220 				MANA_APC_LOCK_UNLOCK(apc);
221 			}
222 		} else {
223 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
224 				MANA_APC_LOCK_LOCK(apc);
225 				if (apc->port_is_up)
226 					mana_down(apc);
227 				MANA_APC_LOCK_UNLOCK(apc);
228 			}
229 		}
230 		break;
231 
232 	case SIOCSIFMEDIA:
233 	case SIOCGIFMEDIA:
234 	case SIOCGIFXMEDIA:
235 		ifr = (struct ifreq *)data;
236 		rc = ifmedia_ioctl(ifp, ifr, &apc->media, command);
237 		break;
238 
239 	case SIOCGIFRSSKEY:
240 		ifrk = (struct ifrsskey *)data;
241 		ifrk->ifrk_func = RSS_FUNC_TOEPLITZ;
242 		ifrk->ifrk_keylen = MANA_HASH_KEY_SIZE;
243 		memcpy(ifrk->ifrk_key, apc->hashkey, MANA_HASH_KEY_SIZE);
244 		break;
245 
246 	case SIOCGIFRSSHASH:
247 		ifrh = (struct ifrsshash *)data;
248 		ifrh->ifrh_func = RSS_FUNC_TOEPLITZ;
249 		ifrh->ifrh_types =
250 		    RSS_TYPE_TCP_IPV4 |
251 		    RSS_TYPE_UDP_IPV4 |
252 		    RSS_TYPE_TCP_IPV6 |
253 		    RSS_TYPE_UDP_IPV6;
254 		break;
255 
256 	default:
257 		rc = ether_ioctl(ifp, command, data);
258 		break;
259 	}
260 
261 	return (rc);
262 }
263 
264 static inline void
265 mana_alloc_counters(counter_u64_t *begin, int size)
266 {
267 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
268 
269 	for (; begin < end; ++begin)
270 		*begin = counter_u64_alloc(M_WAITOK);
271 }
272 
273 static inline void
274 mana_free_counters(counter_u64_t *begin, int size)
275 {
276 	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
277 
278 	for (; begin < end; ++begin)
279 		counter_u64_free(*begin);
280 }
281 
282 static bool
283 mana_can_tx(struct gdma_queue *wq)
284 {
285 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
286 }
287 
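/*
 * DMA-map a TX mbuf chain into the SGL of the work request. If the
 * chain has too many fragments (EFBIG), try m_collapse() once and then
 * retry the mapping.
 */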
288 static inline int
289 mana_tx_map_mbuf(struct mana_port_context *apc,
290     struct mana_send_buf_info *tx_info,
291     struct mbuf **m_head, struct mana_tx_package *tp,
292     struct mana_stats *tx_stats)
293 {
294 	struct gdma_dev *gd = apc->ac->gdma_dev;
295 	bus_dma_segment_t segs[MAX_MBUF_FRAGS];
296 	struct mbuf *m = *m_head;
297 	int err, nsegs, i;
298 
299 	err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag, tx_info->dma_map,
300 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
301 	if (err == EFBIG) {
302 		struct mbuf *m_new;
303 
304 		counter_u64_add(tx_stats->collapse, 1);
305 		m_new = m_collapse(m, M_NOWAIT, MAX_MBUF_FRAGS);
306 		if (unlikely(m_new == NULL)) {
307 			counter_u64_add(tx_stats->collapse_err, 1);
308 			return ENOBUFS;
309 		} else {
310 			*m_head = m = m_new;
311 		}
312 
313 		mana_warn(NULL,
314 		    "Too many segs in orig mbuf, m_collapse called\n");
315 
316 		err = bus_dmamap_load_mbuf_sg(apc->tx_buf_tag,
317 		    tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT);
318 	}
319 	if (!err) {
320 		for (i = 0; i < nsegs; i++) {
321 			tp->wqe_req.sgl[i].address = segs[i].ds_addr;
322 			tp->wqe_req.sgl[i].mem_key = gd->gpa_mkey;
323 			tp->wqe_req.sgl[i].size = segs[i].ds_len;
324 		}
325 		tp->wqe_req.num_sge = nsegs;
326 
327 		tx_info->mbuf = *m_head;
328 
329 		bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
330 		    BUS_DMASYNC_PREWRITE);
331 	}
332 
333 	return err;
334 }
335 
336 static inline void
337 mana_tx_unmap_mbuf(struct mana_port_context *apc,
338     struct mana_send_buf_info *tx_info)
339 {
340 	bus_dmamap_sync(apc->tx_buf_tag, tx_info->dma_map,
341 	    BUS_DMASYNC_POSTWRITE);
342 	bus_dmamap_unload(apc->tx_buf_tag, tx_info->dma_map);
343 	if (tx_info->mbuf) {
344 		m_freem(tx_info->mbuf);
345 		tx_info->mbuf = NULL;
346 	}
347 }
348 
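/*
 * Attach an mbuf to an RX OOB entry and DMA-map it as a single segment.
 * If alloc_mbuf is set, allocate a new jumbo cluster first, falling
 * back to a regular cluster on failure; otherwise reuse the mbuf that
 * is already attached to the OOB.
 */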
349 static inline int
350 mana_load_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
351     struct mana_recv_buf_oob *rx_oob, bool alloc_mbuf)
352 {
353 	bus_dma_segment_t segs[1];
354 	struct mbuf *mbuf;
355 	int nsegs, err;
356 	uint32_t mlen;
357 
358 	if (alloc_mbuf) {
359 		mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rxq->datasize);
360 		if (unlikely(mbuf == NULL)) {
361 			mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
362 			if (unlikely(mbuf == NULL)) {
363 				return ENOMEM;
364 			}
365 			mlen = MCLBYTES;
366 		} else {
367 			mlen = rxq->datasize;
368 		}
369 
370 		mbuf->m_pkthdr.len = mbuf->m_len = mlen;
371 	} else {
372 		if (rx_oob->mbuf) {
373 			mbuf = rx_oob->mbuf;
374 			mlen = rx_oob->mbuf->m_pkthdr.len;
375 		} else {
376 			return ENOMEM;
377 		}
378 	}
379 
380 	err = bus_dmamap_load_mbuf_sg(apc->rx_buf_tag, rx_oob->dma_map,
381 	    mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
382 
383 	if (unlikely((err != 0) || (nsegs != 1))) {
384 		mana_warn(NULL, "Failed to map mbuf, error: %d, "
385 		    "nsegs: %d\n", err, nsegs);
386 		counter_u64_add(rxq->stats.dma_mapping_err, 1);
387 		goto error;
388 	}
389 
390 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
391 	    BUS_DMASYNC_PREREAD);
392 
393 	rx_oob->mbuf = mbuf;
394 	rx_oob->num_sge = 1;
395 	rx_oob->sgl[0].address = segs[0].ds_addr;
396 	rx_oob->sgl[0].size = mlen;
397 	rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
398 
399 	return 0;
400 
401 error:
402 	m_freem(mbuf);
403 	return EFAULT;
404 }
405 
406 static inline void
407 mana_unload_rx_mbuf(struct mana_port_context *apc, struct mana_rxq *rxq,
408     struct mana_recv_buf_oob *rx_oob, bool free_mbuf)
409 {
410 	bus_dmamap_sync(apc->rx_buf_tag, rx_oob->dma_map,
411 	    BUS_DMASYNC_POSTREAD);
412 	bus_dmamap_unload(apc->rx_buf_tag, rx_oob->dma_map);
413 
414 	if (free_mbuf && rx_oob->mbuf) {
415 		m_freem(rx_oob->mbuf);
416 		rx_oob->mbuf = NULL;
417 	}
418 }
419 
420 
421 /* Use a couple of mbuf PH_loc spaces for the L3 and L4 protocol types */
422 #define MANA_L3_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[0])
423 #define MANA_L4_PROTO(_mbuf)	((_mbuf)->m_pkthdr.PH_loc.sixteen[1])
424 
425 #define MANA_TXQ_FULL	(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)
426 
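/*
 * Drain the txq buf_ring: map each mbuf, fill in the TX OOB (packet
 * format and checksum/TSO offload fields), post the work request and
 * ring the doorbell. If the SQ runs out of space, set IFF_DRV_OACTIVE
 * and kick the EQ cleanup task to reap completions.
 */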
427 static void
428 mana_xmit(struct mana_txq *txq)
429 {
430 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
431 	struct mana_send_buf_info *tx_info;
432 	struct ifnet *ndev = txq->ndev;
433 	struct mbuf *mbuf;
434 	struct mana_port_context *apc = if_getsoftc(ndev);
435 	struct mana_port_stats *port_stats = &apc->port_stats;
436 	struct gdma_dev *gd = apc->ac->gdma_dev;
437 	uint64_t packets, bytes;
438 	uint16_t next_to_use;
439 	struct mana_tx_package pkg = {};
440 	struct mana_stats *tx_stats;
441 	struct gdma_queue *gdma_sq;
442 	struct gdma_queue *gdma_eq;
443 	struct mana_cq *cq;
444 	int err, len;
445 
446 	gdma_sq = txq->gdma_sq;
447 	cq = &apc->tx_qp[txq->idx].tx_cq;
448 	gdma_eq = cq->gdma_cq->cq.parent;
449 	tx_stats = &txq->stats;
450 
451 	packets = 0;
452 	bytes = 0;
453 	next_to_use = txq->next_to_use;
454 
455 	while ((mbuf = drbr_peek(ndev, txq->txq_br)) != NULL) {
456 		if (!apc->port_is_up ||
457 		    (if_getdrvflags(ndev) & MANA_TXQ_FULL) != IFF_DRV_RUNNING) {
458 			drbr_putback(ndev, txq->txq_br, mbuf);
459 			break;
460 		}
461 
462 		if (!mana_can_tx(gdma_sq)) {
463 			/* SQ is full. Set the IFF_DRV_OACTIVE flag */
464 			if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE, 0);
465 			counter_u64_add(tx_stats->stop, 1);
466 			uint64_t stops = counter_u64_fetch(tx_stats->stop);
467 			uint64_t wakeups = counter_u64_fetch(tx_stats->wakeup);
468 #define MANA_TXQ_STOP_THRESHOLD		50
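			/*
			 * Illustrative numbers (not from the source): with
			 * 120 stops and 20 wakeups the condition below holds,
			 * so traffic is steered to the alternate queue
			 * (idx + 120 / 20) % num_queues.
			 */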
469 			if (stops > MANA_TXQ_STOP_THRESHOLD && wakeups > 0 &&
470 			    stops > wakeups && txq->alt_txq_idx == txq->idx) {
471 				txq->alt_txq_idx =
472 				    (txq->idx + (stops / wakeups))
473 				    % apc->num_queues;
474 				counter_u64_add(tx_stats->alt_chg, 1);
475 			}
476 
477 			drbr_putback(ndev, txq->txq_br, mbuf);
478 
479 			taskqueue_enqueue(gdma_eq->eq.cleanup_tq,
480 			    &gdma_eq->eq.cleanup_task);
481 			break;
482 		}
483 
484 		tx_info = &txq->tx_buf_info[next_to_use];
485 
486 		memset(&pkg, 0, sizeof(struct mana_tx_package));
487 		pkg.wqe_req.sgl = pkg.sgl_array;
488 
489 		err = mana_tx_map_mbuf(apc, tx_info, &mbuf, &pkg, tx_stats);
490 		if (unlikely(err)) {
491 			mana_dbg(NULL,
492 			    "Failed to map tx mbuf, err %d\n", err);
493 
494 			counter_u64_add(tx_stats->dma_mapping_err, 1);
495 
496 			/* The mbuf is still there. Free it */
497 			m_freem(mbuf);
498 			/* Advance the drbr queue */
499 			drbr_advance(ndev, txq->txq_br);
500 			continue;
501 		}
502 
503 		pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
504 		pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
505 
506 		if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
507 			pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
508 			pkt_fmt = MANA_LONG_PKT_FMT;
509 		} else {
510 			pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
511 		}
512 
513 		pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
514 
515 		if (pkt_fmt == MANA_SHORT_PKT_FMT)
516 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
517 		else
518 			pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
519 
520 		pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
521 		pkg.wqe_req.flags = 0;
522 		pkg.wqe_req.client_data_unit = 0;
523 
524 		if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) {
525 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
526 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
527 			else
528 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
529 
530 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
531 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
532 			pkg.tx_oob.s_oob.trans_off = mbuf->m_pkthdr.l3hlen;
533 
534 			pkg.wqe_req.client_data_unit = mbuf->m_pkthdr.tso_segsz;
535 			pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
536 		} else if (mbuf->m_pkthdr.csum_flags &
537 		    (CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP6_UDP | CSUM_IP6_TCP)) {
538 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP) {
539 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
540 				pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
541 			} else {
542 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
543 			}
544 
545 			if (MANA_L4_PROTO(mbuf) == IPPROTO_TCP) {
546 				pkg.tx_oob.s_oob.comp_tcp_csum = 1;
547 				pkg.tx_oob.s_oob.trans_off =
548 				    mbuf->m_pkthdr.l3hlen;
549 			} else {
550 				pkg.tx_oob.s_oob.comp_udp_csum = 1;
551 			}
552 		} else if (mbuf->m_pkthdr.csum_flags & CSUM_IP) {
553 			pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
554 			pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
555 		} else {
556 			if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IP)
557 				pkg.tx_oob.s_oob.is_outer_ipv4 = 1;
558 			else if (MANA_L3_PROTO(mbuf) == ETHERTYPE_IPV6)
559 				pkg.tx_oob.s_oob.is_outer_ipv6 = 1;
560 		}
561 
562 		len = mbuf->m_pkthdr.len;
563 
564 		err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
565 		    (struct gdma_posted_wqe_info *)&tx_info->wqe_inf);
566 		if (unlikely(err)) {
567 			/* Should not happen */
568 			if_printf(ndev, "Failed to post TX OOB: %d\n", err);
569 
570 			mana_tx_unmap_mbuf(apc, tx_info);
571 
572 			drbr_advance(ndev, txq->txq_br);
573 			continue;
574 		}
575 
576 		next_to_use =
577 		    (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
578 
579 		(void)atomic_inc_return(&txq->pending_sends);
580 
581 		drbr_advance(ndev, txq->txq_br);
582 
583 		mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
584 
585 		packets++;
586 		bytes += len;
587 	}
588 
589 	counter_enter();
590 	counter_u64_add_protected(tx_stats->packets, packets);
591 	counter_u64_add_protected(port_stats->tx_packets, packets);
592 	counter_u64_add_protected(tx_stats->bytes, bytes);
593 	counter_u64_add_protected(port_stats->tx_bytes, bytes);
594 	counter_exit();
595 
596 	txq->next_to_use = next_to_use;
597 }
598 
599 static void
600 mana_xmit_taskfunc(void *arg, int pending)
601 {
602 	struct mana_txq *txq = (struct mana_txq *)arg;
603 	struct ifnet *ndev = txq->ndev;
604 	struct mana_port_context *apc = if_getsoftc(ndev);
605 
606 	while (!drbr_empty(ndev, txq->txq_br) && apc->port_is_up &&
607 	    (if_getdrvflags(ndev) & MANA_TXQ_FULL) == IFF_DRV_RUNNING) {
608 		mtx_lock(&txq->txq_mtx);
609 		mana_xmit(txq);
610 		mtx_unlock(&txq->txq_mtx);
611 	}
612 }
613 
614 #define PULLUP_HDR(m, len)				\
615 do {							\
616 	if (unlikely((m)->m_len < (len))) {		\
617 		(m) = m_pullup((m), (len));		\
618 		if ((m) == NULL)			\
619 			return (NULL);			\
620 	}						\
621 } while (0)
622 
623 /*
624  * If this function fails, the mbuf is freed.
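 * On success, record the L3 ethertype in the mbuf header, set
 * m_pkthdr.l3hlen, zero the IP length field and seed the TCP checksum
 * with the pseudo-header sum; mana_xmit() later uses these when filling
 * in the TSO offload fields.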
625  */
626 static inline struct mbuf *
627 mana_tso_fixup(struct mbuf *mbuf)
628 {
629 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
630 	struct tcphdr *th;
631 	uint16_t etype;
632 	int ehlen;
633 
634 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
635 		etype = ntohs(eh->evl_proto);
636 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
637 	} else {
638 		etype = ntohs(eh->evl_encap_proto);
639 		ehlen = ETHER_HDR_LEN;
640 	}
641 
642 	if (etype == ETHERTYPE_IP) {
643 		struct ip *ip;
644 		int iphlen;
645 
646 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip));
647 		ip = mtodo(mbuf, ehlen);
648 		iphlen = ip->ip_hl << 2;
649 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
650 
651 		PULLUP_HDR(mbuf, ehlen + iphlen + sizeof(*th));
652 		th = mtodo(mbuf, ehlen + iphlen);
653 
654 		ip->ip_len = 0;
655 		ip->ip_sum = 0;
656 		th->th_sum = in_pseudo(ip->ip_src.s_addr,
657 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
658 	} else if (etype == ETHERTYPE_IPV6) {
659 		struct ip6_hdr *ip6;
660 
661 		PULLUP_HDR(mbuf, ehlen + sizeof(*ip6) + sizeof(*th));
662 		ip6 = mtodo(mbuf, ehlen);
663 		if (ip6->ip6_nxt != IPPROTO_TCP) {
664 			/* Something is really wrong; free the mbuf and return */
665 			mana_dbg(NULL, "TSO mbuf not TCP, freed.\n");
666 			m_freem(mbuf);
667 			return NULL;
668 		}
669 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
670 
671 		th = mtodo(mbuf, ehlen + sizeof(*ip6));
672 
673 		ip6->ip6_plen = 0;
674 		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
675 	} else {
676 		/* CSUM_TSO is set but not IP protocol. */
677 		mana_warn(NULL, "TSO mbuf not right, freed.\n");
678 		m_freem(mbuf);
679 		return NULL;
680 	}
681 
682 	MANA_L3_PROTO(mbuf) = etype;
683 
684 	return (mbuf);
685 }
686 
687 /*
688  * If this function fails, the mbuf is freed.
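 * On success, record the L3 ethertype and L4 protocol number in the
 * mbuf header so that mana_xmit() can set the matching checksum
 * offload bits.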
689  */
690 static inline struct mbuf *
691 mana_mbuf_csum_check(struct mbuf *mbuf)
692 {
693 	struct ether_vlan_header *eh = mtod(mbuf, struct ether_vlan_header *);
694 	struct mbuf *mbuf_next;
695 	uint16_t etype;
696 	int offset;
697 	int ehlen;
698 
699 	if (eh->evl_encap_proto == ntohs(ETHERTYPE_VLAN)) {
700 		etype = ntohs(eh->evl_proto);
701 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
702 	} else {
703 		etype = ntohs(eh->evl_encap_proto);
704 		ehlen = ETHER_HDR_LEN;
705 	}
706 
707 	mbuf_next = m_getptr(mbuf, ehlen, &offset);
708 
709 	MANA_L4_PROTO(mbuf) = 0;
710 	if (etype == ETHERTYPE_IP) {
711 		const struct ip *ip;
712 		int iphlen;
713 
714 		ip = (struct ip *)(mtodo(mbuf_next, offset));
715 		iphlen = ip->ip_hl << 2;
716 		mbuf->m_pkthdr.l3hlen = ehlen + iphlen;
717 
718 		MANA_L4_PROTO(mbuf) = ip->ip_p;
719 	} else if (etype == ETHERTYPE_IPV6) {
720 		const struct ip6_hdr *ip6;
721 
722 		ip6 = (struct ip6_hdr *)(mtodo(mbuf_next, offset));
723 		mbuf->m_pkthdr.l3hlen = ehlen + sizeof(*ip6);
724 
725 		MANA_L4_PROTO(mbuf) = ip6->ip6_nxt;
726 	} else {
727 		MANA_L4_PROTO(mbuf) = 0;
728 	}
729 
730 	MANA_L3_PROTO(mbuf) = etype;
731 
732 	return (mbuf);
733 }
734 
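/*
 * if_transmit handler: fix up the mbuf for TSO or checksum offload,
 * pick a TX queue from the RSS indirection table (or the flow ID),
 * optionally redirect to that queue's alternate queue, enqueue on the
 * queue's buf_ring and either transmit directly or defer to the
 * enqueue taskqueue.
 */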
735 static int
736 mana_start_xmit(struct ifnet *ifp, struct mbuf *m)
737 {
738 	struct mana_port_context *apc = if_getsoftc(ifp);
739 	struct mana_txq *txq;
740 	int is_drbr_empty;
741 	uint16_t txq_id;
742 	int err;
743 
744 	if (unlikely((!apc->port_is_up) ||
745 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
746 		return ENODEV;
747 
748 	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
749 		m = mana_tso_fixup(m);
750 		if (unlikely(m == NULL)) {
751 			counter_enter();
752 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
753 			counter_exit();
754 			return EIO;
755 		}
756 	} else {
757 		m = mana_mbuf_csum_check(m);
758 		if (unlikely(m == NULL)) {
759 			counter_enter();
760 			counter_u64_add_protected(apc->port_stats.tx_drops, 1);
761 			counter_exit();
762 			return EIO;
763 		}
764 	}
765 
766 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
767 		uint32_t hash = m->m_pkthdr.flowid;
768 		txq_id = apc->indir_table[(hash) & MANA_INDIRECT_TABLE_MASK] %
769 		    apc->num_queues;
770 	} else {
771 		txq_id = m->m_pkthdr.flowid % apc->num_queues;
772 	}
773 
774 	if (apc->enable_tx_altq)
775 		txq_id = apc->tx_qp[txq_id].txq.alt_txq_idx;
776 
777 	txq = &apc->tx_qp[txq_id].txq;
778 
779 	is_drbr_empty = drbr_empty(ifp, txq->txq_br);
780 	err = drbr_enqueue(ifp, txq->txq_br, m);
781 	if (unlikely(err)) {
782 		mana_warn(NULL, "txq %u failed to enqueue: %d\n",
783 		    txq_id, err);
784 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
785 		return err;
786 	}
787 
788 	if (is_drbr_empty && mtx_trylock(&txq->txq_mtx)) {
789 		mana_xmit(txq);
790 		mtx_unlock(&txq->txq_mtx);
791 	} else {
792 		taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
793 	}
794 
795 	return 0;
796 }
797 
798 static void
799 mana_cleanup_port_context(struct mana_port_context *apc)
800 {
801 	bus_dma_tag_destroy(apc->tx_buf_tag);
802 	bus_dma_tag_destroy(apc->rx_buf_tag);
803 	apc->rx_buf_tag = NULL;
804 
805 	free(apc->rxqs, M_DEVBUF);
806 	apc->rxqs = NULL;
807 
808 	mana_free_counters((counter_u64_t *)&apc->port_stats,
809 	    sizeof(struct mana_port_stats));
810 }
811 
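/*
 * Create the per-port DMA tags for TX and RX buffers and allocate the
 * array of RX queue pointers.
 */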
812 static int
813 mana_init_port_context(struct mana_port_context *apc)
814 {
815 	device_t dev = apc->ac->gdma_dev->gdma_context->dev;
816 	uint32_t tso_maxsize;
817 	int err;
818 
819 	tso_maxsize = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
820 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
821 
822 	/* Create DMA tag for tx bufs */
823 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
824 	    1, 0,			/* alignment, boundary	*/
825 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
826 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
827 	    NULL, NULL,			/* filter, filterarg	*/
828 	    tso_maxsize,		/* maxsize		*/
829 	    MAX_MBUF_FRAGS,		/* nsegments		*/
830 	    tso_maxsize,		/* maxsegsize		*/
831 	    0,				/* flags		*/
832 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
833 	    &apc->tx_buf_tag);
834 	if (unlikely(err)) {
835 		device_printf(dev, "Failed to create TX DMA tag\n");
836 		return err;
837 	}
838 
839 	/* Create DMA tag for rx bufs */
840 	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
841 	    64, 0,			/* alignment, boundary	*/
842 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
843 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
844 	    NULL, NULL,			/* filter, filterarg	*/
845 	    MJUMPAGESIZE,		/* maxsize		*/
846 	    1,				/* nsegments		*/
847 	    MJUMPAGESIZE,		/* maxsegsize		*/
848 	    0,				/* flags		*/
849 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
850 	    &apc->rx_buf_tag);
851 	if (unlikely(err)) {
852 		device_printf(dev, "Failed to create RX DMA tag\n");
853 		return err;
854 	}
855 
856 	apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
857 	    M_DEVBUF, M_WAITOK | M_ZERO);
858 
859 	if (!apc->rxqs) {
860 		bus_dma_tag_destroy(apc->tx_buf_tag);
861 		bus_dma_tag_destroy(apc->rx_buf_tag);
862 		apc->rx_buf_tag = NULL;
863 		return ENOMEM;
864 	}
865 
866 	return 0;
867 }
868 
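/*
 * Send a management request to the device and check that the response
 * carries the same dev_id and activity_id as the request.
 */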
869 static int
870 mana_send_request(struct mana_context *ac, void *in_buf,
871     uint32_t in_len, void *out_buf, uint32_t out_len)
872 {
873 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
874 	struct gdma_resp_hdr *resp = out_buf;
875 	struct gdma_req_hdr *req = in_buf;
876 	device_t dev = gc->dev;
877 	static atomic_t activity_id;
878 	int err;
879 
880 	req->dev_id = gc->mana.dev_id;
881 	req->activity_id = atomic_inc_return(&activity_id);
882 
883 	mana_dbg(NULL, "activity_id = %u\n", activity_id);
884 
885 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
886 	    out_buf);
887 	if (err || resp->status) {
888 		device_printf(dev, "Failed to send mana message: %d, 0x%x\n",
889 			err, resp->status);
890 		return err ? err : EPROTO;
891 	}
892 
893 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
894 	    req->activity_id != resp->activity_id) {
895 		device_printf(dev,
896 		    "Unexpected mana message response: %x,%x,%x,%x\n",
897 		    req->dev_id.as_uint32, resp->dev_id.as_uint32,
898 		    req->activity_id, resp->activity_id);
899 		return EPROTO;
900 	}
901 
902 	return 0;
903 }
904 
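/* Validate the message type, version and size of a response header. */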
905 static int
906 mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
907     const enum mana_command_code expected_code,
908     const uint32_t min_size)
909 {
910 	if (resp_hdr->response.msg_type != expected_code)
911 		return EPROTO;
912 
913 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
914 		return EPROTO;
915 
916 	if (resp_hdr->response.msg_size < min_size)
917 		return EPROTO;
918 
919 	return 0;
920 }
921 
922 static int
923 mana_query_device_cfg(struct mana_context *ac, uint32_t proto_major_ver,
924     uint32_t proto_minor_ver, uint32_t proto_micro_ver,
925     uint16_t *max_num_vports)
926 {
927 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
928 	struct mana_query_device_cfg_resp resp = {};
929 	struct mana_query_device_cfg_req req = {};
930 	device_t dev = gc->dev;
931 	int err = 0;
932 
933 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
934 	    sizeof(req), sizeof(resp));
935 	req.proto_major_ver = proto_major_ver;
936 	req.proto_minor_ver = proto_minor_ver;
937 	req.proto_micro_ver = proto_micro_ver;
938 
939 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
940 	if (err) {
941 		device_printf(dev, "Failed to query config: %d", err);
942 		return err;
943 	}
944 
945 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
946 	    sizeof(resp));
947 	if (err || resp.hdr.status) {
948 		device_printf(dev, "Invalid query result: %d, 0x%x\n", err,
949 		    resp.hdr.status);
950 		if (!err)
951 			err = EPROTO;
952 		return err;
953 	}
954 
955 	*max_num_vports = resp.max_num_vports;
956 
957 	mana_dbg(NULL, "mana max_num_vports from device = %d\n",
958 	    *max_num_vports);
959 
960 	return 0;
961 }
962 
963 static int
964 mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
965     uint32_t *max_sq, uint32_t *max_rq, uint32_t *num_indir_entry)
966 {
967 	struct mana_query_vport_cfg_resp resp = {};
968 	struct mana_query_vport_cfg_req req = {};
969 	int err;
970 
971 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
972 	    sizeof(req), sizeof(resp));
973 
974 	req.vport_index = vport_index;
975 
976 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
977 	    sizeof(resp));
978 	if (err)
979 		return err;
980 
981 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
982 	    sizeof(resp));
983 	if (err)
984 		return err;
985 
986 	if (resp.hdr.status)
987 		return EPROTO;
988 
989 	*max_sq = resp.max_num_sq;
990 	*max_rq = resp.max_num_rq;
991 	*num_indir_entry = resp.num_indirection_ent;
992 
993 	apc->port_handle = resp.vport;
994 	memcpy(apc->mac_addr, resp.mac_addr, ETHER_ADDR_LEN);
995 
996 	return 0;
997 }
998 
999 static int
1000 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
1001     uint32_t doorbell_pg_id)
1002 {
1003 	struct mana_config_vport_resp resp = {};
1004 	struct mana_config_vport_req req = {};
1005 	int err;
1006 
1007 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1008 	    sizeof(req), sizeof(resp));
1009 	req.vport = apc->port_handle;
1010 	req.pdid = protection_dom_id;
1011 	req.doorbell_pageid = doorbell_pg_id;
1012 
1013 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1014 	    sizeof(resp));
1015 	if (err) {
1016 		if_printf(apc->ndev, "Failed to configure vPort: %d\n", err);
1017 		goto out;
1018 	}
1019 
1020 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1021 	    sizeof(resp));
1022 	if (err || resp.hdr.status) {
1023 		if_printf(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1024 		    err, resp.hdr.status);
1025 		if (!err)
1026 			err = EPROTO;
1027 
1028 		goto out;
1029 	}
1030 
1031 	apc->tx_shortform_allowed = resp.short_form_allowed;
1032 	apc->tx_vp_offset = resp.tx_vport_offset;
1033 out:
1034 	return err;
1035 }
1036 
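/*
 * Configure RX steering for the vPort. The request is variable sized:
 * the indirection table of RX object handles follows the fixed header,
 * and the hash key and table are only copied when their update flags
 * are set.
 */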
1037 static int
1038 mana_cfg_vport_steering(struct mana_port_context *apc,
1039     enum TRI_STATE rx,
1040     bool update_default_rxobj, bool update_key,
1041     bool update_tab)
1042 {
1043 	uint16_t num_entries = MANA_INDIRECT_TABLE_SIZE;
1044 	struct mana_cfg_rx_steer_req *req = NULL;
1045 	struct mana_cfg_rx_steer_resp resp = {};
1046 	struct ifnet *ndev = apc->ndev;
1047 	mana_handle_t *req_indir_tab;
1048 	uint32_t req_buf_size;
1049 	int err;
1050 
1051 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1052 	req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
1053 	if (!req)
1054 		return ENOMEM;
1055 
1056 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1057 	    sizeof(resp));
1058 
1059 	req->vport = apc->port_handle;
1060 	req->num_indir_entries = num_entries;
1061 	req->indir_tab_offset = sizeof(*req);
1062 	req->rx_enable = rx;
1063 	req->rss_enable = apc->rss_state;
1064 	req->update_default_rxobj = update_default_rxobj;
1065 	req->update_hashkey = update_key;
1066 	req->update_indir_tab = update_tab;
1067 	req->default_rxobj = apc->default_rxobj;
1068 
1069 	if (update_key)
1070 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1071 
1072 	if (update_tab) {
1073 		req_indir_tab = (mana_handle_t *)(req + 1);
1074 		memcpy(req_indir_tab, apc->rxobj_table,
1075 		       req->num_indir_entries * sizeof(mana_handle_t));
1076 	}
1077 
1078 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1079 	    sizeof(resp));
1080 	if (err) {
1081 		if_printf(ndev, "Failed to configure vPort RX: %d\n", err);
1082 		goto out;
1083 	}
1084 
1085 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1086 	    sizeof(resp));
1087 	if (err) {
1088 		if_printf(ndev, "vPort RX configuration failed: %d\n", err);
1089 		goto out;
1090 	}
1091 
1092 	if (resp.hdr.status) {
1093 		if_printf(ndev, "vPort RX configuration failed: 0x%x\n",
1094 		    resp.hdr.status);
1095 		err = EPROTO;
1096 	}
1097 out:
1098 	free(req, M_DEVBUF);
1099 	return err;
1100 }
1101 
1102 static int
1103 mana_create_wq_obj(struct mana_port_context *apc,
1104     mana_handle_t vport,
1105     uint32_t wq_type, struct mana_obj_spec *wq_spec,
1106     struct mana_obj_spec *cq_spec,
1107     mana_handle_t *wq_obj)
1108 {
1109 	struct mana_create_wqobj_resp resp = {};
1110 	struct mana_create_wqobj_req req = {};
1111 	struct ifnet *ndev = apc->ndev;
1112 	int err;
1113 
1114 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1115 	    sizeof(req), sizeof(resp));
1116 	req.vport = vport;
1117 	req.wq_type = wq_type;
1118 	req.wq_gdma_region = wq_spec->gdma_region;
1119 	req.cq_gdma_region = cq_spec->gdma_region;
1120 	req.wq_size = wq_spec->queue_size;
1121 	req.cq_size = cq_spec->queue_size;
1122 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1123 	req.cq_parent_qid = cq_spec->attached_eq;
1124 
1125 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1126 	    sizeof(resp));
1127 	if (err) {
1128 		if_printf(ndev, "Failed to create WQ object: %d\n", err);
1129 		goto out;
1130 	}
1131 
1132 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1133 	    sizeof(resp));
1134 	if (err || resp.hdr.status) {
1135 		if_printf(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1136 		    resp.hdr.status);
1137 		if (!err)
1138 			err = EPROTO;
1139 		goto out;
1140 	}
1141 
1142 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1143 		if_printf(ndev, "Got an invalid WQ object handle\n");
1144 		err = EPROTO;
1145 		goto out;
1146 	}
1147 
1148 	*wq_obj = resp.wq_obj;
1149 	wq_spec->queue_index = resp.wq_id;
1150 	cq_spec->queue_index = resp.cq_id;
1151 
1152 	return 0;
1153 out:
1154 	return err;
1155 }
1156 
1157 static void
1158 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
1159     mana_handle_t wq_obj)
1160 {
1161 	struct mana_destroy_wqobj_resp resp = {};
1162 	struct mana_destroy_wqobj_req req = {};
1163 	struct ifnet *ndev = apc->ndev;
1164 	int err;
1165 
1166 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1167 	    sizeof(req), sizeof(resp));
1168 	req.wq_type = wq_type;
1169 	req.wq_obj_handle = wq_obj;
1170 
1171 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1172 	    sizeof(resp));
1173 	if (err) {
1174 		if_printf(ndev, "Failed to destroy WQ object: %d\n", err);
1175 		return;
1176 	}
1177 
1178 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1179 	    sizeof(resp));
1180 	if (err || resp.hdr.status)
1181 		if_printf(ndev, "Failed to destroy WQ object: %d, 0x%x\n",
1182 		    err, resp.hdr.status);
1183 }
1184 
1185 static void
1186 mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf)
1187 {
1188 	int i;
1189 
1190 	for (i = 0; i < CQE_POLLING_BUFFER; i++)
1191 		memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp));
1192 }
1193 
1194 static void
1195 mana_destroy_eq(struct gdma_context *gc, struct mana_port_context *apc)
1196 {
1197 	struct gdma_queue *eq;
1198 	int i;
1199 
1200 	if (!apc->eqs)
1201 		return;
1202 
1203 	for (i = 0; i < apc->num_queues; i++) {
1204 		eq = apc->eqs[i].eq;
1205 		if (!eq)
1206 			continue;
1207 
1208 		mana_gd_destroy_queue(gc, eq);
1209 	}
1210 
1211 	free(apc->eqs, M_DEVBUF);
1212 	apc->eqs = NULL;
1213 }
1214 
1215 static int
1216 mana_create_eq(struct mana_port_context *apc)
1217 {
1218 	struct gdma_dev *gd = apc->ac->gdma_dev;
1219 	struct gdma_queue_spec spec = {};
1220 	int err;
1221 	int i;
1222 
1223 	apc->eqs = mallocarray(apc->num_queues, sizeof(struct mana_eq),
1224 	    M_DEVBUF, M_WAITOK | M_ZERO);
1225 	if (!apc->eqs)
1226 		return ENOMEM;
1227 
1228 	spec.type = GDMA_EQ;
1229 	spec.monitor_avl_buf = false;
1230 	spec.queue_size = EQ_SIZE;
1231 	spec.eq.callback = NULL;
1232 	spec.eq.context = apc->eqs;
1233 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1234 	spec.eq.ndev = apc->ndev;
1235 
1236 	for (i = 0; i < apc->num_queues; i++) {
1237 		mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll);
1238 
1239 		err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
1240 		if (err)
1241 			goto out;
1242 	}
1243 
1244 	return 0;
1245 out:
1246 	mana_destroy_eq(gd->gdma_context, apc);
1247 	return err;
1248 }
1249 
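/*
 * Advance the work queue tail by num_units, guarding against moving it
 * past the head (which would make the unsigned used-space grow).
 */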
1250 static int
1251 mana_move_wq_tail(struct gdma_queue *wq, uint32_t num_units)
1252 {
1253 	uint32_t used_space_old;
1254 	uint32_t used_space_new;
1255 
1256 	used_space_old = wq->head - wq->tail;
1257 	used_space_new = wq->head - (wq->tail + num_units);
1258 
1259 	if (used_space_new > used_space_old) {
1260 		mana_err(NULL,
1261 		    "WARNING: new used space %u greater than old one %u\n",
1262 		    used_space_new, used_space_old);
1263 		return ERANGE;
1264 	}
1265 
1266 	wq->tail += num_units;
1267 	return 0;
1268 }
1269 
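/*
 * Reap TX completions: unmap and free the transmitted mbufs, advance
 * the SQ tail, and clear IFF_DRV_OACTIVE (re-scheduling the enqueue
 * task) once enough SQ space is available again.
 */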
1270 static void
1271 mana_poll_tx_cq(struct mana_cq *cq)
1272 {
1273 	struct gdma_comp *completions = cq->gdma_comp_buf;
1274 	struct gdma_posted_wqe_info *wqe_info;
1275 	struct mana_send_buf_info *tx_info;
1276 	unsigned int pkt_transmitted = 0;
1277 	unsigned int wqe_unit_cnt = 0;
1278 	struct mana_txq *txq = cq->txq;
1279 	struct mana_port_context *apc;
1280 	uint16_t next_to_complete;
1281 	struct ifnet *ndev;
1282 	int comp_read;
1283 	int txq_idx = txq->idx;
1284 	int i;
1285 	int sa_drop = 0;
1286 
1287 	struct gdma_queue *gdma_wq;
1288 	unsigned int avail_space;
1289 	bool txq_full = false;
1290 
1291 	ndev = txq->ndev;
1292 	apc = if_getsoftc(ndev);
1293 
1294 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1295 	    CQE_POLLING_BUFFER);
1296 
1297 	next_to_complete = txq->next_to_complete;
1298 
1299 	for (i = 0; i < comp_read; i++) {
1300 		struct mana_tx_comp_oob *cqe_oob;
1301 
1302 		if (!completions[i].is_sq) {
1303 			mana_err(NULL, "WARNING: Not for SQ\n");
1304 			return;
1305 		}
1306 
1307 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1308 		if (cqe_oob->cqe_hdr.client_type !=
1309 				 MANA_CQE_COMPLETION) {
1310 			mana_err(NULL,
1311 			    "WARNING: Invalid CQE client type %u\n",
1312 			    cqe_oob->cqe_hdr.client_type);
1313 			return;
1314 		}
1315 
1316 		switch (cqe_oob->cqe_hdr.cqe_type) {
1317 		case CQE_TX_OKAY:
1318 			break;
1319 
1320 		case CQE_TX_SA_DROP:
1321 		case CQE_TX_MTU_DROP:
1322 		case CQE_TX_INVALID_OOB:
1323 		case CQE_TX_INVALID_ETH_TYPE:
1324 		case CQE_TX_HDR_PROCESSING_ERROR:
1325 		case CQE_TX_VF_DISABLED:
1326 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1327 		case CQE_TX_VPORT_DISABLED:
1328 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1329 			sa_drop++;
1330 			mana_err(NULL,
1331 			    "TX: txq %d CQE error %d, ntc = %d, "
1332 			    "pending sends = %d: err ignored.\n",
1333 			    txq_idx, cqe_oob->cqe_hdr.cqe_type,
1334 			    next_to_complete, txq->pending_sends);
1335 			break;
1336 
1337 		default:
1338 			/* If the CQE type is unexpected, log an error,
1339 			 * and go through the error path.
1340 			 */
1341 			mana_err(NULL,
1342 			    "ERROR: TX: Unexpected CQE type %d: HW BUG?\n",
1343 			    cqe_oob->cqe_hdr.cqe_type);
1344 			return;
1345 		}
1346 		if (txq->gdma_txq_id != completions[i].wq_num) {
1347 			mana_dbg(NULL,
1348 			    "txq gdma id not match completion wq num: "
1349 			    "%d != %d\n",
1350 			    txq->gdma_txq_id, completions[i].wq_num);
1351 			break;
1352 		}
1353 
1354 		tx_info = &txq->tx_buf_info[next_to_complete];
1355 		if (!tx_info->mbuf) {
1356 			mana_err(NULL,
1357 			    "WARNING: txq %d Empty mbuf on tx_info: %u, "
1358 			    "ntu = %u, pending_sends = %d, "
1359 			    "transmitted = %d, sa_drop = %d, i = %d, comp_read = %d\n",
1360 			    txq_idx, next_to_complete, txq->next_to_use,
1361 			    txq->pending_sends, pkt_transmitted, sa_drop,
1362 			    i, comp_read);
1363 			continue;
1364 		}
1365 
1366 		wqe_info = &tx_info->wqe_inf;
1367 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1368 
1369 		mana_tx_unmap_mbuf(apc, tx_info);
1370 		mb();
1371 
1372 		next_to_complete =
1373 		    (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
1374 
1375 		pkt_transmitted++;
1376 	}
1377 
1378 	txq->next_to_complete = next_to_complete;
1379 
1380 	if (wqe_unit_cnt == 0) {
1381 		mana_err(NULL,
1382 		    "WARNING: TX ring not proceeding!\n");
1383 		return;
1384 	}
1385 
1386 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1387 
1388 	/* Ensure tail updated before checking q stop */
1389 	wmb();
1390 
1391 	gdma_wq = txq->gdma_sq;
1392 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1393 
1394 
1395 	if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL) {
1396 		txq_full = true;
1397 	}
1398 
1399 	/* Ensure checking txq_full before apc->port_is_up. */
1400 	rmb();
1401 
1402 	if (txq_full && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1403 		/* Grab the txq lock and re-test */
1404 		mtx_lock(&txq->txq_mtx);
1405 		avail_space = mana_gd_wq_avail_space(gdma_wq);
1406 
1407 		if ((if_getdrvflags(ndev) & MANA_TXQ_FULL) == MANA_TXQ_FULL &&
1408 		    apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1409 			/* Clear the Q full flag */
1410 			if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING,
1411 			    IFF_DRV_OACTIVE);
1412 			counter_u64_add(txq->stats.wakeup, 1);
1413 			if (txq->alt_txq_idx != txq->idx) {
1414 				uint64_t stops = counter_u64_fetch(txq->stats.stop);
1415 				uint64_t wakeups = counter_u64_fetch(txq->stats.wakeup);
1416 				/* Reset alt_txq_idx back if it is not overloaded */
1417 				if (stops < wakeups) {
1418 					txq->alt_txq_idx = txq->idx;
1419 					counter_u64_add(txq->stats.alt_reset, 1);
1420 				}
1421 			}
1422 			rmb();
1423 			/* Schedule a tx enqueue task */
1424 			taskqueue_enqueue(txq->enqueue_tq, &txq->enqueue_task);
1425 		}
1426 		mtx_unlock(&txq->txq_mtx);
1427 	}
1428 
1429 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1430 		mana_err(NULL,
1431 		    "WARNING: TX %d pending_sends error: %d\n",
1432 		    txq->idx, txq->pending_sends);
1433 }
1434 
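/*
 * Re-post the receive WQE at the current buffer index so the hardware
 * can use it again, and advance the index.
 */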
1435 static void
1436 mana_post_pkt_rxq(struct mana_rxq *rxq)
1437 {
1438 	struct mana_recv_buf_oob *recv_buf_oob;
1439 	uint32_t curr_index;
1440 	int err;
1441 
1442 	curr_index = rxq->buf_index++;
1443 	if (rxq->buf_index == rxq->num_rx_buf)
1444 		rxq->buf_index = 0;
1445 
1446 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1447 
1448 	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1449 	    &recv_buf_oob->wqe_inf);
1450 	if (err) {
1451 		mana_err(NULL, "WARNING: rxq %u post pkt err %d\n",
1452 		    rxq->rxq_idx, err);
1453 		return;
1454 	}
1455 
1456 	if (recv_buf_oob->wqe_inf.wqe_size_in_bu != 1) {
1457 		mana_err(NULL, "WARNING: rxq %u wqe_size_in_bu %u\n",
1458 		    rxq->rxq_idx, recv_buf_oob->wqe_inf.wqe_size_in_bu);
1459 	}
1460 }
1461 
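/*
 * Hand a received mbuf to the stack: set the checksum flags and RSS
 * hash type reported by the hardware, try LRO for TCP when enabled,
 * otherwise call if_input(), and update the RX counters.
 */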
1462 static void
1463 mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
1464     struct mana_rxq *rxq)
1465 {
1466 	struct mana_stats *rx_stats = &rxq->stats;
1467 	struct ifnet *ndev = rxq->ndev;
1468 	uint32_t pkt_len = cqe->ppi[0].pkt_len;
1469 	uint16_t rxq_idx = rxq->rxq_idx;
1470 	struct mana_port_context *apc;
1471 	struct gdma_queue *eq;
1472 	bool do_lro = false;
1473 	bool do_if_input;
1474 
1475 	apc = if_getsoftc(ndev);
1476 	eq = apc->eqs[rxq_idx].eq;
1477 	eq->eq.work_done++;
1478 
1479 	if (!mbuf) {
1480 		return;
1481 	}
1482 
1483 	mbuf->m_flags |= M_PKTHDR;
1484 	mbuf->m_pkthdr.len = pkt_len;
1485 	mbuf->m_len = pkt_len;
1486 	mbuf->m_pkthdr.rcvif = ndev;
1487 
1488 	if ((ndev->if_capenable & IFCAP_RXCSUM ||
1489 	    ndev->if_capenable & IFCAP_RXCSUM_IPV6) &&
1490 	    (cqe->rx_iphdr_csum_succeed)) {
1491 		mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1492 		mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1493 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed) {
1494 			mbuf->m_pkthdr.csum_flags |=
1495 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1496 			mbuf->m_pkthdr.csum_data = 0xffff;
1497 
1498 			if (cqe->rx_tcp_csum_succeed)
1499 				do_lro = true;
1500 		}
1501 	}
1502 
1503 	if (cqe->rx_hashtype != 0) {
1504 		mbuf->m_pkthdr.flowid = cqe->ppi[0].pkt_hash;
1505 
1506 		uint16_t hashtype = cqe->rx_hashtype;
1507 		if (hashtype & NDIS_HASH_IPV4_MASK) {
1508 			hashtype &= NDIS_HASH_IPV4_MASK;
1509 			switch (hashtype) {
1510 			case NDIS_HASH_TCP_IPV4:
1511 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
1512 				break;
1513 			case NDIS_HASH_UDP_IPV4:
1514 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
1515 				break;
1516 			default:
1517 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
1518 			}
1519 		} else if (hashtype & NDIS_HASH_IPV6_MASK) {
1520 			hashtype &= NDIS_HASH_IPV6_MASK;
1521 			switch (hashtype) {
1522 			case NDIS_HASH_TCP_IPV6:
1523 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
1524 				break;
1525 			case NDIS_HASH_TCP_IPV6_EX:
1526 				M_HASHTYPE_SET(mbuf,
1527 				    M_HASHTYPE_RSS_TCP_IPV6_EX);
1528 				break;
1529 			case NDIS_HASH_UDP_IPV6:
1530 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
1531 				break;
1532 			case NDIS_HASH_UDP_IPV6_EX:
1533 				M_HASHTYPE_SET(mbuf,
1534 				    M_HASHTYPE_RSS_UDP_IPV6_EX);
1535 				break;
1536 			default:
1537 				M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
1538 			}
1539 		} else {
1540 			M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
1541 		}
1542 	} else {
1543 		mbuf->m_pkthdr.flowid = rxq_idx;
1544 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_NONE);
1545 	}
1546 
1547 	do_if_input = true;
1548 	if ((ndev->if_capenable & IFCAP_LRO) && do_lro) {
1549 		if (rxq->lro.lro_cnt != 0 &&
1550 		    tcp_lro_rx(&rxq->lro, mbuf, 0) == 0)
1551 			do_if_input = false;
1552 	}
1553 	if (do_if_input) {
1554 		ndev->if_input(ndev, mbuf);
1555 	}
1556 
1557 	counter_enter();
1558 	counter_u64_add_protected(rx_stats->packets, 1);
1559 	counter_u64_add_protected(apc->port_stats.rx_packets, 1);
1560 	counter_u64_add_protected(rx_stats->bytes, pkt_len);
1561 	counter_u64_add_protected(apc->port_stats.rx_bytes, pkt_len);
1562 	counter_exit();
1563 }
1564 
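/*
 * Process one RX CQE: swap a freshly mapped mbuf into the receive
 * buffer, pass the old mbuf up the stack (or drop the packet if the
 * refill failed), then advance the RQ tail and re-post the WQE.
 */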
1565 static void
1566 mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1567     struct gdma_comp *cqe)
1568 {
1569 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1570 	struct mana_recv_buf_oob *rxbuf_oob;
1571 	struct ifnet *ndev = rxq->ndev;
1572 	struct mana_port_context *apc;
1573 	struct mbuf *old_mbuf;
1574 	uint32_t curr, pktlen;
1575 	int err;
1576 
1577 	switch (oob->cqe_hdr.cqe_type) {
1578 	case CQE_RX_OKAY:
1579 		break;
1580 
1581 	case CQE_RX_TRUNCATED:
1582 		if_printf(ndev, "Dropped a truncated packet\n");
1583 		return;
1584 
1585 	case CQE_RX_COALESCED_4:
1586 		if_printf(ndev, "RX coalescing is unsupported\n");
1587 		return;
1588 
1589 	case CQE_RX_OBJECT_FENCE:
1590 		if_printf(ndev, "RX Fencing is unsupported\n");
1591 		return;
1592 
1593 	default:
1594 		if_printf(ndev, "Unknown RX CQE type = %d\n",
1595 		    oob->cqe_hdr.cqe_type);
1596 		return;
1597 	}
1598 
1599 	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1600 		return;
1601 
1602 	pktlen = oob->ppi[0].pkt_len;
1603 
1604 	if (pktlen == 0) {
1605 		/* Data packets should never have a packet length of zero */
1606 #if defined(__amd64__)
1607 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%lx\n",
1608 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1609 #else
1610 		if_printf(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1611 		    rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1612 #endif
1613 		return;
1614 	}
1615 
1616 	curr = rxq->buf_index;
1617 	rxbuf_oob = &rxq->rx_oobs[curr];
1618 	if (rxbuf_oob->wqe_inf.wqe_size_in_bu != 1) {
1619 		mana_err(NULL, "WARNING: Rx Incorrect complete "
1620 		    "WQE size %u\n",
1621 		    rxbuf_oob->wqe_inf.wqe_size_in_bu);
1622 	}
1623 
1624 	apc = if_getsoftc(ndev);
1625 
1626 	old_mbuf = rxbuf_oob->mbuf;
1627 
1628 	/* Unload DMA map for the old mbuf */
1629 	mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
1630 
1631 	/* Load a new mbuf to replace the old one */
1632 	err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
1633 	if (err) {
1634 		mana_dbg(NULL,
1635 		    "failed to load rx mbuf, err = %d, packet dropped.\n",
1636 		    err);
1637 		counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
1638 		/*
1639 		 * Failed to load new mbuf, rxbuf_oob->mbuf is still
1640 		 * pointing to the old one. Drop the packet.
1641 		 */
1642 		 old_mbuf = NULL;
1643 		 /* Reload the existing mbuf */
1644 		 mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
1645 	}
1646 
1647 	mana_rx_mbuf(old_mbuf, oob, rxq);
1648 
1649 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1650 
1651 	mana_post_pkt_rxq(rxq);
1652 }
1653 
1654 static void
1655 mana_poll_rx_cq(struct mana_cq *cq)
1656 {
1657 	struct gdma_comp *comp = cq->gdma_comp_buf;
1658 	int comp_read, i;
1659 
1660 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1661 	KASSERT(comp_read <= CQE_POLLING_BUFFER,
1662 	    ("comp_read %d greater than buf size %d",
1663 	    comp_read, CQE_POLLING_BUFFER));
1664 
1665 	for (i = 0; i < comp_read; i++) {
1666 		if (comp[i].is_sq == true) {
1667 			mana_err(NULL,
1668 			    "WARNING: CQE not for receive queue\n");
1669 			return;
1670 		}
1671 
1672 		/* verify recv cqe references the right rxq */
1673 		if (comp[i].wq_num != cq->rxq->gdma_id) {
1674 			mana_err(NULL,
1675 			    "WARNING: Received CQE %d not for "
1676 			    "this receive queue %d\n",
1677 			    comp[i].wq_num, cq->rxq->gdma_id);
1678 			return;
1679 		}
1680 
1681 		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1682 	}
1683 
1684 	tcp_lro_flush_all(&cq->rxq->lro);
1685 }
1686 
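/*
 * Completion handler shared by TX and RX CQs: poll the matching queue
 * type and re-arm the CQ when done.
 */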
1687 static void
1688 mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1689 {
1690 	struct mana_cq *cq = context;
1691 
1692 	KASSERT(cq->gdma_cq == gdma_queue,
1693 	    ("cq does not match %p, %p", cq->gdma_cq, gdma_queue));
1694 
1695 	if (cq->type == MANA_CQ_TYPE_RX) {
1696 		mana_poll_rx_cq(cq);
1697 	} else {
1698 		mana_poll_tx_cq(cq);
1699 	}
1700 
1701 	mana_gd_arm_cq(gdma_queue);
1702 }
1703 
1704 static void
1705 mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1706 {
1707 	struct gdma_dev *gd = apc->ac->gdma_dev;
1708 
1709 	if (!cq->gdma_cq)
1710 		return;
1711 
1712 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1713 }
1714 
1715 static void
1716 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1717 {
1718 	struct gdma_dev *gd = apc->ac->gdma_dev;
1719 	struct mana_send_buf_info *txbuf_info;
1720 	uint32_t pending_sends;
1721 	int i;
1722 
1723 	if (!txq->gdma_sq)
1724 		return;
1725 
1726 	if ((pending_sends = atomic_read(&txq->pending_sends)) > 0) {
1727 		mana_err(NULL,
1728 		    "WARNING: txq pending sends not zero: %u\n",
1729 		    pending_sends);
1730 	}
1731 
1732 	if (txq->next_to_use != txq->next_to_complete) {
1733 		mana_err(NULL,
1734 		    "WARNING: txq buf not completed, "
1735 		    "next use %u, next complete %u\n",
1736 		    txq->next_to_use, txq->next_to_complete);
1737 	}
1738 
1739 	/* Flush buf ring. Grab txq mtx lock */
1740 	if (txq->txq_br) {
1741 		mtx_lock(&txq->txq_mtx);
1742 		drbr_flush(apc->ndev, txq->txq_br);
1743 		mtx_unlock(&txq->txq_mtx);
1744 		buf_ring_free(txq->txq_br, M_DEVBUF);
1745 	}
1746 
1747 	/* Drain taskqueue */
1748 	if (txq->enqueue_tq) {
1749 		while (taskqueue_cancel(txq->enqueue_tq,
1750 		    &txq->enqueue_task, NULL)) {
1751 			taskqueue_drain(txq->enqueue_tq,
1752 			    &txq->enqueue_task);
1753 		}
1754 
1755 		taskqueue_free(txq->enqueue_tq);
1756 	}
1757 
1758 	if (txq->tx_buf_info) {
1759 		/* Free all mbufs which are still in-flight */
1760 		for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
1761 			txbuf_info = &txq->tx_buf_info[i];
1762 			if (txbuf_info->mbuf) {
1763 				mana_tx_unmap_mbuf(apc, txbuf_info);
1764 			}
1765 		}
1766 
1767 		free(txq->tx_buf_info, M_DEVBUF);
1768 	}
1769 
1770 	mana_free_counters((counter_u64_t *)&txq->stats,
1771 	    sizeof(txq->stats));
1772 
1773 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1774 
1775 	mtx_destroy(&txq->txq_mtx);
1776 }
1777 
1778 static void
1779 mana_destroy_txq(struct mana_port_context *apc)
1780 {
1781 	int i;
1782 
1783 	if (!apc->tx_qp)
1784 		return;
1785 
1786 	for (i = 0; i < apc->num_queues; i++) {
1787 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1788 
1789 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1790 
1791 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1792 	}
1793 
1794 	free(apc->tx_qp, M_DEVBUF);
1795 	apc->tx_qp = NULL;
1796 }
1797 
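/*
 * Create one SQ and its completion queue per TX queue, register each
 * WQ/CQ pair with the hardware, and set up the buf_ring, mutex,
 * enqueue taskqueue and statistics for every queue.
 */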
1798 static int
1799 mana_create_txq(struct mana_port_context *apc, struct ifnet *net)
1800 {
1801 	struct gdma_dev *gd = apc->ac->gdma_dev;
1802 	struct mana_obj_spec wq_spec;
1803 	struct mana_obj_spec cq_spec;
1804 	struct gdma_queue_spec spec;
1805 	struct gdma_context *gc;
1806 	struct mana_txq *txq;
1807 	struct mana_cq *cq;
1808 	uint32_t txq_size;
1809 	uint32_t cq_size;
1810 	int err;
1811 	int i;
1812 
1813 	apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
1814 	    M_DEVBUF, M_WAITOK | M_ZERO);
1815 	if (!apc->tx_qp)
1816 		return ENOMEM;
1817 
1818 	/*  The minimum size of the WQE is 32 bytes, hence
1819 	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1820 	 *  the SQ can store. This value is then used to size other queues
1821 	 *  to prevent overflow.
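	 *  For example, with an (assumed) MAX_SEND_BUFFERS_PER_QUEUE of 256,
	 *  txq_size would be 256 * 32 = 8192 bytes, i.e. two 4 KB pages.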
1822 	 */
1823 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1824 	KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
1825 	    ("txq size not page aligned"));
1826 
1827 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1828 	cq_size = ALIGN(cq_size, PAGE_SIZE);
1829 
1830 	gc = gd->gdma_context;
1831 
1832 	for (i = 0; i < apc->num_queues; i++) {
1833 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1834 
1835 		/* Create SQ */
1836 		txq = &apc->tx_qp[i].txq;
1837 
1838 		txq->ndev = net;
1839 		txq->vp_offset = apc->tx_vp_offset;
1840 		txq->idx = i;
1841 		txq->alt_txq_idx = i;
1842 
1843 		memset(&spec, 0, sizeof(spec));
1844 		spec.type = GDMA_SQ;
1845 		spec.monitor_avl_buf = true;
1846 		spec.queue_size = txq_size;
1847 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1848 		if (err)
1849 			goto out;
1850 
1851 		/* Create SQ's CQ */
1852 		cq = &apc->tx_qp[i].tx_cq;
1853 		cq->gdma_comp_buf = apc->eqs[i].cqe_poll;
1854 		cq->type = MANA_CQ_TYPE_TX;
1855 
1856 		cq->txq = txq;
1857 
1858 		memset(&spec, 0, sizeof(spec));
1859 		spec.type = GDMA_CQ;
1860 		spec.monitor_avl_buf = false;
1861 		spec.queue_size = cq_size;
1862 		spec.cq.callback = mana_cq_handler;
1863 		spec.cq.parent_eq = apc->eqs[i].eq;
1864 		spec.cq.context = cq;
1865 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1866 		if (err)
1867 			goto out;
1868 
1869 		memset(&wq_spec, 0, sizeof(wq_spec));
1870 		memset(&cq_spec, 0, sizeof(cq_spec));
1871 
1872 		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1873 		wq_spec.queue_size = txq->gdma_sq->queue_size;
1874 
1875 		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1876 		cq_spec.queue_size = cq->gdma_cq->queue_size;
1877 		cq_spec.modr_ctx_id = 0;
1878 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1879 
1880 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1881 		    &wq_spec, &cq_spec, &apc->tx_qp[i].tx_object);
1882 
1883 		if (err)
1884 			goto out;
1885 
1886 		txq->gdma_sq->id = wq_spec.queue_index;
1887 		cq->gdma_cq->id = cq_spec.queue_index;
1888 
1889 		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1890 		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1891 
1892 		txq->gdma_txq_id = txq->gdma_sq->id;
1893 
1894 		cq->gdma_id = cq->gdma_cq->id;
1895 
1896 		mana_dbg(NULL,
1897 		    "txq %d, txq gdma id %d, txq cq gdma id %d\n",
1898 		    i, txq->gdma_txq_id, cq->gdma_id);
1899 
1900 		if (cq->gdma_id >= gc->max_num_cqs) {
1901 			if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
1902 			return EINVAL;
1903 		}
1904 
1905 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1906 
1907 		/* Initialize tx specific data */
1908 		txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
1909 		    sizeof(struct mana_send_buf_info),
1910 		    M_DEVBUF, M_WAITOK | M_ZERO);
1911 		if (unlikely(txq->tx_buf_info == NULL)) {
1912 			if_printf(net,
1913 			    "Failed to allocate tx buf info for SQ %u\n",
1914 			    txq->gdma_sq->id);
1915 			err = ENOMEM;
1916 			goto out;
1917 		}
1918 
1919 
1920 		snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
1921 		    "mana:tx(%d)", i);
1922 		mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
1923 
1924 		txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
1925 		    M_DEVBUF, M_WAITOK, &txq->txq_mtx);
1926 		if (unlikely(txq->txq_br == NULL)) {
1927 			if_printf(net,
1928 			    "Failed to allocate buf ring for SQ %u\n",
1929 			    txq->gdma_sq->id);
1930 			err = ENOMEM;
1931 			goto out;
1932 		}
1933 
1934 		/* Allocate taskqueue for deferred send */
1935 		TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
1936 		txq->enqueue_tq = taskqueue_create_fast("mana_tx_enque",
1937 		    M_NOWAIT, taskqueue_thread_enqueue, &txq->enqueue_tq);
1938 		if (unlikely(txq->enqueue_tq == NULL)) {
1939 			if_printf(net,
1940 			    "Unable to create tx %d enqueue task queue\n", i);
1941 			err = ENOMEM;
1942 			goto out;
1943 		}
1944 		taskqueue_start_threads(&txq->enqueue_tq, 1, PI_NET,
1945 		    "mana txq %d", i);
1946 
1947 		mana_alloc_counters((counter_u64_t *)&txq->stats,
1948 		    sizeof(txq->stats));
1949 
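		/*
		 * Arm the CQ so that completions for this SQ generate
		 * events on its parent EQ.
		 */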
1950 		mana_gd_arm_cq(cq->gdma_cq);
1951 	}
1952 
1953 	return 0;
1954 out:
1955 	mana_destroy_txq(apc);
1956 	return err;
1957 }
1958 
1959 static void
1960 mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq,
1961     bool validate_state)
1962 {
1963 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1964 	struct mana_recv_buf_oob *rx_oob;
1965 	int i;
1966 
1967 	if (!rxq)
1968 		return;
1969 
1970 	if (validate_state) {
1971 		/*
1972 		 * XXX Cancel and drain cleanup task queue here.
1973 		 */
1974 		;
1975 	}
1976 
1977 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1978 
1979 	mana_deinit_cq(apc, &rxq->rx_cq);
1980 
1981 	mana_free_counters((counter_u64_t *)&rxq->stats,
1982 	    sizeof(rxq->stats));
1983 
1984 	/* Free LRO resources */
1985 	tcp_lro_free(&rxq->lro);
1986 
1987 	for (i = 0; i < rxq->num_rx_buf; i++) {
1988 		rx_oob = &rxq->rx_oobs[i];
1989 
1990 		if (rx_oob->mbuf)
1991 			mana_unload_rx_mbuf(apc, rxq, rx_oob, true);
1992 
1993 		bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
1994 	}
1995 
1996 	if (rxq->gdma_rq)
1997 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
1998 
1999 	free(rxq, M_DEVBUF);
2000 }
2001 
2002 #define MANA_WQE_HEADER_SIZE 16
2003 #define MANA_WQE_SGE_SIZE 16
2004 
2005 static int
2006 mana_alloc_rx_wqe(struct mana_port_context *apc,
2007     struct mana_rxq *rxq, uint32_t *rxq_size, uint32_t *cq_size)
2008 {
2009 	struct mana_recv_buf_oob *rx_oob;
2010 	uint32_t buf_idx;
2011 	int err;
2012 
2013 	if (rxq->datasize == 0 || rxq->datasize > PAGE_SIZE) {
2014 		mana_err(NULL,
2015 		    "WARNING: Invalid rxq datasize %u\n", rxq->datasize);
2016 	}
2017 
2018 	*rxq_size = 0;
2019 	*cq_size = 0;
2020 
2021 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2022 		rx_oob = &rxq->rx_oobs[buf_idx];
2023 		memset(rx_oob, 0, sizeof(*rx_oob));
2024 
2025 		err = bus_dmamap_create(apc->rx_buf_tag, 0,
2026 		    &rx_oob->dma_map);
2027 		if (err) {
2028 			mana_err(NULL,
2029 			    "Failed to create rx DMA map for buf %d\n",
2030 			    buf_idx);
2031 			return err;
2032 		}
2033 
2034 		err = mana_load_rx_mbuf(apc, rxq, rx_oob, true);
2035 		if (err) {
2036 			mana_err(NULL,
2037 			    "Failed to load rx mbuf for buf %d\n",
2038 			    buf_idx);
2039 			bus_dmamap_destroy(apc->rx_buf_tag, rx_oob->dma_map);
2040 			return err;
2041 		}
2042 
2043 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2044 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2045 		rx_oob->wqe_req.inline_oob_size = 0;
2046 		rx_oob->wqe_req.inline_oob_data = NULL;
2047 		rx_oob->wqe_req.flags = 0;
2048 		rx_oob->wqe_req.client_data_unit = 0;
2049 
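		/*
		 * Account for one receive WQE (header plus SGEs, rounded up
		 * to a 32-byte multiple) and one completion entry per buffer.
		 */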
2050 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2051 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2052 		*cq_size += COMP_ENTRY_SIZE;
2053 	}
2054 
2055 	return 0;
2056 }
2057 
2058 static int
2059 mana_push_wqe(struct mana_rxq *rxq)
2060 {
2061 	struct mana_recv_buf_oob *rx_oob;
2062 	uint32_t buf_idx;
2063 	int err;
2064 
2065 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2066 		rx_oob = &rxq->rx_oobs[buf_idx];
2067 
2068 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2069 		    &rx_oob->wqe_inf);
2070 		if (err)
2071 			return ENOSPC;
2072 	}
2073 
2074 	return 0;
2075 }
2076 
2077 static struct mana_rxq *
2078 mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
2079     struct mana_eq *eq, struct ifnet *ndev)
2080 {
2081 	struct gdma_dev *gd = apc->ac->gdma_dev;
2082 	struct mana_obj_spec wq_spec;
2083 	struct mana_obj_spec cq_spec;
2084 	struct gdma_queue_spec spec;
2085 	struct mana_cq *cq = NULL;
2086 	uint32_t cq_size, rq_size;
2087 	struct gdma_context *gc;
2088 	struct mana_rxq *rxq;
2089 	int err;
2090 
2091 	gc = gd->gdma_context;
2092 
2093 	rxq = malloc(sizeof(*rxq) +
2094 	    RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
2095 	    M_DEVBUF, M_WAITOK | M_ZERO);
2096 	if (!rxq)
2097 		return NULL;
2098 
2099 	rxq->ndev = ndev;
2100 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2101 	rxq->rxq_idx = rxq_idx;
2102 	/*
2103 	 * The minimum size is MCLBYTES (2048), the size of an mbuf cluster.
2104 	 * For now we only allow a maximum size of 4096.
2105 	 */
2106 	rxq->datasize = ALIGN(apc->frame_size, MCLBYTES);
2107 	if (rxq->datasize > MAX_FRAME_SIZE)
2108 		rxq->datasize = MAX_FRAME_SIZE;
2109 
2110 	mana_dbg(NULL, "Setting rxq %d datasize %d\n",
2111 	    rxq_idx, rxq->datasize);
2112 
2113 	rxq->rxobj = INVALID_MANA_HANDLE;
2114 
2115 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2116 	if (err)
2117 		goto out;
2118 
2119 	/* Create LRO for the RQ */
2120 	if (ndev->if_capenable & IFCAP_LRO) {
2121 		err = tcp_lro_init(&rxq->lro);
2122 		if (err) {
2123 			if_printf(ndev, "Failed to create LRO for rxq %d\n",
2124 			    rxq_idx);
2125 		} else {
2126 			rxq->lro.ifp = ndev;
2127 		}
2128 	}
2129 
2130 	mana_alloc_counters((counter_u64_t *)&rxq->stats,
2131 	    sizeof(rxq->stats));
2132 
2133 	rq_size = ALIGN(rq_size, PAGE_SIZE);
2134 	cq_size = ALIGN(cq_size, PAGE_SIZE);
2135 
2136 	/* Create RQ */
2137 	memset(&spec, 0, sizeof(spec));
2138 	spec.type = GDMA_RQ;
2139 	spec.monitor_avl_buf = true;
2140 	spec.queue_size = rq_size;
2141 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2142 	if (err)
2143 		goto out;
2144 
2145 	/* Create RQ's CQ */
2146 	cq = &rxq->rx_cq;
2147 	cq->gdma_comp_buf = eq->cqe_poll;
2148 	cq->type = MANA_CQ_TYPE_RX;
2149 	cq->rxq = rxq;
2150 
2151 	memset(&spec, 0, sizeof(spec));
2152 	spec.type = GDMA_CQ;
2153 	spec.monitor_avl_buf = false;
2154 	spec.queue_size = cq_size;
2155 	spec.cq.callback = mana_cq_handler;
2156 	spec.cq.parent_eq = eq->eq;
2157 	spec.cq.context = cq;
2158 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2159 	if (err)
2160 		goto out;
2161 
2162 	memset(&wq_spec, 0, sizeof(wq_spec));
2163 	memset(&cq_spec, 0, sizeof(cq_spec));
2164 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
2165 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2166 
2167 	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
2168 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2169 	cq_spec.modr_ctx_id = 0;
2170 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2171 
2172 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2173 	    &wq_spec, &cq_spec, &rxq->rxobj);
2174 	if (err)
2175 		goto out;
2176 
2177 	rxq->gdma_rq->id = wq_spec.queue_index;
2178 	cq->gdma_cq->id = cq_spec.queue_index;
2179 
2180 	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
2181 	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
2182 
2183 	rxq->gdma_id = rxq->gdma_rq->id;
2184 	cq->gdma_id = cq->gdma_cq->id;
2185 
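	/* Post the pre-built receive WQEs and ring the RQ doorbell. */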
2186 	err = mana_push_wqe(rxq);
2187 	if (err)
2188 		goto out;
2189 
2190 	if (cq->gdma_id >= gc->max_num_cqs) {
		err = EINVAL;
2191 		goto out;
	}
2192 
2193 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2194 
2195 	mana_gd_arm_cq(cq->gdma_cq);
2196 out:
2197 	if (!err)
2198 		return rxq;
2199 
2200 	if_printf(ndev, "Failed to create RXQ: err = %d\n", err);
2201 
2202 	mana_destroy_rxq(apc, rxq, false);
2203 
2204 	if (cq)
2205 		mana_deinit_cq(apc, cq);
2206 
2207 	return NULL;
2208 }
2209 
2210 static int
2211 mana_add_rx_queues(struct mana_port_context *apc, struct ifnet *ndev)
2212 {
2213 	struct mana_rxq *rxq;
2214 	int err = 0;
2215 	int i;
2216 
2217 	for (i = 0; i < apc->num_queues; i++) {
2218 		rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
2219 		if (!rxq) {
2220 			err = ENOMEM;
2221 			goto out;
2222 		}
2223 
2224 		apc->rxqs[i] = rxq;
2225 	}
2226 
2227 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2228 out:
2229 	return err;
2230 }
2231 
2232 static void
2233 mana_destroy_vport(struct mana_port_context *apc)
2234 {
2235 	struct mana_rxq *rxq;
2236 	uint32_t rxq_idx;
2237 	struct mana_cq *rx_cq;
2238 	struct gdma_queue *cq, *eq;
2239 
2240 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2241 		rxq = apc->rxqs[rxq_idx];
2242 		if (!rxq)
2243 			continue;
2244 
2245 		rx_cq = &rxq->rx_cq;
2246 		if ((cq = rx_cq->gdma_cq) != NULL) {
2247 			eq = cq->cq.parent;
2248 			mana_drain_eq_task(eq);
2249 		}
2250 
2251 		mana_destroy_rxq(apc, rxq, true);
2252 		apc->rxqs[rxq_idx] = NULL;
2253 	}
2254 
2255 	mana_destroy_txq(apc);
2256 }
2257 
2258 static int
2259 mana_create_vport(struct mana_port_context *apc, struct ifnet *net)
2260 {
2261 	struct gdma_dev *gd = apc->ac->gdma_dev;
2262 	int err;
2263 
2264 	apc->default_rxobj = INVALID_MANA_HANDLE;
2265 
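	/* Bind the vPort to this device's protection domain and doorbell. */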
2266 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2267 	if (err)
2268 		return err;
2269 
2270 	return mana_create_txq(apc, net);
2271 }
2272 
2273 
2274 static void
mana_rss_table_init(struct mana_port_context *apc)
2275 {
2276 	int i;
2277 
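	/* Spread the indirection table evenly across the RX queues. */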
2278 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2279 		apc->indir_table[i] = i % apc->num_queues;
2280 }
2281 
2282 int
mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2283     bool update_hash, bool update_tab)
2284 {
2285 	uint32_t queue_idx;
2286 	int i;
2287 
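	/*
	 * Rebuild the table of RX object handles from the current
	 * indirection table before programming the vPort steering.
	 */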
2288 	if (update_tab) {
2289 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2290 			queue_idx = apc->indir_table[i];
2291 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2292 		}
2293 	}
2294 
2295 	return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2296 }
2297 
2298 static int
2299 mana_init_port(struct ifnet *ndev)
2300 {
2301 	struct mana_port_context *apc = if_getsoftc(ndev);
2302 	uint32_t max_txq, max_rxq, max_queues;
2303 	int port_idx = apc->port_idx;
2304 	uint32_t num_indirect_entries;
2305 	int err;
2306 
2307 	err = mana_init_port_context(apc);
2308 	if (err)
2309 		return err;
2310 
2311 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2312 	    &num_indirect_entries);
2313 	if (err) {
2314 		if_printf(ndev,
		    "Failed to query info for vPort %d\n", port_idx);
2315 		goto reset_apc;
2316 	}
2317 
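	/* Cap the queue count by what the vPort supports in each direction. */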
2318 	max_queues = min_t(uint32_t, max_txq, max_rxq);
2319 	if (apc->max_queues > max_queues)
2320 		apc->max_queues = max_queues;
2321 
2322 	if (apc->num_queues > apc->max_queues)
2323 		apc->num_queues = apc->max_queues;
2324 
2325 	return 0;
2326 
2327 reset_apc:
2328 	bus_dma_tag_destroy(apc->rx_buf_tag);
2329 	apc->rx_buf_tag = NULL;
2330 	free(apc->rxqs, M_DEVBUF);
2331 	apc->rxqs = NULL;
2332 	return err;
2333 }
2334 
2335 int
2336 mana_alloc_queues(struct ifnet *ndev)
2337 {
2338 	struct mana_port_context *apc = if_getsoftc(ndev);
2339 	struct gdma_dev *gd = apc->ac->gdma_dev;
2340 	int err;
2341 
2342 	err = mana_create_eq(apc);
2343 	if (err)
2344 		return err;
2345 
2346 	err = mana_create_vport(apc, ndev);
2347 	if (err)
2348 		goto destroy_eq;
2349 
2350 	err = mana_add_rx_queues(apc, ndev);
2351 	if (err)
2352 		goto destroy_vport;
2353 
2354 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2355 
2356 	mana_rss_table_init(apc);
2357 
2358 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2359 	if (err)
2360 		goto destroy_vport;
2361 
2362 	return 0;
2363 
2364 destroy_vport:
2365 	mana_destroy_vport(apc);
2366 destroy_eq:
2367 	mana_destroy_eq(gd->gdma_context, apc);
2368 	return err;
2369 }
2370 
2371 static int
2372 mana_up(struct mana_port_context *apc)
2373 {
2374 	int err;
2375 
2376 	mana_dbg(NULL, "mana_up called\n");
2377 
2378 	err = mana_alloc_queues(apc->ndev);
2379 	if (err) {
2380 		mana_err(NULL, "Failed to allocate mana queues: %d\n", err);
2381 		return err;
2382 	}
2383 
2384 	/* Add queue specific sysctl */
2385 	mana_sysctl_add_queues(apc);
2386 
2387 	apc->port_is_up = true;
2388 
2389 	/* Ensure port state updated before txq state */
2390 	wmb();
2391 
2392 	if_link_state_change(apc->ndev, LINK_STATE_UP);
2393 	if_setdrvflagbits(apc->ndev, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2394 
2395 	return 0;
2396 }
2397 
2399 static void
2400 mana_init(void *arg)
2401 {
2402 	struct mana_port_context *apc = (struct mana_port_context *)arg;
2403 
2404 	MANA_APC_LOCK_LOCK(apc);
2405 	if (!apc->port_is_up) {
2406 		mana_up(apc);
2407 	}
2408 	MANA_APC_LOCK_UNLOCK(apc);
2409 }
2410 
2411 static int
2412 mana_dealloc_queues(struct ifnet *ndev)
2413 {
2414 	struct mana_port_context *apc = if_getsoftc(ndev);
2415 	struct mana_txq *txq;
2416 	int i, err;
2417 
2418 	if (apc->port_is_up)
2419 		return EINVAL;
2420 
2421 	/* No packet can be transmitted now since apc->port_is_up is false.
2422 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2423 	 * a txq, because it may not see apc->port_is_up being cleared to
2424 	 * false in time, but that doesn't matter since mana_start_xmit()
2425 	 * drops any new packets while apc->port_is_up is false.
2426 	 *
2427 	 * Drain all the in-flight TX packets.
2428 	 */
2429 	for (i = 0; i < apc->num_queues; i++) {
2430 		txq = &apc->tx_qp[i].txq;
2431 
2432 		struct mana_cq *tx_cq = &apc->tx_qp[i].tx_cq;
2433 		struct gdma_queue *eq = NULL;
2434 		if (tx_cq->gdma_cq)
2435 			eq = tx_cq->gdma_cq->cq.parent;
2436 		if (eq) {
2437 			/* Stop EQ interrupt */
2438 			eq->eq.do_not_ring_db = true;
2439 			/* Schedule a cleanup task */
2440 			taskqueue_enqueue(eq->eq.cleanup_tq,
2441 			    &eq->eq.cleanup_task);
2442 		}
2443 
2444 		while (atomic_read(&txq->pending_sends) > 0)
2445 			usleep_range(1000, 2000);
2446 	}
2447 
2448 	/* The queues can no longer be woken up at this point, because
2449 	 * mana_poll_tx_cq() can no longer be running.
2450 	 */
2451 
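	/* Disable RX on the vPort before tearing down the queues. */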
2452 	apc->rss_state = TRI_STATE_FALSE;
2453 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2454 	if (err) {
2455 		if_printf(ndev, "Failed to disable vPort: %d\n", err);
2456 		return err;
2457 	}
2458 
2459 	/* TODO: Implement RX fencing */
2460 	gdma_msleep(1000);
2461 
2462 	mana_destroy_vport(apc);
2463 
2464 	mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);
2465 
2466 	return 0;
2467 }
2468 
2469 static int
2470 mana_down(struct mana_port_context *apc)
2471 {
2472 	int err = 0;
2473 
2474 	apc->port_st_save = apc->port_is_up;
2475 	apc->port_is_up = false;
2476 
2477 	/* Ensure port state updated before txq state */
2478 	wmb();
2479 
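	/* Only tear down the data path if the port was actually up. */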
2480 	if (apc->port_st_save) {
2481 		if_setdrvflagbits(apc->ndev, IFF_DRV_OACTIVE,
2482 		    IFF_DRV_RUNNING);
2483 		if_link_state_change(apc->ndev, LINK_STATE_DOWN);
2484 
2485 		mana_sysctl_free_queues(apc);
2486 
2487 		err = mana_dealloc_queues(apc->ndev);
2488 		if (err) {
2489 			if_printf(apc->ndev,
2490 			    "Failed to bring down mana interface: %d\n", err);
2491 		}
2492 	}
2493 
2494 	return err;
2495 }
2496 
2497 int
2498 mana_detach(struct ifnet *ndev)
2499 {
2500 	struct mana_port_context *apc = if_getsoftc(ndev);
2501 	int err;
2502 
2503 	ether_ifdetach(ndev);
2504 
2505 	if (!apc)
2506 		return 0;
2507 
2508 	MANA_APC_LOCK_LOCK(apc);
2509 	err = mana_down(apc);
2510 	MANA_APC_LOCK_UNLOCK(apc);
2511 
2512 	mana_cleanup_port_context(apc);
2513 
2514 	MANA_APC_LOCK_DESTROY(apc);
2515 
2516 	free(apc, M_DEVBUF);
2517 
2518 	return err;
2519 }
2520 
2521 static int
2522 mana_probe_port(struct mana_context *ac, int port_idx,
2523     struct ifnet **ndev_storage)
2524 {
2525 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2526 	struct mana_port_context *apc;
2527 	struct ifnet *ndev;
2528 	int err;
2529 
2530 	ndev = if_alloc_dev(IFT_ETHER, gc->dev);
2531 	if (!ndev) {
2532 		mana_err(NULL, "Failed to allocate ifnet struct\n");
2533 		return ENOMEM;
2534 	}
2535 
2536 	*ndev_storage = ndev;
2537 
2538 	apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
2539 	if (!apc) {
2540 		mana_err(NULL, "Failed to allocate port context\n");
2541 		err = ENOMEM;
2542 		goto free_net;
2543 	}
2544 
2545 	apc->ac = ac;
2546 	apc->ndev = ndev;
2547 	apc->max_queues = gc->max_num_queues;
2548 	apc->num_queues = min_t(unsigned int,
2549 	    gc->max_num_queues, MANA_MAX_NUM_QUEUES);
2550 	apc->port_handle = INVALID_MANA_HANDLE;
2551 	apc->port_idx = port_idx;
2552 	apc->frame_size = DEFAULT_FRAME_SIZE;
2553 
2554 	MANA_APC_LOCK_INIT(apc);
2555 
2556 	if_initname(ndev, device_get_name(gc->dev), port_idx);
2557 	if_setdev(ndev, gc->dev);
2558 	if_setsoftc(ndev, apc);
2559 
2560 	if_setflags(ndev, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2561 	if_setinitfn(ndev, mana_init);
2562 	if_settransmitfn(ndev, mana_start_xmit);
2563 	if_setqflushfn(ndev, mana_qflush);
2564 	if_setioctlfn(ndev, mana_ioctl);
2565 	if_setgetcounterfn(ndev, mana_get_counter);
2566 
2567 	if_setmtu(ndev, ETHERMTU);
2568 	if_setbaudrate(ndev, IF_Gbps(100));
2569 
2570 	mana_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
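	/* Initialize the RSS hash key for this port. */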
2571 
2572 	err = mana_init_port(ndev);
2573 	if (err)
2574 		goto reset_apc;
2575 
2576 	ndev->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6;
2577 	ndev->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6;
2578 	ndev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
2579 
2580 	ndev->if_capabilities |= IFCAP_LRO | IFCAP_LINKSTATE;
2581 
2582 	/* Enable all available capabilities by default. */
2583 	ndev->if_capenable = ndev->if_capabilities;
2584 
2585 	/* TSO parameters */
2586 	ndev->if_hw_tsomax = MAX_MBUF_FRAGS * MANA_TSO_MAXSEG_SZ -
2587 	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2588 	ndev->if_hw_tsomaxsegcount = MAX_MBUF_FRAGS;
2589 	ndev->if_hw_tsomaxsegsize = PAGE_SIZE;
2590 
2591 	ifmedia_init(&apc->media, IFM_IMASK,
2592 	    mana_ifmedia_change, mana_ifmedia_status);
2593 	ifmedia_add(&apc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2594 	ifmedia_set(&apc->media, IFM_ETHER | IFM_AUTO);
2595 
2596 	ether_ifattach(ndev, apc->mac_addr);
2597 
2598 	/* Initialize statistics */
2599 	mana_alloc_counters((counter_u64_t *)&apc->port_stats,
2600 	    sizeof(struct mana_port_stats));
2601 	mana_sysctl_add_port(apc);
2602 
2603 	/* Tell the stack that the interface is not active */
2604 	if_setdrvflagbits(ndev, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
2605 
2606 	return 0;
2607 
2608 reset_apc:
2609 	free(apc, M_DEVBUF);
2610 free_net:
2611 	*ndev_storage = NULL;
2612 	if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2613 	if_free(ndev);
2614 	return err;
2615 }
2616 
2617 int
mana_probe(struct gdma_dev *gd)
2618 {
2619 	struct gdma_context *gc = gd->gdma_context;
2620 	device_t dev = gc->dev;
2621 	struct mana_context *ac;
2622 	int err;
2623 	int i;
2624 
2625 	device_printf(dev, "%s protocol version: %d.%d.%d\n", DEVICE_NAME,
2626 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2627 
2628 	err = mana_gd_register_device(gd);
2629 	if (err)
2630 		return err;
2631 
2632 	ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
2633 	if (!ac)
2634 		return ENOMEM;
2635 
2636 	ac->gdma_dev = gd;
2637 	ac->num_ports = 1;
2638 	gd->driver_data = ac;
2639 
2640 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2641 	    MANA_MICRO_VERSION, &ac->num_ports);
2642 	if (err)
2643 		goto out;
2644 
2645 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2646 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2647 
2648 	for (i = 0; i < ac->num_ports; i++) {
2649 		err = mana_probe_port(ac, i, &ac->ports[i]);
2650 		if (err) {
2651 			device_printf(dev,
2652 			    "Failed to probe mana port %d\n", i);
2653 			break;
2654 		}
2655 	}
2656 
2657 out:
2658 	if (err)
2659 		mana_remove(gd);
2660 
2661 	return err;
2662 }
2663 
2664 void
2665 mana_remove(struct gdma_dev *gd)
2666 {
2667 	struct gdma_context *gc = gd->gdma_context;
2668 	struct mana_context *ac = gd->driver_data;
2669 	device_t dev = gc->dev;
2670 	struct ifnet *ndev;
2671 	int i;
2672 
2673 	for (i = 0; i < ac->num_ports; i++) {
2674 		ndev = ac->ports[i];
2675 		if (!ndev) {
2676 			if (i == 0)
2677 				device_printf(dev, "No net device to remove\n");
2678 			goto out;
2679 		}
2680 
2681 		mana_detach(ndev);
2682 
2683 		if_free(ndev);
2684 	}
2685 out:
2686 	mana_gd_deregister_device(gd);
2687 	gd->driver_data = NULL;
2688 	gd->gdma_context = NULL;
2689 	free(ac, M_DEVBUF);
2690 }
2691