1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <uapi/linux/bpf.h>
5 
6 #include <linux/inetdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/filter.h>
10 #include <linux/mm.h>
11 
12 #include <net/checksum.h>
13 #include <net/ip6_checksum.h>
14 
15 #include "mana.h"
16 
17 /* Microsoft Azure Network Adapter (MANA) functions */
18 
19 static int mana_open(struct net_device *ndev)
20 {
21 	struct mana_port_context *apc = netdev_priv(ndev);
22 	int err;
23 
24 	err = mana_alloc_queues(ndev);
25 	if (err)
26 		return err;
27 
28 	apc->port_is_up = true;
29 
30 	/* Ensure port state updated before txq state */
31 	smp_wmb();
32 
33 	netif_carrier_on(ndev);
34 	netif_tx_wake_all_queues(ndev);
35 
36 	return 0;
37 }
38 
39 static int mana_close(struct net_device *ndev)
40 {
41 	struct mana_port_context *apc = netdev_priv(ndev);
42 
43 	if (!apc->port_is_up)
44 		return 0;
45 
46 	return mana_detach(ndev, true);
47 }
48 
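/* True if the send WQ has room for one more maximum-size TX WQE. */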
49 static bool mana_can_tx(struct gdma_queue *wq)
50 {
51 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
52 }
53 
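/* Return the L4 protocol (IPPROTO_TCP or IPPROTO_UDP) when the HW can offload
 * the checksum for this packet, or 0 when no checksum offload is possible.
 */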
54 static unsigned int mana_checksum_info(struct sk_buff *skb)
55 {
56 	if (skb->protocol == htons(ETH_P_IP)) {
57 		struct iphdr *ip = ip_hdr(skb);
58 
59 		if (ip->protocol == IPPROTO_TCP)
60 			return IPPROTO_TCP;
61 
62 		if (ip->protocol == IPPROTO_UDP)
63 			return IPPROTO_UDP;
64 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
65 		struct ipv6hdr *ip6 = ipv6_hdr(skb);
66 
67 		if (ip6->nexthdr == IPPROTO_TCP)
68 			return IPPROTO_TCP;
69 
70 		if (ip6->nexthdr == IPPROTO_UDP)
71 			return IPPROTO_UDP;
72 	}
73 
74 	/* No csum offloading */
75 	return 0;
76 }
77 
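/* DMA-map the skb for transmission: the linear part goes into sgl[0], each
 * page fragment into the following entries. On failure, unmap whatever was
 * already mapped.
 */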
78 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
79 			struct mana_tx_package *tp)
80 {
81 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
82 	struct gdma_dev *gd = apc->ac->gdma_dev;
83 	struct gdma_context *gc;
84 	struct device *dev;
85 	skb_frag_t *frag;
86 	dma_addr_t da;
87 	int i;
88 
89 	gc = gd->gdma_context;
90 	dev = gc->dev;
91 	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
92 
93 	if (dma_mapping_error(dev, da))
94 		return -ENOMEM;
95 
96 	ash->dma_handle[0] = da;
97 	ash->size[0] = skb_headlen(skb);
98 
99 	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
100 	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
101 	tp->wqe_req.sgl[0].size = ash->size[0];
102 
103 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
104 		frag = &skb_shinfo(skb)->frags[i];
105 		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
106 				      DMA_TO_DEVICE);
107 
108 		if (dma_mapping_error(dev, da))
109 			goto frag_err;
110 
111 		ash->dma_handle[i + 1] = da;
112 		ash->size[i + 1] = skb_frag_size(frag);
113 
114 		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
115 		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
116 		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
117 	}
118 
119 	return 0;
120 
121 frag_err:
122 	for (i = i - 1; i >= 0; i--)
123 		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
124 			       DMA_TO_DEVICE);
125 
126 	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
127 
128 	return -ENOMEM;
129 }
130 
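/* Transmit path: fill in the per-packet out-of-band area (short or long
 * format depending on the vPort offset), set up GSO/checksum offload
 * metadata, DMA-map the skb, post the work request to the SQ and ring the
 * doorbell.
 */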
131 int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
132 {
133 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
134 	struct mana_port_context *apc = netdev_priv(ndev);
135 	u16 txq_idx = skb_get_queue_mapping(skb);
136 	struct gdma_dev *gd = apc->ac->gdma_dev;
137 	bool ipv4 = false, ipv6 = false;
138 	struct mana_tx_package pkg = {};
139 	struct netdev_queue *net_txq;
140 	struct mana_stats_tx *tx_stats;
141 	struct gdma_queue *gdma_sq;
142 	unsigned int csum_type;
143 	struct mana_txq *txq;
144 	struct mana_cq *cq;
145 	int err, len;
146 
147 	if (unlikely(!apc->port_is_up))
148 		goto tx_drop;
149 
150 	if (skb_cow_head(skb, MANA_HEADROOM))
151 		goto tx_drop_count;
152 
153 	txq = &apc->tx_qp[txq_idx].txq;
154 	gdma_sq = txq->gdma_sq;
155 	cq = &apc->tx_qp[txq_idx].tx_cq;
156 
157 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
158 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
159 
160 	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
161 		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
162 		pkt_fmt = MANA_LONG_PKT_FMT;
163 	} else {
164 		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
165 	}
166 
167 	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
168 
169 	if (pkt_fmt == MANA_SHORT_PKT_FMT)
170 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
171 	else
172 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
173 
174 	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
175 	pkg.wqe_req.flags = 0;
176 	pkg.wqe_req.client_data_unit = 0;
177 
178 	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
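	/* 30 is assumed here to be the maximum number of SGL entries that fit
	 * in a single TX WQE.
	 */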
179 	WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
180 
181 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
182 		pkg.wqe_req.sgl = pkg.sgl_array;
183 	} else {
184 		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
185 					    sizeof(struct gdma_sge),
186 					    GFP_ATOMIC);
187 		if (!pkg.sgl_ptr)
188 			goto tx_drop_count;
189 
190 		pkg.wqe_req.sgl = pkg.sgl_ptr;
191 	}
192 
193 	if (skb->protocol == htons(ETH_P_IP))
194 		ipv4 = true;
195 	else if (skb->protocol == htons(ETH_P_IPV6))
196 		ipv6 = true;
197 
198 	if (skb_is_gso(skb)) {
199 		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
200 		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
201 
202 		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
203 		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
204 		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
205 
206 		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
207 		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
208 		if (ipv4) {
209 			ip_hdr(skb)->tot_len = 0;
210 			ip_hdr(skb)->check = 0;
211 			tcp_hdr(skb)->check =
212 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
213 						   ip_hdr(skb)->daddr, 0,
214 						   IPPROTO_TCP, 0);
215 		} else {
216 			ipv6_hdr(skb)->payload_len = 0;
217 			tcp_hdr(skb)->check =
218 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
219 						 &ipv6_hdr(skb)->daddr, 0,
220 						 IPPROTO_TCP, 0);
221 		}
222 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
223 		csum_type = mana_checksum_info(skb);
224 
225 		if (csum_type == IPPROTO_TCP) {
226 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
227 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
228 
229 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
230 			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
231 
232 		} else if (csum_type == IPPROTO_UDP) {
233 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
234 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
235 
236 			pkg.tx_oob.s_oob.comp_udp_csum = 1;
237 		} else {
238 			/* Can't do offload of this type of checksum */
239 			if (skb_checksum_help(skb))
240 				goto free_sgl_ptr;
241 		}
242 	}
243 
244 	if (mana_map_skb(skb, apc, &pkg))
245 		goto free_sgl_ptr;
246 
247 	skb_queue_tail(&txq->pending_skbs, skb);
248 
249 	len = skb->len;
250 	net_txq = netdev_get_tx_queue(ndev, txq_idx);
251 
252 	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
253 					(struct gdma_posted_wqe_info *)skb->cb);
254 	if (!mana_can_tx(gdma_sq)) {
255 		netif_tx_stop_queue(net_txq);
256 		apc->eth_stats.stop_queue++;
257 	}
258 
259 	if (err) {
260 		(void)skb_dequeue_tail(&txq->pending_skbs);
261 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
262 		err = NETDEV_TX_BUSY;
263 		goto tx_busy;
264 	}
265 
266 	err = NETDEV_TX_OK;
267 	atomic_inc(&txq->pending_sends);
268 
269 	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
270 
271 	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
272 	skb = NULL;
273 
274 	tx_stats = &txq->stats;
275 	u64_stats_update_begin(&tx_stats->syncp);
276 	tx_stats->packets++;
277 	tx_stats->bytes += len;
278 	u64_stats_update_end(&tx_stats->syncp);
279 
280 tx_busy:
281 	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
282 		netif_tx_wake_queue(net_txq);
283 		apc->eth_stats.wake_queue++;
284 	}
285 
286 	kfree(pkg.sgl_ptr);
287 	return err;
288 
289 free_sgl_ptr:
290 	kfree(pkg.sgl_ptr);
291 tx_drop_count:
292 	ndev->stats.tx_dropped++;
293 tx_drop:
294 	dev_kfree_skb_any(skb);
295 	return NETDEV_TX_OK;
296 }
297 
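/* Aggregate the per-queue RX/TX packet and byte counters into the standard
 * rtnl_link_stats64, using the u64_stats sequence counters for consistency.
 */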
298 static void mana_get_stats64(struct net_device *ndev,
299 			     struct rtnl_link_stats64 *st)
300 {
301 	struct mana_port_context *apc = netdev_priv(ndev);
302 	unsigned int num_queues = apc->num_queues;
303 	struct mana_stats_rx *rx_stats;
304 	struct mana_stats_tx *tx_stats;
305 	unsigned int start;
306 	u64 packets, bytes;
307 	int q;
308 
309 	if (!apc->port_is_up)
310 		return;
311 
312 	netdev_stats_to_stats64(st, &ndev->stats);
313 
314 	for (q = 0; q < num_queues; q++) {
315 		rx_stats = &apc->rxqs[q]->stats;
316 
317 		do {
318 			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
319 			packets = rx_stats->packets;
320 			bytes = rx_stats->bytes;
321 		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
322 
323 		st->rx_packets += packets;
324 		st->rx_bytes += bytes;
325 	}
326 
327 	for (q = 0; q < num_queues; q++) {
328 		tx_stats = &apc->tx_qp[q].txq.stats;
329 
330 		do {
331 			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
332 			packets = tx_stats->packets;
333 			bytes = tx_stats->bytes;
334 		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
335 
336 		st->tx_packets += packets;
337 		st->tx_bytes += bytes;
338 	}
339 }
340 
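/* Pick a TX queue from the RSS indirection table based on the skb hash, and
 * cache the choice in the socket when possible.
 */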
341 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
342 			     int old_q)
343 {
344 	struct mana_port_context *apc = netdev_priv(ndev);
345 	u32 hash = skb_get_hash(skb);
346 	struct sock *sk = skb->sk;
347 	int txq;
348 
349 	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
350 
351 	if (txq != old_q && sk && sk_fullsock(sk) &&
352 	    rcu_access_pointer(sk->sk_dst_cache))
353 		sk_tx_queue_set(sk, txq);
354 
355 	return txq;
356 }
357 
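/* ndo_select_queue: reuse the queue cached in the socket when still valid,
 * otherwise fall back to the recorded RX queue or the RSS indirection table.
 */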
358 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
359 			     struct net_device *sb_dev)
360 {
361 	int txq;
362 
363 	if (ndev->real_num_tx_queues == 1)
364 		return 0;
365 
366 	txq = sk_tx_queue_get(skb->sk);
367 
368 	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
369 		if (skb_rx_queue_recorded(skb))
370 			txq = skb_get_rx_queue(skb);
371 		else
372 			txq = mana_get_tx_queue(ndev, skb, txq);
373 	}
374 
375 	return txq;
376 }
377 
378 static const struct net_device_ops mana_devops = {
379 	.ndo_open		= mana_open,
380 	.ndo_stop		= mana_close,
381 	.ndo_select_queue	= mana_select_queue,
382 	.ndo_start_xmit		= mana_start_xmit,
383 	.ndo_validate_addr	= eth_validate_addr,
384 	.ndo_get_stats64	= mana_get_stats64,
385 	.ndo_bpf		= mana_bpf,
386 	.ndo_xdp_xmit		= mana_xdp_xmit,
387 };
388 
389 static void mana_cleanup_port_context(struct mana_port_context *apc)
390 {
391 	kfree(apc->rxqs);
392 	apc->rxqs = NULL;
393 }
394 
395 static int mana_init_port_context(struct mana_port_context *apc)
396 {
397 	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
398 			    GFP_KERNEL);
399 
400 	return !apc->rxqs ? -ENOMEM : 0;
401 }
402 
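/* Send a MANA request over the GDMA channel and verify that the response
 * carries the expected device ID and activity ID.
 */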
403 static int mana_send_request(struct mana_context *ac, void *in_buf,
404 			     u32 in_len, void *out_buf, u32 out_len)
405 {
406 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
407 	struct gdma_resp_hdr *resp = out_buf;
408 	struct gdma_req_hdr *req = in_buf;
409 	struct device *dev = gc->dev;
410 	static atomic_t activity_id;
411 	int err;
412 
413 	req->dev_id = gc->mana.dev_id;
414 	req->activity_id = atomic_inc_return(&activity_id);
415 
416 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
417 				   out_buf);
418 	if (err || resp->status) {
419 		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
420 			err, resp->status);
421 		return err ? err : -EPROTO;
422 	}
423 
424 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
425 	    req->activity_id != resp->activity_id) {
426 		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
427 			req->dev_id.as_uint32, resp->dev_id.as_uint32,
428 			req->activity_id, resp->activity_id);
429 		return -EPROTO;
430 	}
431 
432 	return 0;
433 }
434 
435 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
436 				const enum mana_command_code expected_code,
437 				const u32 min_size)
438 {
439 	if (resp_hdr->response.msg_type != expected_code)
440 		return -EPROTO;
441 
442 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
443 		return -EPROTO;
444 
445 	if (resp_hdr->response.msg_size < min_size)
446 		return -EPROTO;
447 
448 	return 0;
449 }
450 
451 static int mana_pf_register_hw_vport(struct mana_port_context *apc)
452 {
453 	struct mana_register_hw_vport_resp resp = {};
454 	struct mana_register_hw_vport_req req = {};
455 	int err;
456 
457 	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
458 			     sizeof(req), sizeof(resp));
459 	req.attached_gfid = 1;
460 	req.is_pf_default_vport = 1;
461 	req.allow_all_ether_types = 1;
462 
463 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
464 				sizeof(resp));
465 	if (err) {
466 		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
467 		return err;
468 	}
469 
470 	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
471 				   sizeof(resp));
472 	if (err || resp.hdr.status) {
473 		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
474 			   err, resp.hdr.status);
475 		return err ? err : -EPROTO;
476 	}
477 
478 	apc->port_handle = resp.hw_vport_handle;
479 	return 0;
480 }
481 
482 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
483 {
484 	struct mana_deregister_hw_vport_resp resp = {};
485 	struct mana_deregister_hw_vport_req req = {};
486 	int err;
487 
488 	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
489 			     sizeof(req), sizeof(resp));
490 	req.hw_vport_handle = apc->port_handle;
491 
492 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
493 				sizeof(resp));
494 	if (err) {
495 		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
496 			   err);
497 		return;
498 	}
499 
500 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
501 				   sizeof(resp));
502 	if (err || resp.hdr.status)
503 		netdev_err(apc->ndev,
504 			   "Failed to deregister hw vPort: %d, 0x%x\n",
505 			   err, resp.hdr.status);
506 }
507 
508 static int mana_pf_register_filter(struct mana_port_context *apc)
509 {
510 	struct mana_register_filter_resp resp = {};
511 	struct mana_register_filter_req req = {};
512 	int err;
513 
514 	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
515 			     sizeof(req), sizeof(resp));
516 	req.vport = apc->port_handle;
517 	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
518 
519 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
520 				sizeof(resp));
521 	if (err) {
522 		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
523 		return err;
524 	}
525 
526 	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
527 				   sizeof(resp));
528 	if (err || resp.hdr.status) {
529 		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
530 			   err, resp.hdr.status);
531 		return err ? err : -EPROTO;
532 	}
533 
534 	apc->pf_filter_handle = resp.filter_handle;
535 	return 0;
536 }
537 
538 static void mana_pf_deregister_filter(struct mana_port_context *apc)
539 {
540 	struct mana_deregister_filter_resp resp = {};
541 	struct mana_deregister_filter_req req = {};
542 	int err;
543 
544 	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
545 			     sizeof(req), sizeof(resp));
546 	req.filter_handle = apc->pf_filter_handle;
547 
548 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
549 				sizeof(resp));
550 	if (err) {
551 		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
552 			   err);
553 		return;
554 	}
555 
556 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
557 				   sizeof(resp));
558 	if (err || resp.hdr.status)
559 		netdev_err(apc->ndev,
560 			   "Failed to deregister filter: %d, 0x%x\n",
561 			   err, resp.hdr.status);
562 }
563 
564 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
565 				 u32 proto_minor_ver, u32 proto_micro_ver,
566 				 u16 *max_num_vports)
567 {
568 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
569 	struct mana_query_device_cfg_resp resp = {};
570 	struct mana_query_device_cfg_req req = {};
571 	struct device *dev = gc->dev;
572 	int err = 0;
573 
574 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
575 			     sizeof(req), sizeof(resp));
576 	req.proto_major_ver = proto_major_ver;
577 	req.proto_minor_ver = proto_minor_ver;
578 	req.proto_micro_ver = proto_micro_ver;
579 
580 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
581 	if (err) {
		dev_err(dev, "Failed to query config: %d\n", err);
583 		return err;
584 	}
585 
586 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
587 				   sizeof(resp));
588 	if (err || resp.hdr.status) {
589 		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
590 			resp.hdr.status);
591 		if (!err)
592 			err = -EPROTO;
593 		return err;
594 	}
595 
596 	*max_num_vports = resp.max_num_vports;
597 
598 	return 0;
599 }
600 
601 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
602 				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
603 {
604 	struct mana_query_vport_cfg_resp resp = {};
605 	struct mana_query_vport_cfg_req req = {};
606 	int err;
607 
608 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
609 			     sizeof(req), sizeof(resp));
610 
611 	req.vport_index = vport_index;
612 
613 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
614 				sizeof(resp));
615 	if (err)
616 		return err;
617 
618 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
619 				   sizeof(resp));
620 	if (err)
621 		return err;
622 
623 	if (resp.hdr.status)
624 		return -EPROTO;
625 
626 	*max_sq = resp.max_num_sq;
627 	*max_rq = resp.max_num_rq;
628 	*num_indir_entry = resp.num_indirection_ent;
629 
630 	apc->port_handle = resp.vport;
631 	ether_addr_copy(apc->mac_addr, resp.mac_addr);
632 
633 	return 0;
634 }
635 
636 static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
637 			  u32 doorbell_pg_id)
638 {
639 	struct mana_config_vport_resp resp = {};
640 	struct mana_config_vport_req req = {};
641 	int err;
642 
643 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
644 			     sizeof(req), sizeof(resp));
645 	req.vport = apc->port_handle;
646 	req.pdid = protection_dom_id;
647 	req.doorbell_pageid = doorbell_pg_id;
648 
649 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
650 				sizeof(resp));
651 	if (err) {
652 		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
653 		goto out;
654 	}
655 
656 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
657 				   sizeof(resp));
658 	if (err || resp.hdr.status) {
659 		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
660 			   err, resp.hdr.status);
661 		if (!err)
662 			err = -EPROTO;
663 
664 		goto out;
665 	}
666 
667 	apc->tx_shortform_allowed = resp.short_form_allowed;
668 	apc->tx_vp_offset = resp.tx_vport_offset;
669 out:
670 	return err;
671 }
672 
673 static int mana_cfg_vport_steering(struct mana_port_context *apc,
674 				   enum TRI_STATE rx,
675 				   bool update_default_rxobj, bool update_key,
676 				   bool update_tab)
677 {
678 	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
679 	struct mana_cfg_rx_steer_req *req = NULL;
680 	struct mana_cfg_rx_steer_resp resp = {};
681 	struct net_device *ndev = apc->ndev;
682 	mana_handle_t *req_indir_tab;
683 	u32 req_buf_size;
684 	int err;
685 
686 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
687 	req = kzalloc(req_buf_size, GFP_KERNEL);
688 	if (!req)
689 		return -ENOMEM;
690 
691 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
692 			     sizeof(resp));
693 
694 	req->vport = apc->port_handle;
695 	req->num_indir_entries = num_entries;
696 	req->indir_tab_offset = sizeof(*req);
697 	req->rx_enable = rx;
698 	req->rss_enable = apc->rss_state;
699 	req->update_default_rxobj = update_default_rxobj;
700 	req->update_hashkey = update_key;
701 	req->update_indir_tab = update_tab;
702 	req->default_rxobj = apc->default_rxobj;
703 
704 	if (update_key)
705 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
706 
707 	if (update_tab) {
708 		req_indir_tab = (mana_handle_t *)(req + 1);
709 		memcpy(req_indir_tab, apc->rxobj_table,
710 		       req->num_indir_entries * sizeof(mana_handle_t));
711 	}
712 
713 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
714 				sizeof(resp));
715 	if (err) {
716 		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
717 		goto out;
718 	}
719 
720 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
721 				   sizeof(resp));
722 	if (err) {
723 		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
724 		goto out;
725 	}
726 
727 	if (resp.hdr.status) {
728 		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
729 			   resp.hdr.status);
730 		err = -EPROTO;
731 	}
732 out:
733 	kfree(req);
734 	return err;
735 }
736 
737 static int mana_create_wq_obj(struct mana_port_context *apc,
738 			      mana_handle_t vport,
739 			      u32 wq_type, struct mana_obj_spec *wq_spec,
740 			      struct mana_obj_spec *cq_spec,
741 			      mana_handle_t *wq_obj)
742 {
743 	struct mana_create_wqobj_resp resp = {};
744 	struct mana_create_wqobj_req req = {};
745 	struct net_device *ndev = apc->ndev;
746 	int err;
747 
748 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
749 			     sizeof(req), sizeof(resp));
750 	req.vport = vport;
751 	req.wq_type = wq_type;
752 	req.wq_gdma_region = wq_spec->gdma_region;
753 	req.cq_gdma_region = cq_spec->gdma_region;
754 	req.wq_size = wq_spec->queue_size;
755 	req.cq_size = cq_spec->queue_size;
756 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
757 	req.cq_parent_qid = cq_spec->attached_eq;
758 
759 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
760 				sizeof(resp));
761 	if (err) {
762 		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
763 		goto out;
764 	}
765 
766 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
767 				   sizeof(resp));
768 	if (err || resp.hdr.status) {
769 		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
770 			   resp.hdr.status);
771 		if (!err)
772 			err = -EPROTO;
773 		goto out;
774 	}
775 
776 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
777 		netdev_err(ndev, "Got an invalid WQ object handle\n");
778 		err = -EPROTO;
779 		goto out;
780 	}
781 
782 	*wq_obj = resp.wq_obj;
783 	wq_spec->queue_index = resp.wq_id;
784 	cq_spec->queue_index = resp.cq_id;
785 
786 	return 0;
787 out:
788 	return err;
789 }
790 
791 static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
792 				mana_handle_t wq_obj)
793 {
794 	struct mana_destroy_wqobj_resp resp = {};
795 	struct mana_destroy_wqobj_req req = {};
796 	struct net_device *ndev = apc->ndev;
797 	int err;
798 
799 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
800 			     sizeof(req), sizeof(resp));
801 	req.wq_type = wq_type;
802 	req.wq_obj_handle = wq_obj;
803 
804 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
805 				sizeof(resp));
806 	if (err) {
807 		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
808 		return;
809 	}
810 
811 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
812 				   sizeof(resp));
813 	if (err || resp.hdr.status)
814 		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
815 			   resp.hdr.status);
816 }
817 
818 static void mana_destroy_eq(struct mana_context *ac)
819 {
820 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
821 	struct gdma_queue *eq;
822 	int i;
823 
824 	if (!ac->eqs)
825 		return;
826 
827 	for (i = 0; i < gc->max_num_queues; i++) {
828 		eq = ac->eqs[i].eq;
829 		if (!eq)
830 			continue;
831 
832 		mana_gd_destroy_queue(gc, eq);
833 	}
834 
835 	kfree(ac->eqs);
836 	ac->eqs = NULL;
837 }
838 
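/* Create the event queues, one per HW queue supported by the device; they are
 * shared by all ports of this adapter.
 */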
839 static int mana_create_eq(struct mana_context *ac)
840 {
841 	struct gdma_dev *gd = ac->gdma_dev;
842 	struct gdma_context *gc = gd->gdma_context;
843 	struct gdma_queue_spec spec = {};
844 	int err;
845 	int i;
846 
847 	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
848 			  GFP_KERNEL);
849 	if (!ac->eqs)
850 		return -ENOMEM;
851 
852 	spec.type = GDMA_EQ;
853 	spec.monitor_avl_buf = false;
854 	spec.queue_size = EQ_SIZE;
855 	spec.eq.callback = NULL;
856 	spec.eq.context = ac->eqs;
857 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
858 
859 	for (i = 0; i < gc->max_num_queues; i++) {
860 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
861 		if (err)
862 			goto out;
863 	}
864 
865 	return 0;
866 out:
867 	mana_destroy_eq(ac);
868 	return err;
869 }
870 
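/* Post a fence request on the RQ and wait (up to 10 seconds) for the
 * corresponding CQE_RX_OBJECT_FENCE completion, which is signalled from
 * mana_process_rx_cqe().
 */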
871 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
872 {
873 	struct mana_fence_rq_resp resp = {};
874 	struct mana_fence_rq_req req = {};
875 	int err;
876 
877 	init_completion(&rxq->fence_event);
878 
879 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
880 			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;
882 
883 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
884 				sizeof(resp));
885 	if (err) {
886 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
887 			   rxq->rxq_idx, err);
888 		return err;
889 	}
890 
891 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
892 	if (err || resp.hdr.status) {
893 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
894 			   rxq->rxq_idx, err, resp.hdr.status);
895 		if (!err)
896 			err = -EPROTO;
897 
898 		return err;
899 	}
900 
901 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
902 		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
903 			   rxq->rxq_idx);
904 		return -ETIMEDOUT;
905 	}
906 
907 	return 0;
908 }
909 
910 static void mana_fence_rqs(struct mana_port_context *apc)
911 {
912 	unsigned int rxq_idx;
913 	struct mana_rxq *rxq;
914 	int err;
915 
916 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
917 		rxq = apc->rxqs[rxq_idx];
918 		err = mana_fence_rq(apc, rxq);
919 
		/* In case of any error, fall back to a short sleep to give
		 * in-flight RX traffic time to drain.
		 */
921 		if (err)
922 			msleep(100);
923 	}
924 }
925 
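/* Advance the work queue tail by the given number of WQE basic units, after
 * checking that this does not move the tail past the head.
 */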
926 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
927 {
928 	u32 used_space_old;
929 	u32 used_space_new;
930 
931 	used_space_old = wq->head - wq->tail;
932 	used_space_new = wq->head - (wq->tail + num_units);
933 
934 	if (WARN_ON_ONCE(used_space_new > used_space_old))
935 		return -ERANGE;
936 
937 	wq->tail += num_units;
938 	return 0;
939 }
940 
941 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
942 {
943 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
944 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
945 	struct device *dev = gc->dev;
946 	int i;
947 
948 	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
949 
950 	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
951 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
952 			       DMA_TO_DEVICE);
953 }
954 
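/* TX completion processing: poll the CQ, unmap and free the completed skbs,
 * advance the SQ tail, and wake the netdev queue if it was stopped and enough
 * space is available again.
 */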
955 static void mana_poll_tx_cq(struct mana_cq *cq)
956 {
957 	struct gdma_comp *completions = cq->gdma_comp_buf;
958 	struct gdma_posted_wqe_info *wqe_info;
959 	unsigned int pkt_transmitted = 0;
960 	unsigned int wqe_unit_cnt = 0;
961 	struct mana_txq *txq = cq->txq;
962 	struct mana_port_context *apc;
963 	struct netdev_queue *net_txq;
964 	struct gdma_queue *gdma_wq;
965 	unsigned int avail_space;
966 	struct net_device *ndev;
967 	struct sk_buff *skb;
968 	bool txq_stopped;
969 	int comp_read;
970 	int i;
971 
972 	ndev = txq->ndev;
973 	apc = netdev_priv(ndev);
974 
975 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
976 				    CQE_POLLING_BUFFER);
977 
978 	if (comp_read < 1)
979 		return;
980 
981 	for (i = 0; i < comp_read; i++) {
982 		struct mana_tx_comp_oob *cqe_oob;
983 
984 		if (WARN_ON_ONCE(!completions[i].is_sq))
985 			return;
986 
987 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
988 		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
989 				 MANA_CQE_COMPLETION))
990 			return;
991 
992 		switch (cqe_oob->cqe_hdr.cqe_type) {
993 		case CQE_TX_OKAY:
994 			break;
995 
996 		case CQE_TX_SA_DROP:
997 		case CQE_TX_MTU_DROP:
998 		case CQE_TX_INVALID_OOB:
999 		case CQE_TX_INVALID_ETH_TYPE:
1000 		case CQE_TX_HDR_PROCESSING_ERROR:
1001 		case CQE_TX_VF_DISABLED:
1002 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1003 		case CQE_TX_VPORT_DISABLED:
1004 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1005 			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
1006 				  cqe_oob->cqe_hdr.cqe_type);
1007 			break;
1008 
1009 		default:
1010 			/* If the CQE type is unexpected, log an error, assert,
1011 			 * and go through the error path.
1012 			 */
1013 			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
1014 				  cqe_oob->cqe_hdr.cqe_type);
1015 			return;
1016 		}
1017 
1018 		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1019 			return;
1020 
1021 		skb = skb_dequeue(&txq->pending_skbs);
1022 		if (WARN_ON_ONCE(!skb))
1023 			return;
1024 
1025 		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1026 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1027 
1028 		mana_unmap_skb(skb, apc);
1029 
1030 		napi_consume_skb(skb, cq->budget);
1031 
1032 		pkt_transmitted++;
1033 	}
1034 
1035 	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1036 		return;
1037 
1038 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1039 
1040 	gdma_wq = txq->gdma_sq;
1041 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1042 
1043 	/* Ensure tail updated before checking q stop */
1044 	smp_mb();
1045 
1046 	net_txq = txq->net_txq;
1047 	txq_stopped = netif_tx_queue_stopped(net_txq);
1048 
1049 	/* Ensure checking txq_stopped before apc->port_is_up. */
1050 	smp_rmb();
1051 
1052 	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1053 		netif_tx_wake_queue(net_txq);
1054 		apc->eth_stats.wake_queue++;
1055 	}
1056 
1057 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1058 		WARN_ON_ONCE(1);
1059 
1060 	cq->work_done = pkt_transmitted;
1061 }
1062 
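/* Repost the receive WQE for the buffer slot that was just consumed,
 * advancing buf_index around the ring.
 */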
1063 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1064 {
1065 	struct mana_recv_buf_oob *recv_buf_oob;
1066 	u32 curr_index;
1067 	int err;
1068 
1069 	curr_index = rxq->buf_index++;
1070 	if (rxq->buf_index == rxq->num_rx_buf)
1071 		rxq->buf_index = 0;
1072 
1073 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1074 
1075 	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1076 				    &recv_buf_oob->wqe_inf);
1077 	if (WARN_ON_ONCE(err))
1078 		return;
1079 
1080 	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1081 }
1082 
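/* Build an skb around the receive buffer. If XDP adjusted the packet, honor
 * the xdp_buff data pointers; otherwise reserve the standard XDP headroom and
 * use the reported packet length.
 */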
1083 static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
1084 				      struct xdp_buff *xdp)
1085 {
1086 	struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
1087 
1088 	if (!skb)
1089 		return NULL;
1090 
1091 	if (xdp->data_hard_start) {
1092 		skb_reserve(skb, xdp->data - xdp->data_hard_start);
1093 		skb_put(skb, xdp->data_end - xdp->data);
1094 	} else {
1095 		skb_reserve(skb, XDP_PACKET_HEADROOM);
1096 		skb_put(skb, pkt_len);
1097 	}
1098 
1099 	return skb;
1100 }
1101 
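/* Deliver one received buffer to the stack: run the XDP program first, then
 * build an skb, fill in checksum and RSS hash information, and hand it to GRO
 * (or to mana_xdp_tx() for XDP_TX).
 */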
1102 static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
1103 			struct mana_rxq *rxq)
1104 {
1105 	struct mana_stats_rx *rx_stats = &rxq->stats;
1106 	struct net_device *ndev = rxq->ndev;
1107 	uint pkt_len = cqe->ppi[0].pkt_len;
1108 	u16 rxq_idx = rxq->rxq_idx;
1109 	struct napi_struct *napi;
1110 	struct xdp_buff xdp = {};
1111 	struct sk_buff *skb;
1112 	u32 hash_value;
1113 	u32 act;
1114 
1115 	rxq->rx_cq.work_done++;
1116 	napi = &rxq->rx_cq.napi;
1117 
1118 	if (!buf_va) {
1119 		++ndev->stats.rx_dropped;
1120 		return;
1121 	}
1122 
1123 	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1124 
1125 	if (act == XDP_REDIRECT && !rxq->xdp_rc)
1126 		return;
1127 
1128 	if (act != XDP_PASS && act != XDP_TX)
1129 		goto drop_xdp;
1130 
1131 	skb = mana_build_skb(buf_va, pkt_len, &xdp);
1132 
1133 	if (!skb)
1134 		goto drop;
1135 
1136 	skb->dev = napi->dev;
1137 
1138 	skb->protocol = eth_type_trans(skb, ndev);
1139 	skb_checksum_none_assert(skb);
1140 	skb_record_rx_queue(skb, rxq_idx);
1141 
1142 	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1143 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1144 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1145 	}
1146 
1147 	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1148 		hash_value = cqe->ppi[0].pkt_hash;
1149 
1150 		if (cqe->rx_hashtype & MANA_HASH_L4)
1151 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1152 		else
1153 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1154 	}
1155 
1156 	u64_stats_update_begin(&rx_stats->syncp);
1157 	rx_stats->packets++;
1158 	rx_stats->bytes += pkt_len;
1159 
1160 	if (act == XDP_TX)
1161 		rx_stats->xdp_tx++;
1162 	u64_stats_update_end(&rx_stats->syncp);
1163 
1164 	if (act == XDP_TX) {
1165 		skb_set_queue_mapping(skb, rxq_idx);
1166 		mana_xdp_tx(skb, ndev);
1167 		return;
1168 	}
1169 
1170 	napi_gro_receive(napi, skb);
1171 
1172 	return;
1173 
1174 drop_xdp:
1175 	u64_stats_update_begin(&rx_stats->syncp);
1176 	rx_stats->xdp_drop++;
1177 	u64_stats_update_end(&rx_stats->syncp);
1178 
1179 drop:
1180 	WARN_ON_ONCE(rxq->xdp_save_page);
1181 	rxq->xdp_save_page = virt_to_page(buf_va);
1182 
1183 	++ndev->stats.rx_dropped;
1184 
1185 	return;
1186 }
1187 
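/* Handle one RX CQE: for CQE_RX_OKAY, allocate and map a replacement page
 * (reusing a page saved by XDP when available), pass the old buffer up via
 * mana_rx_skb(), then advance the RQ tail and repost the WQE.
 */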
1188 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1189 				struct gdma_comp *cqe)
1190 {
1191 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1192 	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1193 	struct net_device *ndev = rxq->ndev;
1194 	struct mana_recv_buf_oob *rxbuf_oob;
1195 	struct device *dev = gc->dev;
1196 	void *new_buf, *old_buf;
1197 	struct page *new_page;
1198 	u32 curr, pktlen;
1199 	dma_addr_t da;
1200 
1201 	switch (oob->cqe_hdr.cqe_type) {
1202 	case CQE_RX_OKAY:
1203 		break;
1204 
1205 	case CQE_RX_TRUNCATED:
1206 		++ndev->stats.rx_dropped;
1207 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1208 		netdev_warn_once(ndev, "Dropped a truncated packet\n");
1209 		goto drop;
1210 
1211 	case CQE_RX_COALESCED_4:
1212 		netdev_err(ndev, "RX coalescing is unsupported\n");
1213 		return;
1214 
1215 	case CQE_RX_OBJECT_FENCE:
1216 		complete(&rxq->fence_event);
1217 		return;
1218 
1219 	default:
1220 		netdev_err(ndev, "Unknown RX CQE type = %d\n",
1221 			   oob->cqe_hdr.cqe_type);
1222 		return;
1223 	}
1224 
1225 	pktlen = oob->ppi[0].pkt_len;
1226 
1227 	if (pktlen == 0) {
		/* Data packets should never have a packet length of zero */
1229 		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1230 			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1231 		return;
1232 	}
1233 
1234 	curr = rxq->buf_index;
1235 	rxbuf_oob = &rxq->rx_oobs[curr];
1236 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1237 
1238 	/* Reuse XDP dropped page if available */
1239 	if (rxq->xdp_save_page) {
1240 		new_page = rxq->xdp_save_page;
1241 		rxq->xdp_save_page = NULL;
1242 	} else {
1243 		new_page = alloc_page(GFP_ATOMIC);
1244 	}
1245 
1246 	if (new_page) {
1247 		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
1248 				  DMA_FROM_DEVICE);
1249 
1250 		if (dma_mapping_error(dev, da)) {
1251 			__free_page(new_page);
1252 			new_page = NULL;
1253 		}
1254 	}
1255 
1256 	new_buf = new_page ? page_to_virt(new_page) : NULL;
1257 
1258 	if (new_buf) {
1259 		dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
1260 			       DMA_FROM_DEVICE);
1261 
1262 		old_buf = rxbuf_oob->buf_va;
1263 
1264 		/* refresh the rxbuf_oob with the new page */
1265 		rxbuf_oob->buf_va = new_buf;
1266 		rxbuf_oob->buf_dma_addr = da;
1267 		rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
1268 	} else {
1269 		old_buf = NULL; /* drop the packet if no memory */
1270 	}
1271 
1272 	mana_rx_skb(old_buf, oob, rxq);
1273 
1274 drop:
1275 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1276 
1277 	mana_post_pkt_rxq(rxq);
1278 }
1279 
1280 static void mana_poll_rx_cq(struct mana_cq *cq)
1281 {
1282 	struct gdma_comp *comp = cq->gdma_comp_buf;
1283 	struct mana_rxq *rxq = cq->rxq;
1284 	int comp_read, i;
1285 
1286 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1287 	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1288 
1289 	rxq->xdp_flush = false;
1290 
1291 	for (i = 0; i < comp_read; i++) {
1292 		if (WARN_ON_ONCE(comp[i].is_sq))
1293 			return;
1294 
1295 		/* verify recv cqe references the right rxq */
1296 		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1297 			return;
1298 
1299 		mana_process_rx_cqe(rxq, cq, &comp[i]);
1300 	}
1301 
1302 	if (rxq->xdp_flush)
1303 		xdp_do_flush();
1304 }
1305 
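/* CQ handler run from NAPI poll: process the RX or TX CQ, then re-arm the CQ
 * only if the NAPI budget was not exhausted.
 */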
1306 static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1307 {
1308 	struct mana_cq *cq = context;
1309 	u8 arm_bit;
1310 
1311 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1312 
1313 	if (cq->type == MANA_CQ_TYPE_RX)
1314 		mana_poll_rx_cq(cq);
1315 	else
1316 		mana_poll_tx_cq(cq);
1317 
1318 	if (cq->work_done < cq->budget &&
1319 	    napi_complete_done(&cq->napi, cq->work_done)) {
1320 		arm_bit = SET_ARM_BIT;
1321 	} else {
1322 		arm_bit = 0;
1323 	}
1324 
1325 	mana_gd_ring_cq(gdma_queue, arm_bit);
1326 }
1327 
1328 static int mana_poll(struct napi_struct *napi, int budget)
1329 {
1330 	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1331 
1332 	cq->work_done = 0;
1333 	cq->budget = budget;
1334 
1335 	mana_cq_handler(cq, cq->gdma_cq);
1336 
1337 	return min(cq->work_done, budget);
1338 }
1339 
1340 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1341 {
1342 	struct mana_cq *cq = context;
1343 
1344 	napi_schedule_irqoff(&cq->napi);
1345 }
1346 
1347 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1348 {
1349 	struct gdma_dev *gd = apc->ac->gdma_dev;
1350 
1351 	if (!cq->gdma_cq)
1352 		return;
1353 
1354 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1355 }
1356 
1357 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1358 {
1359 	struct gdma_dev *gd = apc->ac->gdma_dev;
1360 
1361 	if (!txq->gdma_sq)
1362 		return;
1363 
1364 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1365 }
1366 
1367 static void mana_destroy_txq(struct mana_port_context *apc)
1368 {
1369 	struct napi_struct *napi;
1370 	int i;
1371 
1372 	if (!apc->tx_qp)
1373 		return;
1374 
1375 	for (i = 0; i < apc->num_queues; i++) {
1376 		napi = &apc->tx_qp[i].tx_cq.napi;
1377 		napi_synchronize(napi);
1378 		napi_disable(napi);
1379 		netif_napi_del(napi);
1380 
1381 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1382 
1383 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1384 
1385 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1386 	}
1387 
1388 	kfree(apc->tx_qp);
1389 	apc->tx_qp = NULL;
1390 }
1391 
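/* Create one SQ and its completion queue per TX queue, bind each pair into a
 * HW WQ object, register the CQ in the device's CQ table and enable NAPI on
 * it.
 */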
1392 static int mana_create_txq(struct mana_port_context *apc,
1393 			   struct net_device *net)
1394 {
1395 	struct mana_context *ac = apc->ac;
1396 	struct gdma_dev *gd = ac->gdma_dev;
1397 	struct mana_obj_spec wq_spec;
1398 	struct mana_obj_spec cq_spec;
1399 	struct gdma_queue_spec spec;
1400 	struct gdma_context *gc;
1401 	struct mana_txq *txq;
1402 	struct mana_cq *cq;
1403 	u32 txq_size;
1404 	u32 cq_size;
1405 	int err;
1406 	int i;
1407 
1408 	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1409 			     GFP_KERNEL);
1410 	if (!apc->tx_qp)
1411 		return -ENOMEM;
1412 
1413 	/*  The minimum size of the WQE is 32 bytes, hence
1414 	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1415 	 *  the SQ can store. This value is then used to size other queues
1416 	 *  to prevent overflow.
1417 	 */
1418 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1419 	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1420 
1421 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1422 	cq_size = PAGE_ALIGN(cq_size);
1423 
1424 	gc = gd->gdma_context;
1425 
1426 	for (i = 0; i < apc->num_queues; i++) {
1427 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1428 
1429 		/* Create SQ */
1430 		txq = &apc->tx_qp[i].txq;
1431 
1432 		u64_stats_init(&txq->stats.syncp);
1433 		txq->ndev = net;
1434 		txq->net_txq = netdev_get_tx_queue(net, i);
1435 		txq->vp_offset = apc->tx_vp_offset;
1436 		skb_queue_head_init(&txq->pending_skbs);
1437 
1438 		memset(&spec, 0, sizeof(spec));
1439 		spec.type = GDMA_SQ;
1440 		spec.monitor_avl_buf = true;
1441 		spec.queue_size = txq_size;
1442 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1443 		if (err)
1444 			goto out;
1445 
1446 		/* Create SQ's CQ */
1447 		cq = &apc->tx_qp[i].tx_cq;
1448 		cq->type = MANA_CQ_TYPE_TX;
1449 
1450 		cq->txq = txq;
1451 
1452 		memset(&spec, 0, sizeof(spec));
1453 		spec.type = GDMA_CQ;
1454 		spec.monitor_avl_buf = false;
1455 		spec.queue_size = cq_size;
1456 		spec.cq.callback = mana_schedule_napi;
1457 		spec.cq.parent_eq = ac->eqs[i].eq;
1458 		spec.cq.context = cq;
1459 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1460 		if (err)
1461 			goto out;
1462 
1463 		memset(&wq_spec, 0, sizeof(wq_spec));
1464 		memset(&cq_spec, 0, sizeof(cq_spec));
1465 
1466 		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1467 		wq_spec.queue_size = txq->gdma_sq->queue_size;
1468 
1469 		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1470 		cq_spec.queue_size = cq->gdma_cq->queue_size;
1471 		cq_spec.modr_ctx_id = 0;
1472 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1473 
1474 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1475 					 &wq_spec, &cq_spec,
1476 					 &apc->tx_qp[i].tx_object);
1477 
1478 		if (err)
1479 			goto out;
1480 
1481 		txq->gdma_sq->id = wq_spec.queue_index;
1482 		cq->gdma_cq->id = cq_spec.queue_index;
1483 
1484 		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1485 		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1486 
1487 		txq->gdma_txq_id = txq->gdma_sq->id;
1488 
1489 		cq->gdma_id = cq->gdma_cq->id;
1490 
1491 		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1492 			err = -EINVAL;
1493 			goto out;
1494 		}
1495 
1496 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1497 
1498 		netif_napi_add_tx(net, &cq->napi, mana_poll);
1499 		napi_enable(&cq->napi);
1500 
1501 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1502 	}
1503 
1504 	return 0;
1505 out:
1506 	mana_destroy_txq(apc);
1507 	return err;
1508 }
1509 
1510 static void mana_destroy_rxq(struct mana_port_context *apc,
1511 			     struct mana_rxq *rxq, bool validate_state)
1513 {
1514 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1515 	struct mana_recv_buf_oob *rx_oob;
1516 	struct device *dev = gc->dev;
1517 	struct napi_struct *napi;
1518 	int i;
1519 
1520 	if (!rxq)
1521 		return;
1522 
1523 	napi = &rxq->rx_cq.napi;
1524 
1525 	if (validate_state)
1526 		napi_synchronize(napi);
1527 
1528 	napi_disable(napi);
1529 
1530 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
1531 
1532 	netif_napi_del(napi);
1533 
1534 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1535 
1536 	mana_deinit_cq(apc, &rxq->rx_cq);
1537 
1538 	if (rxq->xdp_save_page)
1539 		__free_page(rxq->xdp_save_page);
1540 
1541 	for (i = 0; i < rxq->num_rx_buf; i++) {
1542 		rx_oob = &rxq->rx_oobs[i];
1543 
1544 		if (!rx_oob->buf_va)
1545 			continue;
1546 
1547 		dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1548 			       DMA_FROM_DEVICE);
1549 
1550 		free_page((unsigned long)rx_oob->buf_va);
1551 		rx_oob->buf_va = NULL;
1552 	}
1553 
1554 	if (rxq->gdma_rq)
1555 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
1556 
1557 	kfree(rxq);
1558 }
1559 
1560 #define MANA_WQE_HEADER_SIZE 16
1561 #define MANA_WQE_SGE_SIZE 16
1562 
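/* Allocate and DMA-map one page per receive buffer and prepare its WQE
 * request, while computing the RQ and CQ sizes needed to hold them all
 * (each RX WQE is the WQE header plus one SGE, aligned to 32 bytes).
 */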
1563 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1564 			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1565 {
1566 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1567 	struct mana_recv_buf_oob *rx_oob;
1568 	struct device *dev = gc->dev;
1569 	struct page *page;
1570 	dma_addr_t da;
1571 	u32 buf_idx;
1572 
1573 	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1574 
1575 	*rxq_size = 0;
1576 	*cq_size = 0;
1577 
1578 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1579 		rx_oob = &rxq->rx_oobs[buf_idx];
1580 		memset(rx_oob, 0, sizeof(*rx_oob));
1581 
1582 		page = alloc_page(GFP_KERNEL);
1583 		if (!page)
1584 			return -ENOMEM;
1585 
1586 		da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
1587 				  DMA_FROM_DEVICE);
1588 
1589 		if (dma_mapping_error(dev, da)) {
1590 			__free_page(page);
1591 			return -ENOMEM;
1592 		}
1593 
1594 		rx_oob->buf_va = page_to_virt(page);
1595 		rx_oob->buf_dma_addr = da;
1596 
1597 		rx_oob->num_sge = 1;
1598 		rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1599 		rx_oob->sgl[0].size = rxq->datasize;
1600 		rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1601 
1602 		rx_oob->wqe_req.sgl = rx_oob->sgl;
1603 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1604 		rx_oob->wqe_req.inline_oob_size = 0;
1605 		rx_oob->wqe_req.inline_oob_data = NULL;
1606 		rx_oob->wqe_req.flags = 0;
1607 		rx_oob->wqe_req.client_data_unit = 0;
1608 
1609 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1610 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1611 		*cq_size += COMP_ENTRY_SIZE;
1612 	}
1613 
1614 	return 0;
1615 }
1616 
1617 static int mana_push_wqe(struct mana_rxq *rxq)
1618 {
1619 	struct mana_recv_buf_oob *rx_oob;
1620 	u32 buf_idx;
1621 	int err;
1622 
1623 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1624 		rx_oob = &rxq->rx_oobs[buf_idx];
1625 
1626 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1627 					    &rx_oob->wqe_inf);
1628 		if (err)
1629 			return -ENOSPC;
1630 	}
1631 
1632 	return 0;
1633 }
1634 
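/* Create one RQ with its completion queue: allocate the receive buffers,
 * create the GDMA queues, bind them into a HW WQ object, post the initial
 * WQEs, set up XDP rxq info and enable NAPI.
 */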
1635 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1636 					u32 rxq_idx, struct mana_eq *eq,
1637 					struct net_device *ndev)
1638 {
1639 	struct gdma_dev *gd = apc->ac->gdma_dev;
1640 	struct mana_obj_spec wq_spec;
1641 	struct mana_obj_spec cq_spec;
1642 	struct gdma_queue_spec spec;
1643 	struct mana_cq *cq = NULL;
1644 	struct gdma_context *gc;
1645 	u32 cq_size, rq_size;
1646 	struct mana_rxq *rxq;
1647 	int err;
1648 
1649 	gc = gd->gdma_context;
1650 
1651 	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
1652 		      GFP_KERNEL);
1653 	if (!rxq)
1654 		return NULL;
1655 
1656 	rxq->ndev = ndev;
1657 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1658 	rxq->rxq_idx = rxq_idx;
1659 	rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1660 	rxq->rxobj = INVALID_MANA_HANDLE;
1661 
1662 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1663 	if (err)
1664 		goto out;
1665 
1666 	rq_size = PAGE_ALIGN(rq_size);
1667 	cq_size = PAGE_ALIGN(cq_size);
1668 
1669 	/* Create RQ */
1670 	memset(&spec, 0, sizeof(spec));
1671 	spec.type = GDMA_RQ;
1672 	spec.monitor_avl_buf = true;
1673 	spec.queue_size = rq_size;
1674 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1675 	if (err)
1676 		goto out;
1677 
1678 	/* Create RQ's CQ */
1679 	cq = &rxq->rx_cq;
1680 	cq->type = MANA_CQ_TYPE_RX;
1681 	cq->rxq = rxq;
1682 
1683 	memset(&spec, 0, sizeof(spec));
1684 	spec.type = GDMA_CQ;
1685 	spec.monitor_avl_buf = false;
1686 	spec.queue_size = cq_size;
1687 	spec.cq.callback = mana_schedule_napi;
1688 	spec.cq.parent_eq = eq->eq;
1689 	spec.cq.context = cq;
1690 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1691 	if (err)
1692 		goto out;
1693 
1694 	memset(&wq_spec, 0, sizeof(wq_spec));
1695 	memset(&cq_spec, 0, sizeof(cq_spec));
1696 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1697 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
1698 
1699 	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1700 	cq_spec.queue_size = cq->gdma_cq->queue_size;
1701 	cq_spec.modr_ctx_id = 0;
1702 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1703 
1704 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1705 				 &wq_spec, &cq_spec, &rxq->rxobj);
1706 	if (err)
1707 		goto out;
1708 
1709 	rxq->gdma_rq->id = wq_spec.queue_index;
1710 	cq->gdma_cq->id = cq_spec.queue_index;
1711 
1712 	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1713 	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1714 
1715 	rxq->gdma_id = rxq->gdma_rq->id;
1716 	cq->gdma_id = cq->gdma_cq->id;
1717 
1718 	err = mana_push_wqe(rxq);
1719 	if (err)
1720 		goto out;
1721 
1722 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1723 		err = -EINVAL;
1724 		goto out;
1725 	}
1726 
1727 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1728 
1729 	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
1730 
1731 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
1732 				 cq->napi.napi_id));
1733 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
1734 					   MEM_TYPE_PAGE_SHARED, NULL));
1735 
1736 	napi_enable(&cq->napi);
1737 
1738 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1739 out:
1740 	if (!err)
1741 		return rxq;
1742 
1743 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1744 
1745 	mana_destroy_rxq(apc, rxq, false);
1746 
1747 	if (cq)
1748 		mana_deinit_cq(apc, cq);
1749 
1750 	return NULL;
1751 }
1752 
1753 static int mana_add_rx_queues(struct mana_port_context *apc,
1754 			      struct net_device *ndev)
1755 {
1756 	struct mana_context *ac = apc->ac;
1757 	struct mana_rxq *rxq;
1758 	int err = 0;
1759 	int i;
1760 
1761 	for (i = 0; i < apc->num_queues; i++) {
1762 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
1763 		if (!rxq) {
1764 			err = -ENOMEM;
1765 			goto out;
1766 		}
1767 
1768 		u64_stats_init(&rxq->stats.syncp);
1769 
1770 		apc->rxqs[i] = rxq;
1771 	}
1772 
1773 	apc->default_rxobj = apc->rxqs[0]->rxobj;
1774 out:
1775 	return err;
1776 }
1777 
1778 static void mana_destroy_vport(struct mana_port_context *apc)
1779 {
1780 	struct gdma_dev *gd = apc->ac->gdma_dev;
1781 	struct mana_rxq *rxq;
1782 	u32 rxq_idx;
1783 
1784 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1785 		rxq = apc->rxqs[rxq_idx];
1786 		if (!rxq)
1787 			continue;
1788 
1789 		mana_destroy_rxq(apc, rxq, true);
1790 		apc->rxqs[rxq_idx] = NULL;
1791 	}
1792 
1793 	mana_destroy_txq(apc);
1794 
1795 	if (gd->gdma_context->is_pf)
1796 		mana_pf_deregister_hw_vport(apc);
1797 }
1798 
1799 static int mana_create_vport(struct mana_port_context *apc,
1800 			     struct net_device *net)
1801 {
1802 	struct gdma_dev *gd = apc->ac->gdma_dev;
1803 	int err;
1804 
1805 	apc->default_rxobj = INVALID_MANA_HANDLE;
1806 
1807 	if (gd->gdma_context->is_pf) {
1808 		err = mana_pf_register_hw_vport(apc);
1809 		if (err)
1810 			return err;
1811 	}
1812 
1813 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1814 	if (err)
1815 		return err;
1816 
1817 	return mana_create_txq(apc, net);
1818 }
1819 
1820 static void mana_rss_table_init(struct mana_port_context *apc)
1821 {
1822 	int i;
1823 
1824 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1825 		apc->indir_table[i] =
1826 			ethtool_rxfh_indir_default(i, apc->num_queues);
1827 }
1828 
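/* Push the RSS configuration (indirection table and/or hash key) to the HW,
 * then fence all RQs so that traffic steered under the old configuration is
 * drained.
 */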
1829 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1830 		    bool update_hash, bool update_tab)
1831 {
1832 	u32 queue_idx;
1833 	int err;
1834 	int i;
1835 
1836 	if (update_tab) {
1837 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1838 			queue_idx = apc->indir_table[i];
1839 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1840 		}
1841 	}
1842 
1843 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
1844 	if (err)
1845 		return err;
1846 
1847 	mana_fence_rqs(apc);
1848 
1849 	return 0;
1850 }
1851 
1852 static int mana_init_port(struct net_device *ndev)
1853 {
1854 	struct mana_port_context *apc = netdev_priv(ndev);
1855 	u32 max_txq, max_rxq, max_queues;
1856 	int port_idx = apc->port_idx;
1857 	u32 num_indirect_entries;
1858 	int err;
1859 
1860 	err = mana_init_port_context(apc);
1861 	if (err)
1862 		return err;
1863 
1864 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1865 				   &num_indirect_entries);
1866 	if (err) {
1867 		netdev_err(ndev, "Failed to query info for vPort %d\n",
1868 			   port_idx);
1869 		goto reset_apc;
1870 	}
1871 
1872 	max_queues = min_t(u32, max_txq, max_rxq);
1873 	if (apc->max_queues > max_queues)
1874 		apc->max_queues = max_queues;
1875 
1876 	if (apc->num_queues > apc->max_queues)
1877 		apc->num_queues = apc->max_queues;
1878 
1879 	eth_hw_addr_set(ndev, apc->mac_addr);
1880 
1881 	return 0;
1882 
1883 reset_apc:
1884 	kfree(apc->rxqs);
1885 	apc->rxqs = NULL;
1886 	return err;
1887 }
1888 
1889 int mana_alloc_queues(struct net_device *ndev)
1890 {
1891 	struct mana_port_context *apc = netdev_priv(ndev);
1892 	struct gdma_dev *gd = apc->ac->gdma_dev;
1893 	int err;
1894 
1895 	err = mana_create_vport(apc, ndev);
1896 	if (err)
1897 		return err;
1898 
1899 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1900 	if (err)
1901 		goto destroy_vport;
1902 
1903 	err = mana_add_rx_queues(apc, ndev);
1904 	if (err)
1905 		goto destroy_vport;
1906 
1907 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1908 
1909 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1910 	if (err)
1911 		goto destroy_vport;
1912 
1913 	mana_rss_table_init(apc);
1914 
1915 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1916 	if (err)
1917 		goto destroy_vport;
1918 
1919 	if (gd->gdma_context->is_pf) {
1920 		err = mana_pf_register_filter(apc);
1921 		if (err)
1922 			goto destroy_vport;
1923 	}
1924 
1925 	mana_chn_setxdp(apc, mana_xdp_get(apc));
1926 
1927 	return 0;
1928 
1929 destroy_vport:
1930 	mana_destroy_vport(apc);
1931 	return err;
1932 }
1933 
1934 int mana_attach(struct net_device *ndev)
1935 {
1936 	struct mana_port_context *apc = netdev_priv(ndev);
1937 	int err;
1938 
1939 	ASSERT_RTNL();
1940 
1941 	err = mana_init_port(ndev);
1942 	if (err)
1943 		return err;
1944 
1945 	if (apc->port_st_save) {
1946 		err = mana_alloc_queues(ndev);
1947 		if (err) {
1948 			mana_cleanup_port_context(apc);
1949 			return err;
1950 		}
1951 	}
1952 
1953 	apc->port_is_up = apc->port_st_save;
1954 
1955 	/* Ensure port state updated before txq state */
1956 	smp_wmb();
1957 
1958 	if (apc->port_is_up)
1959 		netif_carrier_on(ndev);
1960 
1961 	netif_device_attach(ndev);
1962 
1963 	return 0;
1964 }
1965 
1966 static int mana_dealloc_queues(struct net_device *ndev)
1967 {
1968 	struct mana_port_context *apc = netdev_priv(ndev);
1969 	struct gdma_dev *gd = apc->ac->gdma_dev;
1970 	struct mana_txq *txq;
1971 	int i, err;
1972 
1973 	if (apc->port_is_up)
1974 		return -EINVAL;
1975 
1976 	mana_chn_setxdp(apc, NULL);
1977 
1978 	if (gd->gdma_context->is_pf)
1979 		mana_pf_deregister_filter(apc);
1980 
1981 	/* No packet can be transmitted now since apc->port_is_up is false.
1982 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
1983 	 * a txq because it may not timely see apc->port_is_up being cleared
1984 	 * to false, but it doesn't matter since mana_start_xmit() drops any
1985 	 * new packets due to apc->port_is_up being false.
1986 	 *
1987 	 * Drain all the in-flight TX packets
1988 	 */
1989 	for (i = 0; i < apc->num_queues; i++) {
1990 		txq = &apc->tx_qp[i].txq;
1991 
1992 		while (atomic_read(&txq->pending_sends) > 0)
1993 			usleep_range(1000, 2000);
1994 	}
1995 
1996 	/* We're 100% sure the queues can no longer be woken up, because
1997 	 * we're sure now mana_poll_tx_cq() can't be running.
1998 	 */
1999 
2000 	apc->rss_state = TRI_STATE_FALSE;
2001 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2002 	if (err) {
2003 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2004 		return err;
2005 	}
2006 
2007 	mana_destroy_vport(apc);
2008 
2009 	return 0;
2010 }
2011 
2012 int mana_detach(struct net_device *ndev, bool from_close)
2013 {
2014 	struct mana_port_context *apc = netdev_priv(ndev);
2015 	int err;
2016 
2017 	ASSERT_RTNL();
2018 
2019 	apc->port_st_save = apc->port_is_up;
2020 	apc->port_is_up = false;
2021 
2022 	/* Ensure port state updated before txq state */
2023 	smp_wmb();
2024 
2025 	netif_tx_disable(ndev);
2026 	netif_carrier_off(ndev);
2027 
2028 	if (apc->port_st_save) {
2029 		err = mana_dealloc_queues(ndev);
2030 		if (err)
2031 			return err;
2032 	}
2033 
2034 	if (!from_close) {
2035 		netif_device_detach(ndev);
2036 		mana_cleanup_port_context(apc);
2037 	}
2038 
2039 	return 0;
2040 }
2041 
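/* Allocate and initialize the net_device for one vPort: set the defaults,
 * query the vPort configuration, set the offload features and register the
 * netdev.
 */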
2042 static int mana_probe_port(struct mana_context *ac, int port_idx,
2043 			   struct net_device **ndev_storage)
2044 {
2045 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2046 	struct mana_port_context *apc;
2047 	struct net_device *ndev;
2048 	int err;
2049 
2050 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2051 				 gc->max_num_queues);
2052 	if (!ndev)
2053 		return -ENOMEM;
2054 
2055 	*ndev_storage = ndev;
2056 
2057 	apc = netdev_priv(ndev);
2058 	apc->ac = ac;
2059 	apc->ndev = ndev;
2060 	apc->max_queues = gc->max_num_queues;
2061 	apc->num_queues = gc->max_num_queues;
2062 	apc->port_handle = INVALID_MANA_HANDLE;
2063 	apc->pf_filter_handle = INVALID_MANA_HANDLE;
2064 	apc->port_idx = port_idx;
2065 
2066 	ndev->netdev_ops = &mana_devops;
2067 	ndev->ethtool_ops = &mana_ethtool_ops;
2068 	ndev->mtu = ETH_DATA_LEN;
2069 	ndev->max_mtu = ndev->mtu;
2070 	ndev->min_mtu = ndev->mtu;
2071 	ndev->needed_headroom = MANA_HEADROOM;
2072 	SET_NETDEV_DEV(ndev, gc->dev);
2073 
2074 	netif_carrier_off(ndev);
2075 
2076 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2077 
2078 	err = mana_init_port(ndev);
2079 	if (err)
2080 		goto free_net;
2081 
2082 	netdev_lockdep_set_classes(ndev);
2083 
2084 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2085 	ndev->hw_features |= NETIF_F_RXCSUM;
2086 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2087 	ndev->hw_features |= NETIF_F_RXHASH;
2088 	ndev->features = ndev->hw_features;
2089 	ndev->vlan_features = 0;
2090 
2091 	err = register_netdev(ndev);
2092 	if (err) {
2093 		netdev_err(ndev, "Unable to register netdev.\n");
2094 		goto reset_apc;
2095 	}
2096 
2097 	return 0;
2098 
2099 reset_apc:
2100 	kfree(apc->rxqs);
2101 	apc->rxqs = NULL;
2102 free_net:
2103 	*ndev_storage = NULL;
2104 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2105 	free_netdev(ndev);
2106 	return err;
2107 }
2108 
2109 int mana_probe(struct gdma_dev *gd, bool resuming)
2110 {
2111 	struct gdma_context *gc = gd->gdma_context;
2112 	struct mana_context *ac = gd->driver_data;
2113 	struct device *dev = gc->dev;
2114 	u16 num_ports = 0;
2115 	int err;
2116 	int i;
2117 
2118 	dev_info(dev,
2119 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
2120 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2121 
2122 	err = mana_gd_register_device(gd);
2123 	if (err)
2124 		return err;
2125 
2126 	if (!resuming) {
2127 		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
2128 		if (!ac)
2129 			return -ENOMEM;
2130 
2131 		ac->gdma_dev = gd;
2132 		gd->driver_data = ac;
2133 	}
2134 
2135 	err = mana_create_eq(ac);
2136 	if (err)
2137 		goto out;
2138 
2139 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2140 				    MANA_MICRO_VERSION, &num_ports);
2141 	if (err)
2142 		goto out;
2143 
2144 	if (!resuming) {
2145 		ac->num_ports = num_ports;
2146 	} else {
2147 		if (ac->num_ports != num_ports) {
2148 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
2149 				ac->num_ports, num_ports);
2150 			err = -EPROTO;
2151 			goto out;
2152 		}
2153 	}
2154 
2155 	if (ac->num_ports == 0)
2156 		dev_err(dev, "Failed to detect any vPort\n");
2157 
2158 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2159 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2160 
2161 	if (!resuming) {
2162 		for (i = 0; i < ac->num_ports; i++) {
2163 			err = mana_probe_port(ac, i, &ac->ports[i]);
2164 			if (err)
2165 				break;
2166 		}
2167 	} else {
2168 		for (i = 0; i < ac->num_ports; i++) {
2169 			rtnl_lock();
2170 			err = mana_attach(ac->ports[i]);
2171 			rtnl_unlock();
2172 			if (err)
2173 				break;
2174 		}
2175 	}
2176 out:
2177 	if (err)
2178 		mana_remove(gd, false);
2179 
2180 	return err;
2181 }
2182 
2183 void mana_remove(struct gdma_dev *gd, bool suspending)
2184 {
2185 	struct gdma_context *gc = gd->gdma_context;
2186 	struct mana_context *ac = gd->driver_data;
2187 	struct device *dev = gc->dev;
2188 	struct net_device *ndev;
2189 	int err;
2190 	int i;
2191 
2192 	for (i = 0; i < ac->num_ports; i++) {
2193 		ndev = ac->ports[i];
2194 		if (!ndev) {
2195 			if (i == 0)
2196 				dev_err(dev, "No net device to remove\n");
2197 			goto out;
2198 		}
2199 
2200 		/* All cleanup actions should stay after rtnl_lock(), otherwise
2201 		 * other functions may access partially cleaned up data.
2202 		 */
2203 		rtnl_lock();
2204 
2205 		err = mana_detach(ndev, false);
2206 		if (err)
2207 			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2208 				   i, err);
2209 
2210 		if (suspending) {
2211 			/* No need to unregister the ndev. */
2212 			rtnl_unlock();
2213 			continue;
2214 		}
2215 
2216 		unregister_netdevice(ndev);
2217 
2218 		rtnl_unlock();
2219 
2220 		free_netdev(ndev);
2221 	}
2222 
2223 	mana_destroy_eq(ac);
2224 
2225 out:
2226 	mana_gd_deregister_device(gd);
2227 
2228 	if (suspending)
2229 		return;
2230 
2231 	gd->driver_data = NULL;
2232 	gd->gdma_context = NULL;
2233 	kfree(ac);
2234 }
2235