// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

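/* Unmap and release a page that was attached to an Rx ring buffer. */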
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

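/* Allocate a page of the given order and map it for device-to-CPU DMA. */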
static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	dma_addr_t daddr;
	int ret = -ENOMEM;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

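/* Make sure the Rx buffer has a page to receive into: advance within the
 * current page, reset it for reuse when the ring is the only owner, or
 * drop and reallocate it once the page is exhausted.
 */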
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* A page ref count of one means the ring is the only user
		 * and can reuse the buffer.
		 */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try to flip to the next chunk within the page */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
				(PAGE_SIZE << order)) {
				self->stats.rx.pg_flips++;
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and reallocate.
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				self->stats.rx.pg_losts++;
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			self->stats.rx.pg_reuses++;
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		return ret;
	}

	return 0;
}

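/* Allocate the software buffer ring and the DMA-coherent hardware
 * descriptor ring.
 */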
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	/* Page order needed to fit one AQ_CFG_RX_FRAME_MAX sized frame */
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}

int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	return 0;
}

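/* Return true when descriptor index i lies strictly between head h and
 * tail t on the circular ring, taking wrap-around into account.
 */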
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

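/* Stop the Tx queue when free descriptors run low and wake it back up once
 * enough descriptors have been reclaimed.
 */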
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}

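/* Reclaim completed Tx descriptors: unmap their DMA buffers, free the skb
 * on the last fragment of a packet and advance sw_head. Returns false when
 * the clean budget was exhausted before catching up with hw_head.
 */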
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
						buff->eop_index,
						self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);

		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

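/* Propagate the hardware checksum result to the skb when Rx checksum
 * offload is enabled on the netdev.
 */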
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		++self->stats.rx.errors;
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
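/* NAPI Rx handler: walk completed descriptors, reassemble multi-descriptor
 * (RSC) packets, build skbs (build_skb() for single-fragment frames, a
 * header copy plus page fragments otherwise) and feed them to GRO.
 */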
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
		self->sw_head = aq_ring_next_dx(self, self->sw_head),
		--budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;

			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error || buff->is_cso_err) {
				buff_ = buff;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				++self->stats.rx.errors;
				continue;
			}
		}

		if (buff->is_error) {
			++self->stats.rx.errors;
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {
			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
				hdr_len = eth_get_headlen(skb->dev,
							  aq_buf_vaddr(&buff->rxdata),
							  AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
			       ALIGN(hdr_len, sizeof(long)));

			if (buff->len - hdr_len > 0) {
				skb_add_rx_frag(skb, 0, buff->rxdata.page,
						buff->rxdata.pg_off + hdr_len,
						buff->len - hdr_len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff->rxdata.page);
			}

			if (!buff->is_eop) {
				buff_ = buff;
				/* Resume at the first unused frag slot:
				 * frag 0 is skipped when the whole first
				 * buffer fits into the linear area.
				 */
				i = skb_shinfo(skb)->nr_frags;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					dma_sync_single_range_for_cpu(
							aq_nic_get_dev(self->aq_nic),
							buff_->rxdata.daddr,
							buff_->rxdata.pg_off,
							buff_->len,
							DMA_FROM_DEVICE);
					skb_add_rx_frag(skb, i++,
							buff_->rxdata.page,
							buff_->rxdata.pg_off,
							buff_->len,
							AQ_CFG_RX_FRAME_MAX);
					page_ref_inc(buff_->rxdata.page);
					buff_->is_cleaned = true;

					buff->is_ip_cso &= buff_->is_ip_cso;
					buff->is_udp_cso &= buff_->is_udp_cso;
					buff->is_tcp_cso &= buff_->is_tcp_cso;
					buff->is_cso_err |= buff_->is_cso_err;

				} while (!buff_->is_eop);
			}
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

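/* Fill free Rx descriptors with receive pages. The refill is skipped while
 * the number of free descriptors stays below the refill threshold.
 */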
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
		self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

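/* Release the Rx pages still attached to descriptors between sw_head and
 * sw_tail.
 */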
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	for (; self->sw_head != self->sw_tail;
		self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}

err_exit:;
}

void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		goto err_exit;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);

err_exit:;
}