1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 /* The driver transmit and receive code */
5
6 #include <linux/prefetch.h>
7 #include <linux/mm.h>
8 #include <linux/bpf_trace.h>
9 #include <net/xdp.h>
10 #include "ice_txrx_lib.h"
11 #include "ice_lib.h"
12 #include "ice.h"
13 #include "ice_dcb_lib.h"
14 #include "ice_xsk.h"
15
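/* maximum number of header bytes pulled into the skb linear area by
 * ice_construct_skb() below
 */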
16 #define ICE_RX_HDR_SIZE 256
17
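/* Rx descriptor ID reported in Flow Director programming status write-backs,
 * and the number of 1 ms waits ice_prgm_fdir_fltr() below makes for free
 * descriptors when programming a filter
 */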
18 #define FDIR_DESC_RXDID 0x40
19 #define ICE_FDIR_CLEAN_DELAY 10
20
21 /**
22 * ice_prgm_fdir_fltr - Program a Flow Director filter
23 * @vsi: VSI to send dummy packet
24 * @fdir_desc: flow director descriptor
25 * @raw_packet: allocated buffer for flow director
26 */
27 int
28 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
29 u8 *raw_packet)
30 {
31 struct ice_tx_buf *tx_buf, *first;
32 struct ice_fltr_desc *f_desc;
33 struct ice_tx_desc *tx_desc;
34 struct ice_ring *tx_ring;
35 struct device *dev;
36 dma_addr_t dma;
37 u32 td_cmd;
38 u16 i;
39
40 /* VSI and Tx ring */
41 if (!vsi)
42 return -ENOENT;
43 tx_ring = vsi->tx_rings[0];
44 if (!tx_ring || !tx_ring->desc)
45 return -ENOENT;
46 dev = tx_ring->dev;
47
48 /* we are using two descriptors to add/del a filter and we can wait */
49 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
50 if (!i)
51 return -EAGAIN;
52 msleep_interruptible(1);
53 }
54
55 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
56 DMA_TO_DEVICE);
57
58 if (dma_mapping_error(dev, dma))
59 return -EINVAL;
60
61 /* grab the next descriptor */
62 i = tx_ring->next_to_use;
63 first = &tx_ring->tx_buf[i];
64 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
65 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
66
67 i++;
68 i = (i < tx_ring->count) ? i : 0;
69 tx_desc = ICE_TX_DESC(tx_ring, i);
70 tx_buf = &tx_ring->tx_buf[i];
71
72 i++;
73 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
74
75 memset(tx_buf, 0, sizeof(*tx_buf));
76 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
77 dma_unmap_addr_set(tx_buf, dma, dma);
78
79 tx_desc->buf_addr = cpu_to_le64(dma);
80 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
81 ICE_TX_DESC_CMD_RE;
82
83 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
84 tx_buf->raw_buf = raw_packet;
85
86 tx_desc->cmd_type_offset_bsz =
87 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
88
89 /* Force memory write to complete before letting h/w know
90 * there are new descriptors to fetch.
91 */
92 wmb();
93
94 /* mark the data descriptor to be watched */
95 first->next_to_watch = tx_desc;
96
97 writel(tx_ring->next_to_use, tx_ring->tail);
98
99 return 0;
100 }
101
102 /**
103 * ice_unmap_and_free_tx_buf - Release a Tx buffer
104 * @ring: the ring that owns the buffer
105 * @tx_buf: the buffer to free
106 */
107 static void
108 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
109 {
110 if (tx_buf->skb) {
111 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
112 devm_kfree(ring->dev, tx_buf->raw_buf);
113 else if (ice_ring_is_xdp(ring))
114 page_frag_free(tx_buf->raw_buf);
115 else
116 dev_kfree_skb_any(tx_buf->skb);
117 if (dma_unmap_len(tx_buf, len))
118 dma_unmap_single(ring->dev,
119 dma_unmap_addr(tx_buf, dma),
120 dma_unmap_len(tx_buf, len),
121 DMA_TO_DEVICE);
122 } else if (dma_unmap_len(tx_buf, len)) {
123 dma_unmap_page(ring->dev,
124 dma_unmap_addr(tx_buf, dma),
125 dma_unmap_len(tx_buf, len),
126 DMA_TO_DEVICE);
127 }
128
129 tx_buf->next_to_watch = NULL;
130 tx_buf->skb = NULL;
131 dma_unmap_len_set(tx_buf, len, 0);
132 /* tx_buf must be completely set up in the transmit path */
133 }
134
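/**
 * txring_txq - look up the netdev Tx queue that backs an ice Tx ring
 * @ring: Tx ring to find the netdev queue for
 */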
135 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
136 {
137 return netdev_get_tx_queue(ring->netdev, ring->q_index);
138 }
139
140 /**
141 * ice_clean_tx_ring - Free any empty Tx buffers
142 * @tx_ring: ring to be cleaned
143 */
144 void ice_clean_tx_ring(struct ice_ring *tx_ring)
145 {
146 u16 i;
147
148 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
149 ice_xsk_clean_xdp_ring(tx_ring);
150 goto tx_skip_free;
151 }
152
153 /* ring already cleared, nothing to do */
154 if (!tx_ring->tx_buf)
155 return;
156
157 /* Free all the Tx ring sk_buffs */
158 for (i = 0; i < tx_ring->count; i++)
159 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
160
161 tx_skip_free:
162 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
163
164 /* Zero out the descriptor ring */
165 memset(tx_ring->desc, 0, tx_ring->size);
166
167 tx_ring->next_to_use = 0;
168 tx_ring->next_to_clean = 0;
169
170 if (!tx_ring->netdev)
171 return;
172
173 /* cleanup Tx queue statistics */
174 netdev_tx_reset_queue(txring_txq(tx_ring));
175 }
176
177 /**
178 * ice_free_tx_ring - Free Tx resources per queue
179 * @tx_ring: Tx descriptor ring for a specific queue
180 *
181 * Free all transmit software resources
182 */
183 void ice_free_tx_ring(struct ice_ring *tx_ring)
184 {
185 ice_clean_tx_ring(tx_ring);
186 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
187 tx_ring->tx_buf = NULL;
188
189 if (tx_ring->desc) {
190 dmam_free_coherent(tx_ring->dev, tx_ring->size,
191 tx_ring->desc, tx_ring->dma);
192 tx_ring->desc = NULL;
193 }
194 }
195
196 /**
197 * ice_clean_tx_irq - Reclaim resources after transmit completes
198 * @tx_ring: Tx ring to clean
199 * @napi_budget: Used to determine if we are in netpoll
200 *
201 * Returns true if there's any budget left (i.e. the clean is finished)
202 */
203 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
204 {
205 unsigned int total_bytes = 0, total_pkts = 0;
206 unsigned int budget = ICE_DFLT_IRQ_WORK;
207 struct ice_vsi *vsi = tx_ring->vsi;
208 s16 i = tx_ring->next_to_clean;
209 struct ice_tx_desc *tx_desc;
210 struct ice_tx_buf *tx_buf;
211
212 tx_buf = &tx_ring->tx_buf[i];
213 tx_desc = ICE_TX_DESC(tx_ring, i);
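/* walk the ring with a negative index so the wrap point can be detected
 * cheaply with the !i checks below
 */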
214 i -= tx_ring->count;
215
216 prefetch(&vsi->state);
217
218 do {
219 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
220
221 /* if next_to_watch is not set then there is no work pending */
222 if (!eop_desc)
223 break;
224
225 smp_rmb(); /* prevent any other reads prior to eop_desc */
226
227 /* if the descriptor isn't done, no work yet to do */
228 if (!(eop_desc->cmd_type_offset_bsz &
229 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
230 break;
231
232 /* clear next_to_watch to prevent false hangs */
233 tx_buf->next_to_watch = NULL;
234
235 /* update the statistics for this packet */
236 total_bytes += tx_buf->bytecount;
237 total_pkts += tx_buf->gso_segs;
238
239 if (ice_ring_is_xdp(tx_ring))
240 page_frag_free(tx_buf->raw_buf);
241 else
242 /* free the skb */
243 napi_consume_skb(tx_buf->skb, napi_budget);
244
245 /* unmap skb header data */
246 dma_unmap_single(tx_ring->dev,
247 dma_unmap_addr(tx_buf, dma),
248 dma_unmap_len(tx_buf, len),
249 DMA_TO_DEVICE);
250
251 /* clear tx_buf data */
252 tx_buf->skb = NULL;
253 dma_unmap_len_set(tx_buf, len, 0);
254
255 /* unmap remaining buffers */
256 while (tx_desc != eop_desc) {
257 tx_buf++;
258 tx_desc++;
259 i++;
260 if (unlikely(!i)) {
261 i -= tx_ring->count;
262 tx_buf = tx_ring->tx_buf;
263 tx_desc = ICE_TX_DESC(tx_ring, 0);
264 }
265
266 /* unmap any remaining paged data */
267 if (dma_unmap_len(tx_buf, len)) {
268 dma_unmap_page(tx_ring->dev,
269 dma_unmap_addr(tx_buf, dma),
270 dma_unmap_len(tx_buf, len),
271 DMA_TO_DEVICE);
272 dma_unmap_len_set(tx_buf, len, 0);
273 }
274 }
275
276 /* move us one more past the eop_desc for start of next pkt */
277 tx_buf++;
278 tx_desc++;
279 i++;
280 if (unlikely(!i)) {
281 i -= tx_ring->count;
282 tx_buf = tx_ring->tx_buf;
283 tx_desc = ICE_TX_DESC(tx_ring, 0);
284 }
285
286 prefetch(tx_desc);
287
288 /* update budget accounting */
289 budget--;
290 } while (likely(budget));
291
292 i += tx_ring->count;
293 tx_ring->next_to_clean = i;
294
295 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
296
297 if (ice_ring_is_xdp(tx_ring))
298 return !!budget;
299
300 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
301 total_bytes);
302
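/* only wake the queue once at least two worst-case frames' worth of
 * descriptors are free again
 */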
303 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
304 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
305 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
306 /* Make sure that anybody stopping the queue after this
307 * sees the new next_to_clean.
308 */
309 smp_mb();
310 if (__netif_subqueue_stopped(tx_ring->netdev,
311 tx_ring->q_index) &&
312 !test_bit(ICE_VSI_DOWN, vsi->state)) {
313 netif_wake_subqueue(tx_ring->netdev,
314 tx_ring->q_index);
315 ++tx_ring->tx_stats.restart_q;
316 }
317 }
318
319 return !!budget;
320 }
321
322 /**
323 * ice_setup_tx_ring - Allocate the Tx descriptors
324 * @tx_ring: the Tx ring to set up
325 *
326 * Return 0 on success, negative on error
327 */
328 int ice_setup_tx_ring(struct ice_ring *tx_ring)
329 {
330 struct device *dev = tx_ring->dev;
331
332 if (!dev)
333 return -ENOMEM;
334
335 /* warn if we are about to overwrite the pointer */
336 WARN_ON(tx_ring->tx_buf);
337 tx_ring->tx_buf =
338 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
339 GFP_KERNEL);
340 if (!tx_ring->tx_buf)
341 return -ENOMEM;
342
343 /* round up to nearest page */
344 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
345 PAGE_SIZE);
346 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
347 GFP_KERNEL);
348 if (!tx_ring->desc) {
349 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
350 tx_ring->size);
351 goto err;
352 }
353
354 tx_ring->next_to_use = 0;
355 tx_ring->next_to_clean = 0;
356 tx_ring->tx_stats.prev_pkt = -1;
357 return 0;
358
359 err:
360 devm_kfree(dev, tx_ring->tx_buf);
361 tx_ring->tx_buf = NULL;
362 return -ENOMEM;
363 }
364
365 /**
366 * ice_clean_rx_ring - Free Rx buffers
367 * @rx_ring: ring to be cleaned
368 */
369 void ice_clean_rx_ring(struct ice_ring *rx_ring)
370 {
371 struct device *dev = rx_ring->dev;
372 u16 i;
373
374 /* ring already cleared, nothing to do */
375 if (!rx_ring->rx_buf)
376 return;
377
378 if (rx_ring->skb) {
379 dev_kfree_skb(rx_ring->skb);
380 rx_ring->skb = NULL;
381 }
382
383 if (rx_ring->xsk_pool) {
384 ice_xsk_clean_rx_ring(rx_ring);
385 goto rx_skip_free;
386 }
387
388 /* Free all the Rx ring sk_buffs */
389 for (i = 0; i < rx_ring->count; i++) {
390 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
391
392 if (!rx_buf->page)
393 continue;
394
395 /* Invalidate cache lines that may have been written to by
396 * device so that we avoid corrupting memory.
397 */
398 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
399 rx_buf->page_offset,
400 rx_ring->rx_buf_len,
401 DMA_FROM_DEVICE);
402
403 /* free resources associated with mapping */
404 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
405 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
406 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
407
408 rx_buf->page = NULL;
409 rx_buf->page_offset = 0;
410 }
411
412 rx_skip_free:
413 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
414
415 /* Zero out the descriptor ring */
416 memset(rx_ring->desc, 0, rx_ring->size);
417
418 rx_ring->next_to_alloc = 0;
419 rx_ring->next_to_clean = 0;
420 rx_ring->next_to_use = 0;
421 }
422
423 /**
424 * ice_free_rx_ring - Free Rx resources
425 * @rx_ring: ring to clean the resources from
426 *
427 * Free all receive software resources
428 */
429 void ice_free_rx_ring(struct ice_ring *rx_ring)
430 {
431 ice_clean_rx_ring(rx_ring);
432 if (rx_ring->vsi->type == ICE_VSI_PF)
433 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
434 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
435 rx_ring->xdp_prog = NULL;
436 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
437 rx_ring->rx_buf = NULL;
438
439 if (rx_ring->desc) {
440 dmam_free_coherent(rx_ring->dev, rx_ring->size,
441 rx_ring->desc, rx_ring->dma);
442 rx_ring->desc = NULL;
443 }
444 }
445
446 /**
447 * ice_setup_rx_ring - Allocate the Rx descriptors
448 * @rx_ring: the Rx ring to set up
449 *
450 * Return 0 on success, negative on error
451 */
452 int ice_setup_rx_ring(struct ice_ring *rx_ring)
453 {
454 struct device *dev = rx_ring->dev;
455
456 if (!dev)
457 return -ENOMEM;
458
459 /* warn if we are about to overwrite the pointer */
460 WARN_ON(rx_ring->rx_buf);
461 rx_ring->rx_buf =
462 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
463 GFP_KERNEL);
464 if (!rx_ring->rx_buf)
465 return -ENOMEM;
466
467 /* round up to nearest page */
468 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
469 PAGE_SIZE);
470 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
471 GFP_KERNEL);
472 if (!rx_ring->desc) {
473 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
474 rx_ring->size);
475 goto err;
476 }
477
478 rx_ring->next_to_use = 0;
479 rx_ring->next_to_clean = 0;
480
481 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
482 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
483
484 if (rx_ring->vsi->type == ICE_VSI_PF &&
485 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
486 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
487 rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
488 goto err;
489 return 0;
490
491 err:
492 devm_kfree(dev, rx_ring->rx_buf);
493 rx_ring->rx_buf = NULL;
494 return -ENOMEM;
495 }
496
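/**
 * ice_rx_frame_truesize - compute the truesize of a received frame
 * @rx_ring: Rx ring the buffer came from
 * @size: packet length, only used when PAGE_SIZE is 8192 or larger
 *
 * Buffers are fixed at half a page on 4K-page systems; on larger pages the
 * truesize follows the frame length plus any headroom and skb_shared_info
 * overhead.
 */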
497 static unsigned int
498 ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
499 {
500 unsigned int truesize;
501
502 #if (PAGE_SIZE < 8192)
503 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
504 #else
505 truesize = rx_ring->rx_offset ?
506 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
507 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
508 SKB_DATA_ALIGN(size);
509 #endif
510 return truesize;
511 }
512
513 /**
514 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
515 * @rx_ring: Rx ring
516 * @xdp: xdp_buff used as input to the XDP program
517 * @xdp_prog: XDP program to run
518 *
519 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
520 */
521 static int
522 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
523 struct bpf_prog *xdp_prog)
524 {
525 struct ice_ring *xdp_ring;
526 int err;
527 u32 act;
528
529 act = bpf_prog_run_xdp(xdp_prog, xdp);
530 switch (act) {
531 case XDP_PASS:
532 return ICE_XDP_PASS;
533 case XDP_TX:
534 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
535 return ice_xmit_xdp_buff(xdp, xdp_ring);
536 case XDP_REDIRECT:
537 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
538 return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
539 default:
540 bpf_warn_invalid_xdp_action(act);
541 fallthrough;
542 case XDP_ABORTED:
543 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
544 fallthrough;
545 case XDP_DROP:
546 return ICE_XDP_CONSUMED;
547 }
548 }
549
550 /**
551 * ice_xdp_xmit - submit packets to XDP ring for transmission
552 * @dev: netdev
553 * @n: number of XDP frames to be transmitted
554 * @frames: XDP frames to be transmitted
555 * @flags: transmit flags
556 *
557 * Returns number of frames successfully sent. Failed frames
558 * will be freed by the XDP core.
559 * For error cases, a negative errno code is returned and no frames
560 * are transmitted (caller must handle freeing frames).
561 */
562 int
563 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
564 u32 flags)
565 {
566 struct ice_netdev_priv *np = netdev_priv(dev);
567 unsigned int queue_index = smp_processor_id();
568 struct ice_vsi *vsi = np->vsi;
569 struct ice_ring *xdp_ring;
570 int nxmit = 0, i;
571
572 if (test_bit(ICE_VSI_DOWN, vsi->state))
573 return -ENETDOWN;
574
575 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
576 return -ENXIO;
577
578 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
579 return -EINVAL;
580
581 xdp_ring = vsi->xdp_rings[queue_index];
582 for (i = 0; i < n; i++) {
583 struct xdp_frame *xdpf = frames[i];
584 int err;
585
586 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
587 if (err != ICE_XDP_TX)
588 break;
589 nxmit++;
590 }
591
592 if (unlikely(flags & XDP_XMIT_FLUSH))
593 ice_xdp_ring_update_tail(xdp_ring);
594
595 return nxmit;
596 }
597
598 /**
599 * ice_alloc_mapped_page - recycle or make a new page
600 * @rx_ring: ring to use
601 * @bi: rx_buf struct to modify
602 *
603 * Returns true if the page was successfully allocated or
604 * reused.
605 */
606 static bool
607 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
608 {
609 struct page *page = bi->page;
610 dma_addr_t dma;
611
612 /* since we are recycling buffers we should seldom need to alloc */
613 if (likely(page))
614 return true;
615
616 /* alloc new page for storage */
617 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
618 if (unlikely(!page)) {
619 rx_ring->rx_stats.alloc_page_failed++;
620 return false;
621 }
622
623 /* map page for use */
624 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
625 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
626
627 /* if mapping failed free memory back to system since
628 * there isn't much point in holding memory we can't use
629 */
630 if (dma_mapping_error(rx_ring->dev, dma)) {
631 __free_pages(page, ice_rx_pg_order(rx_ring));
632 rx_ring->rx_stats.alloc_page_failed++;
633 return false;
634 }
635
636 bi->dma = dma;
637 bi->page = page;
638 bi->page_offset = rx_ring->rx_offset;
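/* take a large batch of page references up front and consume them through
 * pagecnt_bias so the hot path avoids per-frame atomic refcount updates
 */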
639 page_ref_add(page, USHRT_MAX - 1);
640 bi->pagecnt_bias = USHRT_MAX;
641
642 return true;
643 }
644
645 /**
646 * ice_alloc_rx_bufs - Replace used receive buffers
647 * @rx_ring: ring to place buffers on
648 * @cleaned_count: number of buffers to replace
649 *
650 * Returns false if all allocations were successful, true if any fail. Returning
651 * true signals to the caller that we didn't replace cleaned_count buffers and
652 * there is more work to do.
653 *
654 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
655 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
656 * multiple tail writes per call.
657 */
658 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
659 {
660 union ice_32b_rx_flex_desc *rx_desc;
661 u16 ntu = rx_ring->next_to_use;
662 struct ice_rx_buf *bi;
663
664 /* do nothing if no valid netdev defined */
665 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
666 !cleaned_count)
667 return false;
668
669 /* get the Rx descriptor and buffer based on next_to_use */
670 rx_desc = ICE_RX_DESC(rx_ring, ntu);
671 bi = &rx_ring->rx_buf[ntu];
672
673 do {
674 /* if we fail here, we have work remaining */
675 if (!ice_alloc_mapped_page(rx_ring, bi))
676 break;
677
678 /* sync the buffer for use by the device */
679 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
680 bi->page_offset,
681 rx_ring->rx_buf_len,
682 DMA_FROM_DEVICE);
683
684 /* Refresh the desc even if buffer_addrs didn't change
685 * because each write-back erases this info.
686 */
687 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
688
689 rx_desc++;
690 bi++;
691 ntu++;
692 if (unlikely(ntu == rx_ring->count)) {
693 rx_desc = ICE_RX_DESC(rx_ring, 0);
694 bi = rx_ring->rx_buf;
695 ntu = 0;
696 }
697
698 /* clear the status bits for the next_to_use descriptor */
699 rx_desc->wb.status_error0 = 0;
700
701 cleaned_count--;
702 } while (cleaned_count);
703
704 if (rx_ring->next_to_use != ntu)
705 ice_release_rx_desc(rx_ring, ntu);
706
707 return !!cleaned_count;
708 }
709
710 /**
711 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
712 * @rx_buf: Rx buffer to adjust
713 * @size: Size of adjustment
714 *
715 * Update the offset within page so that Rx buf will be ready to be reused.
716 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
717 * so the second half of page assigned to Rx buffer will be used, otherwise
718 * the offset is moved by "size" bytes
719 */
720 static void
721 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
722 {
723 #if (PAGE_SIZE < 8192)
724 /* flip page offset to other buffer */
725 rx_buf->page_offset ^= size;
726 #else
727 /* move offset up to the next cache line */
728 rx_buf->page_offset += size;
729 #endif
730 }
731
732 /**
733 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
734 * @rx_buf: buffer containing the page
735 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
736 *
737 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
738 * which will assign the current buffer to the buffer that next_to_alloc is
739 * pointing to; otherwise, the DMA mapping needs to be destroyed and
740 * page freed
741 */
742 static bool
743 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
744 {
745 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
746 struct page *page = rx_buf->page;
747
748 /* avoid re-using remote and pfmemalloc pages */
749 if (!dev_page_is_reusable(page))
750 return false;
751
752 #if (PAGE_SIZE < 8192)
753 /* if we are the only owner of the page we can reuse it */
754 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
755 return false;
756 #else
757 #define ICE_LAST_OFFSET \
758 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
759 if (rx_buf->page_offset > ICE_LAST_OFFSET)
760 return false;
761 #endif /* PAGE_SIZE < 8192 */
762
763 /* If we have drained the page fragment pool we need to update
764 * the pagecnt_bias and page count so that we fully restock the
765 * number of references the driver holds.
766 */
767 if (unlikely(pagecnt_bias == 1)) {
768 page_ref_add(page, USHRT_MAX - 1);
769 rx_buf->pagecnt_bias = USHRT_MAX;
770 }
771
772 return true;
773 }
774
775 /**
776 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
777 * @rx_ring: Rx descriptor ring to transact packets on
778 * @rx_buf: buffer containing page to add
779 * @skb: sk_buff to place the data into
780 * @size: packet length from rx_desc
781 *
782 * This function will add the data contained in rx_buf->page to the skb.
783 * It will just attach the page as a frag to the skb.
784 * The function will then update the page offset.
785 */
786 static void
787 ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
788 struct sk_buff *skb, unsigned int size)
789 {
790 #if (PAGE_SIZE >= 8192)
791 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
792 #else
793 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
794 #endif
795
796 if (!size)
797 return;
798 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
799 rx_buf->page_offset, size, truesize);
800
801 /* page is being used so we must update the page offset */
802 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
803 }
804
805 /**
806 * ice_reuse_rx_page - page flip buffer and store it back on the ring
807 * @rx_ring: Rx descriptor ring to store buffers on
808 * @old_buf: donor buffer to have page reused
809 *
810 * Synchronizes page for reuse by the adapter
811 */
812 static void
813 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
814 {
815 u16 nta = rx_ring->next_to_alloc;
816 struct ice_rx_buf *new_buf;
817
818 new_buf = &rx_ring->rx_buf[nta];
819
820 /* update, and store next to alloc */
821 nta++;
822 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
823
824 /* Transfer page from old buffer to new buffer.
825 * Move each member individually to avoid possible store
826 * forwarding stalls and unnecessary copy of skb.
827 */
828 new_buf->dma = old_buf->dma;
829 new_buf->page = old_buf->page;
830 new_buf->page_offset = old_buf->page_offset;
831 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
832 }
833
834 /**
835 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
836 * @rx_ring: Rx descriptor ring to transact packets on
837 * @size: size of buffer to add to skb
838 * @rx_buf_pgcnt: rx_buf page refcount
839 *
840 * This function will pull an Rx buffer from the ring and synchronize it
841 * for use by the CPU.
842 */
843 static struct ice_rx_buf *
844 ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
845 int *rx_buf_pgcnt)
846 {
847 struct ice_rx_buf *rx_buf;
848
849 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
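/* snapshot the page refcount on 4K-page systems so ice_can_reuse_rx_page()
 * can detect other owners; larger pages rely on the offset check instead
 */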
850 *rx_buf_pgcnt =
851 #if (PAGE_SIZE < 8192)
852 page_count(rx_buf->page);
853 #else
854 0;
855 #endif
856 prefetchw(rx_buf->page);
857
858 if (!size)
859 return rx_buf;
860 /* we are reusing so sync this buffer for CPU use */
861 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
862 rx_buf->page_offset, size,
863 DMA_FROM_DEVICE);
864
865 /* We have pulled a buffer for use, so decrement pagecnt_bias */
866 rx_buf->pagecnt_bias--;
867
868 return rx_buf;
869 }
870
871 /**
872 * ice_build_skb - Build skb around an existing buffer
873 * @rx_ring: Rx descriptor ring to transact packets on
874 * @rx_buf: Rx buffer to pull data from
875 * @xdp: xdp_buff pointing to the data
876 *
877 * This function builds an skb around an existing Rx buffer, taking care
878 * to set up the skb correctly and avoid any memcpy overhead.
879 */
880 static struct sk_buff *
881 ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
882 struct xdp_buff *xdp)
883 {
884 u8 metasize = xdp->data - xdp->data_meta;
885 #if (PAGE_SIZE < 8192)
886 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
887 #else
888 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
889 SKB_DATA_ALIGN(xdp->data_end -
890 xdp->data_hard_start);
891 #endif
892 struct sk_buff *skb;
893
894 /* Prefetch first cache line of first page. If xdp->data_meta
895 * is unused, this points exactly as xdp->data, otherwise we
896 * likely have a consumer accessing first few bytes of meta
897 * data, and then actual data.
898 */
899 net_prefetch(xdp->data_meta);
900 /* build an skb around the page buffer */
901 skb = build_skb(xdp->data_hard_start, truesize);
902 if (unlikely(!skb))
903 return NULL;
904
905 /* must record Rx queue, otherwise OS features such as
906 * symmetric queue won't work
907 */
908 skb_record_rx_queue(skb, rx_ring->q_index);
909
910 /* update pointers within the skb to store the data */
911 skb_reserve(skb, xdp->data - xdp->data_hard_start);
912 __skb_put(skb, xdp->data_end - xdp->data);
913 if (metasize)
914 skb_metadata_set(skb, metasize);
915
916 /* buffer is used by skb, update page_offset */
917 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
918
919 return skb;
920 }
921
922 /**
923 * ice_construct_skb - Allocate skb and populate it
924 * @rx_ring: Rx descriptor ring to transact packets on
925 * @rx_buf: Rx buffer to pull data from
926 * @xdp: xdp_buff pointing to the data
927 *
928 * This function allocates an skb. It then populates it with the page
929 * data from the current receive descriptor, taking care to set up the
930 * skb correctly.
931 */
932 static struct sk_buff *
933 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
934 struct xdp_buff *xdp)
935 {
936 unsigned int size = xdp->data_end - xdp->data;
937 unsigned int headlen;
938 struct sk_buff *skb;
939
940 /* prefetch first cache line of first page */
941 net_prefetch(xdp->data);
942
943 /* allocate a skb to store the frags */
944 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
945 GFP_ATOMIC | __GFP_NOWARN);
946 if (unlikely(!skb))
947 return NULL;
948
949 skb_record_rx_queue(skb, rx_ring->q_index);
950 /* Determine available headroom for copy */
951 headlen = size;
952 if (headlen > ICE_RX_HDR_SIZE)
953 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
954
955 /* align pull length to size of long to optimize memcpy performance */
956 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
957 sizeof(long)));
958
959 /* if we exhaust the linear part then add what is left as a frag */
960 size -= headlen;
961 if (size) {
962 #if (PAGE_SIZE >= 8192)
963 unsigned int truesize = SKB_DATA_ALIGN(size);
964 #else
965 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
966 #endif
967 skb_add_rx_frag(skb, 0, rx_buf->page,
968 rx_buf->page_offset + headlen, size, truesize);
969 /* buffer is used by skb, update page_offset */
970 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
971 } else {
972 /* buffer is unused, reset bias back to rx_buf; data was copied
973 * onto skb's linear part so there's no need for adjusting
974 * page offset and we can reuse this buffer as-is
975 */
976 rx_buf->pagecnt_bias++;
977 }
978
979 return skb;
980 }
981
982 /**
983 * ice_put_rx_buf - Clean up used buffer and either recycle or free
984 * @rx_ring: Rx descriptor ring to transact packets on
985 * @rx_buf: Rx buffer to pull data from
986 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
987 *
988 * This function will update next_to_clean and then clean up the contents
989 * of the rx_buf. It will either recycle the buffer or unmap it and free
990 * the associated resources.
991 */
992 static void
993 ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
994 int rx_buf_pgcnt)
995 {
996 u16 ntc = rx_ring->next_to_clean + 1;
997
998 /* fetch, update, and store next to clean */
999 ntc = (ntc < rx_ring->count) ? ntc : 0;
1000 rx_ring->next_to_clean = ntc;
1001
1002 if (!rx_buf)
1003 return;
1004
1005 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1006 /* hand second half of page back to the ring */
1007 ice_reuse_rx_page(rx_ring, rx_buf);
1008 } else {
1009 /* we are not reusing the buffer so unmap it */
1010 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1011 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1012 ICE_RX_DMA_ATTR);
1013 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1014 }
1015
1016 /* clear contents of buffer_info */
1017 rx_buf->page = NULL;
1018 }
1019
1020 /**
1021 * ice_is_non_eop - process handling of non-EOP buffers
1022 * @rx_ring: Rx ring being processed
1023 * @rx_desc: Rx descriptor for current buffer
1024 *
1025 * If the buffer is an EOP buffer, this function exits returning false,
1026 * otherwise return true indicating that this is in fact a non-EOP buffer.
1027 */
1028 static bool
1029 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1030 {
1031 /* if we are the last buffer then there is nothing else to do */
1032 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1033 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1034 return false;
1035
1036 rx_ring->rx_stats.non_eop_descs++;
1037
1038 return true;
1039 }
1040
1041 /**
1042 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1043 * @rx_ring: Rx descriptor ring to transact packets on
1044 * @budget: Total limit on number of packets to process
1045 *
1046 * This function provides a "bounce buffer" approach to Rx interrupt
1047 * processing. The advantage to this is that on systems that have
1048 * expensive overhead for IOMMU access this provides a means of avoiding
1049 * it by maintaining the mapping of the page to the system.
1050 *
1051 * Returns amount of work completed
1052 */
1053 int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1054 {
1055 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1056 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1057 unsigned int offset = rx_ring->rx_offset;
1058 unsigned int xdp_res, xdp_xmit = 0;
1059 struct sk_buff *skb = rx_ring->skb;
1060 struct bpf_prog *xdp_prog = NULL;
1061 struct xdp_buff xdp;
1062 bool failure;
1063
1064 /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1065 #if (PAGE_SIZE < 8192)
1066 frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1067 #endif
1068 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1069
1070 /* start the loop to process Rx packets bounded by 'budget' */
1071 while (likely(total_rx_pkts < (unsigned int)budget)) {
1072 union ice_32b_rx_flex_desc *rx_desc;
1073 struct ice_rx_buf *rx_buf;
1074 unsigned char *hard_start;
1075 unsigned int size;
1076 u16 stat_err_bits;
1077 int rx_buf_pgcnt;
1078 u16 vlan_tag = 0;
1079 u8 rx_ptype;
1080
1081 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1082 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1083
1084 /* status_error_len will always be zero for unused descriptors
1085 * because it's cleared in cleanup, and overlaps with hdr_addr
1086 * which is always zero because packet split isn't used. If the
1087 * hardware wrote DD then it will be non-zero.
1088 */
1089 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1090 if (!ice_test_staterr(rx_desc, stat_err_bits))
1091 break;
1092
1093 /* This memory barrier is needed to keep us from reading
1094 * any other fields out of the rx_desc until we know the
1095 * DD bit is set.
1096 */
1097 dma_rmb();
1098
1099 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1100 struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1101
1102 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1103 ctrl_vsi->vf_id != ICE_INVAL_VFID)
1104 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1105 ice_put_rx_buf(rx_ring, NULL, 0);
1106 cleaned_count++;
1107 continue;
1108 }
1109
1110 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1111 ICE_RX_FLX_DESC_PKT_LEN_M;
1112
1113 /* retrieve a buffer from the ring */
1114 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1115
1116 if (!size) {
1117 xdp.data = NULL;
1118 xdp.data_end = NULL;
1119 xdp.data_hard_start = NULL;
1120 xdp.data_meta = NULL;
1121 goto construct_skb;
1122 }
1123
1124 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1125 offset;
1126 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1127 #if (PAGE_SIZE > 4096)
1128 /* At larger PAGE_SIZE, frame_sz depends on the frame length */
1129 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1130 #endif
1131
1132 rcu_read_lock();
1133 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1134 if (!xdp_prog) {
1135 rcu_read_unlock();
1136 goto construct_skb;
1137 }
1138
1139 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1140 rcu_read_unlock();
1141 if (!xdp_res)
1142 goto construct_skb;
1143 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1144 xdp_xmit |= xdp_res;
1145 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1146 } else {
1147 rx_buf->pagecnt_bias++;
1148 }
1149 total_rx_bytes += size;
1150 total_rx_pkts++;
1151
1152 cleaned_count++;
1153 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1154 continue;
1155 construct_skb:
1156 if (skb) {
1157 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1158 } else if (likely(xdp.data)) {
1159 if (ice_ring_uses_build_skb(rx_ring))
1160 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1161 else
1162 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1163 }
1164 /* exit if we failed to retrieve a buffer */
1165 if (!skb) {
1166 rx_ring->rx_stats.alloc_buf_failed++;
1167 if (rx_buf)
1168 rx_buf->pagecnt_bias++;
1169 break;
1170 }
1171
1172 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1173 cleaned_count++;
1174
1175 /* skip if it is a non-EOP desc */
1176 if (ice_is_non_eop(rx_ring, rx_desc))
1177 continue;
1178
1179 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1180 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1181 dev_kfree_skb_any(skb);
1182 continue;
1183 }
1184
1185 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1186 if (ice_test_staterr(rx_desc, stat_err_bits))
1187 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1188
1189 /* pad the skb if needed, to make a valid ethernet frame */
1190 if (eth_skb_pad(skb)) {
1191 skb = NULL;
1192 continue;
1193 }
1194
1195 /* probably a little skewed due to removing CRC */
1196 total_rx_bytes += skb->len;
1197
1198 /* populate checksum, VLAN, and protocol */
1199 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1200 ICE_RX_FLEX_DESC_PTYPE_M;
1201
1202 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1203
1204 /* send completed skb up the stack */
1205 ice_receive_skb(rx_ring, skb, vlan_tag);
1206 skb = NULL;
1207
1208 /* update budget accounting */
1209 total_rx_pkts++;
1210 }
1211
1212 /* return up to cleaned_count buffers to hardware */
1213 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1214
1215 if (xdp_prog)
1216 ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1217 rx_ring->skb = skb;
1218
1219 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1220
1221 /* guarantee a trip back through this routine if there was a failure */
1222 return failure ? budget : (int)total_rx_pkts;
1223 }
1224
1225 /**
1226 * ice_net_dim - Update net DIM algorithm
1227 * @q_vector: the vector associated with the interrupt
1228 *
1229 * Create a DIM sample and notify net_dim() so that it can possibly decide
1230 * a new ITR value based on incoming packets, bytes, and interrupts.
1231 *
1232 * This function is a no-op if the ring is not configured to dynamic ITR.
1233 */
1234 static void ice_net_dim(struct ice_q_vector *q_vector)
1235 {
1236 struct ice_ring_container *tx = &q_vector->tx;
1237 struct ice_ring_container *rx = &q_vector->rx;
1238
1239 if (ITR_IS_DYNAMIC(tx)) {
1240 struct dim_sample dim_sample = {};
1241 u64 packets = 0, bytes = 0;
1242 struct ice_ring *ring;
1243
1244 ice_for_each_ring(ring, q_vector->tx) {
1245 packets += ring->stats.pkts;
1246 bytes += ring->stats.bytes;
1247 }
1248
1249 dim_update_sample(q_vector->total_events, packets, bytes,
1250 &dim_sample);
1251
1252 net_dim(&tx->dim, dim_sample);
1253 }
1254
1255 if (ITR_IS_DYNAMIC(rx)) {
1256 struct dim_sample dim_sample = {};
1257 u64 packets = 0, bytes = 0;
1258 struct ice_ring *ring;
1259
1260 ice_for_each_ring(ring, q_vector->rx) {
1261 packets += ring->stats.pkts;
1262 bytes += ring->stats.bytes;
1263 }
1264
1265 dim_update_sample(q_vector->total_events, packets, bytes,
1266 &dim_sample);
1267
1268 net_dim(&rx->dim, dim_sample);
1269 }
1270 }
1271
1272 /**
1273 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1274 * @itr_idx: interrupt throttling index
1275 * @itr: interrupt throttling value in usecs
1276 */
1277 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1278 {
1279 /* The ITR value is reported in microseconds, and the register value is
1280 * recorded in 2 microsecond units. For this reason we only need to
1281 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1282 * granularity as a shift instead of division. The mask makes sure the
1283 * ITR value is never odd so we don't accidentally write into the field
1284 * prior to the ITR field.
1285 */
1286 itr &= ICE_ITR_MASK;
1287
1288 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1289 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1290 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1291 }
1292
1293 /**
1294 * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
1295 * @q_vector: the vector associated with the interrupt to enable
1296 *
1297 * Update the net_dim() algorithm and re-enable the interrupt associated with
1298 * this vector.
1299 *
1300 * If the VSI is down, the interrupt will not be re-enabled.
1301 */
1302 static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1303 {
1304 struct ice_vsi *vsi = q_vector->vsi;
1305 bool wb_en = q_vector->wb_on_itr;
1306 u32 itr_val;
1307
1308 if (test_bit(ICE_DOWN, vsi->state))
1309 return;
1310
1311 /* When exiting WB_ON_ITR, let ITR resume its normal
1312 * interrupts-enabled path.
1313 */
1314 if (wb_en)
1315 q_vector->wb_on_itr = false;
1316
1317 /* This will do nothing if dynamic updates are not enabled. */
1318 ice_net_dim(q_vector);
1319
1320 /* net_dim() updates ITR out-of-band using a work item */
1321 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1322 /* trigger an immediate software interrupt when exiting
1323 * busy poll, to make sure to catch any pending cleanups
1324 * that might have been missed due to interrupt state
1325 * transition.
1326 */
1327 if (wb_en) {
1328 itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1329 GLINT_DYN_CTL_SW_ITR_INDX_M |
1330 GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1331 }
1332 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1333 }
1334
1335 /**
1336 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1337 * @q_vector: q_vector to set WB_ON_ITR on
1338 *
1339 * We need to tell hardware to write-back completed descriptors even when
1340 * interrupts are disabled. Descriptors will be written back on cache line
1341 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1342 * descriptors may not be written back if they don't fill a cache line until
1343 * the next interrupt.
1344 *
1345 * This sets the write-back frequency to whatever was set previously for the
1346 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1347 * aren't meddling with the INTENA_M bit.
1348 */
1349 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1350 {
1351 struct ice_vsi *vsi = q_vector->vsi;
1352
1353 /* already in wb_on_itr mode no need to change it */
1354 if (q_vector->wb_on_itr)
1355 return;
1356
1357 /* use previously set ITR values for all of the ITR indices by
1358 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1359 * be static in non-adaptive mode (user configured)
1360 */
1361 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1362 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1363 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1364 GLINT_DYN_CTL_WB_ON_ITR_M);
1365
1366 q_vector->wb_on_itr = true;
1367 }
1368
1369 /**
1370 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1371 * @napi: napi struct with our devices info in it
1372 * @budget: amount of work driver is allowed to do this pass, in packets
1373 *
1374 * This function will clean all queues associated with a q_vector.
1375 *
1376 * Returns the amount of work done
1377 */
1378 int ice_napi_poll(struct napi_struct *napi, int budget)
1379 {
1380 struct ice_q_vector *q_vector =
1381 container_of(napi, struct ice_q_vector, napi);
1382 bool clean_complete = true;
1383 struct ice_ring *ring;
1384 int budget_per_ring;
1385 int work_done = 0;
1386
1387 /* Since the actual Tx work is minimal, we can give the Tx a larger
1388 * budget and be more aggressive about cleaning up the Tx descriptors.
1389 */
1390 ice_for_each_ring(ring, q_vector->tx) {
1391 bool wd = ring->xsk_pool ?
1392 ice_clean_tx_irq_zc(ring, budget) :
1393 ice_clean_tx_irq(ring, budget);
1394
1395 if (!wd)
1396 clean_complete = false;
1397 }
1398
1399 /* Handle case where we are called by netpoll with a budget of 0 */
1400 if (unlikely(budget <= 0))
1401 return budget;
1402
1403 /* normally we have 1 Rx ring per q_vector */
1404 if (unlikely(q_vector->num_ring_rx > 1))
1405 /* We attempt to distribute budget to each Rx queue fairly, but
1406 * don't allow the budget to go below 1 because that would exit
1407 * polling early.
1408 */
1409 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1410 else
1411 /* Max of 1 Rx ring in this q_vector so give it the budget */
1412 budget_per_ring = budget;
1413
1414 ice_for_each_ring(ring, q_vector->rx) {
1415 int cleaned;
1416
1417 /* A dedicated path for zero-copy allows making a single
1418 * comparison in the irq context instead of many inside the
1419 * ice_clean_rx_irq function and makes the codebase cleaner.
1420 */
1421 cleaned = ring->xsk_pool ?
1422 ice_clean_rx_irq_zc(ring, budget_per_ring) :
1423 ice_clean_rx_irq(ring, budget_per_ring);
1424 work_done += cleaned;
1425 /* if we clean as many as budgeted, we must not be done */
1426 if (cleaned >= budget_per_ring)
1427 clean_complete = false;
1428 }
1429
1430 /* If work not completed, return budget and polling will return */
1431 if (!clean_complete) {
1432 /* Set the writeback on ITR so partial completions of
1433 * cache-lines will still continue even if we're polling.
1434 */
1435 ice_set_wb_on_itr(q_vector);
1436 return budget;
1437 }
1438
1439 /* Exit the polling mode, but don't re-enable interrupts if stack might
1440 * poll us due to busy-polling
1441 */
1442 if (likely(napi_complete_done(napi, work_done)))
1443 ice_update_ena_itr(q_vector);
1444 else
1445 ice_set_wb_on_itr(q_vector);
1446
1447 return min_t(int, work_done, budget - 1);
1448 }
1449
1450 /**
1451 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1452 * @tx_ring: the ring to be checked
1453 * @size: the size buffer we want to assure is available
1454 *
1455 * Returns -EBUSY if a stop is needed, else 0
1456 */
1457 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1458 {
1459 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1460 /* Memory barrier before checking head and tail */
1461 smp_mb();
1462
1463 /* Check again in a case another CPU has just made room available. */
1464 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1465 return -EBUSY;
1466
1467 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1468 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1469 ++tx_ring->tx_stats.restart_q;
1470 return 0;
1471 }
1472
1473 /**
1474 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1475 * @tx_ring: the ring to be checked
1476 * @size: the size buffer we want to assure is available
1477 *
1478 * Returns 0 if stop is not needed
1479 */
1480 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1481 {
1482 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1483 return 0;
1484
1485 return __ice_maybe_stop_tx(tx_ring, size);
1486 }
1487
1488 /**
1489 * ice_tx_map - Build the Tx descriptor
1490 * @tx_ring: ring to send buffer on
1491 * @first: first buffer info buffer to use
1492 * @off: pointer to struct that holds offload parameters
1493 *
1494 * This function loops over the skb data pointed to by *first
1495 * and gets a physical address for each memory location and programs
1496 * it and the length into the transmit descriptor.
1497 */
1498 static void
1499 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1500 struct ice_tx_offload_params *off)
1501 {
1502 u64 td_offset, td_tag, td_cmd;
1503 u16 i = tx_ring->next_to_use;
1504 unsigned int data_len, size;
1505 struct ice_tx_desc *tx_desc;
1506 struct ice_tx_buf *tx_buf;
1507 struct sk_buff *skb;
1508 skb_frag_t *frag;
1509 dma_addr_t dma;
1510
1511 td_tag = off->td_l2tag1;
1512 td_cmd = off->td_cmd;
1513 td_offset = off->td_offset;
1514 skb = first->skb;
1515
1516 data_len = skb->data_len;
1517 size = skb_headlen(skb);
1518
1519 tx_desc = ICE_TX_DESC(tx_ring, i);
1520
1521 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1522 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1523 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1524 ICE_TX_FLAGS_VLAN_S;
1525 }
1526
1527 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1528
1529 tx_buf = first;
1530
1531 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1532 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1533
1534 if (dma_mapping_error(tx_ring->dev, dma))
1535 goto dma_error;
1536
1537 /* record length, and DMA address */
1538 dma_unmap_len_set(tx_buf, len, size);
1539 dma_unmap_addr_set(tx_buf, dma, dma);
1540
1541 /* align size to end of page */
1542 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1543 tx_desc->buf_addr = cpu_to_le64(dma);
1544
1545 /* account for data chunks larger than the hardware
1546 * can handle
1547 */
1548 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1549 tx_desc->cmd_type_offset_bsz =
1550 ice_build_ctob(td_cmd, td_offset, max_data,
1551 td_tag);
1552
1553 tx_desc++;
1554 i++;
1555
1556 if (i == tx_ring->count) {
1557 tx_desc = ICE_TX_DESC(tx_ring, 0);
1558 i = 0;
1559 }
1560
1561 dma += max_data;
1562 size -= max_data;
1563
1564 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1565 tx_desc->buf_addr = cpu_to_le64(dma);
1566 }
1567
1568 if (likely(!data_len))
1569 break;
1570
1571 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1572 size, td_tag);
1573
1574 tx_desc++;
1575 i++;
1576
1577 if (i == tx_ring->count) {
1578 tx_desc = ICE_TX_DESC(tx_ring, 0);
1579 i = 0;
1580 }
1581
1582 size = skb_frag_size(frag);
1583 data_len -= size;
1584
1585 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1586 DMA_TO_DEVICE);
1587
1588 tx_buf = &tx_ring->tx_buf[i];
1589 }
1590
1591 /* record bytecount for BQL */
1592 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1593
1594 /* record SW timestamp if HW timestamp is not available */
1595 skb_tx_timestamp(first->skb);
1596
1597 i++;
1598 if (i == tx_ring->count)
1599 i = 0;
1600
1601 /* write last descriptor with RS and EOP bits */
1602 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1603 tx_desc->cmd_type_offset_bsz =
1604 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1605
1606 /* Force memory writes to complete before letting h/w know there
1607 * are new descriptors to fetch.
1608 *
1609 * We also use this memory barrier to make certain all of the
1610 * status bits have been updated before next_to_watch is written.
1611 */
1612 wmb();
1613
1614 /* set next_to_watch value indicating a packet is present */
1615 first->next_to_watch = tx_desc;
1616
1617 tx_ring->next_to_use = i;
1618
1619 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1620
1621 /* notify HW of packet */
1622 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1623 writel(i, tx_ring->tail);
1624
1625 return;
1626
1627 dma_error:
1628 /* clear DMA mappings for failed tx_buf map */
1629 for (;;) {
1630 tx_buf = &tx_ring->tx_buf[i];
1631 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1632 if (tx_buf == first)
1633 break;
1634 if (i == 0)
1635 i = tx_ring->count;
1636 i--;
1637 }
1638
1639 tx_ring->next_to_use = i;
1640 }
1641
1642 /**
1643 * ice_tx_csum - Enable Tx checksum offloads
1644 * @first: pointer to the first descriptor
1645 * @off: pointer to struct that holds offload parameters
1646 *
1647 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1648 */
1649 static
1650 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1651 {
1652 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1653 struct sk_buff *skb = first->skb;
1654 union {
1655 struct iphdr *v4;
1656 struct ipv6hdr *v6;
1657 unsigned char *hdr;
1658 } ip;
1659 union {
1660 struct tcphdr *tcp;
1661 unsigned char *hdr;
1662 } l4;
1663 __be16 frag_off, protocol;
1664 unsigned char *exthdr;
1665 u32 offset, cmd = 0;
1666 u8 l4_proto = 0;
1667
1668 if (skb->ip_summed != CHECKSUM_PARTIAL)
1669 return 0;
1670
1671 ip.hdr = skb_network_header(skb);
1672 l4.hdr = skb_transport_header(skb);
1673
1674 /* compute outer L2 header size */
1675 l2_len = ip.hdr - skb->data;
1676 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1677
1678 protocol = vlan_get_protocol(skb);
1679
1680 if (protocol == htons(ETH_P_IP))
1681 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1682 else if (protocol == htons(ETH_P_IPV6))
1683 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1684
1685 if (skb->encapsulation) {
1686 bool gso_ena = false;
1687 u32 tunnel = 0;
1688
1689 /* define outer network header type */
1690 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1691 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1692 ICE_TX_CTX_EIPT_IPV4 :
1693 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1694 l4_proto = ip.v4->protocol;
1695 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1696 int ret;
1697
1698 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1699 exthdr = ip.hdr + sizeof(*ip.v6);
1700 l4_proto = ip.v6->nexthdr;
1701 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1702 &l4_proto, &frag_off);
1703 if (ret < 0)
1704 return -1;
1705 }
1706
1707 /* define outer transport */
1708 switch (l4_proto) {
1709 case IPPROTO_UDP:
1710 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1711 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1712 break;
1713 case IPPROTO_GRE:
1714 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1715 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1716 break;
1717 case IPPROTO_IPIP:
1718 case IPPROTO_IPV6:
1719 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1720 l4.hdr = skb_inner_network_header(skb);
1721 break;
1722 default:
1723 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1724 return -1;
1725
1726 skb_checksum_help(skb);
1727 return 0;
1728 }
1729
1730 /* compute outer L3 header size */
1731 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1732 ICE_TXD_CTX_QW0_EIPLEN_S;
1733
1734 /* switch IP header pointer from outer to inner header */
1735 ip.hdr = skb_inner_network_header(skb);
1736
1737 /* compute tunnel header size */
1738 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1739 ICE_TXD_CTX_QW0_NATLEN_S;
1740
1741 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1742 /* indicate if we need to offload outer UDP header */
1743 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1744 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1745 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1746
1747 /* record tunnel offload values */
1748 off->cd_tunnel_params |= tunnel;
1749
1750 /* set DTYP=1 to indicate that it's a Tx context descriptor
1751 * in IPsec tunnel mode with Tx offloads in Quad word 1
1752 */
1753 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1754
1755 /* switch L4 header pointer from outer to inner */
1756 l4.hdr = skb_inner_transport_header(skb);
1757 l4_proto = 0;
1758
1759 /* reset type as we transition from outer to inner headers */
1760 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1761 if (ip.v4->version == 4)
1762 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1763 if (ip.v6->version == 6)
1764 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1765 }
1766
1767 /* Enable IP checksum offloads */
1768 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1769 l4_proto = ip.v4->protocol;
1770 /* the stack computes the IP header already, the only time we
1771 * need the hardware to recompute it is in the case of TSO.
1772 */
1773 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1774 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1775 else
1776 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1777
1778 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1779 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1780 exthdr = ip.hdr + sizeof(*ip.v6);
1781 l4_proto = ip.v6->nexthdr;
1782 if (l4.hdr != exthdr)
1783 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1784 &frag_off);
1785 } else {
1786 return -1;
1787 }
1788
1789 /* compute inner L3 header size */
1790 l3_len = l4.hdr - ip.hdr;
1791 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1792
1793 /* Enable L4 checksum offloads */
1794 switch (l4_proto) {
1795 case IPPROTO_TCP:
1796 /* enable checksum offloads */
1797 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1798 l4_len = l4.tcp->doff;
1799 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1800 break;
1801 case IPPROTO_UDP:
1802 /* enable UDP checksum offload */
1803 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1804 l4_len = (sizeof(struct udphdr) >> 2);
1805 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1806 break;
1807 case IPPROTO_SCTP:
1808 /* enable SCTP checksum offload */
1809 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1810 l4_len = sizeof(struct sctphdr) >> 2;
1811 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1812 break;
1813
1814 default:
1815 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1816 return -1;
1817 skb_checksum_help(skb);
1818 return 0;
1819 }
1820
1821 off->td_cmd |= cmd;
1822 off->td_offset |= offset;
1823 return 1;
1824 }
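/* Illustrative example (not part of the original driver source): for a
 * TSO TCP/IPv4 packet with no IP options, the path above yields
 * cmd = ICE_TX_DESC_CMD_IIPT_IPV4_CSUM | ICE_TX_DESC_CMD_L4T_EOFT_TCP,
 * offset |= (20 / 4) << ICE_TX_DESC_LEN_IPLEN_S for the 20-byte IP
 * header, and offset |= 5 << ICE_TX_DESC_LEN_L4_LEN_S for a 20-byte TCP
 * header (doff = 5). Both values are OR'ed into off->td_cmd and
 * off->td_offset, which ice_tx_map() consumes when building the data
 * descriptors.
 */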
1825
1826 /**
1827 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1828 * @tx_ring: ring to send buffer on
1829 * @first: pointer to struct ice_tx_buf
1830 *
1831 * Checks the skb and sets up the corresponding generic transmit flags
1832 * related to VLAN tagging for the HW, such as VLAN and DCB.
1833 */
1834 static void
1835 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1836 {
1837 struct sk_buff *skb = first->skb;
1838
1839 /* nothing left to do, software offloaded VLAN */
1840 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1841 return;
1842
1843 /* currently, we always assume 802.1Q for VLAN insertion as VLAN
1844 * insertion for 802.1AD is not supported
1845 */
1846 if (skb_vlan_tag_present(skb)) {
1847 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1848 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1849 }
1850
1851 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1852 }
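/* Example (illustrative, values assumed): a VLAN tag with PCP 3 and
 * VLAN ID 100 gives a TCI of (3 << 13) | 100 = 0x6064; the tag is
 * shifted into the upper bits of first->tx_flags via ICE_TX_FLAGS_VLAN_S
 * and ICE_TX_FLAGS_HW_VLAN marks the packet for hardware VLAN insertion
 * on transmit.
 */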
1853
1854 /**
1855 * ice_tso - computes mss and TSO length to prepare for TSO
1856 * @first: pointer to struct ice_tx_buf
1857 * @off: pointer to struct that holds offload parameters
1858 *
1859 * Returns 0 when TSO is not needed, a negative error code if it can't be done, or 1 on success.
1860 */
1861 static
1862 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1863 {
1864 struct sk_buff *skb = first->skb;
1865 union {
1866 struct iphdr *v4;
1867 struct ipv6hdr *v6;
1868 unsigned char *hdr;
1869 } ip;
1870 union {
1871 struct tcphdr *tcp;
1872 struct udphdr *udp;
1873 unsigned char *hdr;
1874 } l4;
1875 u64 cd_mss, cd_tso_len;
1876 u32 paylen;
1877 u8 l4_start;
1878 int err;
1879
1880 if (skb->ip_summed != CHECKSUM_PARTIAL)
1881 return 0;
1882
1883 if (!skb_is_gso(skb))
1884 return 0;
1885
1886 err = skb_cow_head(skb, 0);
1887 if (err < 0)
1888 return err;
1889
1890 /* cppcheck-suppress unreadVariable */
1891 ip.hdr = skb_network_header(skb);
1892 l4.hdr = skb_transport_header(skb);
1893
1894 /* initialize outer IP header fields */
1895 if (ip.v4->version == 4) {
1896 ip.v4->tot_len = 0;
1897 ip.v4->check = 0;
1898 } else {
1899 ip.v6->payload_len = 0;
1900 }
1901
1902 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1903 SKB_GSO_GRE_CSUM |
1904 SKB_GSO_IPXIP4 |
1905 SKB_GSO_IPXIP6 |
1906 SKB_GSO_UDP_TUNNEL |
1907 SKB_GSO_UDP_TUNNEL_CSUM)) {
1908 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1909 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1910 l4.udp->len = 0;
1911
1912 /* determine offset of outer transport header */
1913 l4_start = (u8)(l4.hdr - skb->data);
1914
1915 /* remove payload length from outer checksum */
1916 paylen = skb->len - l4_start;
1917 csum_replace_by_diff(&l4.udp->check,
1918 (__force __wsum)htonl(paylen));
1919 }
1920
1921 /* reset pointers to inner headers */
1922
1923 /* cppcheck-suppress unreadVariable */
1924 ip.hdr = skb_inner_network_header(skb);
1925 l4.hdr = skb_inner_transport_header(skb);
1926
1927 /* initialize inner IP header fields */
1928 if (ip.v4->version == 4) {
1929 ip.v4->tot_len = 0;
1930 ip.v4->check = 0;
1931 } else {
1932 ip.v6->payload_len = 0;
1933 }
1934 }
1935
1936 /* determine offset of transport header */
1937 l4_start = (u8)(l4.hdr - skb->data);
1938
1939 /* remove payload length from checksum */
1940 paylen = skb->len - l4_start;
1941
1942 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1943 csum_replace_by_diff(&l4.udp->check,
1944 (__force __wsum)htonl(paylen));
1945 /* compute length of UDP segmentation header */
1946 off->header_len = (u8)sizeof(struct udphdr) + l4_start;
1947 } else {
1948 csum_replace_by_diff(&l4.tcp->check,
1949 (__force __wsum)htonl(paylen));
1950 /* compute length of TCP segmentation header */
1951 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
1952 }
1953
1954 /* update gso_segs and bytecount */
1955 first->gso_segs = skb_shinfo(skb)->gso_segs;
1956 first->bytecount += (first->gso_segs - 1) * off->header_len;
1957
1958 cd_tso_len = skb->len - off->header_len;
1959 cd_mss = skb_shinfo(skb)->gso_size;
1960
1961 /* record cdesc_qw1 with TSO parameters */
1962 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1963 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1964 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1965 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1966 first->tx_flags |= ICE_TX_FLAGS_TSO;
1967 return 1;
1968 }
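/* Worked example (illustrative, not from the original source): a TSO
 * TCP/IPv4 skb with gso_size = 1448, 45 segments and a 54-byte header
 * (14 Ethernet + 20 IP + 20 TCP) has skb->len = 45 * 1448 + 54 = 65214.
 * The code above then sets off->header_len = 54, cd_tso_len = 65214 - 54
 * = 65160, cd_mss = 1448, and grows first->bytecount by (45 - 1) * 54 =
 * 2376 to account for the headers replicated on every segment.
 */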
1969
1970 /**
1971 * ice_txd_use_count - estimate the number of descriptors needed for Tx
1972 * @size: transmit request size in bytes
1973 *
1974 * Due to hardware alignment restrictions (4K alignment), we need to
1975 * assume that we can have no more than 12K of data per descriptor, even
1976 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1977 * Thus, we need to divide by 12K. But division is slow! Instead,
1978 * we decompose the operation into shifts and one relatively cheap
1979 * multiply operation.
1980 *
1981 * To divide by 12K, we first divide by 4K, then divide by 3:
1982 * To divide by 4K, shift right by 12 bits
1983 * To divide by 3, multiply by 85, then divide by 256
1984 * (Divide by 256 is done by shifting right by 8 bits)
1985 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1986 * 3, we'll underestimate near each multiple of 12K. This is actually more
1987 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1988 * segment. For our purposes this is accurate out to 1M which is orders of
1989 * magnitude greater than our largest possible GSO size.
1990 *
1991 * This would then be implemented as:
1992 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1993 *
1994 * Since multiplication and division are commutative, we can reorder
1995 * operations into:
1996 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1997 */
1998 static unsigned int ice_txd_use_count(unsigned int size)
1999 {
2000 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2001 }
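/* Worked example (illustrative): for size = 60000 bytes,
 * (60000 * 85) >> 20 = 5100000 >> 20 = 4, and adding the rounding term
 * ICE_DESCS_FOR_SKB_DATA_PTR gives 5 descriptors, which matches
 * DIV_ROUND_UP(60000, 12288) = 5 for the 12K-per-descriptor limit
 * described above.
 */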
2002
2003 /**
2004 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2005 * @skb: send buffer
2006 *
2007 * Returns number of data descriptors needed for this skb.
2008 */
2009 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2010 {
2011 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2012 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2013 unsigned int count = 0, size = skb_headlen(skb);
2014
2015 for (;;) {
2016 count += ice_txd_use_count(size);
2017
2018 if (!nr_frags--)
2019 break;
2020
2021 size = skb_frag_size(frag++);
2022 }
2023
2024 return count;
2025 }
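/* Example (illustrative): an skb with a 256-byte linear area and three
 * 32768-byte frags needs ice_txd_use_count(256) = 1 descriptor for the
 * head plus 3 descriptors per frag ((32768 * 85) >> 20 = 2, plus the
 * rounding term), i.e. 1 + 3 * 3 = 10 descriptors in total.
 */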
2026
2027 /**
2028 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2029 * @skb: send buffer
2030 *
2031 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2032 * and so we need to figure out the cases where we need to linearize the skb.
2033 *
2034 * For TSO we need to count the TSO header and segment payload separately.
2035 * As such we need to check cases where we have 7 fragments or more as we
2036 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2037 * the segment payload in the first descriptor, and another 7 for the
2038 * fragments.
2039 */
2040 static bool __ice_chk_linearize(struct sk_buff *skb)
2041 {
2042 const skb_frag_t *frag, *stale;
2043 int nr_frags, sum;
2044
2045 /* no need to check if number of frags is less than 7 */
2046 nr_frags = skb_shinfo(skb)->nr_frags;
2047 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2048 return false;
2049
2050 /* We need to walk through the list and validate that each group
2051 * of 6 fragments totals at least gso_size.
2052 */
2053 nr_frags -= ICE_MAX_BUF_TXD - 2;
2054 frag = &skb_shinfo(skb)->frags[0];
2055
2056 /* Initialize sum to the negative value of (gso_size - 1). We
2057 * use this as the worst case scenario in which the frag ahead
2058 * of us only provides one byte which is why we are limited to 6
2059 * descriptors for a single transmit as the header and previous
2060 * fragment are already consuming 2 descriptors.
2061 */
2062 sum = 1 - skb_shinfo(skb)->gso_size;
2063
2064 /* Add size of frags 0 through 4 to create our initial sum */
2065 sum += skb_frag_size(frag++);
2066 sum += skb_frag_size(frag++);
2067 sum += skb_frag_size(frag++);
2068 sum += skb_frag_size(frag++);
2069 sum += skb_frag_size(frag++);
2070
2071 /* Walk through fragments adding latest fragment, testing it, and
2072 * then removing stale fragments from the sum.
2073 */
2074 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2075 int stale_size = skb_frag_size(stale);
2076
2077 sum += skb_frag_size(frag++);
2078
2079 /* The stale fragment may present us with a smaller
2080 * descriptor than the actual fragment size. To account
2081 * for that we need to remove all the data on the front and
2082 * figure out what the remainder would be in the last
2083 * descriptor associated with the fragment.
2084 */
2085 if (stale_size > ICE_MAX_DATA_PER_TXD) {
2086 int align_pad = -(skb_frag_off(stale)) &
2087 (ICE_MAX_READ_REQ_SIZE - 1);
2088
2089 sum -= align_pad;
2090 stale_size -= align_pad;
2091
2092 do {
2093 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2094 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2095 } while (stale_size > ICE_MAX_DATA_PER_TXD);
2096 }
2097
2098 /* if sum is negative we failed to make sufficient progress */
2099 if (sum < 0)
2100 return true;
2101
2102 if (!nr_frags--)
2103 break;
2104
2105 sum -= stale_size;
2106 }
2107
2108 return false;
2109 }
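/* Illustrative scenario (not from the original source): with
 * gso_size = 4096 and eight 500-byte frags, the first window of six
 * frags sums to only 3000 bytes, so sum = 1 - 4096 + 3000 = -1095 < 0
 * and the function returns true; the caller then linearizes the skb
 * with __skb_linearize() before mapping it.
 */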
2110
2111 /**
2112 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2113 * @skb: send buffer
2114 * @count: number of buffers used
2115 *
2116 * Note: Our HW can't scatter-gather more than 8 fragments to build
2117 * a packet on the wire and so we need to figure out the cases where we
2118 * need to linearize the skb.
2119 */
2120 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2121 {
2122 /* Both TSO and single send will work if count is less than 8 */
2123 if (likely(count < ICE_MAX_BUF_TXD))
2124 return false;
2125
2126 if (skb_is_gso(skb))
2127 return __ice_chk_linearize(skb);
2128
2129 /* we can support up to 8 data buffers for a single send */
2130 return count != ICE_MAX_BUF_TXD;
2131 }
2132
2133 /**
2134 * ice_xmit_frame_ring - Sends buffer on Tx ring
2135 * @skb: send buffer
2136 * @tx_ring: ring to send buffer on
2137 *
2138 * Returns NETDEV_TX_OK if sent, else an error code
2139 */
2140 static netdev_tx_t
2141 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2142 {
2143 struct ice_tx_offload_params offload = { 0 };
2144 struct ice_vsi *vsi = tx_ring->vsi;
2145 struct ice_tx_buf *first;
2146 unsigned int count;
2147 int tso, csum;
2148
2149 count = ice_xmit_desc_count(skb);
2150 if (ice_chk_linearize(skb, count)) {
2151 if (__skb_linearize(skb))
2152 goto out_drop;
2153 count = ice_txd_use_count(skb->len);
2154 tx_ring->tx_stats.tx_linearize++;
2155 }
2156
2157 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2158 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2159 * + 4 desc gap to avoid the cache line where head is,
2160 * + 1 desc for context descriptor,
2161 * otherwise try next time
2162 */
2163 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2164 ICE_DESCS_FOR_CTX_DESC)) {
2165 tx_ring->tx_stats.tx_busy++;
2166 return NETDEV_TX_BUSY;
2167 }
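/* Illustrative budget (assuming ICE_DESCS_PER_CACHE_LINE is 4 and
 * ICE_DESCS_FOR_CTX_DESC is 1, per their definitions in ice_txrx.h):
 * for the 10-descriptor skb from the example above, the ring must have
 * at least 10 + 4 + 1 = 15 free descriptors or the queue is stopped and
 * NETDEV_TX_BUSY is returned.
 */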
2168
2169 offload.tx_ring = tx_ring;
2170
2171 /* record the location of the first descriptor for this packet */
2172 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2173 first->skb = skb;
2174 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2175 first->gso_segs = 1;
2176 first->tx_flags = 0;
2177
2178 /* prepare the VLAN tagging flags for Tx */
2179 ice_tx_prepare_vlan_flags(tx_ring, first);
2180
2181 /* set up TSO offload */
2182 tso = ice_tso(first, &offload);
2183 if (tso < 0)
2184 goto out_drop;
2185
2186 /* always set up Tx checksum offload */
2187 csum = ice_tx_csum(first, &offload);
2188 if (csum < 0)
2189 goto out_drop;
2190
2191 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2192 if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2193 vsi->type == ICE_VSI_PF &&
2194 vsi->port_info->qos_cfg.is_sw_lldp))
2195 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2196 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2197 ICE_TXD_CTX_QW1_CMD_S);
2198
2199 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2200 struct ice_tx_ctx_desc *cdesc;
2201 u16 i = tx_ring->next_to_use;
2202
2203 /* grab the next descriptor */
2204 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2205 i++;
2206 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2207
2208 /* setup context descriptor */
2209 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2210 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2211 cdesc->rsvd = cpu_to_le16(0);
2212 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2213 }
2214
2215 ice_tx_map(tx_ring, first, &offload);
2216 return NETDEV_TX_OK;
2217
2218 out_drop:
2219 dev_kfree_skb_any(skb);
2220 return NETDEV_TX_OK;
2221 }
2222
2223 /**
2224 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2225 * @skb: send buffer
2226 * @netdev: network interface device structure
2227 *
2228 * Returns NETDEV_TX_OK if sent, else an error code
2229 */
2230 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2231 {
2232 struct ice_netdev_priv *np = netdev_priv(netdev);
2233 struct ice_vsi *vsi = np->vsi;
2234 struct ice_ring *tx_ring;
2235
2236 tx_ring = vsi->tx_rings[skb->queue_mapping];
2237
2238 /* hardware can't handle really short frames, hardware padding works
2239 * beyond this point
2240 */
2241 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2242 return NETDEV_TX_OK;
2243
2244 return ice_xmit_frame_ring(skb, tx_ring);
2245 }
2246
2247 /**
2248 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2249 * @tx_ring: tx_ring to clean
2250 */
2251 void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
2252 {
2253 struct ice_vsi *vsi = tx_ring->vsi;
2254 s16 i = tx_ring->next_to_clean;
2255 int budget = ICE_DFLT_IRQ_WORK;
2256 struct ice_tx_desc *tx_desc;
2257 struct ice_tx_buf *tx_buf;
2258
2259 tx_buf = &tx_ring->tx_buf[i];
2260 tx_desc = ICE_TX_DESC(tx_ring, i);
2261 i -= tx_ring->count;
2262
2263 do {
2264 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2265
2266 /* if next_to_watch is not set then there is no pending work */
2267 if (!eop_desc)
2268 break;
2269
2270 /* prevent any other reads prior to eop_desc */
2271 smp_rmb();
2272
2273 /* if the descriptor isn't done, no work to do */
2274 if (!(eop_desc->cmd_type_offset_bsz &
2275 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2276 break;
2277
2278 /* clear next_to_watch to prevent false hangs */
2279 tx_buf->next_to_watch = NULL;
2280 tx_desc->buf_addr = 0;
2281 tx_desc->cmd_type_offset_bsz = 0;
2282
2283 /* move past filter desc */
2284 tx_buf++;
2285 tx_desc++;
2286 i++;
2287 if (unlikely(!i)) {
2288 i -= tx_ring->count;
2289 tx_buf = tx_ring->tx_buf;
2290 tx_desc = ICE_TX_DESC(tx_ring, 0);
2291 }
2292
2293 /* unmap the data header */
2294 if (dma_unmap_len(tx_buf, len))
2295 dma_unmap_single(tx_ring->dev,
2296 dma_unmap_addr(tx_buf, dma),
2297 dma_unmap_len(tx_buf, len),
2298 DMA_TO_DEVICE);
2299 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2300 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2301
2302 /* reset the buffer and clear next_to_watch to prevent false hangs */
2303 tx_buf->raw_buf = NULL;
2304 tx_buf->tx_flags = 0;
2305 tx_buf->next_to_watch = NULL;
2306 dma_unmap_len_set(tx_buf, len, 0);
2307 tx_desc->buf_addr = 0;
2308 tx_desc->cmd_type_offset_bsz = 0;
2309
2310 /* move past eop_desc for start of next FD desc */
2311 tx_buf++;
2312 tx_desc++;
2313 i++;
2314 if (unlikely(!i)) {
2315 i -= tx_ring->count;
2316 tx_buf = tx_ring->tx_buf;
2317 tx_desc = ICE_TX_DESC(tx_ring, 0);
2318 }
2319
2320 budget--;
2321 } while (likely(budget));
2322
2323 i += tx_ring->count;
2324 tx_ring->next_to_clean = i;
2325
2326 /* re-enable interrupt if needed */
2327 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2328 }
2329