// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

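/**
 * ice_xdp_buf - get address of the xdp_buff slot for a given ring index
 * @rx_ring: Rx ring that owns the software ring of xdp_buff pointers
 * @idx: index into the software ring
 */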
static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
	return &rx_ring->xdp_buf[idx];
}

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf;

	pf = vsi->back;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi)) {
		synchronize_rcu();
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	}
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so only
	 * QINT_RQCTL needs to be handled here
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

	if (q_vector) {
		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(q_vector->irq.virq);
	}
}

/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_cfg_itr(hw, q_vector);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

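	/* Serialize with other queue (re)configuration flows; with
	 * timeout = 50 and usleep_range(1000, 2000) this gives up
	 * after roughly 50-100 ms.
	 */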
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
	ice_qvec_toggle_napi(vsi, q_vector, false);

	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_q_vector *q_vector;
	int err;

	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
	if (err)
		return err;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
		if (err)
			return err;
		ice_set_ring_xdp(xdp_ring);
		ice_tx_xsk_pool(vsi, q_idx);
	}

	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
	if (err)
		return err;

	q_vector = vsi->rx_rings[q_idx]->q_vector;
	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
	clear_bit(ICE_CFG_BUSY, vsi->state);

	return 0;
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	return 0;
}

/**
 * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
 * @rx_ring: Rx ring
 * @pool_present: is pool for XSK present
 *
 * Try to allocate a new software ring and return -ENOMEM if the
 * allocation fails. On success, free the old buffer array and
 * substitute the newly allocated one.
 * Returns 0 on success, negative on failure
 */
static int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
					  sizeof(*rx_ring->rx_buf);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
		rx_ring->xdp_buf = sw_ring;
	} else {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
		rx_ring->rx_buf = sw_ring;
	}

	return 0;
}

/**
 * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffer for rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
	struct ice_rx_ring *rx_ring;
	uint i;

	ice_for_each_rxq(vsi, i) {
		rx_ring = vsi->rx_rings[i];
		if (!rx_ring->xsk_pool)
			continue;

		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
		pool_failure = -EINVAL;
		goto failure;
	}

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

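	/* On a running interface the queue pair must be quiesced and its
	 * software buffer ring reallocated before the pool type can change.
	 */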
	if (if_running) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];

		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}

		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
		if (ret)
			goto xsk_pool_if_up;
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

/**
 * ice_fill_rx_descs - pick buffers from XSK buffer pool and use them
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by caller of this function.
 *
 * Returns the number of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
	dma_addr_t dma;
	u16 buffs;
	int i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		/* Put private info that changes on a per-packet basis
		 * into xdp_buff_xsk->cb.
		 */
		ice_xdp_meta_set_desc(*xdp, rx_desc);

		rx_desc++;
		xdp++;
	}

	return buffs;
}

/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Place the @count of descriptors onto Rx ring. Handle the ring wrap
 * for the case where space from next_to_use up to the end of ring is
 * less than @count. Finally do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u32 nb_buffs_extra = 0, nb_buffs = 0;
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

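	/* If the request would run past the end of the ring, fill the tail
	 * of the ring first and then wrap around to index 0.
	 */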
	if (ntu + count >= rx_ring->count) {
		nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
						   rx_desc,
						   rx_ring->count - ntu);
		if (nb_buffs_extra != rx_ring->count - ntu) {
			ntu += nb_buffs_extra;
			goto exit;
		}
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

exit:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Wrapper for internal allocation routine; figure out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 leftover, i, tail_bumps;

	tail_bumps = count / rx_thresh;
	leftover = count - (tail_bumps * rx_thresh);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}
	net_prefetch(xdp->data_meta);

	skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	for (int i = 0; i < nr_frags; i++) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);
		skb_frag_t *frag = &sinfo->frags[i];
		struct page *page;
		void *addr;

		page = dev_alloc_page();
		if (!page) {
			dev_kfree_skb(skb);
			return NULL;
		}
		addr = page_to_virt(page);

		/* copy the frag's data, not its struct page, into the
		 * freshly allocated page
		 */
		memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));

		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
					   page, 0, skb_frag_size(frag));
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

/**
 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
 * @xdp_ring: XDP Tx ring
 */
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u16 cnt = xdp_ring->count;
	struct ice_tx_buf *tx_buf;
	u16 completed_frames = 0;
	u16 xsk_frames = 0;
	u16 last_rs;
	int i;

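	/* The RS bit is requested only on the last descriptor of the most
	 * recently produced batch (next_to_use - 1); its DD bit means HW
	 * is done with every descriptor up to and including last_rs.
	 */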
	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (last_rs >= ntc)
			completed_frames = last_rs - ntc + 1;
		else
			completed_frames = last_rs + cnt - ntc + 1;
	}

	if (!completed_frames)
		return 0;

	if (likely(!xdp_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = xdp_ring->next_to_clean;
	for (i = 0; i < completed_frames; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
			xdp_ring->xdp_tx_active--;
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}
skip:
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean += completed_frames;
	if (xdp_ring->next_to_clean >= cnt)
		xdp_ring->next_to_clean -= cnt;
	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

	return completed_frames;
}

/**
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 *
 * note that this function works directly on xdp_buff, no need to convert
 * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
 * side will be able to xsk_buff_free() it.
 *
 * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there
 * was not enough space on XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
			      struct ice_tx_ring *xdp_ring)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct xdp_buff *head;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;

	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq_zc(xdp_ring);

	if (unlikely(!free_space))
		goto busy;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
		if (free_space < nr_frags + 1)
			goto busy;
	}

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_buf = &xdp_ring->tx_buf[ntu];
	head = xdp;

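	/* Produce one Tx descriptor per buffer: the head xdp_buff first,
	 * then one for each frag of a multi-buffer frame.
	 */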
	for (;;) {
		dma_addr_t dma;

		dma = xsk_buff_xdp_get_dma(xdp);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);

		tx_buf->xdp = xdp;
		tx_buf->type = ICE_TX_BUF_XSK_TX;
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
		/* account for each xdp_buff from xsk_buff_pool */
		xdp_ring->xdp_tx_active++;

		if (++ntu == xdp_ring->count)
			ntu = 0;

		if (frag == nr_frags)
			break;

		tx_desc = ICE_TX_DESC(xdp_ring, ntu);
		tx_buf = &xdp_ring->tx_buf[ntu];

		xdp = xsk_buff_get_frag(head);
		size = skb_frag_size(&sinfo->frags[frag]);
		frag++;
	}

	xdp_ring->next_to_use = ntu;
	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

	return ICE_XDP_TX;

busy:
	xdp_ring->ring_stats->tx_stats.tx_busy++;

	return ICE_XDP_CONSUMED;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err, result = ICE_XDP_PASS;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return ICE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = ICE_XDP_EXIT;
		else
			result = ICE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = ICE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		break;
	}

	return result;
}

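/**
 * ice_add_xsk_frag - attach an XSK buffer as a frag of a multi-buffer frame
 * @rx_ring: Rx ring the buffers come from
 * @first: xdp_buff carrying the head of the frame
 * @xdp: xdp_buff holding the current fragment
 * @size: fragment length in bytes
 *
 * Returns 0 on success, -ENOMEM when the frame already carries
 * MAX_SKB_FRAGS frags, in which case the whole frame is freed.
 */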
static int
ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
		 struct xdp_buff *xdp, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!size)
		return 0;

	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start),
				   XDP_PACKET_HEADROOM, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
	u32 ntc = rx_ring->next_to_clean;
	u32 ntu = rx_ring->next_to_use;
	struct xdp_buff *first = NULL;
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	u32 cnt = rx_ring->count;
	bool failure = false;
	int entries_to_alloc;

	/* The ZC path is enabled only when an XDP program is set,
	 * so xdp_prog cannot be NULL here
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	xdp_ring = rx_ring->xdp_ring;

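	/* Resume a multi-buffer frame whose head was received during a
	 * previous NAPI poll but whose EOP descriptor has not been seen yet.
	 */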
	if (ntc != rx_ring->first_desc)
		first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct xdp_buff *xdp;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tci;

		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

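		/* No buffer was posted at this slot; the DD bit read above
		 * is stale, so stop cleaning here.
		 */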
		if (unlikely(ntc == ntu))
			break;

		xdp = *ice_xdp_buf(rx_ring, ntc);

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
		       ICE_RX_FLX_DESC_PKT_LEN_M;

		xsk_buff_set_size(xdp, size);
		xsk_buff_dma_sync_for_cpu(xdp);

		if (!first) {
			first = xdp;
		} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
			break;
		}

		if (++ntc == cnt)
			ntc = 0;

		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == ICE_XDP_EXIT) {
			failure = true;
			first = NULL;
			rx_ring->first_desc = ntc;
			break;
		} else if (xdp_res == ICE_XDP_CONSUMED) {
			xsk_buff_free(first);
		} else if (xdp_res == ICE_XDP_PASS) {
			goto construct_skb;
		}

		total_rx_bytes += xdp_get_buff_len(first);
		total_rx_packets++;

		first = NULL;
		rx_ring->first_desc = ntc;
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, first);
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
			break;
		}

		first = NULL;
		rx_ring->first_desc = ntc;

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		vlan_tci = ice_get_vlan_tci(rx_desc);

		ice_process_skb_fields(rx_ring, rx_desc, skb);
		ice_receive_skb(rx_ring, skb, vlan_tci);
	}

	rx_ring->next_to_clean = ntc;
	entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
	if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
		failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(xsk_pool)) {
		/* ntu could have changed when allocating entries above, so
		 * use rx_ring value instead of stack based one
		 */
		if (failure || ntc == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(xsk_pool);
		else
			xsk_clear_rx_need_wakeup(xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
						      0, desc->len, 0);

	*total_bytes += desc->len;
}

/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
							      0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

/**
 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
				u32 nb_pkts, unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;
	int budget;

	ice_clean_xdp_irq_zc(xdp_ring);

	budget = ICE_DESC_UNUSED(xdp_ring);
	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

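	/* If the batch would run past the end of the ring, produce the
	 * first chunk up to the wrap point and the remainder from index 0.
	 */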
	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			    &total_bytes);

	ice_set_rs_bit(xdp_ring);
	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	return nb_pkts < budget;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
		return -EINVAL;

	ring = vsi->rx_rings[queue_id]->xdp_ring;

	if (!ring->xsk_pool)
		return -EINVAL;

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	while (ntc != ntu) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}