/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_XSK_RX_H__
#define __MLX5_EN_XSK_RX_H__

#include "en.h"
#include <net/xdp_sock_drv.h>

/* RX data path */

struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
						    struct mlx5e_mpw_info *wi,
						    u16 cqe_bcnt,
						    u32 head_offset,
						    u32 page_idx);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
					      struct mlx5e_wqe_frag_info *wi,
					      u32 cqe_bcnt);

static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
					    struct mlx5e_dma_info *dma_info)
{
	dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
	if (!dma_info->xsk)
		return -ENOMEM;

	/* Store the DMA address without headroom. In striding RQ case, we just
	 * provide pages for UMR, and headroom is counted at the setup stage
	 * when creating a WQE. In non-striding RQ case, headroom is accounted
	 * in mlx5e_alloc_rx_wqe.
	 */
	dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);

	return 0;
}

static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
	if (!xsk_uses_need_wakeup(rq->xsk_pool))
		return alloc_err;

	if (unlikely(alloc_err))
		xsk_set_rx_need_wakeup(rq->xsk_pool);
	else
		xsk_clear_rx_need_wakeup(rq->xsk_pool);

	return false;
}

#endif /* __MLX5_EN_XSK_RX_H__ */
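
/*
 * Illustrative sketch, not part of the driver: one plausible way the two
 * helpers above could fit together in an RX buffer refill path. The refill
 * function name (hypothetical_xsk_refill) and its "wqes" count are invented
 * for the example; only mlx5e_xsk_page_alloc_pool() and
 * mlx5e_xsk_update_rx_wakeup() come from this header.
 *
 *	static bool hypothetical_xsk_refill(struct mlx5e_rq *rq, int wqes)
 *	{
 *		struct mlx5e_dma_info di;
 *		bool alloc_err = false;
 *		int i;
 *
 *		for (i = 0; i < wqes; i++) {
 *			if (mlx5e_xsk_page_alloc_pool(rq, &di)) {
 *				// Fill ring is empty: remember the failure so
 *				// user space can be asked to produce buffers.
 *				alloc_err = true;
 *				break;
 *			}
 *			// ... post di.addr to the hardware WQE here ...
 *		}
 *
 *		// Propagates alloc_err (i.e. "poll again") only when the pool
 *		// does not use the need_wakeup flag; otherwise the flag tells
 *		// user space to kick the fill ring and false is returned.
 *		return mlx5e_xsk_update_rx_wakeup(rq, alloc_err);
 *	}
 */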