xref: /linux/include/net/page_pool/types.h (revision 021bc4b9)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _NET_PAGE_POOL_TYPES_H
#define _NET_PAGE_POOL_TYPES_H

#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>

#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
					* map/unmap
					*/
#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
					* from page_pool will be
					* DMA-synced-for-device according to
					* the length provided by the device
					* driver.
					* Please note DMA-sync-for-CPU is still
					* device driver responsibility
					*/
#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
				 PP_FLAG_DMA_SYNC_DEV)
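/*
 * Illustrative sketch (not part of the original header): the note above
 * means that before the CPU reads packet data from a pool-mapped page,
 * the driver itself must DMA-sync that region for the CPU.  The device,
 * headroom and length names below are hypothetical;
 * page_pool_get_dma_addr() comes from <net/page_pool/helpers.h>.
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	dma_sync_single_range_for_cpu(rxq->dev, dma, rxq->rx_headroom,
 *				      frame_len, DMA_FROM_DEVICE);
 */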

/*
 * Fast allocation side cache array/stack
 *
 * The cache size and refill watermark are related to the network
 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
 * ring is usually refilled and the max consumed elements will be 64,
 * which is the natural maximum number of objects needed in the cache.
 *
 * Room is kept for more objects because of the XDP_DROP use-case:
 * XDP_DROP can recycle objects directly into this array, as it runs
 * under the same softirq/NAPI protection.  If the cache were already
 * (partly) full, those XDP_DROP recycles would have to take a slower
 * code path.  (A simplified usage sketch follows the struct below.)
 */
#define PP_ALLOC_CACHE_SIZE	128
#define PP_ALLOC_CACHE_REFILL	64
struct pp_alloc_cache {
	u32 count;
	struct page *cache[PP_ALLOC_CACHE_SIZE];
};
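/*
 * Simplified illustration (an assumption-laden sketch, not the real
 * implementation, which lives in net/core/page_pool.c and also handles
 * NUMA checks and statistics): the fast path pops from this array while
 * it has entries, otherwise it refills up to PP_ALLOC_CACHE_REFILL
 * entries from the ptr_ring.  The function name is hypothetical.
 *
 *	static struct page *example_get_cached(struct page_pool *pool)
 *	{
 *		struct pp_alloc_cache *cache = &pool->alloc;
 *
 *		if (likely(cache->count))	// lockless, NAPI protected
 *			return cache->cache[--cache->count];
 *
 *		while (cache->count < PP_ALLOC_CACHE_REFILL) {
 *			struct page *page = ptr_ring_consume_bh(&pool->ring);
 *
 *			if (!page)
 *				break;
 *			cache->cache[cache->count++] = page;
 *		}
 *		return cache->count ? cache->cache[--cache->count] : NULL;
 *	}
 */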

/**
 * struct page_pool_params - page pool parameters
 * @flags:	PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
 * @order:	2^order pages on allocation
 * @pool_size:	size of the ptr_ring
 * @nid:	NUMA node id to allocate pages from
 * @dev:	device, for DMA pre-mapping purposes
 * @netdev:	netdev this pool will serve (leave as NULL if none or multiple)
 * @napi:	NAPI which is the sole consumer of pages, otherwise NULL
 * @dma_dir:	DMA mapping direction
 * @max_len:	max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
 * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
 */
struct page_pool_params {
	struct_group_tagged(page_pool_params_fast, fast,
		unsigned int	flags;
		unsigned int	order;
		unsigned int	pool_size;
		int		nid;
		struct device	*dev;
		struct napi_struct *napi;
		enum dma_data_direction dma_dir;
		unsigned int	max_len;
		unsigned int	offset;
	);
	struct_group_tagged(page_pool_params_slow, slow,
		struct net_device *netdev;
/* private: used by test code only */
		void (*init_callback)(struct page *page, void *arg);
		void *init_arg;
	);
};
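/*
 * Hedged usage sketch: a NIC driver would typically fill these parameters
 * once per RX queue and call page_pool_create().  The ring size, device
 * and queue pointers below are made-up example values.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,			// hypothetical RX ring size
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,		// hypothetical PCI device
 *		.napi		= &rxq->napi,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */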

#ifdef CONFIG_PAGE_POOL_STATS
/**
 * struct page_pool_alloc_stats - allocation statistics
 * @fast:	successful fast path allocations
 * @slow:	slow path order-0 allocations
 * @slow_high_order: slow path high order allocations
 * @empty:	ptr ring is empty, so a slow path allocation was forced
 * @refill:	an allocation which triggered a refill of the cache
 * @waive:	pages obtained from the ptr ring that cannot be added to
 *		the cache due to a NUMA mismatch
 */
struct page_pool_alloc_stats {
	u64 fast;
	u64 slow;
	u64 slow_high_order;
	u64 empty;
	u64 refill;
	u64 waive;
};

/**
 * struct page_pool_recycle_stats - recycling (freeing) statistics
 * @cached:	recycling placed page in the page pool cache
 * @cache_full:	page pool cache was full
 * @ring:	page placed into the ptr ring
 * @ring_full:	page released from page pool because the ptr ring was full
 * @released_refcnt:	page released (and not recycled) because refcnt > 1
 */
struct page_pool_recycle_stats {
	u64 cached;
	u64 cache_full;
	u64 ring;
	u64 ring_full;
	u64 released_refcnt;
};

/**
 * struct page_pool_stats - combined page pool use statistics
 * @alloc_stats:	see struct page_pool_alloc_stats
 * @recycle_stats:	see struct page_pool_recycle_stats
 *
 * Wrapper struct for combining page pool stats with different storage
 * requirements.
 */
struct page_pool_stats {
	struct page_pool_alloc_stats alloc_stats;
	struct page_pool_recycle_stats recycle_stats;
};
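/*
 * Usage sketch, assuming the stats helpers from <net/page_pool/helpers.h>:
 * page_pool_get_stats() fills a caller-provided struct page_pool_stats and
 * returns false when no statistics are available.  The pool pointer below
 * is hypothetical.
 *
 *	struct page_pool_stats stats = {};
 *
 *	if (page_pool_get_stats(pool, &stats))
 *		pr_info("fast allocs %llu, recycled to cache %llu\n",
 *			stats.alloc_stats.fast, stats.recycle_stats.cached);
 */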
#endif

struct page_pool {
	struct page_pool_params_fast p;

	bool has_init_callback;

	long frag_users;
	struct page *frag_page;
	unsigned int frag_offset;
	u32 pages_state_hold_cnt;

	struct delayed_work release_dw;
	void (*disconnect)(void *pool);
	unsigned long defer_start;
	unsigned long defer_warn;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif
	u32 xdp_mem_id;

	/*
	 * Data structure for the allocation side
	 *
	 * A driver's allocation side usually already performs some
	 * kind of resource protection.  Piggyback on this protection,
	 * and require the driver to protect the allocation side.
	 *
	 * For NIC drivers this means allocating a page_pool per
	 * RX-queue, as the RX-queue is already protected by softirq/BH
	 * scheduling and napi_schedule; NAPI scheduling guarantees
	 * that a single napi_struct will only run on a single CPU
	 * (see napi_schedule).  A usage sketch follows this struct.
	 */
	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;

	/* Data structure for storing recycled pages.
	 *
	 * Returning/freeing pages is more complicated synchronization
	 * wise, because frees can happen on remote CPUs, with no
	 * association with the allocation resource.
	 *
	 * Use ptr_ring, as it separates consumer and producer
	 * efficiently, in a way that doesn't bounce cache-lines.
	 *
	 * TODO: Implement bulk return of pages into this structure.
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
	 * protected by NAPI, due to the above pp_alloc_cache.  The
	 * purpose of this refcnt is to simplify drivers' error handling.
	 */
	refcount_t user_cnt;

	u64 destroy_cnt;

	/* Slow/Control-path information follows */
	struct page_pool_params_slow slow;
	/* User-facing fields, protected by page_pools_lock */
	struct {
		struct hlist_node list;
		u64 detach_time;
		u32 napi_id;
		u32 id;
	} user;
};
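/*
 * Hedged sketch of the per-RX-queue usage described in the comments above.
 * Allocation happens from NAPI context, so the lockless pp_alloc_cache can
 * be used; a frame dropped by XDP can be recycled directly back into that
 * cache, while frees from other CPUs travel through the ptr_ring instead.
 * page_pool_recycle_direct() and page_pool_put_full_page() come from
 * <net/page_pool/helpers.h>; the rxq layout is hypothetical.
 *
 *	// 1) refill path inside the driver's NAPI poll:
 *	struct page *page = page_pool_alloc_pages(rxq->pool, GFP_ATOMIC);
 *
 *	// 2) XDP_DROP in the same NAPI/softirq context, recycle directly
 *	//    into the pp_alloc_cache:
 *	page_pool_recycle_direct(rxq->pool, page);
 *
 *	// 3) free from an unrelated context/CPU, goes via the ptr_ring:
 *	page_pool_put_full_page(rxq->pool, page, false);
 */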

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
				  unsigned int size, gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
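/*
 * Sketch of the fragment API declared above: page_pool_alloc_frag() hands
 * out sub-page chunks of a pool page and writes the chunk's offset back to
 * the caller.  The 2048-byte size is just an example value.
 *
 *	unsigned int offset;
 *	void *buf;
 *	struct page *page;
 *
 *	page = page_pool_alloc_frag(pool, &offset, 2048, GFP_ATOMIC);
 *	if (page)
 *		buf = page_address(page) + offset;
 */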

struct xdp_mem_info;

#ifdef CONFIG_PAGE_POOL
void page_pool_unlink_napi(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count);
#else
static inline void page_pool_unlink_napi(struct page_pool *pool)
{
}

static inline void page_pool_destroy(struct page_pool *pool)
{
}

static inline void page_pool_use_xdp_mem(struct page_pool *pool,
					 void (*disconnect)(void *),
					 struct xdp_mem_info *mem)
{
}

static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
					   int count)
{
}
#endif

void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size,
				bool allow_direct);

static inline bool is_page_pool_compiled_in(void)
{
#ifdef CONFIG_PAGE_POOL
	return true;
#else
	return false;
#endif
}

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid);

#endif /* _NET_PAGE_POOL_TYPES_H */