/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(const struct page *page)
{
	return atomic_read(&page->_refcount);
}

/**
 * folio_ref_count - The reference count on this folio.
 * @folio: The folio.
 *
 * The refcount is usually incremented by calls to folio_get() and
 * decremented by calls to folio_put(). Some typical users of the
 * folio refcount:
 *
 * - Each reference from a page table
 * - The page cache
 * - Filesystem private data
 * - The LRU list
 * - Pipes
 * - Direct IO which references this folio in the process address space
 *
 * Return: The number of references to this folio.
 */
static inline int folio_ref_count(const struct folio *folio)
{
	return page_ref_count(&folio->page);
}
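
/*
 * Example (illustrative sketch, not a kernel API): a caller that
 * already holds a reference can probe for exclusive ownership:
 *
 *	if (folio_ref_count(folio) == 1)
 *		reuse_folio(folio);	(reuse_folio() is hypothetical)
 *
 * The value is stale as soon as it is read unless something prevents
 * the folio gaining new references concurrently.
 */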

static inline int page_count(const struct page *page)
{
	return folio_ref_count(page_folio(page));
}

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(page_ref_set))
		__page_ref_set(page, v);
}

static inline void folio_set_count(struct folio *folio, int v)
{
	set_page_count(&folio->page, v);
}

/*
 * Set up the page count before the page is handed to the page
 * allocator for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}
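
/*
 * Example (sketch; the loop bounds and pfn arithmetic here are
 * assumptions): boot/hotplug code initialises each new struct page
 * with a count of one before releasing it to the allocator, roughly:
 *
 *	for (i = 0; i < nr_pages; i++)
 *		init_page_count(pfn_to_page(start_pfn + i));
 *
 * One real caller of init_page_count() is __init_single_page().
 */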

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
	page_ref_add(&folio->page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
	page_ref_sub(&folio->page, nr);
}
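
/*
 * Example (sketch): batched reference counting. A caller that needs
 * one reference per subpage can take and drop them all at once:
 *
 *	folio_ref_add(folio, refs);
 *	...use the folio...
 *	folio_ref_sub(folio, refs);
 *
 * Note that folio_ref_sub() must not be used to drop what might be
 * the final reference; use folio_ref_sub_and_test() (below) so the
 * folio can be freed when the count reaches zero.
 */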

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
	int ret = atomic_sub_return(nr, &folio->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(&folio->page, -nr, ret);
	return ret;
}
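
/*
 * Example (sketch; "extra_pins" is just an illustrative name): the
 * return value lets the caller detect that only one reference (e.g.
 * its own) now remains:
 *
 *	if (folio_ref_sub_return(folio, extra_pins) == 1)
 *		...caller is now the sole owner...
 */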

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void folio_ref_inc(struct folio *folio)
{
	page_ref_inc(&folio->page);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -1);
}

static inline void folio_ref_dec(struct folio *folio)
{
	page_ref_dec(&folio->page);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
	return page_ref_sub_and_test(&folio->page, nr);
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int folio_ref_inc_return(struct folio *folio)
{
	return page_ref_inc_return(&folio->page);
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_and_test(struct folio *folio)
{
	return page_ref_dec_and_test(&folio->page);
}
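
/*
 * Example: the canonical release pattern. Exactly one thread sees the
 * count hit zero and becomes responsible for freeing. This is
 * essentially what folio_put() does, shown here only to illustrate
 * the *_and_test() contract:
 *
 *	if (folio_ref_dec_and_test(folio))
 *		__folio_put(folio);
 */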

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_return(struct folio *folio)
{
	return page_ref_dec_return(&folio->page);
}

static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
	bool ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
	return page_ref_add_unless(&folio->page, nr, u);
}

/**
 * folio_try_get - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * If you do not already have a reference to a folio, you can attempt to
 * get one using this function. It may fail if, for example, the folio
 * has been freed since you found a pointer to it, or it is frozen for
 * the purposes of splitting or migration.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get(struct folio *folio)
{
	return folio_ref_add_unless(folio, 1, 0);
}
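
/*
 * Example (sketch; lookup details elided): with only RCU protecting
 * the pointer, the reference must be taken conditionally and failure
 * treated as "not found":
 *
 *	rcu_read_lock();
 *	folio = ...speculative lookup...;
 *	if (folio && !folio_try_get(folio))
 *		folio = NULL;	(being freed or frozen; treat as missing)
 *	rcu_read_unlock();
 */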

static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
{
#ifdef CONFIG_TINY_RCU
	/*
	 * The caller guarantees the folio will not be freed from interrupt
	 * context, so (on !SMP) we only need preemption to be disabled
	 * and TINY_RCU does that for us.
	 */
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
	folio_ref_add(folio, count);
#else
	if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
		/* Either the folio has been freed, or will be freed. */
		return false;
	}
#endif
	return true;
}

/**
 * folio_try_get_rcu - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * This is a version of folio_try_get() optimised for non-SMP kernels.
 * If you are still holding the rcu_read_lock() after looking up the
 * folio and know that the folio cannot have its refcount decreased to
 * zero in interrupt context, you can use this instead of folio_try_get().
 *
 * Example users include get_user_pages_fast() (as pages are not unmapped
 * from interrupt context) and the page cache lookups (as pages are not
 * truncated from interrupt context). We also know that pages are not
 * frozen in interrupt context for the purposes of splitting or migration.
 *
 * You can also use this function if you're holding a lock that prevents
 * pages being frozen & removed; eg the i_pages lock for the page cache
 * or the mmap_lock or page table lock for page tables. In this case,
 * it will always succeed, and you could have used a plain folio_get(),
 * but it's sometimes more convenient to have a common function called
 * from both locked and RCU-protected contexts.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get_rcu(struct folio *folio)
{
	return folio_ref_try_add_rcu(folio, 1);
}
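
/*
 * Example (sketch modelled on the page cache lookup in
 * filemap_get_entry(); NULL and value-entry checks elided): after
 * taking the reference, re-check that the folio is still the one at
 * that index, since it may have been freed and reused between the
 * load and the folio_try_get_rcu():
 *
 *	rcu_read_lock();
 *	folio = xas_load(&xas);
 *	if (!folio_try_get_rcu(folio))
 *		goto repeat;
 *	if (unlikely(folio != xas_reload(&xas))) {
 *		folio_put(folio);
 *		goto repeat;
 *	}
 *	rcu_read_unlock();
 */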

static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline int folio_ref_freeze(struct folio *folio, int count)
{
	return page_ref_freeze(&folio->page, count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
	page_ref_unfreeze(&folio->page, count);
}
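
/*
 * Example (sketch loosely following the migration path; error
 * handling elided): freeze the folio at its expected reference count
 * to lock out concurrent folio_try_get() callers, operate on it
 * exclusively, then restore the count:
 *
 *	if (!folio_ref_freeze(folio, expected_refs))
 *		return -EAGAIN;	(someone else holds a reference)
 *	...replace or split the folio...
 *	folio_ref_unfreeze(folio, expected_refs);
 *
 * "expected_refs" must account for every reference the caller knows
 * about: the page cache, private data, and the caller's own. While
 * frozen the count is 0, so folio_ref_add_unless(folio, n, 0) fails.
 */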
#endif