/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else who
 *   does not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). Private allocations may
 * also use it for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
 * as a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
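
/*
 * A minimal sketch of how the fields area is consumed (illustrative, not
 * part of this header: page_zonenum()/page_to_nid() and the *_PGSHIFT and
 * *_MASK constants live in linux/mm.h and linux/page-flags-layout.h, and
 * the exact field widths vary with the config):
 *
 *	zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	node = (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 *
 * Everything below NR_PAGEFLAGS belongs to the flag operations defined in
 * this file.
 */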
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
	PG_arch_2,
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state. These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages. Stored in first tail page's
	 * flags word. Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_error,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned to PAGE_SIZE of struct page may be a fake
	 * head struct page. The alignment check avoids accessing the fields
	 * (e.g. compound_head) of @page[1], which can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the fields of @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
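
/*
 * Illustrative example (not from this header): compound_head() is safe to
 * call on any page, so callers can normalise to the head page first:
 *
 *	struct page *head = compound_head(page);
 *
 * For a base (non-compound) page this returns the page itself; for a head
 * or tail page of a compound allocation it returns the head page.
 */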

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio. This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
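
/*
 * Illustrative round trip (assumes the caller already holds a reference):
 *
 *	struct folio *folio = page_folio(page);
 *	struct page *first = folio_page(folio, 0);	// the head page
 *
 * For an order-0 page, folio is simply (struct folio *)page and
 * first == page.
 */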

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}
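
/*
 * How the predicates above relate (illustrative):
 *
 *	page			PageHead  PageTail  PageCompound
 *	base page		false	  false	    false
 *	head of compound page	true	  false	    true
 *	tail of compound page	false	  true	    true
 */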

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)
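
/*
 * For orientation, PAGEFLAG(Dirty, dirty, PF_HEAD) expands (roughly) to
 * six operations:
 *
 *	folio_test_dirty(folio)	// test_bit(PG_dirty, const_folio_flags(folio, 0))
 *	folio_set_dirty(folio)
 *	folio_clear_dirty(folio)
 *	PageDirty(page)		// test_bit(PG_dirty, &PF_HEAD(page, 0)->flags)
 *	SetPageDirty(page)
 *	ClearPageDirty(page)
 *
 * i.e. the PF_HEAD policy transparently redirects the Page* operations
 * from a tail page to its head page.
 */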

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define FOLIO_TEST_FLAG_FALSE(name)					\
static inline bool folio_test_##name(const struct folio *folio)	\
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)					\
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)					\
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)					\
static inline bool folio_test_set_##name(struct folio *folio)		\
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
static inline bool folio_test_clear_##name(struct folio *folio)	\
{ return false; }

#define FOLIO_FLAG_FALSE(name)						\
FOLIO_TEST_FLAG_FALSE(name)						\
FOLIO_SET_FLAG_NOOP(name)						\
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
FOLIO_TEST_FLAG_FALSE(lname)						\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
FOLIO_SET_FLAG_NOOP(lname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
__FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
FOLIO_TEST_SET_FLAG_FALSE(lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
		test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(const struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
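
/*
 * Hedged usage sketch (the real consumer is mm/page_reporting.c; this is
 * illustrative only):
 *
 *	spin_lock_irq(&zone->lock);
 *	if (PageBuddy(page) && !PageReported(page))
 *		__SetPageReported(page);
 *	spin_unlock_irq(&zone->lock);
 */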

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page. See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * page and then page->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the page->mapping does not exist as such, nor do these
 * flags below. So in order to avoid testing non-existent bits, please
 * make sure that PageSlab(page) actually evaluates to false before calling
 * the following functions (e.g., PageAnon). See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Unlike the flags above, this flag is used only for fsdax mode. It
 * indicates that this page->mapping is now under the reflink case.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)
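
/*
 * A minimal sketch of how the tagged ->mapping pointer is decoded (the
 * real helpers live in linux/rmap.h and mm/; error handling omitted):
 *
 *	unsigned long m = (unsigned long)folio->mapping;
 *
 *	if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)(m - PAGE_MAPPING_ANON);
 *	else if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE)
 *		mops = (void *)(m - PAGE_MAPPING_MOVABLE);
 */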

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline int __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(const struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held. For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held. It can also only be used
 * on flags which are in the range 0-6 as some of the implementations
 * only affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
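
/*
 * A minimal usage sketch (this is essentially what folio_unlock() in
 * mm/filemap.c does; shown here for illustration only):
 *
 *	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
 *		folio_wake_bit(folio, PG_locked);
 *
 * i.e. clear PG_locked and, only if the PG_waiters byte reports sleepers,
 * take the slow path to wake them.
 */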

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage. Anonymous
 * and CoW folios are always uptodate. If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}
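
/*
 * Illustrative pairing of the two barriers above:
 *
 *	// writer (e.g. on read completion)
 *	memcpy(folio_address(folio), src, len);
 *	folio_mark_uptodate(folio);		// smp_wmb(), then set_bit()
 *
 *	// reader
 *	if (folio_test_uptodate(folio))		// test_bit(), then smp_rmb()
 *		use(folio_address(folio));
 *
 * The barrier pair ensures a reader that observes PG_uptodate also
 * observes the data stored before the bit was set. folio_address() is
 * from linux/mm.h; use() stands in for any consumer.
 */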

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
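
/*
 * The "+ 1" above sets bit 0 of ->compound_head as the "this is a tail
 * page" tag that _compound_head() tests. Illustration:
 *
 *	set_compound_head(tail, head);	// tail->compound_head == head | 1
 *	compound_head(tail);		// sees bit 0 set, returns head
 *	clear_compound_head(tail);	// back to 0: no longer a tail page
 */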

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() should only
 * be called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler. Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used. Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
 * low bits so that an underflow or overflow of _mapcount won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve 0x0000007f to catch underflows of _mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400
#define PG_hugetlb	0x00000800

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
#define folio_test_type(folio, flag)					\
	((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_type_has_type(unsigned int page_type)
{
	return (int)page_type < PAGE_MAPCOUNT_RESERVE;
}

static inline int page_has_type(const struct page *page)
{
	return page_type_has_type(page->page_type);
}

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio)\
{									\
	return folio_test_type(folio, PG_##lname);			\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio);		\
	folio->page.page_type &= ~PG_##lname;				\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type |= PG_##lname;				\
}

#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)
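
/*
 * Worked example of the inverted encoding, using the Buddy ops just
 * defined (values from the defines above):
 *
 *	page->page_type == 0xffffffff	// -1: no type, _mapcount unused
 *	__SetPageBuddy(page);		// page_type &= ~0x80 -> 0xffffff7f
 *	PageBuddy(page);		// (0xffffff7f & 0xf0000080)
 *					//	== 0xf0000000 -> true
 *	__ClearPageBuddy(page);		// page_type |= 0x80 -> 0xffffffff
 */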

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free pages,
 * allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require re-setting the pages PageOffline() and not giving them to the
 * buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed. Pages being freed should not have
 * these flags set. If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set. If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP \
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page. They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
	 1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(const struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(const struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */