/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that
 *   are not marked PG_reserved (as they might be in use by somebody else who
 *   does not respect the caching strategy).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * In general, PG_reserved does not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). Private allocations can
 * also use it for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when a read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it is first entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
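
/*
 * Illustrative sketch, not part of this header: roughly how the PG_locked
 * and PG_writeback lifecycles described above pair up in a filesystem
 * writeback path, using folio helpers from linux/pagemap.h and linux/mm.h
 * (assumes a dirty pagecache folio):
 *
 *	folio_lock(folio);			// take PG_locked, may sleep
 *	folio_clear_dirty_for_io(folio);	// hand dirtiness to writeback
 *	folio_start_writeback(folio);		// set PG_writeback
 *	folio_unlock(folio);			// clear PG_locked, wake waiters
 *	...					// I/O completes asynchronously
 *	folio_end_writeback(folio);		// clear PG_writeback, wake waiters
 */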

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
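
/*
 * Illustrative sketch, not part of this header: the fields area is consumed
 * from the top of the word, so a field such as the zone id is recovered with
 * a shift-and-mask. See linux/mm.h for the real page_zonenum() and the
 * generated ZONES_PGSHIFT/ZONES_MASK constants assumed here:
 *
 *	static inline enum zone_type sketch_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 */
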
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_writeback,		/* Page is under writeback */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_head,		/* Must be in bit 6 */
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_active,
	PG_workingset,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_owner_2,		/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	PG_arch_2,
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	PG_arch_3,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/* Anonymous memory (and shmem) */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */
	/* Some filesystems */
	PG_checked = PG_owner_priv_1,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_owner_2,

	/*
	 * Set if all buffer heads in the folio are mapped.
	 * Filesystems which do not use BHs can use it for their own purpose.
	 */
	PG_mappedtodisk = PG_owner_2,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,

#ifdef CONFIG_MEMORY_HOTPLUG
	/* For self-hosted memmap pages */
	PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif

	/*
	 * Flags only valid for compound pages.  Stored in first tail page's
	 * flags word.  Cannot use the first 8 flags or any flag marked as
	 * PF_ANY.
	 */

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_active,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
	PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/*
 * Return the real head page struct iff the @page is a fake head page, otherwise
 * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake head
	 * struct page. The alignment check aims to avoid accessing the fields
	 * (e.g. compound_head) of the @page[1]. It can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of the @page[1] with PG_head
		 * because the @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}
#endif

static __always_inline int page_is_fake_head(const struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static __always_inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
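
/*
 * Illustrative sketch, not part of this header: a tail page encodes a
 * pointer to its head page in compound_head with bit 0 set, which is why
 * _compound_head() above subtracts 1. Conceptually:
 *
 *	page->compound_head = (unsigned long)head + 1;	// mark as tail
 *	...
 *	if (page->compound_head & 1)			// tail page?
 *		head = (struct page *)(page->compound_head - 1);
 *
 * set_compound_head()/clear_compound_head() further down perform this
 * encoding with WRITE_ONCE().
 */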

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
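
/*
 * Illustrative sketch, not part of this header: page_folio() and
 * folio_page() are inverses for any page within the folio, e.g.:
 *
 *	struct folio *folio = page_folio(page);
 *	struct page *head = folio_page(folio, 0);	// the head page
 *
 * For a non-compound page, folio == (struct folio *)page and the folio
 * spans exactly that one page.
 */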

static __always_inline int PageTail(const struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(const struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static const unsigned long *const_folio_flags(const struct folio *folio,
		unsigned n)
{
	const struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

#define FOLIO_HEAD_PAGE		0
#define FOLIO_SECOND_PAGE	1

/*
 * Macros to create function definitions for page flags
 */
#define FOLIO_TEST_FLAG(name, page)					\
static __always_inline bool folio_test_##name(const struct folio *folio) \
{ return test_bit(PG_##name, const_folio_flags(folio, page)); }

#define FOLIO_SET_FLAG(name, page)					\
static __always_inline void folio_set_##name(struct folio *folio)	\
{ set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void folio_clear_##name(struct folio *folio)	\
{ clear_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_SET_FLAG(name, page)					\
static __always_inline void __folio_set_##name(struct folio *folio)	\
{ __set_bit(PG_##name, folio_flags(folio, page)); }

#define __FOLIO_CLEAR_FLAG(name, page)					\
static __always_inline void __folio_clear_##name(struct folio *folio)	\
{ __clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_SET_FLAG(name, page)					\
static __always_inline bool folio_test_set_##name(struct folio *folio)	\
{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_TEST_CLEAR_FLAG(name, page)				\
static __always_inline bool folio_test_clear_##name(struct folio *folio) \
{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }

#define FOLIO_FLAG(name, page)						\
FOLIO_TEST_FLAG(name, page)						\
FOLIO_SET_FLAG(name, page)						\
FOLIO_CLEAR_FLAG(name, page)

#define TESTPAGEFLAG(uname, lname, policy)				\
FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
static __always_inline int Page##uname(const struct page *page)	\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
__FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)
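
/*
 * Illustrative sketch, not part of this header: PAGEFLAG(Dirty, dirty,
 * PF_HEAD) (used further down) expands, roughly, to the folio accessors
 * plus:
 *
 *	static __always_inline int PageDirty(const struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *
 * where PF_HEAD() redirects the operation to the head page of a compound
 * page after the poison check.
 */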

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define FOLIO_TEST_FLAG_FALSE(name)					\
static inline bool folio_test_##name(const struct folio *folio)	\
{ return false; }
#define FOLIO_SET_FLAG_NOOP(name)					\
static inline void folio_set_##name(struct folio *folio) { }
#define FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void folio_clear_##name(struct folio *folio) { }
#define __FOLIO_SET_FLAG_NOOP(name)					\
static inline void __folio_set_##name(struct folio *folio) { }
#define __FOLIO_CLEAR_FLAG_NOOP(name)					\
static inline void __folio_clear_##name(struct folio *folio) { }
#define FOLIO_TEST_SET_FLAG_FALSE(name)					\
static inline bool folio_test_set_##name(struct folio *folio)		\
{ return false; }
#define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
static inline bool folio_test_clear_##name(struct folio *folio)	\
{ return false; }

#define FOLIO_FLAG_FALSE(name)						\
FOLIO_TEST_FLAG_FALSE(name)						\
FOLIO_SET_FLAG_NOOP(name)						\
FOLIO_CLEAR_FLAG_NOOP(name)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
FOLIO_TEST_FLAG_FALSE(lname)						\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
FOLIO_SET_FLAG_NOOP(lname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
__FOLIO_CLEAR_FLAG_NOOP(lname)						\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)					\
FOLIO_TEST_SET_FLAG_FALSE(lname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
	__FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)

/* owner_2 can be set on tail pages for anon memory */
FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, const_folio_flags(folio, 0));
}

FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(swapcache)
#endif

FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)

#ifdef CONFIG_MMU
FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
	__FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
	FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
	FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
FOLIO_FLAG_FALSE(mlocked)
	__FOLIO_CLEAR_FLAG_NOOP(mlocked)
	FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
	FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#ifdef CONFIG_PAGE_IDLE_FLAG
#ifdef CONFIG_64BIT
FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
#endif
/* See page_idle.h for !64BIT workaround */
#else /* !CONFIG_PAGE_IDLE_FLAG */
FOLIO_FLAG_FALSE(young)
FOLIO_TEST_CLEAR_FLAG_FALSE(young)
FOLIO_FLAG_FALSE(idle)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

#ifdef CONFIG_MEMORY_HOTPLUG
PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
#else
PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif

/*
 * On an anonymous folio mapped into a user virtual memory area,
 * folio->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then folio->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then folio->mapping points to a struct movable_operations.
 *
 * Please note that, confusingly, "folio_mapping" refers to the inode
 * address_space which maps the folio from disk; whereas "folio_mapped"
 * refers to the user virtual address space into which the folio is mapped.
 *
 * For slab pages, since slab reuses the bits in struct page to store its
 * internal states, the folio->mapping does not exist as such, nor do these
 * flags below.  So in order to avoid testing non-existent bits, please make
 * sure that folio_test_slab(folio) actually evaluates to false before calling
 * the following functions (e.g., folio_test_anon).  See mm/slab.h.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/*
 * Different from the flags above, this flag is used only for fsdax mode.
 * It indicates that this page->mapping is now shared between files via
 * reflink.
 */
#define PAGE_MAPPING_DAX_SHARED	((void *)0x1)
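
/*
 * Illustrative sketch, not part of this header: the low bits of
 * folio->mapping are decoded by masking them off. For example, recovering
 * the anon_vma of an anonymous folio looks roughly like this (see
 * folio_anon_vma() in mm/internal.h for the real helper):
 *
 *	unsigned long mapping = (unsigned long)folio->mapping;
 *
 *	if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
 */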

static __always_inline bool folio_mapping_flags(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool PageMappingFlags(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(const struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline bool __folio_test_movable(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

static __always_inline bool __PageMovable(const struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
			PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(const struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(const struct page *page);

/**
 * folio_xor_flags_has_waiters - Change some folio flags.
 * @folio: The folio.
 * @mask: Bits set in this word will be changed.
 *
 * This must only be used for flags which are changed with the folio
 * lock held.  For example, it is unsafe to use for PG_dirty as that
 * can be set without the folio lock held.  It can also only be used on
 * flags which are in the range 0-6 as some of the implementations only
 * affect those bits.
 *
 * Return: Whether there are tasks waiting on the folio.
 */
static inline bool folio_xor_flags_has_waiters(struct folio *folio,
		unsigned long mask)
{
	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
}
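
/*
 * Illustrative sketch, not part of this header: this is roughly how
 * folio_unlock() in mm/filemap.c uses the helper, clearing PG_locked and
 * waking waiters only when PG_waiters was observed in the same byte:
 *
 *	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
 *		folio_wake_bit(folio, PG_locked);
 */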

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(const struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline bool PageUptodate(const struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}
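
/*
 * Illustrative sketch, not part of this header: the smp_wmb() in
 * folio_mark_uptodate() pairs with the smp_rmb() in folio_test_uptodate(),
 * so a reader that observes PG_uptodate also observes the data stored
 * before the bit was set:
 *
 *	// writer (e.g. read completion)
 *	memcpy(folio_address(folio), src, len);
 *	folio_mark_uptodate(folio);		// smp_wmb() before set_bit()
 *
 *	// reader
 *	if (folio_test_uptodate(folio))		// smp_rmb() after test_bit()
 *		...				// folio contents safe to read
 */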

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

void __folio_start_writeback(struct folio *folio, bool keep_write);
void set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)		\
	__folio_start_writeback(folio, true)

static __always_inline bool folio_test_head(const struct folio *folio)
{
	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(const struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(const struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
/*
 * PG_partially_mapped is protected by deferred_split split_queue_lock,
 * so it's safe to use non-atomic set/clear.
 */
__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
FOLIO_TEST_FLAG_FALSE(partially_mapped)
__FOLIO_SET_FLAG_NOOP(partially_mapped)
__FOLIO_CLEAR_FLAG_NOOP(partially_mapped)
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages.  PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(const struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(const struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(const struct page *page)
{
	return PageTail(page);
}
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler.  Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * For pages that do not use mapcount, page_type may be used.
 * The low 24 bits of pagetype may be used for your own purposes, as long
 * as you are careful to not affect the top 8 bits.  The low bits of
 * pagetype will be overwritten when you clear the page_type from the page.
 */
enum pagetype {
	/* 0x00-0x7f are positive numbers, ie mapcount */
	/* Reserve 0x80-0xef for mapcount overflow. */
	PGTY_buddy	= 0xf0,
	PGTY_offline	= 0xf1,
	PGTY_table	= 0xf2,
	PGTY_guard	= 0xf3,
	PGTY_hugetlb	= 0xf4,
	PGTY_slab	= 0xf5,
	PGTY_zsmalloc	= 0xf6,
	PGTY_unaccepted	= 0xf7,

	PGTY_mapcount_underflow = 0xff
};

static inline bool page_type_has_type(int page_type)
{
	return page_type < (PGTY_mapcount_underflow << 24);
}

/* This takes a mapcount which is one more than page->_mapcount */
static inline bool page_mapcount_is_type(unsigned int mapcount)
{
	return page_type_has_type(mapcount - 1);
}

static inline bool page_has_type(const struct page *page)
{
	return page_mapcount_is_type(data_race(page->page_type));
}
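
/*
 * Illustrative sketch, not part of this header: page_type aliases
 * _mapcount, with the type stored in the top byte. Typed pages therefore
 * look like large negative mapcounts, which is what page_type_has_type()
 * tests for:
 *
 *	(unsigned int)PGTY_buddy << 24		// 0xf0000000
 *	page_type_has_type(0xf0000000)		// true: a typed (buddy) page
 *	page_type_has_type(7)			// false: a plain mapcount
 */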

#define FOLIO_TYPE_OPS(lname, fname)					\
static __always_inline bool folio_test_##fname(const struct folio *folio) \
{									\
	return data_race(folio->page.page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __folio_set_##fname(struct folio *folio)	\
{									\
	if (folio_test_##fname(folio))					\
		return;							\
	VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX,	\
			folio);						\
	folio->page.page_type = (unsigned int)PGTY_##lname << 24;	\
}									\
static __always_inline void __folio_clear_##fname(struct folio *folio)	\
{									\
	if (folio->page.page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
	folio->page.page_type = UINT_MAX;				\
}

#define PAGE_TYPE_OPS(uname, lname, fname)				\
FOLIO_TYPE_OPS(lname, fname)						\
static __always_inline int Page##uname(const struct page *page)	\
{									\
	return data_race(page->page_type >> 24) == PGTY_##lname;	\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	if (Page##uname(page))						\
		return;							\
	VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page);	\
	page->page_type = (unsigned int)PGTY_##lname << 24;		\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	if (page->page_type == UINT_MAX)				\
		return;							\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type = UINT_MAX;					\
}
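
/*
 * Illustrative sketch, not part of this header: PAGE_TYPE_OPS(Buddy,
 * buddy, buddy) below generates folio_test_buddy(), __folio_set_buddy(),
 * __folio_clear_buddy() and the page-based equivalents, e.g.:
 *
 *	static __always_inline int PageBuddy(const struct page *page)
 *	{
 *		return data_race(page->page_type >> 24) == PGTY_buddy;
 *	}
 *
 * The setters are deliberately non-atomic: page_type transitions are
 * expected to be serialized externally, e.g. by the zone lock for buddy
 * pages.
 */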

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale.  Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * When a memory block gets onlined, all pages are initialized with a
 * refcount of 1 and PageOffline().  generic_online_page() will
 * take care of clearing PageOffline().
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE.  When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free (unmanaged)
 * pages, allowing the containing memory block to get offlined.  A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require not giving them to the buddy via generic_online_page().
 *
 * Memory offlining code will not adjust the managed page count for any
 * PageOffline() pages, treating them like they were never exposed to the
 * buddy using generic_online_page().
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content.  PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table, pgtable)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard, guard)

FOLIO_TYPE_OPS(slab, slab)

/**
 * PageSlab - Determine if the page belongs to the slab allocator
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for slab pages, false for any other kind of page.
 */
static inline bool PageSlab(const struct page *page)
{
	return folio_test_slab(page_folio(page));
}

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)

/*
 * Mark pages that have to be accepted before they can be touched for the
 * first time.
 *
 * Serialized with zone lock.
 */
PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned.  Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(const struct page *page)
{
	const struct folio *folio;

	if (PageHWPoison(page))
		return true;
	folio = page_folio(page);
	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}

bool is_free_buddy_page(const struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(const struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	/*
	 * HugeTLB stores this information on the head page; THP keeps it per
	 * page
	 */
	if (PageHuge(page))
		page = compound_head(page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru | 1UL << PG_locked |			\
	 1UL << PG_private | 1UL << PG_private_2 |		\
	 1UL << PG_writeback | 1UL << PG_reserved |		\
	 1UL << PG_active |					\
	 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)

/*
 * Flags stored in the second page of a compound page.  They may overlap
 * the CHECK_AT_FREE flags above, so need to be cleared.
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */ | 1UL << PG_has_hwpoisoned |		\
	 1UL << PG_large_rmappable | 1UL << PG_partially_mapped)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * folio_has_private - Determine if folio has private stuff
 * @folio: The folio to be checked
 *
 * Determine if a folio has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int folio_has_private(const struct folio *folio)
{
	return !!(folio->flags & PAGE_FLAGS_PRIVATE);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */