xref: /linux/include/linux/page-flags.h (revision dc9e6f70)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Macros for manipulating and testing page->flags
4  */
5 
6 #ifndef PAGE_FLAGS_H
7 #define PAGE_FLAGS_H
8 
9 #include <linux/types.h>
10 #include <linux/bug.h>
11 #include <linux/mmdebug.h>
12 #ifndef __GENERATING_BOUNDS_H
13 #include <linux/mm_types.h>
14 #include <generated/bounds.h>
15 #endif /* !__GENERATING_BOUNDS_H */
16 
17 /*
18  * Various page->flags bits:
19  *
20  * PG_reserved is set for special pages. The "struct page" of such a page
21  * should in general not be touched (e.g. set dirty) except by its owner.
22  * Pages marked as PG_reserved include:
23  * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
24  *   initrd, HW tables)
25  * - Pages reserved or allocated early during boot (before the page allocator
26  *   was initialized). This includes (depending on the architecture) the
27  *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
28  *   much more. Once (if ever) freed, PG_reserved is cleared and they will
29  *   be given to the page allocator.
30  * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
31  *   to read/write these pages might end badly. Don't touch!
32  * - The zero page(s)
33  * - Pages allocated in the context of kexec/kdump (loaded kernel image,
34  *   control pages, vmcoreinfo)
35  * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
36  *   not marked PG_reserved (as they might be in use by somebody else who does
37  *   not respect the caching strategy).
38  * - MCA pages on ia64
39  * - Pages holding CPU notes for POWER Firmware Assisted Dump
40  * - Device memory (e.g. PMEM, DAX, HMM)
41  * Some PG_reserved pages will be excluded from the hibernation image.
42  * In general, PG_reserved does not hinder anybody from dumping or swapping
43  * and is no longer required for remap_pfn_range(); ioremap might still require it.
44  * Consequently, PG_reserved for a page mapped into user space can indicate
45  * the zero page, the vDSO, MMIO pages or device memory.
46  *
47  * The PG_private bitflag is set on pagecache pages if they contain filesystem
48  * specific data (which is normally at page->private). It can also be used by
49  * private allocations for their own purposes.
50  *
51  * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
52  * and cleared when writeback _starts_ or when read _completes_. PG_writeback
53  * is set before writeback starts and cleared when it finishes.
54  *
55  * PG_locked also pins a page in pagecache, and blocks truncation of the file
56  * while it is held.
57  *
58  * page_waitqueue(page) is a wait queue of all tasks waiting for the page
59  * to become unlocked.
60  *
61  * PG_swapbacked is set when a page uses swap as its backing storage.  These
62  * are usually PageAnon or shmem pages, but note that even anonymous pages
63  * might lose their PG_swapbacked flag when they can simply be dropped (e.g.
64  * as a result of MADV_FREE).
65  *
66  * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
67  * file-backed pagecache (see mm/vmscan.c).
68  *
69  * PG_error is set to indicate that an I/O error occurred on this page.
70  *
71  * PG_arch_1 is an architecture specific page state bit.  The generic code
72  * guarantees that this bit is cleared for a page when it is first entered into
73  * the page cache.
74  *
75  * PG_hwpoison indicates that a page got corrupted in hardware and contains
76  * data with incorrect ECC bits that triggered a machine check. Accessing is
77  * not safe since it may cause another machine check. Don't touch!
78  */
79 
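/*
 * Editorial sketch (not part of the original header): the typical
 * PG_locked/PG_writeback lifecycle on the write side, assuming the
 * folio_lock()/folio_unlock() helpers from linux/pagemap.h and
 * folio_end_writeback() from mm/filemap.c:
 *
 *	folio_lock(folio);		// sets PG_locked, pins the folio
 *	folio_start_writeback(folio);	// sets PG_writeback
 *	folio_unlock(folio);		// clears PG_locked once I/O is queued
 *	...
 *	folio_end_writeback(folio);	// clears PG_writeback on completion
 */
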
80 /*
81  * Don't use the pageflags directly.  Use the PageFoo macros.
82  *
83  * The page flags field is split into two parts, the main flags area
84  * which extends from the low bits upwards, and the fields area which
85  * extends from the high bits downwards.
86  *
87  *  | FIELD | ... | FLAGS |
88  *  N-1           ^       0
89  *               (NR_PAGEFLAGS)
90  *
91  * The fields area is reserved for fields mapping zone, node (for NUMA) and
92  * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
93  * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
94  */
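/*
 * Editorial sketch: the fields area is read by shifting and masking
 * rather than by bit tests.  For example, page_zonenum() in linux/mm.h
 * recovers the zone index as
 *
 *	(page->flags >> ZONES_PGSHIFT) & ZONES_MASK
 *
 * while the flag helpers below only ever touch bits 0..NR_PAGEFLAGS-1.
 */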
95 enum pageflags {
96 	PG_locked,		/* Page is locked. Don't touch. */
97 	PG_writeback,		/* Page is under writeback */
98 	PG_referenced,
99 	PG_uptodate,
100 	PG_dirty,
101 	PG_lru,
102 	PG_head,		/* Must be in bit 6 */
103 	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
104 	PG_active,
105 	PG_workingset,
106 	PG_error,
107 	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
108 	PG_arch_1,
109 	PG_reserved,
110 	PG_private,		/* If pagecache, has fs-private data */
111 	PG_private_2,		/* If pagecache, has fs aux data */
112 	PG_mappedtodisk,	/* Has blocks allocated on-disk */
113 	PG_reclaim,		/* To be reclaimed asap */
114 	PG_swapbacked,		/* Page is backed by RAM/swap */
115 	PG_unevictable,		/* Page is "unevictable"  */
116 #ifdef CONFIG_MMU
117 	PG_mlocked,		/* Page is vma mlocked */
118 #endif
119 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
120 	PG_uncached,		/* Page has been mapped as uncached */
121 #endif
122 #ifdef CONFIG_MEMORY_FAILURE
123 	PG_hwpoison,		/* hardware poisoned page. Don't touch */
124 #endif
125 #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
126 	PG_young,
127 	PG_idle,
128 #endif
129 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
130 	PG_arch_2,
131 	PG_arch_3,
132 #endif
133 	__NR_PAGEFLAGS,
134 
135 	PG_readahead = PG_reclaim,
136 
137 	/*
138 	 * Depending on the way an anonymous folio can be mapped into a page
139 	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
140 	 * THP), PG_anon_exclusive may be set only for the head page or for
141 	 * tail pages of an anonymous folio. For now, we only expect it to be
142 	 * set on tail pages for PTE-mapped THP.
143 	 */
144 	PG_anon_exclusive = PG_mappedtodisk,
145 
146 	/* Filesystems */
147 	PG_checked = PG_owner_priv_1,
148 
149 	/* SwapBacked */
150 	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */
151 
152 	/* Two page bits are conscripted by FS-Cache to maintain local caching
153 	 * state.  These bits are set on pages belonging to the netfs's inodes
154 	 * when those inodes are being locally cached.
155 	 */
156 	PG_fscache = PG_private_2,	/* page backed by cache */
157 
158 	/* XEN */
159 	/* Pinned in Xen as a read-only pagetable page. */
160 	PG_pinned = PG_owner_priv_1,
161 	/* Pinned as part of domain save (see xen_mm_pin_all()). */
162 	PG_savepinned = PG_dirty,
163 	/* Has a grant mapping of another (foreign) domain's page. */
164 	PG_foreign = PG_owner_priv_1,
165 	/* Remapped by swiotlb-xen. */
166 	PG_xen_remapped = PG_owner_priv_1,
167 
168 	/* non-lru isolated movable page */
169 	PG_isolated = PG_reclaim,
170 
171 	/* Only valid for buddy pages. Used to track pages that are reported */
172 	PG_reported = PG_uptodate,
173 
174 #ifdef CONFIG_MEMORY_HOTPLUG
175 	/* For self-hosted memmap pages */
176 	PG_vmemmap_self_hosted = PG_owner_priv_1,
177 #endif
178 
179 	/*
180 	 * Flags only valid for compound pages.  Stored in first tail page's
181 	 * flags word.  Cannot use the first 8 flags or any flag marked as
182 	 * PF_ANY.
183 	 */
184 
185 	/* At least one page in this folio has the hwpoison flag set */
186 	PG_has_hwpoisoned = PG_error,
187 	PG_large_rmappable = PG_workingset, /* anon or file-backed */
188 };
189 
190 #define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)
191 
192 #ifndef __GENERATING_BOUNDS_H
193 
194 #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
195 DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
196 
197 /*
198  * Return the real head page struct iff the @page is a fake head page, otherwise
199  * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
200  */
201 static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
202 {
203 	if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
204 		return page;
205 
206 	/*
207 	 * Only addresses aligned to PAGE_SIZE can be a fake head struct page.
208 	 * The alignment check avoids accessing the fields (e.g. compound_head)
209 	 * of @page[1], which could touch a (possibly) cold cacheline in some
210 	 * cases.
211 	 */
212 	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
213 	    test_bit(PG_head, &page->flags)) {
214 		/*
215 		 * We can safely access the fields of @page[1] when PG_head is
216 		 * set, because @page is then part of a compound page composed
217 		 * of at least two contiguous pages.
218 		 */
219 		unsigned long head = READ_ONCE(page[1].compound_head);
220 
221 		if (likely(head & 1))
222 			return (const struct page *)(head - 1);
223 	}
224 	return page;
225 }
226 #else
227 static inline const struct page *page_fixed_fake_head(const struct page *page)
228 {
229 	return page;
230 }
231 #endif
232 
233 static __always_inline int page_is_fake_head(const struct page *page)
234 {
235 	return page_fixed_fake_head(page) != page;
236 }
237 
238 static inline unsigned long _compound_head(const struct page *page)
239 {
240 	unsigned long head = READ_ONCE(page->compound_head);
241 
242 	if (unlikely(head & 1))
243 		return head - 1;
244 	return (unsigned long)page_fixed_fake_head(page);
245 }
246 
247 #define compound_head(page)	((typeof(page))_compound_head(page))
248 
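/*
 * Editorial note: compound_head() maps any tail page to its head page
 * and returns other pages unchanged; the typeof() cast preserves the
 * caller's const-ness.  A minimal use:
 *
 *	struct page *head = compound_head(page);  // head == page unless
 *						   // page is a tail page
 */
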
249 /**
250  * page_folio - Converts from page to folio.
251  * @p: The page.
252  *
253  * Every page is part of a folio.  This function cannot be called on a
254  * NULL pointer.
255  *
256  * Context: No reference nor lock is required on @p.  If the caller
257  * does not hold a reference, this call may race with a folio split, so
258  * it should re-check the folio still contains this page after gaining
259  * a reference on the folio.
260  * Return: The folio which contains this page.
261  */
262 #define page_folio(p)		(_Generic((p),				\
263 	const struct page *:	(const struct folio *)_compound_head(p), \
264 	struct page *:		(struct folio *)_compound_head(p)))
265 
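/*
 * Editorial sketch of the re-check described in the Context note above,
 * assuming folio_try_get()/folio_put() from linux/mm.h; "retry" is a
 * hypothetical label in the caller:
 *
 *	folio = page_folio(page);
 *	if (!folio_try_get(folio))
 *		goto retry;
 *	if (unlikely(page_folio(page) != folio)) {
 *		folio_put(folio);
 *		goto retry;
 *	}
 */
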
266 /**
267  * folio_page - Return a page from a folio.
268  * @folio: The folio.
269  * @n: The page number to return.
270  *
271  * @n is relative to the start of the folio.  This function does not
272  * check that the page number lies within @folio; the caller is presumed
273  * to have a reference to the page.
274  */
275 #define folio_page(folio, n)	nth_page(&(folio)->page, n)
276 
277 static __always_inline int PageTail(const struct page *page)
278 {
279 	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
280 }
281 
282 static __always_inline int PageCompound(const struct page *page)
283 {
284 	return test_bit(PG_head, &page->flags) ||
285 	       READ_ONCE(page->compound_head) & 1;
286 }
287 
288 #define	PAGE_POISON_PATTERN	-1l
289 static inline int PagePoisoned(const struct page *page)
290 {
291 	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
292 }
293 
294 #ifdef CONFIG_DEBUG_VM
295 void page_init_poison(struct page *page, size_t size);
296 #else
297 static inline void page_init_poison(struct page *page, size_t size)
298 {
299 }
300 #endif
301 
302 static const unsigned long *const_folio_flags(const struct folio *folio,
303 		unsigned n)
304 {
305 	const struct page *page = &folio->page;
306 
307 	VM_BUG_ON_PGFLAGS(PageTail(page), page);
308 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
309 	return &page[n].flags;
310 }
311 
312 static unsigned long *folio_flags(struct folio *folio, unsigned n)
313 {
314 	struct page *page = &folio->page;
315 
316 	VM_BUG_ON_PGFLAGS(PageTail(page), page);
317 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
318 	return &page[n].flags;
319 }
320 
321 /*
322  * Page flags policies wrt compound pages
323  *
324  * PF_POISONED_CHECK:
325  *     check if this struct page is poisoned/uninitialized.
326  *
327  * PF_ANY:
328  *     the page flag is relevant for small, head and tail pages.
329  *
330  * PF_HEAD:
331  *     for compound pages, all operations related to the page flag are
332  *     applied to the head page.
333  *
334  * PF_NO_TAIL:
335  *     modifications of the page flag must be done on small or head pages,
336  *     checks can be done on tail pages too.
337  *
338  * PF_NO_COMPOUND:
339  *     the page flag is not relevant for compound pages.
340  *
341  * PF_SECOND:
342  *     the page flag is stored in the first tail page.
343  */
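/*
 * Editorial example: with the macros below, a PF_HEAD policy lookup such
 * as PF_HEAD(page, 0) expands to
 *
 *	PF_POISONED_CHECK(compound_head(page))
 *
 * so e.g. PageDirty() on a tail page transparently tests PG_dirty on the
 * head page, after asserting that the struct page is not poisoned.
 */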
344 #define PF_POISONED_CHECK(page) ({					\
345 		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
346 		page; })
347 #define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
348 #define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
349 #define PF_NO_TAIL(page, enforce) ({					\
350 		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
351 		PF_POISONED_CHECK(compound_head(page)); })
352 #define PF_NO_COMPOUND(page, enforce) ({				\
353 		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
354 		PF_POISONED_CHECK(page); })
355 #define PF_SECOND(page, enforce) ({					\
356 		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
357 		PF_POISONED_CHECK(&page[1]); })
358 
359 /* Which page is the flag stored in */
360 #define FOLIO_PF_ANY		0
361 #define FOLIO_PF_HEAD		0
362 #define FOLIO_PF_NO_TAIL	0
363 #define FOLIO_PF_NO_COMPOUND	0
364 #define FOLIO_PF_SECOND		1
365 
366 #define FOLIO_HEAD_PAGE		0
367 #define FOLIO_SECOND_PAGE	1
368 
369 /*
370  * Macros to create function definitions for page flags
371  */
372 #define FOLIO_TEST_FLAG(name, page)					\
373 static __always_inline bool folio_test_##name(const struct folio *folio) \
374 { return test_bit(PG_##name, const_folio_flags(folio, page)); }
375 
376 #define FOLIO_SET_FLAG(name, page)					\
377 static __always_inline void folio_set_##name(struct folio *folio)	\
378 { set_bit(PG_##name, folio_flags(folio, page)); }
379 
380 #define FOLIO_CLEAR_FLAG(name, page)					\
381 static __always_inline void folio_clear_##name(struct folio *folio)	\
382 { clear_bit(PG_##name, folio_flags(folio, page)); }
383 
384 #define __FOLIO_SET_FLAG(name, page)					\
385 static __always_inline void __folio_set_##name(struct folio *folio)	\
386 { __set_bit(PG_##name, folio_flags(folio, page)); }
387 
388 #define __FOLIO_CLEAR_FLAG(name, page)					\
389 static __always_inline void __folio_clear_##name(struct folio *folio)	\
390 { __clear_bit(PG_##name, folio_flags(folio, page)); }
391 
392 #define FOLIO_TEST_SET_FLAG(name, page)					\
393 static __always_inline bool folio_test_set_##name(struct folio *folio)	\
394 { return test_and_set_bit(PG_##name, folio_flags(folio, page)); }
395 
396 #define FOLIO_TEST_CLEAR_FLAG(name, page)				\
397 static __always_inline bool folio_test_clear_##name(struct folio *folio) \
398 { return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }
399 
400 #define FOLIO_FLAG(name, page)						\
401 FOLIO_TEST_FLAG(name, page)						\
402 FOLIO_SET_FLAG(name, page)						\
403 FOLIO_CLEAR_FLAG(name, page)
404 
405 #define TESTPAGEFLAG(uname, lname, policy)				\
406 FOLIO_TEST_FLAG(lname, FOLIO_##policy)					\
407 static __always_inline int Page##uname(const struct page *page)		\
408 { return test_bit(PG_##lname, &policy(page, 0)->flags); }
409 
410 #define SETPAGEFLAG(uname, lname, policy)				\
411 FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
412 static __always_inline void SetPage##uname(struct page *page)		\
413 { set_bit(PG_##lname, &policy(page, 1)->flags); }
414 
415 #define CLEARPAGEFLAG(uname, lname, policy)				\
416 FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)					\
417 static __always_inline void ClearPage##uname(struct page *page)		\
418 { clear_bit(PG_##lname, &policy(page, 1)->flags); }
419 
420 #define __SETPAGEFLAG(uname, lname, policy)				\
421 __FOLIO_SET_FLAG(lname, FOLIO_##policy)					\
422 static __always_inline void __SetPage##uname(struct page *page)		\
423 { __set_bit(PG_##lname, &policy(page, 1)->flags); }
424 
425 #define __CLEARPAGEFLAG(uname, lname, policy)				\
426 __FOLIO_CLEAR_FLAG(lname, FOLIO_##policy)				\
427 static __always_inline void __ClearPage##uname(struct page *page)	\
428 { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
429 
430 #define TESTSETFLAG(uname, lname, policy)				\
431 FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy)				\
432 static __always_inline int TestSetPage##uname(struct page *page)	\
433 { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
434 
435 #define TESTCLEARFLAG(uname, lname, policy)				\
436 FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy)				\
437 static __always_inline int TestClearPage##uname(struct page *page)	\
438 { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
439 
440 #define PAGEFLAG(uname, lname, policy)					\
441 	TESTPAGEFLAG(uname, lname, policy)				\
442 	SETPAGEFLAG(uname, lname, policy)				\
443 	CLEARPAGEFLAG(uname, lname, policy)
444 
445 #define __PAGEFLAG(uname, lname, policy)				\
446 	TESTPAGEFLAG(uname, lname, policy)				\
447 	__SETPAGEFLAG(uname, lname, policy)				\
448 	__CLEARPAGEFLAG(uname, lname, policy)
449 
450 #define TESTSCFLAG(uname, lname, policy)				\
451 	TESTSETFLAG(uname, lname, policy)				\
452 	TESTCLEARFLAG(uname, lname, policy)
453 
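/*
 * Editorial example of what the macros above generate:
 * PAGEFLAG(Dirty, dirty, PF_HEAD) (used further down) produces
 *
 *	bool folio_test_dirty(const struct folio *folio)
 *	{ return test_bit(PG_dirty, const_folio_flags(folio, 0)); }
 *	void folio_set_dirty(struct folio *folio)
 *	{ set_bit(PG_dirty, folio_flags(folio, 0)); }
 *	void folio_clear_dirty(struct folio *folio)
 *	{ clear_bit(PG_dirty, folio_flags(folio, 0)); }
 *	int PageDirty(const struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *	void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *	void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *
 * (__always_inline qualifiers dropped for brevity.)
 */
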
454 #define FOLIO_TEST_FLAG_FALSE(name)					\
455 static inline bool folio_test_##name(const struct folio *folio)		\
456 { return false; }
457 #define FOLIO_SET_FLAG_NOOP(name)					\
458 static inline void folio_set_##name(struct folio *folio) { }
459 #define FOLIO_CLEAR_FLAG_NOOP(name)					\
460 static inline void folio_clear_##name(struct folio *folio) { }
461 #define __FOLIO_SET_FLAG_NOOP(name)					\
462 static inline void __folio_set_##name(struct folio *folio) { }
463 #define __FOLIO_CLEAR_FLAG_NOOP(name)					\
464 static inline void __folio_clear_##name(struct folio *folio) { }
465 #define FOLIO_TEST_SET_FLAG_FALSE(name)					\
466 static inline bool folio_test_set_##name(struct folio *folio)		\
467 { return false; }
468 #define FOLIO_TEST_CLEAR_FLAG_FALSE(name)				\
469 static inline bool folio_test_clear_##name(struct folio *folio)		\
470 { return false; }
471 
472 #define FOLIO_FLAG_FALSE(name)						\
473 FOLIO_TEST_FLAG_FALSE(name)						\
474 FOLIO_SET_FLAG_NOOP(name)						\
475 FOLIO_CLEAR_FLAG_NOOP(name)
476 
477 #define TESTPAGEFLAG_FALSE(uname, lname)				\
478 FOLIO_TEST_FLAG_FALSE(lname)						\
479 static inline int Page##uname(const struct page *page) { return 0; }
480 
481 #define SETPAGEFLAG_NOOP(uname, lname)					\
482 FOLIO_SET_FLAG_NOOP(lname)						\
483 static inline void SetPage##uname(struct page *page) {  }
484 
485 #define CLEARPAGEFLAG_NOOP(uname, lname)				\
486 FOLIO_CLEAR_FLAG_NOOP(lname)						\
487 static inline void ClearPage##uname(struct page *page) {  }
488 
489 #define __CLEARPAGEFLAG_NOOP(uname, lname)				\
490 __FOLIO_CLEAR_FLAG_NOOP(lname)						\
491 static inline void __ClearPage##uname(struct page *page) {  }
492 
493 #define TESTSETFLAG_FALSE(uname, lname)					\
494 FOLIO_TEST_SET_FLAG_FALSE(lname)					\
495 static inline int TestSetPage##uname(struct page *page) { return 0; }
496 
497 #define TESTCLEARFLAG_FALSE(uname, lname)				\
498 FOLIO_TEST_CLEAR_FLAG_FALSE(lname)					\
499 static inline int TestClearPage##uname(struct page *page) { return 0; }
500 
501 #define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
502 	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
503 
504 #define TESTSCFLAG_FALSE(uname, lname)					\
505 	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
506 
507 __PAGEFLAG(Locked, locked, PF_NO_TAIL)
508 FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
509 PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
510 FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
511 	FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
512 	__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
513 PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
514 	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
515 PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
516 	TESTCLEARFLAG(LRU, lru, PF_HEAD)
517 PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
518 	TESTCLEARFLAG(Active, active, PF_HEAD)
519 PAGEFLAG(Workingset, workingset, PF_HEAD)
520 	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
521 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */
522 
523 /* Xen */
524 PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
525 	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
526 PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
527 PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
528 PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
529 	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
530 
531 PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
532 	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
533 	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
534 PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
535 	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
536 	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
537 
538 /*
539  * Private page markings that may be used by the filesystem that owns the page
540  * for its own purposes.
541  * - PG_private and PG_private_2 cause release_folio() and co to be invoked
542  */
543 PAGEFLAG(Private, private, PF_ANY)
544 PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
545 PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
546 	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
547 
548 /*
549  * Only test-and-set exist for PG_writeback.  The unconditional operators are
550  * risky: they bypass page accounting.
551  */
552 TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
553 	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
554 PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
555 
556 /* PG_readahead is only used for reads; PG_reclaim is only for writes */
557 PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
558 	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
559 PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
560 	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
561 
562 #ifdef CONFIG_HIGHMEM
563 /*
564  * Must use a macro here due to header dependency issues. page_zone() is not
565  * available at this point.
566  */
567 #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
568 #define folio_test_highmem(__f)	is_highmem_idx(folio_zonenum(__f))
569 #else
570 PAGEFLAG_FALSE(HighMem, highmem)
571 #endif
572 
573 #ifdef CONFIG_SWAP
574 static __always_inline bool folio_test_swapcache(const struct folio *folio)
575 {
576 	return folio_test_swapbacked(folio) &&
577 			test_bit(PG_swapcache, const_folio_flags(folio, 0));
578 }
579 
580 static __always_inline bool PageSwapCache(const struct page *page)
581 {
582 	return folio_test_swapcache(page_folio(page));
583 }
584 
585 SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
586 CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
587 #else
588 PAGEFLAG_FALSE(SwapCache, swapcache)
589 #endif
590 
591 PAGEFLAG(Unevictable, unevictable, PF_HEAD)
592 	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
593 	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
594 
595 #ifdef CONFIG_MMU
596 PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
597 	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
598 	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
599 #else
600 PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
601 	TESTSCFLAG_FALSE(Mlocked, mlocked)
602 #endif
603 
604 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
605 PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
606 #else
607 PAGEFLAG_FALSE(Uncached, uncached)
608 #endif
609 
610 #ifdef CONFIG_MEMORY_FAILURE
611 PAGEFLAG(HWPoison, hwpoison, PF_ANY)
612 TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
613 #define __PG_HWPOISON (1UL << PG_hwpoison)
614 #else
615 PAGEFLAG_FALSE(HWPoison, hwpoison)
616 #define __PG_HWPOISON 0
617 #endif
618 
619 #ifdef CONFIG_PAGE_IDLE_FLAG
620 #ifdef CONFIG_64BIT
621 FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
622 FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
623 FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
624 FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
625 #endif
626 /* See page_idle.h for !64BIT workaround */
627 #else /* !CONFIG_PAGE_IDLE_FLAG */
628 FOLIO_FLAG_FALSE(young)
629 FOLIO_TEST_CLEAR_FLAG_FALSE(young)
630 FOLIO_FLAG_FALSE(idle)
631 #endif
632 
633 /*
634  * PageReported() is used to track reported free pages within the Buddy
635  * allocator. We can use the non-atomic version of the test and set
636  * operations as both should be shielded with the zone lock to prevent
637  * any possible races on the setting or clearing of the bit.
638  */
639 __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
640 
641 #ifdef CONFIG_MEMORY_HOTPLUG
642 PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
643 #else
644 PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
645 #endif
646 
647 /*
648  * On an anonymous folio mapped into a user virtual memory area,
649  * folio->mapping points to its anon_vma, not to a struct address_space;
650  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
651  *
652  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
653  * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
654  * bit; and then folio->mapping points, not to an anon_vma, but to a private
655  * structure which KSM associates with that merged page.  See ksm.h.
656  *
657  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
658  * page and then folio->mapping points to a struct movable_operations.
659  *
660  * Please note that, confusingly, "folio_mapping" refers to the inode
661  * address_space which maps the folio from disk; whereas "folio_mapped"
662  * refers to user virtual address space into which the folio is mapped.
663  *
664  * For slab pages, since slab reuses the bits in struct page to store its
665  * internal states, the folio->mapping does not exist as such, nor do
666  * these flags below.  So in order to avoid testing non-existent bits,
667  * please make sure that folio_test_slab(folio) actually evaluates to
668  * false before calling the following functions (e.g., folio_test_anon).
669  * See mm/slab.h.
670  */
671 #define PAGE_MAPPING_ANON	0x1
672 #define PAGE_MAPPING_MOVABLE	0x2
673 #define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
674 #define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
675 
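/*
 * Editorial sketch of decoding folio->mapping for an anonymous folio,
 * along the lines of folio_anon_vma() in mm/util.c; "mapping" and
 * "anon_vma" are hypothetical locals:
 *
 *	unsigned long mapping = (unsigned long)folio->mapping;
 *
 *	if ((mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
 */
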
676 /*
677  * Different from the flags above, this flag is used only in fsdax mode.  It
678  * indicates that this page->mapping is shared in a reflink case.
679  */
680 #define PAGE_MAPPING_DAX_SHARED	((void *)0x1)
681 
682 static __always_inline bool folio_mapping_flags(const struct folio *folio)
683 {
684 	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
685 }
686 
687 static __always_inline bool PageMappingFlags(const struct page *page)
688 {
689 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
690 }
691 
692 static __always_inline bool folio_test_anon(const struct folio *folio)
693 {
694 	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
695 }
696 
697 static __always_inline bool PageAnon(const struct page *page)
698 {
699 	return folio_test_anon(page_folio(page));
700 }
701 
702 static __always_inline bool __folio_test_movable(const struct folio *folio)
703 {
704 	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
705 			PAGE_MAPPING_MOVABLE;
706 }
707 
708 static __always_inline bool __PageMovable(const struct page *page)
709 {
710 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
711 				PAGE_MAPPING_MOVABLE;
712 }
713 
714 #ifdef CONFIG_KSM
715 /*
716  * A KSM page is one of those write-protected "shared pages" or "merged pages"
717  * which KSM maps into multiple mms, wherever identical anonymous page content
718  * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
719  * anon_vma, but to that page's node of the stable tree.
720  */
721 static __always_inline bool folio_test_ksm(const struct folio *folio)
722 {
723 	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
724 				PAGE_MAPPING_KSM;
725 }
726 
727 static __always_inline bool PageKsm(const struct page *page)
728 {
729 	return folio_test_ksm(page_folio(page));
730 }
731 #else
732 TESTPAGEFLAG_FALSE(Ksm, ksm)
733 #endif
734 
735 u64 stable_page_flags(const struct page *page);
736 
737 /**
738  * folio_xor_flags_has_waiters - Change some folio flags.
739  * @folio: The folio.
740  * @mask: Bits set in this word will be changed.
741  *
742  * This must only be used for flags which are changed with the folio
743  * lock held.  For example, it is unsafe to use for PG_dirty as that
744  * can be set without the folio lock held.  It can also only be used
745  * on flags which are in the range 0-6 as some of the implementations
746  * only affect those bits.
747  *
748  * Return: Whether there are tasks waiting on the folio.
749  */
750 static inline bool folio_xor_flags_has_waiters(struct folio *folio,
751 		unsigned long mask)
752 {
753 	return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
754 }
755 
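/*
 * Editorial example: folio_unlock() in mm/filemap.c uses this helper to
 * clear PG_locked and wake waiters only when some exist (simplified
 * sketch; folio_wake_bit() is internal to mm/filemap.c):
 *
 *	if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
 *		folio_wake_bit(folio, PG_locked);
 */
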
756 /**
757  * folio_test_uptodate - Is this folio up to date?
758  * @folio: The folio.
759  *
760  * The uptodate flag is set on a folio when every byte in the folio is
761  * at least as new as the corresponding bytes on storage.  Anonymous
762  * and CoW folios are always uptodate.  If the folio is not uptodate,
763  * some of the bytes in it may be; see the is_partially_uptodate()
764  * address_space operation.
765  */
766 static inline bool folio_test_uptodate(const struct folio *folio)
767 {
768 	bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
769 	/*
770 	 * Must ensure that the data we read out of the folio is loaded
771 	 * _after_ we've loaded folio->flags to check the uptodate bit.
772 	 * We can skip the barrier if the folio is not uptodate, because
773 	 * we wouldn't be reading anything from it.
774 	 *
775 	 * See folio_mark_uptodate() for the other side of the story.
776 	 */
777 	if (ret)
778 		smp_rmb();
779 
780 	return ret;
781 }
782 
783 static inline bool PageUptodate(const struct page *page)
784 {
785 	return folio_test_uptodate(page_folio(page));
786 }
787 
788 static __always_inline void __folio_mark_uptodate(struct folio *folio)
789 {
790 	smp_wmb();
791 	__set_bit(PG_uptodate, folio_flags(folio, 0));
792 }
793 
794 static __always_inline void folio_mark_uptodate(struct folio *folio)
795 {
796 	/*
797 	 * Memory barrier must be issued before setting the PG_uptodate bit,
798 	 * so that all previous stores issued in order to bring the folio
799 	 * uptodate are actually visible before folio_test_uptodate becomes true.
800 	 */
801 	smp_wmb();
802 	set_bit(PG_uptodate, folio_flags(folio, 0));
803 }
804 
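/*
 * Editorial sketch of the barrier pairing described above:
 *
 *	// writer side
 *	copy data into the folio;
 *	folio_mark_uptodate(folio);	// smp_wmb(), then set PG_uptodate
 *
 *	// reader side
 *	if (folio_test_uptodate(folio))	// test PG_uptodate, then smp_rmb()
 *		read data from the folio;
 */
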
805 static __always_inline void __SetPageUptodate(struct page *page)
806 {
807 	__folio_mark_uptodate((struct folio *)page);
808 }
809 
810 static __always_inline void SetPageUptodate(struct page *page)
811 {
812 	folio_mark_uptodate((struct folio *)page);
813 }
814 
815 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
816 
817 void __folio_start_writeback(struct folio *folio, bool keep_write);
818 void set_page_writeback(struct page *page);
819 
820 #define folio_start_writeback(folio)			\
821 	__folio_start_writeback(folio, false)
822 #define folio_start_writeback_keepwrite(folio)	\
823 	__folio_start_writeback(folio, true)
824 
825 static __always_inline bool folio_test_head(const struct folio *folio)
826 {
827 	return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
828 }
829 
830 static __always_inline int PageHead(const struct page *page)
831 {
832 	PF_POISONED_CHECK(page);
833 	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
834 }
835 
836 __SETPAGEFLAG(Head, head, PF_ANY)
837 __CLEARPAGEFLAG(Head, head, PF_ANY)
838 CLEARPAGEFLAG(Head, head, PF_ANY)
839 
840 /**
841  * folio_test_large() - Does this folio contain more than one page?
842  * @folio: The folio to test.
843  *
844  * Return: True if the folio is larger than one page.
845  */
846 static inline bool folio_test_large(const struct folio *folio)
847 {
848 	return folio_test_head(folio);
849 }
850 
851 static __always_inline void set_compound_head(struct page *page, struct page *head)
852 {
853 	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
854 }
855 
856 static __always_inline void clear_compound_head(struct page *page)
857 {
858 	WRITE_ONCE(page->compound_head, 0);
859 }
860 
861 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
862 static inline void ClearPageCompound(struct page *page)
863 {
864 	BUG_ON(!PageHead(page));
865 	ClearPageHead(page);
866 }
867 FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
868 #else
869 FOLIO_FLAG_FALSE(large_rmappable)
870 #endif
871 
872 #define PG_head_mask ((1UL << PG_head))
873 
874 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
875 /*
876  * PageHuge() only returns true for hugetlbfs pages, but not for
877  * normal or transparent huge pages.
878  *
879  * PageTransHuge() returns true for both transparent huge and
880  * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
881  * called in the core VM paths where hugetlbfs pages can't exist.
882  */
883 static inline int PageTransHuge(const struct page *page)
884 {
885 	VM_BUG_ON_PAGE(PageTail(page), page);
886 	return PageHead(page);
887 }
888 
889 /*
890  * PageTransCompound returns true for both transparent huge pages
891  * and hugetlbfs pages, so it should only be called when it's known
892  * that hugetlbfs pages aren't involved.
893  */
894 static inline int PageTransCompound(const struct page *page)
895 {
896 	return PageCompound(page);
897 }
898 
899 /*
900  * PageTransTail returns true for both transparent huge pages
901  * and hugetlbfs pages, so it should only be called when it's known
902  * that hugetlbfs pages aren't involved.
903  */
904 static inline int PageTransTail(const struct page *page)
905 {
906 	return PageTail(page);
907 }
908 #else
909 TESTPAGEFLAG_FALSE(TransHuge, transhuge)
910 TESTPAGEFLAG_FALSE(TransCompound, transcompound)
911 TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
912 TESTPAGEFLAG_FALSE(TransTail, transtail)
913 #endif
914 
915 #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
916 /*
917  * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
918  * compound page.
919  *
920  * This flag is set by hwpoison handler.  Cleared by THP split or free page.
921  */
922 PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
923 	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
924 #else
925 PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
926 	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
927 #endif
928 
929 /*
930  * For pages that are never mapped to userspace,
931  * page_type may be used.  Because it is initialised to -1, we invert the
932  * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
933  * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
934  * low bits so that an underflow or overflow of _mapcount won't be
935  * mistaken for a page type value.
936  */
937 
938 enum pagetype {
939 	PG_buddy	= 0x40000000,
940 	PG_offline	= 0x20000000,
941 	PG_table	= 0x10000000,
942 	PG_guard	= 0x08000000,
943 	PG_hugetlb	= 0x04000000,
944 	PG_slab		= 0x02000000,
945 	PG_zsmalloc	= 0x01000000,
946 
947 	PAGE_TYPE_BASE	= 0x80000000,
948 
949 	/*
950 	 * Reserve 0xffff0000 - 0xfffffffe to catch _mapcount underflows and
951 	 * allow owners that set a type to reuse the lower 16 bit for their own
952 	 * purposes.
953 	 */
954 	PAGE_MAPCOUNT_RESERVE	= ~0x0000ffff,
955 };
956 
957 #define PageType(page, flag)						\
958 	((READ_ONCE(page->page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
959 #define folio_test_type(folio, flag)					\
960 	((READ_ONCE(folio->page.page_type) & (PAGE_TYPE_BASE | flag))  == PAGE_TYPE_BASE)
961 
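/*
 * Editorial worked example of the inverted encoding: page_type starts at
 * -1 (0xffffffff).  __SetPageBuddy() clears PG_buddy, leaving 0xbfffffff,
 * and PageType(page, PG_buddy) then computes
 *
 *	(0xbfffffff & (PAGE_TYPE_BASE | PG_buddy))
 *		== (0xbfffffff & 0xc0000000) == 0x80000000 == PAGE_TYPE_BASE
 *
 * i.e. true.  A page with an ordinary non-negative _mapcount has the top
 * bit clear, so PageType() is false for it.
 */
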
962 static inline int page_type_has_type(unsigned int page_type)
963 {
964 	return (int)page_type < PAGE_MAPCOUNT_RESERVE;
965 }
966 
967 static inline int page_has_type(const struct page *page)
968 {
969 	return page_type_has_type(READ_ONCE(page->page_type));
970 }
971 
972 #define FOLIO_TYPE_OPS(lname, fname)					\
973 static __always_inline bool folio_test_##fname(const struct folio *folio)\
974 {									\
975 	return folio_test_type(folio, PG_##lname);			\
976 }									\
977 static __always_inline void __folio_set_##fname(struct folio *folio)	\
978 {									\
979 	VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio);		\
980 	folio->page.page_type &= ~PG_##lname;				\
981 }									\
982 static __always_inline void __folio_clear_##fname(struct folio *folio)	\
983 {									\
984 	VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio);		\
985 	folio->page.page_type |= PG_##lname;				\
986 }
987 
988 #define PAGE_TYPE_OPS(uname, lname, fname)				\
989 FOLIO_TYPE_OPS(lname, fname)						\
990 static __always_inline int Page##uname(const struct page *page)		\
991 {									\
992 	return PageType(page, PG_##lname);				\
993 }									\
994 static __always_inline void __SetPage##uname(struct page *page)		\
995 {									\
996 	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
997 	page->page_type &= ~PG_##lname;					\
998 }									\
999 static __always_inline void __ClearPage##uname(struct page *page)	\
1000 {									\
1001 	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
1002 	page->page_type |= PG_##lname;					\
1003 }
1004 
1005 /*
1006  * PageBuddy() indicates that the page is free and in the buddy system
1007  * (see mm/page_alloc.c).
1008  */
1009 PAGE_TYPE_OPS(Buddy, buddy, buddy)
1010 
1011 /*
1012  * PageOffline() indicates that the page is logically offline although the
1013  * containing section is online. (e.g. inflated in a balloon driver or
1014  * not onlined when onlining the section).
1015  * The content of these pages is effectively stale. Such pages should not
1016  * be touched (read/write/dump/save) except by their owner.
1017  *
1018  * When a memory block gets onlined, all pages are initialized with a
1019  * refcount of 1 and PageOffline(). generic_online_page() will
1020  * take care of clearing PageOffline().
1021  *
1022  * If a driver wants to allow offlining unmovable PageOffline() pages without
1023  * putting them back to the buddy, it can do so via the memory notifier by
1024  * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
1025  * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
1026  * pages (now with a reference count of zero) are treated like free (unmanaged)
1027  * pages, allowing the containing memory block to get offlined. A driver that
1028  * relies on this feature is aware that re-onlining the memory block will
1029  * require not giving them to the buddy via generic_online_page().
1030  *
1031  * Memory offlining code will not adjust the managed page count for any
1032  * PageOffline() pages, treating them like they were never exposed to the
1033  * buddy using generic_online_page().
1034  *
1035  * There are drivers that mark a page PageOffline() and expect there won't be
1036  * any further access to page content. PFN walkers that read content of random
1037  * pages should check PageOffline() and synchronize with such drivers using
1038  * page_offline_freeze()/page_offline_thaw().
1039  */
1040 PAGE_TYPE_OPS(Offline, offline, offline)
1041 
1042 extern void page_offline_freeze(void);
1043 extern void page_offline_thaw(void);
1044 extern void page_offline_begin(void);
1045 extern void page_offline_end(void);
1046 
1047 /*
1048  * Marks pages in use as page tables.
1049  */
1050 PAGE_TYPE_OPS(Table, table, pgtable)
1051 
1052 /*
1053  * Marks guardpages used with debug_pagealloc.
1054  */
1055 PAGE_TYPE_OPS(Guard, guard, guard)
1056 
1057 FOLIO_TYPE_OPS(slab, slab)
1058 
1059 /**
1060  * PageSlab - Determine if the page belongs to the slab allocator
1061  * @page: The page to test.
1062  *
1063  * Context: Any context.
1064  * Return: True for slab pages, false for any other kind of page.
1065  */
1066 static inline bool PageSlab(const struct page *page)
1067 {
1068 	return folio_test_slab(page_folio(page));
1069 }
1070 
1071 #ifdef CONFIG_HUGETLB_PAGE
1072 FOLIO_TYPE_OPS(hugetlb, hugetlb)
1073 #else
1074 FOLIO_TEST_FLAG_FALSE(hugetlb)
1075 #endif
1076 
1077 PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
1078 
1079 /**
1080  * PageHuge - Determine if the page belongs to hugetlbfs
1081  * @page: The page to test.
1082  *
1083  * Context: Any context.
1084  * Return: True for hugetlbfs pages, false for anon pages or pages
1085  * belonging to other filesystems.
1086  */
1087 static inline bool PageHuge(const struct page *page)
1088 {
1089 	return folio_test_hugetlb(page_folio(page));
1090 }
1091 
1092 /*
1093  * Check if a page is currently marked HWPoisoned. Note that this check is
1094  * best effort only and inherently racy: there is no way to synchronize with
1095  * failing hardware.
1096  */
1097 static inline bool is_page_hwpoison(const struct page *page)
1098 {
1099 	const struct folio *folio;
1100 
1101 	if (PageHWPoison(page))
1102 		return true;
1103 	folio = page_folio(page);
1104 	return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
1105 }
1106 
1107 bool is_free_buddy_page(const struct page *page);
1108 
1109 PAGEFLAG(Isolated, isolated, PF_ANY);
1110 
1111 static __always_inline int PageAnonExclusive(const struct page *page)
1112 {
1113 	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
1114 	/*
1115 	 * HugeTLB stores this information on the head page; THP keeps it per
1116 	 * page
1117 	 */
1118 	if (PageHuge(page))
1119 		page = compound_head(page);
1120 	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
1121 }
1122 
1123 static __always_inline void SetPageAnonExclusive(struct page *page)
1124 {
1125 	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
1126 	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
1127 	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
1128 }
1129 
1130 static __always_inline void ClearPageAnonExclusive(struct page *page)
1131 {
1132 	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
1133 	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
1134 	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
1135 }
1136 
1137 static __always_inline void __ClearPageAnonExclusive(struct page *page)
1138 {
1139 	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
1140 	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
1141 	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
1142 }
1143 
1144 #ifdef CONFIG_MMU
1145 #define __PG_MLOCKED		(1UL << PG_mlocked)
1146 #else
1147 #define __PG_MLOCKED		0
1148 #endif
1149 
1150 /*
1151  * Flags checked when a page is freed.  Pages being freed should not have
1152  * these flags set.  If they are, there is a problem.
1153  */
1154 #define PAGE_FLAGS_CHECK_AT_FREE				\
1155 	(1UL << PG_lru		| 1UL << PG_locked	|	\
1156 	 1UL << PG_private	| 1UL << PG_private_2	|	\
1157 	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
1158 	 1UL << PG_active 	|				\
1159 	 1UL << PG_unevictable	| __PG_MLOCKED | LRU_GEN_MASK)
1160 
1161 /*
1162  * Flags checked when a page is prepped for return by the page allocator.
1163  * Pages being prepped should not have these flags set.  If they are set,
1164  * there has been a kernel bug or struct page corruption.
1165  *
1166  * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
1167  * alloc-free cycle to prevent from reusing the page.
1168  */
1169 #define PAGE_FLAGS_CHECK_AT_PREP	\
1170 	((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
1171 
1172 /*
1173  * Flags stored in the second page of a compound page.  They may overlap
1174  * the CHECK_AT_FREE flags above, so need to be cleared.
1175  */
1176 #define PAGE_FLAGS_SECOND						\
1177 	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
1178 	 1UL << PG_large_rmappable)
1179 
1180 #define PAGE_FLAGS_PRIVATE				\
1181 	(1UL << PG_private | 1UL << PG_private_2)
1182 /**
1183  * page_has_private - Determine if page has private stuff
1184  * @page: The page to be checked
1185  *
1186  * Determine if a page has private stuff, indicating that release routines
1187  * should be invoked upon it.
1188  */
1189 static inline int page_has_private(const struct page *page)
1190 {
1191 	return !!(page->flags & PAGE_FLAGS_PRIVATE);
1192 }
1193 
1194 static inline bool folio_has_private(const struct folio *folio)
1195 {
1196 	return page_has_private(&folio->page);
1197 }
1198 
1199 #undef PF_ANY
1200 #undef PF_HEAD
1201 #undef PF_NO_TAIL
1202 #undef PF_NO_COMPOUND
1203 #undef PF_SECOND
1204 #endif /* !__GENERATING_BOUNDS_H */
1205 
1206 #endif	/* PAGE_FLAGS_H */
1207