/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/types.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

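/*
 * SPINLOCK_SIZE is sizeof(spinlock_t) in bytes and BITS_PER_LONG/8 is
 * sizeof(long), so this evaluates true when spinlock_t is too large to
 * embed in one word of struct page (e.g. with lock debugging enabled)
 * and split page table locks must be allocated separately instead.
 */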
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)

/*
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};

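/*
 * A minimal usage sketch (not part of this header): these enum values
 * index the per-mm RSS counters, read via get_mm_counter() from
 * <linux/mm.h>. A file-vs-anonymous resident breakdown might look like:
 *
 *	unsigned long file  = get_mm_counter(mm, MM_FILEPAGES);
 *	unsigned long anon  = get_mm_counter(mm, MM_ANONPAGES);
 *	unsigned long shmem = get_mm_counter(mm, MM_SHMEMPAGES);
 *	unsigned long rss   = file + anon + shmem;
 *
 * which is the sum get_mm_rss() computes.
 */
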
struct page;

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};

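/*
 * A minimal usage sketch (assuming the networking helpers, not part of
 * this header): 'struct task_struct' embeds one of these as 'task_frag',
 * and callers such as the socket layer refill it on demand, e.g.:
 *
 *	struct page_frag *pfrag = &current->task_frag;
 *
 *	if (skb_page_frag_refill(bytes, pfrag, GFP_KERNEL)) {
 *		... copy 'bytes' of data into pfrag->page at pfrag->offset ...
 *		pfrag->offset += bytes;
 *	}
 */
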
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_pending() (which internally provides
	 * all needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before I/O is initiated, otherwise a stale TLB entry could
	 * allow the page to be updated without being re-dirtied.
	 */
	bool writable;
#endif
};

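/*
 * A minimal sketch of the contract described above, as used by the
 * generic unmap path in mm/rmap.c (the exact arch_tlbbatch_add_pending()
 * arguments are arch-specific and have changed across releases):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);
 *	batch->flush_required = true;
 *	batch->writable |= pte_dirty(pte);
 *	arch_tlbbatch_add_pending(&batch->arch, mm, addr);
 *	...
 *	arch_tlbbatch_flush(&batch->arch);
 *
 * with all stale TLB entries guaranteed gone on every CPU once
 * arch_tlbbatch_flush() returns.
 */
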
#endif /* _LINUX_MM_TYPES_TASK_H */