/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H

/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

#include <asm/page.h>

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
#include <asm/tlbbatch.h>
#endif

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
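
/*
 * Illustrative sketch (not part of this header): <linux/mm_types.h> uses
 * ALLOC_SPLIT_PTLOCKS roughly as below inside struct page -- when the
 * spinlock does not fit in an unsigned long, a pointer to a lock obtained
 * from ptlock_alloc() is embedded instead of the lock itself.
 */
#if 0	/* example only */
#if ALLOC_SPLIT_PTLOCKS
	spinlock_t *ptl;	/* separately allocated page-table lock */
#else
	spinlock_t ptl;		/* lock embedded directly in struct page */
#endif
#endif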

/*
 * When updating this, please also update struct resident_page_types[] in
 * kernel/fork.c
 */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};
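
/*
 * For reference, the matching name array in kernel/fork.c looks roughly
 * like this (NAMED_ARRAY_INDEX comes from <linux/kernel.h>; sketch only):
 *
 *	static const char * const resident_page_types[] = {
 *		NAMED_ARRAY_INDEX(MM_FILEPAGES),
 *		NAMED_ARRAY_INDEX(MM_ANONPAGES),
 *		NAMED_ARRAY_INDEX(MM_SWAPENTS),
 *		NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
 *	};
 */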

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */
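
/*
 * Illustrative sketch with assumed names (the real logic lives in
 * mm/memory.c): under SPLIT_RSS_COUNTING each thread batches counter
 * updates in its task_rss_stat and only folds them into the shared
 * per-mm counters once 'events' crosses a threshold, trading transiently
 * stale totals for less atomic-op traffic.
 */
#if 0	/* example only */
#define EXAMPLE_RSS_EVENTS_THRESH	64	/* assumed threshold */

static void example_add_rss_counter(struct task_struct *task, int member, int val)
{
	task->rss_stat.count[member] += val;	/* cheap, thread-local add */
	if (++task->rss_stat.events >= EXAMPLE_RSS_EVENTS_THRESH)
		example_sync_rss_stat(task);	/* hypothetical helper: fold into mm->rss_stat */
}
#endif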

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
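
/*
 * Illustrative sketch, not a kernel API (the real accessors such as
 * get_mm_counter() live in <linux/mm.h>): reading one RSS counter and
 * clamping the transiently negative values that batched updates allow.
 */
static inline unsigned long example_read_rss_counter(struct mm_rss_stat *rss,
						     int member)
{
	long val = atomic_long_read(&rss->count[member]);

	return val < 0 ? 0 : (unsigned long)val;
}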

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
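
/*
 * Illustrative sketch (assumed helper name): a page_frag is consumed by
 * carving ranges off the front; 'offset' advances toward 'size' until too
 * little room remains and the caller must refill the frag (see
 * skb_page_frag_refill() in net/core/sock.c for the real pattern).
 */
#if 0	/* example only */
static bool example_page_frag_take(struct page_frag *pfrag, __u32 bytes,
				   __u32 *offset)
{
	if (pfrag->offset + bytes > pfrag->size)
		return false;		/* not enough room; refill needed */
	*offset = pfrag->offset;	/* caller copies data at this offset */
	pfrag->offset += bytes;
	return true;
}
#endif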

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * The arch code makes the following promise: generic code can modify a
	 * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
	 * needed barriers), then call arch_tlbbatch_flush(), and the entries
	 * will be flushed on all CPUs by the time that arch_tlbbatch_flush()
	 * returns.
	 */
	struct arch_tlbflush_unmap_batch arch;

	/* True if a flush is needed. */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
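
/*
 * Illustrative sketch of the contract described above (assumed function
 * name; the real users are in mm/rmap.c): each PTE cleared during unmap
 * is queued with arch_tlbbatch_add_mm(), and a single arch_tlbbatch_flush()
 * call later makes the whole batch visible on all CPUs.
 */
#if 0	/* example only */
static void example_queue_tlb_flush(struct tlbflush_unmap_batch *batch,
				    struct mm_struct *mm, bool pte_was_dirty)
{
	arch_tlbbatch_add_mm(&batch->arch, mm);
	batch->flush_required = true;
	/* dirty PTEs must be flushed before page IO starts */
	if (pte_was_dirty)
		batch->writable = true;
}
#endif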

#endif /* _LINUX_MM_TYPES_TASK_H */