/* xref: /linux/include/linux/mman.h (revision d6fd48ef) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_MMAN_H
3 #define _LINUX_MMAN_H
4 
5 #include <linux/mm.h>
6 #include <linux/percpu_counter.h>
7 
8 #include <linux/atomic.h>
9 #include <uapi/linux/mman.h>
10 
11 /*
12  * Arrange for legacy / undefined architecture specific flags to be
13  * ignored by mmap handling code.
14  */
15 #ifndef MAP_32BIT
16 #define MAP_32BIT 0
17 #endif
18 #ifndef MAP_HUGE_2MB
19 #define MAP_HUGE_2MB 0
20 #endif
21 #ifndef MAP_HUGE_1GB
22 #define MAP_HUGE_1GB 0
23 #endif
24 #ifndef MAP_UNINITIALIZED
25 #define MAP_UNINITIALIZED 0
26 #endif
27 #ifndef MAP_SYNC
28 #define MAP_SYNC 0
29 #endif
30 
31 /*
32  * The historical set of flags that all mmap implementations implicitly
33  * support when a ->mmap_validate() op is not provided in file_operations.
34  *
35  * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the
36  * kernel.
37  */
38 #define LEGACY_MAP_MASK (MAP_SHARED \
39 		| MAP_PRIVATE \
40 		| MAP_FIXED \
41 		| MAP_ANONYMOUS \
42 		| MAP_DENYWRITE \
43 		| MAP_EXECUTABLE \
44 		| MAP_UNINITIALIZED \
45 		| MAP_GROWSDOWN \
46 		| MAP_LOCKED \
47 		| MAP_NORESERVE \
48 		| MAP_POPULATE \
49 		| MAP_NONBLOCK \
50 		| MAP_STACK \
51 		| MAP_HUGETLB \
52 		| MAP_32BIT \
53 		| MAP_HUGE_2MB \
54 		| MAP_HUGE_1GB)
55 
/*
 * Overcommit accounting state, defined elsewhere.  The sysctl_* knobs
 * select and tune the overcommit policy; vm_committed_as is the per-CPU
 * counter of committed (charged) memory, updated via vm_acct_memory().
 */
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;
60 
#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
extern void mm_compute_batch(int overcommit_policy);
#else
/*
 * On UP builds there is no cross-CPU error to amortise: a batch size of
 * 0 is used for vm_committed_as and recomputing it is a no-op.
 */
#define vm_committed_as_batch 0
static inline void mm_compute_batch(int overcommit_policy)
{
}
#endif
70 
71 unsigned long vm_memory_committed(void);
72 
/*
 * Charge @pages to the committed-memory counter.  @pages may be
 * negative to uncharge; updates are batched per-CPU with
 * vm_committed_as_batch.
 */
static inline void vm_acct_memory(long pages)
{
	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}
77 
/* Undo a previous vm_acct_memory() charge of @pages. */
static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
82 
83 /*
84  * Allow architectures to handle additional protection and flag bits. The
85  * overriding macros must be defined in the arch-specific asm/mman.h file.
86  */
87 
88 #ifndef arch_calc_vm_prot_bits
89 #define arch_calc_vm_prot_bits(prot, pkey) 0
90 #endif
91 
92 #ifndef arch_calc_vm_flag_bits
93 #define arch_calc_vm_flag_bits(flags) 0
94 #endif
95 
96 #ifndef arch_validate_prot
97 /*
98  * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
99  * already been masked out.
100  *
101  * Returns true if the prot flags are valid
102  */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	/* Generic fallback: only the four common PROT_* bits are valid. */
	return !(prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM));
}
107 #define arch_validate_prot arch_validate_prot
108 #endif
109 
110 #ifndef arch_validate_flags
111 /*
112  * This is called from mmap() and mprotect() with the updated vma->vm_flags.
113  *
114  * Returns true if the VM_* flags are valid.
115  */
static inline bool arch_validate_flags(unsigned long flags)
{
	/* Generic fallback: no architecture-specific VM_* restrictions. */
	return true;
}
120 #define arch_validate_flags arch_validate_flags
121 #endif
122 
123 /*
124  * Optimisation macro.  It is equivalent to:
125  *      (x & bit1) ? bit2 : 0
126  * but this version is faster.
127  * ("bit1" and "bit2" must be single bits)
128  */
129 #define _calc_vm_trans(x, bit1, bit2) \
130   ((!(bit1) || !(bit2)) ? 0 : \
131   ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
132    : ((x) & (bit1)) / ((bit1) / (bit2))))
133 
134 /*
135  * Combine the mmap "prot" argument into "vm_flags" used internally.
136  */
137 static inline unsigned long
138 calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
139 {
140 	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
141 	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
142 	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
143 	       arch_calc_vm_prot_bits(prot, pkey);
144 }
145 
146 /*
147  * Combine the mmap "flags" argument into "vm_flags" used internally.
148  */
149 static inline unsigned long
150 calc_vm_flag_bits(unsigned long flags)
151 {
152 	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
153 	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
154 	       _calc_vm_trans(flags, MAP_SYNC,	     VM_SYNC      ) |
155 	       arch_calc_vm_flag_bits(flags);
156 }
157 
158 unsigned long vm_commit_limit(void);
159 
160 /*
161  * Denies creating a writable executable mapping or gaining executable permissions.
162  *
163  * This denies the following:
164  *
165  * 	a)	mmap(PROT_WRITE | PROT_EXEC)
166  *
167  *	b)	mmap(PROT_WRITE)
168  *		mprotect(PROT_EXEC)
169  *
170  *	c)	mmap(PROT_WRITE)
171  *		mprotect(PROT_READ)
172  *		mprotect(PROT_EXEC)
173  *
174  * But allows the following:
175  *
176  *	d)	mmap(PROT_READ | PROT_EXEC)
177  *		mmap(PROT_READ | PROT_EXEC | PROT_BTI)
178  */
179 static inline bool map_deny_write_exec(struct vm_area_struct *vma,  unsigned long vm_flags)
180 {
181 	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
182 		return false;
183 
184 	if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
185 		return true;
186 
187 	if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
188 		return true;
189 
190 	return false;
191 }
192 
193 #endif /* _LINUX_MMAN_H */
194