xref: /linux/include/linux/sched/mm.h (revision 1e525507)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on, and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

static inline void smp_mb__after_mmgrab(void)
{
	smp_mb__after_atomic();
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The full memory barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
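
/*
 * Usage sketch (illustrative only, not part of the kernel API): pin @mm so
 * the struct outlives the owning task, then release the pin. The
 * asynchronous hand-off hinted at in the middle is hypothetical.
 */
static inline void example_mm_pin_cycle(struct mm_struct *mm)
{
	mmgrab(mm);	/* mm_count++: the struct mm_struct stays allocated */
	/* ... hand @mm to some asynchronous context ... */
	mmdrop(mm);	/* mm_count--: __mmdrop() frees it when it hits zero */
}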

#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/* Helpers for lazy TLB mm refcounting */
static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmgrab(mm);
}

static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
		mmdrop(mm);
	} else {
		/*
		 * mmdrop_lazy_tlb() must provide a full memory barrier; see
		 * the membarrier comment in finish_task_switch(), which
		 * relies on this.
		 */
		smp_mb();
	}
}

static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmdrop_sched(mm);
	else
		smp_mb(); /* see mmdrop_lazy_tlb() above */
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin the address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput() gets rid of the mappings and all user-space state */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from async context.
 * Can also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif
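
/*
 * Usage sketch (illustrative only): upgrade an mm_count pin to an address
 * space reference before touching mappings. mmget_not_zero() fails once
 * the last user has gone and the address space is being torn down.
 */
static inline bool example_mm_access(struct mm_struct *mm)
{
	if (!mmget_not_zero(mm))	/* address space already gone */
		return false;
	/* ... the mappings are safe to use here ... */
	mmput(mm);			/* drop the mm_users reference */
	return true;
}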

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away and
 * if ptrace_may_access() with the passed mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along
	 * with CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO      implies GFP_NOIO
 * PF_MEMALLOC_NOFS      implies GFP_NOFS
 * PF_MEMALLOC_NORECLAIM implies !__GFP_DIRECT_RECLAIM
 * PF_MEMALLOC_NOWARN    implies __GFP_NOWARN
 * PF_MEMALLOC_PIN       implies !__GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO |
			       PF_MEMALLOC_NOFS |
			       PF_MEMALLOC_NORECLAIM |
			       PF_MEMALLOC_NOWARN |
			       PF_MEMALLOC_PIN))) {
		/*
		 * Stronger flags before weaker flags:
		 * NORECLAIM implies NOIO, which in turn implies NOFS.
		 */
		if (pflags & PF_MEMALLOC_NORECLAIM)
			flags &= ~__GFP_DIRECT_RECLAIM;
		else if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_NOWARN)
			flags |= __GFP_NOWARN;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
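
/*
 * Worked example: with PF_MEMALLOC_NOFS set in current->flags,
 *
 *	current_gfp_context(GFP_KERNEL)
 *		== GFP_KERNEL & ~__GFP_FS
 *		== GFP_NOFS
 *
 * i.e. a GFP_KERNEL allocation inside a memalloc_nofs_save() scope is
 * implicitly degraded to GFP_NOFS.
 */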

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/*
 * Any memory-allocation retry loop should use memalloc_retry_wait(),
 * and pass the flags for the most constrained allocation attempt that
 * might have failed. This provides useful documentation of where loops
 * are, and a central place to fine tune the waiting as the MM
 * implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/*
	 * We use io_schedule_timeout() because waiting for memory
	 * typically includes waiting for dirty pages to be written
	 * out, which requires IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/*
		 * Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait.
		 */
		io_schedule_timeout(HZ/50);
}
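
/* Hypothetical allocation helper, declared only for the sketch below. */
extern void *example_try_alloc(gfp_t gfp);

/*
 * Usage sketch (illustrative only): a retry loop structured as the comment
 * above prescribes, waiting with the flags of the failed attempt.
 */
static inline void *example_retry_alloc(gfp_t gfp)
{
	void *p;

	while (!(p = example_try_alloc(gfp)))
		memalloc_retry_wait(gfp);
	return p;
}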

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. Compiles to nothing
 * without CONFIG_LOCKDEP. Includes a conditional might_sleep() if
 * @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
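
/*
 * Usage sketch (illustrative only): annotate a lookup that usually hits a
 * cache but may allocate, so lockdep sees the potential fs-reclaim
 * dependency even when the fast path never allocates.
 */
static inline void *example_cached_lookup(void *cached, gfp_t gfp)
{
	might_alloc(gfp);	/* annotate unconditionally */
	if (cached)
		return cached;
	/* ... a real slow path would allocate with @gfp here ... */
	return NULL;
}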

/**
 * memalloc_flags_save - Add a PF_* flag to current->flags, save old value
 * @flags: PF_* flags to set.
 *
 * This allows PF_* flags to be conveniently added, irrespective of their
 * current value, and then the old state restored with
 * memalloc_flags_restore().
 *
 * Return: The flag bits that were newly set, to be passed to
 *         memalloc_flags_restore().
 */
static inline unsigned memalloc_flags_save(unsigned flags)
{
	unsigned oldflags = ~current->flags & flags;
	current->flags |= flags;
	return oldflags;
}

static inline void memalloc_flags_restore(unsigned flags)
{
	current->flags &= ~flags;
}
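
/*
 * Usage sketch (illustrative only): nested scopes. Because
 * memalloc_flags_save() returns only the bits it newly set, the inner
 * restore clears PF_MEMALLOC_NOIO while the outer PF_MEMALLOC_NOFS scope
 * stays in effect until its own restore.
 */
static inline void example_nested_scopes(void)
{
	unsigned int outer = memalloc_flags_save(PF_MEMALLOC_NOFS);
	unsigned int inner = memalloc_flags_save(PF_MEMALLOC_NOIO);

	/* ... both NOFS and NOIO semantics apply here ... */
	memalloc_flags_restore(inner);	/* back to the NOFS-only scope */
	memalloc_flags_restore(outer);
}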

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with the
 * flags returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_noio_restore.
 */
static inline unsigned int memalloc_noio_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOIO);
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function.
 * Always make sure that @flags is the return value from the pairing
 * memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with the
 * flags returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_nofs_restore.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOFS);
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function.
 * Always make sure that @flags is the return value from the pairing
 * memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
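
/*
 * Usage sketch (illustrative only): a filesystem section where recursing
 * from direct reclaim back into the filesystem could deadlock.
 */
static inline void example_fs_critical_section(void)
{
	unsigned int nofs_flags = memalloc_nofs_save();

	/* ... GFP_KERNEL allocations behave as GFP_NOFS in here ... */
	memalloc_nofs_restore(nofs_flags);
}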

/**
 * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
 *
 * This function marks the beginning of the __GFP_MEMALLOC allocation scope.
 * All further allocations will implicitly add the __GFP_MEMALLOC flag, which
 * prevents entering reclaim and allows access to all memory reserves. This
 * should only be used when the caller guarantees the allocation will allow
 * more memory to be freed very shortly, i.e. it needs to allocate some
 * memory in the process of freeing memory, and cannot reclaim due to
 * potential recursion.
 *
 * Users of this scope have to be extremely careful to not deplete the
 * reserves completely and implement a throttling mechanism which controls
 * the consumption of the reserve based on the amount of freed memory. Usage
 * of a pre-allocated pool (e.g. mempool) should be always considered before
 * using this scope.
 *
 * Individual allocations under the scope can opt out using __GFP_NOMEMALLOC.
 *
 * Context: This function should not be used in an interrupt context as
 *          interrupt context does not give PF_MEMALLOC access to the
 *          reserves. See __gfp_pfmemalloc_flags().
 * Return: The saved flags to be passed to memalloc_noreclaim_restore.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC);
}

/**
 * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit __GFP_MEMALLOC scope started by memalloc_noreclaim_save
 * function. Always make sure that @flags is the return value from the
 * pairing memalloc_noreclaim_save call.
 */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}

/**
 * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
 *
 * This function marks the beginning of the ~__GFP_MOVABLE allocation scope.
 * All further allocations will implicitly remove the __GFP_MOVABLE flag,
 * which will constrain the allocations to zones that allow long term
 * pinning, i.e. not ZONE_MOVABLE zones.
 *
 * Return: The saved flags to be passed to memalloc_pin_restore.
 */
static inline unsigned int memalloc_pin_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_PIN);
}

/**
 * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit ~__GFP_MOVABLE scope started by memalloc_pin_save
 * function. Always make sure that @flags is the return value from the
 * pairing memalloc_pin_save call.
 */
static inline void memalloc_pin_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * the __GFP_ACCOUNT allocations until the end of the scope will be charged
 * to the given memcg.
 *
 * Please make sure that the caller holds a reference to the passed memcg
 * structure, so its lifetime is guaranteed to exceed the scope between two
 * set_active_memcg() calls.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
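
/*
 * Usage sketch (illustrative only): remote charging with proper nesting.
 * The caller is assumed to hold a reference on @memcg for the whole scope,
 * as required above.
 */
static inline void example_remote_charge(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old = set_active_memcg(memcg);

	/* ... __GFP_ACCOUNT allocations are charged to @memcg here ... */
	set_active_memcg(old);	/* restore the previous charging scope */
}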

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */