/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/sched/coredump.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on, and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

static inline void smp_mb__after_mmgrab(void)
{
	smp_mb__after_atomic();
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The full memory barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
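
/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * that must keep the mm_struct itself valid across a sleep, but not the
 * address space, pins mm_count and later revalidates mm_users before
 * touching any mappings:
 *
 *	mmgrab(mm);
 *	...
 *	if (mmget_not_zero(mm)) {
 *		do_something_with_the_address_space(mm);
 *		mmput(mm);
 *	}
 *	mmdrop(mm);
 *
 * do_something_with_the_address_space() is a hypothetical placeholder.
 */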

#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting to RCU
 * on RT kernels.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif

/* Helpers for lazy TLB mm refcounting */
static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmgrab(mm);
}

static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
		mmdrop(mm);
	} else {
		/*
		 * mmdrop_lazy_tlb must provide a full memory barrier; see the
		 * membarrier comment in finish_task_switch() which relies on
		 * this.
		 */
		smp_mb();
	}
}

static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmdrop_sched(mm);
	else
		smp_mb(); /* see mmdrop_lazy_tlb() above */
}
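
/*
 * Rough sketch of the intended call pattern (scheduler-internal and
 * heavily simplified here): a kernel thread borrowing the previous
 * task's mm for lazy TLB balances the borrow with these helpers rather
 * than with raw mmgrab()/mmdrop():
 *
 *	mmgrab_lazy_tlb(prev_mm);
 *	...run with prev_mm as the lazy TLB mm...
 *	mmdrop_lazy_tlb(prev_mm);
 *
 * With CONFIG_MMU_LAZY_TLB_REFCOUNT=n the refcount is elided and
 * mmdrop_lazy_tlb() degrades to the full barrier membarrier relies on.
 */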

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from the async context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the given mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
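
/*
 * Typical pattern for looking at another task's address space
 * (illustrative; error handling trimmed):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...inspect or walk the address space...
 *		mmput(mm);
 *	}
 */
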
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags) (TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif

extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);

unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
				   unsigned long addr, unsigned long len,
				   unsigned long pgoff, unsigned long flags);

unsigned long
arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags, vm_flags_t vm_flags);
unsigned long
arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
				       unsigned long len, unsigned long pgoff,
				       unsigned long flags, vm_flags_t);

unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
					   struct file *filp,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags,
					   vm_flags_t vm_flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO      implies GFP_NOIO
 * PF_MEMALLOC_NOFS      implies GFP_NOFS
 * PF_MEMALLOC_NORECLAIM implies no direct reclaim (~__GFP_DIRECT_RECLAIM)
 * PF_MEMALLOC_NOWARN    implies __GFP_NOWARN
 * PF_MEMALLOC_PIN       implies !__GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO |
			       PF_MEMALLOC_NOFS |
			       PF_MEMALLOC_NORECLAIM |
			       PF_MEMALLOC_NOWARN |
			       PF_MEMALLOC_PIN))) {
		/*
		 * Stronger flags before weaker flags:
		 * NORECLAIM implies NOIO, which in turn implies NOFS.
		 */
		if (pflags & PF_MEMALLOC_NORECLAIM)
			flags &= ~__GFP_DIRECT_RECLAIM;
		else if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_NOWARN)
			flags |= __GFP_NOWARN;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
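
/*
 * For example (illustrative): inside a memalloc_noio_save() scope,
 *
 *	gfp_t gfp = current_gfp_context(GFP_KERNEL);
 *
 * yields GFP_KERNEL with __GFP_IO and __GFP_FS cleared, so a helper
 * that wants to honour the caller's scope can filter its gfp mask
 * through current_gfp_context() before allocating.
 */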

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/*
 * Any memory-allocation retry loop should use memalloc_retry_wait(),
 * and pass the flags for the most constrained allocation attempt that
 * might have failed. This provides useful documentation of where loops
 * are, and a central place to fine-tune the waiting as the MM
 * implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/*
	 * We use io_schedule_timeout() because waiting for memory
	 * typically includes waiting for dirty pages to be written out,
	 * which requires IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/*
		 * Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait.
		 */
		io_schedule_timeout(HZ/50);
}
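
/*
 * A conforming retry loop would look like this (sketch; "order" and the
 * surrounding function are hypothetical):
 *
 *	struct page *page;
 *
 *	while (!(page = alloc_pages(GFP_KERNEL, order)))
 *		memalloc_retry_wait(GFP_KERNEL);
 */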

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in functions
 * that might allocate, but often don't. Compiles to nothing without
 * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows
 * blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
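
/*
 * Example annotation (sketch; my_get_buf() and struct my_ctx are
 * hypothetical): a function that only allocates on its slow path can
 * still document and lockdep-check the possibility up front:
 *
 *	void *my_get_buf(struct my_ctx *ctx, gfp_t gfp)
 *	{
 *		might_alloc(gfp);
 *
 *		if (ctx->cached)
 *			return ctx->cached;
 *		return kmalloc(ctx->size, gfp);
 *	}
 */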

/**
 * memalloc_flags_save - Add a PF_* flag to current->flags, save old value
 * @flags: The PF_* flags to set.
 *
 * This allows PF_* flags to be conveniently added, irrespective of the
 * current value, and then the old version restored with
 * memalloc_flags_restore().
 */
static inline unsigned memalloc_flags_save(unsigned flags)
{
	unsigned oldflags = ~current->flags & flags;
	current->flags |= flags;
	return oldflags;
}

static inline void memalloc_flags_restore(unsigned flags)
{
	current->flags &= ~flags;
}
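
/*
 * Sketch of the save/restore discipline (the memalloc_*_save() helpers
 * below are thin wrappers around exactly this):
 *
 *	unsigned int saved = memalloc_flags_save(PF_MEMALLOC_NOIO);
 *
 *	...allocations here implicitly behave as GFP_NOIO...
 *
 *	memalloc_flags_restore(saved);
 *
 * Since only the bits that were newly set are returned, nesting the same
 * flag is safe: an inner restore leaves the outer scope intact.
 */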

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with the
 * flags returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_noio_restore.
 */
static inline unsigned int memalloc_noio_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOIO);
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by the memalloc_noio_save
 * function. Always make sure that @flags is the return value from the
 * pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
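
/*
 * Typical use (illustrative; my_resume() and my_reinit_buffers() are
 * hypothetical): a block driver's resume path must not recurse into I/O
 * while the device cannot serve it:
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		unsigned int noio = memalloc_noio_save();
 *		int ret = my_reinit_buffers(dev);
 *
 *		memalloc_noio_restore(noio);
 *		return ret;
 *	}
 */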

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with the
 * flags returned by this function.
 *
 * Context: This function is safe to be used from any context.
 * Return: The saved flags to be passed to memalloc_nofs_restore.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_NOFS);
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by the memalloc_nofs_save
 * function. Always make sure that @flags is the return value from the
 * pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
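
/*
 * Typical use (illustrative; my_fs_alloc_inode() stands in for any
 * allocation done while holding a lock that the filesystem's reclaim
 * path might also take):
 *
 *	unsigned int nofs = memalloc_nofs_save();
 *
 *	inode = my_fs_alloc_inode(sb);
 *
 *	memalloc_nofs_restore(nofs);
 */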

/**
 * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
 *
 * This function marks the beginning of the __GFP_MEMALLOC allocation scope.
 * All further allocations will implicitly add the __GFP_MEMALLOC flag, which
 * prevents entering reclaim and allows access to all memory reserves. This
 * should only be used when the caller guarantees the allocation will allow
 * more memory to be freed very shortly, i.e. it needs to allocate some memory
 * in the process of freeing memory, and cannot reclaim due to potential
 * recursion.
 *
 * Users of this scope have to be extremely careful to not deplete the reserves
 * completely and implement a throttling mechanism which controls the
 * consumption of the reserve based on the amount of freed memory. Usage of a
 * pre-allocated pool (e.g. mempool) should be always considered before using
 * this scope.
 *
 * Individual allocations under the scope can opt out using __GFP_NOMEMALLOC.
 *
 * Context: This function should not be used in an interrupt context because
 *          PF_MEMALLOC does not grant access to reserves there.
 *          See __gfp_pfmemalloc_flags().
 * Return: The saved flags to be passed to memalloc_noreclaim_restore.
 */
static inline unsigned int memalloc_noreclaim_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC);
}

/**
 * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit __GFP_MEMALLOC scope started by the
 * memalloc_noreclaim_save function. Always make sure that @flags is the
 * return value from the pairing memalloc_noreclaim_save() call.
 */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
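
/*
 * Hedged sketch only: a memory-freeing path that must allocate a small
 * control structure before it can make progress might do
 *
 *	unsigned int noreclaim = memalloc_noreclaim_save();
 *
 *	req = kmalloc(sizeof(*req), GFP_NOWAIT);
 *
 *	memalloc_noreclaim_restore(noreclaim);
 *
 * subject to all the throttling caveats above; "req" is hypothetical.
 */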

/**
 * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
 *
 * This function marks the beginning of the ~__GFP_MOVABLE allocation scope.
 * All further allocations will implicitly remove the __GFP_MOVABLE flag, which
 * will constrain the allocations to zones that allow long term pinning, i.e.
 * not ZONE_MOVABLE zones.
 *
 * Return: The saved flags to be passed to memalloc_pin_restore.
 */
static inline unsigned int memalloc_pin_save(void)
{
	return memalloc_flags_save(PF_MEMALLOC_PIN);
}

/**
 * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit ~__GFP_MOVABLE scope started by the memalloc_pin_save
 * function. Always make sure that @flags is the return value from the
 * pairing memalloc_pin_save() call.
 */
static inline void memalloc_pin_restore(unsigned int flags)
{
	memalloc_flags_restore(flags);
}
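
/*
 * Sketch (illustrative): code about to long-term pin user pages can keep
 * auxiliary allocations out of ZONE_MOVABLE for the duration:
 *
 *	unsigned int pin_flags = memalloc_pin_save();
 *
 *	...allocate and pin...
 *
 *	memalloc_pin_restore(pin_flags);
 */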

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * Please make sure that the caller holds a reference to the passed memcg
 * structure, so that its lifetime is guaranteed to exceed the scope between
 * two set_active_memcg() calls.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
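
/*
 * Remote charging sketch (illustrative; "memcg" must be a referenced
 * struct mem_cgroup pointer and "size" is hypothetical):
 *
 *	struct mem_cgroup *old = set_active_memcg(memcg);
 *	void *buf = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *
 *	set_active_memcg(old);
 *
 * Only __GFP_ACCOUNT allocations inside the scope are redirected.
 */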

#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */