/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef	_VM_PAGEQUEUE_
#define	_VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	const char	* const pq_name;
	uint64_t	pq_pdpages;
} __aligned(CACHE_LINE_SIZE);

#if __SIZEOF_LONG__ == 8
#define	VM_BATCHQUEUE_SIZE	63
#else
#define	VM_BATCHQUEUE_SIZE	15
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>
struct sysctl_oid;

/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to process,
 * it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *       ---
 *        |
 *        |-> vmd_inactive_target (~3%)
 *        |   - The active queue scan target is given by
 *        |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *        |
 *        |
 *        |-> vmd_free_target (~2%)
 *        |   - Target for page reclamation.
 *        |
 *        |-> vmd_pageout_wakeup_thresh (~1.8%)
 *        |   - Threshold for waking up the page daemon.
 *        |
 *        |
 *        |-> vmd_free_min (~0.5%)
 *        |   - First low memory threshold.
 *        |   - Causes per-CPU caching to be lazily disabled in UMA.
 *        |   - vm_wait() sleeps below this threshold.
 *        |
 *        |-> vmd_free_severe (~0.25%)
 *        |   - Second low memory threshold.
 *        |   - Triggers aggressive UMA reclamation, disables delayed buffer
 *        |     writes.
 *        |
 *        |-> vmd_free_reserved (~0.13%)
 *        |   - Minimum for VM_ALLOC_NORMAL page allocations.
 *        |-> vmd_pageout_free_min (32 + 2 pages)
 *        |   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *        |-> vmd_interrupt_free_min (2 pages)
 *        |   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *       ---
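 *
 *  As a rough worked example (hypothetical round numbers derived from the
 *  percentages above, not measured defaults): a domain with 1,000,000 pages
 *  (~4GB of 4KB pages) would have vmd_inactive_target ~= 30,000 pages,
 *  vmd_free_target ~= 20,000, vmd_pageout_wakeup_thresh ~= 18,000,
 *  vmd_free_min ~= 5,000, vmd_free_severe ~= 2,500, and
 *  vmd_free_reserved ~= 1,300.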
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to keep the free page count above the free page
 * target.  It wakes up periodically (every 100ms) and feeds the current free
 * page shortage (free_target - free_count) into a PID controller, which in
 * response outputs the number of pages to attempt to reclaim.  The shortage's
 * current magnitude, rate of change, and cumulative value are together used to
 * determine the controller's output.  The page daemon target thus adapts
 * dynamically to the system's demand for free pages, resulting in less
 * burstiness than a simple hysteresis loop.
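 *
 * Schematically (a sketch of the control law, not the literal pidctrl(9)
 * implementation), with e(t) = free_target - free_count at each 100ms tick:
 *
 *	output(t) = Kp * e(t) + Ki * sum(e(0..t)) + Kd * (e(t) - e(t-1))
 *
 * where the proportional, integral, and derivative gains Kp, Ki, and Kd are
 * tuning constants; the three terms correspond to the shortage's magnitude,
 * cumulative value, and rate of change.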
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps ensure
 * that the system responds promptly to a large instantaneous free page
 * shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that it
 * can respond promptly to a sudden free page shortage.  In particular, the page
 * daemon thread aggressively scans active pages so long as the following
 * condition holds:
 *
 *         len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
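 *
 * Continuing the hypothetical example above: with inactive_target = 30,000,
 * free_target = 20,000, and free_count = 20,000, the free-count terms cancel
 * and the active queue is scanned aggressively whenever len(I) + len(L)
 * falls below 30,000 pages.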
 *
 * The per-domain laundry thread periodically launders dirty pages based on the
 * number of clean pages freed by the page daemon since the last laundering.  If
 * the page daemon fails to meet its scan target (i.e., the PID controller
 * output) because of a shortage of clean inactive pages, the laundry thread
 * attempts to launder enough pages to meet the free page target.
 *
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation can
 * claim any free page.  This priority is used in the pmap layer when attempting
 * to allocate a page for the kernel page tables; in such cases an allocation
 * failure will usually result in a kernel panic.  The system priority is used
 * for most other kernel memory allocations, for instance by UMA's slab
 * allocator or the buffer cache.  Such allocations will fail if the free count
 * is below interrupt_free_min.  All other allocations occur at the normal
 * priority, which is typically used for allocation of user pages, for instance
 * in the page fault handler or when allocating page table pages or pv_entry
 * structures for user pmaps.  Such allocations fail if the free count is below
 * the free_reserved threshold.
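 *
 * In other words (a sketch of the admission check, not the literal
 * vm_domain_allocate() implementation):
 *
 *	limit = (priority == VM_ALLOC_INTERRUPT) ? 0 :
 *	        (priority == VM_ALLOC_SYSTEM) ? interrupt_free_min :
 *	        free_reserved;
 *
 * and an allocation of npages may proceed only if
 * free_count - npages >= limit.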
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count rises
 * above the free_min threshold; the page daemon and laundry threads are given
 * priority and will wake up once free_count reaches the (much smaller)
 * pageout_free_min threshold.
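 *
 * A typical consumer pattern looks roughly like the following sketch, where
 * the object and pindex are illustrative placeholders:
 *
 *	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait(object);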
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where the
 * free page count is above the free_min threshold.  This means that given the
 * choice between two NUMA domains, one above the free_min threshold and one
 * below, the former will be used to satisfy the allocation request regardless
 * of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure (i.e.,
 * vmd_free_count < vmd_free_target).  This allows kernel subsystems to register
 * for notifications of free page shortages, upon which they may shrink their
 * caches.  Following a vm_lowmem event, UMA's caches are pruned to ensure that
 * they do not contain an excess of unused memory.  When a domain is below the
 * free_min threshold, UMA limits the population of per-CPU caches.  When a
 * domain falls below the free_severe threshold, UMA's caches are completely
 * drained.
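 *
 * A subsystem can subscribe to the event via EVENTHANDLER(9); a sketch, where
 * foo_lowmem_handler() is a hypothetical callback that releases cached pages:
 *
 *	static void
 *	foo_lowmem_handler(void *arg __unused, int flags __unused)
 *	{
 *		... release cached pages ...
 *	}
 *	EVENTHANDLER_REGISTER(vm_lowmem, foo_lowmem_handler, NULL,
 *	    EVENTHANDLER_PRI_ANY);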
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL in a
 * last-ditch attempt to free up some pages.  Either of the two following
 * conditions will activate the OOM killer:
 *
 *  1. The page daemons collectively fail to reclaim any pages during their
 *     inactive queue scans.  After vm_pageout_oom_seq consecutive scans fail,
 *     the page daemon thread votes for an OOM kill, and an OOM kill is
 *     triggered when all page daemons have voted.  This heuristic is strict and
 *     may fail to trigger even when the system is effectively deadlocked.
 *
 *  2. Threads in the user fault handler are repeatedly unable to make progress
 *     while allocating a page to satisfy the fault.  After
 *     vm_pfault_oom_attempts page allocation failures with intervening
 *     vm_wait() calls, the faulting thread will trigger an OOM kill.
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int domain;
		int pool;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used within single threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	boolean_t vmd_oom;
	u_int vmd_inactive_threads;
	u_int vmd_inactive_shortage;		/* Per-thread shortage. */
	blockcount_t vmd_inactive_running;	/* Number of inactive threads. */
	blockcount_t vmd_inactive_starting;	/* Number of threads started. */
	volatile u_int vmd_addl_shortage;	/* Shortage accumulator. */
	volatile u_int vmd_inactive_freed;	/* Successful inactive frees. */
	volatile u_int vmd_inactive_us;		/* Microseconds for above. */
	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2]; /* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);

extern struct vm_domain vm_dom[MAXMEMDOM];

#define	VM_DOMAIN(n)		(&vm_dom[(n)])
#define	VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define	vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define	vm_domain_free_assert_locked(n)					\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define	vm_domain_free_assert_unlocked(n)				\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_free_lock(d)						\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_lockptr(d)					\
	    (&(d)->vmd_free_mtx)
#define	vm_domain_free_trylock(d)					\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define	vm_domain_free_unlock(d)					\
	    mtx_unlock(vm_domain_free_lockptr((d)))

#define	vm_domain_pageout_lockptr(d)					\
	    (&(d)->vmd_pageout_mtx)
#define	vm_domain_pageout_assert_locked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define	vm_domain_pageout_assert_unlocked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define	vm_domain_pageout_lock(d)					\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define	vm_domain_pageout_unlock(d)					\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

/*
 * Attempt to add a page to the batch queue.  Returns the number of slots that
 * were free before the insertion; a return value of zero thus indicates that
 * the queue was full and the page was not inserted.
 */
static inline int
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{
	int slots_free;

	slots_free = nitems(bq->bq_pa) - bq->bq_cnt;
	if (slots_free > 0)
		bq->bq_pa[bq->bq_cnt++] = m;
	return (slots_free);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
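
/*
 * Example (an illustrative sketch, not part of the FreeBSD API): drain a
 * batch queue into a page queue under a single lock acquisition, mirroring
 * how batch queues amortize page queue lock contention.
 */
static inline void
vm_batchqueue_example_drain(struct vm_batchqueue *bq, struct vm_pagequeue *pq)
{
	vm_page_t m;

	vm_pagequeue_lock(pq);
	while ((m = vm_batchqueue_pop(bq)) != NULL) {
		/* Mirror vm_pagequeue_remove(): link the page and count it. */
		TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
		vm_pagequeue_cnt_inc(pq);
	}
	vm_pagequeue_unlock(pq);
}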

void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);

/*
 *	vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free up or cache.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;
	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}

#endif	/* _KERNEL */
#endif				/* !_VM_PAGEQUEUE_ */