/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2017 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define	_VM_VM_PAGE_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#include <machine/atomic.h>

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif	/* _KERNEL */

/*
 * vm_page structure
 *
 * hard-busy: (PBUSY_LOCKED)
 *
 *	Hard-busying a page allows major manipulation of the page structure.
 *	No new soft-busies can accumulate while a page is hard-busied.  The
 *	page busying code typically waits for all soft-busies to drop before
 *	allowing the hard-busy.
 *
 * soft-busy: (PBUSY_MASK)
 *
 *	Soft-busying a page typically indicates I/O or read-only use of
 *	the content.  A page can have multiple soft-busies on it.  New
 *	soft-busies block on any hard-busied page (wait for the hard-busy
 *	to go away).
 *
 * hold_count
 *
 *	This prevents a page from being freed.  This does not prevent any
 *	other operation.  The page may still be disassociated from its
 *	object and essentially scrapped.  It just won't be reused while
 *	a non-zero hold_count is present.
 *
 * wire_count
 *
 *	This indicates that the page has been wired into memory somewhere
 *	(typically a buffer cache buffer, or a user wire).  The pageout
 *	daemon will skip wired pages.
 */
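
/*
 * Illustrative sketch (not a complete recipe): typical pairing of the
 * hold and busy interfaces declared later in this file.  A hold only
 * keeps the page from being freed; a hard-busy must be acquired before
 * manipulating the page structure and is released with vm_page_wakeup().
 * The wait message string is arbitrary.
 *
 *	vm_page_hold(m);
 *	...				page cannot be freed or reused here
 *	vm_page_unhold(m);
 *
 *	vm_page_busy_wait(m, FALSE, "pgbsy");
 *	...				page structure may be manipulated
 *	vm_page_wakeup(m);
 */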
TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry,
	      rb_vm_page_compare, vm_pindex_t);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */
	struct spinlock	spin;
	struct vm_object *object;	/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint16_t queue;			/* page queue index */
	uint16_t pc;			/* page color */
	uint8_t	act_count;		/* page usage count */
	uint8_t	pat_mode;		/* hardware page attribute */
	uint8_t	valid;			/* map of valid DEV_BSIZE chunks */
	uint8_t	dirty;			/* map of dirty DEV_BSIZE chunks */
	uint32_t flags;			/* see below */
	uint32_t wire_count;		/* wired down maps refs (P) */
	uint32_t busy_count;		/* soft-busy and hard-busy */
	int	hold_count;		/* page hold count */
	int	ku_pagecnt;		/* kmalloc helper */
#ifdef VM_PAGE_DEBUG
	const char *busy_func;
	int	busy_line;
#endif
};

#define PBUSY_LOCKED		0x80000000U
#define PBUSY_WANTED		0x40000000U
#define PBUSY_SWAPINPROG	0x20000000U
#define PBUSY_MASK		0x1FFFFFFFU
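
/*
 * Illustrative sketch: busy_count packs the hard-busy state bits and
 * the soft-busy count into a single 32 bit field, so both can be
 * tested with simple bit operations:
 *
 *	(m->busy_count & PBUSY_LOCKED) != 0	page is hard-busied
 *	(m->busy_count & PBUSY_MASK) != 0	one or more soft-busies
 */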

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif

/*
 * Page coloring parameters.  We use generous parameters designed to
 * statistically spread pages over available cpu cache space.  This has
 * become less important as modern cpu caches have grown more highly
 * set-associative, but we still use the core algorithm to help reduce
 * lock contention between cpus.
 *
 * Page coloring cannot be disabled.
 *
 * In today's world of many-core systems, we must be able to provide enough VM
 * page queues for each logical cpu thread to cover the L1/L2/L3 cache set
 * associativity.  If we don't, the cpu caches will not be properly utilized.
 * Using 2048 allows 8-way set-assoc with 256 logical cpus.
 */
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 2048	/* Must be enough for maximal ncpus x hw set-assoc */
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		(1 + 0*PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(1 + 3*PQ_L2_SIZE)
#define PQ_HOLD		(1 + 4*PQ_L2_SIZE)
#define PQ_COUNT	(1 + 5*PQ_L2_SIZE)
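
/*
 * Illustrative sketch: each base queue above is really an array of
 * PQ_L2_SIZE queues selected by page color, so a page's effective
 * queue index is computed along the lines of:
 *
 *	m->queue = PQ_FREE + (m->pc & PQ_L2_MASK);
 */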

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	int		count;
	int		unused01;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);
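
/*
 * Illustrative sketch (the callback name and the object field are
 * assumptions for illustration only): a scan typically fills in an
 * rb_vm_page_scan_info and walks the object's page tree with the
 * RB_SCAN() interface from <sys/tree.h>:
 *
 *	struct rb_vm_page_scan_info info;
 *
 *	info.start_pindex = start;
 *	info.end_pindex = end;
 *	info.error = 0;
 *	RB_SCAN(vm_page_rb_tree, &object->rb_memq, rb_vm_page_scancmp,
 *		my_page_callback, &info);
 */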

struct vpgqueues {
	struct spinlock spin;
	struct pglist pl;
	int	cnt_offset;	/* offset into vmstats structure (int) */
	int	lcnt;
	int	flipflop;	/* probably not the best place */
	int	unused00;
	int	unused01;
	char	unused[64 - sizeof(struct pglist) -
			sizeof(int *) - sizeof(int) * 4];
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
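
/*
 * Illustrative sketch: each queue carries its own spinlock, and the
 * helpers declared below lock the queue a page currently belongs to,
 * along the lines of:
 *
 *	vm_page_queue_spin_lock(m);
 *	TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
 *	vm_page_queue_spin_unlock(m);
 */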

/*
 * These are the flags defined for vm_page.
 *
 *  PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *  not under PV management but otherwise should be treated as a
 *  normal page.  Pages not under PV management cannot be paged out
 *  via the object/vm_page_t because there is no knowledge of their
 *  pte mappings, nor can they be removed from their objects via
 *  the object, and such pages are also not on any PQ queue.  The
 *  PG_MAPPED and PG_WRITEABLE flags are not applicable.
 *
 *  PG_MAPPED only applies to managed pages, indicating whether the page
 *  is mapped onto one or more pmaps.  A page might still be mapped to
 *  special pmaps in an unmanaged fashion, for example when mapped into a
 *  buffer cache buffer, without setting PG_MAPPED.
 *
 *  PG_WRITEABLE indicates that there may be a writeable managed pmap entry
 *  somewhere, and that the page can be dirtied by hardware at any time
 *  and may have to be tested for that.  The modified bit in unmanaged
 *  mappings or in the special clean map is not tested.
 *
 *  PG_SWAPPED indicates that the page is backed by a swap block.  Any
 *  VM object type other than OBJT_DEFAULT can have swap-backed pages now.
 */
#define	PG_UNUSED0001	0x00000001
#define	PG_UNUSED0002	0x00000002
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x00000008	/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x00000010	/* page is writeable */
#define PG_MAPPED	0x00000020	/* page is mapped (managed) */
#define	PG_UNUSED0040	0x00000040
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_UNUSED0200	0x00000200
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNMANAGED	0x00000800	/* No PV management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_UNUSED10000	0x00010000
#define PG_UNUSED20000	0x00020000
#define PG_NEED_COMMIT	0x00040000	/* clean page requires commit */

#define PG_KEEP_NEWPAGE_MASK	(0)
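
/*
 * Illustrative sketch: the flags are tested with plain bit operations.
 * For instance, a managed page that may have a writeable pmap entry
 * somewhere can be checked for hardware-set dirty bits:
 *
 *	if ((m->flags & (PG_MAPPED | PG_WRITEABLE)) ==
 *	    (PG_MAPPED | PG_WRITEABLE)) {
 *		vm_page_test_dirty(m);
 *	}
 */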

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
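
/*
 * Illustrative sketch of the general idea (not lifted from the page
 * daemon): act_count starts at ACT_INIT, is advanced when the page is
 * referenced and declined when it is not, clamped to ACT_MAX:
 *
 *	if (m->act_count < ACT_MAX - ACT_ADVANCE)
 *		m->act_count += ACT_ADVANCE;
 *	...
 *	if (m->act_count > ACT_DECLINE)
 *		m->act_count -= ACT_DECLINE;
 */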

#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif
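
/*
 * Illustrative expansion (derived from the macros above): with
 * VM_PAGE_DEBUG defined, a prototype such as
 *
 *	void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
 *			int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
 *
 * becomes
 *
 *	void vm_page_busy_wait_debug(vm_page_t m, int also_m_busy,
 *			const char *wmsg, const char *func, int lineno);
 */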

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern struct vm_page *vm_page_array;	/* First resident page in table */
extern vm_pindex_t vm_page_array_size;	/* number of vm_page_t's */
extern vm_pindex_t first_page;		/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
		((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
		(&vm_page_array[atop(pa) - first_page])
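
/*
 * Illustrative sketch: for pages backed by vm_page_array the two
 * macros are inverses of each other:
 *
 *	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 *	vm_page_t  m2 = PHYS_TO_VM_PAGE(pa);	(m2 == m)
 */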

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif
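
/*
 * Illustrative sketch: valid and dirty are bitmaps of DEV_BSIZE
 * (512 byte) chunks, so a 4096 byte page has eight bits and a fully
 * valid page has m->valid == VM_PAGE_BITS_ALL.  vm_page_bits()
 * (declared below) computes the mask covering a byte range, e.g.
 * vm_page_bits(0, 1024) == 0x03.
 */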

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x0001	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x0002	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x0004	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x0008	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_QUICK		0x0010	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x0020	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x0040	/* ok to return NULL on collision */
#define	VM_ALLOC_RETRY		0x0080	/* indefinite block (vm_page_grab()) */
#define VM_ALLOC_USE_GD		0x0100	/* use per-gd cache */
#define VM_ALLOC_CPU_SPEC	0x0200

#define VM_ALLOC_CPU_SHIFT	16
#define VM_ALLOC_CPU(n)		(((n) << VM_ALLOC_CPU_SHIFT) | \
				 VM_ALLOC_CPU_SPEC)
#define VM_ALLOC_GETCPU(flags)	((flags) >> VM_ALLOC_CPU_SHIFT)
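
/*
 * Illustrative sketch: a typical allocation, and the blocking
 * vm_page_grab() variant, which requires VM_ALLOC_RETRY together
 * with VM_ALLOC_NORMAL:
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 */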

void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);

void vm_page_init(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_need_commit(vm_page_t m);
void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate (vm_page_t);

vm_size_t vm_contig_avail_pages(void);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
		     unsigned long alignment, unsigned long boundary,
		     unsigned long size, vm_memattr_t memattr);

vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_locked (vm_page_t);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);

vm_page_t vm_page_hash_get(vm_object_t object, vm_pindex_t pindex);

vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup_sbusy_try(struct vm_object *object,
		vm_pindex_t pindex, int pgoff, int pgbytes);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
		struct vm_object *, vm_pindex_t, int, const char *
		VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
		struct vm_object *, vm_pindex_t, int, int *
		VM_PAGE_DEBUG_ARGS);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup (void);
void vm_numa_organize(vm_paddr_t ran_beg, vm_paddr_t bytes, int physid);
void vm_numa_organize_finalize(void);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
vm_page_t vm_page_next (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_validdirty (vm_page_t, int, int);
void vm_page_set_valid (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_contig(vm_page_t m, unsigned long size);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_dirty(vm_page_t m);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
int vm_page_sbusy_try(vm_page_t m);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
			int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
			int also_m_busy VM_PAGE_DEBUG_ARGS);
u_short vm_get_pg_color(int cpuid, vm_object_t object, vm_pindex_t pindex);

#ifdef VM_PAGE_DEBUG

#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
					__func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
					__func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)

#endif
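
/*
 * Illustrative sketch (the error handling shown is an assumption for
 * illustration, not lifted from kernel code): a non-blocking lookup
 * plus busy, where the error output is set when the page exists but
 * could not be hard-busied:
 *
 *	int error;
 *
 *	m = vm_page_lookup_busy_try(object, pindex, FALSE, &error);
 *	if (m && error == 0) {
 *		...			(page is hard-busied here)
 *		vm_page_wakeup(m);
 *	}
 */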

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */