/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define	_VM_VM_PAGE_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#include <machine/atomic.h>

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif	/* _KERNEL */

typedef enum vm_page_event { VMEVENT_NONE, VMEVENT_COW } vm_page_event_t;

struct vm_page_action {
	LIST_ENTRY(vm_page_action) entry;
	struct vm_page		*m;
	vm_page_event_t		event;
	void			(*func)(struct vm_page *,
					struct vm_page_action *);
	void			*data;
};

typedef struct vm_page_action *vm_page_action_t;
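
/*
 * Illustrative sketch only (not part of the API contract): a caller
 * interested in copy-on-write events on a page might fill in an action
 * and register it, then unregister it when done:
 *
 *	static void my_cow_callback(struct vm_page *m,
 *				    struct vm_page_action *ap);
 *
 *	struct vm_page_action act;
 *
 *	act.m = m;
 *	act.event = VMEVENT_COW;
 *	act.func = my_cow_callback;
 *	act.data = NULL;
 *	vm_page_register_action(&act, VMEVENT_COW);
 *	...
 *	vm_page_unregister_action(&act);
 *
 * vm_page_register_action() and vm_page_unregister_action() are
 * declared later in this file; my_cow_callback is a hypothetical name.
 */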

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A red-black tree rooted in the object,
 *		used to quickly perform object/offset lookups.
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */
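
/*
 * For example (illustrative only), a filesystem that has written 1K
 * at byte offset 2048 of a 4K page can mark just those DEV_BSIZE
 * chunks valid and dirty, leaving the rest of the page untouched:
 *
 *	vm_page_set_validdirty(m, 2048, 1024);
 *
 * vm_page_set_validdirty() is declared later in this file.
 */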

TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare, vm_pindex_t);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */
	struct spinlock	spin;

	struct vm_object *object;	/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	pc;			/* page color */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */
	u_char	pat_mode;		/* hardware page attribute */
	u_char	unused02;
	u_int32_t flags;		/* see below */
	u_int	wire_count;		/* wired down maps refs (P) */
	int	hold_count;		/* page hold count */

	/*
	 * NOTE: these fields must support one bit per DEV_BSIZE chunk in
	 * a page, so on standard x86 kernels (4K pages, 512-byte DEV_BSIZE)
	 * they must be at least 8 bits wide.
	 */
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */

	int	ku_pagecnt;		/* kmalloc helper */
#ifdef VM_PAGE_DEBUG
	const char *busy_func;
	int	busy_line;
#endif
};

#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif
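
/*
 * For example, with VM_PAGE_DEBUG defined, a prototype written as
 *
 *	int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
 *			int also_m_busy VM_PAGE_DEBUG_ARGS);
 *
 * expands to
 *
 *	int vm_page_busy_try_debug(vm_page_t m,
 *			int also_m_busy, const char *func, int lineno);
 *
 * The wrapper macros at the bottom of this file then supply __func__
 * and __LINE__ at each call site.
 */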
193 
194 #ifndef __VM_PAGE_T_DEFINED__
195 #define __VM_PAGE_T_DEFINED__
196 typedef struct vm_page *vm_page_t;
197 #endif

/*
 * Page coloring parameters.  We use generous parameters designed to
 * statistically spread pages over available cpu cache space.  This has
 * become less important over time as modern caches are far more
 * associative, but we still use the core algorithm to help reduce
 * lock contention between cpus.
 *
 * Page coloring cannot be disabled.
 */

#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 512	/* Number of page colors, tuned for a 1MB cache */
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		(1 + 0*PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(1 + 3*PQ_L2_SIZE)
#define PQ_HOLD		(1 + 4*PQ_L2_SIZE)
#define PQ_COUNT	(1 + 5*PQ_L2_SIZE)
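
/*
 * Each logical queue is thus an array of PQ_L2_SIZE per-color queues.
 * Illustrative sketch only: a page with color m->pc sitting on the
 * free queue would be found at
 *
 *	struct vpgqueues *vpq;
 *
 *	vpq = &vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)];
 *
 * (vm_page_queues[] is declared below.)
 */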

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	int		count;
	int		unused01;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct spinlock spin;
	struct pglist pl;
	int	cnt_offset;	/* offset into vmstats structure (int) */
	int	lcnt;
	int	flipflop;	/* probably not the best place */
	int	unused00;
	int	unused01;
	char	unused[64 - sizeof(struct pglist) -
			sizeof(int *) - sizeof(int) * 4];
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

/*
 * These are the flags defined for vm_page.
 *
 *  PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *  not under PV management but otherwise should be treated as a
 *  normal page.  Pages not under PV management cannot be paged out
 *  via the object/vm_page_t because there is no knowledge of their
 *  pte mappings, nor can they be removed from their objects via
 *  the object, and such pages are also not on any PQ queue.  The
 *  PG_MAPPED and PG_WRITEABLE flags are not applicable.
 *
 *  PG_MAPPED only applies to managed pages, indicating whether the page
 *  is mapped onto one or more pmaps.  A page might still be mapped to
 *  special pmaps in an unmanaged fashion, for example when mapped into a
 *  buffer cache buffer, without setting PG_MAPPED.
 *
 *  PG_WRITEABLE indicates that there may be a writeable managed pmap entry
 *  somewhere, and that the page can be dirtied by hardware at any time
 *  and may have to be tested for that.  The modified bit in unmanaged
 *  mappings or in the special clean map is not tested.
 *
 *  PG_SWAPPED indicates that the page is backed by a swap block.  Any
 *  VM object type other than OBJT_DEFAULT can have swap-backed pages now.
 *
 *  PG_SBUSY is set when m->busy != 0.  PG_SBUSY and m->busy are only
 *  modified when the page is PG_BUSY.
 */
#define	PG_BUSY		0x00000001	/* page is in transit (O) */
#define	PG_WANTED	0x00000002	/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x00000008	/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x00000010	/* page is writeable */
#define PG_MAPPED	0x00000020	/* page is mapped (managed) */
#define	PG_UNUSED0040	0x00000040
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x00000200	/* swap I/O in progress on page */
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNMANAGED	0x00000800	/* No PV management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_ACTIONLIST	0x00010000	/* lookaside action list present */
#define PG_SBUSY	0x00020000	/* soft-busy also set */
#define PG_NEED_COMMIT	0x00040000	/* clean page requires commit */

#define PG_KEEP_NEWPAGE_MASK	(PG_BUSY | PG_SBUSY |		\
				 PG_WANTED | PG_ACTIONLIST)
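
/*
 * Illustrative sketch only: flag bits are updated with the atomic
 * operations from <machine/atomic.h> (included above), since other
 * cpus may manipulate unrelated flag bits concurrently, e.g.:
 *
 *	atomic_set_int(&m->flags, PG_REFERENCED);
 *	...
 *	if (m->flags & PG_REFERENCED)
 *		...
 */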

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation. Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern struct vm_page *vm_page_array;	/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
		((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
		(&vm_page_array[atop(pa) - first_page])
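
/*
 * Illustrative sketch only: for physical addresses covered by the
 * resident page table these two macros are inverses,
 *
 *	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 *	vm_page_t m2 = PHYS_TO_VM_PAGE(pa);	(yields m2 == m)
 *
 * PHYS_TO_VM_PAGE() does no range checking; pa must fall within the
 * managed physical memory described by vm_page_array[].
 */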

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x0001	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x0002	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x0004	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x0008	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_QUICK		0x0010	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x0020	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x0040	/* ok to return NULL on collision */
#define	VM_ALLOC_RETRY		0x0080	/* indefinite block (vm_page_grab()) */
#define VM_ALLOC_USE_GD		0x0100	/* use per-gd cache */
#define VM_ALLOC_CPU_SPEC	0x0200	/* cpu encoded in high bits, below */

#define VM_ALLOC_CPU_SHIFT	16
#define VM_ALLOC_CPU(n)		(((n) << VM_ALLOC_CPU_SHIFT) | \
				 VM_ALLOC_CPU_SPEC)
#define VM_ALLOC_GETCPU(flags)	((flags) >> VM_ALLOC_CPU_SHIFT)
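
/*
 * Illustrative sketch only: a typical blocking allocation goes through
 * vm_page_grab(), which returns the page busied (PG_BUSY); the caller
 * releases it with vm_page_wakeup() when done:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *	... initialize or map the page ...
 *	vm_page_wakeup(m);
 *
 * VM_ALLOC_CPU(n) may be or'd in to encode a target cpu into the
 * flags (see VM_ALLOC_GETCPU()).
 */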

void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);

void vm_page_init(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_need_commit(vm_page_t m);
void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate (vm_page_t);

vm_size_t vm_contig_avail_pages(void);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
		     unsigned long alignment, unsigned long boundary,
		     unsigned long size, vm_memattr_t memattr);

vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_locked (vm_page_t);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
		struct vm_object *, vm_pindex_t, int, const char *
		VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
		struct vm_object *, vm_pindex_t, int, int *
		VM_PAGE_DEBUG_ARGS);
vm_page_t vm_page_repurpose(struct vm_object *, vm_pindex_t, int, int *,
		vm_page_t, int *, int *);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup (void);
void vm_numa_organize(vm_paddr_t ran_beg, vm_paddr_t bytes, int physid);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
vm_page_t vm_page_next (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_validdirty (vm_page_t, int, int);
void vm_page_set_valid (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_contig(vm_page_t m, unsigned long size);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_event_internal(vm_page_t, vm_page_event_t);
void vm_page_dirty(vm_page_t m);
void vm_page_register_action(vm_page_action_t action, vm_page_event_t event);
void vm_page_unregister_action(vm_page_action_t action);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
			int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
			int also_m_busy VM_PAGE_DEBUG_ARGS);
u_short vm_get_pg_color(int cpuid, vm_object_t object, vm_pindex_t pindex);
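
/*
 * Illustrative sketch only of the busy protocol: a page is typically
 * looked up and hard-busied in one operation, worked on, and then
 * released with vm_page_wakeup():
 *
 *	vm_page_t m;
 *
 *	m = vm_page_lookup_busy_wait(object, pindex, TRUE, "vmpg");
 *	if (m) {
 *		... page is exclusively busied here ...
 *		vm_page_wakeup(m);
 *	}
 *
 * The "vmpg" wmesg string is arbitrary; it merely names the sleep for
 * debugging tools.
 */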

#ifdef VM_PAGE_DEBUG

#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
					__func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
					__func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)

#endif	/* VM_PAGE_DEBUG */

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */