xref: /freebsd/sys/vm/vm_page.h (revision 39beb93c)
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */

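/*
 * Illustrative sketch (editorial, not part of the original header): the
 * valid and dirty maps track DEV_BSIZE-sized chunks independently, which
 * is what lets NFS validate and clean only the portion of a page that was
 * actually written.  The sketch assumes base and size are DEV_BSIZE-aligned,
 * assumes the VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK() macros from
 * <vm/vm_object.h>, and uses vm_page_set_validclean() and vm_page_bits(),
 * which are declared later in this file; the example_* name is hypothetical.
 */
#if 0
static void
example_partial_validate(vm_object_t object, vm_page_t m, int base, int size)
{

	VM_OBJECT_LOCK(object);
	/* Mark only the byte range [base, base + size) valid and clean. */
	vm_page_set_validclean(m, base, size);
	/* The dirty bits for that range are cleared; other chunks are untouched. */
	KASSERT((m->dirty & vm_page_bits(base, size)) == 0,
	    ("example_partial_validate: range still dirty"));
	VM_OBJECT_UNLOCK(object);
}
#endif
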
TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) 	*/
	struct vm_page *left;		/* splay tree link (O)		*/
	struct vm_page *right;		/* splay tree link (O)		*/

	vm_object_t object;		/* which object am I in (O,P)*/
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint8_t	queue;			/* page queue index */
	int8_t segind;			/* vm_phys segment index */
	u_short	flags;			/* see below */
	uint8_t	order;			/* index of the buddy queue */
	uint8_t pool;			/* vm_phys free pool index */
	u_short cow;			/* page cow mapping count */
	u_int wire_count;		/* wired down maps refs (P) */
	short hold_count;		/* page hold count */
	u_short oflags;			/* page flags (O) */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count (O) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	/* (see the illustrative sketch following this structure) */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 16384
	u_int valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_int dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 32768
	u_long valid;			/* map of valid DEV_BSIZE chunks (O) */
	u_long dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
};
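
/*
 * Illustrative sketch (editorial, not part of the original header): the
 * valid and dirty fields need one bit per DEV_BSIZE chunk, i.e.
 * PAGE_SIZE / DEV_BSIZE bits (4096 / 512 = 8, 8192 / 512 = 16, and so on),
 * which is why their width grows with PAGE_SIZE above.  vm_page_bits(),
 * declared below, converts a byte range within the page into the
 * corresponding bit mask; the example_* helper is hypothetical.
 */
#if 0
static int
example_chunk_is_valid(vm_page_t m, int offset)
{

	/* vm_page_bits(offset, 1) is the bit for the chunk containing offset. */
	return ((m->valid & vm_page_bits(offset, 1)) != 0);
}
#endif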

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 */
#define	VPO_BUSY	0x0001	/* page is in transit */
#define	VPO_WANTED	0x0002	/* someone is waiting for page */
#define	VPO_CLEANCHK	0x0100	/* page will be checked for cleaning */
#define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x0400	/* do not collect for syncer */
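
/*
 * Illustrative sketch (editorial, not part of the original header) of the
 * usual VPO_BUSY protocol: busy the page under the object lock before
 * starting I/O, and wake any VPO_WANTED waiters when the I/O is done.
 * vm_page_busy() and vm_page_wakeup() are declared below; the
 * VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK() macros are assumed to come from
 * <vm/vm_object.h>, and the example_* function is hypothetical.
 */
#if 0
static void
example_busy_protocol(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_LOCK(object);
	vm_page_busy(m);		/* sets VPO_BUSY; page is "in transit" */
	VM_OBJECT_UNLOCK(object);

	/* ... perform I/O on the page here ... */

	VM_OBJECT_LOCK(object);
	vm_page_wakeup(m);		/* clears VPO_BUSY, wakes waiters */
	VM_OBJECT_UNLOCK(object);
}
#endif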

#define PQ_NONE		0
#define	PQ_INACTIVE	1
#define	PQ_ACTIVE	2
#define	PQ_HOLD		3
#define	PQ_COUNT	4

/* Returns the real queue a page is on. */
#define VM_PAGE_GETQUEUE(m)	((m)->queue)

/* Returns the well known queue a page is on. */
#define VM_PAGE_GETKNOWNQUEUE2(m)	VM_PAGE_GETQUEUE(m)

/* Returns true if the page is in the named well known queue. */
#define VM_PAGE_INQUEUE2(m, q)	(VM_PAGE_GETKNOWNQUEUE2(m) == (q))

/* Sets the queue a page is on. */
#define VM_PAGE_SETQUEUE2(m, q)	(VM_PAGE_GETQUEUE(m) = (q))
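
/*
 * Illustrative sketch (editorial, not part of the original header): the
 * queue accessors above are normally used with the page queues lock held,
 * since the queue field is manipulated under that lock.  The
 * vm_page_lock_queues()/vm_page_unlock_queues() macros are defined in the
 * _KERNEL section below; the example_* function is hypothetical.
 */
#if 0
static int
example_is_inactive(vm_page_t m)
{
	int inactive;

	vm_page_lock_queues();
	inactive = VM_PAGE_INQUEUE2(m, PQ_INACTIVE);
	vm_page_unlock_queues();
	return (inactive);
}
#endif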

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
extern struct mtx vm_page_queue_free_mtx;

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * 	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_CACHED	0x0001		/* page is cached */
#define	PG_FREE		0x0002		/* page is free */
#define PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_UNMANAGED	0x0800		/* No PV management for page */
#define PG_MARKER	0x1000		/* special queue marker page */
#define	PG_SLAB		0x2000		/* object pointer is actually a slab */
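
/*
 * Illustrative sketch (editorial, not part of the original header): the
 * PG_* flags above live in the flags field and are changed with
 * vm_page_flag_set()/vm_page_flag_clear() (declared below), which expect
 * the page queues lock to be held.  The example_* function is hypothetical.
 */
#if 0
static void
example_mark_referenced(vm_page_t m)
{

	vm_page_lock_queues();
	vm_page_flag_set(m, PG_REFERENCED);	/* hint for the pageout daemon */
	vm_page_unlock_queues();
}
#endif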

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL

#include <vm/vm_param.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	cache
 *		Almost available for allocation. Still associated with
 *		an object, but clean and immediately freeable.
 *
 *	hold
 *		Will become free after a pending I/O operation
 *		completes.
 *
 * The following lists are LRU sorted:
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active" i.e. they have been
 *		recently referenced.
 *
 */
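
/*
 * Illustrative sketch (editorial, not part of the original header): pages
 * usually move between the active and inactive lists through
 * vm_page_activate() and vm_page_deactivate() (declared below) with the
 * page queues lock held.  The example_* function is hypothetical.
 */
#if 0
static void
example_note_usage(vm_page_t m, boolean_t recently_used)
{

	vm_page_lock_queues();
	if (recently_used)
		vm_page_activate(m);	/* move to the active list */
	else
		vm_page_deactivate(m);	/* make it a pageout candidate */
	vm_page_unlock_queues();
}
#endif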

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);

static __inline vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

static __inline vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
#ifdef VM_PHYSSEG_SPARSE
	return (vm_phys_paddr_to_vm_page(pa));
#elif defined(VM_PHYSSEG_DENSE)
	return (&vm_page_array[atop(pa) - first_page]);
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}
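
/*
 * Illustrative sketch (editorial, not part of the original header):
 * PHYS_TO_VM_PAGE() maps a physical address of managed RAM to its vm_page,
 * and VM_PAGE_TO_PHYS() recovers the page-aligned physical address.  The
 * example_* function is hypothetical; trunc_page() comes from the machine
 * parameter headers.
 */
#if 0
static vm_page_t
example_paddr_lookup(vm_paddr_t pa)
{
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(pa);
	KASSERT(VM_PAGE_TO_PHYS(m) == trunc_page(pa),
	    ("example_paddr_lookup: physical address mismatch"));
	return (m);
}
#endif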

extern struct mtx vm_page_queue_mtx;
#define vm_page_lock_queues()   mtx_lock(&vm_page_queue_mtx)
#define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx)

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
#endif
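
/*
 * Illustrative note (editorial, not part of the original header):
 * VM_PAGE_BITS_ALL has one bit set per DEV_BSIZE chunk of a page, so it
 * should correspond to vm_page_bits(0, PAGE_SIZE), and comparing the valid
 * map against it is the usual test for a fully valid page.  The example_*
 * function is hypothetical.
 */
#if 0
static int
example_page_fully_valid(vm_page_t m)
{

	return (m->valid == VM_PAGE_BITS_ALL);
}
#endif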

/* page allocation classes: */
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
/* page allocation flags: */
#define	VM_ALLOC_WIRED		0x0020	/* non pageable */
#define	VM_ALLOC_ZERO		0x0040	/* Try to obtain a zeroed page */
#define	VM_ALLOC_RETRY		0x0080	/* vm_page_grab() only */
#define	VM_ALLOC_NOOBJ		0x0100	/* No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* Do not busy the page */
#define	VM_ALLOC_IFCACHED	0x0400	/* Fail if the page is not cached */
#define	VM_ALLOC_IFNOTCACHED	0x0800	/* Fail if the page is cached */
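
/*
 * Illustrative sketch (editorial, not part of the original header): a
 * vm_page_alloc() request combines exactly one class with optional flags.
 * VM_ALLOC_ZERO is only a hint; PG_ZERO reports whether the page really
 * came pre-zeroed.  pmap_zero_page() is assumed from <vm/pmap.h>, the
 * caller is assumed to hold the object lock, and the example_* function
 * is hypothetical.
 */
#if 0
static vm_page_t
example_alloc_wired_zeroed(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);		/* no page available right now */
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);	/* zero it ourselves if needed */
	return (m);
}
#endif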

void vm_page_flag_set(vm_page_t m, unsigned short bits);
void vm_page_flag_clear(vm_page_t m, unsigned short bits);
void vm_page_busy(vm_page_t m);
void vm_page_flash(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);
void vm_page_dirty(vm_page_t m);
void vm_page_wakeup(vm_page_t m);

void vm_pageq_remove(vm_page_t m);

void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache (register vm_page_t);
void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_page_cache_remove(vm_page_t);
void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (register vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_requeue(vm_page_t m);
void vm_page_sleep(vm_page_t m, const char *msg);
vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);
void vm_page_cowfault (vm_page_t);
int vm_page_cowsetup(vm_page_t);
void vm_page_cowclear (vm_page_t);

/*
 *	vm_page_sleep_if_busy:
 *
 *	Sleep and release the page queues lock if VPO_BUSY is set or,
 *	if also_m_busy is TRUE, busy is non-zero.  Returns TRUE if the
 *	thread slept and the page queues lock was released.
 *	Otherwise, retains the page queues lock and returns FALSE.
 *
 *	The object containing the given page must be locked.
 */
static __inline int
vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
{

	if ((m->oflags & VPO_BUSY) || (also_m_busy && m->busy)) {
		vm_page_sleep(m, msg);
		return (TRUE);
	}
	return (FALSE);
}
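
/*
 * Illustrative sketch (editorial, not part of the original header) of the
 * common caller pattern: look the page up under the object lock and retry
 * the lookup whenever vm_page_sleep_if_busy() slept, since the page may
 * have changed identity while the lock was dropped.  The wait message and
 * the example_* function are hypothetical; VM_OBJECT_LOCK()/
 * VM_OBJECT_UNLOCK() are assumed to come from <vm/vm_object.h>.
 */
#if 0
static vm_page_t
example_lookup_unbusied(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK(object);
retry:
	m = vm_page_lookup(object, pindex);
	if (m != NULL && vm_page_sleep_if_busy(m, TRUE, "expgwt"))
		goto retry;		/* slept; look the page up again */
	VM_OBJECT_UNLOCK(object);
	return (m);
}
#endif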

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}
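
/*
 * Illustrative sketch (editorial, not part of the original header): because
 * vm_page_undirty() leaves the pmap-level modified bits alone, code that
 * wants a page to stay clean typically clears those first with
 * pmap_clear_modify() (from <vm/pmap.h>) so that a later
 * vm_page_test_dirty() does not re-dirty the page.  The example_* function
 * is hypothetical.
 */
#if 0
static void
example_mark_clean(vm_page_t m)
{

	pmap_clear_modify(m);	/* clear hardware/pmap modified state first */
	vm_page_undirty(m);	/* then clear the dirty map */
}
#endif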

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */