/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.26 2007/07/02 15:57:48 dillon Exp $
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define	_VM_VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#endif

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of the following structures:
 *
 *		A red-black tree rooted in the object the
 *		page belongs to, used to quickly perform
 *		object/offset lookups and to scan the pages
 *		of an object, e.g. at deallocation time.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct msf_buf;
struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare, vm_pindex_t);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */

	struct vm_object *object;	/* which object am I in (O,P)*/
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	flags;			/* see below */
	u_short	pc;			/* page color */
	u_short wire_count;		/* wired down maps refs (P) */
	int 	hold_count;		/* page hold count */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */

	/*
	 * NOTE: These fields must support one bit per DEV_BSIZE chunk
	 * in a page, so on normal x86 kernels they must be at least
	 * 8 bits wide.
	 */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
	struct msf_buf *msf_hint; 	/* first page of an msfbuf map */
};

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif

/*
 * note: we currently use SWAPBLK_NONE as an absolute value rather than
 * a flag bit.
 */
#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */
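
/*
 * Example (illustrative sketch, not part of this header): because
 * SWAPBLK_NONE is an absolute value rather than a flag bit, a swap
 * block assignment is tested with a direct comparison:
 *
 *	daddr_t blk;
 *
 *	blk = ...;			(swap pager metadata lookup)
 *	if (blk == SWAPBLK_NONE)
 *		(no swap block is assigned to this page index)
 */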

/*
 * Page coloring parameters.  We default to a middle of the road optimization.
 * Larger selections would not really hurt us but if a machine does not have
 * a lot of memory it could cause vm_page_alloc() to eat more cpu cycles
 * looking for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to most PQ
 * constants because they can change between builds.
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE 256	/* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors opt for 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors opt for 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors opt for 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors opt for 128k cache */

#else
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */

#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#endif /* KERNEL && !KLD_MODULE */

/*
 * The queue array is always based on PQ_MAXL2_SIZE regardless of the actual
 * cache size chosen in order to present a uniform interface for modules.
 */
#define PQ_MAXL2_SIZE	256	/* fixed maximum (in pages) / module compat */

#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_MAXL2_SIZE)
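
/*
 * Example (illustrative sketch, not part of this header): PQ_FREE and
 * PQ_CACHE each span PQ_MAXL2_SIZE consecutive queues, one per page
 * color, while PQ_INACTIVE, PQ_ACTIVE, and PQ_HOLD are single queues.
 * A page's exact queue index is stored in m->queue, so the free queue
 * for a page of color 'pc' would be located as:
 *
 *	struct vpgqueues *vpq;
 *
 *	vpq = &vm_page_queues[PQ_FREE + (pc & PQ_L2_MASK)];
 *
 * (vm_page_queues[] is declared below.)
 */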

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
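
/*
 * Example (illustrative sketch, not part of this header): each vpgqueues
 * entry carries a TAILQ of pages ('pl') plus bookkeeping counters, so a
 * queue can be walked with the standard TAILQ iterators:
 *
 *	vm_page_t m;
 *
 *	TAILQ_FOREACH(m, &vm_page_queues[PQ_INACTIVE].pl, pageq) {
 *		(inspect m; the page queues must be appropriately
 *		 spl/crit protected while iterating)
 *	}
 */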

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * 	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x0200		/* swap I/O in progress on page	     */
#define PG_NOSYNC	0x0400		/* do not collect for syncer */
#define PG_UNMANAGED	0x0800		/* No PV management for page */
#define PG_MARKER	0x1000		/* special queue marker page */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
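
/*
 * Example (illustrative sketch, not part of this header): the ACT_*
 * constants tune the per-page activity counter.  A new page starts at
 * ACT_INIT; referenced pages ratchet up by ACT_ADVANCE and idle pages
 * decay by ACT_DECLINE, clamped to [0, ACT_MAX], roughly:
 *
 *	if (referenced) {
 *		if (m->act_count < ACT_MAX - ACT_ADVANCE)
 *			m->act_count += ACT_ADVANCE;
 *	} else if (m->act_count > ACT_DECLINE) {
 *		m->act_count -= ACT_DECLINE;
 *	}
 *
 * The authoritative logic lives in the pageout daemon (vm_pageout.c).
 */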

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 */

extern int vm_page_zero_count;
extern struct vm_page *vm_page_array;	/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
		((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
		(&vm_page_array[atop(pa) - first_page])
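
/*
 * Example (illustrative sketch, not part of this header): these macros
 * convert between a vm_page and its physical address.  Because
 * vm_page_array[] is indexed by physical page number relative to
 * first_page, the conversions round-trip for any managed page:
 *
 *	vm_page_t m;
 *	vm_paddr_t pa;
 *
 *	pa = VM_PAGE_TO_PHYS(m);
 *	KKASSERT(PHYS_TO_VM_PAGE(pa) == m);
 */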

/*
 *	Functions implemented as inlines.
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&m->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&m->flags, bits);
}

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0,
		("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 *	vm_page_flash:
 *
 *	Wakeup anyone waiting for the page.
 */

static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 * Clear the PG_BUSY flag and wakeup anyone waiting for the page.  This
 * is typically the last call you make on a page before moving on to
 * other things.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}

/*
 * These routines manipulate the 'soft busy' count for a page.  A soft busy
 * is almost like PG_BUSY except that it allows certain compatible operations
 * to occur on the page while it is busy.  For example, a page undergoing a
 * write can still be mapped read-only.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&m->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}
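
/*
 * Example (illustrative sketch, not part of this header): a pager
 * typically soft-busies a page around I/O so compatible access can
 * continue, then flashes any waiters when the count drops to zero:
 *
 *	vm_page_io_start(m);	(bump m->busy before starting the write)
 *	(issue the write; the page may remain mapped read-only)
 *	vm_page_io_finish(m);	(drop m->busy; wakes waiters at zero)
 */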

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */
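
/*
 * Example (illustrative sketch, not part of this header): a typical
 * allocation that is willing to block until a page is available and
 * prefers pre-zeroed memory:
 *
 *	vm_page_t m;
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *	(the returned page is busied; release it with vm_page_wakeup(m)
 *	 when done)
 *
 * VM_ALLOC_RETRY is legal here because VM_ALLOC_NORMAL is also given.
 */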

void vm_page_unhold(vm_page_t mem);
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
vm_offset_t vm_page_startup (vm_offset_t);
vm_page_t vm_add_new_page (vm_paddr_t pa);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);
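
/*
 * Example (illustrative sketch, not part of this header): the valid and
 * dirty maps track DEV_BSIZE (512 byte) chunks, which is how NFS
 * implements piecemeal writes.  Marking the first 1KB of a page valid
 * and clean, then checking it:
 *
 *	vm_page_set_validclean(m, 0, 1024);
 *	if (vm_page_is_valid(m, 0, 1024)) {
 *		(the first two DEV_BSIZE chunks are now valid)
 *	}
 *
 * vm_page_bits(base, size) computes the corresponding chunk bitmask.
 */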

/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (See vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too.)
 *
 * This routine must be called while at splvm() or better.
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}
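
/*
 * Example (illustrative sketch, not part of this header): a hold/unhold
 * pair brackets a short section where the page must not be recycled out
 * from under the caller:
 *
 *	crit_enter();			(splvm() or better)
 *	vm_page_hold(m);
 *	crit_exit();
 *	(examine or copy the page contents)
 *	vm_page_unhold(m);
 */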

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it is effectively a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}
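
/*
 * Example (illustrative sketch, not part of this header): before writing
 * a dirty page out, the pageout path downgrades it to read-only so the
 * dirty state cannot change underneath the I/O, roughly:
 *
 *	vm_page_test_dirty(m);			(pull pmap modified bits)
 *	vm_page_protect(m, VM_PROT_READ);	(freeze for write I/O)
 *	(issue the write if m->dirty is set)
 */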

/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zero'd out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * The clearing of PG_ZERO is a temporary safety until the code can be
 * reviewed to determine that PG_ZERO is being properly cleared on
 * write faults or maps.  PG_ZERO was previously cleared in
 * vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Wait until the page is no longer PG_BUSY, or (if also_m_busy is TRUE)
 * until m->busy is zero.  Returns TRUE if it had to sleep (including if
 * it almost had to sleep and made temporary spl*() mods), FALSE
 * otherwise.
 *
 * This routine assumes that interrupts can only remove the busy
 * status from a page, not set the busy status or change it from
 * PG_BUSY to m->busy or vice versa (which would create a timing
 * window).
 *
 * Note: as an inline, 'also_m_busy' is usually a constant and well
 * optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		crit_enter();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy.  Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, 0, msg, 0);
		}
		crit_exit();
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}
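
/*
 * Example (illustrative sketch, not part of this header): the canonical
 * lookup-and-busy pattern retries after any sleep, since the page can
 * change identity while the caller is blocked:
 *
 *	vm_page_t m;
 *
 *	for (;;) {
 *		m = vm_page_lookup(object, pindex);
 *		if (m == NULL)
 *			break;
 *		if (!vm_page_sleep_busy(m, TRUE, "pgbusy")) {
 *			vm_page_busy(m);
 *			break;
 *		}
 *	}
 *	(on success m is busied; pair with vm_page_wakeup(m))
 */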

/*
 * Make the page fully dirty.
 */
static __inline void
_vm_page_dirty(vm_page_t m, const char *info)
{
#ifdef INVARIANTS
	int pqtype = m->queue - m->pc;
#endif
	KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
		("vm_page_dirty: page in free/cache queue!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

#define vm_page_dirty(m)	_vm_page_dirty(m, __FUNCTION__)

/*
 * Set the page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */