/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.25 2006/12/02 23:13:46 dillon Exp $
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define	_VM_VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#endif

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A red-black tree rooted in the object, used to
 *		quickly perform object/offset lookups and to
 *		enumerate the pages belonging to a given object.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct msf_buf;
struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare, vm_pindex_t);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P) */
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */

	struct vm_object *object;	/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine-dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	flags;			/* see below */
	u_short	pc;			/* page color */
	u_short	wire_count;		/* wired down maps refs (P) */
	short	hold_count;		/* page hold count */
	u_char	act_count;		/* page usage count */
	u_char	busy;			/* page busy count */

	/*
	 * NOTE: these fields must support one bit per DEV_BSIZE chunk in a
	 * page, so on normal x86 kernels they must be at least 8 bits wide.
	 */
#if PAGE_SIZE == 4096
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */
	u_char	unused1;
	u_char	unused2;
#elif PAGE_SIZE == 8192
	u_short	valid;			/* map of valid DEV_BSIZE chunks */
	u_short	dirty;			/* map of dirty DEV_BSIZE chunks */
#endif
	struct msf_buf *msf_hint;	/* first page of an msfbuf map */
};

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif

/*
 * Note: we currently use SWAPBLK_NONE as an absolute value rather than
 * a flag bit.
 */
#define SWAPBLK_MASK	((daddr_t)((u_daddr_t)-1 >> 1))		/* mask */
#define SWAPBLK_NONE	((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */

/*
 * Page coloring parameters.  We default to a middle-of-the-road optimization.
 * Larger values would not really hurt us, but on a machine without much
 * memory they could cause vm_page_alloc() to eat more CPU cycles looking
 * for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to most PQ
 * constants because they can change between builds.
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE 256	/* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors opt for 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors opt for 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors opt for 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors opt for 128k cache */

#else
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */

#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#endif /* KERNEL && !KLD_MODULE */

/*
 * The queue array is always based on PQ_MAXL2_SIZE regardless of the actual
 * cache size chosen in order to present a uniform interface for modules.
 */
#define PQ_MAXL2_SIZE	256	/* fixed maximum (in pages) / module compat */

#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_MAXL2_SIZE)
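
/*
 * Illustrative sketch (not part of the original header): as the constants
 * above show, PQ_FREE and PQ_CACHE are each the base of PQ_MAXL2_SIZE
 * per-color queues, while PQ_INACTIVE, PQ_ACTIVE and PQ_HOLD are single
 * queues.  A free page's queue entry is therefore assumed to be found by
 * adding its color:
 *
 *	struct vpgqueues *vpq = &vm_page_queues[PQ_FREE + m->pc];
 */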

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *	 not under PV management but otherwise should be treated as a
 *	 normal page.  Pages not under PV management cannot be paged out
 *	 via the object/vm_page_t because there is no knowledge of their
 *	 pte mappings, nor can they be removed from their objects via
 *	 the object, and such pages are also not on any PQ queue.
 */
#define	PG_BUSY		0x0001		/* page is in transit (O) */
#define	PG_WANTED	0x0002		/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x0004		/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x0008		/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x0010		/* page is mapped writeable */
#define PG_MAPPED	0x0020		/* page is mapped */
#define	PG_ZERO		0x0040		/* page is zeroed */
#define PG_REFERENCED	0x0080		/* page has been referenced */
#define PG_CLEANCHK	0x0100		/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x0200		/* swap I/O in progress on page */
#define PG_NOSYNC	0x0400		/* do not collect for syncer */
#define PG_UNMANAGED	0x0800		/* No PV management for page */
#define PG_MARKER	0x1000		/* special queue marker page */

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of several lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low-activity pages, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern int vm_page_zero_count;
extern struct vm_page *vm_page_array;	/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
		((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
		(&vm_page_array[atop(pa) - first_page])
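
/*
 * Example (illustrative sketch, not part of the original header):
 * vm_page_array is indexed by physical page number, so conversions
 * between a page and its physical address are O(1) in both directions:
 *
 *	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 *	vm_page_t  m2 = PHYS_TO_VM_PAGE(pa);	(m2 == m)
 */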

/*
 *	Functions implemented as inlines
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_short(&m->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_short(&m->flags, bits);
}

static __inline void
vm_page_busy(vm_page_t m)
{
	KASSERT((m->flags & PG_BUSY) == 0,
		("vm_page_busy: page already busy!!!"));
	vm_page_flag_set(m, PG_BUSY);
}

/*
 *	vm_page_flash:
 *
 *	Wake up anyone waiting for the page.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 * Clear the PG_BUSY flag and wake up anyone waiting for the page.  This
 * is typically the last call you make on a page before moving on to
 * other things.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_flag_clear(m, PG_BUSY);
	vm_page_flash(m);
}
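
/*
 * A minimal sketch of the hard-busy protocol (assumed typical usage,
 * not taken from this file).  The busying thread excludes other
 * manipulators for the duration, then wakes any waiters:
 *
 *	vm_page_busy(m);
 *	... manipulate the page ...
 *	vm_page_wakeup(m);
 */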

/*
 * These routines manipulate the 'soft busy' count for a page.  A soft busy
 * is almost like PG_BUSY except that it allows certain compatible operations
 * to occur on the page while it is busy.  For example, a page undergoing a
 * write can still be mapped read-only.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
	atomic_add_char(&m->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
	atomic_subtract_char(&m->busy, 1);
	if (m->busy == 0)
		vm_page_flash(m);
}
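
/*
 * Sketch of soft-busy bracketing around I/O (assumed typical usage):
 * the count keeps the page from being reused mid-transfer while still
 * permitting compatible access such as read-only mappings.
 *
 *	vm_page_io_start(m);
 *	... initiate and wait for I/O on the page ...
 *	vm_page_io_finish(m);	(wakes waiters when busy drops to 0)
 */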

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */
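
/*
 * Illustrative sketch of flag usage (not from the original file): grab
 * a possibly pre-zeroed page, blocking indefinitely until one becomes
 * available.  Per the note above, VM_ALLOC_RETRY requires VM_ALLOC_NORMAL:
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 */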

void vm_page_unhold(vm_page_t mem);
void vm_page_activate(vm_page_t);
vm_page_t vm_page_alloc(struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_grab(struct vm_object *, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
int vm_page_try_to_cache(vm_page_t);
int vm_page_try_to_free(vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate(vm_page_t);
void vm_page_insert(vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup(struct vm_object *, vm_pindex_t);
void vm_page_remove(vm_page_t);
void vm_page_rename(vm_page_t, struct vm_object *, vm_pindex_t);
vm_offset_t vm_page_startup(vm_offset_t);
vm_page_t vm_add_new_page(vm_paddr_t pa);
void vm_page_unmanage(vm_page_t);
void vm_page_unwire(vm_page_t, int);
void vm_page_wire(vm_page_t);
void vm_page_unqueue(vm_page_t);
void vm_page_unqueue_nowakeup(vm_page_t);
void vm_page_set_validclean(vm_page_t, int, int);
void vm_page_set_dirty(vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
int vm_page_bits(int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);

/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (See vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too.)
 *
 * This routine must be called while at splvm() or better.
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}
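
/*
 * Sketch of a hold/unhold pairing (assumed typical usage; vm_page_unhold()
 * is declared above).  crit_enter() is used here as an assumed stand-in
 * for the "splvm() or better" requirement noted above:
 *
 *	crit_enter();
 *	vm_page_hold(m);
 *	crit_exit();
 *	... inspect the page ...
 *	vm_page_unhold(m);
 */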

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it is then effectively a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a
 * copy-on-write page or needs to be frozen for write I/O) in order to
 * force a fault, or to force a page's dirty bits to be synchronized
 * and avoid hardware (modified/accessed) bit update races with pmap
 * changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up
 * optimizing out the primary conditional.
 */
522 vm_page_protect(vm_page_t mem, int prot)
523 {
524 	if (prot == VM_PROT_NONE) {
525 		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
526 			pmap_page_protect(mem, VM_PROT_NONE);
527 			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
528 		}
529 	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
530 		pmap_page_protect(mem, VM_PROT_READ);
531 		vm_page_flag_clear(mem, PG_WRITEABLE);
532 	}
533 }
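
/*
 * Example (illustrative only): downgrade a page to read-only before
 * treating it as copy-on-write so that a later write takes a fault,
 * or remove its user mappings entirely:
 *
 *	vm_page_protect(m, VM_PROT_READ);	(write-protect)
 *	vm_page_protect(m, VM_PROT_NONE);	(remove all user mappings)
 */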

/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zeroed out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * The clearing of PG_ZERO is a temporary safety until the code can be
 * reviewed to determine that PG_ZERO is being properly cleared on
 * write faults or maps.  PG_ZERO was previously cleared in
 * vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Wait until the page is no longer PG_BUSY, or (if also_m_busy is TRUE)
 * until m->busy is zero.  Returns TRUE if it had to sleep (including if
 * it almost had to sleep and made temporary spl*() mods), FALSE
 * otherwise.
 *
 * This routine assumes that interrupts can only remove the busy
 * status from a page, not set the busy status or change it from
 * PG_BUSY to m->busy or vice versa (which would create a timing
 * window).
 *
 * Note: as an inline, 'also_m_busy' is usually a constant and well
 * optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
		crit_enter();
		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
			/*
			 * Page is busy.  Wait and retry.
			 */
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			tsleep(m, 0, msg, 0);
		}
		crit_exit();
		return(TRUE);
		/* not reached */
	}
	return(FALSE);
}
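
/*
 * A minimal sketch of the usual lookup-and-busy retry loop (assumed
 * typical usage, not taken from this file).  After a sleep the page
 * may have changed identity, so it is looked up again:
 *
 *	while ((m = vm_page_lookup(object, pindex)) != NULL) {
 *		if (vm_page_sleep_busy(m, TRUE, "pgslp") == FALSE) {
 *			vm_page_busy(m);
 *			break;
 *		}
 *	}
 */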

/*
 * Make a page fully dirty.
 */
static __inline void
_vm_page_dirty(vm_page_t m, const char *info)
{
#ifdef INVARIANTS
	int pqtype = m->queue - m->pc;
#endif
	/*
	 * pqtype is only referenced by KASSERT(), which compiles away
	 * when INVARIANTS is not defined.
	 */
	KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
		("vm_page_dirty: page in free/cache queue!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

#define vm_page_dirty(m)	_vm_page_dirty(m, __FUNCTION__)

/*
 * Set the page to not be dirty.  Note: this does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */