xref: /dragonfly/sys/vm/vm_page.c (revision 38b5d46c)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 1991 Regents of the University of California.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * The Mach Operating System project at Carnegie-Mellon University.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
35  * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
36  */
37 
38 /*
39  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40  * All rights reserved.
41  *
42  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43  *
44  * Permission to use, copy, modify and distribute this software and
45  * its documentation is hereby granted, provided that both the copyright
46  * notice and this permission notice appear in all copies of the
47  * software, derivative works or modified versions, and any portions
48  * thereof, and that both notices appear in supporting documentation.
49  *
50  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53  *
54  * Carnegie Mellon requests users of this software to return to
55  *
56  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57  *  School of Computer Science
58  *  Carnegie Mellon University
59  *  Pittsburgh PA 15213-3890
60  *
61  * any improvements or extensions that they make and grant Carnegie the
62  * rights to redistribute these changes.
63  */
64 /*
65  * Resident memory management module.  The module manipulates 'VM pages'.
66  * A VM page is the core building block for memory management.
67  */
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/malloc.h>
72 #include <sys/proc.h>
73 #include <sys/vmmeter.h>
74 #include <sys/vnode.h>
75 #include <sys/kernel.h>
76 #include <sys/alist.h>
77 #include <sys/sysctl.h>
78 #include <sys/cpu_topology.h>
79 
80 #include <vm/vm.h>
81 #include <vm/vm_param.h>
82 #include <sys/lock.h>
83 #include <vm/vm_kern.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_pageout.h>
89 #include <vm/vm_pager.h>
90 #include <vm/vm_extern.h>
91 #include <vm/swap_pager.h>
92 
93 #include <machine/inttypes.h>
94 #include <machine/md_var.h>
95 #include <machine/specialreg.h>
96 
97 #include <vm/vm_page2.h>
98 #include <sys/spinlock2.h>
99 
100 /*
101  * Action hash for user umtx support.
102  */
103 #define VMACTION_HSIZE		256
104 #define VMACTION_HMASK		(VMACTION_HSIZE - 1)
105 
106 /*
107  * SET - Minimum required set associative size, must be a power of 2.  We
108  *	 want this to match or exceed the set associativity of the cpu.
109  *
110  * GRP - A larger set that allows bleed-over into the domains of other
111  *	 nearby cpus.  Also must be a power of 2.  Used by the page zeroing
112  *	 code to smooth things out a bit.
113  */
114 #define PQ_SET_ASSOC		16
115 #define PQ_SET_ASSOC_MASK	(PQ_SET_ASSOC - 1)
116 
117 #define PQ_GRP_ASSOC		(PQ_SET_ASSOC * 2)
118 #define PQ_GRP_ASSOC_MASK	(PQ_GRP_ASSOC - 1)
119 
120 static void vm_page_queue_init(void);
121 static void vm_page_free_wakeup(void);
122 static vm_page_t vm_page_select_cache(u_short pg_color);
123 static vm_page_t _vm_page_list_find2(int basequeue, int index);
124 static void _vm_page_deactivate_locked(vm_page_t m, int athead);
125 
126 /*
127  * Array of tailq lists
128  */
129 __cachealign struct vpgqueues vm_page_queues[PQ_COUNT];
130 
131 LIST_HEAD(vm_page_action_list, vm_page_action);
132 struct vm_page_action_list	action_list[VMACTION_HSIZE];
133 static volatile int vm_pages_waiting;
134 
135 static struct alist vm_contig_alist;
136 static struct almeta vm_contig_ameta[ALIST_RECORDS_65536];
137 static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER(&vm_contig_spin, "vm_contig_spin");
138 
139 static u_long vm_dma_reserved = 0;
140 TUNABLE_ULONG("vm.dma_reserved", &vm_dma_reserved);
141 SYSCTL_ULONG(_vm, OID_AUTO, dma_reserved, CTLFLAG_RD, &vm_dma_reserved, 0,
142 	    "Memory reserved for DMA");
143 SYSCTL_UINT(_vm, OID_AUTO, dma_free_pages, CTLFLAG_RD,
144 	    &vm_contig_alist.bl_free, 0, "Memory remaining for DMA");
145 
146 static int vm_contig_verbose = 0;
147 TUNABLE_INT("vm.contig_verbose", &vm_contig_verbose);
148 
149 RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
150 	     vm_pindex_t, pindex);
151 
152 static void
153 vm_page_queue_init(void)
154 {
155 	int i;
156 
157 	for (i = 0; i < PQ_L2_SIZE; i++)
158 		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
159 	for (i = 0; i < PQ_L2_SIZE; i++)
160 		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;
161 	for (i = 0; i < PQ_L2_SIZE; i++)
162 		vm_page_queues[PQ_INACTIVE+i].cnt = &vmstats.v_inactive_count;
163 	for (i = 0; i < PQ_L2_SIZE; i++)
164 		vm_page_queues[PQ_ACTIVE+i].cnt = &vmstats.v_active_count;
165 	for (i = 0; i < PQ_L2_SIZE; i++)
166 		vm_page_queues[PQ_HOLD+i].cnt = &vmstats.v_active_count;
167 	/* PQ_NONE has no queue */
168 
169 	for (i = 0; i < PQ_COUNT; i++) {
170 		TAILQ_INIT(&vm_page_queues[i].pl);
171 		spin_init(&vm_page_queues[i].spin, "vm_page_queue_init");
172 	}
173 
174 	for (i = 0; i < VMACTION_HSIZE; i++)
175 		LIST_INIT(&action_list[i]);
176 }
177 
178 /*
179  * note: place in initialized data section?  Is this necessary?
180  */
181 long first_page = 0;
182 int vm_page_array_size = 0;
183 vm_page_t vm_page_array = NULL;
184 vm_paddr_t vm_low_phys_reserved;
185 
186 /*
187  * (low level boot)
188  *
189  * Sets the page size, perhaps based upon the memory size.
190  * Must be called before any use of page-size dependent functions.
191  */
192 void
193 vm_set_page_size(void)
194 {
195 	if (vmstats.v_page_size == 0)
196 		vmstats.v_page_size = PAGE_SIZE;
197 	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
198 		panic("vm_set_page_size: page size not a power of two");
199 }
200 
201 /*
202  * (low level boot)
203  *
204  * Add a new page to the freelist for use by the system.  New pages
205  * are added to both the head and tail of the associated free page
206  * queue in a bottom-up fashion, so both zero'd and non-zero'd page
207  * requests pull 'recent' adds (higher physical addresses) first.
208  *
209  * Beware that the page zeroing daemon will also be running soon after
210  * boot, moving pages from the head to the tail of the PQ_FREE queues.
211  *
212  * Must be called in a critical section.
213  */
214 static void
215 vm_add_new_page(vm_paddr_t pa)
216 {
217 	struct vpgqueues *vpq;
218 	vm_page_t m;
219 
220 	m = PHYS_TO_VM_PAGE(pa);
221 	m->phys_addr = pa;
222 	m->flags = 0;
223 	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
224 	m->pat_mode = PAT_WRITE_BACK;
225 	/*
226 	 * Twist for cpu localization in addition to page coloring, so
227 	 * different cpus selecting by m->queue get different page colors.
228 	 */
229 	m->pc ^= ((pa >> PAGE_SHIFT) / PQ_L2_SIZE) & PQ_L2_MASK;
230 	m->pc ^= ((pa >> PAGE_SHIFT) / (PQ_L2_SIZE * PQ_L2_SIZE)) & PQ_L2_MASK;
231 	/*
232 	 * Reserve a certain number of contiguous low memory pages for
233 	 * contigmalloc() to use.
234 	 */
235 	if (pa < vm_low_phys_reserved) {
236 		atomic_add_int(&vmstats.v_page_count, 1);
237 		atomic_add_int(&vmstats.v_dma_pages, 1);
238 		m->queue = PQ_NONE;
239 		m->wire_count = 1;
240 		atomic_add_int(&vmstats.v_wire_count, 1);
241 		alist_free(&vm_contig_alist, pa >> PAGE_SHIFT, 1);
242 		return;
243 	}
244 
245 	/*
246 	 * General page
247 	 */
248 	m->queue = m->pc + PQ_FREE;
249 	KKASSERT(m->dirty == 0);
250 
251 	atomic_add_int(&vmstats.v_page_count, 1);
252 	atomic_add_int(&vmstats.v_free_count, 1);
253 	vpq = &vm_page_queues[m->queue];
254 	TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
255 	++vpq->lcnt;
256 }
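
/*
 * Illustrative sketch of the color twist above (hedged example; the values
 * assume PAGE_SHIFT == 12 and PQ_L2_MASK == 255).  A page at physical
 * address 0x12345000 gets:
 *
 *	pn = 0x12345000 >> 12		= 0x12345
 *	pc = 0x12345 & 255		= 0x45
 *	pc ^= (0x12345 / 256) & 255	(= 0x23)  -> 0x66
 *	pc ^= (0x12345 / 65536) & 255	(= 0x01)  -> 0x67
 *
 * Pages exactly PQ_L2_SIZE (or PQ_L2_SIZE^2) apart thus no longer share a
 * color, spreading large consecutive runs across the per-cpu sets.  The
 * page is then queued on vm_page_queues[PQ_FREE + pc], unless it falls in
 * the low-memory DMA reserve handled above.
 */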
257 
258 /*
259  * (low level boot)
260  *
261  * Initializes the resident memory module.
262  *
263  * Preallocates memory for critical VM structures and arrays prior to
264  * kernel_map becoming available.
265  *
266  * Memory is allocated from (virtual2_start, virtual2_end) if available,
267  * otherwise memory is allocated from (virtual_start, virtual_end).
268  *
269  * On x86-64 (virtual_start, virtual_end) is only 2GB and may not be
270  * large enough to hold vm_page_array & other structures for machines with
271  * large amounts of ram, so we want to use virtual2* when available.
272  */
273 void
274 vm_page_startup(void)
275 {
276 	vm_offset_t vaddr = virtual2_start ? virtual2_start : virtual_start;
277 	vm_offset_t mapped;
278 	vm_size_t npages;
279 	vm_paddr_t page_range;
280 	vm_paddr_t new_end;
281 	int i;
282 	vm_paddr_t pa;
283 	int nblocks;
284 	vm_paddr_t last_pa;
285 	vm_paddr_t end;
286 	vm_paddr_t biggestone, biggestsize;
287 	vm_paddr_t total;
288 
289 	total = 0;
290 	biggestsize = 0;
291 	biggestone = 0;
292 	nblocks = 0;
293 	vaddr = round_page(vaddr);
294 
295 	for (i = 0; phys_avail[i + 1]; i += 2) {
296 		phys_avail[i] = round_page64(phys_avail[i]);
297 		phys_avail[i + 1] = trunc_page64(phys_avail[i + 1]);
298 	}
299 
300 	for (i = 0; phys_avail[i + 1]; i += 2) {
301 		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
302 
303 		if (size > biggestsize) {
304 			biggestone = i;
305 			biggestsize = size;
306 		}
307 		++nblocks;
308 		total += size;
309 	}
310 
311 	end = phys_avail[biggestone+1];
312 	end = trunc_page(end);
313 
314 	/*
315 	 * Initialize the queue headers for the free queue, the active queue
316 	 * and the inactive queue.
317 	 */
318 	vm_page_queue_init();
319 
320 #if !defined(_KERNEL_VIRTUAL)
321 	/*
322 	 * VKERNELs don't support minidumps and as such don't need
323 	 * vm_page_dump
324 	 *
325 	 * Allocate a bitmap to indicate that a random physical page
326 	 * needs to be included in a minidump.
327 	 *
328 	 * The amd64 port needs this to indicate which direct map pages
329 	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
330 	 *
331 	 * However, i386 still needs this workspace internally within the
332 	 * minidump code.  In theory, they are not needed on i386, but are
333 	 * included should the sf_buf code decide to use them.
334 	 */
335 	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
336 	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
337 	end -= vm_page_dump_size;
338 	vm_page_dump = (void *)pmap_map(&vaddr, end, end + vm_page_dump_size,
339 	    VM_PROT_READ | VM_PROT_WRITE);
340 	bzero((void *)vm_page_dump, vm_page_dump_size);
341 #endif
342 	/*
343 	 * Compute the number of pages of memory that will be available for
344 	 * use (taking into account the overhead of a page structure per
345 	 * page).
346 	 */
347 	first_page = phys_avail[0] / PAGE_SIZE;
348 	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
349 	npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;
350 
351 #ifndef _KERNEL_VIRTUAL
352 	/*
353 	 * (only applies to real kernels)
354 	 *
355 	 * Reserve a large amount of low memory for potential 32-bit DMA
356 	 * space allocations.  Once device initialization is complete we
357 	 * release most of it, but keep (vm_dma_reserved) memory reserved
358 	 * for later use.  Typically for X / graphics.  Through trial and
359  * error we find that GPUs usually require ~60-100MB or so.
360 	 *
361 	 * By default, 128M is left in reserve on machines with 2G+ of ram.
362 	 */
363 	vm_low_phys_reserved = (vm_paddr_t)65536 << PAGE_SHIFT;
364 	if (vm_low_phys_reserved > total / 4)
365 		vm_low_phys_reserved = total / 4;
366 	if (vm_dma_reserved == 0) {
367 		vm_dma_reserved = 128 * 1024 * 1024;	/* 128MB */
368 		if (vm_dma_reserved > total / 16)
369 			vm_dma_reserved = total / 16;
370 	}
371 #endif
372 	alist_init(&vm_contig_alist, 65536, vm_contig_ameta,
373 		   ALIST_RECORDS_65536);
374 
375 	/*
376 	 * Initialize the mem entry structures now, and put them in the free
377 	 * queue.
378 	 */
379 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
380 	mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE);
381 	vm_page_array = (vm_page_t)mapped;
382 
383 #if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
384 	/*
385 	 * since pmap_map on amd64 returns stuff out of a direct-map region,
386 	 * we have to manually add these pages to the minidump tracking so
387 	 * that they can be dumped, including the vm_page_array.
388 	 */
389 	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
390 		dump_add_page(pa);
391 #endif
392 
393 	/*
394 	 * Clear all of the page structures
395 	 */
396 	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
397 	vm_page_array_size = page_range;
398 
399 	/*
400 	 * Construct the free queue(s) in ascending order (by physical
401 	 * address) so that the first 16MB of physical memory is allocated
402 	 * last rather than first.  On large-memory machines, this avoids
403 	 * the exhaustion of low physical memory before isa_dmainit has run.
404 	 */
405 	vmstats.v_page_count = 0;
406 	vmstats.v_free_count = 0;
407 	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
408 		pa = phys_avail[i];
409 		if (i == biggestone)
410 			last_pa = new_end;
411 		else
412 			last_pa = phys_avail[i + 1];
413 		while (pa < last_pa && npages-- > 0) {
414 			vm_add_new_page(pa);
415 			pa += PAGE_SIZE;
416 		}
417 	}
418 	if (virtual2_start)
419 		virtual2_start = vaddr;
420 	else
421 		virtual_start = vaddr;
422 }
423 
424 /*
425  * We tended to reserve a ton of memory for contigmalloc().  Now that most
426  * drivers have initialized we want to return most of the remaining free
427  * reserve back to the VM page queues so they can be used for normal
428  * allocations.
429  *
430  * We leave vm_dma_reserved bytes worth of free pages in the reserve pool.
431  */
432 static void
433 vm_page_startup_finish(void *dummy __unused)
434 {
435 	alist_blk_t blk;
436 	alist_blk_t rblk;
437 	alist_blk_t count;
438 	alist_blk_t xcount;
439 	alist_blk_t bfree;
440 	vm_page_t m;
441 
442 	spin_lock(&vm_contig_spin);
443 	for (;;) {
444 		bfree = alist_free_info(&vm_contig_alist, &blk, &count);
445 		if (bfree <= vm_dma_reserved / PAGE_SIZE)
446 			break;
447 		if (count == 0)
448 			break;
449 
450 		/*
451 		 * Figure out how much of the initial reserve we have to
452 		 * free in order to reach our target.
453 		 */
454 		bfree -= vm_dma_reserved / PAGE_SIZE;
455 		if (count > bfree) {
456 			blk += count - bfree;
457 			count = bfree;
458 		}
459 
460 		/*
461 		 * Calculate the nearest power of 2 <= count.
462 		 */
463 		for (xcount = 1; xcount <= count; xcount <<= 1)
464 			;
465 		xcount >>= 1;
466 		blk += count - xcount;
467 		count = xcount;
468 
469 		/*
470 		 * Allocate the pages from the alist, then free them to
471 		 * the normal VM page queues.
472 		 *
473 		 * Pages allocated from the alist are wired.  We have to
474 		 * busy, unwire, and free them.  We must also adjust
475 		 * vm_low_phys_reserved before freeing any pages to prevent
476 		 * confusion.
477 		 */
478 		rblk = alist_alloc(&vm_contig_alist, blk, count);
479 		if (rblk != blk) {
480 			kprintf("vm_page_startup_finish: Unable to return "
481 				"dma space @0x%08x/%d -> 0x%08x\n",
482 				blk, count, rblk);
483 			break;
484 		}
485 		atomic_add_int(&vmstats.v_dma_pages, -count);
486 		spin_unlock(&vm_contig_spin);
487 
488 		m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
489 		vm_low_phys_reserved = VM_PAGE_TO_PHYS(m);
490 		while (count) {
491 			vm_page_busy_wait(m, FALSE, "cpgfr");
492 			vm_page_unwire(m, 0);
493 			vm_page_free(m);
494 			--count;
495 			++m;
496 		}
497 		spin_lock(&vm_contig_spin);
498 	}
499 	spin_unlock(&vm_contig_spin);
500 
501 	/*
502 	 * Print out how much DMA space drivers have already allocated and
503 	 * how much is left over.
504 	 */
505 	kprintf("DMA space used: %jdk, remaining available: %jdk\n",
506 		(intmax_t)(vmstats.v_dma_pages - vm_contig_alist.bl_free) *
507 		(PAGE_SIZE / 1024),
508 		(intmax_t)vm_contig_alist.bl_free * (PAGE_SIZE / 1024));
509 }
510 SYSINIT(vm_pgend, SI_SUB_PROC0_POST, SI_ORDER_ANY,
511 	vm_page_startup_finish, NULL);
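
/*
 * Worked example of the power-of-2 trimming above (hedged, illustrative
 * only, presumably because the alist works in power-of-2 chunks).  Suppose
 * alist_free_info() reports a free run of count = 100 blocks at blk after
 * the reserve adjustment:
 *
 *	for (xcount = 1; xcount <= 100; xcount <<= 1)
 *		;			xcount ends at 128
 *	xcount >>= 1;			xcount = 64
 *	blk += 100 - 64;		keep the last 64 blocks of the run
 *	count = 64;
 *
 * Only the trailing, power-of-2 sized piece is pulled out of the alist
 * this pass; whatever remains is picked up on a later iteration of the
 * loop.
 */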
512 
513 
514 /*
515  * Scan comparison function for Red-Black tree scans.  An inclusive
516  * (start,end) is expected.  Other fields are not used.
517  */
518 int
519 rb_vm_page_scancmp(struct vm_page *p, void *data)
520 {
521 	struct rb_vm_page_scan_info *info = data;
522 
523 	if (p->pindex < info->start_pindex)
524 		return(-1);
525 	if (p->pindex > info->end_pindex)
526 		return(1);
527 	return(0);
528 }
529 
530 int
531 rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
532 {
533 	if (p1->pindex < p2->pindex)
534 		return(-1);
535 	if (p1->pindex > p2->pindex)
536 		return(1);
537 	return(0);
538 }
539 
540 void
541 vm_page_init(vm_page_t m)
542 {
543 	/* do nothing for now.  Called from pmap_page_init() */
544 }
545 
546 /*
547  * Each page queue has its own spin lock, which is fairly optimal, at
548  * least for allocating and freeing pages.
549  *
550  * The caller must hold the vm_page_spin_lock() before locking a vm_page's
551  * queue spinlock via this function.  Also note that m->queue cannot change
552  * unless both the page and queue are locked.
553  */
554 static __inline
555 void
556 _vm_page_queue_spin_lock(vm_page_t m)
557 {
558 	u_short queue;
559 
560 	queue = m->queue;
561 	if (queue != PQ_NONE) {
562 		spin_lock(&vm_page_queues[queue].spin);
563 		KKASSERT(queue == m->queue);
564 	}
565 }
566 
567 static __inline
568 void
569 _vm_page_queue_spin_unlock(vm_page_t m)
570 {
571 	u_short queue;
572 
573 	queue = m->queue;
574 	cpu_ccfence();
575 	if (queue != PQ_NONE)
576 		spin_unlock(&vm_page_queues[queue].spin);
577 }
578 
579 static __inline
580 void
581 _vm_page_queues_spin_lock(u_short queue)
582 {
583 	cpu_ccfence();
584 	if (queue != PQ_NONE)
585 		spin_lock(&vm_page_queues[queue].spin);
586 }
587 
588 
589 static __inline
590 void
591 _vm_page_queues_spin_unlock(u_short queue)
592 {
593 	cpu_ccfence();
594 	if (queue != PQ_NONE)
595 		spin_unlock(&vm_page_queues[queue].spin);
596 }
597 
598 void
599 vm_page_queue_spin_lock(vm_page_t m)
600 {
601 	_vm_page_queue_spin_lock(m);
602 }
603 
604 void
605 vm_page_queues_spin_lock(u_short queue)
606 {
607 	_vm_page_queues_spin_lock(queue);
608 }
609 
610 void
611 vm_page_queue_spin_unlock(vm_page_t m)
612 {
613 	_vm_page_queue_spin_unlock(m);
614 }
615 
616 void
617 vm_page_queues_spin_unlock(u_short queue)
618 {
619 	_vm_page_queues_spin_unlock(queue);
620 }
621 
622 /*
623  * This locks the specified vm_page and its queue in the proper order
624  * (page first, then queue).  The queue may change so the caller must
625  * recheck on return.
626  */
627 static __inline
628 void
629 _vm_page_and_queue_spin_lock(vm_page_t m)
630 {
631 	vm_page_spin_lock(m);
632 	_vm_page_queue_spin_lock(m);
633 }
634 
635 static __inline
636 void
637 _vm_page_and_queue_spin_unlock(vm_page_t m)
638 {
639 	_vm_page_queues_spin_unlock(m->queue);
640 	vm_page_spin_unlock(m);
641 }
642 
643 void
644 vm_page_and_queue_spin_unlock(vm_page_t m)
645 {
646 	_vm_page_and_queue_spin_unlock(m);
647 }
648 
649 void
650 vm_page_and_queue_spin_lock(vm_page_t m)
651 {
652 	_vm_page_and_queue_spin_lock(m);
653 }
654 
655 /*
656  * Helper function removes vm_page from its current queue.
657  * Returns the base queue the page used to be on.
658  *
659  * The vm_page and the queue must be spinlocked.
660  * This function will unlock the queue but leave the page spinlocked.
661  */
662 static __inline u_short
663 _vm_page_rem_queue_spinlocked(vm_page_t m)
664 {
665 	struct vpgqueues *pq;
666 	u_short queue;
667 	u_short oqueue;
668 
669 	queue = m->queue;
670 	if (queue != PQ_NONE) {
671 		pq = &vm_page_queues[queue];
672 		TAILQ_REMOVE(&pq->pl, m, pageq);
673 		atomic_add_int(pq->cnt, -1);
674 		pq->lcnt--;
675 		m->queue = PQ_NONE;
676 		oqueue = queue;
677 		if ((queue - m->pc) == PQ_CACHE || (queue - m->pc) == PQ_FREE)
678 			queue -= m->pc;
679 		vm_page_queues_spin_unlock(oqueue);	/* intended */
680 	}
681 	return queue;
682 }
683 
684 /*
685  * Helper function places the vm_page on the specified queue.
686  *
687  * The vm_page must be spinlocked.
688  * This function will return with both the page and the queue locked.
689  */
690 static __inline void
691 _vm_page_add_queue_spinlocked(vm_page_t m, u_short queue, int athead)
692 {
693 	struct vpgqueues *pq;
694 
695 	KKASSERT(m->queue == PQ_NONE);
696 
697 	if (queue != PQ_NONE) {
698 		vm_page_queues_spin_lock(queue);
699 		pq = &vm_page_queues[queue];
700 		++pq->lcnt;
701 		atomic_add_int(pq->cnt, 1);
702 		m->queue = queue;
703 
704 		/*
705 		 * PQ_FREE is always handled LIFO style to try to provide
706 		 * cache-hot pages to programs.
707 		 */
708 		if (queue - m->pc == PQ_FREE) {
709 			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
710 		} else if (athead) {
711 			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
712 		} else {
713 			TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
714 		}
715 		/* leave the queue spinlocked */
716 	}
717 }
718 
719 /*
720  * Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
721  * m->busy is zero.  Returns TRUE if it had to sleep, FALSE if we
722  * did not.  Only one sleep call will be made before returning.
723  *
724  * This function does NOT busy the page and on return the page is not
725  * guaranteed to be available.
726  */
727 void
728 vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
729 {
730 	u_int32_t flags;
731 
732 	for (;;) {
733 		flags = m->flags;
734 		cpu_ccfence();
735 
736 		if ((flags & PG_BUSY) == 0 &&
737 		    (also_m_busy == 0 || (flags & PG_SBUSY) == 0)) {
738 			break;
739 		}
740 		tsleep_interlock(m, 0);
741 		if (atomic_cmpset_int(&m->flags, flags,
742 				      flags | PG_WANTED | PG_REFERENCED)) {
743 			tsleep(m, PINTERLOCKED, msg, 0);
744 			break;
745 		}
746 	}
747 }
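
/*
 * Hedged usage sketch (illustrative only, not a real caller; the wmesg
 * string is arbitrary): since the page is not busied on return, callers
 * normally re-test their condition after the sleep:
 *
 *	while (m->flags & PG_BUSY) {
 *		vm_page_sleep_busy(m, FALSE, "pwtslp");
 *		(re-evaluate the page state here before continuing)
 *	}
 */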
748 
749 /*
750  * This calculates and returns a page color given an optional VM object and
751  * either a pindex or an iterator.  We attempt to return a cpu-localized
752  * pg_color that is still roughly 16-way set-associative.  The CPU topology
753  * is used if it was probed.
754  *
755  * The caller may use the returned value to index into e.g. PQ_FREE when
756  * allocating a page in order to nominally obtain pages that are hopefully
757  * already localized to the requesting cpu.  This function is not able to
758  * provide any sort of guarantee of this, but does its best to improve
759  * hardware cache management performance.
760  *
761  * WARNING! The caller must mask the returned value with PQ_L2_MASK.
762  */
763 u_short
764 vm_get_pg_color(globaldata_t gd, vm_object_t object, vm_pindex_t pindex)
765 {
766 	u_short pg_color;
767 	int phys_id;
768 	int core_id;
769 	int object_pg_color;
770 
771 	phys_id = get_cpu_phys_id(gd->gd_cpuid);
772 	core_id = get_cpu_core_id(gd->gd_cpuid);
773 	object_pg_color = object ? object->pg_color : 0;
774 
775 	if (cpu_topology_phys_ids && cpu_topology_core_ids) {
776 		int grpsize = PQ_L2_SIZE / cpu_topology_phys_ids;
777 
778 		if (grpsize / cpu_topology_core_ids >= PQ_SET_ASSOC) {
779 			/*
780 			 * Enough space for a full break-down.
781 			 */
782 			pg_color = phys_id * grpsize;
783 			pg_color += core_id * grpsize / cpu_topology_core_ids;
784 			pg_color += (pindex + object_pg_color) %
785 				    (grpsize / cpu_topology_core_ids);
786 		} else {
787 			/*
788 			 * Not enough space, split up by physical package,
789 			 * then split up by core id but only down to a
790 			 * 16-set.  If all else fails, force a 16-set.
791 			 */
792 			pg_color = phys_id * grpsize;
793 			if (grpsize > 16) {
794 				pg_color += 16 * (core_id % (grpsize / 16));
795 				grpsize = 16;
796 			} else {
797 				grpsize = 16;
798 			}
799 			pg_color += (pindex + object_pg_color) %
800 				    grpsize;
801 		}
802 	} else {
803 		/*
804 		 * Unknown topology, distribute things evenly.
805 		 */
806 		pg_color = gd->gd_cpuid * PQ_L2_SIZE / ncpus;
807 		pg_color += pindex + object_pg_color;
808 	}
809 	return pg_color;
810 }
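
/*
 * Hedged usage sketch (illustrative only): callers must apply PQ_L2_MASK
 * themselves before using the color as a queue index, e.g.:
 *
 *	pg_color = vm_get_pg_color(mycpu, object, pindex);
 *	m = vm_page_list_find(PQ_FREE, pg_color & PQ_L2_MASK, FALSE);
 */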
811 
812 /*
813  * Wait until PG_BUSY can be set, then set it.  If also_m_busy is TRUE we
814  * also wait for m->busy to become 0 before setting PG_BUSY.
815  */
816 void
817 VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
818 				     int also_m_busy, const char *msg
819 				     VM_PAGE_DEBUG_ARGS)
820 {
821 	u_int32_t flags;
822 
823 	for (;;) {
824 		flags = m->flags;
825 		cpu_ccfence();
826 		if (flags & PG_BUSY) {
827 			tsleep_interlock(m, 0);
828 			if (atomic_cmpset_int(&m->flags, flags,
829 					  flags | PG_WANTED | PG_REFERENCED)) {
830 				tsleep(m, PINTERLOCKED, msg, 0);
831 			}
832 		} else if (also_m_busy && (flags & PG_SBUSY)) {
833 			tsleep_interlock(m, 0);
834 			if (atomic_cmpset_int(&m->flags, flags,
835 					  flags | PG_WANTED | PG_REFERENCED)) {
836 				tsleep(m, PINTERLOCKED, msg, 0);
837 			}
838 		} else {
839 			if (atomic_cmpset_int(&m->flags, flags,
840 					      flags | PG_BUSY)) {
841 #ifdef VM_PAGE_DEBUG
842 				m->busy_func = func;
843 				m->busy_line = lineno;
844 #endif
845 				break;
846 			}
847 		}
848 	}
849 }
850 
851 /*
852  * Attempt to set PG_BUSY.  If also_m_busy is TRUE we only succeed if m->busy
853  * is also 0.
854  *
855  * Returns non-zero on failure.
856  */
857 int
858 VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m, int also_m_busy
859 				    VM_PAGE_DEBUG_ARGS)
860 {
861 	u_int32_t flags;
862 
863 	for (;;) {
864 		flags = m->flags;
865 		cpu_ccfence();
866 		if (flags & PG_BUSY)
867 			return TRUE;
868 		if (also_m_busy && (flags & PG_SBUSY))
869 			return TRUE;
870 		if (atomic_cmpset_int(&m->flags, flags, flags | PG_BUSY)) {
871 #ifdef VM_PAGE_DEBUG
872 			m->busy_func = func;
873 			m->busy_line = lineno;
874 #endif
875 			return FALSE;
876 		}
877 	}
878 }
879 
880 /*
881  * Clear the PG_BUSY flag and return non-zero to indicate to the caller
882  * that a wakeup() should be performed.
883  *
884  * The vm_page must be spinlocked and will remain spinlocked on return.
885  * The related queue must NOT be spinlocked (which could deadlock us).
886  *
887  * (inline version)
888  */
889 static __inline
890 int
891 _vm_page_wakeup(vm_page_t m)
892 {
893 	u_int32_t flags;
894 
895 	for (;;) {
896 		flags = m->flags;
897 		cpu_ccfence();
898 		if (atomic_cmpset_int(&m->flags, flags,
899 				      flags & ~(PG_BUSY | PG_WANTED))) {
900 			break;
901 		}
902 	}
903 	return(flags & PG_WANTED);
904 }
905 
906 /*
907  * Clear the PG_BUSY flag and wakeup anyone waiting for the page.  This
908  * is typically the last call you make on a page before moving onto
909  * other things.
910  */
911 void
912 vm_page_wakeup(vm_page_t m)
913 {
914 	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
915 	vm_page_spin_lock(m);
916 	if (_vm_page_wakeup(m)) {
917 		vm_page_spin_unlock(m);
918 		wakeup(m);
919 	} else {
920 		vm_page_spin_unlock(m);
921 	}
922 }
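
/*
 * Hedged sketch of the hard-busy protocol (illustrative only; the "pgbsy"
 * wmesg is arbitrary):
 *
 *	vm_page_busy_wait(m, FALSE, "pgbsy");	(blocks until PG_BUSY is ours)
 *	... manipulate the page ...
 *	vm_page_wakeup(m);			(drops PG_BUSY, wakes waiters)
 *
 * or, when blocking is not acceptable:
 *
 *	if (vm_page_busy_try(m, TRUE) == 0) {
 *		... the page is busied and its soft-busy state is clear ...
 *		vm_page_wakeup(m);
 *	}
 */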
923 
924 /*
925  * Holding a page keeps it from being reused.  Other parts of the system
926  * can still disassociate the page from its current object and free it, or
927  * perform read or write I/O on it and/or otherwise manipulate the page,
928  * but if the page is held the VM system will leave the page and its data
929  * intact and not reuse the page for other purposes until the last hold
930  * reference is released.  (see vm_page_wire() if you want to prevent the
931  * page from being disassociated from its object too).
932  *
933  * The caller must still validate the contents of the page and, if necessary,
934  * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
935  * before manipulating the page.
936  *
937  * XXX get vm_page_spin_lock() here and move FREE->HOLD if necessary
938  */
939 void
940 vm_page_hold(vm_page_t m)
941 {
942 	vm_page_spin_lock(m);
943 	atomic_add_int(&m->hold_count, 1);
944 	if (m->queue - m->pc == PQ_FREE) {
945 		_vm_page_queue_spin_lock(m);
946 		_vm_page_rem_queue_spinlocked(m);
947 		_vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0);
948 		_vm_page_queue_spin_unlock(m);
949 	}
950 	vm_page_spin_unlock(m);
951 }
952 
953 /*
954  * The opposite of vm_page_hold().  If the page is on the HOLD queue
955  * it was freed while held and must be moved back to the FREE queue.
956  */
957 void
958 vm_page_unhold(vm_page_t m)
959 {
960 	KASSERT(m->hold_count > 0 && m->queue - m->pc != PQ_FREE,
961 		("vm_page_unhold: pg %p illegal hold_count (%d) or on FREE queue (%d)",
962 		 m, m->hold_count, m->queue - m->pc));
963 	vm_page_spin_lock(m);
964 	atomic_add_int(&m->hold_count, -1);
965 	if (m->hold_count == 0 && m->queue - m->pc == PQ_HOLD) {
966 		_vm_page_queue_spin_lock(m);
967 		_vm_page_rem_queue_spinlocked(m);
968 		_vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 0);
969 		_vm_page_queue_spin_unlock(m);
970 	}
971 	vm_page_spin_unlock(m);
972 }
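
/*
 * Hedged usage sketch (illustrative only): a hold pins the page's identity
 * and contents across a potentially blocking operation that does not need
 * PG_BUSY:
 *
 *	vm_page_hold(m);
 *	... the page may be freed by others but will not be reused ...
 *	vm_page_unhold(m);
 */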
973 
974 /*
975  *	vm_page_getfake:
976  *
977  *	Create a fictitious page with the specified physical address and
978  *	memory attribute.  The memory attribute is the only machine-
979  *	dependent aspect of a fictitious page that must be initialized.
980  */
981 
982 void
983 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
984 {
985 
986 	if ((m->flags & PG_FICTITIOUS) != 0) {
987 		/*
988 		 * The page's memattr might have changed since the
989 		 * previous initialization.  Update the pmap to the
990 		 * new memattr.
991 		 */
992 		goto memattr;
993 	}
994 	m->phys_addr = paddr;
995 	m->queue = PQ_NONE;
996 	/* Fictitious pages don't use "segind". */
997 	/* Fictitious pages don't use "order" or "pool". */
998 	m->flags = PG_FICTITIOUS | PG_UNMANAGED | PG_BUSY;
999 	m->wire_count = 1;
1000 	pmap_page_init(m);
1001 memattr:
1002 	pmap_page_set_memattr(m, memattr);
1003 }
1004 
1005 /*
1006  * Inserts the given vm_page into the object and object list.
1007  *
1008  * The pagetables are not updated but will presumably fault the page
1009  * in if necessary, or if a kernel page the caller will at some point
1010  * enter the page into the kernel's pmap.  We are not allowed to block
1011  * here so we *can't* do this anyway.
1012  *
1013  * This routine may not block.
1014  * This routine must be called with the vm_object held.
1015  * This routine must be called with a critical section held.
1016  *
1017  * This routine returns TRUE if the page was inserted into the object
1018  * successfully, and FALSE if the page already exists in the object.
1019  */
1020 int
1021 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1022 {
1023 	ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(object));
1024 	if (m->object != NULL)
1025 		panic("vm_page_insert: already inserted");
1026 
1027 	object->generation++;
1028 
1029 	/*
1030 	 * Record the object/offset pair in this page and add the
1031 	 * pv_list_count of the page to the object.
1032 	 *
1033 	 * The vm_page spin lock is required for interactions with the pmap.
1034 	 */
1035 	vm_page_spin_lock(m);
1036 	m->object = object;
1037 	m->pindex = pindex;
1038 	if (vm_page_rb_tree_RB_INSERT(&object->rb_memq, m)) {
1039 		m->object = NULL;
1040 		m->pindex = 0;
1041 		vm_page_spin_unlock(m);
1042 		return FALSE;
1043 	}
1044 	++object->resident_page_count;
1045 	++mycpu->gd_vmtotal.t_rm;
1046 	/* atomic_add_int(&object->agg_pv_list_count, m->md.pv_list_count); */
1047 	vm_page_spin_unlock(m);
1048 
1049 	/*
1050 	 * Since we are inserting a new and possibly dirty page,
1051 	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
1052 	 */
1053 	if ((m->valid & m->dirty) ||
1054 	    (m->flags & (PG_WRITEABLE | PG_NEED_COMMIT)))
1055 		vm_object_set_writeable_dirty(object);
1056 
1057 	/*
1058 	 * Checks for a swap assignment and sets PG_SWAPPED if appropriate.
1059 	 */
1060 	swap_pager_page_inserted(m);
1061 	return TRUE;
1062 }
1063 
1064 /*
1065  * Removes the given vm_page_t from the (object,index) table
1066  *
1067  * The underlying pmap entry (if any) is NOT removed here.
1068  * This routine may not block.
1069  *
1070  * The page must be BUSY and will remain BUSY on return.
1071  * No other requirements.
1072  *
1073  * NOTE: FreeBSD side effect was to unbusy the page on return.  We leave
1074  *	 it busy.
1075  */
1076 void
1077 vm_page_remove(vm_page_t m)
1078 {
1079 	vm_object_t object;
1080 
1081 	if (m->object == NULL) {
1082 		return;
1083 	}
1084 
1085 	if ((m->flags & PG_BUSY) == 0)
1086 		panic("vm_page_remove: page not busy");
1087 
1088 	object = m->object;
1089 
1090 	vm_object_hold(object);
1091 
1092 	/*
1093 	 * Remove the page from the object and update the object.
1094 	 *
1095 	 * The vm_page spin lock is required for interactions with the pmap.
1096 	 */
1097 	vm_page_spin_lock(m);
1098 	vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m);
1099 	--object->resident_page_count;
1100 	--mycpu->gd_vmtotal.t_rm;
1101 	/* atomic_add_int(&object->agg_pv_list_count, -m->md.pv_list_count); */
1102 	m->object = NULL;
1103 	vm_page_spin_unlock(m);
1104 
1105 	object->generation++;
1106 
1107 	vm_object_drop(object);
1108 }
1109 
1110 /*
1111  * Locate and return the page at (object, pindex), or NULL if the
1112  * page could not be found.
1113  *
1114  * The caller must hold the vm_object token.
1115  */
1116 vm_page_t
1117 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1118 {
1119 	vm_page_t m;
1120 
1121 	/*
1122 	 * Search the object's RB tree for the requested pindex
1123 	 */
1124 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1125 	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
1126 	KKASSERT(m == NULL || (m->object == object && m->pindex == pindex));
1127 	return(m);
1128 }
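
/*
 * Hedged usage sketch (illustrative only): the lookup is only valid while
 * the object token is held:
 *
 *	vm_object_hold(object);
 *	m = vm_page_lookup(object, pindex);
 *	if (m) {
 *		... typically busy the page before dropping the object ...
 *	}
 *	vm_object_drop(object);
 */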
1129 
1130 vm_page_t
1131 VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(struct vm_object *object,
1132 					    vm_pindex_t pindex,
1133 					    int also_m_busy, const char *msg
1134 					    VM_PAGE_DEBUG_ARGS)
1135 {
1136 	u_int32_t flags;
1137 	vm_page_t m;
1138 
1139 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1140 	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
1141 	while (m) {
1142 		KKASSERT(m->object == object && m->pindex == pindex);
1143 		flags = m->flags;
1144 		cpu_ccfence();
1145 		if (flags & PG_BUSY) {
1146 			tsleep_interlock(m, 0);
1147 			if (atomic_cmpset_int(&m->flags, flags,
1148 					  flags | PG_WANTED | PG_REFERENCED)) {
1149 				tsleep(m, PINTERLOCKED, msg, 0);
1150 				m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq,
1151 							      pindex);
1152 			}
1153 		} else if (also_m_busy && (flags & PG_SBUSY)) {
1154 			tsleep_interlock(m, 0);
1155 			if (atomic_cmpset_int(&m->flags, flags,
1156 					  flags | PG_WANTED | PG_REFERENCED)) {
1157 				tsleep(m, PINTERLOCKED, msg, 0);
1158 				m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq,
1159 							      pindex);
1160 			}
1161 		} else if (atomic_cmpset_int(&m->flags, flags,
1162 					     flags | PG_BUSY)) {
1163 #ifdef VM_PAGE_DEBUG
1164 			m->busy_func = func;
1165 			m->busy_line = lineno;
1166 #endif
1167 			break;
1168 		}
1169 	}
1170 	return m;
1171 }
1172 
1173 /*
1174  * Attempt to lookup and busy a page.
1175  *
1176  * Returns NULL if the page could not be found
1177  *
1178  * Returns a vm_page and error == TRUE if the page exists but could not
1179  * be busied.
1180  *
1181  * Returns a vm_page and error == FALSE on success.
1182  */
1183 vm_page_t
1184 VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(struct vm_object *object,
1185 					   vm_pindex_t pindex,
1186 					   int also_m_busy, int *errorp
1187 					   VM_PAGE_DEBUG_ARGS)
1188 {
1189 	u_int32_t flags;
1190 	vm_page_t m;
1191 
1192 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1193 	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
1194 	*errorp = FALSE;
1195 	while (m) {
1196 		KKASSERT(m->object == object && m->pindex == pindex);
1197 		flags = m->flags;
1198 		cpu_ccfence();
1199 		if (flags & PG_BUSY) {
1200 			*errorp = TRUE;
1201 			break;
1202 		}
1203 		if (also_m_busy && (flags & PG_SBUSY)) {
1204 			*errorp = TRUE;
1205 			break;
1206 		}
1207 		if (atomic_cmpset_int(&m->flags, flags, flags | PG_BUSY)) {
1208 #ifdef VM_PAGE_DEBUG
1209 			m->busy_func = func;
1210 			m->busy_line = lineno;
1211 #endif
1212 			break;
1213 		}
1214 	}
1215 	return m;
1216 }
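
/*
 * Hedged usage sketch (illustrative only):
 *
 *	int error;
 *
 *	m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
 *	if (m == NULL) {
 *		... no such page, allocate one or give up ...
 *	} else if (error) {
 *		... page exists but could not be busied; sleep and retry,
 *		    e.g. via vm_page_lookup_busy_wait() ...
 *	} else {
 *		... m is busied and ready for use ...
 *	}
 */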
1217 
1218 /*
1219  * Attempt to repurpose the passed-in page.  If the passed-in page cannot
1220  * be repurposed it will be released, *must_reenter will be set to 1, and
1221  * this function will fall-through to vm_page_lookup_busy_try().
1222  *
1223  * The passed-in page must be wired and not busy.  The returned page will
1224  * be busied and not wired.
1225  *
1226  * A different page may be returned.  The returned page will be busied and
1227  * not wired.
1228  *
1229  * NULL can be returned.  If so, the required page could not be busied.
1230  * The passed-in page will be unwired.
1231  */
1232 vm_page_t
1233 vm_page_repurpose(struct vm_object *object, vm_pindex_t pindex,
1234 		  int also_m_busy, int *errorp, vm_page_t m,
1235 		  int *must_reenter, int *iswired)
1236 {
1237 	if (m) {
1238 		/*
1239 		 * Do not mess with pages in a complex state, such as pages
1240 		 * which are mapped, as repurposing such pages can be more
1241 		 * expensive than simply allocating a new one.
1242 		 *
1243 		 * NOTE: Soft-busying can deadlock against putpages or I/O
1244 		 *	 so we only allow hard-busying here.
1245 		 */
1246 		KKASSERT(also_m_busy == FALSE);
1247 		vm_page_busy_wait(m, also_m_busy, "biodep");
1248 
1249 		if ((m->flags & (PG_UNMANAGED | PG_MAPPED |
1250 				 PG_FICTITIOUS | PG_SBUSY)) ||
1251 		    m->busy || m->wire_count != 1 || m->hold_count) {
1252 			vm_page_unwire(m, 0);
1253 			vm_page_wakeup(m);
1254 			/* fall through to normal lookup */
1255 		} else if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
1256 			vm_page_unwire(m, 0);
1257 			vm_page_deactivate(m);
1258 			vm_page_wakeup(m);
1259 			/* fall through to normal lookup */
1260 		} else {
1261 			/*
1262 			 * We can safely repurpose the page.  It should
1263 			 * already be unqueued.
1264 			 */
1265 			KKASSERT(m->queue == PQ_NONE && m->dirty == 0);
1266 			vm_page_remove(m);
1267 			m->valid = 0;
1268 			m->act_count = 0;
1269 			if (vm_page_insert(m, object, pindex)) {
1270 				*errorp = 0;
1271 				*iswired = 1;
1272 
1273 				return m;
1274 			}
1275 			vm_page_unwire(m, 0);
1276 			vm_page_free(m);
1277 			/* fall through to normal lookup */
1278 		}
1279 	}
1280 
1281 	/*
1282 	 * Cannot repurpose page, attempt to locate the desired page.  May
1283 	 * return NULL.
1284 	 */
1285 	*must_reenter = 1;
1286 	*iswired = 0;
1287 	m = vm_page_lookup_busy_try(object, pindex, also_m_busy, errorp);
1288 
1289 	return m;
1290 }
1291 
1292 /*
1293  * Caller must hold the related vm_object
1294  */
1295 vm_page_t
1296 vm_page_next(vm_page_t m)
1297 {
1298 	vm_page_t next;
1299 
1300 	next = vm_page_rb_tree_RB_NEXT(m);
1301 	if (next && next->pindex != m->pindex + 1)
1302 		next = NULL;
1303 	return (next);
1304 }
1305 
1306 /*
1307  * vm_page_rename()
1308  *
1309  * Move the given vm_page from its current object to the specified
1310  * target object/offset.  The page must be busy and will remain so
1311  * on return.
1312  *
1313  * new_object must be held.
1314  * This routine might block. XXX ?
1315  *
1316  * NOTE: Swap associated with the page must be invalidated by the move.  We
1317  *       have to do this for several reasons:  (1) we aren't freeing the
1318  *       page, (2) we are dirtying the page, (3) the VM system is probably
1319  *       moving the page from object A to B, and will then later move
1320  *       the backing store from A to B and we can't have a conflict.
1321  *
1322  * NOTE: We *always* dirty the page.  It is necessary both for the
1323  *       fact that we moved it, and because we may be invalidating
1324  *	 swap.  If the page is on the cache, we have to deactivate it
1325  *	 or vm_page_dirty() will panic.  Dirty pages are not allowed
1326  *	 on the cache.
1327  */
1328 void
1329 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1330 {
1331 	KKASSERT(m->flags & PG_BUSY);
1332 	ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(new_object));
1333 	if (m->object) {
1334 		ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(m->object));
1335 		vm_page_remove(m);
1336 	}
1337 	if (vm_page_insert(m, new_object, new_pindex) == FALSE) {
1338 		panic("vm_page_rename: target exists (%p,%"PRIu64")",
1339 		      new_object, new_pindex);
1340 	}
1341 	if (m->queue - m->pc == PQ_CACHE)
1342 		vm_page_deactivate(m);
1343 	vm_page_dirty(m);
1344 }
1345 
1346 /*
1347  * vm_page_unqueue() without any wakeup.  This routine is used when a page
1348  * is to remain BUSY'd by the caller.
1349  *
1350  * This routine may not block.
1351  */
1352 void
1353 vm_page_unqueue_nowakeup(vm_page_t m)
1354 {
1355 	vm_page_and_queue_spin_lock(m);
1356 	(void)_vm_page_rem_queue_spinlocked(m);
1357 	vm_page_spin_unlock(m);
1358 }
1359 
1360 /*
1361  * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
1362  * if necessary.
1363  *
1364  * This routine may not block.
1365  */
1366 void
1367 vm_page_unqueue(vm_page_t m)
1368 {
1369 	u_short queue;
1370 
1371 	vm_page_and_queue_spin_lock(m);
1372 	queue = _vm_page_rem_queue_spinlocked(m);
1373 	if (queue == PQ_FREE || queue == PQ_CACHE) {
1374 		vm_page_spin_unlock(m);
1375 		pagedaemon_wakeup();
1376 	} else {
1377 		vm_page_spin_unlock(m);
1378 	}
1379 }
1380 
1381 /*
1382  * vm_page_list_find()
1383  *
1384  * Find a page on the specified queue with color optimization.
1385  *
1386  * The page coloring optimization attempts to locate a page that does
1387  * not overload other nearby pages in the object in the cpu's L1 or L2
1388  * caches.  We need this optimization because cpu caches tend to be
1389  * physical caches, while object spaces tend to be virtual.
1390  *
1391  * The page coloring optimization also, very importantly, tries to localize
1392  * memory to cpus and physical sockets.
1393  *
1394  * On MP systems each PQ_FREE and PQ_CACHE color queue has its own spinlock
1395  * and the algorithm is adjusted to localize allocations on a per-core basis.
1396  * This is done by 'twisting' the colors.
1397  *
1398  * The page is returned spinlocked and removed from its queue (it will
1399  * be on PQ_NONE), or NULL. The page is not PG_BUSY'd.  The caller
1400  * is responsible for dealing with the busy-page case (usually by
1401  * deactivating the page and looping).
1402  *
1403  * NOTE:  This routine is carefully inlined.  A non-inlined version
1404  *	  is available for outside callers but the only critical path is
1405  *	  from within this source file.
1406  *
1407  * NOTE:  This routine assumes that the vm_pages found in PQ_CACHE and PQ_FREE
1408  *	  represent stable storage, allowing us to order our locks vm_page
1409  *	  first, then queue.
1410  */
1411 static __inline
1412 vm_page_t
1413 _vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
1414 {
1415 	vm_page_t m;
1416 
1417 	for (;;) {
1418 		if (prefer_zero) {
1419 			m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl,
1420 				       pglist);
1421 		} else {
1422 			m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
1423 		}
1424 		if (m == NULL) {
1425 			m = _vm_page_list_find2(basequeue, index);
1426 			return(m);
1427 		}
1428 		vm_page_and_queue_spin_lock(m);
1429 		if (m->queue == basequeue + index) {
1430 			_vm_page_rem_queue_spinlocked(m);
1431 			/* vm_page_t spin held, no queue spin */
1432 			break;
1433 		}
1434 		vm_page_and_queue_spin_unlock(m);
1435 	}
1436 	return(m);
1437 }
1438 
1439 /*
1440  * If we could not find the page in the desired queue try to find it in
1441  * a nearby queue.
1442  */
1443 static vm_page_t
1444 _vm_page_list_find2(int basequeue, int index)
1445 {
1446 	struct vpgqueues *pq;
1447 	vm_page_t m = NULL;
1448 	int pqmask = PQ_SET_ASSOC_MASK >> 1;
1449 	int pqi;
1450 	int i;
1451 
1452 	index &= PQ_L2_MASK;
1453 	pq = &vm_page_queues[basequeue];
1454 
1455 	/*
1456 	 * Run local sets of 16, 32, 64, 128, and the whole queue if all
1457 	 * else fails (PQ_L2_MASK which is 255).
1458 	 */
1459 	do {
1460 		pqmask = (pqmask << 1) | 1;
1461 		for (i = 0; i <= pqmask; ++i) {
1462 			pqi = (index & ~pqmask) | ((index + i) & pqmask);
1463 			m = TAILQ_FIRST(&pq[pqi].pl);
1464 			if (m) {
1465 				_vm_page_and_queue_spin_lock(m);
1466 				if (m->queue == basequeue + pqi) {
1467 					_vm_page_rem_queue_spinlocked(m);
1468 					return(m);
1469 				}
1470 				_vm_page_and_queue_spin_unlock(m);
1471 				--i;
1472 				continue;
1473 			}
1474 		}
1475 	} while (pqmask != PQ_L2_MASK);
1476 
1477 	return(m);
1478 }
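
/*
 * Illustrative walk-through of the expanding-mask scan above (hedged
 * example).  With PQ_SET_ASSOC == 16 the first pass uses pqmask = 15, so
 * for index = 37 the probes cover the 16-color set containing 37, starting
 * at the requested color:
 *
 *	37, 38, ... 47, 32, 33, ... 36
 *
 * If that set is empty the mask doubles (31, then 63, ...) until it
 * finally spans PQ_L2_MASK, i.e. the whole color space, always probing
 * colors closest to the request first.
 */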
1479 
1480 /*
1481  * Returns a vm_page candidate for allocation.  The page is not busied so
1482  * it can move around.  The caller must busy the page (and typically
1483  * deactivate it if it cannot be busied!)
1484  *
1485  * Returns a spinlocked vm_page that has been removed from its queue.
1486  */
1487 vm_page_t
1488 vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
1489 {
1490 	return(_vm_page_list_find(basequeue, index, prefer_zero));
1491 }
1492 
1493 /*
1494  * Find a page on the cache queue with color optimization, remove it
1495  * from the queue, and busy it.  The returned page will not be spinlocked.
1496  *
1497  * Candidates can fail, typically because they are busied by someone
1498  * else, in which case they are deactivated and the search continues.
1499  *
1500  * This routine may not block.
1501  *
1502  */
1503 static vm_page_t
1504 vm_page_select_cache(u_short pg_color)
1505 {
1506 	vm_page_t m;
1507 
1508 	for (;;) {
1509 		m = _vm_page_list_find(PQ_CACHE, pg_color & PQ_L2_MASK, FALSE);
1510 		if (m == NULL)
1511 			break;
1512 		/*
1513 		 * (m) has been removed from its queue and spinlocked
1514 		 */
1515 		if (vm_page_busy_try(m, TRUE)) {
1516 			_vm_page_deactivate_locked(m, 0);
1517 			vm_page_spin_unlock(m);
1518 		} else {
1519 			/*
1520 			 * We successfully busied the page
1521 			 */
1522 			if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0 &&
1523 			    m->hold_count == 0 &&
1524 			    m->wire_count == 0 &&
1525 			    (m->dirty & m->valid) == 0) {
1526 				vm_page_spin_unlock(m);
1527 				pagedaemon_wakeup();
1528 				return(m);
1529 			}
1530 
1531 			/*
1532 			 * The page cannot be recycled, deactivate it.
1533 			 */
1534 			_vm_page_deactivate_locked(m, 0);
1535 			if (_vm_page_wakeup(m)) {
1536 				vm_page_spin_unlock(m);
1537 				wakeup(m);
1538 			} else {
1539 				vm_page_spin_unlock(m);
1540 			}
1541 		}
1542 	}
1543 	return (m);
1544 }
1545 
1546 /*
1547  * Find a free or zero page, with specified preference.  We attempt to
1548  * inline the nominal case and fall back to _vm_page_list_find2()
1549  * otherwise.  A busied page is removed from the queue and returned.
1550  *
1551  * This routine may not block.
1552  */
1553 static __inline vm_page_t
1554 vm_page_select_free(u_short pg_color, boolean_t prefer_zero)
1555 {
1556 	vm_page_t m;
1557 
1558 	for (;;) {
1559 		m = _vm_page_list_find(PQ_FREE, pg_color & PQ_L2_MASK,
1560 				       prefer_zero);
1561 		if (m == NULL)
1562 			break;
1563 		if (vm_page_busy_try(m, TRUE)) {
1564 			/*
1565 			 * Various mechanisms such as a pmap_collect can
1566 			 * result in a busy page on the free queue.  We
1567 			 * have to move the page out of the way so we can
1568 			 * retry the allocation.  If the other thread is not
1569 			 * allocating the page then m->valid will remain 0 and
1570 			 * the pageout daemon will free the page later on.
1571 			 *
1572 			 * Since we could not busy the page, however, we
1573 			 * cannot make assumptions as to whether the page
1574 			 * will be allocated by the other thread or not,
1575 			 * so all we can do is deactivate it to move it out
1576 			 * of the way.  In particular, if the other thread
1577 			 * wires the page it may wind up on the inactive
1578 			 * queue and the pageout daemon will have to deal
1579 			 * with that case too.
1580 			 */
1581 			_vm_page_deactivate_locked(m, 0);
1582 			vm_page_spin_unlock(m);
1583 		} else {
1584 			/*
1585 			 * Theoretically if we are able to busy the page
1586 			 * atomic with the queue removal (using the vm_page
1587 			 * lock) nobody else should be able to mess with the
1588 			 * page before us.
1589 			 */
1590 			KKASSERT((m->flags & (PG_UNMANAGED |
1591 					      PG_NEED_COMMIT)) == 0);
1592 			KASSERT(m->hold_count == 0, ("m->hold_count is not zero "
1593 						     "pg %p q=%d flags=%08x hold=%d wire=%d",
1594 						     m, m->queue, m->flags, m->hold_count, m->wire_count));
1595 			KKASSERT(m->wire_count == 0);
1596 			vm_page_spin_unlock(m);
1597 			pagedaemon_wakeup();
1598 
1599 			/* return busied and removed page */
1600 			return(m);
1601 		}
1602 	}
1603 	return(m);
1604 }
1605 
1606 /*
1607  * vm_page_alloc()
1608  *
1609  * Allocate and return a memory cell associated with this VM object/offset
1610  * pair.  If object is NULL an unassociated page will be allocated.
1611  *
1612  * The returned page will be busied and removed from its queues.  This
1613  * routine can block and may return NULL if a race occurs and the page
1614  * is found to already exist at the specified (object, pindex).
1615  *
1616  *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
1617  *	VM_ALLOC_QUICK		like normal but cannot use cache
1618  *	VM_ALLOC_SYSTEM		greater free drain
1619  *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
1620  *	VM_ALLOC_ZERO		advisory request for pre-zero'd page only
1621  *	VM_ALLOC_FORCE_ZERO	advisory request for pre-zero'd page only
1622  *	VM_ALLOC_NULL_OK	ok to return NULL on insertion collision
1623  *				(see vm_page_grab())
1624  *	VM_ALLOC_USE_GD		ok to use per-gd cache
1625  *
1626  * The object must be held if not NULL
1627  * This routine may not block
1628  *
1629  * Additional special handling is required when called from an interrupt
1630  * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
1631  * in this case.
1632  */
1633 vm_page_t
1634 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
1635 {
1636 	globaldata_t gd = mycpu;
1637 	vm_object_t obj;
1638 	vm_page_t m;
1639 	u_short pg_color;
1640 
1641 #if 0
1642 	/*
1643 	 * Special per-cpu free VM page cache.  The pages are pre-busied
1644 	 * and pre-zerod for us.
1645 	 */
1646 	if (gd->gd_vmpg_count && (page_req & VM_ALLOC_USE_GD)) {
1647 		crit_enter_gd(gd);
1648 		if (gd->gd_vmpg_count) {
1649 			m = gd->gd_vmpg_array[--gd->gd_vmpg_count];
1650 			crit_exit_gd(gd);
1651 			goto done;
1652                 }
1653 		crit_exit_gd(gd);
1654         }
1655 #endif
1656 	m = NULL;
1657 
1658 	/*
1659 	 * CPU LOCALIZATION
1660 	 *
1661 	 * CPU localization algorithm.  Break the page queues up by physical
1662 	 * id and core id (note that two cpu threads will have the same core
1663 	 * id, and core_id != gd_cpuid).
1664 	 *
1665 	 * This is nowhere near perfect, for example the last pindex in a
1666 	 * subgroup will overflow into the next cpu or package.  But this
1667 	 * should get us good page reuse locality in heavy mixed loads.
1668 	 */
1669 	pg_color = vm_get_pg_color(gd, object, pindex);
1670 
1671 	KKASSERT(page_req &
1672 		(VM_ALLOC_NORMAL|VM_ALLOC_QUICK|
1673 		 VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
1674 
1675 	/*
1676 	 * Certain system threads (pageout daemon, buf_daemon's) are
1677 	 * allowed to eat deeper into the free page list.
1678 	 */
1679 	if (curthread->td_flags & TDF_SYSTHREAD)
1680 		page_req |= VM_ALLOC_SYSTEM;
1681 
1682 	/*
1683 	 * Impose various limitations.  Note that the v_free_reserved test
1684 	 * must match the opposite of vm_page_count_target() to avoid
1685 	 * livelocks, be careful.
1686 	 */
1687 loop:
1688 	if (vmstats.v_free_count >= vmstats.v_free_reserved ||
1689 	    ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
1690 	    ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
1691 		vmstats.v_free_count > vmstats.v_interrupt_free_min)
1692 	) {
1693 		/*
1694 		 * The free queue has sufficient free pages to take one out.
1695 		 */
1696 		if (page_req & (VM_ALLOC_ZERO | VM_ALLOC_FORCE_ZERO))
1697 			m = vm_page_select_free(pg_color, TRUE);
1698 		else
1699 			m = vm_page_select_free(pg_color, FALSE);
1700 	} else if (page_req & VM_ALLOC_NORMAL) {
1701 		/*
1702 		 * Allocatable from the cache (non-interrupt only).  On
1703 		 * success, we must free the page and try again, thus
1704 		 * ensuring that vmstats.v_*_free_min counters are replenished.
1705 		 */
1706 #ifdef INVARIANTS
1707 		if (curthread->td_preempted) {
1708 			kprintf("vm_page_alloc(): warning, attempt to allocate"
1709 				" cache page from preempting interrupt\n");
1710 			m = NULL;
1711 		} else {
1712 			m = vm_page_select_cache(pg_color);
1713 		}
1714 #else
1715 		m = vm_page_select_cache(pg_color);
1716 #endif
1717 		/*
1718 		 * On success move the page into the free queue and loop.
1719 		 *
1720 		 * Only do this if we can safely acquire the vm_object lock,
1721 		 * because this is effectively a random page and the caller
1722 		 * might be holding the lock shared, we don't want to
1723 		 * deadlock.
1724 		 */
1725 		if (m != NULL) {
1726 			KASSERT(m->dirty == 0,
1727 				("Found dirty cache page %p", m));
1728 			if ((obj = m->object) != NULL) {
1729 				if (vm_object_hold_try(obj)) {
1730 					vm_page_protect(m, VM_PROT_NONE);
1731 					vm_page_free(m);
1732 					/* m->object NULL here */
1733 					vm_object_drop(obj);
1734 				} else {
1735 					vm_page_deactivate(m);
1736 					vm_page_wakeup(m);
1737 				}
1738 			} else {
1739 				vm_page_protect(m, VM_PROT_NONE);
1740 				vm_page_free(m);
1741 			}
1742 			goto loop;
1743 		}
1744 
1745 		/*
1746 		 * On failure return NULL
1747 		 */
1748 #if defined(DIAGNOSTIC)
1749 		if (vmstats.v_cache_count > 0)
1750 			kprintf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
1751 #endif
1752 		atomic_add_int(&vm_pageout_deficit, 1);
1753 		pagedaemon_wakeup();
1754 		return (NULL);
1755 	} else {
1756 		/*
1757 		 * No pages available, wakeup the pageout daemon and give up.
1758 		 */
1759 		atomic_add_int(&vm_pageout_deficit, 1);
1760 		pagedaemon_wakeup();
1761 		return (NULL);
1762 	}
1763 
1764 	/*
1765 	 * v_free_count can race so loop if we don't find the expected
1766 	 * page.
1767 	 */
1768 	if (m == NULL)
1769 		goto loop;
1770 
1771 	/*
1772 	 * Good page found.  The page has already been busied for us and
1773 	 * removed from its queues.
1774 	 */
1775 	KASSERT(m->dirty == 0,
1776 		("vm_page_alloc: free/cache page %p was dirty", m));
1777 	KKASSERT(m->queue == PQ_NONE);
1778 
1779 #if 0
1780 done:
1781 #endif
1782 	/*
1783 	 * Initialize the structure, inheriting some flags but clearing
1784 	 * all the rest.  The page has already been busied for us.
1785 	 */
1786 	vm_page_flag_clear(m, ~(PG_BUSY | PG_SBUSY));
1787 	KKASSERT(m->wire_count == 0);
1788 	KKASSERT(m->busy == 0);
1789 	m->act_count = 0;
1790 	m->valid = 0;
1791 
1792 	/*
1793 	 * Caller must be holding the object lock (asserted by
1794 	 * vm_page_insert()).
1795 	 *
1796 	 * NOTE: Inserting a page here does not insert it into any pmaps
1797 	 *	 (which could cause us to block allocating memory).
1798 	 *
1799 	 * NOTE: If no object is specified, an unassociated page is allocated
1800 	 *	 and m->pindex can be used by the caller for any purpose.
1801 	 */
1802 	if (object) {
1803 		if (vm_page_insert(m, object, pindex) == FALSE) {
1804 			vm_page_free(m);
1805 			if ((page_req & VM_ALLOC_NULL_OK) == 0)
1806 				panic("PAGE RACE %p[%ld]/%p",
1807 				      object, (long)pindex, m);
1808 			m = NULL;
1809 		}
1810 	} else {
1811 		m->pindex = pindex;
1812 	}
1813 
1814 	/*
1815 	 * Don't wakeup too often - wakeup the pageout daemon when
1816 	 * we would be nearly out of memory.
1817 	 */
1818 	pagedaemon_wakeup();
1819 
1820 	/*
1821 	 * A PG_BUSY page is returned.
1822 	 */
1823 	return (m);
1824 }
1825 
1826 /*
1827  * Returns number of pages available in our DMA memory reserve
1828  * (adjusted with vm.dma_reserved=<value>m in /boot/loader.conf)
1829  */
1830 vm_size_t
1831 vm_contig_avail_pages(void)
1832 {
1833 	alist_blk_t blk;
1834 	alist_blk_t count;
1835 	alist_blk_t bfree;
1836 	spin_lock(&vm_contig_spin);
1837 	bfree = alist_free_info(&vm_contig_alist, &blk, &count);
1838 	spin_unlock(&vm_contig_spin);
1839 
1840 	return bfree;
1841 }
1842 
1843 /*
1844  * Attempt to allocate contiguous physical memory with the specified
1845  * requirements.
1846  */
1847 vm_page_t
1848 vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
1849 		     unsigned long alignment, unsigned long boundary,
1850 		     unsigned long size, vm_memattr_t memattr)
1851 {
1852 	alist_blk_t blk;
1853 	vm_page_t m;
1854 	int i;
1855 
1856 	alignment >>= PAGE_SHIFT;
1857 	if (alignment == 0)
1858 		alignment = 1;
1859 	boundary >>= PAGE_SHIFT;
1860 	if (boundary == 0)
1861 		boundary = 1;
1862 	size = (size + PAGE_MASK) >> PAGE_SHIFT;
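	/* alignment, boundary, and size are expressed in pages from here on */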
1863 
1864 	spin_lock(&vm_contig_spin);
1865 	blk = alist_alloc(&vm_contig_alist, 0, size);
1866 	if (blk == ALIST_BLOCK_NONE) {
1867 		spin_unlock(&vm_contig_spin);
1868 		if (bootverbose) {
1869 			kprintf("vm_page_alloc_contig: %ldk nospace\n",
1870 				size * (PAGE_SIZE / 1024));
1871 		}
1872 		return(NULL);
1873 	}
1874 	if (high && ((vm_paddr_t)(blk + size) << PAGE_SHIFT) > high) {
1875 		alist_free(&vm_contig_alist, blk, size);
1876 		spin_unlock(&vm_contig_spin);
1877 		if (bootverbose) {
1878 			kprintf("vm_page_alloc_contig: %ldk high "
1879 				"%016jx failed\n",
1880 				size * (PAGE_SIZE / 1024),
1881 				(intmax_t)high);
1882 		}
1883 		return(NULL);
1884 	}
1885 	spin_unlock(&vm_contig_spin);
1886 	if (vm_contig_verbose) {
1887 		kprintf("vm_page_alloc_contig: %016jx/%ldk\n",
1888 			(intmax_t)(vm_paddr_t)blk << PAGE_SHIFT,
1889 			size * (PAGE_SIZE / 1024));
1890 	}
1891 
1892 	m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
1893 	if (memattr != VM_MEMATTR_DEFAULT)
1894 		for (i = 0; i < size; i++)
1895 			pmap_page_set_memattr(&m[i], memattr);
1896 	return m;
1897 }
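/*
 * Example (sketch): a driver needing 64KB of physically contiguous
 * memory below 4GB might use:
 *
 *	m = vm_page_alloc_contig(0, 0xFFFFFFFFUL, PAGE_SIZE, 0,
 *				 64 * 1024, VM_MEMATTR_DEFAULT);
 *	if (m == NULL)
 *		... handle the failure ...
 *
 * VM_PAGE_TO_PHYS(m) then gives the base physical address, and the
 * memory is later returned with vm_page_free_contig(m, 64 * 1024).
 */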
1898 
1899 /*
1900  * Free contiguously allocated pages.  The pages will be wired but not busy.
1901  * When freeing to the alist we leave them wired and not busy.
1902  */
1903 void
1904 vm_page_free_contig(vm_page_t m, unsigned long size)
1905 {
1906 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1907 	vm_pindex_t start = pa >> PAGE_SHIFT;
1908 	vm_pindex_t pages = (size + PAGE_MASK) >> PAGE_SHIFT;
1909 
1910 	if (vm_contig_verbose) {
1911 		kprintf("vm_page_free_contig:  %016jx/%ldk\n",
1912 			(intmax_t)pa, size / 1024);
1913 	}
1914 	if (pa < vm_low_phys_reserved) {
1915 		KKASSERT(pa + size <= vm_low_phys_reserved);
1916 		spin_lock(&vm_contig_spin);
1917 		alist_free(&vm_contig_alist, start, pages);
1918 		spin_unlock(&vm_contig_spin);
1919 	} else {
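		/*
		 * Pages outside the low-physical DMA reserve are returned
		 * to the normal page queues one at a time.
		 */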
1920 		while (pages) {
1921 			vm_page_busy_wait(m, FALSE, "cpgfr");
1922 			vm_page_unwire(m, 0);
1923 			vm_page_free(m);
1924 			--pages;
1925 			++m;
1926 		}
1927 
1928 	}
1929 }
1930 
1931 
1932 /*
1933  * Wait for sufficient free memory for nominal heavy-memory-use kernel
1934  * operations.
1935  *
1936  * WARNING!  Be sure never to call this in any vm_pageout code path, which
1937  *	     will trivially deadlock the system.
1938  */
1939 void
1940 vm_wait_nominal(void)
1941 {
1942 	while (vm_page_count_min(0))
1943 		vm_wait(0);
1944 }
1945 
1946 /*
1947  * Test if vm_wait_nominal() would block.
1948  */
1949 int
1950 vm_test_nominal(void)
1951 {
1952 	if (vm_page_count_min(0))
1953 		return(1);
1954 	return(0);
1955 }
1956 
1957 /*
1958  * Block until free pages are available for allocation, called in various
1959  * places before memory allocations.
1960  *
1961  * The caller may loop if vm_page_count_min() == FALSE so we cannot be
1962  * more generous than that.
1963  */
1964 void
1965 vm_wait(int timo)
1966 {
1967 	/*
1968 	 * never wait forever
1969 	 */
1970 	if (timo == 0)
1971 		timo = hz;
1972 	lwkt_gettoken(&vm_token);
1973 
1974 	if (curthread == pagethread) {
1975 		/*
1976 		 * The pageout daemon itself needs pages, this is bad.
1977 		 */
1978 		if (vm_page_count_min(0)) {
1979 			vm_pageout_pages_needed = 1;
1980 			tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo);
1981 		}
1982 	} else {
1983 		/*
1984 		 * Wakeup the pageout daemon if necessary and wait.
1985 		 *
1986 		 * Do not wait indefinitely for the target to be reached,
1987 		 * as load might prevent it from being reached any time soon.
1988 		 * But wait a little to try to slow down page allocations
1989 		 * and to give more important threads (the pagedaemon)
1990 		 * allocation priority.
1991 		 */
1992 		if (vm_page_count_target()) {
1993 			if (vm_pages_needed == 0) {
1994 				vm_pages_needed = 1;
1995 				wakeup(&vm_pages_needed);
1996 			}
1997 			++vm_pages_waiting;	/* SMP race ok */
1998 			tsleep(&vmstats.v_free_count, 0, "vmwait", timo);
1999 		}
2000 	}
2001 	lwkt_reltoken(&vm_token);
2002 }
2003 
2004 /*
2005  * Block until free pages are available for allocation
2006  *
2007  * Called only from vm_fault so that processes page faulting can be
2008  * easily tracked.
2009  */
2010 void
2011 vm_wait_pfault(void)
2012 {
2013 	/*
2014 	 * Wakeup the pageout daemon if necessary and wait.
2015 	 *
2016 	 * Do not wait indefinitely for the target to be reached,
2017 	 * as load might prevent it from being reached any time soon.
2018 	 * But wait a little to try to slow down page allocations
2019 	 * and to give more important threads (the pagedaemon)
2020 	 * allocation priority.
2021 	 */
2022 	if (vm_page_count_min(0)) {
2023 		lwkt_gettoken(&vm_token);
2024 		while (vm_page_count_severe()) {
2025 			if (vm_page_count_target()) {
2026 				thread_t td;
2027 
2028 				if (vm_pages_needed == 0) {
2029 					vm_pages_needed = 1;
2030 					wakeup(&vm_pages_needed);
2031 				}
2032 				++vm_pages_waiting;	/* SMP race ok */
2033 				tsleep(&vmstats.v_free_count, 0, "pfault", hz);
2034 
2035 				/*
2036 				 * Do not stay stuck in the loop if the system is trying
2037 				 * to kill the process.
2038 				 */
2039 				td = curthread;
2040 				if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
2041 					break;
2042 			}
2043 		}
2044 		lwkt_reltoken(&vm_token);
2045 	}
2046 }
2047 
2048 /*
2049  * Put the specified page on the active list (if appropriate).  Ensure
2050  * that act_count is at least ACT_INIT but do not otherwise mess with it.
2051  *
2052  * The caller should be holding the page busied ? XXX
2053  * This routine may not block.
2054  */
2055 void
2056 vm_page_activate(vm_page_t m)
2057 {
2058 	u_short oqueue;
2059 
2060 	vm_page_spin_lock(m);
2061 	if (m->queue - m->pc != PQ_ACTIVE) {
2062 		_vm_page_queue_spin_lock(m);
2063 		oqueue = _vm_page_rem_queue_spinlocked(m);
2064 		/* page is left spinlocked, queue is unlocked */
2065 
2066 		if (oqueue == PQ_CACHE)
2067 			mycpu->gd_cnt.v_reactivated++;
2068 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
2069 			if (m->act_count < ACT_INIT)
2070 				m->act_count = ACT_INIT;
2071 			_vm_page_add_queue_spinlocked(m, PQ_ACTIVE + m->pc, 0);
2072 		}
2073 		_vm_page_and_queue_spin_unlock(m);
2074 		if (oqueue == PQ_CACHE || oqueue == PQ_FREE)
2075 			pagedaemon_wakeup();
2076 	} else {
2077 		if (m->act_count < ACT_INIT)
2078 			m->act_count = ACT_INIT;
2079 		vm_page_spin_unlock(m);
2080 	}
2081 }
2082 
2083 /*
2084  * Helper routine for vm_page_free_toq() and vm_page_cache().  This
2085  * routine is called when a page has been added to the cache or free
2086  * queues.
2087  *
2088  * This routine may not block.
2089  */
2090 static __inline void
2091 vm_page_free_wakeup(void)
2092 {
2093 	/*
2094 	 * If the pageout daemon itself needs pages, then tell it that
2095 	 * there are some free.
2096 	 */
2097 	if (vm_pageout_pages_needed &&
2098 	    vmstats.v_cache_count + vmstats.v_free_count >=
2099 	    vmstats.v_pageout_free_min
2100 	) {
2101 		vm_pageout_pages_needed = 0;
2102 		wakeup(&vm_pageout_pages_needed);
2103 	}
2104 
2105 	/*
2106 	 * Wakeup processes that are waiting on memory.
2107 	 *
2108 	 * Generally speaking we want to wakeup stuck processes as soon as
2109 	 * possible.  !vm_page_count_min(0) is the absolute minimum point
2110 	 * where we can do this.  Wait a bit longer to reduce degenerate
2111 	 * re-blocking (vm_page_free_hysteresis).  The target check is just
2112 	 * to make sure the min-check w/hysteresis does not exceed the
2113 	 * normal target.
2114 	 */
2115 	if (vm_pages_waiting) {
2116 		if (!vm_page_count_min(vm_page_free_hysteresis) ||
2117 		    !vm_page_count_target()) {
2118 			vm_pages_waiting = 0;
2119 			wakeup(&vmstats.v_free_count);
2120 			++mycpu->gd_cnt.v_ppwakeups;
2121 		}
2122 #if 0
2123 		if (!vm_page_count_target()) {
2124 			/*
2125 			 * Plenty of pages are free, wakeup everyone.
2126 			 */
2127 			vm_pages_waiting = 0;
2128 			wakeup(&vmstats.v_free_count);
2129 			++mycpu->gd_cnt.v_ppwakeups;
2130 		} else if (!vm_page_count_min(0)) {
2131 			/*
2132 			 * Some pages are free, wakeup someone.
2133 			 */
2134 			int wcount = vm_pages_waiting;
2135 			if (wcount > 0)
2136 				--wcount;
2137 			vm_pages_waiting = wcount;
2138 			wakeup_one(&vmstats.v_free_count);
2139 			++mycpu->gd_cnt.v_ppwakeups;
2140 		}
2141 #endif
2142 	}
2143 }
2144 
2145 /*
2146  * Returns the given page to the PQ_FREE or PQ_HOLD list and disassociates
2147  * it from its VM object.
2148  *
2149  * The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
2150  * return (the page will have been freed).
2151  */
2152 void
2153 vm_page_free_toq(vm_page_t m)
2154 {
2155 	mycpu->gd_cnt.v_tfree++;
2156 	KKASSERT((m->flags & PG_MAPPED) == 0);
2157 	KKASSERT(m->flags & PG_BUSY);
2158 
2159 	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
2160 		kprintf("vm_page_free: pindex(%lu), busy(%d), "
2161 			"PG_BUSY(%d), hold(%d)\n",
2162 			(u_long)m->pindex, m->busy,
2163 			((m->flags & PG_BUSY) ? 1 : 0), m->hold_count);
2164 		if ((m->queue - m->pc) == PQ_FREE)
2165 			panic("vm_page_free: freeing free page");
2166 		else
2167 			panic("vm_page_free: freeing busy page");
2168 	}
2169 
2170 	/*
2171 	 * Remove from object, spinlock the page and its queues and
2172 	 * remove from any queue.  No queue spinlock will be held
2173 	 * after this section (because the page was removed from any
2174 	 * queue).
2175 	 */
2176 	vm_page_remove(m);
2177 	vm_page_and_queue_spin_lock(m);
2178 	_vm_page_rem_queue_spinlocked(m);
2179 
2180 	/*
2181 	 * No further management of fictitious pages occurs beyond object
2182 	 * and queue removal.
2183 	 */
2184 	if ((m->flags & PG_FICTITIOUS) != 0) {
2185 		vm_page_spin_unlock(m);
2186 		vm_page_wakeup(m);
2187 		return;
2188 	}
2189 
2190 	m->valid = 0;
2191 	vm_page_undirty(m);
2192 
2193 	if (m->wire_count != 0) {
2194 		if (m->wire_count > 1) {
2195 		    panic(
2196 			"vm_page_free: invalid wire count (%d), pindex: 0x%lx",
2197 			m->wire_count, (long)m->pindex);
2198 		}
2199 		panic("vm_page_free: freeing wired page");
2200 	}
2201 
2202 	/*
2203 	 * Clear the UNMANAGED flag when freeing an unmanaged page.
2204 	 * Clear the NEED_COMMIT flag
2205 	 */
2206 	if (m->flags & PG_UNMANAGED)
2207 		vm_page_flag_clear(m, PG_UNMANAGED);
2208 	if (m->flags & PG_NEED_COMMIT)
2209 		vm_page_flag_clear(m, PG_NEED_COMMIT);
2210 
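	/*
	 * A held page cannot be placed on the free queue yet; park it on
	 * PQ_HOLD instead so it can be freed once the last hold is dropped.
	 */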
2211 	if (m->hold_count != 0) {
2212 		_vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0);
2213 	} else {
2214 		_vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 0);
2215 	}
2216 
2217 	/*
2218 	 * This sequence allows us to clear PG_BUSY while still holding
2219 	 * its spin lock, which reduces contention vs allocators.  We
2220 	 * must not leave the queue locked or _vm_page_wakeup() may
2221 	 * deadlock.
2222 	 */
2223 	_vm_page_queue_spin_unlock(m);
2224 	if (_vm_page_wakeup(m)) {
2225 		vm_page_spin_unlock(m);
2226 		wakeup(m);
2227 	} else {
2228 		vm_page_spin_unlock(m);
2229 	}
2230 	vm_page_free_wakeup();
2231 }
2232 
2233 /*
2234  * vm_page_unmanage()
2235  *
2236  * Prevent PV management from being done on the page.  The page is
2237  * removed from the paging queues as if it were wired, and as a
2238  * consequence of no longer being managed the pageout daemon will not
2239  * touch it (since there is no way to locate the pte mappings for the
2240  * page).  madvise() calls that mess with the pmap will also no longer
2241  * operate on the page.
2242  *
2243  * Beyond that the page is still reasonably 'normal'.  Freeing the page
2244  * will clear the flag.
2245  *
2246  * This routine is used by OBJT_PHYS objects - objects using unswappable
2247  * physical memory as backing store rather than swap-backed memory and
2248  * will eventually be extended to support 4MB unmanaged physical
2249  * mappings.
2250  *
2251  * Caller must be holding the page busy.
2252  */
2253 void
2254 vm_page_unmanage(vm_page_t m)
2255 {
2256 	KKASSERT(m->flags & PG_BUSY);
2257 	if ((m->flags & PG_UNMANAGED) == 0) {
2258 		if (m->wire_count == 0)
2259 			vm_page_unqueue(m);
2260 	}
2261 	vm_page_flag_set(m, PG_UNMANAGED);
2262 }
2263 
2264 /*
2265  * Mark this page as wired down by yet another map, removing it from
2266  * paging queues as necessary.
2267  *
2268  * Caller must be holding the page busy.
2269  */
2270 void
2271 vm_page_wire(vm_page_t m)
2272 {
2273 	/*
2274 	 * Only bump the wire statistics if the page is not already wired,
2275 	 * and only unqueue the page if it is on some queue (if it is unmanaged
2276 	 * it is already off the queues).  Don't do anything with fictitious
2277 	 * pages because they are always wired.
2278 	 */
2279 	KKASSERT(m->flags & PG_BUSY);
2280 	if ((m->flags & PG_FICTITIOUS) == 0) {
2281 		if (atomic_fetchadd_int(&m->wire_count, 1) == 0) {
2282 			if ((m->flags & PG_UNMANAGED) == 0)
2283 				vm_page_unqueue(m);
2284 			atomic_add_int(&vmstats.v_wire_count, 1);
2285 		}
2286 		KASSERT(m->wire_count != 0,
2287 			("vm_page_wire: wire_count overflow m=%p", m));
2288 	}
2289 }
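/*
 * Typical pairing (sketch): both vm_page_wire() and vm_page_unwire()
 * require the caller to hold the page busy, e.g.:
 *
 *	vm_page_busy_wait(m, FALSE, "pgwire");
 *	vm_page_wire(m);
 *	vm_page_wakeup(m);
 *	... use the wired page ...
 *	vm_page_busy_wait(m, FALSE, "pgunwr");
 *	vm_page_unwire(m, 0);
 *	vm_page_wakeup(m);
 *
 * The wmesg strings above are illustrative only.
 */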
2290 
2291 /*
2292  * Release one wiring of this page, potentially enabling it to be paged again.
2293  *
2294  * Many pages placed on the inactive queue should actually go
2295  * into the cache, but it is difficult to figure out which.  What
2296  * we do instead, if the inactive target is well met, is to put
2297  * clean pages at the head of the inactive queue instead of the tail.
2298  * This will cause them to be moved to the cache more quickly and
2299  * if not actively re-referenced, freed more quickly.  If we just
2300  * stick these pages at the end of the inactive queue, heavy filesystem
2301  * meta-data accesses can cause an unnecessary paging load on memory bound
2302  * processes.  This optimization causes one-time-use metadata to be
2303  * reused more quickly.
2304  *
2305  * Pages marked PG_NEED_COMMIT are always activated and never placed on
2306  * the inactive queue.  This helps the pageout daemon determine memory
2307  * pressure and act on out-of-memory situations more quickly.
2308  *
2309  * BUT, if we are in a low-memory situation we have no choice but to
2310  * put clean pages on the cache queue.
2311  *
2312  * A number of routines use vm_page_unwire() to guarantee that the page
2313  * will go into either the inactive or active queues, and will NEVER
2314  * be placed in the cache - for example, just after dirtying a page.
2315  * Dirty pages in the cache are not allowed.
2316  *
2317  * This routine may not block.
2318  */
2319 void
2320 vm_page_unwire(vm_page_t m, int activate)
2321 {
2322 	KKASSERT(m->flags & PG_BUSY);
2323 	if (m->flags & PG_FICTITIOUS) {
2324 		/* do nothing */
2325 	} else if (m->wire_count <= 0) {
2326 		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
2327 	} else {
2328 		if (atomic_fetchadd_int(&m->wire_count, -1) == 1) {
2329 			atomic_add_int(&vmstats.v_wire_count, -1);
2330 			if (m->flags & PG_UNMANAGED) {
2331 				;
2332 			} else if (activate || (m->flags & PG_NEED_COMMIT)) {
2333 				vm_page_spin_lock(m);
2334 				_vm_page_add_queue_spinlocked(m,
2335 							PQ_ACTIVE + m->pc, 0);
2336 				_vm_page_and_queue_spin_unlock(m);
2337 			} else {
2338 				vm_page_spin_lock(m);
2339 				vm_page_flag_clear(m, PG_WINATCFLS);
2340 				_vm_page_add_queue_spinlocked(m,
2341 							PQ_INACTIVE + m->pc, 0);
2342 				++vm_swapcache_inactive_heuristic;
2343 				_vm_page_and_queue_spin_unlock(m);
2344 			}
2345 		}
2346 	}
2347 }
2348 
2349 /*
2350  * Move the specified page to the inactive queue.  If the page has
2351  * any associated swap, the swap is deallocated.
2352  *
2353  * Normally athead is 0 resulting in LRU operation.  athead is set
2354  * to 1 if we want this page to be 'as if it were placed in the cache',
2355  * except without unmapping it from the process address space.
2356  *
2357  * vm_page's spinlock must be held on entry and will remain held on return.
2358  * This routine may not block.
2359  */
2360 static void
2361 _vm_page_deactivate_locked(vm_page_t m, int athead)
2362 {
2363 	u_short oqueue;
2364 
2365 	/*
2366 	 * Ignore if already inactive.
2367 	 */
2368 	if (m->queue - m->pc == PQ_INACTIVE)
2369 		return;
2370 	_vm_page_queue_spin_lock(m);
2371 	oqueue = _vm_page_rem_queue_spinlocked(m);
2372 
2373 	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
2374 		if (oqueue == PQ_CACHE)
2375 			mycpu->gd_cnt.v_reactivated++;
2376 		vm_page_flag_clear(m, PG_WINATCFLS);
2377 		_vm_page_add_queue_spinlocked(m, PQ_INACTIVE + m->pc, athead);
2378 		if (athead == 0)
2379 			++vm_swapcache_inactive_heuristic;
2380 	}
2381 	/* NOTE: PQ_NONE if condition not taken */
2382 	_vm_page_queue_spin_unlock(m);
2383 	/* leaves vm_page spinlocked */
2384 }
2385 
2386 /*
2387  * Attempt to deactivate a page.
2388  *
2389  * No requirements.
2390  */
2391 void
2392 vm_page_deactivate(vm_page_t m)
2393 {
2394 	vm_page_spin_lock(m);
2395 	_vm_page_deactivate_locked(m, 0);
2396 	vm_page_spin_unlock(m);
2397 }
2398 
2399 void
2400 vm_page_deactivate_locked(vm_page_t m)
2401 {
2402 	_vm_page_deactivate_locked(m, 0);
2403 }
2404 
2405 /*
2406  * Attempt to move a busied page to PQ_CACHE, then unconditionally unbusy it.
2407  *
2408  * This function returns non-zero if it successfully moved the page to
2409  * PQ_CACHE.
2410  *
2411  * This function unconditionally unbusies the page on return.
2412  */
2413 int
2414 vm_page_try_to_cache(vm_page_t m)
2415 {
2416 	vm_page_spin_lock(m);
2417 	if (m->dirty || m->hold_count || m->wire_count ||
2418 	    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT))) {
2419 		if (_vm_page_wakeup(m)) {
2420 			vm_page_spin_unlock(m);
2421 			wakeup(m);
2422 		} else {
2423 			vm_page_spin_unlock(m);
2424 		}
2425 		return(0);
2426 	}
2427 	vm_page_spin_unlock(m);
2428 
2429 	/*
2430 	 * Page busied by us and no longer spinlocked.  Dirty pages cannot
2431 	 * be moved to the cache.
2432 	 */
2433 	vm_page_test_dirty(m);
2434 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2435 		vm_page_wakeup(m);
2436 		return(0);
2437 	}
2438 	vm_page_cache(m);
2439 	return(1);
2440 }
2441 
2442 /*
2443  * Attempt to free the page.  If we cannot free it, we do nothing.
2444  * 1 is returned on success, 0 on failure.
2445  *
2446  * No requirements.
2447  */
2448 int
2449 vm_page_try_to_free(vm_page_t m)
2450 {
2451 	vm_page_spin_lock(m);
2452 	if (vm_page_busy_try(m, TRUE)) {
2453 		vm_page_spin_unlock(m);
2454 		return(0);
2455 	}
2456 
2457 	/*
2458 	 * The page can be in any state, including already being on the free
2459 	 * queue.  Check to see if it really can be freed.
2460 	 */
2461 	if (m->dirty ||				/* can't free if it is dirty */
2462 	    m->hold_count ||			/* or held (XXX may be wrong) */
2463 	    m->wire_count ||			/* or wired */
2464 	    (m->flags & (PG_UNMANAGED |		/* or unmanaged */
2465 			 PG_NEED_COMMIT)) ||	/* or needs a commit */
2466 	    m->queue - m->pc == PQ_FREE ||	/* already on PQ_FREE */
2467 	    m->queue - m->pc == PQ_HOLD) {	/* already on PQ_HOLD */
2468 		if (_vm_page_wakeup(m)) {
2469 			vm_page_spin_unlock(m);
2470 			wakeup(m);
2471 		} else {
2472 			vm_page_spin_unlock(m);
2473 		}
2474 		return(0);
2475 	}
2476 	vm_page_spin_unlock(m);
2477 
2478 	/*
2479 	 * We can probably free the page.
2480 	 *
2481 	 * Page busied by us and no longer spinlocked.  Dirty pages will
2482 	 * not be freed by this function.    We have to re-test the
2483 	 * dirty bit after cleaning out the pmaps.
2484 	 */
2485 	vm_page_test_dirty(m);
2486 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2487 		vm_page_wakeup(m);
2488 		return(0);
2489 	}
2490 	vm_page_protect(m, VM_PROT_NONE);
2491 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2492 		vm_page_wakeup(m);
2493 		return(0);
2494 	}
2495 	vm_page_free(m);
2496 	return(1);
2497 }
2498 
2499 /*
2500  * vm_page_cache
2501  *
2502  * Put the specified page onto the page cache queue (if appropriate).
2503  *
2504  * The page must be busy, and this routine will release the busy and
2505  * possibly even free the page.
2506  */
2507 void
2508 vm_page_cache(vm_page_t m)
2509 {
2510 	if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
2511 	    m->busy || m->wire_count || m->hold_count) {
2512 		kprintf("vm_page_cache: attempting to cache busy/held page\n");
2513 		vm_page_wakeup(m);
2514 		return;
2515 	}
2516 
2517 	/*
2518 	 * Already in the cache (and thus not mapped)
2519 	 */
2520 	if ((m->queue - m->pc) == PQ_CACHE) {
2521 		KKASSERT((m->flags & PG_MAPPED) == 0);
2522 		vm_page_wakeup(m);
2523 		return;
2524 	}
2525 
2526 	/*
2527 	 * Caller is required to test m->dirty, but note that the act of
2528 	 * removing the page from its maps can cause it to become dirty
2529 	 * on an SMP system due to another cpu running in usermode.
2530 	 */
2531 	if (m->dirty) {
2532 		panic("vm_page_cache: caching a dirty page, pindex: %ld",
2533 			(long)m->pindex);
2534 	}
2535 
2536 	/*
2537 	 * Remove all pmaps and indicate that the page is not
2538 	 * writeable or mapped.  Our vm_page_protect() call may
2539 	 * have blocked (especially w/ VM_PROT_NONE), so recheck
2540 	 * everything.
2541 	 */
2542 	vm_page_protect(m, VM_PROT_NONE);
2543 	if ((m->flags & (PG_UNMANAGED | PG_MAPPED)) ||
2544 	    m->busy || m->wire_count || m->hold_count) {
2545 		vm_page_wakeup(m);
2546 	} else if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2547 		vm_page_deactivate(m);
2548 		vm_page_wakeup(m);
2549 	} else {
2550 		_vm_page_and_queue_spin_lock(m);
2551 		_vm_page_rem_queue_spinlocked(m);
2552 		_vm_page_add_queue_spinlocked(m, PQ_CACHE + m->pc, 0);
2553 		_vm_page_queue_spin_unlock(m);
2554 		if (_vm_page_wakeup(m)) {
2555 			vm_page_spin_unlock(m);
2556 			wakeup(m);
2557 		} else {
2558 			vm_page_spin_unlock(m);
2559 		}
2560 		vm_page_free_wakeup();
2561 	}
2562 }
2563 
2564 /*
2565  * vm_page_dontneed()
2566  *
2567  * Cache, deactivate, or do nothing as appropriate.  This routine
2568  * is typically used by madvise() MADV_DONTNEED.
2569  *
2570  * Generally speaking we want to move the page into the cache so
2571  * it gets reused quickly.  However, this can result in a silly syndrome
2572  * due to the page recycling too quickly.  Small objects will not be
2573  * fully cached.  On the other hand, if we move the page to the inactive
2574  * queue we wind up with a problem whereby very large objects
2575  * unnecessarily blow away our inactive and cache queues.
2576  *
2577  * The solution is to move the pages based on a fixed weighting.  We
2578  * either leave them alone, deactivate them, or move them to the cache,
2579  * where moving them to the cache has the highest weighting.
2580  * By forcing some pages into other queues we eventually force the
2581  * system to balance the queues, potentially recovering other unrelated
2582  * space from active.  The idea is to not force this to happen too
2583  * often.
2584  *
2585  * The page must be busied.
2586  */
2587 void
2588 vm_page_dontneed(vm_page_t m)
2589 {
2590 	static int dnweight;
2591 	int dnw;
2592 	int head;
2593 
2594 	dnw = ++dnweight;
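	/*
	 * dnweight is a simple global round-robin counter; the mask tests
	 * below split calls roughly 1:3:28 out of every 32 between
	 * leave-alone, deactivate, and cache-at-head.
	 */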
2595 
2596 	/*
2597 	 * Occasionally leave the page alone.
2598 	 */
2599 	if ((dnw & 0x01F0) == 0 ||
2600 	    m->queue - m->pc == PQ_INACTIVE ||
2601 	    m->queue - m->pc == PQ_CACHE
2602 	) {
2603 		if (m->act_count >= ACT_INIT)
2604 			--m->act_count;
2605 		return;
2606 	}
2607 
2608 	/*
2609 	 * If vm_page_dontneed() is inactivating a page, it must clear
2610 	 * the referenced flag; otherwise the pagedaemon will see references
2611 	 * on the page in the inactive queue and reactivate it. Until the
2612 	 * page can move to the cache queue, madvise's job is not done.
2613 	 */
2614 	vm_page_flag_clear(m, PG_REFERENCED);
2615 	pmap_clear_reference(m);
2616 
2617 	if (m->dirty == 0)
2618 		vm_page_test_dirty(m);
2619 
2620 	if (m->dirty || (dnw & 0x0070) == 0) {
2621 		/*
2622 		 * Deactivate the page 3 times out of 32.
2623 		 */
2624 		head = 0;
2625 	} else {
2626 		/*
2627 		 * Cache the page 28 times out of every 32.  Note that
2628 		 * the page is deactivated instead of cached, but placed
2629 		 * at the head of the queue instead of the tail.
2630 		 */
2631 		head = 1;
2632 	}
2633 	vm_page_spin_lock(m);
2634 	_vm_page_deactivate_locked(m, head);
2635 	vm_page_spin_unlock(m);
2636 }
2637 
2638 /*
2639  * These routines manipulate the 'soft busy' count for a page.  A soft busy
2640  * is almost like PG_BUSY except that it allows certain compatible operations
2641  * to occur on the page while it is busy.  For example, a page undergoing a
2642  * write can still be mapped read-only.
2643  *
2644  * Because vm_pages can overlap buffers, m->busy can be > 1.  m->busy is only
2645  * adjusted while the vm_page is PG_BUSY so the flash will occur when the
2646  * busy bit is cleared.
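 *
 * Both routines require the caller to hold the page hard-busied
 * (PG_BUSY), as asserted below.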
2647  */
2648 void
2649 vm_page_io_start(vm_page_t m)
2650 {
2651         KASSERT(m->flags & PG_BUSY, ("vm_page_io_start: page not busy!!!"));
2652         atomic_add_char(&m->busy, 1);
2653 	vm_page_flag_set(m, PG_SBUSY);
2654 }
2655 
2656 void
2657 vm_page_io_finish(vm_page_t m)
2658 {
2659         KASSERT(m->flags & PG_BUSY, ("vm_page_io_finish: page not busy!!!"));
2660         atomic_subtract_char(&m->busy, 1);
2661 	if (m->busy == 0)
2662 		vm_page_flag_clear(m, PG_SBUSY);
2663 }
2664 
2665 /*
2666  * Indicate that a clean VM page requires a filesystem commit and cannot
2667  * be reused.  Used by tmpfs.
2668  */
2669 void
2670 vm_page_need_commit(vm_page_t m)
2671 {
2672 	vm_page_flag_set(m, PG_NEED_COMMIT);
2673 	vm_object_set_writeable_dirty(m->object);
2674 }
2675 
2676 void
2677 vm_page_clear_commit(vm_page_t m)
2678 {
2679 	vm_page_flag_clear(m, PG_NEED_COMMIT);
2680 }
2681 
2682 /*
2683  * Grab a page, blocking if it is busy and allocating a page if necessary.
2684  * A busy page is returned or NULL.  The page may or may not be valid and
2685  * might not be on a queue (the caller is responsible for the disposition of
2686  * the page).
2687  *
2688  * If VM_ALLOC_ZERO is specified and the grab must allocate a new page, the
2689  * page will be zero'd and marked valid.
2690  *
2691  * If VM_ALLOC_FORCE_ZERO is specified the page will be zero'd and marked
2692  * valid even if it already exists.
2693  *
2694  * If VM_ALLOC_RETRY is specified this routine will never return NULL.  Also
2695  * note that VM_ALLOC_NORMAL must be specified if VM_ALLOC_RETRY is specified.
2696  * VM_ALLOC_NULL_OK is implied when VM_ALLOC_RETRY is specified.
2697  *
2698  * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
2699  * always returned if we had blocked.
2700  *
2701  * This routine may not be called from an interrupt.
2702  *
2703  * No other requirements.
2704  */
2705 vm_page_t
2706 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2707 {
2708 	vm_page_t m;
2709 	int error;
2710 	int shared = 1;
2711 
2712 	KKASSERT(allocflags &
2713 		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
2714 	vm_object_hold_shared(object);
2715 	for (;;) {
2716 		m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
2717 		if (error) {
2718 			vm_page_sleep_busy(m, TRUE, "pgrbwt");
2719 			if ((allocflags & VM_ALLOC_RETRY) == 0) {
2720 				m = NULL;
2721 				break;
2722 			}
2723 			/* retry */
2724 		} else if (m == NULL) {
2725 			if (shared) {
2726 				vm_object_upgrade(object);
2727 				shared = 0;
2728 			}
2729 			if (allocflags & VM_ALLOC_RETRY)
2730 				allocflags |= VM_ALLOC_NULL_OK;
2731 			m = vm_page_alloc(object, pindex,
2732 					  allocflags & ~VM_ALLOC_RETRY);
2733 			if (m)
2734 				break;
2735 			vm_wait(0);
2736 			if ((allocflags & VM_ALLOC_RETRY) == 0)
2737 				goto failed;
2738 		} else {
2739 			/* m found */
2740 			break;
2741 		}
2742 	}
2743 
2744 	/*
2745 	 * If VM_ALLOC_ZERO an invalid page will be zero'd and set valid.
2746 	 *
2747 	 * If VM_ALLOC_FORCE_ZERO the page is unconditionally zero'd and set
2748 	 * valid even if already valid.
2749 	 *
2750 	 * NOTE!  We have removed all of the PG_ZERO optimizations and also
2751 	 *	  removed the idle zeroing code.  These optimizations actually
2752 	 *	  slow things down on modern cpus because the zeroed area is
2753 	 *	  likely uncached, placing a memory-access burden on the
2754 	 *	  accessors taking the fault.
2755 	 *
2756 	 *	  By always zeroing the page in-line with the fault, no
2757 	 *	  dynamic ram reads are needed and the caches are hot, ready
2758 	 *	  for userland to access the memory.
2759 	 */
2760 	if (m->valid == 0) {
2761 		if (allocflags & (VM_ALLOC_ZERO | VM_ALLOC_FORCE_ZERO)) {
2762 			pmap_zero_page(VM_PAGE_TO_PHYS(m));
2763 			m->valid = VM_PAGE_BITS_ALL;
2764 		}
2765 	} else if (allocflags & VM_ALLOC_FORCE_ZERO) {
2766 		pmap_zero_page(VM_PAGE_TO_PHYS(m));
2767 		m->valid = VM_PAGE_BITS_ALL;
2768 	}
2769 failed:
2770 	vm_object_drop(object);
2771 	return(m);
2772 }
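/*
 * Example (sketch): grab a zeroed page that is guaranteed to be returned
 * (obj and idx stand for the caller's object and page index):
 *
 *	m = vm_page_grab(obj, idx,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *	... use the page, which is returned busied ...
 *	vm_page_wakeup(m);
 */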
2773 
2774 /*
2775  * Mapping function for valid bits or for dirty bits in
2776  * a page.  May not block.
2777  *
2778  * Inputs are required to range within a page.
2779  *
2780  * No requirements.
2781  * Non blocking.
2782  */
2783 int
2784 vm_page_bits(int base, int size)
2785 {
2786 	int first_bit;
2787 	int last_bit;
2788 
2789 	KASSERT(
2790 	    base + size <= PAGE_SIZE,
2791 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2792 	);
2793 
2794 	if (size == 0)		/* handle degenerate case */
2795 		return(0);
2796 
2797 	first_bit = base >> DEV_BSHIFT;
2798 	last_bit = (base + size - 1) >> DEV_BSHIFT;
2799 
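	/*
	 * One bit is returned per DEV_BSIZE chunk covered.  For example,
	 * with DEV_BSIZE = 512, base = 512 and size = 1024 cover chunks
	 * 1 and 2: (2 << 2) - (1 << 1) = 8 - 2 = 0x06.
	 */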
2800 	return ((2 << last_bit) - (1 << first_bit));
2801 }
2802 
2803 /*
2804  * Sets portions of a page valid and clean.  The arguments are expected
2805  * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2806  * of any partial chunks touched by the range.  The invalid portion of
2807  * such chunks will be zero'd.
2808  *
2809  * NOTE: When truncating a buffer vnode_pager_setsize() will automatically
2810  *	 align base to DEV_BSIZE so as not to mark clean a partially
2811  *	 truncated device block.  Otherwise the dirty page status might be
2812  *	 lost.
2813  *
2814  * This routine may not block.
2815  *
2816  * (base + size) must be less than or equal to PAGE_SIZE.
2817  */
2818 static void
2819 _vm_page_zero_valid(vm_page_t m, int base, int size)
2820 {
2821 	int frag;
2822 	int endoff;
2823 
2824 	if (size == 0)	/* handle degenerate case */
2825 		return;
2826 
2827 	/*
2828 	 * If the base is not DEV_BSIZE aligned and the valid
2829 	 * bit is clear, we have to zero out a portion of the
2830 	 * first block.
2831 	 */
2832 
2833 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2834 	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
2835 	) {
2836 		pmap_zero_page_area(
2837 		    VM_PAGE_TO_PHYS(m),
2838 		    frag,
2839 		    base - frag
2840 		);
2841 	}
2842 
2843 	/*
2844 	 * If the ending offset is not DEV_BSIZE aligned and the
2845 	 * valid bit is clear, we have to zero out a portion of
2846 	 * the last block.
2847 	 */
2848 
2849 	endoff = base + size;
2850 
2851 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2852 	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
2853 	) {
2854 		pmap_zero_page_area(
2855 		    VM_PAGE_TO_PHYS(m),
2856 		    endoff,
2857 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
2858 		);
2859 	}
2860 }
2861 
2862 /*
2863  * Set valid, clear dirty bits.  If validating the entire
2864  * page we can safely clear the pmap modify bit.  We also
2865  * use this opportunity to clear the PG_NOSYNC flag.  If a process
2866  * takes a write fault on a MAP_NOSYNC memory area the flag will
2867  * be set again.
2868  *
2869  * We set valid bits inclusive of any overlap, but we can only
2870  * clear dirty bits for DEV_BSIZE chunks that are fully within
2871  * the range.
2872  *
2873  * Page must be busied?
2874  * No other requirements.
2875  */
2876 void
2877 vm_page_set_valid(vm_page_t m, int base, int size)
2878 {
2879 	_vm_page_zero_valid(m, base, size);
2880 	m->valid |= vm_page_bits(base, size);
2881 }
2882 
2883 
2884 /*
2885  * Set valid bits and clear dirty bits.
2886  *
2887  * NOTE: This function does not clear the pmap modified bit.
2888  *	 Also note that e.g. NFS may use a byte-granular base
2889  *	 and size.
2890  *
2891  * WARNING: Page must be busied?  But vfs_clean_one_page() will call
2892  *	    this without necessarily busying the page (via bdwrite()).
2893  *	    So for now vm_token must also be held.
2894  *
2895  * No other requirements.
2896  */
2897 void
2898 vm_page_set_validclean(vm_page_t m, int base, int size)
2899 {
2900 	int pagebits;
2901 
2902 	_vm_page_zero_valid(m, base, size);
2903 	pagebits = vm_page_bits(base, size);
2904 	m->valid |= pagebits;
2905 	m->dirty &= ~pagebits;
2906 	if (base == 0 && size == PAGE_SIZE) {
2907 		/*pmap_clear_modify(m);*/
2908 		vm_page_flag_clear(m, PG_NOSYNC);
2909 	}
2910 }
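/*
 * Example: with DEV_BSIZE = 512, vm_page_set_validclean(m, 0, 600)
 * computes pagebits = 0x03, marking chunks 0 and 1 valid and clean.
 * If chunk 1 was not already valid, _vm_page_zero_valid() first zeroes
 * the unwritten tail of that chunk (offsets 600-1023).
 */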
2911 
2912 /*
2913  * Set valid & dirty.  Used by buwrite()
2914  *
2915  * WARNING: Page must be busied?  But vfs_dirty_one_page() will
2916  *	    call this function in buwrite() so for now vm_token must
2917  *	    be held.
2918  *
2919  * No other requirements.
2920  */
2921 void
2922 vm_page_set_validdirty(vm_page_t m, int base, int size)
2923 {
2924 	int pagebits;
2925 
2926 	pagebits = vm_page_bits(base, size);
2927 	m->valid |= pagebits;
2928 	m->dirty |= pagebits;
2929 	if (m->object)
2930 	       vm_object_set_writeable_dirty(m->object);
2931 }
2932 
2933 /*
2934  * Clear dirty bits.
2935  *
2936  * NOTE: This function does not clear the pmap modified bit.
2937  *	 Also note that e.g. NFS may use a byte-granular base
2938  *	 and size.
2939  *
2940  * Page must be busied?
2941  * No other requirements.
2942  */
2943 void
2944 vm_page_clear_dirty(vm_page_t m, int base, int size)
2945 {
2946 	m->dirty &= ~vm_page_bits(base, size);
2947 	if (base == 0 && size == PAGE_SIZE) {
2948 		/*pmap_clear_modify(m);*/
2949 		vm_page_flag_clear(m, PG_NOSYNC);
2950 	}
2951 }
2952 
2953 /*
2954  * Make the page all-dirty.
2955  *
2956  * Also make sure the related object and vnode reflect the fact that the
2957  * object may now contain a dirty page.
2958  *
2959  * Page must be busied?
2960  * No other requirements.
2961  */
2962 void
2963 vm_page_dirty(vm_page_t m)
2964 {
2965 #ifdef INVARIANTS
2966         int pqtype = m->queue - m->pc;
2967 #endif
2968         KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
2969                 ("vm_page_dirty: page in free/cache queue!"));
2970 	if (m->dirty != VM_PAGE_BITS_ALL) {
2971 		m->dirty = VM_PAGE_BITS_ALL;
2972 		if (m->object)
2973 			vm_object_set_writeable_dirty(m->object);
2974 	}
2975 }
2976 
2977 /*
2978  * Invalidates DEV_BSIZE'd chunks within a page.  Both the
2979  * valid and dirty bits for the affected areas are cleared.
2980  *
2981  * Page must be busied?
2982  * Does not block.
2983  * No other requirements.
2984  */
2985 void
2986 vm_page_set_invalid(vm_page_t m, int base, int size)
2987 {
2988 	int bits;
2989 
2990 	bits = vm_page_bits(base, size);
2991 	m->valid &= ~bits;
2992 	m->dirty &= ~bits;
2993 	m->object->generation++;
2994 }
2995 
2996 /*
2997  * The kernel assumes that the invalid portions of a page contain
2998  * garbage, but such pages can be mapped into memory by user code.
2999  * When this occurs, we must zero out the non-valid portions of the
3000  * page so user code sees what it expects.
3001  *
3002  * Pages are most often semi-valid when the end of a file is mapped
3003  * into memory and the file's size is not page aligned.
3004  *
3005  * Page must be busied?
3006  * No other requirements.
3007  */
3008 void
3009 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
3010 {
3011 	int b;
3012 	int i;
3013 
3014 	/*
3015 	 * Scan the valid bits looking for invalid sections that
3016 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
3017 	 * valid bit may be set) have already been zeroed by
3018 	 * vm_page_set_validclean().
3019 	 */
3020 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
3021 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
3022 		    (m->valid & (1 << i))
3023 		) {
3024 			if (i > b) {
3025 				pmap_zero_page_area(
3026 				    VM_PAGE_TO_PHYS(m),
3027 				    b << DEV_BSHIFT,
3028 				    (i - b) << DEV_BSHIFT
3029 				);
3030 			}
3031 			b = i + 1;
3032 		}
3033 	}
3034 
3035 	/*
3036 	 * setvalid is TRUE when we can safely set the zero'd areas
3037 	 * as being valid.  We can do this if there are no cache consistency
3038 	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
3039 	 */
3040 	if (setvalid)
3041 		m->valid = VM_PAGE_BITS_ALL;
3042 }
3043 
3044 /*
3045  * Is a (partial) page valid?  Note that the case where size == 0
3046  * will return FALSE in the degenerate case where the page is entirely
3047  * invalid, and TRUE otherwise.
3048  *
3049  * Does not block.
3050  * No other requirements.
3051  */
3052 int
3053 vm_page_is_valid(vm_page_t m, int base, int size)
3054 {
3055 	int bits = vm_page_bits(base, size);
3056 
3057 	if (m->valid && ((m->valid & bits) == bits))
3058 		return 1;
3059 	else
3060 		return 0;
3061 }
3062 
3063 /*
3064  * update dirty bits from pmap/mmu.  May not block.
3065  *
3066  * Caller must hold the page busy
3067  */
3068 void
3069 vm_page_test_dirty(vm_page_t m)
3070 {
3071 	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
3072 		vm_page_dirty(m);
3073 	}
3074 }
3075 
3076 /*
3077  * Register an action, associating it with its vm_page
3078  */
3079 void
3080 vm_page_register_action(vm_page_action_t action, vm_page_event_t event)
3081 {
3082 	struct vm_page_action_list *list;
3083 	int hv;
3084 
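	/*
	 * Hash the vm_page pointer (low 8 bits dropped) into the global
	 * action_list[] table.  vm_page_event_internal() uses the same
	 * hash to locate pending actions for a page.
	 */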
3085 	hv = (int)((intptr_t)action->m >> 8) & VMACTION_HMASK;
3086 	list = &action_list[hv];
3087 
3088 	lwkt_gettoken(&vm_token);
3089 	vm_page_flag_set(action->m, PG_ACTIONLIST);
3090 	action->event = event;
3091 	LIST_INSERT_HEAD(list, action, entry);
3092 	lwkt_reltoken(&vm_token);
3093 }
3094 
3095 /*
3096  * Unregister an action, disassociating it from its related vm_page
3097  */
3098 void
3099 vm_page_unregister_action(vm_page_action_t action)
3100 {
3101 	struct vm_page_action_list *list;
3102 	int hv;
3103 
3104 	lwkt_gettoken(&vm_token);
3105 	if (action->event != VMEVENT_NONE) {
3106 		action->event = VMEVENT_NONE;
3107 		LIST_REMOVE(action, entry);
3108 
3109 		hv = (int)((intptr_t)action->m >> 8) & VMACTION_HMASK;
3110 		list = &action_list[hv];
3111 		if (LIST_EMPTY(list))
3112 			vm_page_flag_clear(action->m, PG_ACTIONLIST);
3113 	}
3114 	lwkt_reltoken(&vm_token);
3115 }
3116 
3117 /*
3118  * Issue an event on a VM page.  Corresponding action structures are
3119  * removed from the page's list and called.
3120  *
3121  * If the vm_page has no more pending action events we clear its
3122  * PG_ACTIONLIST flag.
3123  */
3124 void
3125 vm_page_event_internal(vm_page_t m, vm_page_event_t event)
3126 {
3127 	struct vm_page_action_list *list;
3128 	struct vm_page_action *scan;
3129 	struct vm_page_action *next;
3130 	int hv;
3131 	int all;
3132 
3133 	hv = (int)((intptr_t)m >> 8) & VMACTION_HMASK;
3134 	list = &action_list[hv];
3135 	all = 1;
3136 
3137 	lwkt_gettoken(&vm_token);
3138 	LIST_FOREACH_MUTABLE(scan, list, entry, next) {
3139 		if (scan->m == m) {
3140 			if (scan->event == event) {
3141 				scan->event = VMEVENT_NONE;
3142 				LIST_REMOVE(scan, entry);
3143 				scan->func(m, scan);
3144 				/* XXX */
3145 			} else {
3146 				all = 0;
3147 			}
3148 		}
3149 	}
3150 	if (all)
3151 		vm_page_flag_clear(m, PG_ACTIONLIST);
3152 	lwkt_reltoken(&vm_token);
3153 }
3154 
3155 #include "opt_ddb.h"
3156 #ifdef DDB
3157 #include <sys/kernel.h>
3158 
3159 #include <ddb/ddb.h>
3160 
3161 DB_SHOW_COMMAND(page, vm_page_print_page_info)
3162 {
3163 	db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
3164 	db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
3165 	db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
3166 	db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
3167 	db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
3168 	db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
3169 	db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
3170 	db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
3171 	db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
3172 	db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
3173 }
3174 
3175 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3176 {
3177 	int i;
3178 	db_printf("PQ_FREE:");
3179 	for (i = 0; i < PQ_L2_SIZE; i++) {
3180 		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
3181 	}
3182 	db_printf("\n");
3183 
3184 	db_printf("PQ_CACHE:");
3185 	for (i = 0; i < PQ_L2_SIZE; i++) {
3186 		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
3187 	}
3188 	db_printf("\n");
3189 
3190 	db_printf("PQ_ACTIVE:");
3191 	for (i = 0; i < PQ_L2_SIZE; i++) {
3192 		db_printf(" %d", vm_page_queues[PQ_ACTIVE + i].lcnt);
3193 	}
3194 	db_printf("\n");
3195 
3196 	db_printf("PQ_INACTIVE:");
3197 	for (i = 0; i < PQ_L2_SIZE; i++) {
3198 		db_printf(" %d", vm_page_queues[PQ_INACTIVE + i].lcnt);
3199 	}
3200 	db_printf("\n");
3201 }
3202 #endif /* DDB */
3203