1 /*
2  * Copyright (c) 1991 Regents of the University of California.
3  * All rights reserved.
4  * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * The Mach Operating System project at Carnegie-Mellon University.
8  *
9  * This code is derived from software contributed to The DragonFly Project
10  * by Matthew Dillon <dillon@backplane.com>
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37  * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
38  */
39 
40 /*
41  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42  * All rights reserved.
43  *
44  * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45  *
46  * Permission to use, copy, modify and distribute this software and
47  * its documentation is hereby granted, provided that both the copyright
48  * notice and this permission notice appear in all copies of the
49  * software, derivative works or modified versions, and any portions
50  * thereof, and that both notices appear in supporting documentation.
51  *
52  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55  *
56  * Carnegie Mellon requests users of this software to return to
57  *
58  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59  *  School of Computer Science
60  *  Carnegie Mellon University
61  *  Pittsburgh PA 15213-3890
62  *
63  * any improvements or extensions that they make and grant Carnegie the
64  * rights to redistribute these changes.
65  */
66 /*
67  * Resident memory management module.  The module manipulates 'VM pages'.
68  * A VM page is the core building block for memory management.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/proc.h>
75 #include <sys/vmmeter.h>
76 #include <sys/vnode.h>
77 #include <sys/kernel.h>
78 #include <sys/alist.h>
79 #include <sys/sysctl.h>
80 #include <sys/cpu_topology.h>
81 
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <sys/lock.h>
85 #include <vm/vm_kern.h>
86 #include <vm/pmap.h>
87 #include <vm/vm_map.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_pageout.h>
91 #include <vm/vm_pager.h>
92 #include <vm/vm_extern.h>
93 #include <vm/swap_pager.h>
94 
95 #include <machine/inttypes.h>
96 #include <machine/md_var.h>
97 #include <machine/specialreg.h>
98 #include <machine/bus_dma.h>
99 
100 #include <vm/vm_page2.h>
101 #include <sys/spinlock2.h>
102 
103 /*
104  * SET - Minimum required set associative size, must be a power of 2.  We
105  *	 want this to match or exceed the set-associativeness of the cpu.
106  *	 want this to match or exceed the set associativity of the cpu.
107  * GRP - A larger set that allows bleed-over into the domains of other
108  *	 nearby cpus.  Also must be a power of 2.  Used by the page zeroing
109  *	 code to smooth things out a bit.
110  */
111 #define PQ_SET_ASSOC		16
112 #define PQ_SET_ASSOC_MASK	(PQ_SET_ASSOC - 1)
113 
114 #define PQ_GRP_ASSOC		(PQ_SET_ASSOC * 2)
115 #define PQ_GRP_ASSOC_MASK	(PQ_GRP_ASSOC - 1)
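/*
 * Illustrative sketch (an addition, not used by the code): a color is
 * kept within its 16-way set by masking.  The i'th member of the set
 * containing 'pg_color' can be selected as:
 *
 *	index = (pg_color & ~PQ_SET_ASSOC_MASK) |
 *		((pg_color + i) & PQ_SET_ASSOC_MASK);
 *
 * The nearby-queue scan in _vm_page_list_find2() below uses the same
 * construction with progressively wider masks.
 */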
116 
117 static void vm_page_queue_init(void);
118 static void vm_page_free_wakeup(void);
119 static vm_page_t vm_page_select_cache(u_short pg_color);
120 static vm_page_t _vm_page_list_find2(int basequeue, int index);
121 static void _vm_page_deactivate_locked(vm_page_t m, int athead);
122 
123 /*
124  * Array of tailq lists
125  */
126 __cachealign struct vpgqueues vm_page_queues[PQ_COUNT];
127 
128 static volatile int vm_pages_waiting;
129 static struct alist vm_contig_alist;
130 static struct almeta vm_contig_ameta[ALIST_RECORDS_65536];
131 static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER(&vm_contig_spin, "vm_contig_spin");
132 
133 static u_long vm_dma_reserved = 0;
134 TUNABLE_ULONG("vm.dma_reserved", &vm_dma_reserved);
135 SYSCTL_ULONG(_vm, OID_AUTO, dma_reserved, CTLFLAG_RD, &vm_dma_reserved, 0,
136 	    "Memory reserved for DMA");
137 SYSCTL_UINT(_vm, OID_AUTO, dma_free_pages, CTLFLAG_RD,
138 	    &vm_contig_alist.bl_free, 0, "Free DMA pages remaining");
139 
140 static int vm_contig_verbose = 0;
141 TUNABLE_INT("vm.contig_verbose", &vm_contig_verbose);
142 
143 RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
144 	     vm_pindex_t, pindex);
145 
146 static void
147 vm_page_queue_init(void)
148 {
149 	int i;
150 
151 	for (i = 0; i < PQ_L2_SIZE; i++)
152 		vm_page_queues[PQ_FREE+i].cnt_offset =
153 			offsetof(struct vmstats, v_free_count);
154 	for (i = 0; i < PQ_L2_SIZE; i++)
155 		vm_page_queues[PQ_CACHE+i].cnt_offset =
156 			offsetof(struct vmstats, v_cache_count);
157 	for (i = 0; i < PQ_L2_SIZE; i++)
158 		vm_page_queues[PQ_INACTIVE+i].cnt_offset =
159 			offsetof(struct vmstats, v_inactive_count);
160 	for (i = 0; i < PQ_L2_SIZE; i++)
161 		vm_page_queues[PQ_ACTIVE+i].cnt_offset =
162 			offsetof(struct vmstats, v_active_count);
163 	for (i = 0; i < PQ_L2_SIZE; i++)
164 		vm_page_queues[PQ_HOLD+i].cnt_offset =
165 			offsetof(struct vmstats, v_active_count);
166 	/* PQ_NONE has no queue */
167 
168 	for (i = 0; i < PQ_COUNT; i++) {
169 		TAILQ_INIT(&vm_page_queues[i].pl);
170 		spin_init(&vm_page_queues[i].spin, "vm_page_queue_init");
171 	}
172 }
173 
174 /*
175  * note: place in initialized data section?  Is this necessary?
176  */
177 vm_pindex_t first_page = 0;
178 vm_pindex_t vm_page_array_size = 0;
179 vm_page_t vm_page_array = NULL;
180 vm_paddr_t vm_low_phys_reserved;
181 
182 /*
183  * (low level boot)
184  *
185  * Sets the page size, perhaps based upon the memory size.
186  * Must be called before any use of page-size dependent functions.
187  */
188 void
189 vm_set_page_size(void)
190 {
191 	if (vmstats.v_page_size == 0)
192 		vmstats.v_page_size = PAGE_SIZE;
193 	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
194 		panic("vm_set_page_size: page size not a power of two");
195 }
196 
197 /*
198  * (low level boot)
199  *
200  * Add a new page to the freelist for use by the system.  New pages
201  * are added to both the head and tail of the associated free page
202  * queue in a bottom-up fashion, so both zero'd and non-zero'd page
203  * requests pull 'recent' adds (higher physical addresses) first.
204  *
205  * Beware that the page zeroing daemon will also be running soon after
206  * boot, moving pages from the head to the tail of the PQ_FREE queues.
207  *
208  * Must be called in a critical section.
209  */
210 static void
211 vm_add_new_page(vm_paddr_t pa)
212 {
213 	struct vpgqueues *vpq;
214 	vm_page_t m;
215 
216 	m = PHYS_TO_VM_PAGE(pa);
217 	m->phys_addr = pa;
218 	m->flags = 0;
219 	m->pat_mode = PAT_WRITE_BACK;
220 	m->pc = (pa >> PAGE_SHIFT);
221 
222 	/*
223 	 * Twist for cpu localization in addition to page coloring, so
224 	 * different cpus selecting by m->queue get different page colors.
225 	 */
226 	m->pc ^= ((pa >> PAGE_SHIFT) / PQ_L2_SIZE);
227 	m->pc ^= ((pa >> PAGE_SHIFT) / (PQ_L2_SIZE * PQ_L2_SIZE));
228 	m->pc &= PQ_L2_MASK;
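	/*
	 * Worked example (illustrative, assuming PQ_L2_SIZE == 256):
	 * pa == 0x12345000 gives page index 0x12345, so the low byte
	 * 0x45 is xor'd with the low bytes of 0x12345 / 256 (0x23) and
	 * of 0x12345 / 65536 (0x01), yielding a final color of 0x67.
	 */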
229 
230 	/*
231 	 * Reserve a certain number of contiguous low memory pages for
232 	 * contigmalloc() to use.
233 	 */
234 	if (pa < vm_low_phys_reserved) {
235 		atomic_add_long(&vmstats.v_page_count, 1);
236 		atomic_add_long(&vmstats.v_dma_pages, 1);
237 		m->queue = PQ_NONE;
238 		m->wire_count = 1;
239 		atomic_add_long(&vmstats.v_wire_count, 1);
240 		alist_free(&vm_contig_alist, pa >> PAGE_SHIFT, 1);
241 		return;
242 	}
243 
244 	/*
245 	 * General page
246 	 */
247 	m->queue = m->pc + PQ_FREE;
248 	KKASSERT(m->dirty == 0);
249 
250 	atomic_add_long(&vmstats.v_page_count, 1);
251 	atomic_add_long(&vmstats.v_free_count, 1);
252 	vpq = &vm_page_queues[m->queue];
253 	TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
254 	++vpq->lcnt;
255 }
256 
257 /*
258  * (low level boot)
259  *
260  * Initializes the resident memory module.
261  *
262  * Preallocates memory for critical VM structures and arrays prior to
263  * kernel_map becoming available.
264  *
265  * Memory is allocated from (virtual2_start, virtual2_end) if available,
266  * otherwise memory is allocated from (virtual_start, virtual_end).
267  *
268  * On x86-64 (virtual_start, virtual_end) is only 2GB and may not be
269  * large enough to hold vm_page_array & other structures for machines with
270  * large amounts of ram, so we want to use virtual2* when available.
271  */
272 void
273 vm_page_startup(void)
274 {
275 	vm_offset_t vaddr = virtual2_start ? virtual2_start : virtual_start;
276 	vm_offset_t mapped;
277 	vm_pindex_t npages;
278 	vm_paddr_t page_range;
279 	vm_paddr_t new_end;
280 	int i;
281 	vm_paddr_t pa;
282 	vm_paddr_t last_pa;
283 	vm_paddr_t end;
284 	vm_paddr_t biggestone, biggestsize;
285 	vm_paddr_t total;
286 	vm_page_t m;
287 
288 	total = 0;
289 	biggestsize = 0;
290 	biggestone = 0;
291 	vaddr = round_page(vaddr);
292 
293 	/*
294 	 * Make sure ranges are page-aligned.
295 	 */
296 	for (i = 0; phys_avail[i].phys_end; ++i) {
297 		phys_avail[i].phys_beg = round_page64(phys_avail[i].phys_beg);
298 		phys_avail[i].phys_end = trunc_page64(phys_avail[i].phys_end);
299 		if (phys_avail[i].phys_end < phys_avail[i].phys_beg)
300 			phys_avail[i].phys_end = phys_avail[i].phys_beg;
301 	}
302 
303 	/*
304 	 * Locate largest block
305 	 */
306 	for (i = 0; phys_avail[i].phys_end; ++i) {
307 		vm_paddr_t size = phys_avail[i].phys_end -
308 				  phys_avail[i].phys_beg;
309 
310 		if (size > biggestsize) {
311 			biggestone = i;
312 			biggestsize = size;
313 		}
314 		total += size;
315 	}
316 	--i;	/* adjust to last entry for use down below */
317 
318 	end = phys_avail[biggestone].phys_end;
319 	end = trunc_page(end);
320 
321 	/*
322 	 * Initialize the queue headers for the free queue, the active queue
323 	 * and the inactive queue.
324 	 */
325 	vm_page_queue_init();
326 
327 #if !defined(_KERNEL_VIRTUAL)
328 	/*
329 	 * VKERNELs don't support minidumps and as such don't need
330 	 * vm_page_dump
331 	 *
332 	 * Allocate a bitmap to indicate that a random physical page
333 	 * needs to be included in a minidump.
334 	 *
335 	 * The amd64 port needs this to indicate which direct map pages
336 	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
337 	 *
338 	 * However, x86 still uses this workspace internally within the
339 	 * minidump code.  In theory it is not needed on x86, but it is
340 	 * retained in case the sf_buf code decides to use it.
341 	 */
342 	page_range = phys_avail[i].phys_end / PAGE_SIZE;
343 	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
344 	end -= vm_page_dump_size;
345 	vm_page_dump = (void *)pmap_map(&vaddr, end, end + vm_page_dump_size,
346 					VM_PROT_READ | VM_PROT_WRITE);
347 	bzero((void *)vm_page_dump, vm_page_dump_size);
348 #endif
349 	/*
350 	 * Compute the number of pages of memory that will be available for
351 	 * use (taking into account the overhead of a page structure per
352 	 * page).
353 	 */
354 	first_page = phys_avail[0].phys_beg / PAGE_SIZE;
355 	page_range = phys_avail[i].phys_end / PAGE_SIZE - first_page;
356 	npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;
357 
358 #ifndef _KERNEL_VIRTUAL
359 	/*
360 	 * (only applies to real kernels)
361 	 *
362 	 * Reserve a large amount of low memory for potential 32-bit DMA
363 	 * space allocations.  Once device initialization is complete we
364 	 * release most of it, but keep (vm_dma_reserved) memory reserved
365 	 * for later use, typically for X / graphics.  Through trial and
366 	 * error we find that GPUs usually require ~60-100MB or so.
367 	 *
368 	 * By default, 128M is left in reserve on machines with 2G+ of ram.
369 	 */
370 	vm_low_phys_reserved = (vm_paddr_t)65536 << PAGE_SHIFT;
371 	if (vm_low_phys_reserved > total / 4)
372 		vm_low_phys_reserved = total / 4;
373 	if (vm_dma_reserved == 0) {
374 		vm_dma_reserved = 128 * 1024 * 1024;	/* 128MB */
375 		if (vm_dma_reserved > total / 16)
376 			vm_dma_reserved = total / 16;
377 	}
378 #endif
379 	alist_init(&vm_contig_alist, 65536, vm_contig_ameta,
380 		   ALIST_RECORDS_65536);
381 
382 	/*
383 	 * Initialize the mem entry structures now, and put them in the free
384 	 * queue.
385 	 */
386 	if (bootverbose && ctob(physmem) >= 400LL*1024*1024*1024)
387 		kprintf("initializing vm_page_array ");
388 	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
389 	mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE);
390 	vm_page_array = (vm_page_t)mapped;
391 
392 #if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
393 	/*
394 	 * since pmap_map on amd64 returns stuff out of a direct-map region,
395 	 * we have to manually add these pages to the minidump tracking so
396 	 * that they can be dumped, including the vm_page_array.
397 	 */
398 	for (pa = new_end;
399 	     pa < phys_avail[biggestone].phys_end;
400 	     pa += PAGE_SIZE) {
401 		dump_add_page(pa);
402 	}
403 #endif
404 
405 	/*
406 	 * Clear all of the page structures, run basic initialization so
407 	 * PHYS_TO_VM_PAGE() operates properly even on pages not in the
408 	 * map.
409 	 */
410 	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
411 	vm_page_array_size = page_range;
412 	if (bootverbose && ctob(physmem) >= 400LL*1024*1024*1024)
413 		kprintf("size = 0x%zx\n", vm_page_array_size);
414 
415 	m = &vm_page_array[0];
416 	pa = ptoa(first_page);
417 	for (i = 0; i < page_range; ++i) {
418 		spin_init(&m->spin, "vm_page");
419 		m->phys_addr = pa;
420 		pa += PAGE_SIZE;
421 		++m;
422 	}
423 
424 	/*
425 	 * Construct the free queue(s) in ascending order (by physical
426 	 * address) so that the first 16MB of physical memory is allocated
427 	 * last rather than first.  On large-memory machines, this avoids
428 	 * the exhaustion of low physical memory before isa_dma_init has run.
429 	 */
430 	vmstats.v_page_count = 0;
431 	vmstats.v_free_count = 0;
432 	for (i = 0; phys_avail[i].phys_end && npages > 0; ++i) {
433 		pa = phys_avail[i].phys_beg;
434 		if (i == biggestone)
435 			last_pa = new_end;
436 		else
437 			last_pa = phys_avail[i].phys_end;
438 		while (pa < last_pa && npages-- > 0) {
439 			vm_add_new_page(pa);
440 			pa += PAGE_SIZE;
441 		}
442 	}
443 	if (virtual2_start)
444 		virtual2_start = vaddr;
445 	else
446 		virtual_start = vaddr;
447 	mycpu->gd_vmstats = vmstats;
448 }
449 
450 /*
451  * Reorganize VM pages based on NUMA data.  May be called as many times as
452  * necessary.  Will reorganize the vm_page_t page color and related queue(s)
453  * to allow vm_page_alloc() to choose pages based on socket affinity.
454  *
455  * NOTE: This function is only called while we are still in UP mode, so
456  *	 we only need a critical section to protect the queues (which
457  *	 saves a lot of time, there are likely a ton of pages).
458  */
459 void
460 vm_numa_organize(vm_paddr_t ran_beg, vm_paddr_t bytes, int physid)
461 {
462 	vm_paddr_t scan_beg;
463 	vm_paddr_t scan_end;
464 	vm_paddr_t ran_end;
465 	struct vpgqueues *vpq;
466 	vm_page_t m;
467 	vm_page_t mend;
468 	int i;
469 	int socket_mod;
470 	int socket_value;
471 
472 	/*
473 	 * If there is no physical topology information, or only one
474 	 * socket, there is nothing to reorganize.
475 	 */
476 	if (cpu_topology_phys_ids <= 1 ||
477 	    cpu_topology_core_ids == 0) {
478 		return;
479 	}
480 
481 	/*
482 	 * Setup for our iteration.  Note that ACPI may iterate CPU
483 	 * sockets starting at 0 or 1 or some other number.  The
484 	 * cpu_topology code takes it modulo the socket count.
485 	 */
486 	ran_end = ran_beg + bytes;
487 	physid %= cpu_topology_phys_ids;
488 
489 	socket_mod = PQ_L2_SIZE / cpu_topology_phys_ids;
490 	socket_value = physid * socket_mod;
491 	mend = &vm_page_array[vm_page_array_size];
492 
493 	crit_enter();
494 
495 	/*
496 	 * Adjust vm_page->pc and requeue all affected pages.  The
497 	 * allocator will then be able to localize memory allocations
498 	 * to some degree.
499 	 */
500 	for (i = 0; phys_avail[i].phys_end; ++i) {
501 		scan_beg = phys_avail[i].phys_beg;
502 		scan_end = phys_avail[i].phys_end;
503 		if (scan_end <= ran_beg)
504 			continue;
505 		if (scan_beg >= ran_end)
506 			continue;
507 		if (scan_beg < ran_beg)
508 			scan_beg = ran_beg;
509 		if (scan_end > ran_end)
510 			scan_end = ran_end;
511 		if (atop(scan_end) > first_page + vm_page_array_size)
512 			scan_end = ptoa(first_page + vm_page_array_size);
513 
514 		m = PHYS_TO_VM_PAGE(scan_beg);
515 		while (scan_beg < scan_end) {
516 			KKASSERT(m < mend);
517 			if (m->queue != PQ_NONE) {
518 				vpq = &vm_page_queues[m->queue];
519 				TAILQ_REMOVE(&vpq->pl, m, pageq);
520 				--vpq->lcnt;
521 				/* queue doesn't change, no need to adj cnt */
522 				m->queue -= m->pc;
523 				m->pc %= socket_mod;
524 				m->pc += socket_value;
525 				m->pc &= PQ_L2_MASK;
526 				m->queue += m->pc;
527 				vpq = &vm_page_queues[m->queue];
528 				TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
529 				++vpq->lcnt;
530 				/* queue doesn't change, no need to adj cnt */
531 			} else {
532 				m->pc %= socket_mod;
533 				m->pc += socket_value;
534 				m->pc &= PQ_L2_MASK;
535 			}
536 			scan_beg += PAGE_SIZE;
537 			++m;
538 		}
539 	}
540 	crit_exit();
541 }
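/*
 * Worked example (illustrative, assuming PQ_L2_SIZE == 256 and two
 * sockets): socket_mod is 128 and a page on socket 1 gets
 * m->pc = (m->pc % 128) + 128, i.e. colors 0-127 map to socket 0 and
 * colors 128-255 map to socket 1.
 */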
542 
543 /*
544  * We reserve a ton of memory for contigmalloc() early in boot.  Now
545  * that most drivers have initialized we want to return most of the
546  * remaining free reserve back to the VM page queues so those pages can
547  * be used for normal allocations.
548  *
549  * We leave vm_dma_reserved bytes worth of free pages in the reserve pool.
550  */
551 static void
552 vm_page_startup_finish(void *dummy __unused)
553 {
554 	alist_blk_t blk;
555 	alist_blk_t rblk;
556 	alist_blk_t count;
557 	alist_blk_t xcount;
558 	alist_blk_t bfree;
559 	vm_page_t m;
560 
561 	spin_lock(&vm_contig_spin);
562 	for (;;) {
563 		bfree = alist_free_info(&vm_contig_alist, &blk, &count);
564 		if (bfree <= vm_dma_reserved / PAGE_SIZE)
565 			break;
566 		if (count == 0)
567 			break;
568 
569 		/*
570 		 * Figure out how much of the initial reserve we have to
571 		 * free in order to reach our target.
572 		 */
573 		bfree -= vm_dma_reserved / PAGE_SIZE;
574 		if (count > bfree) {
575 			blk += count - bfree;
576 			count = bfree;
577 		}
578 
579 		/*
580 		 * Calculate the largest power of 2 <= count.
581 		 */
582 		for (xcount = 1; xcount <= count; xcount <<= 1)
583 			;
584 		xcount >>= 1;
585 		blk += count - xcount;
586 		count = xcount;
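		/*
		 * e.g. (illustrative): for count == 25 the loop above
		 * yields xcount == 16, so blk advances by 9 and the
		 * trailing 16-page run is freed below.
		 */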
587 
588 		/*
589 		 * Allocate the pages from the alist, then free them to
590 		 * the normal VM page queues.
591 		 *
592 		 * Pages allocated from the alist are wired.  We have to
593 		 * busy, unwire, and free them.  We must also adjust
594 		 * vm_low_phys_reserved before freeing any pages to prevent
595 		 * confusion.
596 		 */
597 		rblk = alist_alloc(&vm_contig_alist, blk, count);
598 		if (rblk != blk) {
599 			kprintf("vm_page_startup_finish: Unable to return "
600 				"dma space @0x%08x/%d -> 0x%08x\n",
601 				blk, count, rblk);
602 			break;
603 		}
604 		atomic_add_long(&vmstats.v_dma_pages, -(long)count);
605 		spin_unlock(&vm_contig_spin);
606 
607 		m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
608 		vm_low_phys_reserved = VM_PAGE_TO_PHYS(m);
609 		while (count) {
610 			vm_page_busy_wait(m, FALSE, "cpgfr");
611 			vm_page_unwire(m, 0);
612 			vm_page_free(m);
613 			--count;
614 			++m;
615 		}
616 		spin_lock(&vm_contig_spin);
617 	}
618 	spin_unlock(&vm_contig_spin);
619 
620 	/*
621 	 * Print out how much DMA space drivers have already allocated and
622 	 * how much is left over.
623 	 */
624 	kprintf("DMA space used: %jdk, remaining available: %jdk\n",
625 		(intmax_t)(vmstats.v_dma_pages - vm_contig_alist.bl_free) *
626 		(PAGE_SIZE / 1024),
627 		(intmax_t)vm_contig_alist.bl_free * (PAGE_SIZE / 1024));
628 }
629 SYSINIT(vm_pgend, SI_SUB_PROC0_POST, SI_ORDER_ANY,
630 	vm_page_startup_finish, NULL);
631 
632 
633 /*
634  * Scan comparison function for Red-Black tree scans.  An inclusive
635  * (start,end) is expected.  Other fields are not used.
636  */
637 int
638 rb_vm_page_scancmp(struct vm_page *p, void *data)
639 {
640 	struct rb_vm_page_scan_info *info = data;
641 
642 	if (p->pindex < info->start_pindex)
643 		return(-1);
644 	if (p->pindex > info->end_pindex)
645 		return(1);
646 	return(0);
647 }
648 
649 int
650 rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
651 {
652 	if (p1->pindex < p2->pindex)
653 		return(-1);
654 	if (p1->pindex > p2->pindex)
655 		return(1);
656 	return(0);
657 }
658 
659 void
660 vm_page_init(vm_page_t m)
661 {
662 	/* do nothing for now.  Called from pmap_page_init() */
663 }
664 
665 /*
666  * Each page queue has its own spin lock, which is fairly optimal, at
667  * least for allocating and freeing pages.
668  *
669  * The caller must hold the vm_page_spin_lock() before locking a vm_page's
670  * queue spinlock via this function.  Also note that m->queue cannot change
671  * unless both the page and queue are locked.
672  */
673 static __inline
674 void
675 _vm_page_queue_spin_lock(vm_page_t m)
676 {
677 	u_short queue;
678 
679 	queue = m->queue;
680 	if (queue != PQ_NONE) {
681 		spin_lock(&vm_page_queues[queue].spin);
682 		KKASSERT(queue == m->queue);
683 	}
684 }
685 
686 static __inline
687 void
688 _vm_page_queue_spin_unlock(vm_page_t m)
689 {
690 	u_short queue;
691 
692 	queue = m->queue;
693 	cpu_ccfence();
694 	if (queue != PQ_NONE)
695 		spin_unlock(&vm_page_queues[queue].spin);
696 }
697 
698 static __inline
699 void
700 _vm_page_queues_spin_lock(u_short queue)
701 {
702 	cpu_ccfence();
703 	if (queue != PQ_NONE)
704 		spin_lock(&vm_page_queues[queue].spin);
705 }
706 
707 
708 static __inline
709 void
710 _vm_page_queues_spin_unlock(u_short queue)
711 {
712 	cpu_ccfence();
713 	if (queue != PQ_NONE)
714 		spin_unlock(&vm_page_queues[queue].spin);
715 }
716 
717 void
718 vm_page_queue_spin_lock(vm_page_t m)
719 {
720 	_vm_page_queue_spin_lock(m);
721 }
722 
723 void
724 vm_page_queues_spin_lock(u_short queue)
725 {
726 	_vm_page_queues_spin_lock(queue);
727 }
728 
729 void
730 vm_page_queue_spin_unlock(vm_page_t m)
731 {
732 	_vm_page_queue_spin_unlock(m);
733 }
734 
735 void
736 vm_page_queues_spin_unlock(u_short queue)
737 {
738 	_vm_page_queues_spin_unlock(queue);
739 }
740 
741 /*
742  * This locks the specified vm_page and its queue in the proper order
743  * (page first, then queue).  The queue may change so the caller must
744  * recheck on return.
745  */
746 static __inline
747 void
748 _vm_page_and_queue_spin_lock(vm_page_t m)
749 {
750 	vm_page_spin_lock(m);
751 	_vm_page_queue_spin_lock(m);
752 }
753 
754 static __inline
755 void
756 _vm_page_and_queue_spin_unlock(vm_page_t m)
757 {
758 	_vm_page_queues_spin_unlock(m->queue);
759 	vm_page_spin_unlock(m);
760 }
761 
762 void
763 vm_page_and_queue_spin_unlock(vm_page_t m)
764 {
765 	_vm_page_and_queue_spin_unlock(m);
766 }
767 
768 void
769 vm_page_and_queue_spin_lock(vm_page_t m)
770 {
771 	_vm_page_and_queue_spin_lock(m);
772 }
773 
774 /*
775  * Helper function removes vm_page from its current queue.
776  * Returns the base queue the page used to be on.
777  *
778  * The vm_page and the queue must be spinlocked.
779  * This function will unlock the queue but leave the page spinlocked.
780  */
781 static __inline u_short
782 _vm_page_rem_queue_spinlocked(vm_page_t m)
783 {
784 	struct vpgqueues *pq;
785 	u_short queue;
786 	u_short oqueue;
787 	long *cnt;
788 
789 	queue = m->queue;
790 	if (queue != PQ_NONE) {
791 		pq = &vm_page_queues[queue];
792 		TAILQ_REMOVE(&pq->pl, m, pageq);
793 
794 		/*
795 		 * Adjust our pcpu stats.  In order for the nominal low-memory
796 		 * algorithms to work properly we don't let any pcpu stat get
797 		 * too negative before we force it to be rolled-up into the
798 		 * global stats.  Otherwise our pageout and vm_wait tests
799 		 * will fail badly.
800 		 *
801 		 * The idea here is to reduce unnecessary SMP cache
802 		 * mastership changes in the global vmstats, which can be
803 		 * particularly bad in multi-socket systems.
804 		 */
805 		cnt = (long *)((char *)&mycpu->gd_vmstats_adj + pq->cnt_offset);
806 		atomic_add_long(cnt, -1);
807 		if (*cnt < -VMMETER_SLOP_COUNT) {
808 			u_long copy = atomic_swap_long(cnt, 0);
809 			cnt = (long *)((char *)&vmstats + pq->cnt_offset);
810 			atomic_add_long(cnt, copy);
811 			cnt = (long *)((char *)&mycpu->gd_vmstats +
812 				      pq->cnt_offset);
813 			atomic_add_long(cnt, copy);
814 		}
815 		pq->lcnt--;
816 		m->queue = PQ_NONE;
817 		oqueue = queue;
818 		queue -= m->pc;
819 		vm_page_queues_spin_unlock(oqueue);	/* intended */
820 	}
821 	return queue;
822 }
823 
824 /*
825  * Helper function places the vm_page on the specified queue.  Generally
826  * speaking only PQ_FREE pages are placed at the head, to allow them to
827  * be allocated sooner rather than later on the assumption that they
828  * are cache-hot.
829  *
830  * The vm_page must be spinlocked.
831  * This function will return with both the page and the queue locked.
832  */
833 static __inline void
834 _vm_page_add_queue_spinlocked(vm_page_t m, u_short queue, int athead)
835 {
836 	struct vpgqueues *pq;
837 	long *cnt;
838 
839 	KKASSERT(m->queue == PQ_NONE);
840 
841 	if (queue != PQ_NONE) {
842 		vm_page_queues_spin_lock(queue);
843 		pq = &vm_page_queues[queue];
844 		++pq->lcnt;
845 
846 		/*
847 		 * Adjust our pcpu stats.  If a system entity really needs
848 		 * to incorporate the count it will call vmstats_rollup()
849 		 * to roll it all up into the global vmstats structure.
850 		 */
851 		cnt = (long *)((char *)&mycpu->gd_vmstats_adj + pq->cnt_offset);
852 		atomic_add_long(cnt, 1);
853 
854 		/*
855 		 * PQ_FREE is always handled LIFO style to try to provide
856 		 * cache-hot pages to programs.
857 		 */
858 		m->queue = queue;
859 		if (queue - m->pc == PQ_FREE) {
860 			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
861 		} else if (athead) {
862 			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
863 		} else {
864 			TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
865 		}
866 		/* leave the queue spinlocked */
867 	}
868 }
869 
870 /*
871  * Wait until the page is no longer BUSY.  If also_m_busy is TRUE we wait
872  * until the page is no longer BUSY or SBUSY (busy_count field is 0).
873  *
874  * At most one sleep call will be made before returning, so the caller
875  * typically retries in a loop.
876  *
877  * This function does NOT busy the page and on return the page is not
878  * guaranteed to be available.
879  */
880 void
881 vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
882 {
883 	u_int32_t busy_count;
884 
885 	for (;;) {
886 		busy_count = m->busy_count;
887 		cpu_ccfence();
888 
889 		if ((busy_count & PBUSY_LOCKED) == 0 &&
890 		    (also_m_busy == 0 || (busy_count & PBUSY_MASK) == 0)) {
891 			break;
892 		}
893 		tsleep_interlock(m, 0);
894 		if (atomic_cmpset_int(&m->busy_count, busy_count,
895 				      busy_count | PBUSY_WANTED)) {
896 			atomic_set_int(&m->flags, PG_REFERENCED);
897 			tsleep(m, PINTERLOCKED, msg, 0);
898 			break;
899 		}
900 	}
901 }
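/*
 * Illustrative use (a sketch, not taken from this file): callers that
 * must actually acquire the page combine the try/sleep primitives in
 * a retry loop, since a single sleep guarantees nothing:
 *
 *	while (vm_page_busy_try(m, TRUE)) {
 *		vm_page_sleep_busy(m, TRUE, "pgwait");
 *		(revalidate the page/object relationship here)
 *	}
 */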
902 
903 /*
904  * This calculates and returns a page color given an optional VM object and
905  * a pindex.  We attempt to return a cpu-localized
906  * pg_color that is still roughly 16-way set-associative.  The CPU topology
907  * is used if it was probed.
908  *
909  * The caller may use the returned value to index into e.g. PQ_FREE when
910  * allocating a page in order to nominally obtain pages that are hopefully
911  * already localized to the requesting cpu.  This function is not able to
912  * provide any sort of guarantee of this, but does its best to improve
913  * hardware cache management performance.
914  *
915  * WARNING! The caller must mask the returned value with PQ_L2_MASK.
916  */
917 u_short
918 vm_get_pg_color(int cpuid, vm_object_t object, vm_pindex_t pindex)
919 {
920 	u_short pg_color;
921 	int phys_id;
922 	int core_id;
923 	int object_pg_color;
924 
925 	phys_id = get_cpu_phys_id(cpuid);
926 	core_id = get_cpu_core_id(cpuid);
927 	object_pg_color = object ? object->pg_color : 0;
928 
929 	if (cpu_topology_phys_ids && cpu_topology_core_ids) {
930 		int grpsize;
931 
932 		/*
933 		 * Break us down by socket and cpu
934 		 */
935 		pg_color = phys_id * PQ_L2_SIZE / cpu_topology_phys_ids;
936 		pg_color += core_id * PQ_L2_SIZE /
937 			    (cpu_topology_core_ids * cpu_topology_phys_ids);
938 
939 		/*
940 		 * Calculate remaining component for object/queue color
941 		 */
942 		grpsize = PQ_L2_SIZE / (cpu_topology_core_ids *
943 					cpu_topology_phys_ids);
944 		if (grpsize >= 8) {
945 			pg_color += (pindex + object_pg_color) % grpsize;
946 		} else {
947 			if (grpsize <= 2) {
948 				grpsize = 8;
949 			} else {
950 				/* 3->12, 4->8, 5->10, 6->12, 7->14 */
951 				grpsize += grpsize;
952 				if (grpsize < 8)
953 					grpsize += grpsize;
954 			}
955 			pg_color += (pindex + object_pg_color) % grpsize;
956 		}
957 	} else {
958 		/*
959 		 * Unknown topology, distribute things evenly.
960 		 */
961 		pg_color = cpuid * PQ_L2_SIZE / ncpus;
962 		pg_color += pindex + object_pg_color;
963 	}
964 	return (pg_color & PQ_L2_MASK);
965 }
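/*
 * Worked example (illustrative, assuming PQ_L2_SIZE == 256, 2 sockets,
 * and 8 cores per socket): phys_id 1 contributes 256 / 2 = 128,
 * core_id 3 contributes 3 * 256 / 16 = 48, and grpsize is
 * 256 / 16 = 16, so the result is 128 + 48 + ((pindex +
 * object_pg_color) % 16), masked with PQ_L2_MASK on return.
 */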
966 
967 /*
968  * Wait until BUSY can be set, then set it.  If also_m_busy is TRUE we
969  * also wait for m->busy_count to become 0 before setting PBUSY_LOCKED.
970  */
971 void
972 VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
973 				     int also_m_busy, const char *msg
974 				     VM_PAGE_DEBUG_ARGS)
975 {
976 	u_int32_t busy_count;
977 
978 	for (;;) {
979 		busy_count = m->busy_count;
980 		cpu_ccfence();
981 		if (busy_count & PBUSY_LOCKED) {
982 			tsleep_interlock(m, 0);
983 			if (atomic_cmpset_int(&m->busy_count, busy_count,
984 					  busy_count | PBUSY_WANTED)) {
985 				atomic_set_int(&m->flags, PG_REFERENCED);
986 				tsleep(m, PINTERLOCKED, msg, 0);
987 			}
988 		} else if (also_m_busy && busy_count) {
989 			tsleep_interlock(m, 0);
990 			if (atomic_cmpset_int(&m->busy_count, busy_count,
991 					  busy_count | PBUSY_WANTED)) {
992 				atomic_set_int(&m->flags, PG_REFERENCED);
993 				tsleep(m, PINTERLOCKED, msg, 0);
994 			}
995 		} else {
996 			if (atomic_cmpset_int(&m->busy_count, busy_count,
997 					      busy_count | PBUSY_LOCKED)) {
998 #ifdef VM_PAGE_DEBUG
999 				m->busy_func = func;
1000 				m->busy_line = lineno;
1001 #endif
1002 				break;
1003 			}
1004 		}
1005 	}
1006 }
1007 
1008 /*
1009  * Attempt to set BUSY.  If also_m_busy is TRUE we only succeed if
1010  * m->busy_count is also 0.
1011  *
1012  * Returns non-zero on failure.
1013  */
1014 int
1015 VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m, int also_m_busy
1016 				    VM_PAGE_DEBUG_ARGS)
1017 {
1018 	u_int32_t busy_count;
1019 
1020 	for (;;) {
1021 		busy_count = m->busy_count;
1022 		cpu_ccfence();
1023 		if (busy_count & PBUSY_LOCKED)
1024 			return TRUE;
1025 		if (also_m_busy && (busy_count & PBUSY_MASK) != 0)
1026 			return TRUE;
1027 		if (atomic_cmpset_int(&m->busy_count, busy_count,
1028 				      busy_count | PBUSY_LOCKED)) {
1029 #ifdef VM_PAGE_DEBUG
1030 				m->busy_func = func;
1031 				m->busy_line = lineno;
1032 #endif
1033 			return FALSE;
1034 		}
1035 	}
1036 }
1037 
1038 /*
1039  * Clear the BUSY flag and return non-zero to indicate to the caller
1040  * that a wakeup() should be performed.
1041  *
1042  * The vm_page must be spinlocked and will remain spinlocked on return.
1043  * The related queue must NOT be spinlocked (which could deadlock us).
1044  *
1045  * (inline version)
1046  */
1047 static __inline
1048 int
1049 _vm_page_wakeup(vm_page_t m)
1050 {
1051 	u_int32_t busy_count;
1052 
1053 	for (;;) {
1054 		busy_count = m->busy_count;
1055 		cpu_ccfence();
1056 		if (atomic_cmpset_int(&m->busy_count, busy_count,
1057 				      busy_count &
1058 				      ~(PBUSY_LOCKED | PBUSY_WANTED))) {
1059 			break;
1060 		}
1061 	}
1062 	return((int)(busy_count & PBUSY_WANTED));
1063 }
1064 
1065 /*
1066  * Clear the BUSY flag and wakeup anyone waiting for the page.  This
1067  * is typically the last call you make on a page before moving onto
1068  * other things.
1069  */
1070 void
1071 vm_page_wakeup(vm_page_t m)
1072 {
1073 	KASSERT(m->busy_count & PBUSY_LOCKED,
1074 		("vm_page_wakeup: page not busy!!!"));
1075 	vm_page_spin_lock(m);
1076 	if (_vm_page_wakeup(m)) {
1077 		vm_page_spin_unlock(m);
1078 		wakeup(m);
1079 	} else {
1080 		vm_page_spin_unlock(m);
1081 	}
1082 }
1083 
1084 /*
1085  * Holding a page keeps it from being reused.  Other parts of the system
1086  * can still disassociate the page from its current object and free it, or
1087  * perform read or write I/O on it and/or otherwise manipulate the page,
1088  * but if the page is held the VM system will leave the page and its data
1089  * intact and not reuse the page for other purposes until the last hold
1090  * reference is released.  (see vm_page_wire() if you want to prevent the
1091  * page from being disassociated from its object too).
1092  *
1093  * The caller must still validate the contents of the page and, if necessary,
1094  * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
1095  * before manipulating the page.
1096  *
1097  * XXX get vm_page_spin_lock() here and move FREE->HOLD if necessary
1098  */
1099 void
1100 vm_page_hold(vm_page_t m)
1101 {
1102 	vm_page_spin_lock(m);
1103 	atomic_add_int(&m->hold_count, 1);
1104 	if (m->queue - m->pc == PQ_FREE) {
1105 		_vm_page_queue_spin_lock(m);
1106 		_vm_page_rem_queue_spinlocked(m);
1107 		_vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0);
1108 		_vm_page_queue_spin_unlock(m);
1109 	}
1110 	vm_page_spin_unlock(m);
1111 }
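/*
 * Illustrative use (a sketch, not taken from this file): a hold keeps
 * the page's identity and data stable across a blocking operation
 * without keeping it busied:
 *
 *	vm_page_hold(m);
 *	vm_page_wakeup(m);		(page was busied earlier)
 *	(start device I/O on VM_PAGE_TO_PHYS(m), possibly blocking)
 *	vm_page_unhold(m);
 */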
1112 
1113 /*
1114  * The opposite of vm_page_hold().  If the page is on the HOLD queue
1115  * it was freed while held and must be moved back to the FREE queue.
1116  */
1117 void
1118 vm_page_unhold(vm_page_t m)
1119 {
1120 	KASSERT(m->hold_count > 0 && m->queue - m->pc != PQ_FREE,
1121 		("vm_page_unhold: pg %p illegal hold_count (%d) or on FREE queue (%d)",
1122 		 m, m->hold_count, m->queue - m->pc));
1123 	vm_page_spin_lock(m);
1124 	atomic_add_int(&m->hold_count, -1);
1125 	if (m->hold_count == 0 && m->queue - m->pc == PQ_HOLD) {
1126 		_vm_page_queue_spin_lock(m);
1127 		_vm_page_rem_queue_spinlocked(m);
1128 		_vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 1);
1129 		_vm_page_queue_spin_unlock(m);
1130 	}
1131 	vm_page_spin_unlock(m);
1132 }
1133 
1134 /*
1135  *	vm_page_initfake:
1136  *
1137  *	Initialize a fictitious page with the specified physical address
1138  *	and memory attribute.  The memory attribute is the only machine-
1139  *	dependent aspect of a fictitious page that must be initialized.
1140  */
1141 
1142 void
1143 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
1144 {
1145 
1146 	if ((m->flags & PG_FICTITIOUS) != 0) {
1147 		/*
1148 		 * The page's memattr might have changed since the
1149 		 * previous initialization.  Update the pmap to the
1150 		 * new memattr.
1151 		 */
1152 		goto memattr;
1153 	}
1154 	m->phys_addr = paddr;
1155 	m->queue = PQ_NONE;
1156 	/* Fictitious pages don't use "segind". */
1157 	/* Fictitious pages don't use "order" or "pool". */
1158 	m->flags = PG_FICTITIOUS | PG_UNMANAGED;
1159 	m->busy_count = PBUSY_LOCKED;
1160 	m->wire_count = 1;
1161 	spin_init(&m->spin, "fake_page");
1162 	pmap_page_init(m);
1163 memattr:
1164 	pmap_page_set_memattr(m, memattr);
1165 }
1166 
1167 /*
1168  * Inserts the given vm_page into the object and object list.
1169  *
1170  * The pagetables are not updated; the page will presumably be faulted
1171  * in when needed, or, for a kernel page, the caller will at some point
1172  * enter the page into the kernel's pmap.  We are not allowed to block
1173  * here so we *can't* update the pagetables anyway.
1174  *
1175  * This routine may not block.
1176  * This routine must be called with the vm_object held.
1177  * This routine must be called with a critical section held.
1178  *
1179  * This routine returns TRUE if the page was inserted into the object
1180  * successfully, and FALSE if the page already exists in the object.
1181  */
1182 int
1183 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1184 {
1185 	ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(object));
1186 	if (m->object != NULL)
1187 		panic("vm_page_insert: already inserted");
1188 
1189 	atomic_add_int(&object->generation, 1);
1190 
1191 	/*
1192 	 * Record the object/offset pair in this page and add the
1193 	 * pv_list_count of the page to the object.
1194 	 *
1195 	 * The vm_page spin lock is required for interactions with the pmap.
1196 	 */
1197 	vm_page_spin_lock(m);
1198 	m->object = object;
1199 	m->pindex = pindex;
1200 	if (vm_page_rb_tree_RB_INSERT(&object->rb_memq, m)) {
1201 		m->object = NULL;
1202 		m->pindex = 0;
1203 		vm_page_spin_unlock(m);
1204 		return FALSE;
1205 	}
1206 	++object->resident_page_count;
1207 	++mycpu->gd_vmtotal.t_rm;
1208 	vm_page_spin_unlock(m);
1209 
1210 	/*
1211 	 * Since we are inserting a new and possibly dirty page,
1212 	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
1213 	 */
1214 	if ((m->valid & m->dirty) ||
1215 	    (m->flags & (PG_WRITEABLE | PG_NEED_COMMIT)))
1216 		vm_object_set_writeable_dirty(object);
1217 
1218 	/*
1219 	 * Checks for a swap assignment and sets PG_SWAPPED if appropriate.
1220 	 */
1221 	swap_pager_page_inserted(m);
1222 	return TRUE;
1223 }
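/*
 * Illustrative caller pattern (a sketch): because insertion can collide
 * with an existing page at the same pindex, callers typically handle a
 * FALSE return by disposing of their busied page and looking up the
 * winner:
 *
 *	if (vm_page_insert(m, object, pindex) == FALSE) {
 *		vm_page_free(m);
 *		m = vm_page_lookup(object, pindex);
 *	}
 */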
1224 
1225 /*
1226  * Removes the given vm_page_t from the (object,index) table
1227  *
1228  * The underlying pmap entry (if any) is NOT removed here.
1229  * This routine may not block.
1230  *
1231  * The page must be BUSY and will remain BUSY on return.
1232  * No other requirements.
1233  *
1234  * NOTE: FreeBSD side effect was to unbusy the page on return.  We leave
1235  *	 it busy.
1236  */
1237 void
1238 vm_page_remove(vm_page_t m)
1239 {
1240 	vm_object_t object;
1241 
1242 	if (m->object == NULL) {
1243 		return;
1244 	}
1245 
1246 	if ((m->busy_count & PBUSY_LOCKED) == 0)
1247 		panic("vm_page_remove: page not busy");
1248 
1249 	object = m->object;
1250 
1251 	vm_object_hold(object);
1252 
1253 	/*
1254 	 * Remove the page from the object and update the object.
1255 	 *
1256 	 * The vm_page spin lock is required for interactions with the pmap.
1257 	 */
1258 	vm_page_spin_lock(m);
1259 	vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m);
1260 	--object->resident_page_count;
1261 	--mycpu->gd_vmtotal.t_rm;
1262 	m->object = NULL;
1263 	atomic_add_int(&object->generation, 1);
1264 	vm_page_spin_unlock(m);
1265 
1266 	vm_object_drop(object);
1267 }
1268 
1269 /*
1270  * Locate and return the page at (object, pindex), or NULL if the
1271  * page could not be found.
1272  *
1273  * The caller must hold the vm_object token.
1274  */
1275 vm_page_t
1276 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1277 {
1278 	vm_page_t m;
1279 
1280 	/*
1281 	 * Search the object's RB tree for this object/offset pair
1282 	 */
1283 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1284 	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
1285 	KKASSERT(m == NULL || (m->object == object && m->pindex == pindex));
1286 	return(m);
1287 }
1288 
1289 vm_page_t
1290 VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(struct vm_object *object,
1291 					    vm_pindex_t pindex,
1292 					    int also_m_busy, const char *msg
1293 					    VM_PAGE_DEBUG_ARGS)
1294 {
1295 	u_int32_t busy_count;
1296 	vm_page_t m;
1297 
1298 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1299 	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
1300 	while (m) {
1301 		KKASSERT(m->object == object && m->pindex == pindex);
1302 		busy_count = m->busy_count;
1303 		cpu_ccfence();
1304 		if (busy_count & PBUSY_LOCKED) {
1305 			tsleep_interlock(m, 0);
1306 			if (atomic_cmpset_int(&m->busy_count, busy_count,
1307 					  busy_count | PBUSY_WANTED)) {
1308 				atomic_set_int(&m->flags, PG_REFERENCED);
1309 				tsleep(m, PINTERLOCKED, msg, 0);
1310 				m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq,
1311 							      pindex);
1312 			}
1313 		} else if (also_m_busy && busy_count) {
1314 			tsleep_interlock(m, 0);
1315 			if (atomic_cmpset_int(&m->busy_count, busy_count,
1316 					  busy_count | PBUSY_WANTED)) {
1317 				atomic_set_int(&m->flags, PG_REFERENCED);
1318 				tsleep(m, PINTERLOCKED, msg, 0);
1319 				m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq,
1320 							      pindex);
1321 			}
1322 		} else if (atomic_cmpset_int(&m->busy_count, busy_count,
1323 					     busy_count | PBUSY_LOCKED)) {
1324 #ifdef VM_PAGE_DEBUG
1325 			m->busy_func = func;
1326 			m->busy_line = lineno;
1327 #endif
1328 			break;
1329 		}
1330 	}
1331 	return m;
1332 }
1333 
1334 /*
1335  * Attempt to lookup and busy a page.
1336  *
1337  * Returns NULL if the page could not be found
1338  *
1339  * Returns a vm_page and error == TRUE if the page exists but could not
1340  * be busied.
1341  *
1342  * Returns a vm_page and error == FALSE on success.
1343  */
1344 vm_page_t
1345 VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(struct vm_object *object,
1346 					   vm_pindex_t pindex,
1347 					   int also_m_busy, int *errorp
1348 					   VM_PAGE_DEBUG_ARGS)
1349 {
1350 	u_int32_t busy_count;
1351 	vm_page_t m;
1352 
1353 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1354 	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
1355 	*errorp = FALSE;
1356 	while (m) {
1357 		KKASSERT(m->object == object && m->pindex == pindex);
1358 		busy_count = m->busy_count;
1359 		cpu_ccfence();
1360 		if (busy_count & PBUSY_LOCKED) {
1361 			*errorp = TRUE;
1362 			break;
1363 		}
1364 		if (also_m_busy && busy_count) {
1365 			*errorp = TRUE;
1366 			break;
1367 		}
1368 		if (atomic_cmpset_int(&m->busy_count, busy_count,
1369 				      busy_count | PBUSY_LOCKED)) {
1370 #ifdef VM_PAGE_DEBUG
1371 			m->busy_func = func;
1372 			m->busy_line = lineno;
1373 #endif
1374 			break;
1375 		}
1376 	}
1377 	return m;
1378 }
1379 
1380 /*
1381  * Returns a page that is only soft-busied for use by the caller in
1382  * a read-only fashion.  Returns NULL if the page could not be found,
1383  * the soft busy could not be obtained, or the page data is invalid.
1384  */
1385 vm_page_t
1386 vm_page_lookup_sbusy_try(struct vm_object *object, vm_pindex_t pindex,
1387 			 int pgoff, int pgbytes)
1388 {
1389 	vm_page_t m;
1390 
1391 	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1392 	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
1393 	if (m) {
1394 		if ((m->valid != VM_PAGE_BITS_ALL &&
1395 		     !vm_page_is_valid(m, pgoff, pgbytes)) ||
1396 		    (m->flags & PG_FICTITIOUS)) {
1397 			m = NULL;
1398 		} else if (vm_page_sbusy_try(m)) {
1399 			m = NULL;
1400 		} else if ((m->valid != VM_PAGE_BITS_ALL &&
1401 			    !vm_page_is_valid(m, pgoff, pgbytes)) ||
1402 			   (m->flags & PG_FICTITIOUS)) {
1403 			vm_page_sbusy_drop(m);
1404 			m = NULL;
1405 		}
1406 	}
1407 	return m;
1408 }
1409 
1410 /*
1411  * Caller must hold the related vm_object
1412  */
1413 vm_page_t
1414 vm_page_next(vm_page_t m)
1415 {
1416 	vm_page_t next;
1417 
1418 	next = vm_page_rb_tree_RB_NEXT(m);
1419 	if (next && next->pindex != m->pindex + 1)
1420 		next = NULL;
1421 	return (next);
1422 }
1423 
1424 /*
1425  * vm_page_rename()
1426  *
1427  * Move the given vm_page from its current object to the specified
1428  * target object/offset.  The page must be busy and will remain so
1429  * on return.
1430  *
1431  * new_object must be held.
1432  * This routine might block. XXX ?
1433  *
1434  * NOTE: Swap associated with the page must be invalidated by the move.  We
1435  *       have to do this for several reasons:  (1) we aren't freeing the
1436  *       page, (2) we are dirtying the page, (3) the VM system is probably
1437  *       moving the page from object A to B, and will then later move
1438  *       the backing store from A to B and we can't have a conflict.
1439  *
1440  * NOTE: We *always* dirty the page.  It is necessary both for the
1441  *       fact that we moved it, and because we may be invalidating
1442  *	 swap.  If the page is on the cache, we have to deactivate it
1443  *	 or vm_page_dirty() will panic.  Dirty pages are not allowed
1444  *	 on the cache.
1445  */
1446 void
1447 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1448 {
1449 	KKASSERT(m->busy_count & PBUSY_LOCKED);
1450 	ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(new_object));
1451 	if (m->object) {
1452 		ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(m->object));
1453 		vm_page_remove(m);
1454 	}
1455 	if (vm_page_insert(m, new_object, new_pindex) == FALSE) {
1456 		panic("vm_page_rename: target exists (%p,%"PRIu64")",
1457 		      new_object, new_pindex);
1458 	}
1459 	if (m->queue - m->pc == PQ_CACHE)
1460 		vm_page_deactivate(m);
1461 	vm_page_dirty(m);
1462 }
1463 
1464 /*
1465  * vm_page_unqueue() without any wakeup.  This routine is used when a page
1466  * is to remain busied by the caller.
1467  *
1468  * This routine may not block.
1469  */
1470 void
1471 vm_page_unqueue_nowakeup(vm_page_t m)
1472 {
1473 	vm_page_and_queue_spin_lock(m);
1474 	(void)_vm_page_rem_queue_spinlocked(m);
1475 	vm_page_spin_unlock(m);
1476 }
1477 
1478 /*
1479  * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
1480  * if necessary.
1481  *
1482  * This routine may not block.
1483  */
1484 void
1485 vm_page_unqueue(vm_page_t m)
1486 {
1487 	u_short queue;
1488 
1489 	vm_page_and_queue_spin_lock(m);
1490 	queue = _vm_page_rem_queue_spinlocked(m);
1491 	if (queue == PQ_FREE || queue == PQ_CACHE) {
1492 		vm_page_spin_unlock(m);
1493 		pagedaemon_wakeup();
1494 	} else {
1495 		vm_page_spin_unlock(m);
1496 	}
1497 }
1498 
1499 /*
1500  * vm_page_list_find()
1501  *
1502  * Find a page on the specified queue with color optimization.
1503  *
1504  * The page coloring optimization attempts to locate a page that does
1505  * not overload other nearby pages in the object in the cpu's L1 or L2
1506  * caches.  We need this optimization because cpu caches tend to be
1507  * physical caches, while object spaces tend to be virtual.
1508  *
1509  * The page coloring optimization also, very importantly, tries to localize
1510  * memory to cpus and physical sockets.
1511  *
1512  * On MP systems each PQ_FREE and PQ_CACHE color queue has its own spinlock
1513  * and the algorithm is adjusted to localize allocations on a per-core basis.
1514  * This is done by 'twisting' the colors.
1515  *
1516  * The page is returned spinlocked and removed from its queue (it will
1517  * be on PQ_NONE), or NULL. The page is not BUSY'd.  The caller
1518  * is responsible for dealing with the busy-page case (usually by
1519  * deactivating the page and looping).
1520  *
1521  * NOTE:  This routine is carefully inlined.  A non-inlined version
1522  *	  is available for outside callers but the only critical path is
1523  *	  from within this source file.
1524  *
1525  * NOTE:  This routine assumes that the vm_pages found in PQ_CACHE and PQ_FREE
1526  *	  represent stable storage, allowing us to order our locks vm_page
1527  *	  first, then queue.
1528  */
1529 static __inline
1530 vm_page_t
1531 _vm_page_list_find(int basequeue, int index)
1532 {
1533 	vm_page_t m;
1534 
1535 	for (;;) {
1536 		m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
1537 		if (m == NULL) {
1538 			m = _vm_page_list_find2(basequeue, index);
1539 			return(m);
1540 		}
1541 		vm_page_and_queue_spin_lock(m);
1542 		if (m->queue == basequeue + index) {
1543 			_vm_page_rem_queue_spinlocked(m);
1544 			/* vm_page_t spin held, no queue spin */
1545 			break;
1546 		}
1547 		vm_page_and_queue_spin_unlock(m);
1548 	}
1549 	return(m);
1550 }
1551 
1552 /*
1553  * If we could not find a page in the desired queue, try to find one in
1554  * a nearby queue.
1555  */
1556 static vm_page_t
1557 _vm_page_list_find2(int basequeue, int index)
1558 {
1559 	struct vpgqueues *pq;
1560 	vm_page_t m = NULL;
1561 	int pqmask = PQ_SET_ASSOC_MASK >> 1;
1562 	int pqi;
1563 	int i;
1564 
1565 	index &= PQ_L2_MASK;
1566 	pq = &vm_page_queues[basequeue];
1567 
1568 	/*
1569 	 * Run local sets of 16, 32, 64, 128, and the whole queue if all
1570 	 * else fails (PQ_L2_MASK which is 255).
1571 	 */
1572 	do {
1573 		pqmask = (pqmask << 1) | 1;
1574 		for (i = 0; i <= pqmask; ++i) {
1575 			pqi = (index & ~pqmask) | ((index + i) & pqmask);
1576 			m = TAILQ_FIRST(&pq[pqi].pl);
1577 			if (m) {
1578 				_vm_page_and_queue_spin_lock(m);
1579 				if (m->queue == basequeue + pqi) {
1580 					_vm_page_rem_queue_spinlocked(m);
1581 					return(m);
1582 				}
1583 				_vm_page_and_queue_spin_unlock(m);
1584 				--i;
1585 				continue;
1586 			}
1587 		}
1588 	} while (pqmask != PQ_L2_MASK);
1589 
1590 	return(m);
1591 }
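/*
 * Worked example (illustrative): for index == 200 the first pass uses
 * pqmask == 15 and scans the 16-way set holding 200 (queues 192-207,
 * starting at 200 and wrapping within the set), then widens to sets of
 * 32, 64, and 128, and finally scans all 256 queues if nothing turns up.
 */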
1592 
1593 /*
1594  * Returns a vm_page candidate for allocation.  The page is not busied so
1595  * it can move around.  The caller must busy the page (and typically
1596  * deactivate it if it cannot be busied!)
1597  *
1598  * Returns a spinlocked vm_page that has been removed from its queue.
1599  */
1600 vm_page_t
1601 vm_page_list_find(int basequeue, int index)
1602 {
1603 	return(_vm_page_list_find(basequeue, index));
1604 }
1605 
1606 /*
1607  * Find a page on the cache queue with color optimization, remove it
1608  * from the queue, and busy it.  The returned page will not be spinlocked.
1609  *
1610  * Candidates can fail, typically because they are busied by someone
1611  * else; a failed candidate is deactivated and the scan continues.
1612  *
1613  * This routine may not block.
1614  *
1615  */
1616 static vm_page_t
1617 vm_page_select_cache(u_short pg_color)
1618 {
1619 	vm_page_t m;
1620 
1621 	for (;;) {
1622 		m = _vm_page_list_find(PQ_CACHE, pg_color & PQ_L2_MASK);
1623 		if (m == NULL)
1624 			break;
1625 		/*
1626 		 * (m) has been removed from its queue and spinlocked
1627 		 */
1628 		if (vm_page_busy_try(m, TRUE)) {
1629 			_vm_page_deactivate_locked(m, 0);
1630 			vm_page_spin_unlock(m);
1631 		} else {
1632 			/*
1633 			 * We successfully busied the page
1634 			 */
1635 			if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0 &&
1636 			    m->hold_count == 0 &&
1637 			    m->wire_count == 0 &&
1638 			    (m->dirty & m->valid) == 0) {
1639 				vm_page_spin_unlock(m);
1640 				pagedaemon_wakeup();
1641 				return(m);
1642 			}
1643 
1644 			/*
1645 			 * The page cannot be recycled, deactivate it.
1646 			 */
1647 			_vm_page_deactivate_locked(m, 0);
1648 			if (_vm_page_wakeup(m)) {
1649 				vm_page_spin_unlock(m);
1650 				wakeup(m);
1651 			} else {
1652 				vm_page_spin_unlock(m);
1653 			}
1654 		}
1655 	}
1656 	return (m);
1657 }
1658 
1659 /*
1660  * Find a free page.  We attempt to inline the nominal case and fall back
1661  * to _vm_page_list_find2() otherwise.  A busied page is removed from
1662  * the queue and returned.
1663  *
1664  * This routine may not block.
1665  */
1666 static __inline vm_page_t
1667 vm_page_select_free(u_short pg_color)
1668 {
1669 	vm_page_t m;
1670 
1671 	for (;;) {
1672 		m = _vm_page_list_find(PQ_FREE, pg_color & PQ_L2_MASK);
1673 		if (m == NULL)
1674 			break;
1675 		if (vm_page_busy_try(m, TRUE)) {
1676 			/*
1677 			 * Various mechanisms such as a pmap_collect can
1678 			 * result in a busy page on the free queue.  We
1679 			 * have to move the page out of the way so we can
1680 			 * retry the allocation.  If the other thread is not
1681 			 * allocating the page then m->valid will remain 0 and
1682 			 * the pageout daemon will free the page later on.
1683 			 *
1684 			 * Since we could not busy the page, however, we
1685 			 * cannot make assumptions as to whether the page
1686 			 * will be allocated by the other thread or not,
1687 			 * so all we can do is deactivate it to move it out
1688 			 * of the way.  In particular, if the other thread
1689 			 * wires the page it may wind up on the inactive
1690 			 * queue and the pageout daemon will have to deal
1691 			 * with that case too.
1692 			 */
1693 			_vm_page_deactivate_locked(m, 0);
1694 			vm_page_spin_unlock(m);
1695 		} else {
1696 			/*
1697 			 * Theoretically if we are able to busy the page
1698 			 * atomic with the queue removal (using the vm_page
1699 			 * lock) nobody else should be able to mess with the
1700 			 * page before us.
1701 			 */
1702 			KKASSERT((m->flags & (PG_UNMANAGED |
1703 					      PG_NEED_COMMIT)) == 0);
1704 			KASSERT(m->hold_count == 0, ("m->hold_count is not zero "
1705 						     "pg %p q=%d flags=%08x hold=%d wire=%d",
1706 						     m, m->queue, m->flags, m->hold_count, m->wire_count));
1707 			KKASSERT(m->wire_count == 0);
1708 			vm_page_spin_unlock(m);
1709 			pagedaemon_wakeup();
1710 
1711 			/* return busied and removed page */
1712 			return(m);
1713 		}
1714 	}
1715 	return(m);
1716 }
1717 
1718 /*
1719  * vm_page_alloc()
1720  *
1721  * Allocate and return a memory cell associated with this VM object/offset
1722  * pair.  If object is NULL an unassociated page will be allocated.
1723  *
1724  * The returned page will be busied and removed from its queues.  This
1725  * routine can block and may return NULL if a race occurs and the page
1726  * is found to already exist at the specified (object, pindex).
1727  *
1728  *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
1729  *	VM_ALLOC_QUICK		like normal but cannot use cache
1730  *	VM_ALLOC_SYSTEM		greater free drain
1731  *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
1732  *	VM_ALLOC_ZERO		advisory request for pre-zero'd page only
1733  *	VM_ALLOC_FORCE_ZERO	advisory request for pre-zero'd page only
1734  *	VM_ALLOC_NULL_OK	ok to return NULL on insertion collision
1735  *				(see vm_page_grab())
1736  *	VM_ALLOC_USE_GD		ok to use per-gd cache
1737  *
1738  *	VM_ALLOC_CPU(n)		allocate using specified cpu localization
1739  *
1740  * The object must be held if not NULL
1741  * This routine may not block
1742  *
1743  * Additional special handling is required when called from an interrupt
1744  * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
1745  * in this case.
1746  */
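/*
 * Illustrative use (a sketch, not taken from this file): a typical
 * object-page allocation blocks and retries when memory is tight:
 *
 *	m = vm_page_alloc(object, pindex,
 *			  VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK);
 *	if (m == NULL) {
 *		vm_wait(0);	(wait for the pagedaemon, then retry)
 *	}
 */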
1747 vm_page_t
1748 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
1749 {
1750 	globaldata_t gd;
1751 	vm_object_t obj;
1752 	vm_page_t m;
1753 	u_short pg_color;
1754 	int cpuid_local;
1755 
1756 #if 0
1757 	/*
1758 	 * Special per-cpu free VM page cache.  The pages are pre-busied
1759 	 * and pre-zeroed for us.
1760 	 */
1761 	if (gd->gd_vmpg_count && (page_req & VM_ALLOC_USE_GD)) {
1762 		crit_enter_gd(gd);
1763 		if (gd->gd_vmpg_count) {
1764 			m = gd->gd_vmpg_array[--gd->gd_vmpg_count];
1765 			crit_exit_gd(gd);
1766 			goto done;
1767                 }
1768 		crit_exit_gd(gd);
1769         }
1770 #endif
1771 	m = NULL;
1772 
1773 	/*
1774 	 * CPU LOCALIZATION
1775 	 *
1776 	 * CPU localization algorithm.  Break the page queues up by physical
1777 	 * id and core id (note that two cpu threads will have the same core
1778 	 * id, and core_id != gd_cpuid).
1779 	 *
1780 	 * This is nowhere near perfect, for example the last pindex in a
1781 	 * subgroup will overflow into the next cpu or package.  But this
1782 	 * should get us good page reuse locality in heavy mixed loads.
1783 	 *
1784 	 * (may be executed before the APs are started, so other GDs might
1785 	 *  not exist!)
1786 	 */
1787 	if (page_req & VM_ALLOC_CPU_SPEC)
1788 		cpuid_local = VM_ALLOC_GETCPU(page_req);
1789 	else
1790 		cpuid_local = mycpu->gd_cpuid;
1791 
1792 	pg_color = vm_get_pg_color(cpuid_local, object, pindex);
1793 
1794 	KKASSERT(page_req &
1795 		(VM_ALLOC_NORMAL|VM_ALLOC_QUICK|
1796 		 VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
1797 
1798 	/*
1799 	 * Certain system threads (pageout daemon, buf_daemons) are
1800 	 * allowed to eat deeper into the free page list.
1801 	 */
1802 	if (curthread->td_flags & TDF_SYSTHREAD)
1803 		page_req |= VM_ALLOC_SYSTEM;
1804 
1805 	/*
1806 	 * Impose various limitations.  Note that the v_free_reserved test
1807 	 * must match the opposite of vm_page_count_target() to avoid
1808 	 * livelocks; be careful.
1809 	 */
1810 loop:
1811 	gd = mycpu;
1812 	if (gd->gd_vmstats.v_free_count >= gd->gd_vmstats.v_free_reserved ||
1813 	    ((page_req & VM_ALLOC_INTERRUPT) &&
1814 	     gd->gd_vmstats.v_free_count > 0) ||
1815 	    ((page_req & VM_ALLOC_SYSTEM) &&
1816 	     gd->gd_vmstats.v_cache_count == 0 &&
1817 	     gd->gd_vmstats.v_free_count >
1818 	     gd->gd_vmstats.v_interrupt_free_min)
1819 	) {
1820 		/*
1821 		 * The free queue has sufficient free pages to take one out.
1822 		 */
1823 		m = vm_page_select_free(pg_color);
1824 	} else if (page_req & VM_ALLOC_NORMAL) {
1825 		/*
1826 		 * Allocatable from the cache (non-interrupt only).  On
1827 		 * success, we must free the page and try again, thus
1828 		 * ensuring that vmstats.v_*_free_min counters are replenished.
1829 		 */
1830 #ifdef INVARIANTS
1831 		if (curthread->td_preempted) {
1832 			kprintf("vm_page_alloc(): warning, attempt to allocate"
1833 				" cache page from preempting interrupt\n");
1834 			m = NULL;
1835 		} else {
1836 			m = vm_page_select_cache(pg_color);
1837 		}
1838 #else
1839 		m = vm_page_select_cache(pg_color);
1840 #endif
1841 		/*
1842 		 * On success move the page into the free queue and loop.
1843 		 *
1844 		 * Only do this if we can safely acquire the vm_object lock,
1845 		 * because this is effectively a random page and the caller
1846 		 * might be holding the lock shared; we don't want to
1847 		 * deadlock.
1848 		 */
1849 		if (m != NULL) {
1850 			KASSERT(m->dirty == 0,
1851 				("Found dirty cache page %p", m));
1852 			if ((obj = m->object) != NULL) {
1853 				if (vm_object_hold_try(obj)) {
1854 					vm_page_protect(m, VM_PROT_NONE);
1855 					vm_page_free(m);
1856 					/* m->object NULL here */
1857 					vm_object_drop(obj);
1858 				} else {
1859 					vm_page_deactivate(m);
1860 					vm_page_wakeup(m);
1861 				}
1862 			} else {
1863 				vm_page_protect(m, VM_PROT_NONE);
1864 				vm_page_free(m);
1865 			}
1866 			goto loop;
1867 		}
1868 
1869 		/*
1870 		 * On failure return NULL
1871 		 */
1872 		atomic_add_int(&vm_pageout_deficit, 1);
1873 		pagedaemon_wakeup();
1874 		return (NULL);
1875 	} else {
1876 		/*
1877 		 * No pages available, wakeup the pageout daemon and give up.
1878 		 */
1879 		atomic_add_int(&vm_pageout_deficit, 1);
1880 		pagedaemon_wakeup();
1881 		return (NULL);
1882 	}
1883 
1884 	/*
1885 	 * v_free_count can race so loop if we don't find the expected
1886 	 * page.
1887 	 */
1888 	if (m == NULL) {
1889 		vmstats_rollup();
1890 		goto loop;
1891 	}
1892 
1893 	/*
1894 	 * Good page found.  The page has already been busied for us and
1895 	 * removed from its queues.
1896 	 */
1897 	KASSERT(m->dirty == 0,
1898 		("vm_page_alloc: free/cache page %p was dirty", m));
1899 	KKASSERT(m->queue == PQ_NONE);
1900 
1901 #if 0
1902 done:
1903 #endif
1904 	/*
1905 	 * Initialize the structure, inheriting some flags but clearing
1906 	 * all the rest.  The page has already been busied for us.
1907 	 */
1908 	vm_page_flag_clear(m, ~PG_KEEP_NEWPAGE_MASK);
1909 
1910 	KKASSERT(m->wire_count == 0);
1911 	KKASSERT((m->busy_count & PBUSY_MASK) == 0);
1912 	m->act_count = 0;
1913 	m->valid = 0;
1914 
1915 	/*
1916 	 * Caller must be holding the object lock (asserted by
1917 	 * vm_page_insert()).
1918 	 *
1919 	 * NOTE: Inserting a page here does not insert it into any pmaps
1920 	 *	 (which could cause us to block allocating memory).
1921 	 *
1922 	 * NOTE: If no object is supplied, an unassociated page is allocated
1923 	 *	 and m->pindex can be used by the caller for any purpose.
1924 	 */
1925 	if (object) {
1926 		if (vm_page_insert(m, object, pindex) == FALSE) {
1927 			vm_page_free(m);
1928 			if ((page_req & VM_ALLOC_NULL_OK) == 0)
1929 				panic("PAGE RACE %p[%ld]/%p",
1930 				      object, (long)pindex, m);
1931 			m = NULL;
1932 		}
1933 	} else {
1934 		m->pindex = pindex;
1935 	}
1936 
1937 	/*
1938 	 * Don't wakeup too often - wakeup the pageout daemon when
1939 	 * we would be nearly out of memory.
1940 	 */
1941 	pagedaemon_wakeup();
1942 
1943 	/*
1944 	 * A BUSY page is returned.
1945 	 */
1946 	return (m);
1947 }
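
/*
 * Illustrative sketch (not compiled): a typical vm_page_alloc() caller
 * holds the object, passes VM_ALLOC_NULL_OK to tolerate an insertion
 * race, and wakes the returned busied page once it is initialized.
 * The 'obj' and 'pindex' names are hypothetical.
 */
#if 0
	vm_page_t m;

	vm_object_hold(obj);
	m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK);
	if (m) {
		/* initialize the busied page, then release the busy */
		vm_page_wakeup(m);
	}
	vm_object_drop(obj);
#endif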
1948 
1949 /*
1950  * Returns number of pages available in our DMA memory reserve
1951  * (adjusted with vm.dma_reserved=<value>m in /boot/loader.conf)
1952  */
1953 vm_size_t
1954 vm_contig_avail_pages(void)
1955 {
1956 	alist_blk_t blk;
1957 	alist_blk_t count;
1958 	alist_blk_t bfree;
1959 	spin_lock(&vm_contig_spin);
1960 	bfree = alist_free_info(&vm_contig_alist, &blk, &count);
1961 	spin_unlock(&vm_contig_spin);
1962 
1963 	return bfree;
1964 }
1965 
1966 /*
1967  * Attempt to allocate contiguous physical memory with the specified
1968  * requirements.
1969  */
1970 vm_page_t
1971 vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
1972 		     unsigned long alignment, unsigned long boundary,
1973 		     unsigned long size, vm_memattr_t memattr)
1974 {
1975 	alist_blk_t blk;
1976 	vm_page_t m;
1977 	vm_pindex_t i;
1978 #if 0
1979 	static vm_pindex_t contig_rover;
1980 #endif
1981 
1982 	alignment >>= PAGE_SHIFT;
1983 	if (alignment == 0)
1984 		alignment = 1;
1985 	boundary >>= PAGE_SHIFT;
1986 	if (boundary == 0)
1987 		boundary = 1;
1988 	size = (size + PAGE_MASK) >> PAGE_SHIFT;
1989 
1990 #if 0
1991 	/*
1992 	 * Disabled temporarily until we find a solution for DRM (a flag
1993 	 * to always use the free space reserve, for performance).
1994 	 */
1995 	if (high == BUS_SPACE_MAXADDR && alignment <= PAGE_SIZE &&
1996 	    boundary <= PAGE_SIZE && size == 1 &&
1997 	    memattr == VM_MEMATTR_DEFAULT) {
1998 		/*
1999 		 * Any page will work, use vm_page_alloc()
2000 		 * (e.g. when used from kmem_alloc_attr())
2001 		 */
2002 		m = vm_page_alloc(NULL, (contig_rover++) & 0x7FFFFFFF,
2003 				  VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM |
2004 				  VM_ALLOC_INTERRUPT);
2005 		m->valid = VM_PAGE_BITS_ALL;
2006 		vm_page_wire(m);
2007 		vm_page_wakeup(m);
2008 	} else
2009 #endif
2010 	{
2011 		/*
2012 		 * Use the low-memory dma reserve
2013 		 */
2014 		spin_lock(&vm_contig_spin);
2015 		blk = alist_alloc(&vm_contig_alist, 0, size);
2016 		if (blk == ALIST_BLOCK_NONE) {
2017 			spin_unlock(&vm_contig_spin);
2018 			if (bootverbose) {
2019 				kprintf("vm_page_alloc_contig: %ldk nospace\n",
2020 					(size << PAGE_SHIFT) / 1024);
2021 				print_backtrace(5);
2022 			}
2023 			return(NULL);
2024 		}
2025 		if (high && ((vm_paddr_t)(blk + size) << PAGE_SHIFT) > high) {
2026 			alist_free(&vm_contig_alist, blk, size);
2027 			spin_unlock(&vm_contig_spin);
2028 			if (bootverbose) {
2029 				kprintf("vm_page_alloc_contig: %ldk high "
2030 					"%016jx failed\n",
2031 					(size << PAGE_SHIFT) / 1024,
2032 					(intmax_t)high);
2033 			}
2034 			return(NULL);
2035 		}
2036 		spin_unlock(&vm_contig_spin);
2037 		m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
2038 	}
2039 	if (vm_contig_verbose) {
2040 		kprintf("vm_page_alloc_contig: %016jx/%ldk "
2041 			"(%016jx-%016jx al=%lu bo=%lu pgs=%lu attr=%d)\n",
2042 			(intmax_t)m->phys_addr,
2043 			(size << PAGE_SHIFT) / 1024,
2044 			low, high, alignment, boundary, size, memattr);
2045 	}
2046 	if (memattr != VM_MEMATTR_DEFAULT) {
2047 		for (i = 0; i < size; i++)
2048 			pmap_page_set_memattr(&m[i], memattr);
2049 	}
2050 	return m;
2051 }
2052 
2053 /*
2054  * Free contiguously allocated pages.  The pages will be wired but not busy.
2055  * When freeing to the alist we leave them wired and not busy.
2056  */
2057 void
2058 vm_page_free_contig(vm_page_t m, unsigned long size)
2059 {
2060 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
2061 	vm_pindex_t start = pa >> PAGE_SHIFT;
2062 	vm_pindex_t pages = (size + PAGE_MASK) >> PAGE_SHIFT;
2063 
2064 	if (vm_contig_verbose) {
2065 		kprintf("vm_page_free_contig:  %016jx/%ldk\n",
2066 			(intmax_t)pa, size / 1024);
2067 	}
2068 	if (pa < vm_low_phys_reserved) {
2069 		KKASSERT(pa + size <= vm_low_phys_reserved);
2070 		spin_lock(&vm_contig_spin);
2071 		alist_free(&vm_contig_alist, start, pages);
2072 		spin_unlock(&vm_contig_spin);
2073 	} else {
2074 		while (pages) {
2075 			vm_page_busy_wait(m, FALSE, "cpgfr");
2076 			vm_page_unwire(m, 0);
2077 			vm_page_free(m);
2078 			--pages;
2079 			++m;
2080 		}
2082 	}
2083 }
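
/*
 * Illustrative sketch (not compiled): pairing vm_page_alloc_contig()
 * with vm_page_free_contig() for a 64KB DMA buffer below 4GB.  Both
 * calls take the size in bytes, and vm_page_free_contig() expects the
 * pages wired and not busied.
 */
#if 0
	vm_page_t m;

	m = vm_page_alloc_contig(0, 0xFFFFFFFFLLU, PAGE_SIZE, 0,
				 64 * 1024, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (ENOMEM);
	/* ... DMA using VM_PAGE_TO_PHYS(m) ... */
	vm_page_free_contig(m, 64 * 1024);
#endif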
2084 
2085 
2086 /*
2087  * Wait for sufficient free memory for nominal heavy memory use kernel
2088  * operations.
2089  *
2090  * WARNING!  Be sure never to call this in any vm_pageout code path, which
2091  *	     will trivially deadlock the system.
2092  */
2093 void
2094 vm_wait_nominal(void)
2095 {
2096 	while (vm_page_count_min(0))
2097 		vm_wait(0);
2098 }
2099 
2100 /*
2101  * Test if vm_wait_nominal() would block.
2102  */
2103 int
2104 vm_test_nominal(void)
2105 {
2106 	if (vm_page_count_min(0))
2107 		return(1);
2108 	return(0);
2109 }
2110 
2111 /*
2112  * Block until free pages are available for allocation, called in various
2113  * places before memory allocations.
2114  *
2115  * The caller may loop if vm_page_count_min() == FALSE so we cannot be
2116  * more generous than that.
2117  */
2118 void
2119 vm_wait(int timo)
2120 {
2121 	/*
2122 	 * never wait forever
2123 	 */
2124 	if (timo == 0)
2125 		timo = hz;
2126 	lwkt_gettoken(&vm_token);
2127 
2128 	if (curthread == pagethread ||
2129 	    curthread == emergpager) {
2130 		/*
2131 		 * The pageout daemon itself needs pages, this is bad.
2132 		 */
2133 		if (vm_page_count_min(0)) {
2134 			vm_pageout_pages_needed = 1;
2135 			tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo);
2136 		}
2137 	} else {
2138 		/*
2139 		 * Wakeup the pageout daemon if necessary and wait.
2140 		 *
2141 		 * Do not wait indefinitely for the target to be reached,
2142 		 * as load might prevent it from being reached any time soon.
2143 		 * But wait a little to try to slow down page allocations
2144 		 * and to give more important threads (the pagedaemon)
2145 		 * allocation priority.
2146 		 */
2147 		if (vm_page_count_target()) {
2148 			if (vm_pages_needed == 0) {
2149 				vm_pages_needed = 1;
2150 				wakeup(&vm_pages_needed);
2151 			}
2152 			++vm_pages_waiting;	/* SMP race ok */
2153 			tsleep(&vmstats.v_free_count, 0, "vmwait", timo);
2154 		}
2155 	}
2156 	lwkt_reltoken(&vm_token);
2157 }
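
/*
 * Illustrative sketch (not compiled): the canonical retry pattern for
 * an allocation that fails under memory pressure, mirroring what
 * vm_page_grab() does internally.  vm_wait(0) blocks for at most
 * about one second (hz ticks) per iteration.  'obj' and 'pindex' are
 * hypothetical.
 */
#if 0
	for (;;) {
		m = vm_page_alloc(obj, pindex,
				  VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK);
		if (m)
			break;
		vm_wait(0);
	}
#endif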
2158 
2159 /*
2160  * Block until free pages are available for allocation
2161  *
2162  * Called only from vm_fault so that processes page faulting can be
2163  * easily tracked.
2164  */
2165 void
2166 vm_wait_pfault(void)
2167 {
2168 	/*
2169 	 * Wakeup the pageout daemon if necessary and wait.
2170 	 *
2171 	 * Do not wait indefinitely for the target to be reached,
2172 	 * as load might prevent it from being reached any time soon.
2173 	 * But wait a little to try to slow down page allocations
2174 	 * and to give more important threads (the pagedaemon)
2175 	 * allocation priority.
2176 	 */
2177 	if (vm_page_count_min(0)) {
2178 		lwkt_gettoken(&vm_token);
2179 		while (vm_page_count_severe()) {
2180 			if (vm_page_count_target()) {
2181 				thread_t td;
2182 
2183 				if (vm_pages_needed == 0) {
2184 					vm_pages_needed = 1;
2185 					wakeup(&vm_pages_needed);
2186 				}
2187 				++vm_pages_waiting;	/* SMP race ok */
2188 				tsleep(&vmstats.v_free_count, 0, "pfault", hz);
2189 
2190 				/*
2191 				 * Do not stay stuck in the loop if the system is trying
2192 				 * to kill the process.
2193 				 */
2194 				td = curthread;
2195 				if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
2196 					break;
2197 			}
2198 		}
2199 		lwkt_reltoken(&vm_token);
2200 	}
2201 }
2202 
2203 /*
2204  * Put the specified page on the active list (if appropriate).  Ensure
2205  * that act_count is at least ACT_INIT but do not otherwise mess with it.
2206  *
2207  * The caller should be holding the page busied?  XXX
2208  * This routine may not block.
2209  */
2210 void
2211 vm_page_activate(vm_page_t m)
2212 {
2213 	u_short oqueue;
2214 
2215 	vm_page_spin_lock(m);
2216 	if (m->queue - m->pc != PQ_ACTIVE) {
2217 		_vm_page_queue_spin_lock(m);
2218 		oqueue = _vm_page_rem_queue_spinlocked(m);
2219 		/* page is left spinlocked, queue is unlocked */
2220 
2221 		if (oqueue == PQ_CACHE)
2222 			mycpu->gd_cnt.v_reactivated++;
2223 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
2224 			if (m->act_count < ACT_INIT)
2225 				m->act_count = ACT_INIT;
2226 			_vm_page_add_queue_spinlocked(m, PQ_ACTIVE + m->pc, 0);
2227 		}
2228 		_vm_page_and_queue_spin_unlock(m);
2229 		if (oqueue == PQ_CACHE || oqueue == PQ_FREE)
2230 			pagedaemon_wakeup();
2231 	} else {
2232 		if (m->act_count < ACT_INIT)
2233 			m->act_count = ACT_INIT;
2234 		vm_page_spin_unlock(m);
2235 	}
2236 }
2237 
2238 /*
2239  * Helper routine for vm_page_free_toq() and vm_page_cache().  This
2240  * routine is called when a page has been added to the cache or free
2241  * queues.
2242  *
2243  * This routine may not block.
2244  */
2245 static __inline void
2246 vm_page_free_wakeup(void)
2247 {
2248 	globaldata_t gd = mycpu;
2249 
2250 	/*
2251 	 * If the pageout daemon itself needs pages, then tell it that
2252 	 * there are some free.
2253 	 */
2254 	if (vm_pageout_pages_needed &&
2255 	    gd->gd_vmstats.v_cache_count + gd->gd_vmstats.v_free_count >=
2256 	    gd->gd_vmstats.v_pageout_free_min
2257 	) {
2258 		vm_pageout_pages_needed = 0;
2259 		wakeup(&vm_pageout_pages_needed);
2260 	}
2261 
2262 	/*
2263 	 * Wakeup processes that are waiting on memory.
2264 	 *
2265 	 * Generally speaking we want to wakeup stuck processes as soon as
2266 	 * possible.  !vm_page_count_min(0) is the absolute minimum point
2267 	 * where we can do this.  Wait a bit longer to reduce degenerate
2268 	 * re-blocking (vm_page_free_hysteresis).  The target check is just
2269 	 * to make sure the min-check w/hysteresis does not exceed the
2270 	 * normal target.
2271 	 */
2272 	if (vm_pages_waiting) {
2273 		if (!vm_page_count_min(vm_page_free_hysteresis) ||
2274 		    !vm_page_count_target()) {
2275 			vm_pages_waiting = 0;
2276 			wakeup(&vmstats.v_free_count);
2277 			++mycpu->gd_cnt.v_ppwakeups;
2278 		}
2279 #if 0
2280 		if (!vm_page_count_target()) {
2281 			/*
2282 			 * Plenty of pages are free, wakeup everyone.
2283 			 */
2284 			vm_pages_waiting = 0;
2285 			wakeup(&vmstats.v_free_count);
2286 			++mycpu->gd_cnt.v_ppwakeups;
2287 		} else if (!vm_page_count_min(0)) {
2288 			/*
2289 			 * Some pages are free, wakeup someone.
2290 			 */
2291 			int wcount = vm_pages_waiting;
2292 			if (wcount > 0)
2293 				--wcount;
2294 			vm_pages_waiting = wcount;
2295 			wakeup_one(&vmstats.v_free_count);
2296 			++mycpu->gd_cnt.v_ppwakeups;
2297 		}
2298 #endif
2299 	}
2300 }
2301 
2302 /*
2303  * Returns the given page to the PQ_FREE or PQ_HOLD list and disassociates
2304  * it from its VM object.
2305  *
2306  * The vm_page must be BUSY on entry.  BUSY will be released on
2307  * return (the page will have been freed).
2308  */
2309 void
2310 vm_page_free_toq(vm_page_t m)
2311 {
2312 	mycpu->gd_cnt.v_tfree++;
2313 	KKASSERT((m->flags & PG_MAPPED) == 0);
2314 	KKASSERT(m->busy_count & PBUSY_LOCKED);
2315 
2316 	if ((m->busy_count & PBUSY_MASK) || ((m->queue - m->pc) == PQ_FREE)) {
2317 		kprintf("vm_page_free: pindex(%lu), busy %08x, "
2318 			"hold(%d)\n",
2319 			(u_long)m->pindex, m->busy_count, m->hold_count);
2320 		if ((m->queue - m->pc) == PQ_FREE)
2321 			panic("vm_page_free: freeing free page");
2322 		else
2323 			panic("vm_page_free: freeing busy page");
2324 	}
2325 
2326 	/*
2327 	 * Remove from object, spinlock the page and its queues and
2328 	 * remove from any queue.  No queue spinlock will be held
2329 	 * after this section (because the page was removed from any
2330 	 * queue).
2331 	 */
2332 	vm_page_remove(m);
2333 	vm_page_and_queue_spin_lock(m);
2334 	_vm_page_rem_queue_spinlocked(m);
2335 
2336 	/*
2337 	 * No further management of fictitious pages occurs beyond object
2338 	 * and queue removal.
2339 	 */
2340 	if ((m->flags & PG_FICTITIOUS) != 0) {
2341 		vm_page_spin_unlock(m);
2342 		vm_page_wakeup(m);
2343 		return;
2344 	}
2345 
2346 	m->valid = 0;
2347 	vm_page_undirty(m);
2348 
2349 	if (m->wire_count != 0) {
2350 		if (m->wire_count > 1) {
2351 		    panic(
2352 			"vm_page_free: invalid wire count (%d), pindex: 0x%lx",
2353 			m->wire_count, (long)m->pindex);
2354 		}
2355 		panic("vm_page_free: freeing wired page");
2356 	}
2357 
2358 	/*
2359 	 * Clear the UNMANAGED flag when freeing an unmanaged page.
2360 	 * Clear the NEED_COMMIT flag
2361 	 */
2362 	if (m->flags & PG_UNMANAGED)
2363 		vm_page_flag_clear(m, PG_UNMANAGED);
2364 	if (m->flags & PG_NEED_COMMIT)
2365 		vm_page_flag_clear(m, PG_NEED_COMMIT);
2366 
2367 	if (m->hold_count != 0) {
2368 		_vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0);
2369 	} else {
2370 		_vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 1);
2371 	}
2372 
2373 	/*
2374 	 * This sequence allows us to clear BUSY while still holding
2375 	 * its spin lock, which reduces contention vs allocators.  We
2376 	 * must not leave the queue locked or _vm_page_wakeup() may
2377 	 * deadlock.
2378 	 */
2379 	_vm_page_queue_spin_unlock(m);
2380 	if (_vm_page_wakeup(m)) {
2381 		vm_page_spin_unlock(m);
2382 		wakeup(m);
2383 	} else {
2384 		vm_page_spin_unlock(m);
2385 	}
2386 	vm_page_free_wakeup();
2387 }
2388 
2389 /*
2390  * vm_page_unmanage()
2391  *
2392  * Prevent PV management from being done on the page.  The page is
2393  * removed from the paging queues as if it were wired, and as a
2394  * consequence of no longer being managed the pageout daemon will not
2395  * touch it (since there is no way to locate the pte mappings for the
2396  * page).  madvise() calls that mess with the pmap will also no longer
2397  * operate on the page.
2398  *
2399  * Beyond that the page is still reasonably 'normal'.  Freeing the page
2400  * will clear the flag.
2401  *
2402  * This routine is used by OBJT_PHYS objects - objects using unswappable
2403  * physical memory as backing store rather than swap-backed memory and
2404  * will eventually be extended to support 4MB unmanaged physical
2405  * mappings.
2406  *
2407  * Caller must be holding the page busy.
2408  */
2409 void
2410 vm_page_unmanage(vm_page_t m)
2411 {
2412 	KKASSERT(m->busy_count & PBUSY_LOCKED);
2413 	if ((m->flags & PG_UNMANAGED) == 0) {
2414 		if (m->wire_count == 0)
2415 			vm_page_unqueue(m);
2416 	}
2417 	vm_page_flag_set(m, PG_UNMANAGED);
2418 }
2419 
2420 /*
2421  * Mark this page as wired down by yet another map, removing it from
2422  * paging queues as necessary.
2423  *
2424  * Caller must be holding the page busy.
2425  */
2426 void
2427 vm_page_wire(vm_page_t m)
2428 {
2429 	/*
2430 	 * Only bump the wire statistics if the page is not already wired,
2431 	 * and only unqueue the page if it is on some queue (if it is unmanaged
2432 	 * it is already off the queues).  Don't do anything with fictitious
2433 	 * pages because they are always wired.
2434 	 */
2435 	KKASSERT(m->busy_count & PBUSY_LOCKED);
2436 	if ((m->flags & PG_FICTITIOUS) == 0) {
2437 		if (atomic_fetchadd_int(&m->wire_count, 1) == 0) {
2438 			if ((m->flags & PG_UNMANAGED) == 0)
2439 				vm_page_unqueue(m);
2440 			atomic_add_long(&mycpu->gd_vmstats_adj.v_wire_count, 1);
2441 		}
2442 		KASSERT(m->wire_count != 0,
2443 			("vm_page_wire: wire_count overflow m=%p", m));
2444 	}
2445 }
2446 
2447 /*
2448  * Release one wiring of this page, potentially enabling it to be paged again.
2449  *
2450  * Many pages placed on the inactive queue should actually go
2451  * into the cache, but it is difficult to figure out which.  What
2452  * we do instead, if the inactive target is well met, is to put
2453  * clean pages at the head of the inactive queue instead of the tail.
2454  * This will cause them to be moved to the cache more quickly and
2455  * if not actively re-referenced, freed more quickly.  If we just
2456  * stick these pages at the end of the inactive queue, heavy filesystem
2457  * meta-data accesses can cause an unnecessary paging load on memory bound
2458  * processes.  This optimization causes one-time-use metadata to be
2459  * reused more quickly.
2460  *
2461  * Pages marked PG_NEED_COMMIT are always activated and never placed on
2462  * the inactive queue.  This helps the pageout daemon determine memory
2463  * pressure and act on out-of-memory situations more quickly.
2464  *
2465  * BUT, if we are in a low-memory situation we have no choice but to
2466  * put clean pages on the cache queue.
2467  *
2468  * A number of routines use vm_page_unwire() to guarantee that the page
2469  * will go into either the inactive or active queues, and will NEVER
2470  * be placed in the cache - for example, just after dirtying a page.
2471  * Dirty pages in the cache are not allowed.
2472  *
2473  * This routine may not block.
2474  */
2475 void
2476 vm_page_unwire(vm_page_t m, int activate)
2477 {
2478 	KKASSERT(m->busy_count & PBUSY_LOCKED);
2479 	if (m->flags & PG_FICTITIOUS) {
2480 		/* do nothing */
2481 	} else if (m->wire_count <= 0) {
2482 		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
2483 	} else {
2484 		if (atomic_fetchadd_int(&m->wire_count, -1) == 1) {
2485 			atomic_add_long(&mycpu->gd_vmstats_adj.v_wire_count,-1);
2486 			if (m->flags & PG_UNMANAGED) {
2487 				;
2488 			} else if (activate || (m->flags & PG_NEED_COMMIT)) {
2489 				vm_page_spin_lock(m);
2490 				_vm_page_add_queue_spinlocked(m,
2491 							PQ_ACTIVE + m->pc, 0);
2492 				_vm_page_and_queue_spin_unlock(m);
2493 			} else {
2494 				vm_page_spin_lock(m);
2495 				vm_page_flag_clear(m, PG_WINATCFLS);
2496 				_vm_page_add_queue_spinlocked(m,
2497 							PQ_INACTIVE + m->pc, 0);
2498 				++vm_swapcache_inactive_heuristic;
2499 				_vm_page_and_queue_spin_unlock(m);
2500 			}
2501 		}
2502 	}
2503 }
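
/*
 * Illustrative sketch (not compiled): wiring pins a page across an
 * operation, and unwiring with activate=0 places it on the inactive
 * queue for eventual reclaim.  The page must be busied across both
 * calls; the wmesg strings are arbitrary.
 */
#if 0
	vm_page_busy_wait(m, FALSE, "exwire");
	vm_page_wire(m);
	vm_page_wakeup(m);

	/* ... the page cannot be paged out while wired ... */

	vm_page_busy_wait(m, FALSE, "exunwr");
	vm_page_unwire(m, 0);
	vm_page_wakeup(m);
#endif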
2504 
2505 /*
2506  * Move the specified page to the inactive queue.  If the page has
2507  * any associated swap, the swap is deallocated.
2508  *
2509  * Normally athead is 0 resulting in LRU operation.  athead is set
2510  * to 1 if we want this page to be 'as if it were placed in the cache',
2511  * except without unmapping it from the process address space.
2512  *
2513  * vm_page's spinlock must be held on entry and will remain held on return.
2514  * This routine may not block.
2515  */
2516 static void
2517 _vm_page_deactivate_locked(vm_page_t m, int athead)
2518 {
2519 	u_short oqueue;
2520 
2521 	/*
2522 	 * Ignore if already inactive.
2523 	 */
2524 	if (m->queue - m->pc == PQ_INACTIVE)
2525 		return;
2526 	_vm_page_queue_spin_lock(m);
2527 	oqueue = _vm_page_rem_queue_spinlocked(m);
2528 
2529 	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
2530 		if (oqueue == PQ_CACHE)
2531 			mycpu->gd_cnt.v_reactivated++;
2532 		vm_page_flag_clear(m, PG_WINATCFLS);
2533 		_vm_page_add_queue_spinlocked(m, PQ_INACTIVE + m->pc, athead);
2534 		if (athead == 0)
2535 			++vm_swapcache_inactive_heuristic;
2536 	}
2537 	/* NOTE: PQ_NONE if condition not taken */
2538 	_vm_page_queue_spin_unlock(m);
2539 	/* leaves vm_page spinlocked */
2540 }
2541 
2542 /*
2543  * Attempt to deactivate a page.
2544  *
2545  * No requirements.
2546  */
2547 void
2548 vm_page_deactivate(vm_page_t m)
2549 {
2550 	vm_page_spin_lock(m);
2551 	_vm_page_deactivate_locked(m, 0);
2552 	vm_page_spin_unlock(m);
2553 }
2554 
2555 void
2556 vm_page_deactivate_locked(vm_page_t m)
2557 {
2558 	_vm_page_deactivate_locked(m, 0);
2559 }
2560 
2561 /*
2562  * Attempt to move a busied page to PQ_CACHE, then unconditionally unbusy it.
2563  *
2564  * This function returns non-zero if it successfully moved the page to
2565  * PQ_CACHE.
2566  *
2567  * This function unconditionally unbusies the page on return.
2568  */
2569 int
2570 vm_page_try_to_cache(vm_page_t m)
2571 {
2572 	vm_page_spin_lock(m);
2573 	if (m->dirty || m->hold_count || m->wire_count ||
2574 	    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT))) {
2575 		if (_vm_page_wakeup(m)) {
2576 			vm_page_spin_unlock(m);
2577 			wakeup(m);
2578 		} else {
2579 			vm_page_spin_unlock(m);
2580 		}
2581 		return(0);
2582 	}
2583 	vm_page_spin_unlock(m);
2584 
2585 	/*
2586 	 * Page busied by us and no longer spinlocked.  Dirty pages cannot
2587 	 * be moved to the cache.
2588 	 */
2589 	vm_page_test_dirty(m);
2590 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2591 		vm_page_wakeup(m);
2592 		return(0);
2593 	}
2594 	vm_page_cache(m);
2595 	return(1);
2596 }
2597 
2598 /*
2599  * Attempt to free the page.  If we cannot free it, we do nothing.
2600  * 1 is returned on success, 0 on failure.
2601  *
2602  * No requirements.
2603  */
2604 int
2605 vm_page_try_to_free(vm_page_t m)
2606 {
2607 	vm_page_spin_lock(m);
2608 	if (vm_page_busy_try(m, TRUE)) {
2609 		vm_page_spin_unlock(m);
2610 		return(0);
2611 	}
2612 
2613 	/*
2614 	 * The page can be in any state, including already being on the free
2615 	 * queue.  Check to see if it really can be freed.
2616 	 */
2617 	if (m->dirty ||				/* can't free if it is dirty */
2618 	    m->hold_count ||			/* or held (XXX may be wrong) */
2619 	    m->wire_count ||			/* or wired */
2620 	    (m->flags & (PG_UNMANAGED |		/* or unmanaged */
2621 			 PG_NEED_COMMIT)) ||	/* or needs a commit */
2622 	    m->queue - m->pc == PQ_FREE ||	/* already on PQ_FREE */
2623 	    m->queue - m->pc == PQ_HOLD) {	/* already on PQ_HOLD */
2624 		if (_vm_page_wakeup(m)) {
2625 			vm_page_spin_unlock(m);
2626 			wakeup(m);
2627 		} else {
2628 			vm_page_spin_unlock(m);
2629 		}
2630 		return(0);
2631 	}
2632 	vm_page_spin_unlock(m);
2633 
2634 	/*
2635 	 * We can probably free the page.
2636 	 *
2637 	 * Page busied by us and no longer spinlocked.  Dirty pages will
2638 	 * not be freed by this function.    We have to re-test the
2639 	 * dirty bit after cleaning out the pmaps.
2640 	 */
2641 	vm_page_test_dirty(m);
2642 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2643 		vm_page_wakeup(m);
2644 		return(0);
2645 	}
2646 	vm_page_protect(m, VM_PROT_NONE);
2647 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2648 		vm_page_wakeup(m);
2649 		return(0);
2650 	}
2651 	vm_page_free(m);
2652 	return(1);
2653 }
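
/*
 * Illustrative sketch (not compiled): the two try-helpers differ in
 * their busy requirements.  vm_page_try_to_free() acquires the hard
 * busy itself, while vm_page_try_to_cache() expects the caller to
 * have busied the page; both release the busy before returning.  The
 * page and counter names are hypothetical.
 */
#if 0
	/* reclaim attempt on a page we have NOT busied */
	if (vm_page_try_to_free(m))
		++freed;

	/* caching attempt on a page we HAVE busied */
	vm_page_busy_wait(m2, TRUE, "excach");
	if (vm_page_try_to_cache(m2))
		++cached;
#endif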
2654 
2655 /*
2656  * vm_page_cache
2657  *
2658  * Put the specified page onto the page cache queue (if appropriate).
2659  *
2660  * The page must be busy, and this routine will release the busy and
2661  * possibly even free the page.
2662  */
2663 void
2664 vm_page_cache(vm_page_t m)
2665 {
2666 	/*
2667 	 * Not suitable for the cache
2668 	 */
2669 	if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
2670 	    (m->busy_count & PBUSY_MASK) ||
2671 	    m->wire_count || m->hold_count) {
2672 		vm_page_wakeup(m);
2673 		return;
2674 	}
2675 
2676 	/*
2677 	 * Already in the cache (and thus not mapped)
2678 	 */
2679 	if ((m->queue - m->pc) == PQ_CACHE) {
2680 		KKASSERT((m->flags & PG_MAPPED) == 0);
2681 		vm_page_wakeup(m);
2682 		return;
2683 	}
2684 
2685 	/*
2686 	 * Caller is required to test m->dirty, but note that the act of
2687 	 * removing the page from its maps can cause it to become dirty
2688 	 * on an SMP system due to another cpu running in usermode.
2689 	 */
2690 	if (m->dirty) {
2691 		panic("vm_page_cache: caching a dirty page, pindex: %ld",
2692 			(long)m->pindex);
2693 	}
2694 
2695 	/*
2696 	 * Remove all pmaps and indicate that the page is not
2697 	 * writeable or mapped.  Our vm_page_protect() call may
2698 	 * have blocked (especially w/ VM_PROT_NONE), so recheck
2699 	 * everything.
2700 	 */
2701 	vm_page_protect(m, VM_PROT_NONE);
2702 	if ((m->flags & (PG_UNMANAGED | PG_MAPPED)) ||
2703 	    (m->busy_count & PBUSY_MASK) ||
2704 	    m->wire_count || m->hold_count) {
2705 		vm_page_wakeup(m);
2706 	} else if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2707 		vm_page_deactivate(m);
2708 		vm_page_wakeup(m);
2709 	} else {
2710 		_vm_page_and_queue_spin_lock(m);
2711 		_vm_page_rem_queue_spinlocked(m);
2712 		_vm_page_add_queue_spinlocked(m, PQ_CACHE + m->pc, 0);
2713 		_vm_page_queue_spin_unlock(m);
2714 		if (_vm_page_wakeup(m)) {
2715 			vm_page_spin_unlock(m);
2716 			wakeup(m);
2717 		} else {
2718 			vm_page_spin_unlock(m);
2719 		}
2720 		vm_page_free_wakeup();
2721 	}
2722 }
2723 
2724 /*
2725  * vm_page_dontneed()
2726  *
2727  * Cache, deactivate, or do nothing as appropriate.  This routine
2728  * is typically used by madvise() MADV_DONTNEED.
2729  *
2730  * Generally speaking we want to move the page into the cache so
2731  * it gets reused quickly.  However, this can result in a silly syndrome
2732  * due to the page recycling too quickly.  Small objects will not be
2733  * fully cached.  On the other hand, if we move the page to the inactive
2734  * queue we wind up with a problem whereby very large objects
2735  * unnecessarily blow away our inactive and cache queues.
2736  *
2737  * The solution is to move the pages based on a fixed weighting.  We
2738  * either leave them alone, deactivate them, or move them to the cache,
2739  * where moving them to the cache has the highest weighting.
2740  * By forcing some pages into other queues we eventually force the
2741  * system to balance the queues, potentially recovering other unrelated
2742  * space from active.  The idea is to not force this to happen too
2743  * often.
2744  *
2745  * The page must be busied.
2746  */
2747 void
2748 vm_page_dontneed(vm_page_t m)
2749 {
2750 	static int dnweight;
2751 	int dnw;
2752 	int head;
2753 
2754 	dnw = ++dnweight;
2755 
2756 	/*
2757 	 * occasionally leave the page alone
2758 	 */
2759 	if ((dnw & 0x01F0) == 0 ||
2760 	    m->queue - m->pc == PQ_INACTIVE ||
2761 	    m->queue - m->pc == PQ_CACHE
2762 	) {
2763 		if (m->act_count >= ACT_INIT)
2764 			--m->act_count;
2765 		return;
2766 	}
2767 
2768 	/*
2769 	 * If vm_page_dontneed() is inactivating a page, it must clear
2770 	 * the referenced flag; otherwise the pagedaemon will see references
2771 	 * on the page in the inactive queue and reactivate it. Until the
2772 	 * page can move to the cache queue, madvise's job is not done.
2773 	 */
2774 	vm_page_flag_clear(m, PG_REFERENCED);
2775 	pmap_clear_reference(m);
2776 
2777 	if (m->dirty == 0)
2778 		vm_page_test_dirty(m);
2779 
2780 	if (m->dirty || (dnw & 0x0070) == 0) {
2781 		/*
2782 		 * Deactivate the page 3 times out of 32.
2783 		 */
2784 		head = 0;
2785 	} else {
2786 		/*
2787 		 * Cache the page 28 times out of every 32.  Note that
2788 		 * the page is deactivated instead of cached, but placed
2789 		 * at the head of the queue instead of the tail.
2790 		 */
2791 		head = 1;
2792 	}
2793 	vm_page_spin_lock(m);
2794 	_vm_page_deactivate_locked(m, head);
2795 	vm_page_spin_unlock(m);
2796 }
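
/*
 * Worked weighting arithmetic for the checks above: over 512
 * consecutive calls, (dnw & 0x01F0) == 0 holds for 16 values (leave
 * alone, 1/32); of the remainder, (dnw & 0x0070) == 0 holds for 48
 * more values, and dirty pages unconditionally take the same path
 * (deactivate, 3/32); the other 448 values (28/32) are deactivated at
 * the head of the inactive queue, i.e. effectively cached.
 */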
2797 
2798 /*
2799  * These routines manipulate the 'soft busy' count for a page.  A soft busy
2800  * is almost like a hard BUSY except that it allows certain compatible
2801  * operations to occur on the page while it is busy.  For example, a page
2802  * undergoing a write can still be mapped read-only.
2803  *
2804  * We also use soft-busy to quickly pmap_enter shared read-only pages
2805  * without having to hold the page locked.
2806  *
2807  * The soft-busy count can be > 1 in situations where multiple threads
2808  * are pmap_enter()ing the same page simultaneously, or when two buffer
2809  * cache buffers overlap the same page.
2810  *
2811  * The caller must hold the page BUSY when making these two calls.
2812  */
2813 void
2814 vm_page_io_start(vm_page_t m)
2815 {
2816 	uint32_t ocount;
2817 
2818 	ocount = atomic_fetchadd_int(&m->busy_count, 1);
2819 	KKASSERT(ocount & PBUSY_LOCKED);
2820 }
2821 
2822 void
2823 vm_page_io_finish(vm_page_t m)
2824 {
2825 	uint32_t ocount;
2826 
2827 	ocount = atomic_fetchadd_int(&m->busy_count, -1);
2828 	KKASSERT(ocount & PBUSY_MASK);
2829 #if 0
2830 	if (((ocount - 1) & (PBUSY_LOCKED | PBUSY_MASK)) == 0)
2831 		wakeup(m);
2832 #endif
2833 }
2834 
2835 /*
2836  * Attempt to soft-busy a page.  The page must not be PBUSY_LOCKED.
2837  *
2838  * We can't use fetchadd here because we might race a hard-busy and the
2839  * page freeing code asserts on a non-zero soft-busy count (even if only
2840  * temporary).
2841  *
2842  * Returns 0 on success, non-zero on failure.
2843  */
2844 int
2845 vm_page_sbusy_try(vm_page_t m)
2846 {
2847 	uint32_t ocount;
2848 
2849 	for (;;) {
2850 		ocount = m->busy_count;
2851 		cpu_ccfence();
2852 		if (ocount & PBUSY_LOCKED)
2853 			return 1;
2854 		if (atomic_cmpset_int(&m->busy_count, ocount, ocount + 1))
2855 			break;
2856 	}
2857 	return 0;
2858 #if 0
2859 	if (m->busy_count & PBUSY_LOCKED)
2860 		return 1;
2861 	ocount = atomic_fetchadd_int(&m->busy_count, 1);
2862 	if (ocount & PBUSY_LOCKED) {
2863 		vm_page_sbusy_drop(m);
2864 		return 1;
2865 	}
2866 	return 0;
2867 #endif
2868 }
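
/*
 * Illustrative sketch (not compiled): a reader can soft-busy a page
 * to keep it from being freed out from under it while copying data,
 * without blocking compatible users.  vm_page_sbusy_drop() (assumed
 * here; see the disabled variant above) releases the soft-busy count.
 */
#if 0
	if (vm_page_sbusy_try(m) == 0) {
		/* ... read from the page; it cannot be freed meanwhile ... */
		vm_page_sbusy_drop(m);
	}
#endif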
2869 
2870 /*
2871  * Indicate that a clean VM page requires a filesystem commit and cannot
2872  * be reused.  Used by tmpfs.
2873  */
2874 void
2875 vm_page_need_commit(vm_page_t m)
2876 {
2877 	vm_page_flag_set(m, PG_NEED_COMMIT);
2878 	vm_object_set_writeable_dirty(m->object);
2879 }
2880 
2881 void
2882 vm_page_clear_commit(vm_page_t m)
2883 {
2884 	vm_page_flag_clear(m, PG_NEED_COMMIT);
2885 }
2886 
2887 /*
2888  * Grab a page, blocking if it is busy and allocating a page if necessary.
2889  * A busy page is returned or NULL.  The page may or may not be valid and
2890  * might not be on a queue (the caller is responsible for the disposition of
2891  * the page).
2892  *
2893  * If VM_ALLOC_ZERO is specified and the grab must allocate a new page, the
2894  * page will be zero'd and marked valid.
2895  *
2896  * If VM_ALLOC_FORCE_ZERO is specified the page will be zero'd and marked
2897  * valid even if it already exists.
2898  *
2899  * If VM_ALLOC_RETRY is specified this routine will never return NULL.  Also
2900  * note that VM_ALLOC_NORMAL must be specified if VM_ALLOC_RETRY is specified.
2901  * VM_ALLOC_NULL_OK is implied when VM_ALLOC_RETRY is specified.
2902  *
2903  * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
2904  * always returned if we had blocked.
2905  *
2906  * This routine may not be called from an interrupt.
2907  *
2908  * No other requirements.
2909  */
2910 vm_page_t
2911 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2912 {
2913 	vm_page_t m;
2914 	int error;
2915 	int shared = 1;
2916 
2917 	KKASSERT(allocflags &
2918 		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
2919 	vm_object_hold_shared(object);
2920 	for (;;) {
2921 		m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
2922 		if (error) {
2923 			vm_page_sleep_busy(m, TRUE, "pgrbwt");
2924 			if ((allocflags & VM_ALLOC_RETRY) == 0) {
2925 				m = NULL;
2926 				break;
2927 			}
2928 			/* retry */
2929 		} else if (m == NULL) {
2930 			if (shared) {
2931 				vm_object_upgrade(object);
2932 				shared = 0;
2933 			}
2934 			if (allocflags & VM_ALLOC_RETRY)
2935 				allocflags |= VM_ALLOC_NULL_OK;
2936 			m = vm_page_alloc(object, pindex,
2937 					  allocflags & ~VM_ALLOC_RETRY);
2938 			if (m)
2939 				break;
2940 			vm_wait(0);
2941 			if ((allocflags & VM_ALLOC_RETRY) == 0)
2942 				goto failed;
2943 		} else {
2944 			/* m found */
2945 			break;
2946 		}
2947 	}
2948 
2949 	/*
2950 	 * If VM_ALLOC_ZERO an invalid page will be zero'd and set valid.
2951 	 *
2952 	 * If VM_ALLOC_FORCE_ZERO the page is unconditionally zero'd and set
2953 	 * valid even if already valid.
2954 	 *
2955 	 * NOTE!  We have removed all of the PG_ZERO optimizations and also
2956 	 *	  removed the idle zeroing code.  These optimizations actually
2957 	 *	  slow things down on modern cpus because the zeroed area is
2958 	 *	  likely uncached, placing a memory-access burden on the
2959 	 *	  accessors taking the fault.
2960 	 *
2961 	 *	  By always zeroing the page in-line with the fault, no
2962 	 *	  dynamic ram reads are needed and the caches are hot, ready
2963 	 *	  for userland to access the memory.
2964 	 */
2965 	if (m->valid == 0) {
2966 		if (allocflags & (VM_ALLOC_ZERO | VM_ALLOC_FORCE_ZERO)) {
2967 			pmap_zero_page(VM_PAGE_TO_PHYS(m));
2968 			m->valid = VM_PAGE_BITS_ALL;
2969 		}
2970 	} else if (allocflags & VM_ALLOC_FORCE_ZERO) {
2971 		pmap_zero_page(VM_PAGE_TO_PHYS(m));
2972 		m->valid = VM_PAGE_BITS_ALL;
2973 	}
2974 failed:
2975 	vm_object_drop(object);
2976 	return(m);
2977 }
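
/*
 * Illustrative sketch (not compiled): with VM_ALLOC_RETRY the grab
 * never returns NULL; VM_ALLOC_ZERO additionally zeroes and validates
 * the page if it was invalid.  'obj' and 'pindex' are hypothetical.
 */
#if 0
	m = vm_page_grab(obj, pindex,
			 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_ZERO);
	/* ... use the busied page ... */
	vm_page_wakeup(m);
#endif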
2978 
2979 /*
2980  * Mapping function for valid bits or for dirty bits in
2981  * a page.  May not block.
2982  *
2983  * Inputs are required to range within a page.
2984  *
2985  * No requirements.
2986  * Non blocking.
2987  */
2988 int
2989 vm_page_bits(int base, int size)
2990 {
2991 	int first_bit;
2992 	int last_bit;
2993 
2994 	KASSERT(
2995 	    base + size <= PAGE_SIZE,
2996 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2997 	);
2998 
2999 	if (size == 0)		/* handle degenerate case */
3000 		return(0);
3001 
3002 	first_bit = base >> DEV_BSHIFT;
3003 	last_bit = (base + size - 1) >> DEV_BSHIFT;
3004 
3005 	return ((2 << last_bit) - (1 << first_bit));
3006 }
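
/*
 * Worked example, assuming DEV_BSIZE = 512 (DEV_BSHIFT = 9):
 * vm_page_bits(1024, 2048) spans device blocks 2 through 5, so
 * first_bit = 2, last_bit = 5, and the result is
 * (2 << 5) - (1 << 2) = 64 - 4 = 0x3c, i.e. bits 2-5 set.  For a
 * full page, vm_page_bits(0, PAGE_SIZE) yields VM_PAGE_BITS_ALL.
 */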
3007 
3008 /*
3009  * Sets portions of a page valid and clean.  The arguments are expected
3010  * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
3011  * of any partial chunks touched by the range.  The invalid portion of
3012  * such chunks will be zero'd.
3013  *
3014  * NOTE: When truncating a buffer vnode_pager_setsize() will automatically
3015  *	 align base to DEV_BSIZE so as not to mark clean a partially
3016  *	 truncated device block.  Otherwise the dirty page status might be
3017  *	 lost.
3018  *
3019  * This routine may not block.
3020  *
3021  * (base + size) must be less than or equal to PAGE_SIZE.
3022  */
3023 static void
3024 _vm_page_zero_valid(vm_page_t m, int base, int size)
3025 {
3026 	int frag;
3027 	int endoff;
3028 
3029 	if (size == 0)	/* handle degenerate case */
3030 		return;
3031 
3032 	/*
3033 	 * If the base is not DEV_BSIZE aligned and the valid
3034 	 * bit is clear, we have to zero out a portion of the
3035 	 * first block.
3036 	 */
3037 
3038 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
3039 	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
3040 	) {
3041 		pmap_zero_page_area(
3042 		    VM_PAGE_TO_PHYS(m),
3043 		    frag,
3044 		    base - frag
3045 		);
3046 	}
3047 
3048 	/*
3049 	 * If the ending offset is not DEV_BSIZE aligned and the
3050 	 * valid bit is clear, we have to zero out a portion of
3051 	 * the last block.
3052 	 */
3053 
3054 	endoff = base + size;
3055 
3056 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
3057 	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
3058 	) {
3059 		pmap_zero_page_area(
3060 		    VM_PAGE_TO_PHYS(m),
3061 		    endoff,
3062 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
3063 		);
3064 	}
3065 }
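
/*
 * Worked example, assuming DEV_BSIZE = 512: for base = 100 and
 * size = 200 both fragments fall within device block 0.  If that
 * block's valid bit is clear, bytes [0,100) are zeroed for the
 * leading fragment and bytes [300,512) for the trailing fragment,
 * so the caller may then mark the block valid without exposing
 * stale data.
 */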
3066 
3067 /*
3068  * Set valid, clear dirty bits.  If validating the entire
3069  * page we can safely clear the pmap modify bit.  We also
3070  * use this opportunity to clear the PG_NOSYNC flag.  If a process
3071  * takes a write fault on a MAP_NOSYNC memory area the flag will
3072  * be set again.
3073  *
3074  * We set valid bits inclusive of any overlap, but we can only
3075  * clear dirty bits for DEV_BSIZE chunks that are fully within
3076  * the range.
3077  *
3078  * Page must be busied?
3079  * No other requirements.
3080  */
3081 void
3082 vm_page_set_valid(vm_page_t m, int base, int size)
3083 {
3084 	_vm_page_zero_valid(m, base, size);
3085 	m->valid |= vm_page_bits(base, size);
3086 }
3087 
3088 
3089 /*
3090  * Set valid bits and clear dirty bits.
3091  *
3092  * Page must be busied by caller.
3093  *
3094  * NOTE: This function does not clear the pmap modified bit.
3095  *	 Also note that e.g. NFS may use a byte-granular base
3096  *	 and size.
3097  *
3098  * No other requirements.
3099  */
3100 void
3101 vm_page_set_validclean(vm_page_t m, int base, int size)
3102 {
3103 	int pagebits;
3104 
3105 	_vm_page_zero_valid(m, base, size);
3106 	pagebits = vm_page_bits(base, size);
3107 	m->valid |= pagebits;
3108 	m->dirty &= ~pagebits;
3109 	if (base == 0 && size == PAGE_SIZE) {
3110 		/*pmap_clear_modify(m);*/
3111 		vm_page_flag_clear(m, PG_NOSYNC);
3112 	}
3113 }
3114 
3115 /*
3116  * Set valid & dirty.  Used by buwrite()
3117  *
3118  * Page must be busied by caller.
3119  */
3120 void
3121 vm_page_set_validdirty(vm_page_t m, int base, int size)
3122 {
3123 	int pagebits;
3124 
3125 	pagebits = vm_page_bits(base, size);
3126 	m->valid |= pagebits;
3127 	m->dirty |= pagebits;
3128 	if (m->object)
3129 		vm_object_set_writeable_dirty(m->object);
3130 }
3131 
3132 /*
3133  * Clear dirty bits.
3134  *
3135  * NOTE: This function does not clear the pmap modified bit.
3136  *	 Also note that e.g. NFS may use a byte-granular base
3137  *	 and size.
3138  *
3139  * Page must be busied?
3140  * No other requirements.
3141  */
3142 void
3143 vm_page_clear_dirty(vm_page_t m, int base, int size)
3144 {
3145 	m->dirty &= ~vm_page_bits(base, size);
3146 	if (base == 0 && size == PAGE_SIZE) {
3147 		/*pmap_clear_modify(m);*/
3148 		vm_page_flag_clear(m, PG_NOSYNC);
3149 	}
3150 }
3151 
3152 /*
3153  * Make the page all-dirty.
3154  *
3155  * Also make sure the related object and vnode reflect the fact that the
3156  * object may now contain a dirty page.
3157  *
3158  * Page must be busied?
3159  * No other requirements.
3160  */
3161 void
3162 vm_page_dirty(vm_page_t m)
3163 {
3164 #ifdef INVARIANTS
3165 	int pqtype = m->queue - m->pc;
3166 #endif
3167 	KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
3168 		("vm_page_dirty: page in free/cache queue!"));
3169 	if (m->dirty != VM_PAGE_BITS_ALL) {
3170 		m->dirty = VM_PAGE_BITS_ALL;
3171 		if (m->object)
3172 			vm_object_set_writeable_dirty(m->object);
3173 	}
3174 }
3175 
3176 /*
3177  * Invalidates DEV_BSIZE'd chunks within a page.  Both the
3178  * valid and dirty bits for the affected areas are cleared.
3179  *
3180  * Page must be busied?
3181  * Does not block.
3182  * No other requirements.
3183  */
3184 void
3185 vm_page_set_invalid(vm_page_t m, int base, int size)
3186 {
3187 	int bits;
3188 
3189 	bits = vm_page_bits(base, size);
3190 	m->valid &= ~bits;
3191 	m->dirty &= ~bits;
3192 	atomic_add_int(&m->object->generation, 1);
3193 }
3194 
3195 /*
3196  * The kernel assumes that the invalid portions of a page contain
3197  * garbage, but such pages can be mapped into memory by user code.
3198  * When this occurs, we must zero out the non-valid portions of the
3199  * page so user code sees what it expects.
3200  *
3201  * Pages are most often semi-valid when the end of a file is mapped
3202  * into memory and the file's size is not page aligned.
3203  *
3204  * Page must be busied?
3205  * No other requirements.
3206  */
3207 void
3208 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
3209 {
3210 	int b;
3211 	int i;
3212 
3213 	/*
3214 	 * Scan the valid bits looking for invalid sections that
3215 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
3216 	 * valid bit may be set) have already been zeroed by
3217 	 * vm_page_set_validclean().
3218 	 */
3219 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
3220 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
3221 		    (m->valid & (1 << i))
3222 		) {
3223 			if (i > b) {
3224 				pmap_zero_page_area(
3225 				    VM_PAGE_TO_PHYS(m),
3226 				    b << DEV_BSHIFT,
3227 				    (i - b) << DEV_BSHIFT
3228 				);
3229 			}
3230 			b = i + 1;
3231 		}
3232 	}
3233 
3234 	/*
3235 	 * setvalid is TRUE when we can safely set the zero'd areas
3236 	 * as being valid.  We can do this if there are no cache consistency
3237 	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
3238 	 */
3239 	if (setvalid)
3240 		m->valid = VM_PAGE_BITS_ALL;
3241 }
3242 
3243 /*
3244  * Is a (partial) page valid?  Note that the case where size == 0
3245  * will return FALSE in the degenerate case where the page is entirely
3246  * invalid, and TRUE otherwise.
3247  *
3248  * Does not block.
3249  * No other requirements.
3250  */
3251 int
3252 vm_page_is_valid(vm_page_t m, int base, int size)
3253 {
3254 	int bits = vm_page_bits(base, size);
3255 
3256 	if (m->valid && ((m->valid & bits) == bits))
3257 		return 1;
3258 	else
3259 		return 0;
3260 }
3261 
3262 /*
3263  * update dirty bits from pmap/mmu.  May not block.
3264  *
3265  * Caller must hold the page busy
3266  */
3267 void
3268 vm_page_test_dirty(vm_page_t m)
3269 {
3270 	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
3271 		vm_page_dirty(m);
3272 	}
3273 }
3274 
3275 #include "opt_ddb.h"
3276 #ifdef DDB
3277 #include <ddb/ddb.h>
3278 
3279 DB_SHOW_COMMAND(page, vm_page_print_page_info)
3280 {
3281 	db_printf("vmstats.v_free_count: %ld\n", vmstats.v_free_count);
3282 	db_printf("vmstats.v_cache_count: %ld\n", vmstats.v_cache_count);
3283 	db_printf("vmstats.v_inactive_count: %ld\n", vmstats.v_inactive_count);
3284 	db_printf("vmstats.v_active_count: %ld\n", vmstats.v_active_count);
3285 	db_printf("vmstats.v_wire_count: %ld\n", vmstats.v_wire_count);
3286 	db_printf("vmstats.v_free_reserved: %ld\n", vmstats.v_free_reserved);
3287 	db_printf("vmstats.v_free_min: %ld\n", vmstats.v_free_min);
3288 	db_printf("vmstats.v_free_target: %ld\n", vmstats.v_free_target);
3289 	db_printf("vmstats.v_cache_min: %ld\n", vmstats.v_cache_min);
3290 	db_printf("vmstats.v_inactive_target: %ld\n",
3291 		  vmstats.v_inactive_target);
3292 }
3293 
3294 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3295 {
3296 	int i;
3297 	db_printf("PQ_FREE:");
3298 	for (i = 0; i < PQ_L2_SIZE; i++) {
3299 		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
3300 	}
3301 	db_printf("\n");
3302 
3303 	db_printf("PQ_CACHE:");
3304 	for(i = 0; i < PQ_L2_SIZE; i++) {
3305 		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
3306 	}
3307 	db_printf("\n");
3308 
3309 	db_printf("PQ_ACTIVE:");
3310 	for(i = 0; i < PQ_L2_SIZE; i++) {
3311 		db_printf(" %d", vm_page_queues[PQ_ACTIVE + i].lcnt);
3312 	}
3313 	db_printf("\n");
3314 
3315 	db_printf("PQ_INACTIVE:");
3316 	for(i = 0; i < PQ_L2_SIZE; i++) {
3317 		db_printf(" %d", vm_page_queues[PQ_INACTIVE + i].lcnt);
3318 	}
3319 	db_printf("\n");
3320 }
3321 #endif /* DDB */
3322