xref: /dragonfly/sys/vm/vm_page.c (revision 44753b81)
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/alist.h>
#include <sys/sysctl.h>
#include <sys/cpu_topology.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>

#include <machine/inttypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm_page2.h>
#include <sys/spinlock2.h>

/*
 * Action hash for user umtx support.
 */
#define VMACTION_HSIZE		256
#define VMACTION_HMASK		(VMACTION_HSIZE - 1)

/*
 * SET - Minimum required set associative size, must be a power of 2.  We
 *	 want this to match or exceed the set-associativeness of the cpu.
 *
 * GRP - A larger set that allows bleed-over into the domains of other
 *	 nearby cpus.  Also must be a power of 2.  Used by the page zeroing
 *	 code to smooth things out a bit.
 */
#define PQ_SET_ASSOC		16
#define PQ_SET_ASSOC_MASK	(PQ_SET_ASSOC - 1)

#define PQ_GRP_ASSOC		(PQ_SET_ASSOC * 2)
#define PQ_GRP_ASSOC_MASK	(PQ_GRP_ASSOC - 1)
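
/*
 * Illustrative example: a pg_color of 37 masked with PQ_SET_ASSOC_MASK
 * selects within the 16-color window 32-47, so allocations from one cpu
 * cycle through 16 adjacent colors.  PQ_GRP_ASSOC doubles that window
 * to 32 colors so the page zeroing code can bleed into the color
 * domains of nearby cpus.
 */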

static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(u_short pg_color);
static vm_page_t _vm_page_list_find2(int basequeue, int index);
static void _vm_page_deactivate_locked(vm_page_t m, int athead);

/*
 * Array of tailq lists
 */
__cachealign struct vpgqueues vm_page_queues[PQ_COUNT];

LIST_HEAD(vm_page_action_list, vm_page_action);
struct vm_page_action_list	action_list[VMACTION_HSIZE];
static volatile int vm_pages_waiting;

static struct alist vm_contig_alist;
static struct almeta vm_contig_ameta[ALIST_RECORDS_65536];
static struct spinlock vm_contig_spin = SPINLOCK_INITIALIZER(&vm_contig_spin, "vm_contig_spin");

static u_long vm_dma_reserved = 0;
TUNABLE_ULONG("vm.dma_reserved", &vm_dma_reserved);
SYSCTL_ULONG(_vm, OID_AUTO, dma_reserved, CTLFLAG_RD, &vm_dma_reserved, 0,
	    "Memory reserved for DMA");
SYSCTL_UINT(_vm, OID_AUTO, dma_free_pages, CTLFLAG_RD,
	    &vm_contig_alist.bl_free, 0, "Free pages in the DMA reserve");

static int vm_contig_verbose = 0;
TUNABLE_INT("vm.contig_verbose", &vm_contig_verbose);

RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
	     vm_pindex_t, pindex);

static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_INACTIVE+i].cnt = &vmstats.v_inactive_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_ACTIVE+i].cnt = &vmstats.v_active_count;
	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_HOLD+i].cnt = &vmstats.v_active_count;
	/* PQ_NONE has no queue */

	for (i = 0; i < PQ_COUNT; i++) {
		TAILQ_INIT(&vm_page_queues[i].pl);
		spin_init(&vm_page_queues[i].spin, "vm_page_queue_init");
	}

	for (i = 0; i < VMACTION_HSIZE; i++)
		LIST_INIT(&action_list[i]);
}

/*
 * note: place in initialized data section?  Is this necessary?
 */
long first_page = 0;
int vm_page_array_size = 0;
vm_page_t vm_page_array = NULL;
vm_paddr_t vm_low_phys_reserved;

/*
 * (low level boot)
 *
 * Sets the page size, perhaps based upon the memory size.
 * Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
	if (vmstats.v_page_size == 0)
		vmstats.v_page_size = PAGE_SIZE;
	if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 * (low level boot)
 *
 * Add a new page to the freelist for use by the system.  New pages
 * are added to both the head and tail of the associated free page
 * queue in a bottom-up fashion, so both zero'd and non-zero'd page
 * requests pull 'recent' adds (higher physical addresses) first.
 *
 * Beware that the page zeroing daemon will also be running soon after
 * boot, moving pages from the head to the tail of the PQ_FREE queues.
 *
 * Must be called in a critical section.
 */
static void
vm_add_new_page(vm_paddr_t pa)
{
	struct vpgqueues *vpq;
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->pat_mode = PAT_WRITE_BACK;
	/*
	 * Twist for cpu localization in addition to page coloring, so
	 * different cpus selecting by m->queue get different page colors.
	 */
	m->pc ^= ((pa >> PAGE_SHIFT) / PQ_L2_SIZE) & PQ_L2_MASK;
	m->pc ^= ((pa >> PAGE_SHIFT) / (PQ_L2_SIZE * PQ_L2_SIZE)) & PQ_L2_MASK;
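	/*
	 * e.g. with PQ_L2_SIZE == 256 (PQ_L2_MASK == 255): page frame
	 * 0x12345 has base color 0x45; the two twists fold in 0x23 and
	 * 0x01, giving 0x45 ^ 0x23 ^ 0x01 = 0x67.  Pages exactly
	 * PQ_L2_SIZE frames apart thus land on different colors instead
	 * of aliasing.
	 */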
	/*
	 * Reserve a certain number of contiguous low memory pages for
	 * contigmalloc() to use.
	 */
	if (pa < vm_low_phys_reserved) {
		atomic_add_int(&vmstats.v_page_count, 1);
		atomic_add_int(&vmstats.v_dma_pages, 1);
		m->queue = PQ_NONE;
		m->wire_count = 1;
		atomic_add_int(&vmstats.v_wire_count, 1);
		alist_free(&vm_contig_alist, pa >> PAGE_SHIFT, 1);
		return;
	}

	/*
	 * General page
	 */
	m->queue = m->pc + PQ_FREE;
	KKASSERT(m->dirty == 0);

	atomic_add_int(&vmstats.v_page_count, 1);
	atomic_add_int(&vmstats.v_free_count, 1);
	vpq = &vm_page_queues[m->queue];
	TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
	++vpq->lcnt;
}

/*
 * (low level boot)
 *
 * Initializes the resident memory module.
 *
 * Preallocates memory for critical VM structures and arrays prior to
 * kernel_map becoming available.
 *
 * Memory is allocated from (virtual2_start, virtual2_end) if available,
 * otherwise memory is allocated from (virtual_start, virtual_end).
 *
 * On x86-64 (virtual_start, virtual_end) is only 2GB and may not be
 * large enough to hold vm_page_array & other structures for machines with
 * large amounts of ram, so we want to use virtual2* when available.
 */
void
vm_page_startup(void)
{
	vm_offset_t vaddr = virtual2_start ? virtual2_start : virtual_start;
	vm_offset_t mapped;
	vm_size_t npages;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;
	vm_paddr_t end;
	vm_paddr_t biggestone, biggestsize;
	vm_paddr_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page64(phys_avail[i]);
		phys_avail[i + 1] = trunc_page64(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];
	end = trunc_page(end);

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */
	vm_page_queue_init();

#if !defined(_KERNEL_VIRTUAL)
	/*
	 * VKERNELs don't support minidumps and as such don't need
	 * vm_page_dump
	 *
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	end -= vm_page_dump_size;
	vm_page_dump = (void *)pmap_map(&vaddr, end, end + vm_page_dump_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;

#ifndef _KERNEL_VIRTUAL
	/*
	 * (only applies to real kernels)
	 *
	 * Reserve a large amount of low memory for potential 32-bit DMA
	 * space allocations.  Once device initialization is complete we
	 * release most of it, but keep (vm_dma_reserved) memory reserved
	 * for later use.  Typically for X / graphics.  Through trial and
	 * error we find that GPUs usually require ~60-100MB or so.
	 *
	 * By default, 128M is left in reserve on machines with 2G+ of ram.
	 */
	vm_low_phys_reserved = (vm_paddr_t)65536 << PAGE_SHIFT;
	if (vm_low_phys_reserved > total / 4)
		vm_low_phys_reserved = total / 4;
	if (vm_dma_reserved == 0) {
		vm_dma_reserved = 128 * 1024 * 1024;	/* 128MB */
		if (vm_dma_reserved > total / 16)
			vm_dma_reserved = total / 16;
	}
#endif
	alist_init(&vm_contig_alist, 65536, vm_contig_ameta,
		   ALIST_RECORDS_65536);

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t)mapped;

#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
	/*
	 * since pmap_map on amd64 returns stuff out of a direct-map region,
	 * we have to manually add these pages to the minidump tracking so
	 * that they can be dumped, including the vm_page_array.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in ascending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	vmstats.v_page_count = 0;
	vmstats.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	if (virtual2_start)
		virtual2_start = vaddr;
	else
		virtual_start = vaddr;
}

/*
 * We tended to reserve a ton of memory for contigmalloc().  Now that most
 * drivers have initialized we want to return most of the remaining free
 * reserve back to the VM page queues so they can be used for normal
 * allocations.
 *
 * We leave vm_dma_reserved bytes worth of free pages in the reserve pool.
 */
static void
vm_page_startup_finish(void *dummy __unused)
{
	alist_blk_t blk;
	alist_blk_t rblk;
	alist_blk_t count;
	alist_blk_t xcount;
	alist_blk_t bfree;
	vm_page_t m;

	spin_lock(&vm_contig_spin);
	for (;;) {
		bfree = alist_free_info(&vm_contig_alist, &blk, &count);
		if (bfree <= vm_dma_reserved / PAGE_SIZE)
			break;
		if (count == 0)
			break;

		/*
		 * Figure out how much of the initial reserve we have to
		 * free in order to reach our target.
		 */
		bfree -= vm_dma_reserved / PAGE_SIZE;
		if (count > bfree) {
			blk += count - bfree;
			count = bfree;
		}

		/*
		 * Calculate the nearest power of 2 <= count.
		 */
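		/*
		 * e.g. if count is 23 the loop below walks xcount through
		 * 1, 2, 4, 8, 16, 32, exits at 32 > 23, and the shift
		 * leaves 16.  blk then advances by 7 so the 16 blocks
		 * freed are the top of the original 23-block run.
		 */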
		for (xcount = 1; xcount <= count; xcount <<= 1)
			;
		xcount >>= 1;
		blk += count - xcount;
		count = xcount;

		/*
		 * Allocate the pages from the alist, then free them to
		 * the normal VM page queues.
		 *
		 * Pages allocated from the alist are wired.  We have to
		 * busy, unwire, and free them.  We must also adjust
		 * vm_low_phys_reserved before freeing any pages to prevent
		 * confusion.
		 */
		rblk = alist_alloc(&vm_contig_alist, blk, count);
		if (rblk != blk) {
			kprintf("vm_page_startup_finish: Unable to return "
				"dma space @0x%08x/%d -> 0x%08x\n",
				blk, count, rblk);
			break;
		}
		atomic_add_int(&vmstats.v_dma_pages, -count);
		spin_unlock(&vm_contig_spin);

		m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
		vm_low_phys_reserved = VM_PAGE_TO_PHYS(m);
		while (count) {
			vm_page_busy_wait(m, FALSE, "cpgfr");
			vm_page_unwire(m, 0);
			vm_page_free(m);
			--count;
			++m;
		}
		spin_lock(&vm_contig_spin);
	}
	spin_unlock(&vm_contig_spin);

	/*
	 * Print out how much DMA space drivers have already allocated and
	 * how much is left over.
	 */
	kprintf("DMA space used: %jdk, remaining available: %jdk\n",
		(intmax_t)(vmstats.v_dma_pages - vm_contig_alist.bl_free) *
		(PAGE_SIZE / 1024),
		(intmax_t)vm_contig_alist.bl_free * (PAGE_SIZE / 1024));
}
SYSINIT(vm_pgend, SI_SUB_PROC0_POST, SI_ORDER_ANY,
	vm_page_startup_finish, NULL);


/*
 * Scan comparison function for Red-Black tree scans.  An inclusive
 * (start,end) is expected.  Other fields are not used.
 */
int
rb_vm_page_scancmp(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (p->pindex < info->start_pindex)
		return(-1);
	if (p->pindex > info->end_pindex)
		return(1);
	return(0);
}

int
rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
{
	if (p1->pindex < p2->pindex)
		return(-1);
	if (p1->pindex > p2->pindex)
		return(1);
	return(0);
}

void
vm_page_init(vm_page_t m)
{
	/* do nothing for now.  Called from pmap_page_init() */
}

/*
 * Each page queue has its own spin lock, which is fairly optimal for
 * allocating and freeing pages at least.
 *
 * The caller must hold the vm_page_spin_lock() before locking a vm_page's
 * queue spinlock via this function.  Also note that m->queue cannot change
 * unless both the page and queue are locked.
 */
static __inline
void
_vm_page_queue_spin_lock(vm_page_t m)
{
	u_short queue;

	queue = m->queue;
	if (queue != PQ_NONE) {
		spin_lock(&vm_page_queues[queue].spin);
		KKASSERT(queue == m->queue);
	}
}

static __inline
void
_vm_page_queue_spin_unlock(vm_page_t m)
{
	u_short queue;

	queue = m->queue;
	cpu_ccfence();
	if (queue != PQ_NONE)
		spin_unlock(&vm_page_queues[queue].spin);
}

static __inline
void
_vm_page_queues_spin_lock(u_short queue)
{
	cpu_ccfence();
	if (queue != PQ_NONE)
		spin_lock(&vm_page_queues[queue].spin);
}


static __inline
void
_vm_page_queues_spin_unlock(u_short queue)
{
	cpu_ccfence();
	if (queue != PQ_NONE)
		spin_unlock(&vm_page_queues[queue].spin);
}

void
vm_page_queue_spin_lock(vm_page_t m)
{
	_vm_page_queue_spin_lock(m);
}

void
vm_page_queues_spin_lock(u_short queue)
{
	_vm_page_queues_spin_lock(queue);
}

void
vm_page_queue_spin_unlock(vm_page_t m)
{
	_vm_page_queue_spin_unlock(m);
}

void
vm_page_queues_spin_unlock(u_short queue)
{
	_vm_page_queues_spin_unlock(queue);
}

/*
 * This locks the specified vm_page and its queue in the proper order
 * (page first, then queue).  The queue may change so the caller must
 * recheck on return.
 */
static __inline
void
_vm_page_and_queue_spin_lock(vm_page_t m)
{
	vm_page_spin_lock(m);
	_vm_page_queue_spin_lock(m);
}

static __inline
void
_vm_page_and_queue_spin_unlock(vm_page_t m)
{
	_vm_page_queues_spin_unlock(m->queue);
	vm_page_spin_unlock(m);
}

void
vm_page_and_queue_spin_unlock(vm_page_t m)
{
	_vm_page_and_queue_spin_unlock(m);
}

void
vm_page_and_queue_spin_lock(vm_page_t m)
{
	_vm_page_and_queue_spin_lock(m);
}

/*
 * Helper function removes vm_page from its current queue.
 * Returns the base queue the page used to be on.
 *
 * The vm_page and the queue must be spinlocked.
 * This function will unlock the queue but leave the page spinlocked.
 */
static __inline u_short
_vm_page_rem_queue_spinlocked(vm_page_t m)
{
	struct vpgqueues *pq;
	u_short queue;
	u_short oqueue;

	queue = m->queue;
	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		atomic_add_int(pq->cnt, -1);
		pq->lcnt--;
		m->queue = PQ_NONE;
		oqueue = queue;
		if ((queue - m->pc) == PQ_CACHE || (queue - m->pc) == PQ_FREE)
			queue -= m->pc;
		vm_page_queues_spin_unlock(oqueue);	/* intended */
	}
	return queue;
}

/*
 * Helper function places the vm_page on the specified queue.
 *
 * The vm_page must be spinlocked.
 * This function will return with both the page and the queue locked.
 */
static __inline void
_vm_page_add_queue_spinlocked(vm_page_t m, u_short queue, int athead)
{
	struct vpgqueues *pq;

	KKASSERT(m->queue == PQ_NONE);

	if (queue != PQ_NONE) {
		vm_page_queues_spin_lock(queue);
		pq = &vm_page_queues[queue];
		++pq->lcnt;
		atomic_add_int(pq->cnt, 1);
		m->queue = queue;

		/*
		 * PQ_FREE is always handled LIFO style to try to provide
		 * cache-hot pages to programs.
		 */
		if (queue - m->pc == PQ_FREE) {
			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
		} else if (athead) {
			TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
		} else {
			TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		}
		/* leave the queue spinlocked */
	}
}

/*
 * Wait until the page is no longer PG_BUSY, or (if also_m_busy is TRUE)
 * until m->busy is also zero.  At most one sleep call is made before
 * returning.
 *
 * This function does NOT busy the page and on return the page is not
 * guaranteed to be available.
 */
void
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
	u_int32_t flags;

	for (;;) {
		flags = m->flags;
		cpu_ccfence();

		if ((flags & PG_BUSY) == 0 &&
		    (also_m_busy == 0 || (flags & PG_SBUSY) == 0)) {
			break;
		}
		tsleep_interlock(m, 0);
		if (atomic_cmpset_int(&m->flags, flags,
				      flags | PG_WANTED | PG_REFERENCED)) {
			tsleep(m, PINTERLOCKED, msg, 0);
			break;
		}
	}
}
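
/*
 * Note on the interlock pattern above: tsleep_interlock() registers the
 * thread on m's sleep queue before the atomic_cmpset_int() publishes
 * PG_WANTED.  If m->flags changed in the interim the cmpset fails and
 * we retry; if it succeeds, tsleep() with PINTERLOCKED consumes the
 * interlock so a wakeup() racing in between cannot be lost.
 */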

/*
 * This calculates and returns a page color given an optional VM object and
 * either a pindex or an iterator.  We attempt to return a cpu-localized
 * pg_color that is still roughly 16-way set-associative.  The CPU topology
 * is used if it was probed.
 *
 * The caller may use the returned value to index into e.g. PQ_FREE when
 * allocating a page in order to nominally obtain pages that are hopefully
 * already localized to the requesting cpu.  This function is not able to
 * provide any sort of guarantee of this, but does its best to improve
 * hardware cache management performance.
 *
 * WARNING! The caller must mask the returned value with PQ_L2_MASK.
 */
u_short
vm_get_pg_color(globaldata_t gd, vm_object_t object, vm_pindex_t pindex)
{
	u_short pg_color;
	int phys_id;
	int core_id;
	int object_pg_color;

	phys_id = get_cpu_phys_id(gd->gd_cpuid);
	core_id = get_cpu_core_id(gd->gd_cpuid);
	object_pg_color = object ? object->pg_color : 0;

	if (cpu_topology_phys_ids && cpu_topology_core_ids) {
		int grpsize = PQ_L2_SIZE / cpu_topology_phys_ids;

		if (grpsize / cpu_topology_core_ids >= PQ_SET_ASSOC) {
			/*
			 * Enough space for a full break-down.
			 */
			pg_color = phys_id * grpsize;
			pg_color += core_id * grpsize / cpu_topology_core_ids;
			pg_color += (pindex + object_pg_color) %
				    (grpsize / cpu_topology_core_ids);
		} else {
			/*
			 * Not enough space, split up by physical package,
			 * then split up by core id but only down to a
			 * 16-set.  If all else fails, force a 16-set.
			 */
			pg_color = phys_id * grpsize;
			if (grpsize > 16) {
				pg_color += 16 * (core_id % (grpsize / 16));
				grpsize = 16;
			} else {
				grpsize = 16;
			}
			pg_color += (pindex + object_pg_color) %
				    grpsize;
		}
	} else {
		/*
		 * Unknown topology, distribute things evenly.
		 */
		pg_color = gd->gd_cpuid * PQ_L2_SIZE / ncpus;
		pg_color += pindex + object_pg_color;
	}
	return pg_color;
}
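
/*
 * Worked example (illustrative figures): with PQ_L2_SIZE == 256, two
 * physical packages, and four core ids per package, grpsize is 128 and
 * 128 / 4 == 32 >= PQ_SET_ASSOC, so the full break-down applies.  For
 * phys_id 1, core_id 2, and (pindex + object_pg_color) == 75 the result
 * is 1*128 + 2*32 + (75 % 32) = 203.
 */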

/*
 * Wait until PG_BUSY can be set, then set it.  If also_m_busy is TRUE we
 * also wait for m->busy to become 0 before setting PG_BUSY.
 */
void
VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
				     int also_m_busy, const char *msg
				     VM_PAGE_DEBUG_ARGS)
{
	u_int32_t flags;

	for (;;) {
		flags = m->flags;
		cpu_ccfence();
		if (flags & PG_BUSY) {
			tsleep_interlock(m, 0);
			if (atomic_cmpset_int(&m->flags, flags,
					  flags | PG_WANTED | PG_REFERENCED)) {
				tsleep(m, PINTERLOCKED, msg, 0);
			}
		} else if (also_m_busy && (flags & PG_SBUSY)) {
			tsleep_interlock(m, 0);
			if (atomic_cmpset_int(&m->flags, flags,
					  flags | PG_WANTED | PG_REFERENCED)) {
				tsleep(m, PINTERLOCKED, msg, 0);
			}
		} else {
			if (atomic_cmpset_int(&m->flags, flags,
					      flags | PG_BUSY)) {
#ifdef VM_PAGE_DEBUG
				m->busy_func = func;
				m->busy_line = lineno;
#endif
				break;
			}
		}
	}
}

/*
 * Attempt to set PG_BUSY.  If also_m_busy is TRUE we only succeed if m->busy
 * is also 0.
 *
 * Returns non-zero on failure.
 */
int
VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m, int also_m_busy
				    VM_PAGE_DEBUG_ARGS)
{
	u_int32_t flags;

	for (;;) {
		flags = m->flags;
		cpu_ccfence();
		if (flags & PG_BUSY)
			return TRUE;
		if (also_m_busy && (flags & PG_SBUSY))
			return TRUE;
		if (atomic_cmpset_int(&m->flags, flags, flags | PG_BUSY)) {
#ifdef VM_PAGE_DEBUG
			m->busy_func = func;
			m->busy_line = lineno;
#endif
			return FALSE;
		}
	}
}

/*
 * Clear the PG_BUSY flag and return non-zero to indicate to the caller
 * that a wakeup() should be performed.
 *
 * The vm_page must be spinlocked and will remain spinlocked on return.
 * The related queue must NOT be spinlocked (which could deadlock us).
 *
 * (inline version)
 */
static __inline
int
_vm_page_wakeup(vm_page_t m)
{
	u_int32_t flags;

	for (;;) {
		flags = m->flags;
		cpu_ccfence();
		if (atomic_cmpset_int(&m->flags, flags,
				      flags & ~(PG_BUSY | PG_WANTED))) {
			break;
		}
	}
	return(flags & PG_WANTED);
}

/*
 * Clear the PG_BUSY flag and wakeup anyone waiting for the page.  This
 * is typically the last call you make on a page before moving onto
 * other things.
 */
void
vm_page_wakeup(vm_page_t m)
{
	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
	vm_page_spin_lock(m);
	if (_vm_page_wakeup(m)) {
		vm_page_spin_unlock(m);
		wakeup(m);
	} else {
		vm_page_spin_unlock(m);
	}
}

/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (see vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too).
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 *
 * XXX get vm_page_spin_lock() here and move FREE->HOLD if necessary
 */
void
vm_page_hold(vm_page_t m)
{
	vm_page_spin_lock(m);
	atomic_add_int(&m->hold_count, 1);
	if (m->queue - m->pc == PQ_FREE) {
		_vm_page_queue_spin_lock(m);
		_vm_page_rem_queue_spinlocked(m);
		_vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0);
		_vm_page_queue_spin_unlock(m);
	}
	vm_page_spin_unlock(m);
}

/*
 * The opposite of vm_page_hold().  If the page is on the HOLD queue
 * it was freed while held and must be moved back to the FREE queue.
 */
void
vm_page_unhold(vm_page_t m)
{
	KASSERT(m->hold_count > 0 && m->queue - m->pc != PQ_FREE,
		("vm_page_unhold: pg %p illegal hold_count (%d) or on FREE queue (%d)",
		 m, m->hold_count, m->queue - m->pc));
	vm_page_spin_lock(m);
	atomic_add_int(&m->hold_count, -1);
	if (m->hold_count == 0 && m->queue - m->pc == PQ_HOLD) {
		_vm_page_queue_spin_lock(m);
		_vm_page_rem_queue_spinlocked(m);
		_vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 0);
		_vm_page_queue_spin_unlock(m);
	}
	vm_page_spin_unlock(m);
}

/*
 *	vm_page_initfake:
 *
 *	Initialize a fictitious page with the specified physical address
 *	and memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{
	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	/* Fictitious pages don't use "order" or "pool". */
	m->flags = PG_FICTITIOUS | PG_UNMANAGED | PG_BUSY;
	m->wire_count = 1;
	pmap_page_init(m);
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 * Inserts the given vm_page into the object and object list.
 *
 * The pagetables are not updated but will presumably fault the page
 * in if necessary, or if a kernel page the caller will at some point
 * enter the page into the kernel's pmap.  We are not allowed to block
 * here so we *can't* do this anyway.
 *
 * This routine may not block.
 * This routine must be called with the vm_object held.
 * This routine must be called with a critical section held.
 *
 * This routine returns TRUE if the page was inserted into the object
 * successfully, and FALSE if the page already exists in the object.
 */
int
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(object));
	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	object->generation++;

	/*
	 * Record the object/offset pair in this page and add the
	 * pv_list_count of the page to the object.
	 *
	 * The vm_page spin lock is required for interactions with the pmap.
	 */
	vm_page_spin_lock(m);
	m->object = object;
	m->pindex = pindex;
	if (vm_page_rb_tree_RB_INSERT(&object->rb_memq, m)) {
		m->object = NULL;
		m->pindex = 0;
		vm_page_spin_unlock(m);
		return FALSE;
	}
	++object->resident_page_count;
	++mycpu->gd_vmtotal.t_rm;
	/* atomic_add_int(&object->agg_pv_list_count, m->md.pv_list_count); */
	vm_page_spin_unlock(m);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if ((m->valid & m->dirty) ||
	    (m->flags & (PG_WRITEABLE | PG_NEED_COMMIT)))
		vm_object_set_writeable_dirty(object);

	/*
	 * Checks for a swap assignment and sets PG_SWAPPED if appropriate.
	 */
	swap_pager_page_inserted(m);
	return TRUE;
}

/*
 * Removes the given vm_page_t from the (object,index) table
 *
 * The underlying pmap entry (if any) is NOT removed here.
 * This routine may not block.
 *
 * The page must be BUSY and will remain BUSY on return.
 * No other requirements.
 *
 * NOTE: FreeBSD side effect was to unbusy the page on return.  We leave
 *	 it busy.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;

	if (m->object == NULL) {
		return;
	}

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_page_remove: page not busy");

	object = m->object;

	vm_object_hold(object);

	/*
	 * Remove the page from the object and update the object.
	 *
	 * The vm_page spin lock is required for interactions with the pmap.
	 */
	vm_page_spin_lock(m);
	vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m);
	--object->resident_page_count;
	--mycpu->gd_vmtotal.t_rm;
	/* atomic_add_int(&object->agg_pv_list_count, -m->md.pv_list_count); */
	m->object = NULL;
	vm_page_spin_unlock(m);

	object->generation++;

	vm_object_drop(object);
}

/*
 * Locate and return the page at (object, pindex), or NULL if the
 * page could not be found.
 *
 * The caller must hold the vm_object token.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Search the object's page RB tree for this pindex
	 */
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
	KKASSERT(m == NULL || (m->object == object && m->pindex == pindex));
	return(m);
}

vm_page_t
VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(struct vm_object *object,
					    vm_pindex_t pindex,
					    int also_m_busy, const char *msg
					    VM_PAGE_DEBUG_ARGS)
{
	u_int32_t flags;
	vm_page_t m;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
	while (m) {
		KKASSERT(m->object == object && m->pindex == pindex);
		flags = m->flags;
		cpu_ccfence();
		if (flags & PG_BUSY) {
			tsleep_interlock(m, 0);
			if (atomic_cmpset_int(&m->flags, flags,
					  flags | PG_WANTED | PG_REFERENCED)) {
				tsleep(m, PINTERLOCKED, msg, 0);
				m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq,
							      pindex);
			}
		} else if (also_m_busy && (flags & PG_SBUSY)) {
			tsleep_interlock(m, 0);
			if (atomic_cmpset_int(&m->flags, flags,
					  flags | PG_WANTED | PG_REFERENCED)) {
				tsleep(m, PINTERLOCKED, msg, 0);
				m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq,
							      pindex);
			}
		} else if (atomic_cmpset_int(&m->flags, flags,
					     flags | PG_BUSY)) {
#ifdef VM_PAGE_DEBUG
			m->busy_func = func;
			m->busy_line = lineno;
#endif
			break;
		}
	}
	return m;
}

/*
 * Attempt to lookup and busy a page.
 *
 * Returns NULL if the page could not be found
 *
 * Returns a vm_page and error == TRUE if the page exists but could not
 * be busied.
 *
 * Returns a vm_page and error == FALSE on success.
 */
vm_page_t
VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(struct vm_object *object,
					   vm_pindex_t pindex,
					   int also_m_busy, int *errorp
					   VM_PAGE_DEBUG_ARGS)
{
	u_int32_t flags;
	vm_page_t m;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
	*errorp = FALSE;
	while (m) {
		KKASSERT(m->object == object && m->pindex == pindex);
		flags = m->flags;
		cpu_ccfence();
		if (flags & PG_BUSY) {
			*errorp = TRUE;
			break;
		}
		if (also_m_busy && (flags & PG_SBUSY)) {
			*errorp = TRUE;
			break;
		}
		if (atomic_cmpset_int(&m->flags, flags, flags | PG_BUSY)) {
#ifdef VM_PAGE_DEBUG
			m->busy_func = func;
			m->busy_line = lineno;
#endif
			break;
		}
	}
	return m;
}

/*
 * Attempt to repurpose the passed-in page.  If the passed-in page cannot
 * be repurposed it will be released, *must_reenter will be set to 1, and
 * this function will fall-through to vm_page_lookup_busy_try().
 *
 * The passed-in page must be wired and not busy.  The returned page will
 * be busied and not wired.
 *
 * A different page may be returned.  The returned page will be busied and
 * not wired.
 *
 * NULL can be returned.  If so, the required page could not be busied.
 * The passed-in page will be unwired.
 */
vm_page_t
vm_page_repurpose(struct vm_object *object, vm_pindex_t pindex,
		  int also_m_busy, int *errorp, vm_page_t m,
		  int *must_reenter, int *iswired)
{
	if (m) {
		vm_page_busy_wait(m, TRUE, "biodep");
		if ((m->flags & (PG_UNMANAGED | PG_MAPPED | PG_FICTITIOUS)) ||
		    m->busy || m->wire_count != 1 || m->hold_count) {
			vm_page_unwire(m, 0);
			vm_page_wakeup(m);
			/* fall through to normal lookup */
		} else if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
			vm_page_unwire(m, 0);
			vm_page_deactivate(m);
			vm_page_wakeup(m);
			/* fall through to normal lookup */
		} else {
			/*
			 * We can safely repurpose the page.  It should
			 * already be unqueued.
			 */
			KKASSERT(m->queue == PQ_NONE && m->dirty == 0);
			vm_page_remove(m);
			m->valid = 0;
			m->act_count = 0;
			if (vm_page_insert(m, object, pindex)) {
				*errorp = 0;
				*iswired = 1;

				return m;
			}
			vm_page_unwire(m, 0);
			vm_page_free(m);
			/* fall through to normal lookup */
		}
	}
	*must_reenter = 1;
	*iswired = 0;
	m = vm_page_lookup_busy_try(object, pindex, also_m_busy, errorp);

	return m;
}

/*
 * Caller must hold the related vm_object
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	next = vm_page_rb_tree_RB_NEXT(m);
	if (next && next->pindex != m->pindex + 1)
		next = NULL;
	return (next);
}

/*
 * vm_page_rename()
 *
 * Move the given vm_page from its current object to the specified
 * target object/offset.  The page must be busy and will remain so
 * on return.
 *
 * new_object must be held.
 * This routine might block. XXX ?
 *
 * NOTE: Swap associated with the page must be invalidated by the move.  We
 *       have to do this for several reasons:  (1) we aren't freeing the
 *       page, (2) we are dirtying the page, (3) the VM system is probably
 *       moving the page from object A to B, and will then later move
 *       the backing store from A to B and we can't have a conflict.
 *
 * NOTE: We *always* dirty the page.  It is necessary both for the
 *       fact that we moved it, and because we may be invalidating
 *	 swap.  If the page is on the cache, we have to deactivate it
 *	 or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	 on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	KKASSERT(m->flags & PG_BUSY);
	ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(new_object));
	if (m->object) {
		ASSERT_LWKT_TOKEN_HELD_EXCL(vm_object_token(m->object));
		vm_page_remove(m);
	}
	if (vm_page_insert(m, new_object, new_pindex) == FALSE) {
		panic("vm_page_rename: target exists (%p,%"PRIu64")",
		      new_object, new_pindex);
	}
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
}

/*
 * vm_page_unqueue() without any wakeup.  This routine is used when a page
 * is to remain busied (PG_BUSY) by the caller.
 *
 * This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	vm_page_and_queue_spin_lock(m);
	(void)_vm_page_rem_queue_spinlocked(m);
	vm_page_spin_unlock(m);
}

/*
 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
 * if necessary.
 *
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	u_short queue;

	vm_page_and_queue_spin_lock(m);
	queue = _vm_page_rem_queue_spinlocked(m);
	if (queue == PQ_FREE || queue == PQ_CACHE) {
		vm_page_spin_unlock(m);
		pagedaemon_wakeup();
	} else {
		vm_page_spin_unlock(m);
	}
}

/*
 * vm_page_list_find()
 *
 * Find a page on the specified queue with color optimization.
 *
 * The page coloring optimization attempts to locate a page that does
 * not overload other nearby pages in the object in the cpu's L1 or L2
 * caches.  We need this optimization because cpu caches tend to be
 * physical caches, while object spaces tend to be virtual.
 *
 * The page coloring optimization also, very importantly, tries to localize
 * memory to cpus and physical sockets.
 *
 * On MP systems each PQ_FREE and PQ_CACHE color queue has its own spinlock
 * and the algorithm is adjusted to localize allocations on a per-core basis.
 * This is done by 'twisting' the colors.
 *
 * The page is returned spinlocked and removed from its queue (it will
 * be on PQ_NONE), or NULL. The page is not PG_BUSY'd.  The caller
 * is responsible for dealing with the busy-page case (usually by
 * deactivating the page and looping).
 *
 * NOTE:  This routine is carefully inlined.  A non-inlined version
 *	  is available for outside callers but the only critical path is
 *	  from within this source file.
 *
 * NOTE:  This routine assumes that the vm_pages found in PQ_CACHE and PQ_FREE
 *	  represent stable storage, allowing us to order our locks vm_page
 *	  first, then queue.
 */
static __inline
vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	vm_page_t m;

	for (;;) {
		if (prefer_zero) {
			m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl,
				       pglist);
		} else {
			m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
		}
		if (m == NULL) {
			m = _vm_page_list_find2(basequeue, index);
			return(m);
		}
		vm_page_and_queue_spin_lock(m);
		if (m->queue == basequeue + index) {
			_vm_page_rem_queue_spinlocked(m);
			/* vm_page_t spin held, no queue spin */
			break;
		}
		vm_page_and_queue_spin_unlock(m);
	}
	return(m);
}

/*
 * If we could not find the page in the desired queue try to find it in
 * a nearby queue.
 */
static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
	struct vpgqueues *pq;
	vm_page_t m = NULL;
	int pqmask = PQ_SET_ASSOC_MASK >> 1;
	int pqi;
	int i;

	index &= PQ_L2_MASK;
	pq = &vm_page_queues[basequeue];

	/*
	 * Run local sets of 16, 32, 64, 128, and the whole queue if all
	 * else fails (PQ_L2_MASK which is 255).
	 */
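	/*
	 * e.g. for index 37 the first pass has pqmask 15 and probes
	 * colors (37 & ~15) | ((37 + i) & 15), i.e. 37..47 then 32..36.
	 * Each widening of pqmask doubles the window until the entire
	 * PQ_L2_MASK range has been scanned.
	 */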
	do {
		pqmask = (pqmask << 1) | 1;
		for (i = 0; i <= pqmask; ++i) {
			pqi = (index & ~pqmask) | ((index + i) & pqmask);
			m = TAILQ_FIRST(&pq[pqi].pl);
			if (m) {
				_vm_page_and_queue_spin_lock(m);
				if (m->queue == basequeue + pqi) {
					_vm_page_rem_queue_spinlocked(m);
					return(m);
				}
				_vm_page_and_queue_spin_unlock(m);
				--i;
				continue;
			}
		}
	} while (pqmask != PQ_L2_MASK);

	return(m);
}

/*
 * Returns a vm_page candidate for allocation.  The page is not busied so
 * it can move around.  The caller must busy the page (and typically
 * deactivate it if it cannot be busied!)
 *
 * Returns a spinlocked vm_page that has been removed from its queue.
 */
vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
	return(_vm_page_list_find(basequeue, index, prefer_zero));
}

/*
 * Find a page on the cache queue with color optimization, remove it
 * from the queue, and busy it.  The returned page will not be spinlocked.
 *
 * A candidate failure will be deactivated.  Candidates can fail due to
 * being busied by someone else, in which case they will be deactivated.
 *
 * This routine may not block.
 */
static vm_page_t
vm_page_select_cache(u_short pg_color)
{
	vm_page_t m;

	for (;;) {
		m = _vm_page_list_find(PQ_CACHE, pg_color & PQ_L2_MASK, FALSE);
		if (m == NULL)
			break;
		/*
		 * (m) has been removed from its queue and spinlocked
		 */
		if (vm_page_busy_try(m, TRUE)) {
			_vm_page_deactivate_locked(m, 0);
			vm_page_spin_unlock(m);
		} else {
			/*
			 * We successfully busied the page
			 */
			if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0 &&
			    m->hold_count == 0 &&
			    m->wire_count == 0 &&
			    (m->dirty & m->valid) == 0) {
				vm_page_spin_unlock(m);
				pagedaemon_wakeup();
				return(m);
			}

			/*
			 * The page cannot be recycled, deactivate it.
			 */
			_vm_page_deactivate_locked(m, 0);
			if (_vm_page_wakeup(m)) {
				vm_page_spin_unlock(m);
				wakeup(m);
			} else {
				vm_page_spin_unlock(m);
			}
		}
	}
	return (m);
}

/*
 * Find a free or zero page, with specified preference.  We attempt to
 * inline the nominal case and fall back to _vm_page_list_find2()
 * otherwise.  A busied page is removed from the queue and returned.
 *
 * This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(u_short pg_color, boolean_t prefer_zero)
{
	vm_page_t m;

	for (;;) {
		m = _vm_page_list_find(PQ_FREE, pg_color & PQ_L2_MASK,
				       prefer_zero);
		if (m == NULL)
			break;
		if (vm_page_busy_try(m, TRUE)) {
			/*
			 * Various mechanisms such as a pmap_collect can
			 * result in a busy page on the free queue.  We
			 * have to move the page out of the way so we can
			 * retry the allocation.  If the other thread is not
			 * allocating the page then m->valid will remain 0 and
			 * the pageout daemon will free the page later on.
			 *
			 * Since we could not busy the page, however, we
			 * cannot make assumptions as to whether the page
			 * will be allocated by the other thread or not,
			 * so all we can do is deactivate it to move it out
			 * of the way.  In particular, if the other thread
			 * wires the page it may wind up on the inactive
			 * queue and the pageout daemon will have to deal
			 * with that case too.
			 */
			_vm_page_deactivate_locked(m, 0);
			vm_page_spin_unlock(m);
		} else {
			/*
			 * Theoretically if we are able to busy the page
			 * atomic with the queue removal (using the vm_page
			 * lock) nobody else should be able to mess with the
			 * page before us.
			 */
			KKASSERT((m->flags & (PG_UNMANAGED |
					      PG_NEED_COMMIT)) == 0);
			KASSERT(m->hold_count == 0, ("m->hold_count is not zero "
						     "pg %p q=%d flags=%08x hold=%d wire=%d",
						     m, m->queue, m->flags, m->hold_count, m->wire_count));
			KKASSERT(m->wire_count == 0);
			vm_page_spin_unlock(m);
			pagedaemon_wakeup();

			/* return busied and removed page */
			return(m);
		}
	}
	return(m);
}

/*
 * vm_page_alloc()
 *
 * Allocate and return a memory cell associated with this VM object/offset
 * pair.  If object is NULL an unassociated page will be allocated.
 *
 * The returned page will be busied and removed from its queues.  This
 * routine may return NULL if a race occurs and the page
 * is found to already exist at the specified (object, pindex).
 *
 *	VM_ALLOC_NORMAL		allow use of cache pages, nominal free drain
 *	VM_ALLOC_QUICK		like normal but cannot use cache
 *	VM_ALLOC_SYSTEM		greater free drain
 *	VM_ALLOC_INTERRUPT	allow free list to be completely drained
 *	VM_ALLOC_ZERO		advisory request for pre-zero'd page only
 *	VM_ALLOC_FORCE_ZERO	advisory request for pre-zero'd page only
 *	VM_ALLOC_NULL_OK	ok to return NULL on insertion collision
 *				(see vm_page_grab())
 *	VM_ALLOC_USE_GD		ok to use per-gd cache
 *
 * The object must be held if not NULL
 * This routine may not block
 *
 * Additional special handling is required when called from an interrupt
 * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 * in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	globaldata_t gd = mycpu;
	vm_object_t obj;
	vm_page_t m;
	u_short pg_color;

#if 0
	/*
	 * Special per-cpu free VM page cache.  The pages are pre-busied
	 * and pre-zerod for us.
	 */
	if (gd->gd_vmpg_count && (page_req & VM_ALLOC_USE_GD)) {
		crit_enter_gd(gd);
		if (gd->gd_vmpg_count) {
			m = gd->gd_vmpg_array[--gd->gd_vmpg_count];
			crit_exit_gd(gd);
			goto done;
		}
		crit_exit_gd(gd);
	}
#endif
	m = NULL;

	/*
	 * CPU LOCALIZATION
	 *
	 * CPU localization algorithm.  Break the page queues up by physical
	 * id and core id (note that two cpu threads will have the same core
	 * id, and core_id != gd_cpuid).
	 *
	 * This is nowhere near perfect, for example the last pindex in a
	 * subgroup will overflow into the next cpu or package.  But this
	 * should get us good page reuse locality in heavy mixed loads.
	 */
	pg_color = vm_get_pg_color(gd, object, pindex);

	KKASSERT(page_req &
		(VM_ALLOC_NORMAL|VM_ALLOC_QUICK|
		 VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

	/*
	 * Certain system threads (pageout daemon, buf_daemon's) are
	 * allowed to eat deeper into the free page list.
	 */
	if (curthread->td_flags & TDF_SYSTHREAD)
		page_req |= VM_ALLOC_SYSTEM;

	/*
	 * Impose various limitations.  Note that the v_free_reserved test
	 * must match the opposite of vm_page_count_target() to avoid
	 * livelocks, be careful.
	 */
loop:
	if (vmstats.v_free_count >= vmstats.v_free_reserved ||
	    ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
	    ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
		vmstats.v_free_count > vmstats.v_interrupt_free_min)
	) {
		/*
		 * The free queue has sufficient free pages to take one out.
		 */
		if (page_req & (VM_ALLOC_ZERO | VM_ALLOC_FORCE_ZERO))
			m = vm_page_select_free(pg_color, TRUE);
		else
			m = vm_page_select_free(pg_color, FALSE);
	} else if (page_req & VM_ALLOC_NORMAL) {
		/*
		 * Allocatable from the cache (non-interrupt only).  On
		 * success, we must free the page and try again, thus
		 * ensuring that vmstats.v_*_free_min counters are replenished.
		 */
#ifdef INVARIANTS
		if (curthread->td_preempted) {
			kprintf("vm_page_alloc(): warning, attempt to allocate"
				" cache page from preempting interrupt\n");
			m = NULL;
		} else {
			m = vm_page_select_cache(pg_color);
		}
#else
		m = vm_page_select_cache(pg_color);
#endif
		/*
		 * On success move the page into the free queue and loop.
		 *
		 * Only do this if we can safely acquire the vm_object lock,
		 * because this is effectively a random page and the caller
		 * might be holding the lock shared, we don't want to
		 * deadlock.
		 */
		if (m != NULL) {
			KASSERT(m->dirty == 0,
				("Found dirty cache page %p", m));
			if ((obj = m->object) != NULL) {
				if (vm_object_hold_try(obj)) {
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
					/* m->object NULL here */
					vm_object_drop(obj);
				} else {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				}
			} else {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
			goto loop;
		}
1728 
1729 		/*
1730 		 * On failure return NULL
1731 		 */
1732 #if defined(DIAGNOSTIC)
1733 		if (vmstats.v_cache_count > 0)
1734 			kprintf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
1735 #endif
1736 		vm_pageout_deficit++;
1737 		pagedaemon_wakeup();
1738 		return (NULL);
1739 	} else {
1740 		/*
1741 		 * No pages available, wakeup the pageout daemon and give up.
1742 		 */
1743 		vm_pageout_deficit++;
1744 		pagedaemon_wakeup();
1745 		return (NULL);
1746 	}
1747 
1748 	/*
1749 	 * v_free_count can race so loop if we don't find the expected
1750 	 * page.
1751 	 */
1752 	if (m == NULL)
1753 		goto loop;
1754 
1755 	/*
1756 	 * Good page found.  The page has already been busied for us and
1757 	 * removed from its queues.
1758 	 */
1759 	KASSERT(m->dirty == 0,
1760 		("vm_page_alloc: free/cache page %p was dirty", m));
1761 	KKASSERT(m->queue == PQ_NONE);
1762 
1763 #if 0
1764 done:
1765 #endif
1766 	/*
1767 	 * Initialize the structure, inheriting some flags but clearing
1768 	 * all the rest.  The page has already been busied for us.
1769 	 */
1770 	vm_page_flag_clear(m, ~(PG_BUSY | PG_SBUSY));
1771 	KKASSERT(m->wire_count == 0);
1772 	KKASSERT(m->busy == 0);
1773 	m->act_count = 0;
1774 	m->valid = 0;
1775 
1776 	/*
1777 	 * Caller must be holding the object lock (asserted by
1778 	 * vm_page_insert()).
1779 	 *
1780 	 * NOTE: Inserting a page here does not insert it into any pmaps
1781 	 *	 (which could cause us to block allocating memory).
1782 	 *
1783 	 * NOTE: If no object an unassociated page is allocated, m->pindex
1784 	 *	 can be used by the caller for any purpose.
1785 	 */
1786 	if (object) {
1787 		if (vm_page_insert(m, object, pindex) == FALSE) {
1788 			vm_page_free(m);
1789 			if ((page_req & VM_ALLOC_NULL_OK) == 0)
1790 				panic("PAGE RACE %p[%ld]/%p",
1791 				      object, (long)pindex, m);
1792 			m = NULL;
1793 		}
1794 	} else {
1795 		m->pindex = pindex;
1796 	}
1797 
1798 	/*
1799 	 * Don't wakeup too often - wakeup the pageout daemon when
1800 	 * we would be nearly out of memory.
1801 	 */
1802 	pagedaemon_wakeup();
1803 
1804 	/*
1805 	 * A PG_BUSY page is returned.
1806 	 */
1807 	return (m);
1808 }
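
/*
 * Illustrative sketch: a typical vm_page_alloc() consumer.  The caller
 * must hold the object lock, as vm_page_insert() asserts, and
 * VM_ALLOC_NULL_OK suppresses the PAGE RACE panic if the pindex is
 * raced.  Names prefixed with example_ are hypothetical.
 */
#if 0
static vm_page_t
example_alloc_into_object(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_alloc(object, pindex,
			  VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK);
	if (m == NULL)
		return (NULL);	/* no page available or pindex raced */
	/* m is returned PG_BUSY with m->valid == 0 */
	return (m);
}
#endif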
1809 
1810 /*
1811  * Returns the number of pages available in our DMA memory reserve
1812  * (adjusted with vm.dma_reserved=<value>m in /boot/loader.conf)
1813  */
1814 vm_size_t
1815 vm_contig_avail_pages(void)
1816 {
1817 	alist_blk_t blk;
1818 	alist_blk_t count;
1819 	alist_blk_t bfree;
1820 	spin_lock(&vm_contig_spin);
1821 	bfree = alist_free_info(&vm_contig_alist, &blk, &count);
1822 	spin_unlock(&vm_contig_spin);
1823 
1824 	return bfree;
1825 }
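
/*
 * Illustrative sketch: report the remaining DMA reserve in kilobytes,
 * e.g. from a debugging path.  Not an existing kernel function.
 */
#if 0
static void
example_report_dma_reserve(void)
{
	kprintf("dma reserve: %ldK free\n",
		(long)vm_contig_avail_pages() * (PAGE_SIZE / 1024));
}
#endif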
1826 
1827 /*
1828  * Attempt to allocate contiguous physical memory with the specified
1829  * requirements.
1830  */
1831 vm_page_t
1832 vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
1833 		     unsigned long alignment, unsigned long boundary,
1834 		     unsigned long size, vm_memattr_t memattr)
1835 {
1836 	alist_blk_t blk;
1837 	vm_page_t m;
1838 	int i;
1839 
1840 	alignment >>= PAGE_SHIFT;
1841 	if (alignment == 0)
1842 		alignment = 1;
1843 	boundary >>= PAGE_SHIFT;
1844 	if (boundary == 0)
1845 		boundary = 1;
1846 	size = (size + PAGE_MASK) >> PAGE_SHIFT;
1847 
1848 	spin_lock(&vm_contig_spin);
1849 	blk = alist_alloc(&vm_contig_alist, 0, size);
1850 	if (blk == ALIST_BLOCK_NONE) {
1851 		spin_unlock(&vm_contig_spin);
1852 		if (bootverbose) {
1853 			kprintf("vm_page_alloc_contig: %ldk nospace\n",
1854 				size * (PAGE_SIZE / 1024));
1855 		}
1856 		return(NULL);
1857 	}
1858 	if (high && ((vm_paddr_t)(blk + size) << PAGE_SHIFT) > high) {
1859 		alist_free(&vm_contig_alist, blk, size);
1860 		spin_unlock(&vm_contig_spin);
1861 		if (bootverbose) {
1862 			kprintf("vm_page_alloc_contig: %ldk high "
1863 				"%016jx failed\n",
1864 				size * (PAGE_SIZE / 1024),
1865 				(intmax_t)high);
1866 		}
1867 		return(NULL);
1868 	}
1869 	spin_unlock(&vm_contig_spin);
1870 	if (vm_contig_verbose) {
1871 		kprintf("vm_page_alloc_contig: %016jx/%ldk\n",
1872 			(intmax_t)(vm_paddr_t)blk << PAGE_SHIFT,
1873 			size * (PAGE_SIZE / 1024));
1874 	}
1875 
1876 	m = PHYS_TO_VM_PAGE((vm_paddr_t)blk << PAGE_SHIFT);
1877 	if (memattr != VM_MEMATTR_DEFAULT)
1878 		for (i = 0; i < size; i++)
1879 			pmap_page_set_memattr(&m[i], memattr);
1880 	return m;
1881 }
1882 
1883 /*
1884  * Free contiguously allocated pages.  The pages will be wired but not busy.
1885  * When freeing to the alist we leave them wired and not busy.
1886  */
1887 void
1888 vm_page_free_contig(vm_page_t m, unsigned long size)
1889 {
1890 	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1891 	vm_pindex_t start = pa >> PAGE_SHIFT;
1892 	vm_pindex_t pages = (size + PAGE_MASK) >> PAGE_SHIFT;
1893 
1894 	if (vm_contig_verbose) {
1895 		kprintf("vm_page_free_contig:  %016jx/%ldk\n",
1896 			(intmax_t)pa, size / 1024);
1897 	}
1898 	if (pa < vm_low_phys_reserved) {
1899 		KKASSERT(pa + size <= vm_low_phys_reserved);
1900 		spin_lock(&vm_contig_spin);
1901 		alist_free(&vm_contig_alist, start, pages);
1902 		spin_unlock(&vm_contig_spin);
1903 	} else {
1904 		while (pages) {
1905 			vm_page_busy_wait(m, FALSE, "cpgfr");
1906 			vm_page_unwire(m, 0);
1907 			vm_page_free(m);
1908 			--pages;
1909 			++m;
1910 		}
1912 	}
1913 }
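
/*
 * Illustrative sketch: allocate a 64KB buffer from the DMA reserve
 * below 4GB and release it again.  The pages come back wired and not
 * busy and must be returned with vm_page_free_contig().  The
 * example_* name is hypothetical.
 */
#if 0
static void
example_contig_roundtrip(void)
{
	vm_page_t m;

	m = vm_page_alloc_contig(0, 0xFFFFFFFFUL, PAGE_SIZE, 0,
				 64 * 1024, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return;		/* reserve exhausted or high bound failed */

	/* ... program the device with VM_PAGE_TO_PHYS(m) ... */

	vm_page_free_contig(m, 64 * 1024);
}
#endif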
1914 
1916 /*
1917  * Wait for sufficient free memory for nominal heavy memory use kernel
1918  * operations.
1919  *
1920  * WARNING!  Be sure never to call this in any vm_pageout code path, which
1921  *	     will trivially deadlock the system.
1922  */
1923 void
1924 vm_wait_nominal(void)
1925 {
1926 	while (vm_page_count_min(0))
1927 		vm_wait(0);
1928 }
1929 
1930 /*
1931  * Test if vm_wait_nominal() would block.
1932  */
1933 int
1934 vm_test_nominal(void)
1935 {
1936 	if (vm_page_count_min(0))
1937 		return(1);
1938 	return(0);
1939 }
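
/*
 * Illustrative sketch: a non-critical path can use vm_test_nominal()
 * to back off instead of blocking in vm_wait_nominal().  The EAGAIN
 * convention and the example_* name are hypothetical.
 */
#if 0
static int
example_heavy_op(void)
{
	if (vm_test_nominal())
		return (EAGAIN);	/* low on pages, retry later */
	/* ... proceed with heavy allocations ... */
	return (0);
}
#endif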
1940 
1941 /*
1942  * Block until free pages are available for allocation, called in various
1943  * places before memory allocations.
1944  *
1945  * The caller may loop if vm_page_count_min() == FALSE so we cannot be
1946  * more generous than that.
1947  */
1948 void
1949 vm_wait(int timo)
1950 {
1951 	/*
1952 	 * never wait forever
1953 	 */
1954 	if (timo == 0)
1955 		timo = hz;
1956 	lwkt_gettoken(&vm_token);
1957 
1958 	if (curthread == pagethread) {
1959 		/*
1960 		 * The pageout daemon itself needs pages, this is bad.
1961 		 */
1962 		if (vm_page_count_min(0)) {
1963 			vm_pageout_pages_needed = 1;
1964 			tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo);
1965 		}
1966 	} else {
1967 		/*
1968 		 * Wakeup the pageout daemon if necessary and wait.
1969 		 *
1970 		 * Do not wait indefinitely for the target to be reached,
1971 		 * as load might prevent it from being reached any time soon.
1972 		 * But wait a little to try to slow down page allocations
1973 		 * and to give more important threads (the pagedaemon)
1974 		 * allocation priority.
1975 		 */
1976 		if (vm_page_count_target()) {
1977 			if (vm_pages_needed == 0) {
1978 				vm_pages_needed = 1;
1979 				wakeup(&vm_pages_needed);
1980 			}
1981 			++vm_pages_waiting;	/* SMP race ok */
1982 			tsleep(&vmstats.v_free_count, 0, "vmwait", timo);
1983 		}
1984 	}
1985 	lwkt_reltoken(&vm_token);
1986 }
1987 
1988 /*
1989  * Block until free pages are available for allocation
1990  *
1991  * Called only from vm_fault so that processes page faulting can be
1992  * easily tracked.
1993  */
1994 void
1995 vm_wait_pfault(void)
1996 {
1997 	/*
1998 	 * Wakeup the pageout daemon if necessary and wait.
1999 	 *
2000 	 * Do not wait indefinitely for the target to be reached,
2001 	 * as load might prevent it from being reached any time soon.
2002 	 * But wait a little to try to slow down page allocations
2003 	 * and to give more important threads (the pagedaemon)
2004 	 * allocation priority.
2005 	 */
2006 	if (vm_page_count_min(0)) {
2007 		lwkt_gettoken(&vm_token);
2008 		while (vm_page_count_severe()) {
2009 			if (vm_page_count_target()) {
2010 				thread_t td;
2011 
2012 				if (vm_pages_needed == 0) {
2013 					vm_pages_needed = 1;
2014 					wakeup(&vm_pages_needed);
2015 				}
2016 				++vm_pages_waiting;	/* SMP race ok */
2017 				tsleep(&vmstats.v_free_count, 0, "pfault", hz);
2018 
2019 				/*
2020 				 * Do not stay stuck in the loop if the system is trying
2021 				 * Do not stay stuck in the loop if the
2022 				 * system is trying to kill the process.
2023 				td = curthread;
2024 				if (td->td_proc && (td->td_proc->p_flags & P_LOWMEMKILL))
2025 					break;
2026 			}
2027 		}
2028 		lwkt_reltoken(&vm_token);
2029 	}
2030 }
2031 
2032 /*
2033  * Put the specified page on the active list (if appropriate).  Ensure
2034  * that act_count is at least ACT_INIT but do not otherwise mess with it.
2035  *
2036  * The caller should be holding the page busied? XXX
2037  * This routine may not block.
2038  */
2039 void
2040 vm_page_activate(vm_page_t m)
2041 {
2042 	u_short oqueue;
2043 
2044 	vm_page_spin_lock(m);
2045 	if (m->queue - m->pc != PQ_ACTIVE) {
2046 		_vm_page_queue_spin_lock(m);
2047 		oqueue = _vm_page_rem_queue_spinlocked(m);
2048 		/* page is left spinlocked, queue is unlocked */
2049 
2050 		if (oqueue == PQ_CACHE)
2051 			mycpu->gd_cnt.v_reactivated++;
2052 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
2053 			if (m->act_count < ACT_INIT)
2054 				m->act_count = ACT_INIT;
2055 			_vm_page_add_queue_spinlocked(m, PQ_ACTIVE + m->pc, 0);
2056 		}
2057 		_vm_page_and_queue_spin_unlock(m);
2058 		if (oqueue == PQ_CACHE || oqueue == PQ_FREE)
2059 			pagedaemon_wakeup();
2060 	} else {
2061 		if (m->act_count < ACT_INIT)
2062 			m->act_count = ACT_INIT;
2063 		vm_page_spin_unlock(m);
2064 	}
2065 }
2066 
2067 /*
2068  * Helper routine for vm_page_free_toq() and vm_page_cache().  This
2069  * routine is called when a page has been added to the cache or free
2070  * queues.
2071  *
2072  * This routine may not block.
2073  */
2074 static __inline void
2075 vm_page_free_wakeup(void)
2076 {
2077 	/*
2078 	 * If the pageout daemon itself needs pages, then tell it that
2079 	 * there are some free.
2080 	 */
2081 	if (vm_pageout_pages_needed &&
2082 	    vmstats.v_cache_count + vmstats.v_free_count >=
2083 	    vmstats.v_pageout_free_min
2084 	) {
2085 		vm_pageout_pages_needed = 0;
2086 		wakeup(&vm_pageout_pages_needed);
2087 	}
2088 
2089 	/*
2090 	 * Wakeup processes that are waiting on memory.
2091 	 *
2092 	 * Generally speaking we want to wakeup stuck processes as soon as
2093 	 * possible.  !vm_page_count_min(0) is the absolute minimum point
2094 	 * where we can do this.  Wait a bit longer to reduce degenerate
2095 	 * re-blocking (vm_page_free_hysteresis).  The target check is just
2096 	 * to make sure the min-check w/hysteresis does not exceed the
2097 	 * normal target.
2098 	 */
2099 	if (vm_pages_waiting) {
2100 		if (!vm_page_count_min(vm_page_free_hysteresis) ||
2101 		    !vm_page_count_target()) {
2102 			vm_pages_waiting = 0;
2103 			wakeup(&vmstats.v_free_count);
2104 			++mycpu->gd_cnt.v_ppwakeups;
2105 		}
2106 #if 0
2107 		if (!vm_page_count_target()) {
2108 			/*
2109 			 * Plenty of pages are free, wakeup everyone.
2110 			 */
2111 			vm_pages_waiting = 0;
2112 			wakeup(&vmstats.v_free_count);
2113 			++mycpu->gd_cnt.v_ppwakeups;
2114 		} else if (!vm_page_count_min(0)) {
2115 			/*
2116 			 * Some pages are free, wakeup someone.
2117 			 */
2118 			int wcount = vm_pages_waiting;
2119 			if (wcount > 0)
2120 				--wcount;
2121 			vm_pages_waiting = wcount;
2122 			wakeup_one(&vmstats.v_free_count);
2123 			++mycpu->gd_cnt.v_ppwakeups;
2124 		}
2125 #endif
2126 	}
2127 }
2128 
2129 /*
2130  * Returns the given page to the PQ_FREE or PQ_HOLD list and disassociates
2131  * it from its VM object.
2132  *
2133  * The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
2134  * return (the page will have been freed).
2135  */
2136 void
2137 vm_page_free_toq(vm_page_t m)
2138 {
2139 	mycpu->gd_cnt.v_tfree++;
2140 	KKASSERT((m->flags & PG_MAPPED) == 0);
2141 	KKASSERT(m->flags & PG_BUSY);
2142 
2143 	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
2144 		kprintf("vm_page_free: pindex(%lu), busy(%d), "
2145 			"PG_BUSY(%d), hold(%d)\n",
2146 			(u_long)m->pindex, m->busy,
2147 			((m->flags & PG_BUSY) ? 1 : 0), m->hold_count);
2148 		if ((m->queue - m->pc) == PQ_FREE)
2149 			panic("vm_page_free: freeing free page");
2150 		else
2151 			panic("vm_page_free: freeing busy page");
2152 	}
2153 
2154 	/*
2155 	 * Remove from object, spinlock the page and its queues and
2156 	 * remove from any queue.  No queue spinlock will be held
2157 	 * after this section (because the page was removed from any
2158 	 * queue).
2159 	 */
2160 	vm_page_remove(m);
2161 	vm_page_and_queue_spin_lock(m);
2162 	_vm_page_rem_queue_spinlocked(m);
2163 
2164 	/*
2165 	 * No further management of fictitious pages occurs beyond object
2166 	 * and queue removal.
2167 	 */
2168 	if ((m->flags & PG_FICTITIOUS) != 0) {
2169 		vm_page_spin_unlock(m);
2170 		vm_page_wakeup(m);
2171 		return;
2172 	}
2173 
2174 	m->valid = 0;
2175 	vm_page_undirty(m);
2176 
2177 	if (m->wire_count != 0) {
2178 		if (m->wire_count > 1) {
2179 			panic("vm_page_free: invalid wire count (%d), "
2180 			      "pindex: 0x%lx",
2181 			      m->wire_count, (long)m->pindex);
2182 		}
2183 		panic("vm_page_free: freeing wired page");
2184 	}
2185 
2186 	/*
2187 	 * Clear the UNMANAGED flag when freeing an unmanaged page.
2188 	 * Clear the NEED_COMMIT flag
2189 	 * Clear the NEED_COMMIT flag.
2190 	if (m->flags & PG_UNMANAGED)
2191 		vm_page_flag_clear(m, PG_UNMANAGED);
2192 	if (m->flags & PG_NEED_COMMIT)
2193 		vm_page_flag_clear(m, PG_NEED_COMMIT);
2194 
2195 	if (m->hold_count != 0) {
2196 		_vm_page_add_queue_spinlocked(m, PQ_HOLD + m->pc, 0);
2197 	} else {
2198 		_vm_page_add_queue_spinlocked(m, PQ_FREE + m->pc, 0);
2199 	}
2200 
2201 	/*
2202 	 * This sequence allows us to clear PG_BUSY while still holding
2203 	 * its spin lock, which reduces contention vs allocators.  We
2204 	 * must not leave the queue locked or _vm_page_wakeup() may
2205 	 * deadlock.
2206 	 */
2207 	_vm_page_queue_spin_unlock(m);
2208 	if (_vm_page_wakeup(m)) {
2209 		vm_page_spin_unlock(m);
2210 		wakeup(m);
2211 	} else {
2212 		vm_page_spin_unlock(m);
2213 	}
2214 	vm_page_free_wakeup();
2215 }
2216 
2217 /*
2218  * vm_page_unmanage()
2219  *
2220  * Prevent PV management from being done on the page.  The page is
2221  * removed from the paging queues as if it were wired, and as a
2222  * consequence of no longer being managed the pageout daemon will not
2223  * touch it (since there is no way to locate the pte mappings for the
2224  * page).  madvise() calls that mess with the pmap will also no longer
2225  * operate on the page.
2226  *
2227  * Beyond that the page is still reasonably 'normal'.  Freeing the page
2228  * will clear the flag.
2229  *
2230  * This routine is used by OBJT_PHYS objects - objects using unswappable
2231  * physical memory as backing store rather than swap-backed memory and
2232  * will eventually be extended to support 4MB unmanaged physical
2233  * mappings.
2234  *
2235  * Caller must be holding the page busy.
2236  */
2237 void
2238 vm_page_unmanage(vm_page_t m)
2239 {
2240 	KKASSERT(m->flags & PG_BUSY);
2241 	if ((m->flags & PG_UNMANAGED) == 0) {
2242 		if (m->wire_count == 0)
2243 			vm_page_unqueue(m);
2244 	}
2245 	vm_page_flag_set(m, PG_UNMANAGED);
2246 }
2247 
2248 /*
2249  * Mark this page as wired down by yet another map, removing it from
2250  * paging queues as necessary.
2251  *
2252  * Caller must be holding the page busy.
2253  */
2254 void
2255 vm_page_wire(vm_page_t m)
2256 {
2257 	/*
2258 	 * Only bump the wire statistics if the page is not already wired,
2259 	 * and only unqueue the page if it is on some queue (if it is unmanaged
2260 	 * it is already off the queues).  Don't do anything with fictitious
2261 	 * pages because they are always wired.
2262 	 */
2263 	KKASSERT(m->flags & PG_BUSY);
2264 	if ((m->flags & PG_FICTITIOUS) == 0) {
2265 		if (atomic_fetchadd_int(&m->wire_count, 1) == 0) {
2266 			if ((m->flags & PG_UNMANAGED) == 0)
2267 				vm_page_unqueue(m);
2268 			atomic_add_int(&vmstats.v_wire_count, 1);
2269 		}
2270 		KASSERT(m->wire_count != 0,
2271 			("vm_page_wire: wire_count overflow m=%p", m));
2272 	}
2273 }
2274 
2275 /*
2276  * Release one wiring of this page, potentially enabling it to be paged again.
2277  *
2278  * Many pages placed on the inactive queue should actually go
2279  * into the cache, but it is difficult to figure out which.  What
2280  * we do instead, if the inactive target is well met, is to put
2281  * clean pages at the head of the inactive queue instead of the tail.
2282  * This will cause them to be moved to the cache more quickly and
2283  * if not actively re-referenced, freed more quickly.  If we just
2284  * stick these pages at the end of the inactive queue, heavy filesystem
2285  * meta-data accesses can cause an unnecessary paging load on memory bound
2286  * processes.  This optimization causes one-time-use metadata to be
2287  * reused more quickly.
2288  *
2289  * Pages marked PG_NEED_COMMIT are always activated and never placed on
2290  * the inactive queue.  This helps the pageout daemon determine memory
2291  * pressure and act on out-of-memory situations more quickly.
2292  *
2293  * BUT, if we are in a low-memory situation we have no choice but to
2294  * put clean pages on the cache queue.
2295  *
2296  * A number of routines use vm_page_unwire() to guarantee that the page
2297  * will go into either the inactive or active queues, and will NEVER
2298  * be placed in the cache - for example, just after dirtying a page.
2299  * dirty pages in the cache are not allowed.
2300  * Dirty pages in the cache are not allowed.
2301  * This routine may not block.
2302  */
2303 void
2304 vm_page_unwire(vm_page_t m, int activate)
2305 {
2306 	KKASSERT(m->flags & PG_BUSY);
2307 	if (m->flags & PG_FICTITIOUS) {
2308 		/* do nothing */
2309 	} else if (m->wire_count <= 0) {
2310 		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
2311 	} else {
2312 		if (atomic_fetchadd_int(&m->wire_count, -1) == 1) {
2313 			atomic_add_int(&vmstats.v_wire_count, -1);
2314 			if (m->flags & PG_UNMANAGED) {
2315 				;
2316 			} else if (activate || (m->flags & PG_NEED_COMMIT)) {
2317 				vm_page_spin_lock(m);
2318 				_vm_page_add_queue_spinlocked(m,
2319 							PQ_ACTIVE + m->pc, 0);
2320 				_vm_page_and_queue_spin_unlock(m);
2321 			} else {
2322 				vm_page_spin_lock(m);
2323 				vm_page_flag_clear(m, PG_WINATCFLS);
2324 				_vm_page_add_queue_spinlocked(m,
2325 							PQ_INACTIVE + m->pc, 0);
2326 				++vm_swapcache_inactive_heuristic;
2327 				_vm_page_and_queue_spin_unlock(m);
2328 			}
2329 		}
2330 	}
2331 }
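
/*
 * Illustrative sketch: pin a page across a device operation.  Both
 * vm_page_wire() and vm_page_unwire() require the page to be busied,
 * so the busy is reacquired around the unwire.  The example_* name
 * and the "expin"/"exunp" wmesg strings are hypothetical.
 */
#if 0
static void
example_pin_for_io(vm_page_t m)
{
	vm_page_busy_wait(m, FALSE, "expin");
	vm_page_wire(m);
	vm_page_wakeup(m);

	/* ... run the operation against VM_PAGE_TO_PHYS(m) ... */

	vm_page_busy_wait(m, FALSE, "exunp");
	vm_page_unwire(m, 1);	/* 1: requeue on the active queue */
	vm_page_wakeup(m);
}
#endif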
2332 
2333 /*
2334  * Move the specified page to the inactive queue.  If the page has
2335  * any associated swap, the swap is deallocated.
2336  *
2337  * Normally athead is 0 resulting in LRU operation.  athead is set
2338  * to 1 if we want this page to be 'as if it were placed in the cache',
2339  * except without unmapping it from the process address space.
2340  *
2341  * vm_page's spinlock must be held on entry and will remain held on return.
2342  * This routine may not block.
2343  */
2344 static void
2345 _vm_page_deactivate_locked(vm_page_t m, int athead)
2346 {
2347 	u_short oqueue;
2348 
2349 	/*
2350 	 * Ignore if already inactive.
2351 	 */
2352 	if (m->queue - m->pc == PQ_INACTIVE)
2353 		return;
2354 	_vm_page_queue_spin_lock(m);
2355 	oqueue = _vm_page_rem_queue_spinlocked(m);
2356 
2357 	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
2358 		if (oqueue == PQ_CACHE)
2359 			mycpu->gd_cnt.v_reactivated++;
2360 		vm_page_flag_clear(m, PG_WINATCFLS);
2361 		_vm_page_add_queue_spinlocked(m, PQ_INACTIVE + m->pc, athead);
2362 		if (athead == 0)
2363 			++vm_swapcache_inactive_heuristic;
2364 	}
2365 	/* NOTE: PQ_NONE if condition not taken */
2366 	_vm_page_queue_spin_unlock(m);
2367 	/* leaves vm_page spinlocked */
2368 }
2369 
2370 /*
2371  * Attempt to deactivate a page.
2372  *
2373  * No requirements.
2374  */
2375 void
2376 vm_page_deactivate(vm_page_t m)
2377 {
2378 	vm_page_spin_lock(m);
2379 	_vm_page_deactivate_locked(m, 0);
2380 	vm_page_spin_unlock(m);
2381 }
2382 
2383 void
2384 vm_page_deactivate_locked(vm_page_t m)
2385 {
2386 	_vm_page_deactivate_locked(m, 0);
2387 }
2388 
2389 /*
2390  * Attempt to move a page to PQ_CACHE.
2391  *
2392  * Returns 0 on failure, 1 on success
2393  *
2394  * The page should NOT be busied by the caller.  This function will validate
2395  * whether the page can be safely moved to the cache.
2396  */
2397 int
2398 vm_page_try_to_cache(vm_page_t m)
2399 {
2400 	vm_page_spin_lock(m);
2401 	if (vm_page_busy_try(m, TRUE)) {
2402 		vm_page_spin_unlock(m);
2403 		return(0);
2404 	}
2405 	if (m->dirty || m->hold_count || m->wire_count ||
2406 	    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT))) {
2407 		if (_vm_page_wakeup(m)) {
2408 			vm_page_spin_unlock(m);
2409 			wakeup(m);
2410 		} else {
2411 			vm_page_spin_unlock(m);
2412 		}
2413 		return(0);
2414 	}
2415 	vm_page_spin_unlock(m);
2416 
2417 	/*
2418 	 * Page busied by us and no longer spinlocked.  Dirty pages cannot
2419 	 * be moved to the cache.
2420 	 */
2421 	vm_page_test_dirty(m);
2422 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2423 		vm_page_wakeup(m);
2424 		return(0);
2425 	}
2426 	vm_page_cache(m);
2427 	return(1);
2428 }
2429 
2430 /*
2431  * Attempt to free the page.  If we cannot free it, we do nothing.
2432  * 1 is returned on success, 0 on failure.
2433  *
2434  * No requirements.
2435  */
2436 int
2437 vm_page_try_to_free(vm_page_t m)
2438 {
2439 	vm_page_spin_lock(m);
2440 	if (vm_page_busy_try(m, TRUE)) {
2441 		vm_page_spin_unlock(m);
2442 		return(0);
2443 	}
2444 
2445 	/*
2446 	 * The page can be in any state, including already being on the free
2447 	 * queue.  Check to see if it really can be freed.
2448 	 */
2449 	if (m->dirty ||				/* can't free if it is dirty */
2450 	    m->hold_count ||			/* or held (XXX may be wrong) */
2451 	    m->wire_count ||			/* or wired */
2452 	    (m->flags & (PG_UNMANAGED |		/* or unmanaged */
2453 			 PG_NEED_COMMIT)) ||	/* or needs a commit */
2454 	    m->queue - m->pc == PQ_FREE ||	/* already on PQ_FREE */
2455 	    m->queue - m->pc == PQ_HOLD) {	/* already on PQ_HOLD */
2456 		if (_vm_page_wakeup(m)) {
2457 			vm_page_spin_unlock(m);
2458 			wakeup(m);
2459 		} else {
2460 			vm_page_spin_unlock(m);
2461 		}
2462 		return(0);
2463 	}
2464 	vm_page_spin_unlock(m);
2465 
2466 	/*
2467 	 * We can probably free the page.
2468 	 *
2469 	 * Page busied by us and no longer spinlocked.  Dirty pages will
2470 	 * not be freed by this function.    We have to re-test the
2471 	 * not be freed by this function.  We have to re-test the
2472 	 */
2473 	vm_page_test_dirty(m);
2474 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2475 		vm_page_wakeup(m);
2476 		return(0);
2477 	}
2478 	vm_page_protect(m, VM_PROT_NONE);
2479 	if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2480 		vm_page_wakeup(m);
2481 		return(0);
2482 	}
2483 	vm_page_free(m);
2484 	return(1);
2485 }
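
/*
 * Illustrative sketch: opportunistically reclaim a page, attempting a
 * full free first and falling back to PQ_CACHE.  The page must NOT be
 * busied by the caller; both helpers busy it internally and fail
 * non-destructively.  The example_* name is hypothetical.
 */
#if 0
static void
example_reclaim(vm_page_t m)
{
	if (vm_page_try_to_free(m))
		return;
	(void)vm_page_try_to_cache(m);
}
#endif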
2486 
2487 /*
2488  * vm_page_cache
2489  *
2490  * Put the specified page onto the page cache queue (if appropriate).
2491  *
2492  * The page must be busy, and this routine will release the busy and
2493  * possibly even free the page.
2494  */
2495 void
2496 vm_page_cache(vm_page_t m)
2497 {
2498 	if ((m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) ||
2499 	    m->busy || m->wire_count || m->hold_count) {
2500 		kprintf("vm_page_cache: attempting to cache busy/held page\n");
2501 		vm_page_wakeup(m);
2502 		return;
2503 	}
2504 
2505 	/*
2506 	 * Already in the cache (and thus not mapped)
2507 	 */
2508 	if ((m->queue - m->pc) == PQ_CACHE) {
2509 		KKASSERT((m->flags & PG_MAPPED) == 0);
2510 		vm_page_wakeup(m);
2511 		return;
2512 	}
2513 
2514 	/*
2515 	 * Caller is required to test m->dirty, but note that the act of
2516 	 * removing the page from its maps can cause it to become dirty
2517 	 * on an SMP system due to another cpu running in usermode.
2518 	 */
2519 	if (m->dirty) {
2520 		panic("vm_page_cache: caching a dirty page, pindex: %ld",
2521 			(long)m->pindex);
2522 	}
2523 
2524 	/*
2525 	 * Remove all pmaps and indicate that the page is not
2526 	 * writeable or mapped.  Our vm_page_protect() call may
2527 	 * have blocked (especially w/ VM_PROT_NONE), so recheck
2528 	 * everything.
2529 	 */
2530 	vm_page_protect(m, VM_PROT_NONE);
2531 	if ((m->flags & (PG_UNMANAGED | PG_MAPPED)) ||
2532 	    m->busy || m->wire_count || m->hold_count) {
2533 		vm_page_wakeup(m);
2534 	} else if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
2535 		vm_page_deactivate(m);
2536 		vm_page_wakeup(m);
2537 	} else {
2538 		_vm_page_and_queue_spin_lock(m);
2539 		_vm_page_rem_queue_spinlocked(m);
2540 		_vm_page_add_queue_spinlocked(m, PQ_CACHE + m->pc, 0);
2541 		_vm_page_queue_spin_unlock(m);
2542 		if (_vm_page_wakeup(m)) {
2543 			vm_page_spin_unlock(m);
2544 			wakeup(m);
2545 		} else {
2546 			vm_page_spin_unlock(m);
2547 		}
2548 		vm_page_free_wakeup();
2549 	}
2550 }
2551 
2552 /*
2553  * vm_page_dontneed()
2554  *
2555  * Cache, deactivate, or do nothing as appropriate.  This routine
2556  * is typically used by madvise() MADV_DONTNEED.
2557  *
2558  * Generally speaking we want to move the page into the cache so
2559  * it gets reused quickly.  However, this can result in a silly syndrome
2560  * due to the page recycling too quickly.  Small objects will not be
2561  * fully cached.  On the other hand, if we move the page to the inactive
2562  * queue we wind up with a problem whereby very large objects
2563  * unnecessarily blow away our inactive and cache queues.
2564  *
2565  * The solution is to move the pages based on a fixed weighting.  We
2566  * either leave them alone, deactivate them, or move them to the cache,
2567  * where moving them to the cache has the highest weighting.
2568  * By forcing some pages into other queues we eventually force the
2569  * system to balance the queues, potentially recovering other unrelated
2570  * space from active.  The idea is to not force this to happen too
2571  * often.
2572  *
2573  * The page must be busied.
2574  */
2575 void
2576 vm_page_dontneed(vm_page_t m)
2577 {
2578 	static int dnweight;
2579 	int dnw;
2580 	int head;
2581 
2582 	dnw = ++dnweight;
2583 
2584 	/*
2585 	 * Occasionally leave the page alone.
2586 	 */
2587 	if ((dnw & 0x01F0) == 0 ||
2588 	    m->queue - m->pc == PQ_INACTIVE ||
2589 	    m->queue - m->pc == PQ_CACHE
2590 	) {
2591 		if (m->act_count >= ACT_INIT)
2592 			--m->act_count;
2593 		return;
2594 	}
2595 
2596 	/*
2597 	 * If vm_page_dontneed() is inactivating a page, it must clear
2598 	 * the referenced flag; otherwise the pagedaemon will see references
2599 	 * on the page in the inactive queue and reactivate it. Until the
2600 	 * page can move to the cache queue, madvise's job is not done.
2601 	 */
2602 	vm_page_flag_clear(m, PG_REFERENCED);
2603 	pmap_clear_reference(m);
2604 
2605 	if (m->dirty == 0)
2606 		vm_page_test_dirty(m);
2607 
2608 	if (m->dirty || (dnw & 0x0070) == 0) {
2609 		/*
2610 		 * Deactivate the page 3 times out of 32.
2611 		 */
2612 		head = 0;
2613 	} else {
2614 		/*
2615 		 * Cache the page 28 times out of every 32.  Note that
2616 		 * the page is deactivated instead of cached, but placed
2617 		 * at the head of the queue instead of the tail.
2618 		 */
2619 		head = 1;
2620 	}
2621 	vm_page_spin_lock(m);
2622 	_vm_page_deactivate_locked(m, head);
2623 	vm_page_spin_unlock(m);
2624 }
2625 
2626 /*
2627  * These routines manipulate the 'soft busy' count for a page.  A soft busy
2628  * is almost like PG_BUSY except that it allows certain compatible operations
2629  * to occur on the page while it is busy.  For example, a page undergoing a
2630  * write can still be mapped read-only.
2631  *
2632  * Because vm_pages can overlap buffers, m->busy can be > 1.  m->busy is only
2633  * adjusted while the vm_page is PG_BUSY so the flash will occur when the
2634  * busy bit is cleared.
2635  */
2636 void
2637 vm_page_io_start(vm_page_t m)
2638 {
2639 	KASSERT(m->flags & PG_BUSY, ("vm_page_io_start: page not busy!!!"));
2640 	atomic_add_char(&m->busy, 1);
2641 	vm_page_flag_set(m, PG_SBUSY);
2642 }
2643 
2644 void
2645 vm_page_io_finish(vm_page_t m)
2646 {
2647 	KASSERT(m->flags & PG_BUSY, ("vm_page_io_finish: page not busy!!!"));
2648 	atomic_subtract_char(&m->busy, 1);
2649 	if (m->busy == 0)
2650 		vm_page_flag_clear(m, PG_SBUSY);
2651 }
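
/*
 * Illustrative sketch: bracket an asynchronous write with a soft busy
 * so the hard busy (PG_BUSY) can be dropped while the I/O is in
 * flight.  The page is passed in hard-busied; the example_* name and
 * the "exiow" wmesg are hypothetical.
 */
#if 0
static void
example_async_write(vm_page_t m)
{
	vm_page_io_start(m);	/* requires PG_BUSY, sets PG_SBUSY */
	vm_page_wakeup(m);	/* drop the hard busy, soft busy remains */

	/* ... issue and await the write ... */

	vm_page_busy_wait(m, FALSE, "exiow");
	vm_page_io_finish(m);	/* clears PG_SBUSY when m->busy hits 0 */
	vm_page_wakeup(m);
}
#endif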
2652 
2653 /*
2654  * Indicate that a clean VM page requires a filesystem commit and cannot
2655  * be reused.  Used by tmpfs.
2656  */
2657 void
2658 vm_page_need_commit(vm_page_t m)
2659 {
2660 	vm_page_flag_set(m, PG_NEED_COMMIT);
2661 	vm_object_set_writeable_dirty(m->object);
2662 }
2663 
2664 void
2665 vm_page_clear_commit(vm_page_t m)
2666 {
2667 	vm_page_flag_clear(m, PG_NEED_COMMIT);
2668 }
2669 
2670 /*
2671  * Grab a page, blocking if it is busy and allocating a page if necessary.
2672  * A busy page is returned or NULL.  The page may or may not be valid and
2673  * might not be on a queue (the caller is responsible for the disposition of
2674  * the page).
2675  *
2676  * If VM_ALLOC_ZERO is specified and the grab must allocate a new page, the
2677  * page will be zero'd and marked valid.
2678  *
2679  * If VM_ALLOC_FORCE_ZERO is specified the page will be zero'd and marked
2680  * valid even if it already exists.
2681  *
2682  * If VM_ALLOC_RETRY is specified this routine will never return NULL.  Also
2683  * note that VM_ALLOC_NORMAL must be specified if VM_ALLOC_RETRY is specified.
2684  * VM_ALLOC_NULL_OK is implied when VM_ALLOC_RETRY is specified.
2685  *
2686  * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
2687  * always returned if we had blocked.
2688  *
2689  * This routine may not be called from an interrupt.
2690  *
2691  * No other requirements.
2692  */
2693 vm_page_t
2694 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2695 {
2696 	vm_page_t m;
2697 	int error;
2698 	int shared = 1;
2699 
2700 	KKASSERT(allocflags &
2701 		(VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
2702 	vm_object_hold_shared(object);
2703 	for (;;) {
2704 		m = vm_page_lookup_busy_try(object, pindex, TRUE, &error);
2705 		if (error) {
2706 			vm_page_sleep_busy(m, TRUE, "pgrbwt");
2707 			if ((allocflags & VM_ALLOC_RETRY) == 0) {
2708 				m = NULL;
2709 				break;
2710 			}
2711 			/* retry */
2712 		} else if (m == NULL) {
2713 			if (shared) {
2714 				vm_object_upgrade(object);
2715 				shared = 0;
2716 			}
2717 			if (allocflags & VM_ALLOC_RETRY)
2718 				allocflags |= VM_ALLOC_NULL_OK;
2719 			m = vm_page_alloc(object, pindex,
2720 					  allocflags & ~VM_ALLOC_RETRY);
2721 			if (m)
2722 				break;
2723 			vm_wait(0);
2724 			if ((allocflags & VM_ALLOC_RETRY) == 0)
2725 				goto failed;
2726 		} else {
2727 			/* m found */
2728 			break;
2729 		}
2730 	}
2731 
2732 	/*
2733 	 * If VM_ALLOC_ZERO an invalid page will be zero'd and set valid.
2734 	 *
2735 	 * If VM_ALLOC_FORCE_ZERO the page is unconditionally zero'd and set
2736 	 * valid even if already valid.
2737 	 *
2738 	 * NOTE!  We have removed all of the PG_ZERO optimizations and also
2739 	 *	  removed the idle zeroing code.  These optimizations actually
2740 	 *	  slow things down on modern cpus because the zeroed area is
2741 	 *	  likely uncached, placing a memory-access burden on the
2742 	 *	  accessors taking the fault.
2743 	 *
2744 	 *	  By always zeroing the page in-line with the fault, no
2745 	 *	  dynamic ram reads are needed and the caches are hot, ready
2746 	 *	  for userland to access the memory.
2747 	 */
2748 	if (m->valid == 0) {
2749 		if (allocflags & (VM_ALLOC_ZERO | VM_ALLOC_FORCE_ZERO)) {
2750 			pmap_zero_page(VM_PAGE_TO_PHYS(m));
2751 			m->valid = VM_PAGE_BITS_ALL;
2752 		}
2753 	} else if (allocflags & VM_ALLOC_FORCE_ZERO) {
2754 		pmap_zero_page(VM_PAGE_TO_PHYS(m));
2755 		m->valid = VM_PAGE_BITS_ALL;
2756 	}
2757 failed:
2758 	vm_object_drop(object);
2759 	return(m);
2760 }
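
/*
 * Illustrative sketch: obtain a busied page, zero-filling and marking
 * it fully valid if it was entirely invalid.  VM_ALLOC_RETRY requires
 * VM_ALLOC_NORMAL and implies VM_ALLOC_NULL_OK, so the call cannot
 * return NULL.  The example_* name is hypothetical.
 */
#if 0
static vm_page_t
example_grab_zeroed(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	m = vm_page_grab(object, pindex,
			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
	/* m is PG_BUSY; release with vm_page_wakeup(m) when done */
	return (m);
}
#endif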
2761 
2762 /*
2763  * Mapping function for valid bits or for dirty bits in
2764  * a page.  May not block.
2765  *
2766  * Inputs are required to range within a page.
2767  *
2768  * No requirements.
2769  * Non blocking.
2770  */
2771 int
2772 vm_page_bits(int base, int size)
2773 {
2774 	int first_bit;
2775 	int last_bit;
2776 
2777 	KASSERT(
2778 	    base + size <= PAGE_SIZE,
2779 	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2780 	);
2781 
2782 	if (size == 0)		/* handle degenerate case */
2783 		return(0);
2784 
2785 	first_bit = base >> DEV_BSHIFT;
2786 	last_bit = (base + size - 1) >> DEV_BSHIFT;
2787 
2788 	return ((2 << last_bit) - (1 << first_bit));
2789 }
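
/*
 * Worked example (illustrative, assuming DEV_BSIZE == 512 and thus
 * DEV_BSHIFT == 9): vm_page_bits(512, 1536) spans blocks 1..3, so the
 * result is (2 << 3) - (1 << 1) = 16 - 2 = 0x0e.
 */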
2790 
2791 /*
2792  * Sets portions of a page valid and clean.  The arguments are expected
2793  * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2794  * of any partial chunks touched by the range.  The invalid portion of
2795  * such chunks will be zero'd.
2796  *
2797  * NOTE: When truncating a buffer vnode_pager_setsize() will automatically
2798  *	 align base to DEV_BSIZE so as not to mark clean a partially
2799  *	 truncated device block.  Otherwise the dirty page status might be
2800  *	 lost.
2801  *
2802  * This routine may not block.
2803  *
2804  * (base + size) must be less than or equal to PAGE_SIZE.
2805  */
2806 static void
2807 _vm_page_zero_valid(vm_page_t m, int base, int size)
2808 {
2809 	int frag;
2810 	int endoff;
2811 
2812 	if (size == 0)	/* handle degenerate case */
2813 		return;
2814 
2815 	/*
2816 	 * If the base is not DEV_BSIZE aligned and the valid
2817 	 * bit is clear, we have to zero out a portion of the
2818 	 * first block.
2819 	 */
2820 
2821 	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2822 	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
2823 	) {
2824 		pmap_zero_page_area(
2825 		    VM_PAGE_TO_PHYS(m),
2826 		    frag,
2827 		    base - frag
2828 		);
2829 	}
2830 
2831 	/*
2832 	 * If the ending offset is not DEV_BSIZE aligned and the
2833 	 * valid bit is clear, we have to zero out a portion of
2834 	 * the last block.
2835 	 */
2836 
2837 	endoff = base + size;
2838 
2839 	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2840 	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
2841 	) {
2842 		pmap_zero_page_area(
2843 		    VM_PAGE_TO_PHYS(m),
2844 		    endoff,
2845 		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
2846 		);
2847 	}
2848 }
2849 
2850 /*
2851  * Set valid, clear dirty bits.  If validating the entire
2852  * page we can safely clear the pmap modify bit.  We also
2853  * use this opportunity to clear the PG_NOSYNC flag.  If a process
2854  * takes a write fault on a MAP_NOSYNC memory area the flag will
2855  * be set again.
2856  *
2857  * We set valid bits inclusive of any overlap, but we can only
2858  * clear dirty bits for DEV_BSIZE chunks that are fully within
2859  * the range.
2860  *
2861  * Page must be busied?
2862  * No other requirements.
2863  */
2864 void
2865 vm_page_set_valid(vm_page_t m, int base, int size)
2866 {
2867 	_vm_page_zero_valid(m, base, size);
2868 	m->valid |= vm_page_bits(base, size);
2869 }
2870 
2871 
2872 /*
2873  * Set valid bits and clear dirty bits.
2874  *
2875  * NOTE: This function does not clear the pmap modified bit.
2876  *	 Also note that e.g. NFS may use a byte-granular base
2877  *	 and size.
2878  *
2879  * WARNING: Page must be busied?  But vfs_clean_one_page() will call
2880  *	    this without necessarily busying the page (via bdwrite()).
2881  *	    So for now vm_token must also be held.
2882  *
2883  * No other requirements.
2884  */
2885 void
2886 vm_page_set_validclean(vm_page_t m, int base, int size)
2887 {
2888 	int pagebits;
2889 
2890 	_vm_page_zero_valid(m, base, size);
2891 	pagebits = vm_page_bits(base, size);
2892 	m->valid |= pagebits;
2893 	m->dirty &= ~pagebits;
2894 	if (base == 0 && size == PAGE_SIZE) {
2895 		/*pmap_clear_modify(m);*/
2896 		vm_page_flag_clear(m, PG_NOSYNC);
2897 	}
2898 }
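
/*
 * Illustrative sketch: a read that filled only the first 1KB of the
 * page marks just that range valid and clean; with DEV_BSIZE == 512
 * this covers bits 0 and 1.  example_partial_read_done() is a
 * hypothetical caller, not an existing kernel function.
 */
#if 0
static void
example_partial_read_done(vm_page_t m)
{
	vm_page_set_validclean(m, 0, 1024);
}
#endif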
2899 
2900 /*
2901  * Set valid & dirty.  Used by buwrite()
2902  *
2903  * WARNING: Page must be busied?  But vfs_dirty_one_page() will
2904  *	    call this function in buwrite() so for now vm_token must
2905  *	    be held.
2906  *
2907  * No other requirements.
2908  */
2909 void
2910 vm_page_set_validdirty(vm_page_t m, int base, int size)
2911 {
2912 	int pagebits;
2913 
2914 	pagebits = vm_page_bits(base, size);
2915 	m->valid |= pagebits;
2916 	m->dirty |= pagebits;
2917 	if (m->object)
2918 		vm_object_set_writeable_dirty(m->object);
2919 }
2920 
2921 /*
2922  * Clear dirty bits.
2923  *
2924  * NOTE: This function does not clear the pmap modified bit.
2925  *	 Also note that e.g. NFS may use a byte-granular base
2926  *	 and size.
2927  *
2928  * Page must be busied?
2929  * No other requirements.
2930  */
2931 void
2932 vm_page_clear_dirty(vm_page_t m, int base, int size)
2933 {
2934 	m->dirty &= ~vm_page_bits(base, size);
2935 	if (base == 0 && size == PAGE_SIZE) {
2936 		/*pmap_clear_modify(m);*/
2937 		vm_page_flag_clear(m, PG_NOSYNC);
2938 	}
2939 }
2940 
2941 /*
2942  * Make the page all-dirty.
2943  *
2944  * Also make sure the related object and vnode reflect the fact that the
2945  * object may now contain a dirty page.
2946  *
2947  * Page must be busied?
2948  * No other requirements.
2949  */
2950 void
2951 vm_page_dirty(vm_page_t m)
2952 {
2953 #ifdef INVARIANTS
2954 	int pqtype = m->queue - m->pc;
2955 #endif
2956 	KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
2957 		("vm_page_dirty: page in free/cache queue!"));
2958 	if (m->dirty != VM_PAGE_BITS_ALL) {
2959 		m->dirty = VM_PAGE_BITS_ALL;
2960 		if (m->object)
2961 			vm_object_set_writeable_dirty(m->object);
2962 	}
2963 }
2964 
2965 /*
2966  * Invalidates DEV_BSIZE'd chunks within a page.  Both the
2967  * valid and dirty bits for the affected areas are cleared.
2968  *
2969  * Page must be busied?
2970  * Does not block.
2971  * No other requirements.
2972  */
2973 void
2974 vm_page_set_invalid(vm_page_t m, int base, int size)
2975 {
2976 	int bits;
2977 
2978 	bits = vm_page_bits(base, size);
2979 	m->valid &= ~bits;
2980 	m->dirty &= ~bits;
2981 	m->object->generation++;
2982 }
2983 
2984 /*
2985  * The kernel assumes that the invalid portions of a page contain
2986  * garbage, but such pages can be mapped into memory by user code.
2987  * When this occurs, we must zero out the non-valid portions of the
2988  * page so user code sees what it expects.
2989  *
2990  * Pages are most often semi-valid when the end of a file is mapped
2991  * into memory and the file's size is not page aligned.
2992  *
2993  * Page must be busied?
2994  * No other requirements.
2995  */
2996 void
2997 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
2998 {
2999 	int b;
3000 	int i;
3001 
3002 	/*
3003 	 * Scan the valid bits looking for invalid sections that
3004 	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
3005 	 * valid bit may be set) have already been zeroed by
3006 	 * vm_page_set_validclean().
3007 	 */
3008 	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
3009 		if (i == (PAGE_SIZE / DEV_BSIZE) ||
3010 		    (m->valid & (1 << i))
3011 		) {
3012 			if (i > b) {
3013 				pmap_zero_page_area(
3014 				    VM_PAGE_TO_PHYS(m),
3015 				    b << DEV_BSHIFT,
3016 				    (i - b) << DEV_BSHIFT
3017 				);
3018 			}
3019 			b = i + 1;
3020 		}
3021 	}
3022 
3023 	/*
3024 	 * setvalid is TRUE when we can safely set the zero'd areas
3025 	 * as being valid.  We can do this if there are no cache consistency
3026 	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
3027 	 */
3028 	if (setvalid)
3029 		m->valid = VM_PAGE_BITS_ALL;
3030 }
3031 
3032 /*
3033  * Is a (partial) page valid?  Note that in the degenerate case where
3034  * size == 0, FALSE is returned if the page is entirely invalid and
3035  * TRUE otherwise.
3036  *
3037  * Does not block.
3038  * No other requirements.
3039  */
3040 int
3041 vm_page_is_valid(vm_page_t m, int base, int size)
3042 {
3043 	int bits = vm_page_bits(base, size);
3044 
3045 	if (m->valid && ((m->valid & bits) == bits))
3046 		return 1;
3047 	else
3048 		return 0;
3049 }
3050 
3051 /*
3052  * Update dirty bits from pmap/mmu.  May not block.
3053  *
3054  * Caller must hold the page busy
3055  */
3056 void
3057 vm_page_test_dirty(vm_page_t m)
3058 {
3059 	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
3060 		vm_page_dirty(m);
3061 	}
3062 }
3063 
3064 /*
3065  * Register an action, associating it with its vm_page
3066  */
3067 void
3068 vm_page_register_action(vm_page_action_t action, vm_page_event_t event)
3069 {
3070 	struct vm_page_action_list *list;
3071 	int hv;
3072 
3073 	hv = (int)((intptr_t)action->m >> 8) & VMACTION_HMASK;
3074 	list = &action_list[hv];
3075 
3076 	lwkt_gettoken(&vm_token);
3077 	vm_page_flag_set(action->m, PG_ACTIONLIST);
3078 	action->event = event;
3079 	LIST_INSERT_HEAD(list, action, entry);
3080 	lwkt_reltoken(&vm_token);
3081 }
3082 
3083 /*
3084  * Unregister an action, disassociating it from its related vm_page
3085  */
3086 void
3087 vm_page_unregister_action(vm_page_action_t action)
3088 {
3089 	struct vm_page_action_list *list;
3090 	int hv;
3091 
3092 	lwkt_gettoken(&vm_token);
3093 	if (action->event != VMEVENT_NONE) {
3094 		action->event = VMEVENT_NONE;
3095 		LIST_REMOVE(action, entry);
3096 
3097 		hv = (int)((intptr_t)action->m >> 8) & VMACTION_HMASK;
3098 		list = &action_list[hv];
3099 		if (LIST_EMPTY(list))
3100 			vm_page_flag_clear(action->m, PG_ACTIONLIST);
3101 	}
3102 	lwkt_reltoken(&vm_token);
3103 }
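
/*
 * Illustrative sketch: arming a callback for a page event.  The caller
 * provides the vm_page_action storage and fills in ->m and ->func
 * before registering.  The example_* names are hypothetical.
 */
#if 0
static void
example_callback(vm_page_t m, vm_page_action_t action)
{
	/* called from vm_page_event_internal(); already unlinked */
}

static void
example_arm(vm_page_t m, vm_page_action_t action, vm_page_event_t event)
{
	action->m = m;
	action->func = example_callback;
	vm_page_register_action(action, event);
}
#endif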
3104 
3105 /*
3106  * Issue an event on a VM page.  Corresponding action structures are
3107  * removed from the page's list and called.
3108  *
3109  * If the vm_page has no more pending action events we clear its
3110  * PG_ACTIONLIST flag.
3111  */
3112 void
3113 vm_page_event_internal(vm_page_t m, vm_page_event_t event)
3114 {
3115 	struct vm_page_action_list *list;
3116 	struct vm_page_action *scan;
3117 	struct vm_page_action *next;
3118 	int hv;
3119 	int all;
3120 
3121 	hv = (int)((intptr_t)m >> 8) & VMACTION_HMASK;
3122 	list = &action_list[hv];
3123 	all = 1;
3124 
3125 	lwkt_gettoken(&vm_token);
3126 	LIST_FOREACH_MUTABLE(scan, list, entry, next) {
3127 		if (scan->m == m) {
3128 			if (scan->event == event) {
3129 				scan->event = VMEVENT_NONE;
3130 				LIST_REMOVE(scan, entry);
3131 				scan->func(m, scan);
3132 				/* XXX */
3133 			} else {
3134 				all = 0;
3135 			}
3136 		}
3137 	}
3138 	if (all)
3139 		vm_page_flag_clear(m, PG_ACTIONLIST);
3140 	lwkt_reltoken(&vm_token);
3141 }
3142 
3143 #include "opt_ddb.h"
3144 #ifdef DDB
3145 #include <sys/kernel.h>
3146 
3147 #include <ddb/ddb.h>
3148 
3149 DB_SHOW_COMMAND(page, vm_page_print_page_info)
3150 {
3151 	db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
3152 	db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
3153 	db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
3154 	db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
3155 	db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
3156 	db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
3157 	db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
3158 	db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
3159 	db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
3160 	db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
3161 }
3162 
3163 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3164 {
3165 	int i;
3166 	db_printf("PQ_FREE:");
3167 	for (i = 0; i < PQ_L2_SIZE; i++) {
3168 		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
3169 	}
3170 	db_printf("\n");
3171 
3172 	db_printf("PQ_CACHE:");
3173 	for (i = 0; i < PQ_L2_SIZE; i++) {
3174 		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
3175 	}
3176 	db_printf("\n");
3177 
3178 	db_printf("PQ_ACTIVE:");
3179 	for (i = 0; i < PQ_L2_SIZE; i++) {
3180 		db_printf(" %d", vm_page_queues[PQ_ACTIVE + i].lcnt);
3181 	}
3182 	db_printf("\n");
3183 
3184 	db_printf("PQ_INACTIVE:");
3185 	for (i = 0; i < PQ_L2_SIZE; i++) {
3186 		db_printf(" %d", vm_page_queues[PQ_INACTIVE + i].lcnt);
3187 	}
3188 	db_printf("\n");
3189 }
3190 #endif /* DDB */
3191