xref: /openbsd/sys/uvm/uvm_page.c (revision 17df1aa7)
1 /*	$OpenBSD: uvm_page.c,v 1.100 2010/04/22 19:02:55 oga Exp $	*/
2 /*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/
3 
4 /*
5  * Copyright (c) 1997 Charles D. Cranor and Washington University.
6  * Copyright (c) 1991, 1993, The Regents of the University of California.
7  *
8  * All rights reserved.
9  *
10  * This code is derived from software contributed to Berkeley by
11  * The Mach Operating System project at Carnegie-Mellon University.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by Charles D. Cranor,
24  *      Washington University, the University of California, Berkeley and
25  *      its contributors.
26  * 4. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
43  * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
44  *
45  *
46  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
47  * All rights reserved.
48  *
49  * Permission to use, copy, modify and distribute this software and
50  * its documentation is hereby granted, provided that both the copyright
51  * notice and this permission notice appear in all copies of the
52  * software, derivative works or modified versions, and any portions
53  * thereof, and that both notices appear in supporting documentation.
54  *
55  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58  *
59  * Carnegie Mellon requests users of this software to return to
60  *
61  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
62  *  School of Computer Science
63  *  Carnegie Mellon University
64  *  Pittsburgh PA 15213-3890
65  *
66  * any improvements or extensions that they make and grant Carnegie the
67  * rights to redistribute these changes.
68  */
69 
70 /*
71  * uvm_page.c: page ops.
72  */
73 
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/sched.h>
77 #include <sys/kernel.h>
78 #include <sys/vnode.h>
79 #include <sys/mount.h>
80 #include <sys/proc.h>
81 
82 #include <uvm/uvm.h>
83 
84 /*
85  * for object trees
86  */
87 RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
88 
89 int
90 uvm_pagecmp(struct vm_page *a, struct vm_page *b)
91 {
92 	return (a->offset < b->offset ? -1 : a->offset > b->offset);
93 }
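/*
 * roughly speaking, the ternary above follows the -1/0/1 comparator
 * contract expected by the sys/tree.h RB macros:
 *
 *	a->offset <  b->offset	-> -1
 *	a->offset == b->offset	->  0	(the "greater than" test is false)
 *	a->offset >  b->offset	->  1	(the "greater than" test is true)
 *
 * uvm_pagelookup() below relies on this by probing the tree with a
 * stack-allocated key page whose offset is the only field set up.
 */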
94 
95 /*
96  * global vars... XXXCDC: move to uvm. structure.
97  */
98 
99 /*
100  * physical memory config is stored in vm_physmem.
101  */
102 
103 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
104 int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
105 
106 /*
107  * Some supported CPUs in a given architecture don't support all
108  * of the things necessary to do idle page zero'ing efficiently.
109  * We therefore provide a way to disable it from machdep code here.
110  */
111 
112 /*
113  * XXX disabled until we can find a way to do this without causing
114  * problems for either cpu caches or DMA latency.
115  */
116 boolean_t vm_page_zero_enable = FALSE;
117 
118 /*
119  * local variables
120  */
121 
122 /*
123  * these variables record the kernel virtual address range handed back by
124  * uvm_page_init(), for debugging purposes.  The implementation of
125  * uvm_pageboot_alloc() here also uses them internally.
126  */
127 
128 static vaddr_t      virtual_space_start;
129 static vaddr_t      virtual_space_end;
130 
131 /*
132  * History
133  */
134 UVMHIST_DECL(pghist);
135 
136 /*
137  * local prototypes
138  */
139 
140 static void uvm_pageinsert(struct vm_page *);
141 static void uvm_pageremove(struct vm_page *);
142 
143 /*
144  * inline functions
145  */
146 
147 /*
148  * uvm_pageinsert: insert a page in the object
149  *
150  * => caller must lock object
151  * => caller must lock page queues XXX questionable
152  * => caller should have already set pg's object and offset pointers
153  *    and bumped the version counter
154  */
155 
156 __inline static void
157 uvm_pageinsert(struct vm_page *pg)
158 {
159 	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);
160 
161 	KASSERT((pg->pg_flags & PG_TABLED) == 0);
162 	/* XXX should we check duplicates? */
163 	RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
164 	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
165 	pg->uobject->uo_npages++;
166 }
167 
168 /*
169  * uvm_pageremove: remove page from object
170  *
171  * => caller must lock object
172  * => caller must lock page queues
173  */
174 
175 static __inline void
176 uvm_pageremove(struct vm_page *pg)
177 {
178 	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);
179 
180 	KASSERT(pg->pg_flags & PG_TABLED);
181 	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
182 
183 	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
184 	pg->uobject->uo_npages--;
185 	pg->uobject = NULL;
186 	pg->pg_version++;
187 }
188 
189 /*
190  * uvm_page_init: init the page system.   called from uvm_init().
191  *
192  * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
193  */
194 
195 void
196 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
197 {
198 	vsize_t freepages, pagecount, n;
199 	vm_page_t pagearray;
200 	int lcv, i;
201 	paddr_t paddr;
202 #if defined(UVMHIST)
203 	static struct uvm_history_ent pghistbuf[100];
204 #endif
205 
206 	UVMHIST_FUNC("uvm_page_init");
207 	UVMHIST_INIT_STATIC(pghist, pghistbuf);
208 	UVMHIST_CALLED(pghist);
209 
210 	/*
211 	 * init the page queues and page queue locks
212 	 */
213 
214 	TAILQ_INIT(&uvm.page_active);
215 	TAILQ_INIT(&uvm.page_inactive_swp);
216 	TAILQ_INIT(&uvm.page_inactive_obj);
217 	simple_lock_init(&uvm.pageqlock);
218 	mtx_init(&uvm.fpageqlock, IPL_VM);
219 	uvm_pmr_init();
220 
221 	/*
222 	 * allocate vm_page structures.
223 	 */
224 
225 	/*
226 	 * sanity check:
227 	 * before calling this function the MD code is expected to register
228 	 * some free RAM with the uvm_page_physload() function.   our job
229 	 * now is to allocate vm_page structures for this memory.
230 	 */
231 
232 	if (vm_nphysseg == 0)
233 		panic("uvm_page_init: no memory pre-allocated");
234 
235 	/*
236 	 * first calculate the number of free pages...
237 	 *
238 	 * note that we use start/end rather than avail_start/avail_end.
239 	 * this allows us to allocate extra vm_page structures in case we
240 	 * want to return some memory to the pool after booting.
241 	 */
242 
243 	freepages = 0;
244 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
245 		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
246 
247 	/*
248 	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
249 	 * use.   each page of memory we use also needs a vm_page structure
250 	 * to describe it, so the total number of pages we can use is the
251 	 * total size of the memory divided by
252 	 * (PAGE_SIZE + sizeof(struct vm_page)).   we add one to freepages as
253 	 * a fudge factor to avoid truncation errors (since we can only
254 	 * allocate in terms of whole pages).
255 	 */
256 
257 	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
258 	    (PAGE_SIZE + sizeof(struct vm_page));
259 	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
260 	    sizeof(struct vm_page));
261 	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
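	/*
	 * worked example (illustrative figures only): with 4096 byte pages,
	 * a hypothetical sizeof(struct vm_page) of 64 bytes and
	 * freepages == 1000, the formula above gives
	 *
	 *	pagecount = ((1000 + 1) * 4096) / (4096 + 64) == 985
	 *
	 * i.e. roughly 1.5% of the raw pages end up spent on their own
	 * vm_page structures.
	 */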
262 
263 	/*
264 	 * init the vm_page structures and put them in the correct place.
265 	 */
266 
267 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
268 		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
269 		if (n > pagecount) {
270 			panic("uvm_page_init: lost %ld page(s) in init\n",
271 			    (long)(n - pagecount));
272 			    /* XXXCDC: shouldn't happen? */
273 			/* n = pagecount; */
274 		}
275 
276 		/* set up page array pointers */
277 		vm_physmem[lcv].pgs = pagearray;
278 		pagearray += n;
279 		pagecount -= n;
280 		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
281 
282 		/* init and free vm_pages (we've already zeroed them) */
283 		paddr = ptoa(vm_physmem[lcv].start);
284 		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
285 			vm_physmem[lcv].pgs[i].phys_addr = paddr;
286 #ifdef __HAVE_VM_PAGE_MD
287 			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
288 #endif
289 			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
290 			    atop(paddr) <= vm_physmem[lcv].avail_end) {
291 				uvmexp.npages++;
292 			}
293 		}
294 
295 		/*
296 		 * Add pages to free pool.
297 		 */
298 		uvm_pmr_freepages(&vm_physmem[lcv].pgs[
299 		    vm_physmem[lcv].avail_start - vm_physmem[lcv].start],
300 		    vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
301 	}
302 
303 	/*
304 	 * pass up the values of virtual_space_start and
305 	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
306 	 * layers of the VM.
307 	 */
308 
309 	*kvm_startp = round_page(virtual_space_start);
310 	*kvm_endp = trunc_page(virtual_space_end);
311 
312 	/*
313 	 * init locks for kernel threads
314 	 */
315 	mtx_init(&uvm.aiodoned_lock, IPL_BIO);
316 
317 	/*
318 	 * init reserve thresholds
319 	 * XXXCDC - values may need adjusting
320 	 */
321 	uvmexp.reserve_pagedaemon = 4;
322 	uvmexp.reserve_kernel = 6;
323 	uvmexp.anonminpct = 10;
324 	uvmexp.vnodeminpct = 10;
325 	uvmexp.vtextminpct = 5;
326 	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
327 	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
328 	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;
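	/*
	 * the *min values are kept as parts of 256 rather than as
	 * percentages, e.g. anonminpct == 10 becomes
	 * 10 * 256 / 100 == 25, i.e. 25 parts in 256 (roughly 9.8%).
	 */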
329 
330   	/*
331 	 * determine if we should zero pages in the idle loop.
332 	 */
333 
334 	uvm.page_idle_zero = vm_page_zero_enable;
335 
336 	/*
337 	 * done!
338 	 */
339 
340 	uvm.page_init_done = TRUE;
341 }
342 
343 /*
344  * uvm_setpagesize: set the page size
345  *
346  * => sets page_shift and page_mask from uvmexp.pagesize.
347  */
348 
349 void
350 uvm_setpagesize(void)
351 {
352 	if (uvmexp.pagesize == 0)
353 		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
354 	uvmexp.pagemask = uvmexp.pagesize - 1;
355 	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
356 		panic("uvm_setpagesize: page size not a power of two");
357 	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
358 		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
359 			break;
360 }
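/*
 * for the common uvmexp.pagesize == 4096 case the loop above stops with
 * uvmexp.pageshift == 12 and uvmexp.pagemask == 0xfff; a size that is
 * not a power of two is caught by the panic before the loop runs.
 */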
361 
362 /*
363  * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
364  */
365 
366 vaddr_t
367 uvm_pageboot_alloc(vsize_t size)
368 {
369 #if defined(PMAP_STEAL_MEMORY)
370 	vaddr_t addr;
371 
372 	/*
373 	 * defer bootstrap allocation to MD code (it may want to allocate
374 	 * from a direct-mapped segment).  pmap_steal_memory should round
375 	 * off virtual_space_start/virtual_space_end.
376 	 */
377 
378 	addr = pmap_steal_memory(size, &virtual_space_start,
379 	    &virtual_space_end);
380 
381 	return(addr);
382 
383 #else /* !PMAP_STEAL_MEMORY */
384 
385 	static boolean_t initialized = FALSE;
386 	vaddr_t addr, vaddr;
387 	paddr_t paddr;
388 
389 	/* round to page size */
390 	size = round_page(size);
391 
392 	/*
393 	 * on first call to this function, initialize ourselves.
394 	 */
395 	if (initialized == FALSE) {
396 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
397 
398 		/* round it the way we like it */
399 		virtual_space_start = round_page(virtual_space_start);
400 		virtual_space_end = trunc_page(virtual_space_end);
401 
402 		initialized = TRUE;
403 	}
404 
405 	/*
406 	 * allocate virtual memory for this request
407 	 */
408 	if (virtual_space_start == virtual_space_end ||
409 	    (virtual_space_end - virtual_space_start) < size)
410 		panic("uvm_pageboot_alloc: out of virtual space");
411 
412 	addr = virtual_space_start;
413 
414 #ifdef PMAP_GROWKERNEL
415 	/*
416 	 * If the kernel pmap can't map the requested space,
417 	 * then allocate more resources for it.
418 	 */
419 	if (uvm_maxkaddr < (addr + size)) {
420 		uvm_maxkaddr = pmap_growkernel(addr + size);
421 		if (uvm_maxkaddr < (addr + size))
422 			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
423 	}
424 #endif
425 
426 	virtual_space_start += size;
427 
428 	/*
429 	 * allocate and mapin physical pages to back new virtual pages
430 	 */
431 
432 	for (vaddr = round_page(addr) ; vaddr < addr + size ;
433 	    vaddr += PAGE_SIZE) {
434 
435 		if (!uvm_page_physget(&paddr))
436 			panic("uvm_pageboot_alloc: out of memory");
437 
438 		/*
439 		 * Note this memory is no longer managed, so using
440 		 * pmap_kenter is safe.
441 		 */
442 		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
443 	}
444 	pmap_update(pmap_kernel());
445 	return(addr);
446 #endif	/* PMAP_STEAL_MEMORY */
447 }
448 
449 #if !defined(PMAP_STEAL_MEMORY)
450 /*
451  * uvm_page_physget: "steal" one page from the vm_physmem structure.
452  *
453  * => attempt to allocate it off the end of a segment in which the "avail"
454  *    values match the start/end values.   if we can't do that, then we
455  *    will advance both values (making them equal, and removing some
456  *    vm_page structures from the non-avail area).
457  * => return false if out of memory.
458  */
459 
460 /* subroutine: try to allocate from memory chunks on the specified freelist */
461 static boolean_t uvm_page_physget_freelist(paddr_t *, int);
462 
463 static boolean_t
464 uvm_page_physget_freelist(paddr_t *paddrp, int freelist)
465 {
466 	int lcv, x;
467 	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);
468 
469 	/* pass 1: try allocating from a matching end */
470 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
471 	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
472 	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
473 #else
474 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
475 #endif
476 	{
477 
478 		if (uvm.page_init_done == TRUE)
479 			panic("uvm_page_physget: called _after_ bootstrap");
480 
481 		if (vm_physmem[lcv].free_list != freelist)
482 			continue;
483 
484 		/* try from front */
485 		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
486 		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
487 			*paddrp = ptoa(vm_physmem[lcv].avail_start);
488 			vm_physmem[lcv].avail_start++;
489 			vm_physmem[lcv].start++;
490 			/* nothing left?   nuke it */
491 			if (vm_physmem[lcv].avail_start ==
492 			    vm_physmem[lcv].end) {
493 				if (vm_nphysseg == 1)
494 				    panic("uvm_page_physget: out of memory!");
495 				vm_nphysseg--;
496 				for (x = lcv ; x < vm_nphysseg ; x++)
497 					/* structure copy */
498 					vm_physmem[x] = vm_physmem[x+1];
499 			}
500 			return (TRUE);
501 		}
502 
503 		/* try from rear */
504 		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
505 		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
506 			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
507 			vm_physmem[lcv].avail_end--;
508 			vm_physmem[lcv].end--;
509 			/* nothing left?   nuke it */
510 			if (vm_physmem[lcv].avail_end ==
511 			    vm_physmem[lcv].start) {
512 				if (vm_nphysseg == 1)
513 				    panic("uvm_page_physget: out of memory!");
514 				vm_nphysseg--;
515 				for (x = lcv ; x < vm_nphysseg ; x++)
516 					/* structure copy */
517 					vm_physmem[x] = vm_physmem[x+1];
518 			}
519 			return (TRUE);
520 		}
521 	}
522 
523 	/* pass 2: forget about matching ends, just allocate something */
524 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
525 	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
526 	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
527 #else
528 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
529 #endif
530 	{
531 
532 		/* any room in this bank? */
533 		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
534 			continue;  /* nope */
535 
536 		*paddrp = ptoa(vm_physmem[lcv].avail_start);
537 		vm_physmem[lcv].avail_start++;
538 		/* truncate! */
539 		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;
540 
541 		/* nothing left?   nuke it */
542 		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
543 			if (vm_nphysseg == 1)
544 				panic("uvm_page_physget: out of memory!");
545 			vm_nphysseg--;
546 			for (x = lcv ; x < vm_nphysseg ; x++)
547 				/* structure copy */
548 				vm_physmem[x] = vm_physmem[x+1];
549 		}
550 		return (TRUE);
551 	}
552 
553 	return (FALSE);        /* whoops! */
554 }
555 
556 boolean_t
557 uvm_page_physget(paddr_t *paddrp)
558 {
559 	int i;
560 	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);
561 
562 	/* try in the order of freelist preference */
563 	for (i = 0; i < VM_NFREELIST; i++)
564 		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
565 			return (TRUE);
566 	return (FALSE);
567 }
568 #endif /* PMAP_STEAL_MEMORY */
569 
570 /*
571  * uvm_page_physload: load physical memory into VM system
572  *
573  * => all args are PFs
574  * => all pages in start/end get vm_page structures
575  * => areas marked by avail_start/avail_end get added to the free page pool
576  * => we are limited to VM_PHYSSEG_MAX physical memory segments
577  */
578 
579 void
580 uvm_page_physload_flags(paddr_t start, paddr_t end, paddr_t avail_start,
581     paddr_t avail_end, int free_list, int flags)
582 {
583 	int preload, lcv;
584 	psize_t npages;
585 	struct vm_page *pgs;
586 	struct vm_physseg *ps;
587 
588 	if (uvmexp.pagesize == 0)
589 		panic("uvm_page_physload: page size not set!");
590 
591 	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
592 		panic("uvm_page_physload: bad free list %d", free_list);
593 
594 	if (start >= end)
595 		panic("uvm_page_physload: start >= end");
596 
597 	/*
598 	 * do we have room?
599 	 */
600 	if (vm_nphysseg == VM_PHYSSEG_MAX) {
601 		printf("uvm_page_physload: unable to load physical memory "
602 		    "segment\n");
603 		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
604 		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
605 		printf("\tincrease VM_PHYSSEG_MAX\n");
606 		return;
607 	}
608 
609 	/*
610 	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
611 	 * called yet, so malloc is not available).
612 	 */
613 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
614 		if (vm_physmem[lcv].pgs)
615 			break;
616 	}
617 	preload = (lcv == vm_nphysseg);
618 
619 	/*
620 	 * if VM is already running, attempt to malloc() vm_page structures
621 	 */
622 	if (!preload) {
623 		/*
624 		 * XXXCDC: need some sort of lockout for this case
625 		 * right now it is only used by devices so it should be alright.
626 		 */
627  		paddr_t paddr;
628 
629  		npages = end - start;  /* # of pages */
630 
631 		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
632 		    npages * sizeof(*pgs));
633 		if (pgs == NULL) {
634 			printf("uvm_page_physload: can not malloc vm_page "
635 			    "structs for segment\n");
636 			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
637 			return;
638 		}
639 		/* init phys_addr and free pages, XXX uvmexp.npages */
640 		for (lcv = 0, paddr = ptoa(start); lcv < npages;
641 		    lcv++, paddr += PAGE_SIZE) {
642 			pgs[lcv].phys_addr = paddr;
643 #ifdef __HAVE_VM_PAGE_MD
644 			VM_MDPAGE_INIT(&pgs[lcv]);
645 #endif
646 			if (atop(paddr) >= avail_start &&
647 			    atop(paddr) <= avail_end) {
648 				if (flags & PHYSLOAD_DEVICE) {
649 					atomic_setbits_int(&pgs[lcv].pg_flags,
650 					    PG_DEV);
651 					pgs[lcv].wire_count = 1;
652 				} else {
653 #if defined(VM_PHYSSEG_NOADD)
654 		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
655 #endif
656 				}
657 			}
658 		}
659 
660 		/*
661 		 * Add pages to free pool.
662 		 */
663 		if ((flags & PHYSLOAD_DEVICE) == 0) {
664 			uvm_pmr_freepages(&pgs[avail_start - start],
665 			    avail_end - avail_start);
666 		}
667 
668 		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
669 	} else {
670 
671 		/* gcc complains if these don't get init'd */
672 		pgs = NULL;
673 		npages = 0;
674 
675 	}
676 
677 	/*
678 	 * now insert us in the proper place in vm_physmem[]
679 	 */
680 
681 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
682 
683 	/* random: put it at the end (easy!) */
684 	ps = &vm_physmem[vm_nphysseg];
685 
686 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
687 
688 	{
689 		int x;
690 		/* sort by address for binary search */
691 		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
692 			if (start < vm_physmem[lcv].start)
693 				break;
694 		ps = &vm_physmem[lcv];
695 		/* move back other entries, if necessary ... */
696 		for (x = vm_nphysseg ; x > lcv ; x--)
697 			/* structure copy */
698 			vm_physmem[x] = vm_physmem[x - 1];
699 	}
700 
701 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
702 
703 	{
704 		int x;
705 		/* sort by largest segment first */
706 		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
707 			if ((end - start) >
708 			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
709 				break;
710 		ps = &vm_physmem[lcv];
711 		/* move back other entries, if necessary ... */
712 		for (x = vm_nphysseg ; x > lcv ; x--)
713 			/* structure copy */
714 			vm_physmem[x] = vm_physmem[x - 1];
715 	}
716 
717 #else
718 
719 	panic("uvm_page_physload: unknown physseg strategy selected!");
720 
721 #endif
722 
723 	ps->start = start;
724 	ps->end = end;
725 	ps->avail_start = avail_start;
726 	ps->avail_end = avail_end;
727 	if (preload) {
728 		ps->pgs = NULL;
729 	} else {
730 		ps->pgs = pgs;
731 		ps->lastpg = pgs + npages - 1;
732 	}
733 	ps->free_list = free_list;
734 	vm_nphysseg++;
735 
736 	/*
737 	 * done!
738 	 */
739 
740 	return;
741 }
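/*
 * a minimal sketch of the machine-dependent bootstrap call this function
 * expects, assuming uvm_page_physload() is the usual flags == 0 wrapper
 * around uvm_page_physload_flags() (all arguments are page frame numbers,
 * hence the atop() conversions; seg_start/seg_end stand in for whatever
 * range the MD code discovered):
 *
 *	paddr_t first = atop(round_page(seg_start));
 *	paddr_t last = atop(trunc_page(seg_end));
 *
 *	uvm_page_physload(first, last, first, last, VM_FREELIST_DEFAULT);
 *
 * segments must be loaded before uvm_page_init() runs, otherwise the
 * "no memory pre-allocated" panic fires.
 */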
742 
743 #ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
744 
745 void uvm_page_physdump(void); /* SHUT UP GCC */
746 
747 /* call from DDB */
748 void
749 uvm_page_physdump(void)
750 {
751 	int lcv;
752 
753 	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
754 	    vm_nphysseg, VM_PHYSSEG_MAX);
755 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
756 		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
757 		    (long long)vm_physmem[lcv].start,
758 		    (long long)vm_physmem[lcv].end,
759 		    (long long)vm_physmem[lcv].avail_start,
760 		    (long long)vm_physmem[lcv].avail_end);
761 	printf("STRATEGY = ");
762 	switch (VM_PHYSSEG_STRAT) {
763 	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
764 	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
765 	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
766 	default: printf("<<UNKNOWN>>!!!!\n");
767 	}
768 }
769 #endif
770 
771 void
772 uvm_shutdown(void)
773 {
774 #ifdef UVM_SWAP_ENCRYPT
775 	uvm_swap_finicrypt_all();
776 #endif
777 }
778 
779 /*
780  * uvm_pagealloc: allocate a vm_page from the free page pool.
781  *
782  * => return null if no pages free
783  * => wake up pagedaemon if number of free pages drops below low water mark
784  * => if obj != NULL, obj must be locked (to put in tree)
785  * => if anon != NULL, anon must be locked (to put in anon)
786  * => only one of obj or anon can be non-null
787  * => caller must activate/deactivate page if it is not wired.
788  */
789 
790 struct vm_page *
791 uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
792     int flags)
793 {
794 	struct vm_page *pg;
795 	struct pglist pgl;
796 	int pmr_flags;
797 	boolean_t use_reserve;
798 	UVMHIST_FUNC("uvm_pagealloc"); UVMHIST_CALLED(pghist);
799 
800 	KASSERT(obj == NULL || anon == NULL);
801 	KASSERT(off == trunc_page(off));
802 
803 	/*
804 	 * check to see if we need to generate some free pages by waking
805 	 * up the pagedaemon.
806 	 */
807 	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
808 	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
809 	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
810 		wakeup(&uvm.pagedaemon);
811 
812 	/*
813 	 * fail if any of these conditions is true:
814 	 * [1]  there really are no free pages, or
815 	 * [2]  only kernel "reserved" pages remain and
816 	 *        the page isn't being allocated to a kernel object.
817 	 * [3]  only pagedaemon "reserved" pages remain and
818 	 *        the requestor isn't the pagedaemon.
819 	 */
820 
821 	use_reserve = (flags & UVM_PGA_USERESERVE) ||
822 		(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
823 	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
824 	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
825 	     !((curproc == uvm.pagedaemon_proc) ||
826 	      (curproc == syncerproc))))
827 		goto fail;
828 
829 	pmr_flags = UVM_PLA_NOWAIT;
830 	if (flags & UVM_PGA_ZERO)
831 		pmr_flags |= UVM_PLA_ZERO;
832 	TAILQ_INIT(&pgl);
833 	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
834 		goto fail;
835 
836 	pg = TAILQ_FIRST(&pgl);
837 	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);
838 
839 	pg->offset = off;
840 	pg->uobject = obj;
841 	pg->uanon = anon;
842 	KASSERT((pg->pg_flags & PG_DEV) == 0);
843 	atomic_setbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
844 	if (flags & UVM_PGA_ZERO)
845 		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
846 	if (anon) {
847 		anon->an_page = pg;
848 		atomic_setbits_int(&pg->pg_flags, PQ_ANON);
849 	} else if (obj)
850 		uvm_pageinsert(pg);
851 
852 #if defined(UVM_PAGE_TRKOWN)
853 	pg->owner_tag = NULL;
854 #endif
855 	UVM_PAGE_OWN(pg, "new alloc");
856 
857 	UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
858 	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
859 	return(pg);
860 
861  fail:
862 	UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
863 	return (NULL);
864 }
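/*
 * a rough sketch of a typical caller (illustrative only): allocate a
 * zeroed page for an object and sleep for more memory when the free
 * pool is exhausted:
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO)) == NULL)
 *		uvm_wait("pgalloc");
 *
 * a real caller holding the object lock would drop it around the sleep.
 * the page comes back PG_BUSY and it is up to the caller to unbusy it
 * and, per the comment above, to activate or deactivate it if it is not
 * wired.
 */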
865 
866 /*
867  * uvm_pagerealloc: reallocate a page from one object to another
868  *
869  * => both objects must be locked
870  */
871 
872 void
873 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
874 {
875 
876 	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);
877 
878 	/*
879 	 * remove it from the old object
880 	 */
881 
882 	if (pg->uobject) {
883 		uvm_pageremove(pg);
884 	}
885 
886 	/*
887 	 * put it in the new object
888 	 */
889 
890 	if (newobj) {
891 		pg->uobject = newobj;
892 		pg->offset = newoff;
893 		pg->pg_version++;
894 		uvm_pageinsert(pg);
895 	}
896 }
897 
898 
899 /*
900  * uvm_pagefree: free page
901  *
902  * => erase page's identity (i.e. remove from object)
903  * => put page on free list
904  * => caller must lock owning object (either anon or uvm_object)
905  * => caller must lock page queues
906  * => assumes all valid mappings of pg are gone
907  */
908 
909 void
910 uvm_pagefree(struct vm_page *pg)
911 {
912 	int saved_loan_count = pg->loan_count;
913 	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);
914 
915 #ifdef DEBUG
916 	if (pg->uobject == (void *)0xdeadbeef &&
917 	    pg->uanon == (void *)0xdeadbeef) {
918 		panic("uvm_pagefree: freeing free page %p", pg);
919 	}
920 #endif
921 
922 	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
923 	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
924 	KASSERT((pg->pg_flags & PG_DEV) == 0);
925 
926 	/*
927 	 * if the page was an object page (and thus "TABLED"), remove it
928 	 * from the object.
929 	 */
930 
931 	if (pg->pg_flags & PG_TABLED) {
932 
933 		/*
934 		 * if the object page is on loan we are going to drop ownership.
935 		 * it is possible that an anon will take over as owner for this
936 		 * page later on.   the anon will want a !PG_CLEAN page so that
937 		 * it knows it needs to allocate swap if it wants to page the
938 		 * page out.
939 		 */
940 
941 		/* in case an anon takes over */
942 		if (saved_loan_count)
943 			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
944 		uvm_pageremove(pg);
945 
946 		/*
947 		 * if our page was on loan, then we just lost control over it
948 		 * (in fact, if it was loaned to an anon, the anon may have
949 		 * already taken over ownership of the page by now and thus
950 		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
951 		 * return (when the last loan is dropped, then the page can be
952 		 * freed by whatever was holding the last loan).
953 		 */
954 
955 		if (saved_loan_count)
956 			return;
957 	} else if (saved_loan_count && pg->uanon) {
958 		/*
959 		 * if our page is owned by an anon and is loaned out to the
960 		 * kernel then we just want to drop ownership and return.
961 		 * the kernel must free the page when all its loans clear ...
962 		 * note that the kernel can't change the loan status of our
963 		 * page as long as we are holding PQ lock.
964 		 */
965 		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
966 		pg->uanon->an_page = NULL;
967 		pg->uanon = NULL;
968 		return;
969 	}
970 	KASSERT(saved_loan_count == 0);
971 
972 	/*
973 	 * now remove the page from the queues
974 	 */
975 
976 	if (pg->pg_flags & PQ_ACTIVE) {
977 		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
978 		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
979 		uvmexp.active--;
980 	}
981 	if (pg->pg_flags & PQ_INACTIVE) {
982 		if (pg->pg_flags & PQ_SWAPBACKED)
983 			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
984 		else
985 			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
986 		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
987 		uvmexp.inactive--;
988 	}
989 
990 	/*
991 	 * if the page was wired, unwire it now.
992 	 */
993 
994 	if (pg->wire_count) {
995 		pg->wire_count = 0;
996 		uvmexp.wired--;
997 	}
998 	if (pg->uanon) {
999 		pg->uanon->an_page = NULL;
1000 		pg->uanon = NULL;
1001 		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
1002 	}
1003 
1004 	/*
1005 	 * Clean page state bits.
1006 	 */
1007 	atomic_clearbits_int(&pg->pg_flags, PQ_AOBJ); /* XXX: find culprit */
1008 	atomic_clearbits_int(&pg->pg_flags, PQ_ENCRYPT|
1009 	    PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|PG_CLEAN|PG_CLEANCHK);
1010 
1011 	/*
1012 	 * and put on free queue
1013 	 */
1014 
1015 #ifdef DEBUG
1016 	pg->uobject = (void *)0xdeadbeef;
1017 	pg->offset = 0xdeadbeef;
1018 	pg->uanon = (void *)0xdeadbeef;
1019 #endif
1020 
1021 	uvm_pmr_freepages(pg, 1);
1022 
1023 	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
1024 		uvm.page_idle_zero = vm_page_zero_enable;
1025 }
1026 
1027 /*
1028  * uvm_page_unbusy: unbusy an array of pages.
1029  *
1030  * => pages must either all belong to the same object, or all belong to anons.
1031  * => if pages are object-owned, object must be locked.
1032  * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
1033  */
1034 
1035 void
1036 uvm_page_unbusy(struct vm_page **pgs, int npgs)
1037 {
1038 	struct vm_page *pg;
1039 	struct uvm_object *uobj;
1040 	int i;
1041 	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist);
1042 
1043 	for (i = 0; i < npgs; i++) {
1044 		pg = pgs[i];
1045 
1046 		if (pg == NULL || pg == PGO_DONTCARE) {
1047 			continue;
1048 		}
1049 		if (pg->pg_flags & PG_WANTED) {
1050 			wakeup(pg);
1051 		}
1052 		if (pg->pg_flags & PG_RELEASED) {
1053 			UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
1054 			uobj = pg->uobject;
1055 			if (uobj != NULL) {
1056 				uvm_lock_pageq();
1057 				pmap_page_protect(pg, VM_PROT_NONE);
1058 				/* XXX won't happen right now */
1059 				if (pg->pg_flags & PQ_ANON)
1060 					uao_dropswap(uobj,
1061 					    pg->offset >> PAGE_SHIFT);
1062 				uvm_pagefree(pg);
1063 				uvm_unlock_pageq();
1064 			} else {
1065 				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
1066 				UVM_PAGE_OWN(pg, NULL);
1067 				uvm_anfree(pg->uanon);
1068 			}
1069 		} else {
1070 			UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
1071 			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
1072 			UVM_PAGE_OWN(pg, NULL);
1073 		}
1074 	}
1075 }
1076 
1077 #if defined(UVM_PAGE_TRKOWN)
1078 /*
1079  * uvm_page_own: set or release page ownership
1080  *
1081  * => this is a debugging function that keeps track of who sets PG_BUSY
1082  *	and where they do it.   it can be used to track down problems
1083  *	such as a process setting "PG_BUSY" and never releasing it.
1084  * => page's object [if any] must be locked
1085  * => if "tag" is NULL then we are releasing page ownership
1086  */
1087 void
1088 uvm_page_own(struct vm_page *pg, char *tag)
1089 {
1090 	/* gain ownership? */
1091 	if (tag) {
1092 		if (pg->owner_tag) {
1093 			printf("uvm_page_own: page %p already owned "
1094 			    "by proc %d [%s]\n", pg,
1095 			     pg->owner, pg->owner_tag);
1096 			panic("uvm_page_own");
1097 		}
1098 		pg->owner = (curproc) ? curproc->p_pid :  (pid_t) -1;
1099 		pg->owner_tag = tag;
1100 		return;
1101 	}
1102 
1103 	/* drop ownership */
1104 	if (pg->owner_tag == NULL) {
1105 		printf("uvm_page_own: dropping ownership of a non-owned "
1106 		    "page (%p)\n", pg);
1107 		panic("uvm_page_own");
1108 	}
1109 	pg->owner_tag = NULL;
1110 	return;
1111 }
1112 #endif
1113 
1114 /*
1115  * uvm_pageidlezero: zero free pages while the system is idle.
1116  *
1117  * => we do at least one iteration per call, if we are below the target.
1118  * => we loop until we either reach the target or the cpu is no longer
1119  *	idle (i.e. there is a process ready to run).
1120  */
1121 void
1122 uvm_pageidlezero(void)
1123 {
1124 #if 0 /* disabled: need new code */
1125 	struct vm_page *pg;
1126 	struct pgfreelist *pgfl;
1127 	int free_list;
1128 	UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist);
1129 
1130 	do {
1131 		uvm_lock_fpageq();
1132 
1133 		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
1134 			uvm.page_idle_zero = FALSE;
1135 			uvm_unlock_fpageq();
1136 			return;
1137 		}
1138 
1139 		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
1140 			pgfl = &uvm.page_free[free_list];
1141 			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
1142 			    PGFL_UNKNOWN])) != NULL)
1143 				break;
1144 		}
1145 
1146 		if (pg == NULL) {
1147 			/*
1148 			 * No non-zero'd pages; don't bother trying again
1149 			 * until we know we have non-zero'd pages free.
1150 			 */
1151 			uvm.page_idle_zero = FALSE;
1152 			uvm_unlock_fpageq();
1153 			return;
1154 		}
1155 
1156 		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
1157 		uvmexp.free--;
1158 		uvm_unlock_fpageq();
1159 
1160 #ifdef PMAP_PAGEIDLEZERO
1161 		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
1162 			/*
1163 			 * The machine-dependent code detected some
1164 			 * reason for us to abort zeroing pages,
1165 			 * probably because there is a process now
1166 			 * ready to run.
1167 			 */
1168 			uvm_lock_fpageq();
1169 			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
1170 			    pg, pageq);
1171 			uvmexp.free++;
1172 			uvmexp.zeroaborts++;
1173 			uvm_unlock_fpageq();
1174 			return;
1175 		}
1176 #else
1177 		/*
1178 		 * XXX This will toast the cache unless the pmap_zero_page()
1179 		 * XXX implementation does uncached access.
1180 		 */
1181 		pmap_zero_page(pg);
1182 #endif
1183 		atomic_setbits_int(&pg->pg_flags, PG_ZERO);
1184 
1185 		uvm_lock_fpageq();
1186 		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
1187 		uvmexp.free++;
1188 		uvmexp.zeropages++;
1189 		uvm_unlock_fpageq();
1190 	} while (curcpu_is_idle());
1191 #endif /* 0 */
1192 }
1193 
1194 /*
1195  * when VM_PHYSSEG_MAX is 1, we can simplify these functions
1196  */
1197 
1198 #if VM_PHYSSEG_MAX > 1
1199 /*
1200  * vm_physseg_find: find vm_physseg structure that belongs to a PA
1201  */
1202 int
1203 vm_physseg_find(paddr_t pframe, int *offp)
1204 {
1205 
1206 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
1207 	/* binary search for it */
1208 	int	start, len, try;
1209 
1210 	/*
1211 	 * if try is too large (thus target is less than try) we reduce
1212 	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
1213 	 *
1214 	 * if the try is too small (thus target is greater than try) then
1215 	 * we set the new start to be (try + 1).   this means we need to
1216 	 * reduce the length to (round(len/2) - 1).
1217 	 *
1218 	 * note "adjust" below which takes advantage of the fact that
1219 	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
1220 	 * for any value of len we may have
1221 	 */
1222 
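	/*
	 * for example, with len == 5: round(5/2) - 1 == 2 and
	 * trunc((5 - 1) / 2) == 2, so decrementing len before the loop's
	 * "len = len / 2" update lands on the same value either way.
	 */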
1223 	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
1224 		try = start + (len / 2);	/* try in the middle */
1225 
1226 		/* start past our try? */
1227 		if (pframe >= vm_physmem[try].start) {
1228 			/* was try correct? */
1229 			if (pframe < vm_physmem[try].end) {
1230 				if (offp)
1231 					*offp = pframe - vm_physmem[try].start;
1232 				return(try);            /* got it */
1233 			}
1234 			start = try + 1;	/* next time, start here */
1235 			len--;			/* "adjust" */
1236 		} else {
1237 			/*
1238 			 * pframe before try, just reduce length of
1239 			 * region, done in "for" loop
1240 			 */
1241 		}
1242 	}
1243 	return(-1);
1244 
1245 #else
1246 	/* linear search for it */
1247 	int	lcv;
1248 
1249 	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
1250 		if (pframe >= vm_physmem[lcv].start &&
1251 		    pframe < vm_physmem[lcv].end) {
1252 			if (offp)
1253 				*offp = pframe - vm_physmem[lcv].start;
1254 			return(lcv);		   /* got it */
1255 		}
1256 	}
1257 	return(-1);
1258 
1259 #endif
1260 }
1261 
1262 /*
1263  * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
1264  * back from an I/O mapping (ugh!).   used in some MD code as well.
1265  */
1266 struct vm_page *
1267 PHYS_TO_VM_PAGE(paddr_t pa)
1268 {
1269 	paddr_t pf = atop(pa);
1270 	int	off;
1271 	int	psi;
1272 
1273 	psi = vm_physseg_find(pf, &off);
1274 
1275 	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
1276 }
1277 #endif /* VM_PHYSSEG_MAX > 1 */
1278 
1279 /*
1280  * uvm_pagelookup: look up a page
1281  *
1282  * => caller should lock object to keep someone from pulling the page
1283  *	out from under it
1284  */
1285 struct vm_page *
1286 uvm_pagelookup(struct uvm_object *obj, voff_t off)
1287 {
1288 	/* XXX if stack is too much, handroll */
1289 	struct vm_page pg;
1290 
1291 	pg.offset = off;
1292 	return (RB_FIND(uvm_objtree, &obj->memt, &pg));
1293 }
1294 
1295 /*
1296  * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
1297  *
1298  * => caller must lock page queues
1299  */
1300 void
1301 uvm_pagewire(struct vm_page *pg)
1302 {
1303 	if (pg->wire_count == 0) {
1304 		if (pg->pg_flags & PQ_ACTIVE) {
1305 			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1306 			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1307 			uvmexp.active--;
1308 		}
1309 		if (pg->pg_flags & PQ_INACTIVE) {
1310 			if (pg->pg_flags & PQ_SWAPBACKED)
1311 				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
1312 			else
1313 				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
1314 			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1315 			uvmexp.inactive--;
1316 		}
1317 		uvmexp.wired++;
1318 	}
1319 	pg->wire_count++;
1320 }
1321 
1322 /*
1323  * uvm_pageunwire: unwire the page.
1324  *
1325  * => activate if wire count goes to zero.
1326  * => caller must lock page queues
1327  */
1328 void
1329 uvm_pageunwire(struct vm_page *pg)
1330 {
1331 	pg->wire_count--;
1332 	if (pg->wire_count == 0) {
1333 		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
1334 		uvmexp.active++;
1335 		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
1336 		uvmexp.wired--;
1337 	}
1338 }
1339 
1340 /*
1341  * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
1342  *
1343  * => caller must lock page queues
1344  * => caller must check to make sure page is not wired
1345  * => object that page belongs to must be locked (so we can adjust pg->flags)
1346  */
1347 void
1348 uvm_pagedeactivate(struct vm_page *pg)
1349 {
1350 	if (pg->pg_flags & PQ_ACTIVE) {
1351 		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1352 		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1353 		uvmexp.active--;
1354 	}
1355 	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
1356 		KASSERT(pg->wire_count == 0);
1357 		if (pg->pg_flags & PQ_SWAPBACKED)
1358 			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
1359 		else
1360 			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
1361 		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
1362 		uvmexp.inactive++;
1363 		pmap_clear_reference(pg);
1364 		/*
1365 		 * update the "clean" bit.  this isn't 100%
1366 		 * accurate, and doesn't have to be.  we'll
1367 		 * re-sync it after we zap all mappings when
1368 		 * scanning the inactive list.
1369 		 */
1370 		if ((pg->pg_flags & PG_CLEAN) != 0 &&
1371 		    pmap_is_modified(pg))
1372 			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1373 	}
1374 }
1375 
1376 /*
1377  * uvm_pageactivate: activate page
1378  *
1379  * => caller must lock page queues
1380  */
1381 void
1382 uvm_pageactivate(struct vm_page *pg)
1383 {
1384 	if (pg->pg_flags & PQ_INACTIVE) {
1385 		if (pg->pg_flags & PQ_SWAPBACKED)
1386 			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
1387 		else
1388 			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
1389 		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1390 		uvmexp.inactive--;
1391 	}
1392 	if (pg->wire_count == 0) {
1393 
1394 		/*
1395 		 * if page is already active, remove it from list so we
1396 		 * can put it at tail.  if it wasn't active, then mark
1397 		 * it active and bump active count
1398 		 */
1399 		if (pg->pg_flags & PQ_ACTIVE)
1400 			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1401 		else {
1402 			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
1403 			uvmexp.active++;
1404 		}
1405 
1406 		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
1407 	}
1408 }
1409 
1410 /*
1411  * uvm_pagezero: zero fill a page
1412  *
1413  * => if page is part of an object then the object should be locked
1414  *	to protect pg->flags.
1415  */
1416 void
1417 uvm_pagezero(struct vm_page *pg)
1418 {
1419 	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1420 	pmap_zero_page(pg);
1421 }
1422 
1423 /*
1424  * uvm_pagecopy: copy a page
1425  *
1426  * => if page is part of an object then the object should be locked
1427  *	to protect pg->flags.
1428  */
1429 void
1430 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
1431 {
1432 	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
1433 	pmap_copy_page(src, dst);
1434 }
1435 
1436 /*
1437  * uvm_page_lookup_freelist: look up the free list for the specified page
1438  */
1439 int
1440 uvm_page_lookup_freelist(struct vm_page *pg)
1441 {
1442 #if VM_PHYSSEG_MAX == 1
1443 	return (vm_physmem[0].free_list);
1444 #else
1445 	int lcv;
1446 
1447 	lcv = vm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
1448 	KASSERT(lcv != -1);
1449 	return (vm_physmem[lcv].free_list);
1450 #endif
1451 }
1452