/*	$OpenBSD: uvm_page.c,v 1.144 2015/10/30 16:47:01 miod Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * for object trees
 */
RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);

int
uvm_pagecmp(struct vm_page *a, struct vm_page *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

/*
 * global vars... XXXCDC: move to uvm. structure.
 */
/*
 * physical memory config is stored in vm_physmem.
 */
struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * local variables
 */
/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */
static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * local prototypes
 */
static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */
/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock page queues XXX questionable
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */
__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct vm_page	*dupe;

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
	/* not allowed to insert over another page */
	KASSERT(dupe == NULL);
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object
 *
 * => caller must lock page queues
 */
static __inline void
uvm_pageremove(struct vm_page *pg)
{
	KASSERT(pg->pg_flags & PG_TABLED);
	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */
void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray, curpg;
	int lcv, i;
	paddr_t paddr, pgno;
	struct vm_physseg *seg;

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	mtx_init(&uvm.pageqlock, IPL_NONE);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		freepages += (seg->end - seg->start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by (PAGE_SIZE plus the size of the vm_page
	 * structure).   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */
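	/*
	 * Worked example (illustrative numbers, not from this file):
	 * with 4 KB pages and a hypothetical 96-byte struct vm_page,
	 * each usable page costs 4096 + 96 bytes of raw memory, so
	 * pagecount comes out at roughly 97.7% of freepages; the
	 * remaining ~2.3% is consumed by the vm_page array itself.
	 */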

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/* init the vm_page structures and put them in the correct place. */
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		n = seg->end - seg->start;
		if (n > pagecount) {
			panic("uvm_page_init: lost %ld page(s) in init",
			    (long)(n - pagecount));
			    /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		seg->pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		seg->lastpg = seg->pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		pgno = seg->start;
		paddr = ptoa(pgno);
		for (i = 0, curpg = seg->pgs; i < n;
		    i++, curpg++, pgno++, paddr += PAGE_SIZE) {
			curpg->phys_addr = paddr;
			VM_MDPAGE_INIT(curpg);
			if (pgno >= seg->avail_start &&
			    pgno < seg->avail_end) {
				uvmexp.npages++;
			}
		}

		/* Add pages to free pool. */
		uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
		    seg->avail_end - seg->avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/* init locks for kernel threads */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */
void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
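
/*
 * Example (illustrative): with uvmexp.pagesize == 4096, the code above
 * leaves uvmexp.pagemask == 0xfff and uvmexp.pageshift == 12, so a
 * length such as 0x3000 spans (0x3000 >> 12) == 3 pages.
 */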

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */
vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/* on first call to this function, initialize ourselves. */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/* allocate virtual memory for this request */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/* allocate and mapin physical pages to back new virtual pages */
	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {
		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
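
/*
 * Usage sketch (illustrative, not from this file): early bootstrap code
 * can use this to grab wired kernel memory before uvm_page_init() has
 * run, e.g.
 *
 *	msgbuf_vaddr = uvm_pageboot_alloc(MSGBUFSIZE);
 *
 * where msgbuf_vaddr and MSGBUFSIZE are stand-ins for whatever the
 * platform actually reserves this way.
 */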

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int lcv;
	struct vm_physseg *seg;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{
		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (seg->avail_start == seg->start &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_start);
			seg->avail_start++;
			seg->start++;
			/* nothing left?   nuke it */
			if (seg->avail_start == seg->end) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (seg->avail_end == seg->end &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_end - 1);
			seg->avail_end--;
			seg->end--;
			/* nothing left?   nuke it */
			if (seg->avail_end == seg->start) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg ; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{

		/* any room in this bank? */
		if (seg->avail_start >= seg->avail_end)
			continue;  /* nope */

		*paddrp = ptoa(seg->avail_start);
		seg->avail_start++;
		/* truncate! */
		seg->start = seg->avail_start;

		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (; lcv < vm_nphysseg ; lcv++, seg++)
				/* structure copy */
				seg[0] = seg[1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are page frame numbers (PFs)
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int flags)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps, *seg;

#ifdef DIAGNOSTIC
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (start >= end)
		panic("uvm_page_physload: start >= end");
#endif

	/* do we have room? */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
		if (seg->pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/* if VM is already running, attempt to malloc() vm_page structures */
	if (!preload) {
		/*
		 * XXXCDC: need some sort of lockout for this case;
		 * right now it is only used by devices, so it should
		 * be alright.
		 */
		paddr_t paddr;

		npages = end - start;  /* # of pages */

		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    npages * sizeof(*pgs));
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* init phys_addr and free pages, XXX uvmexp.npages */
		for (lcv = 0, paddr = ptoa(start); lcv < npages;
		    lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			VM_MDPAGE_INIT(&pgs[lcv]);
			if (atop(paddr) >= avail_start &&
			    atop(paddr) < avail_end) {
				if (flags & PHYSLOAD_DEVICE) {
					atomic_setbits_int(&pgs[lcv].pg_flags,
					    PG_DEV);
					pgs[lcv].wire_count = 1;
				} else {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#endif
				}
			}
		}

		/* Add pages to free pool. */
		if ((flags & PHYSLOAD_DEVICE) == 0) {
			uvm_pmr_freepages(&pgs[avail_start - start],
			    avail_end - avail_start);
		}

		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
	} else {
		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;
	}

	/* now insert us in the proper place in vm_physmem[] */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if (start < seg->start)
				break;
		ps = seg;
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if ((end - start) >
			    (seg->end - seg->start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	vm_nphysseg++;

	return;
}
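
/*
 * Usage sketch (illustrative, not from this file): MD startup code
 * typically registers each chunk of RAM it discovers with something like
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), 0);
 *
 * passing PHYSLOAD_DEVICE instead of 0 for device memory whose pages
 * must stay out of the free page pool.
 */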

#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;
	struct vm_physseg *seg;

	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)seg->start,
		    (long long)seg->end,
		    (long long)seg->avail_start,
		    (long long)seg->avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
}

/*
 * Perform insert of a given page in the specified anon or obj.
 * This is basically uvm_pagealloc, but with the page already given.
 */
void
uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
    struct vm_anon *anon)
{
	int	flags;

	flags = PG_BUSY | PG_FAKE;
	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;

	if (anon) {
		anon->an_page = pg;
		flags |= PQ_ANON;
	} else if (obj)
		uvm_pageinsert(pg);
	atomic_setbits_int(&pg->pg_flags, flags);
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");
}

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by caller.
 * => returns 0 on success or errno on failure
 * => doesn't take into account clean non-busy pages on inactive list
 *	that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 * => flags:
 *	UVM_PLA_NOWAIT	fail if allocation fails
 *	UVM_PLA_WAITOK	wait for memory to become avail
 *	UVM_PLA_ZERO	return zeroed memory
 */
int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
{
	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));

	if (size == 0)
		return (EINVAL);
	size = atop(round_page(size));

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * XXX uvm_pglistalloc is currently only used for kernel
	 * objects. Unlike the checks in uvm_pagealloc, below, here
	 * we are always allowed to use the kernel reserve. However, we
	 * have to enforce the pagedaemon reserve here or allocations
	 * via this path could consume everything and we can't
	 * recover in the page daemon.
	 */
 again:
	if ((uvmexp.free <= uvmexp.reserve_pagedaemon + size &&
	    !((curproc == uvm.pagedaemon_proc) ||
		(curproc == syncerproc)))) {
		if (flags & UVM_PLA_WAITOK) {
			uvm_wait("uvm_pglistalloc");
			goto again;
		}
		return (ENOMEM);
	}

	if ((high & PAGE_MASK) != PAGE_MASK) {
		printf("uvm_pglistalloc: Upper boundary 0x%lx "
		    "not on pagemask.\n", (unsigned long)high);
	}

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	low = atop(roundup(low, alignment));
	/*
	 * high + 1 may result in overflow, in which case high becomes 0x0,
	 * which is the 'don't care' value.
	 * The only requirement in that case is that low is also 0x0, or the
	 * low<high assert will fail.
	 */
	high = atop(high + 1);
	alignment = atop(alignment);
	if (boundary < PAGE_SIZE && boundary != 0)
		boundary = PAGE_SIZE;
	boundary = atop(boundary);

	return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
	    flags, rlist);
}
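
/*
 * Usage sketch (illustrative): callers supply an initialized pglist,
 * much as uvm_pagealloc_multi() below does against dma_constraint:
 *
 *	struct pglist plist;
 *
 *	TAILQ_INIT(&plist);
 *	error = uvm_pglistalloc(size, low, high, 0, 0, &plist,
 *	    atop(round_page(size)), UVM_PLA_WAITOK);
 *
 * where size, low and high are whatever the caller's constraint is.
 */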

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */
void
uvm_pglistfree(struct pglist *list)
{
	uvm_pmr_freepageq(list);
}

/*
 * interface used by the buffer cache to allocate one buffer at a time.
 * The pages are allocated wired, in DMA-accessible memory.
 */
int
uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags)
{
	struct pglist    plist;
	struct vm_page  *pg;
	int              i, r;

	TAILQ_INIT(&plist);
	r = uvm_pglistalloc(size, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
	    flags);
	if (r == 0) {
		i = 0;
		while ((pg = TAILQ_FIRST(&plist)) != NULL) {
			pg->wire_count = 1;
			atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
			KASSERT((pg->pg_flags & PG_DEV) == 0);
			TAILQ_REMOVE(&plist, pg, pageq);
			uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
		}
	}
	return r;
}

/*
 * interface used by the buffer cache to reallocate one buffer at a time.
 * The pages are reallocated wired, outside the DMA-accessible region.
 */
int
uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags, struct uvm_constraint_range *where)
{
	struct pglist    plist;
	struct vm_page  *pg, *tpg;
	int              i, r;
	voff_t		offset;

	TAILQ_INIT(&plist);
	if (size == 0)
		panic("size 0 uvm_pagerealloc");
	r = uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
	    0, &plist, atop(round_page(size)), flags);
	if (r == 0) {
		i = 0;
		while ((pg = TAILQ_FIRST(&plist)) != NULL) {
			offset = off + ptoa(i++);
			tpg = uvm_pagelookup(obj, offset);
			pg->wire_count = 1;
			atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
			KASSERT((pg->pg_flags & PG_DEV) == 0);
			TAILQ_REMOVE(&plist, pg, pageq);
			uvm_pagecopy(tpg, pg);
			uvm_pagefree(tpg);
			uvm_pagealloc_pg(pg, obj, offset, NULL);
		}
	}
	return r;
}

/*
 * uvm_pagealloc: allocate a vm_page.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 */

struct vm_page *
uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags)
{
	struct vm_page *pg;
	struct pglist pgl;
	int pmr_flags;
	boolean_t use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */
	use_reserve = (flags & UVM_PGA_USERESERVE) ||
		(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !((curproc == uvm.pagedaemon_proc) ||
	      (curproc == syncerproc))))
		goto fail;

	pmr_flags = UVM_PLA_NOWAIT;
	if (flags & UVM_PGA_ZERO)
		pmr_flags |= UVM_PLA_ZERO;
	TAILQ_INIT(&pgl);
	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
		goto fail;

	pg = TAILQ_FIRST(&pgl);
	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);

	uvm_pagealloc_pg(pg, obj, off, anon);
	KASSERT((pg->pg_flags & PG_DEV) == 0);
	if (flags & UVM_PGA_ZERO)
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	else
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN);

	return(pg);

fail:
	return (NULL);
}
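
/*
 * Usage sketch (illustrative): a typical object-backed allocation is
 *
 *	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL)
 *		... sleep for memory and retry, or fail ...
 *
 * at most one of the obj/anon arguments may be non-NULL, and the caller
 * activates or deactivates the page afterwards if it is not wired.
 */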

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	/* remove it from the old object */
	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/* put it in the new object */
	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}

/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */
void
uvm_pagefree(struct vm_page *pg)
{
	u_int flags_to_clear = 0;

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	KASSERT((pg->pg_flags & PG_DEV) == 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */
	if (pg->pg_flags & PG_TABLED)
		uvm_pageremove(pg);

	/* now remove the page from the queues */
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		flags_to_clear |= PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		flags_to_clear |= PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/* if the page was wired, unwire it now. */
	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
	}

	/* Clean page state bits. */
	flags_to_clear |= PQ_ANON|PQ_AOBJ|PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|
	    PG_RELEASED|PG_CLEAN|PG_CLEANCHK;
	atomic_clearbits_int(&pg->pg_flags, flags_to_clear);

	/* and put on free queue */
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif

	uvm_pmr_freepages(pg, 1);
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are anon-owned, anons must have 0 refcount.
 */
void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			uobj = pg->uobject;
			if (uobj != NULL) {
				uvm_lock_pageq();
				pmap_page_protect(pg, PROT_NONE);
				/* XXX won't happen right now */
				if (pg->pg_flags & PQ_AOBJ)
					uao_dropswap(uobj,
					    pg->offset >> PAGE_SHIFT);
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			     pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid :  (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{
	struct vm_physseg *seg;

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */
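	/*
	 * Worked trace (illustrative): searching 5 segments, start=0 and
	 * len=5 gives try=2.  If pframe lies above segment 2, start
	 * becomes 3 and the "adjust" leaves len == 4 before the loop
	 * halves it to 2, so the next try is 4; len reaching 0 without
	 * a hit means no segment contains pframe.
	 */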

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */
		seg = vm_physmem + try;

		/* start past our try? */
		if (pframe >= seg->start) {
			/* was try correct? */
			if (pframe < seg->end) {
				if (offp)
					*offp = pframe - seg->start;
				return(try);            /* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		if (pframe >= seg->start && pframe < seg->end) {
			if (offp)
				*offp = pframe - seg->start;
			return(lcv);		   /* got it */
		}
	}
	return(-1);

#endif
}

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	/* XXX if stack is too much, handroll */
	struct vm_page pg;

	pg.offset = off;
	return (RB_FIND(uvm_objtree, &obj->memt, &pg));
}
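
/*
 * Example (illustrative): since the tree is keyed on offset,
 * uvm_pagelookup(uobj, ptoa(3)) returns the resident page backing the
 * fourth page-sized slot of uobj, or NULL if none is resident.
 */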

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit.  this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {
		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_pagecount: count the number of physical pages in the address range.
 */
psize_t
uvm_pagecount(struct uvm_constraint_range* constraint)
{
	int lcv;
	psize_t sz;
	paddr_t low, high;
	paddr_t ps_low, ps_high;

	/* Algorithm uses page numbers. */
	low = atop(constraint->ucr_low);
	high = atop(constraint->ucr_high);

	sz = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps_low = MAX(low, vm_physmem[lcv].avail_start);
		ps_high = MIN(high, vm_physmem[lcv].avail_end);
		if (ps_low < ps_high)
			sz += ps_high - ps_low;
	}
	return sz;
}
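
/*
 * Example (illustrative): a constraint of ucr_low == 0 and
 * ucr_high == 0xffffffff makes uvm_pagecount() return the number of
 * managed pages below 4 GB, summed over all physical segments.
 */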