1 /* $OpenBSD: uvm_page.c,v 1.180 2024/12/27 12:04:40 mpi Exp $ */
2 /* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
3
4 /*
5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * Copyright (c) 1991, 1993, The Regents of the University of California.
7 *
8 * All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * The Mach Operating System project at Carnegie-Mellon University.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
38 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
39 *
40 *
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 */
64
65 /*
66 * uvm_page.c: page ops.
67 */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/sched.h>
72 #include <sys/vnode.h>
73 #include <sys/mount.h>
74 #include <sys/proc.h>
75 #include <sys/smr.h>
76
77 #include <uvm/uvm.h>
78
79 /*
80 * for object trees
81 */
82 RBT_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
83
84 int
85 uvm_pagecmp(const struct vm_page *a, const struct vm_page *b)
86 {
87 return a->offset < b->offset ? -1 : a->offset > b->offset;
88 }
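
/*
 * Each uvm_object keeps its resident pages in a red-black tree ("memt"),
 * keyed by the page's offset within the object.  uvm_pagecmp() is the
 * comparator used by RBT_GENERATE() above; uvm_pageinsert(),
 * uvm_pageremove() and uvm_pagelookup() below all operate on that tree.
 */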
89
90 /*
91 * global vars... XXXCDC: move to uvm. structure.
92 */
93 /*
94 * physical memory config is stored in vm_physmem.
95 */
96 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
97 int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
98
99 /*
100 * Some supported CPUs in a given architecture don't support all
101 * of the things necessary to do idle page zero'ing efficiently.
102 * We therefore provide a way to disable it from machdep code here.
103 */
104
105 /*
106 * local variables
107 */
108 /*
109 * these variables record the values returned by vm_page_bootstrap,
110 * for debugging purposes. The implementation of uvm_pageboot_alloc
111 * and pmap_startup here also uses them internally.
112 */
113 static vaddr_t virtual_space_start;
114 static vaddr_t virtual_space_end;
115
116 /*
117 * local prototypes
118 */
119 static void uvm_pageinsert(struct vm_page *);
120 static void uvm_pageremove(struct vm_page *);
121 int uvm_page_owner_locked_p(struct vm_page *, boolean_t);
122
123 /*
124 * inline functions
125 */
126 /*
127 * uvm_pageinsert: insert a page in the object
128 *
129 * => caller must lock object
130 * => call should have already set pg's object and offset pointers
131 * and bumped the version counter
132 */
133 static inline void
134 uvm_pageinsert(struct vm_page *pg)
135 {
136 struct vm_page *dupe;
137
138 KASSERT(UVM_OBJ_IS_DUMMY(pg->uobject) ||
139 rw_write_held(pg->uobject->vmobjlock));
140 KASSERT((pg->pg_flags & PG_TABLED) == 0);
141
142 dupe = RBT_INSERT(uvm_objtree, &pg->uobject->memt, pg);
143 /* not allowed to insert over another page */
144 KASSERT(dupe == NULL);
145 atomic_setbits_int(&pg->pg_flags, PG_TABLED);
146 pg->uobject->uo_npages++;
147 }
148
149 /*
150 * uvm_pageremove: remove a page from its object
151 *
152 * => caller must lock object
153 */
154 static inline void
155 uvm_pageremove(struct vm_page *pg)
156 {
157 KASSERT(UVM_OBJ_IS_DUMMY(pg->uobject) ||
158 rw_write_held(pg->uobject->vmobjlock));
159 KASSERT(pg->pg_flags & PG_TABLED);
160
161 RBT_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
162
163 atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
164 pg->uobject->uo_npages--;
165 pg->uobject = NULL;
166 pg->pg_version++;
167 }
168
169 /*
170 * uvm_page_init: init the page system. called from uvm_init().
171 *
172 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
173 */
174 void
175 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
176 {
177 vsize_t freepages, pagecount, n;
178 vm_page_t pagearray, curpg;
179 int lcv, i;
180 paddr_t paddr, pgno;
181 struct vm_physseg *seg;
182
183 /*
184 * init the page queues and page queue locks
185 */
186
187 TAILQ_INIT(&uvm.page_active);
188 TAILQ_INIT(&uvm.page_inactive);
189 mtx_init(&uvm.pageqlock, IPL_VM);
190 mtx_init(&uvm.fpageqlock, IPL_VM);
191 uvm_pmr_init();
192
193 /*
194 * allocate vm_page structures.
195 */
196
197 /*
198 * sanity check:
199 * before calling this function the MD code is expected to register
200 * some free RAM with the uvm_page_physload() function. our job
201 * now is to allocate vm_page structures for this memory.
202 */
203
204 if (vm_nphysseg == 0)
205 panic("uvm_page_bootstrap: no memory pre-allocated");
206
207 /*
208 * first calculate the number of free pages...
209 *
210 * note that we use start/end rather than avail_start/avail_end.
211 * this allows us to allocate extra vm_page structures in case we
212 * want to return some memory to the pool after booting.
213 */
214
215 freepages = 0;
216 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
217 freepages += (seg->end - seg->start);
218
219 /*
220 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
221 * use. for each page of memory we use we need a vm_page structure.
222 * thus, the total number of pages we can use is the total size of
223 * the memory divided by the PAGE_SIZE plus the size of the vm_page
224 * structure. we add one to freepages as a fudge factor to avoid
225 * truncation errors (since we can only allocate in terms of whole
226 * pages).
227 */
228
229 pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
230 (PAGE_SIZE + sizeof(struct vm_page));
231 pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
232 sizeof(struct vm_page));
233 memset(pagearray, 0, pagecount * sizeof(struct vm_page));
234
235 /* init the vm_page structures and put them in the correct place. */
236 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
237 n = seg->end - seg->start;
238 if (n > pagecount) {
239 panic("uvm_page_init: lost %ld page(s) in init",
240 (long)(n - pagecount));
241 /* XXXCDC: shouldn't happen? */
242 /* n = pagecount; */
243 }
244
245 /* set up page array pointers */
246 seg->pgs = pagearray;
247 pagearray += n;
248 pagecount -= n;
249 seg->lastpg = seg->pgs + (n - 1);
250
251 /* init and free vm_pages (we've already zeroed them) */
252 pgno = seg->start;
253 paddr = ptoa(pgno);
254 for (i = 0, curpg = seg->pgs; i < n;
255 i++, curpg++, pgno++, paddr += PAGE_SIZE) {
256 curpg->phys_addr = paddr;
257 VM_MDPAGE_INIT(curpg);
258 if (pgno >= seg->avail_start &&
259 pgno < seg->avail_end) {
260 uvmexp.npages++;
261 }
262 }
263
264 /* Add pages to free pool. */
265 uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
266 seg->avail_end - seg->avail_start);
267 }
268
269 /*
270 * pass up the values of virtual_space_start and
271 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
272 * layers of the VM.
273 */
274
275 *kvm_startp = round_page(virtual_space_start);
276 *kvm_endp = trunc_page(virtual_space_end);
277
278 /* init locks for kernel threads */
279 mtx_init(&uvm.aiodoned_lock, IPL_BIO);
280
281 /*
282 * init reserve thresholds.
283 *
284 * XXX As long as some disk drivers cannot write any physical
285 * XXX page, we need DMA reachable reserves for the pagedaemon.
286 * XXX We cannot enforce such requirement but it should be ok
287 * XXX in most of the cases because the pmemrange tries hard to
288 * XXX allocate them last.
289 */
290 uvmexp.reserve_pagedaemon = 4;
291 uvmexp.reserve_kernel = uvmexp.reserve_pagedaemon + 4;
292
293 uvm.page_init_done = TRUE;
294 }
295
296 /*
297 * uvm_setpagesize: set the page size
298 *
299 * => sets page_shift and page_mask from uvmexp.pagesize.
300 */
301 void
302 uvm_setpagesize(void)
303 {
304 if (uvmexp.pagesize == 0)
305 uvmexp.pagesize = DEFAULT_PAGE_SIZE;
306 uvmexp.pagemask = uvmexp.pagesize - 1;
307 if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
308 panic("uvm_setpagesize: page size not a power of two");
309 for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
310 if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
311 break;
312 }
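
/*
 * For example, with the common uvmexp.pagesize of 4096 bytes this leaves
 * uvmexp.pagemask == 0xfff and uvmexp.pageshift == 12.
 */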
313
314 /*
315 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
316 */
317 vaddr_t
318 uvm_pageboot_alloc(vsize_t size)
319 {
320 #if defined(PMAP_STEAL_MEMORY)
321 vaddr_t addr;
322
323 /*
324 * defer bootstrap allocation to MD code (it may want to allocate
325 * from a direct-mapped segment). pmap_steal_memory should round
326 * off virtual_space_start/virtual_space_end.
327 */
328
329 addr = pmap_steal_memory(size, &virtual_space_start,
330 &virtual_space_end);
331
332 return addr;
333
334 #else /* !PMAP_STEAL_MEMORY */
335
336 static boolean_t initialized = FALSE;
337 vaddr_t addr, vaddr;
338 paddr_t paddr;
339
340 /* round to page size */
341 size = round_page(size);
342
343 /* on first call to this function, initialize ourselves. */
344 if (initialized == FALSE) {
345 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
346
347 /* round it the way we like it */
348 virtual_space_start = round_page(virtual_space_start);
349 virtual_space_end = trunc_page(virtual_space_end);
350
351 initialized = TRUE;
352 }
353
354 /* allocate virtual memory for this request */
355 if (virtual_space_start == virtual_space_end ||
356 (virtual_space_end - virtual_space_start) < size)
357 panic("uvm_pageboot_alloc: out of virtual space");
358
359 addr = virtual_space_start;
360
361 #ifdef PMAP_GROWKERNEL
362 /*
363 * If the kernel pmap can't map the requested space,
364 * then allocate more resources for it.
365 */
366 if (uvm_maxkaddr < (addr + size)) {
367 uvm_maxkaddr = pmap_growkernel(addr + size);
368 if (uvm_maxkaddr < (addr + size))
369 panic("uvm_pageboot_alloc: pmap_growkernel() failed");
370 }
371 #endif
372
373 virtual_space_start += size;
374
375 /* allocate and mapin physical pages to back new virtual pages */
376 for (vaddr = round_page(addr) ; vaddr < addr + size ;
377 vaddr += PAGE_SIZE) {
378 if (!uvm_page_physget(&paddr))
379 panic("uvm_pageboot_alloc: out of memory");
380
381 /*
382 * Note this memory is no longer managed, so using
383 * pmap_kenter is safe.
384 */
385 pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE);
386 }
387 pmap_update(pmap_kernel());
388 return addr;
389 #endif /* PMAP_STEAL_MEMORY */
390 }
391
392 #if !defined(PMAP_STEAL_MEMORY)
393 /*
394 * uvm_page_physget: "steal" one page from the vm_physmem structure.
395 *
396 * => attempt to allocate it off the end of a segment in which the "avail"
397 * values match the start/end values. if we can't do that, then we
398 * will advance both values (making them equal, and removing some
399 * vm_page structures from the non-avail area).
400 * => return false if out of memory.
401 */
402
403 boolean_t
404 uvm_page_physget(paddr_t *paddrp)
405 {
406 int lcv;
407 struct vm_physseg *seg;
408
409 /* pass 1: try allocating from a matching end */
410 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
411 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
412 for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
413 lcv--, seg--)
414 #else
415 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
416 #endif
417 {
418 if (uvm.page_init_done == TRUE)
419 panic("uvm_page_physget: called _after_ bootstrap");
420
421 /* try from front */
422 if (seg->avail_start == seg->start &&
423 seg->avail_start < seg->avail_end) {
424 *paddrp = ptoa(seg->avail_start);
425 seg->avail_start++;
426 seg->start++;
427 /* nothing left? nuke it */
428 if (seg->avail_start == seg->end) {
429 if (vm_nphysseg == 1)
430 panic("uvm_page_physget: out of memory!");
431 vm_nphysseg--;
432 for (; lcv < vm_nphysseg; lcv++, seg++)
433 /* structure copy */
434 seg[0] = seg[1];
435 }
436 return TRUE;
437 }
438
439 /* try from rear */
440 if (seg->avail_end == seg->end &&
441 seg->avail_start < seg->avail_end) {
442 *paddrp = ptoa(seg->avail_end - 1);
443 seg->avail_end--;
444 seg->end--;
445 /* nothing left? nuke it */
446 if (seg->avail_end == seg->start) {
447 if (vm_nphysseg == 1)
448 panic("uvm_page_physget: out of memory!");
449 vm_nphysseg--;
450 for (; lcv < vm_nphysseg ; lcv++, seg++)
451 /* structure copy */
452 seg[0] = seg[1];
453 }
454 return TRUE;
455 }
456 }
457
458 /* pass2: forget about matching ends, just allocate something */
459 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
460 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
461 for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
462 lcv--, seg--)
463 #else
464 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
465 #endif
466 {
467
468 /* any room in this bank? */
469 if (seg->avail_start >= seg->avail_end)
470 continue; /* nope */
471
472 *paddrp = ptoa(seg->avail_start);
473 seg->avail_start++;
474 /* truncate! */
475 seg->start = seg->avail_start;
476
477 /* nothing left? nuke it */
478 if (seg->avail_start == seg->end) {
479 if (vm_nphysseg == 1)
480 panic("uvm_page_physget: out of memory!");
481 vm_nphysseg--;
482 for (; lcv < vm_nphysseg ; lcv++, seg++)
483 /* structure copy */
484 seg[0] = seg[1];
485 }
486 return TRUE;
487 }
488
489 return FALSE; /* whoops! */
490 }
491
492 #endif /* PMAP_STEAL_MEMORY */
493
494 /*
495 * uvm_page_physload: load physical memory into VM system
496 *
497 * => all args are PFs
498 * => all pages in start/end get vm_page structures
499 * => areas marked by avail_start/avail_end get added to the free page pool
500 * => we are limited to VM_PHYSSEG_MAX physical memory segments
501 */
502
503 void
504 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
505 paddr_t avail_end, int flags)
506 {
507 int preload, lcv;
508 psize_t npages;
509 struct vm_page *pgs;
510 struct vm_physseg *ps, *seg;
511
512 #ifdef DIAGNOSTIC
513 if (uvmexp.pagesize == 0)
514 panic("uvm_page_physload: page size not set!");
515
516 if (start >= end)
517 panic("uvm_page_physload: start >= end");
518 #endif
519
520 /* do we have room? */
521 if (vm_nphysseg == VM_PHYSSEG_MAX) {
522 printf("uvm_page_physload: unable to load physical memory "
523 "segment\n");
524 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
525 VM_PHYSSEG_MAX, (long long)start, (long long)end);
526 printf("\tincrease VM_PHYSSEG_MAX\n");
527 return;
528 }
529
530 /*
531 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
532 * called yet, so malloc is not available).
533 */
534 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
535 if (seg->pgs)
536 break;
537 }
538 preload = (lcv == vm_nphysseg);
539
540 /* if VM is already running, attempt to malloc() vm_page structures */
541 if (!preload) {
542 /*
543 * XXXCDC: need some sort of lockout for this case
544 * right now it is only used by devices so it should be alright.
545 */
546 paddr_t paddr;
547
548 npages = end - start; /* # of pages */
549
550 pgs = km_alloc(round_page(npages * sizeof(*pgs)),
551 &kv_any, &kp_zero, &kd_waitok);
552 if (pgs == NULL) {
553 printf("uvm_page_physload: can not malloc vm_page "
554 "structs for segment\n");
555 printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
556 return;
557 }
558 /* init phys_addr and free pages, XXX uvmexp.npages */
559 for (lcv = 0, paddr = ptoa(start); lcv < npages;
560 lcv++, paddr += PAGE_SIZE) {
561 pgs[lcv].phys_addr = paddr;
562 VM_MDPAGE_INIT(&pgs[lcv]);
563 if (atop(paddr) >= avail_start &&
564 atop(paddr) < avail_end) {
565 if (flags & PHYSLOAD_DEVICE) {
566 atomic_setbits_int(&pgs[lcv].pg_flags,
567 PG_DEV);
568 pgs[lcv].wire_count = 1;
569 } else {
570 #if defined(VM_PHYSSEG_NOADD)
571 panic("uvm_page_physload: tried to add RAM after vm_mem_init");
572 #endif
573 }
574 }
575 }
576
577 /* Add pages to free pool. */
578 if ((flags & PHYSLOAD_DEVICE) == 0) {
579 uvm_pmr_freepages(&pgs[avail_start - start],
580 avail_end - avail_start);
581 }
582
583 /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
584 } else {
585 /* gcc complains if these don't get init'd */
586 pgs = NULL;
587 npages = 0;
588
589 }
590
591 /* now insert us in the proper place in vm_physmem[] */
592 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
593 /* random: put it at the end (easy!) */
594 ps = &vm_physmem[vm_nphysseg];
595 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
596 {
597 int x;
598 /* sort by address for binary search */
599 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
600 if (start < seg->start)
601 break;
602 ps = seg;
603 /* move back other entries, if necessary ... */
604 for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
605 x--, seg--)
606 /* structure copy */
607 seg[1] = seg[0];
608 }
609 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
610 {
611 int x;
612 /* sort by largest segment first */
613 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
614 if ((end - start) >
615 (seg->end - seg->start))
616 break;
617 ps = &vm_physmem[lcv];
618 /* move back other entries, if necessary ... */
619 for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
620 x--, seg--)
621 /* structure copy */
622 seg[1] = seg[0];
623 }
624 #else
625 panic("uvm_page_physload: unknown physseg strategy selected!");
626 #endif
627
628 ps->start = start;
629 ps->end = end;
630 ps->avail_start = avail_start;
631 ps->avail_end = avail_end;
632 if (preload) {
633 ps->pgs = NULL;
634 } else {
635 ps->pgs = pgs;
636 ps->lastpg = pgs + npages - 1;
637 }
638 vm_nphysseg++;
639
640 return;
641 }
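
/*
 * Sketch of a typical boot-time call from MD code (the segment addresses
 * are hypothetical); all arguments are page frame numbers, hence atop():
 *
 *	uvm_page_physload(atop(seg_start_pa), atop(seg_end_pa),
 *	    atop(avail_start_pa), atop(avail_end_pa), 0);
 *
 * Device memory that needs vm_page structures but must never enter the
 * free page pool is loaded with PHYSLOAD_DEVICE instead of 0.
 */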
642
643 #ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
644
645 void uvm_page_physdump(void); /* SHUT UP GCC */
646
647 /* call from DDB */
648 void
649 uvm_page_physdump(void)
650 {
651 int lcv;
652 struct vm_physseg *seg;
653
654 printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
655 vm_nphysseg, VM_PHYSSEG_MAX);
656 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
657 printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
658 (long long)seg->start,
659 (long long)seg->end,
660 (long long)seg->avail_start,
661 (long long)seg->avail_end);
662 printf("STRATEGY = ");
663 switch (VM_PHYSSEG_STRAT) {
664 case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
665 case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
666 case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
667 default: printf("<<UNKNOWN>>!!!!\n");
668 }
669 }
670 #endif
671
672 void
673 uvm_shutdown(void)
674 {
675 #ifdef UVM_SWAP_ENCRYPT
676 uvm_swap_finicrypt_all();
677 #endif
678 smr_flush();
679 }
680
681 /*
682 * Insert a given page into the specified object or anon.
683 * This is basically uvm_pagealloc(), but with the page supplied by the caller.
684 */
685 void
686 uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
687 struct vm_anon *anon)
688 {
689 int flags;
690
691 KASSERT(obj == NULL || anon == NULL);
692 KASSERT(anon == NULL || off == 0);
693 KASSERT(off == trunc_page(off));
694 KASSERT(obj == NULL || UVM_OBJ_IS_DUMMY(obj) ||
695 rw_write_held(obj->vmobjlock));
696 KASSERT(anon == NULL || anon->an_lock == NULL ||
697 rw_write_held(anon->an_lock));
698
699 flags = PG_BUSY | PG_FAKE;
700 pg->offset = off;
701 pg->uobject = obj;
702 pg->uanon = anon;
703 KASSERT(uvm_page_owner_locked_p(pg, TRUE));
704 if (anon) {
705 anon->an_page = pg;
706 flags |= PQ_ANON;
707 } else if (obj)
708 uvm_pageinsert(pg);
709 atomic_setbits_int(&pg->pg_flags, flags);
710 #if defined(UVM_PAGE_TRKOWN)
711 pg->owner_tag = NULL;
712 #endif
713 UVM_PAGE_OWN(pg, "new alloc");
714 }
715
716 /*
717 * uvm_pglistalloc: allocate a list of pages
718 *
719 * => allocated pages are placed at the tail of rlist. rlist is
720 * assumed to be properly initialized by caller.
721 * => returns 0 on success or errno on failure
722 * => doesn't take into account clean non-busy pages on inactive list
723 * that could be used(?)
724 * => params:
725 * size the size of the allocation, rounded to page size.
726 * low the low address of the allowed allocation range.
727 * high the high address of the allowed allocation range.
728 * alignment memory must be aligned to this power-of-two boundary.
729 * boundary no segment in the allocation may cross this
730 * power-of-two boundary (relative to zero).
731 * => flags:
732 * UVM_PLA_NOWAIT fail if allocation fails
733 * UVM_PLA_WAITOK wait for memory to become avail
734 * UVM_PLA_ZERO return zeroed memory
735 */
736 int
737 uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
738 paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
739 {
740 KASSERT((alignment & (alignment - 1)) == 0);
741 KASSERT((boundary & (boundary - 1)) == 0);
742 KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));
743
744 if (size == 0)
745 return EINVAL;
746 size = atop(round_page(size));
747
748 /*
749 * XXX uvm_pglistalloc is currently only used for kernel
750 * objects. Unlike the checks in uvm_pagealloc, below, here
751 * we are always allowed to use the kernel reserve.
752 */
753 flags |= UVM_PLA_USERESERVE;
754
755 if ((high & PAGE_MASK) != PAGE_MASK) {
756 printf("uvm_pglistalloc: Upper boundary 0x%lx "
757 "not on pagemask.\n", (unsigned long)high);
758 }
759
760 /*
761 * Our allocations are always page granularity, so our alignment
762 * must be, too.
763 */
764 if (alignment < PAGE_SIZE)
765 alignment = PAGE_SIZE;
766
767 low = atop(roundup(low, alignment));
768 /*
769 * high + 1 may result in overflow, in which case high becomes 0x0,
770 * which is the 'don't care' value.
771 * The only requirement in that case is that low is also 0x0, or the
772 * low<high assert will fail.
773 */
774 high = atop(high + 1);
775 alignment = atop(alignment);
776 if (boundary < PAGE_SIZE && boundary != 0)
777 boundary = PAGE_SIZE;
778 boundary = atop(boundary);
779
780 return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
781 flags, rlist);
782 }
783
784 /*
785 * uvm_pglistfree: free a list of pages
786 *
787 * => pages should already be unmapped
788 */
789 void
790 uvm_pglistfree(struct pglist *list)
791 {
792 uvm_pmr_freepageq(list);
793 }
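
/*
 * Sketch of typical uvm_pglistalloc()/uvm_pglistfree() usage: allocate
 * four zeroed, physically contiguous, DMA-reachable pages and release
 * them again (error handling and the consumer of the pages are omitted):
 *
 *	struct pglist plist;
 *	struct vm_page *pg;
 *	int error;
 *
 *	TAILQ_INIT(&plist);
 *	error = uvm_pglistalloc(ptoa(4), dma_constraint.ucr_low,
 *	    dma_constraint.ucr_high, 0, 0, &plist, 1,
 *	    UVM_PLA_WAITOK | UVM_PLA_ZERO);
 *	if (error == 0) {
 *		TAILQ_FOREACH(pg, &plist, pageq)
 *			... use VM_PAGE_TO_PHYS(pg) ...;
 *		uvm_pglistfree(&plist);
 *	}
 */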
794
795 /*
796 * Interface used by the buffer cache to allocate the pages of one buffer
797 * at a time.  The pages are allocated wired, in DMA-accessible memory.
798 */
799 int
800 uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
801 int flags)
802 {
803 struct pglist plist;
804 struct vm_page *pg;
805 int i, r;
806
807 KASSERT(UVM_OBJ_IS_BUFCACHE(obj));
808 KERNEL_ASSERT_LOCKED();
809
810 TAILQ_INIT(&plist);
811 r = uvm_pglistalloc(size, dma_constraint.ucr_low,
812 dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
813 flags);
814 if (r == 0) {
815 i = 0;
816 while ((pg = TAILQ_FIRST(&plist)) != NULL) {
817 pg->wire_count = 1;
818 atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
819 KASSERT((pg->pg_flags & PG_DEV) == 0);
820 TAILQ_REMOVE(&plist, pg, pageq);
821 uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
822 }
823 }
824 return r;
825 }
826
827 /*
828 * Interface used by the buffer cache to reallocate the pages of one buffer
829 * at a time.  The pages are reallocated wired, outside the DMA-accessible
830 * region.
831 */
832 int
833 uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
834 int flags, struct uvm_constraint_range *where)
835 {
836 struct pglist plist;
837 struct vm_page *pg, *tpg;
838 int i, r;
839 voff_t offset;
840
841 KASSERT(UVM_OBJ_IS_BUFCACHE(obj));
842 KERNEL_ASSERT_LOCKED();
843
844 TAILQ_INIT(&plist);
845 if (size == 0)
846 panic("size 0 uvm_pagerealloc");
847 r = uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
848 0, &plist, atop(round_page(size)), flags);
849 if (r == 0) {
850 i = 0;
851 while ((pg = TAILQ_FIRST(&plist)) != NULL) {
852 offset = off + ptoa(i++);
853 tpg = uvm_pagelookup(obj, offset);
854 KASSERT(tpg != NULL);
855 pg->wire_count = 1;
856 atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
857 KASSERT((pg->pg_flags & PG_DEV) == 0);
858 TAILQ_REMOVE(&plist, pg, pageq);
859 uvm_pagecopy(tpg, pg);
860 KASSERT(tpg->wire_count == 1);
861 tpg->wire_count = 0;
862 uvm_lock_pageq();
863 uvm_pagefree(tpg);
864 uvm_unlock_pageq();
865 uvm_pagealloc_pg(pg, obj, offset, NULL);
866 }
867 }
868 return r;
869 }
870
871 /*
872 * uvm_pagealloc: allocate vm_page from a particular free list.
873 *
874 * => return null if no pages free
875 * => wake up pagedaemon if number of free pages drops below low water mark
876 * => only one of obj or anon can be non-null
877 * => caller must activate/deactivate page if it is not wired.
878 */
879 struct vm_page *
880 uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
881 int flags)
882 {
883 struct vm_page *pg = NULL;
884 int pmr_flags;
885
886 KASSERT(obj == NULL || anon == NULL);
887 KASSERT(anon == NULL || off == 0);
888 KASSERT(off == trunc_page(off));
889 KASSERT(obj == NULL || UVM_OBJ_IS_DUMMY(obj) ||
890 rw_write_held(obj->vmobjlock));
891 KASSERT(anon == NULL || anon->an_lock == NULL ||
892 rw_write_held(anon->an_lock));
893
894 pmr_flags = UVM_PLA_NOWAIT;
895
896 /*
897 * We're allowed to use the kernel reserve if the page is
898 * being allocated to a kernel object.
899 */
900 if ((flags & UVM_PGA_USERESERVE) ||
901 (obj != NULL && UVM_OBJ_IS_KERN_OBJECT(obj)))
902 pmr_flags |= UVM_PLA_USERESERVE;
903
904 if (flags & UVM_PGA_ZERO)
905 pmr_flags |= UVM_PLA_ZERO;
906
907 pg = uvm_pmr_cache_get(pmr_flags);
908 if (pg == NULL)
909 return NULL;
910 uvm_pagealloc_pg(pg, obj, off, anon);
911 KASSERT((pg->pg_flags & PG_DEV) == 0);
912 if (flags & UVM_PGA_ZERO)
913 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
914 else
915 atomic_setbits_int(&pg->pg_flags, PG_CLEAN);
916
917 return pg;
918 }
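
/*
 * Sketch of the usual allocation pattern for a managed object page, where
 * "uobj" and "off" stand for the caller's write-locked object and offset;
 * on failure the caller typically drops the object lock, waits for the
 * pagedaemon via uvm_wait(), re-locks and retries the allocation:
 *
 *	pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
 *	if (pg == NULL) {
 *		rw_exit(uobj->vmobjlock);
 *		uvm_wait("pgalloc");
 *		rw_enter(uobj->vmobjlock, RW_WRITE);
 *	}
 */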
919
920 /*
921 * uvm_pagerealloc: reallocate a page from one object to another
922 */
923
924 void
925 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
926 {
927
928 /* remove it from the old object */
929 if (pg->uobject) {
930 uvm_pageremove(pg);
931 }
932
933 /* put it in the new object */
934 if (newobj) {
935 pg->uobject = newobj;
936 pg->offset = newoff;
937 pg->pg_version++;
938 uvm_pageinsert(pg);
939 }
940 }
941
942 /*
943 * uvm_pageclean: clean page
944 *
945 * => erase page's identity (i.e. remove from object)
946 * => caller must lock page queues if `pg' is managed
947 * => assumes all valid mappings of pg are gone
948 */
949 void
950 uvm_pageclean(struct vm_page *pg)
951 {
952 u_int flags_to_clear = 0;
953
954 if ((pg->pg_flags & (PG_TABLED|PQ_ACTIVE|PQ_INACTIVE)) &&
955 (pg->uobject == NULL || !UVM_OBJ_IS_PMAP(pg->uobject)))
956 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
957
958 #ifdef DEBUG
959 if (pg->uobject == (void *)0xdeadbeef &&
960 pg->uanon == (void *)0xdeadbeef) {
961 panic("uvm_pagefree: freeing free page %p", pg);
962 }
963 #endif
964
965 KASSERT((pg->pg_flags & PG_DEV) == 0);
966 KASSERT(pg->uobject == NULL || UVM_OBJ_IS_DUMMY(pg->uobject) ||
967 rw_write_held(pg->uobject->vmobjlock));
968 KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
969 rw_write_held(pg->uanon->an_lock));
970
971 /*
972 * if the page was an object page (and thus "TABLED"), remove it
973 * from the object.
974 */
975 if (pg->pg_flags & PG_TABLED)
976 uvm_pageremove(pg);
977
978 /*
979 * now remove the page from the queues
980 */
981 uvm_pagedequeue(pg);
982
983 /*
984 * if the page was wired, unwire it now.
985 */
986 if (pg->wire_count) {
987 pg->wire_count = 0;
988 uvmexp.wired--;
989 }
990 if (pg->uanon) {
991 pg->uanon->an_page = NULL;
992 pg->uanon = NULL;
993 }
994
995 /* Clean page state bits. */
996 flags_to_clear |= PQ_ANON|PQ_AOBJ|PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|
997 PG_RELEASED|PG_CLEAN|PG_CLEANCHK;
998 atomic_clearbits_int(&pg->pg_flags, flags_to_clear);
999
1000 #ifdef DEBUG
1001 pg->uobject = (void *)0xdeadbeef;
1002 pg->offset = 0xdeadbeef;
1003 pg->uanon = (void *)0xdeadbeef;
1004 #endif
1005 }
1006
1007 /*
1008 * uvm_pagefree: free page
1009 *
1010 * => erase page's identity (i.e. remove from object)
1011 * => put page on free list
1012 * => caller must lock page queues if `pg' is managed
1013 * => assumes all valid mappings of pg are gone
1014 */
1015 void
1016 uvm_pagefree(struct vm_page *pg)
1017 {
1018 uvm_pageclean(pg);
1019 uvm_pmr_cache_put(pg);
1020 }
1021
1022 /*
1023 * uvm_page_unbusy: unbusy an array of pages.
1024 *
1025 * => pages must either all belong to the same object, or all belong to anons.
1026 * => if pages are object-owned, object must be locked.
1027 * => if pages are anon-owned, anons must have 0 refcount.
1028 * => caller must make sure that anon-owned pages are not PG_RELEASED.
1029 */
1030 void
1031 uvm_page_unbusy(struct vm_page **pgs, int npgs)
1032 {
1033 struct vm_page *pg;
1034 int i;
1035
1036 for (i = 0; i < npgs; i++) {
1037 pg = pgs[i];
1038
1039 if (pg == NULL || pg == PGO_DONTCARE) {
1040 continue;
1041 }
1042
1043 KASSERT(uvm_page_owner_locked_p(pg, TRUE));
1044 KASSERT(pg->pg_flags & PG_BUSY);
1045
1046 if (pg->pg_flags & PG_WANTED) {
1047 wakeup(pg);
1048 }
1049 if (pg->pg_flags & PG_RELEASED) {
1050 KASSERT(pg->uobject != NULL ||
1051 (pg->uanon != NULL && pg->uanon->an_ref > 0));
1052 atomic_clearbits_int(&pg->pg_flags, PG_RELEASED);
1053 pmap_page_protect(pg, PROT_NONE);
1054 uvm_pagefree(pg);
1055 } else {
1056 KASSERT((pg->pg_flags & PG_FAKE) == 0);
1057 atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
1058 UVM_PAGE_OWN(pg, NULL);
1059 }
1060 }
1061 }
1062
1063 /*
1064 * uvm_pagewait: wait for a busy page
1065 *
1066 * => page must be known PG_BUSY
1067 * => object must be locked
1068 * => object will be unlocked on return
1069 */
1070 void
1071 uvm_pagewait(struct vm_page *pg, struct rwlock *lock, const char *wmesg)
1072 {
1073 KASSERT(rw_lock_held(lock));
1074 KASSERT((pg->pg_flags & PG_BUSY) != 0);
1075 KASSERT(uvm_page_owner_locked_p(pg, FALSE));
1076
1077 atomic_setbits_int(&pg->pg_flags, PG_WANTED);
1078 rwsleep_nsec(pg, lock, PVM | PNORELOCK, wmesg, INFSLP);
1079 }
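
/*
 * Sketch of the classic busy-page wait loop built on uvm_pagewait();
 * "uobj" and "off" are the caller's object and offset, and since
 * uvm_pagewait() releases the lock the page must be looked up again
 * after every sleep:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITE);
 *	while ((pg = uvm_pagelookup(uobj, off)) != NULL &&
 *	    (pg->pg_flags & PG_BUSY)) {
 *		uvm_pagewait(pg, uobj->vmobjlock, "pgbusy");
 *		rw_enter(uobj->vmobjlock, RW_WRITE);
 *	}
 */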
1080
1081 #if defined(UVM_PAGE_TRKOWN)
1082 /*
1083 * uvm_page_own: set or release page ownership
1084 *
1085 * => this is a debugging function that keeps track of who sets PG_BUSY
1086 * and where they do it.  It can be used to track down problems
1087 * such as a thread setting "PG_BUSY" and never releasing it.
1088 * => if "tag" is NULL then we are releasing page ownership
1089 */
1090 void
1091 uvm_page_own(struct vm_page *pg, char *tag)
1092 {
1093 /* gain ownership? */
1094 if (tag) {
1095 if (pg->owner_tag) {
1096 printf("uvm_page_own: page %p already owned "
1097 "by thread %d [%s]\n", pg,
1098 pg->owner, pg->owner_tag);
1099 panic("uvm_page_own");
1100 }
1101 pg->owner = (curproc) ? curproc->p_tid : (pid_t) -1;
1102 pg->owner_tag = tag;
1103 return;
1104 }
1105
1106 /* drop ownership */
1107 if (pg->owner_tag == NULL) {
1108 printf("uvm_page_own: dropping ownership of an non-owned "
1109 "page (%p)\n", pg);
1110 panic("uvm_page_own");
1111 }
1112 pg->owner_tag = NULL;
1113 return;
1114 }
1115 #endif
1116
1117 /*
1118 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
1119 */
1120
1121 #if VM_PHYSSEG_MAX > 1
1122 /*
1123 * vm_physseg_find: find vm_physseg structure that belongs to a PA
1124 */
1125 int
1126 vm_physseg_find(paddr_t pframe, int *offp)
1127 {
1128 struct vm_physseg *seg;
1129
1130 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
1131 /* binary search for it */
1132 int start, len, try;
1133
1134 /*
1135 * if try is too large (thus target is less than try) we reduce
1136 * the length to trunc(len/2) [i.e. everything smaller than "try"]
1137 *
1138 * if the try is too small (thus target is greater than try) then
1139 * we set the new start to be (try + 1). this means we need to
1140 * reduce the length to (round(len/2) - 1).
1141 *
1142 * note "adjust" below which takes advantage of the fact that
1143 * (round(len/2) - 1) == trunc((len - 1) / 2)
1144 * for any value of len we may have
1145 */
1146
1147 for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
1148 try = start + (len / 2); /* try in the middle */
1149 seg = vm_physmem + try;
1150
1151 /* start past our try? */
1152 if (pframe >= seg->start) {
1153 /* was try correct? */
1154 if (pframe < seg->end) {
1155 if (offp)
1156 *offp = pframe - seg->start;
1157 return try; /* got it */
1158 }
1159 start = try + 1; /* next time, start here */
1160 len--; /* "adjust" */
1161 } else {
1162 /*
1163 * pframe before try, just reduce length of
1164 * region, done in "for" loop
1165 */
1166 }
1167 }
1168 return -1;
1169
1170 #else
1171 /* linear search for it */
1172 int lcv;
1173
1174 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
1175 if (pframe >= seg->start && pframe < seg->end) {
1176 if (offp)
1177 *offp = pframe - seg->start;
1178 return lcv; /* got it */
1179 }
1180 }
1181 return -1;
1182
1183 #endif
1184 }
1185
1186 /*
1187 * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
1188 * back from an I/O mapping (ugh!). used in some MD code as well.
1189 */
1190 struct vm_page *
1191 PHYS_TO_VM_PAGE(paddr_t pa)
1192 {
1193 paddr_t pf = atop(pa);
1194 int off;
1195 int psi;
1196
1197 psi = vm_physseg_find(pf, &off);
1198
1199 return (psi == -1) ? NULL : &vm_physmem[psi].pgs[off];
1200 }
1201 #endif /* VM_PHYSSEG_MAX > 1 */
1202
1203 /*
1204 * uvm_pagelookup: look up a page
1205 */
1206 struct vm_page *
1207 uvm_pagelookup(struct uvm_object *obj, voff_t off)
1208 {
1209 /* XXX if stack is too much, handroll */
1210 struct vm_page p, *pg;
1211
1212 p.offset = off;
1213 pg = RBT_FIND(uvm_objtree, &obj->memt, &p);
1214
1215 KASSERT(pg == NULL || obj->uo_npages != 0);
1216 KASSERT(pg == NULL || (pg->pg_flags & PG_RELEASED) == 0 ||
1217 (pg->pg_flags & PG_BUSY) != 0);
1218 return (pg);
1219 }
1220
1221 /*
1222 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
1223 *
1224 * => caller must lock page queues
1225 */
1226 void
1227 uvm_pagewire(struct vm_page *pg)
1228 {
1229 KASSERT(uvm_page_owner_locked_p(pg, TRUE));
1230 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
1231
1232 if (pg->wire_count == 0) {
1233 uvm_pagedequeue(pg);
1234 uvmexp.wired++;
1235 }
1236 pg->wire_count++;
1237 }
1238
1239 /*
1240 * uvm_pageunwire: unwire the page.
1241 *
1242 * => activate if wire count goes to zero.
1243 * => caller must lock page queues
1244 */
1245 void
1246 uvm_pageunwire(struct vm_page *pg)
1247 {
1248 KASSERT(uvm_page_owner_locked_p(pg, TRUE));
1249 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
1250
1251 pg->wire_count--;
1252 if (pg->wire_count == 0) {
1253 uvm_pageactivate(pg);
1254 uvmexp.wired--;
1255 }
1256 }
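
/*
 * Sketch of how a caller pins a page and later releases it; both
 * operations need the page queue lock (and, per the KASSERTs above,
 * the owning object or anon must be locked as well):
 *
 *	uvm_lock_pageq();
 *	uvm_pagewire(pg);
 *	uvm_unlock_pageq();
 *	...
 *	uvm_lock_pageq();
 *	uvm_pageunwire(pg);
 *	uvm_unlock_pageq();
 */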
1257
1258 /*
1259 * uvm_pagedeactivate: deactivate page.
1260 *
1261 * => caller must lock page queues
1262 * => caller must check to make sure page is not wired
1263 * => object that page belongs to must be locked (so we can adjust pg->flags)
1264 */
1265 void
1266 uvm_pagedeactivate(struct vm_page *pg)
1267 {
1268 KASSERT(uvm_page_owner_locked_p(pg, FALSE));
1269 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
1270
1271 pmap_page_protect(pg, PROT_NONE);
1272
1273 if (pg->pg_flags & PQ_ACTIVE) {
1274 TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1275 atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1276 uvmexp.active--;
1277 }
1278 if ((pg->pg_flags & PQ_INACTIVE) == 0) {
1279 KASSERT(pg->wire_count == 0);
1280 TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq);
1281 atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
1282 uvmexp.inactive++;
1283 pmap_clear_reference(pg);
1284 /*
1285 * update the "clean" bit. this isn't 100%
1286 * accurate, and doesn't have to be. we'll
1287 * re-sync it after we zap all mappings when
1288 * scanning the inactive list.
1289 */
1290 if ((pg->pg_flags & PG_CLEAN) != 0 &&
1291 pmap_is_modified(pg))
1292 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1293 }
1294 }
1295
1296 /*
1297 * uvm_pageactivate: activate page
1298 *
1299 * => caller must lock page queues
1300 */
1301 void
1302 uvm_pageactivate(struct vm_page *pg)
1303 {
1304 KASSERT(uvm_page_owner_locked_p(pg, FALSE));
1305 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
1306
1307 uvm_pagedequeue(pg);
1308 if (pg->wire_count == 0) {
1309 TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
1310 atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
1311 uvmexp.active++;
1312
1313 }
1314 }
1315
1316 /*
1317 * uvm_pagedequeue: remove a page from any paging queue
1318 */
1319 void
1320 uvm_pagedequeue(struct vm_page *pg)
1321 {
1322 if (pg->pg_flags & PQ_ACTIVE) {
1323 TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1324 atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1325 uvmexp.active--;
1326 }
1327 if (pg->pg_flags & PQ_INACTIVE) {
1328 TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
1329 atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1330 uvmexp.inactive--;
1331 }
1332 }
1333 /*
1334 * uvm_pagezero: zero fill a page
1335 */
1336 void
1337 uvm_pagezero(struct vm_page *pg)
1338 {
1339 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1340 pmap_zero_page(pg);
1341 }
1342
1343 /*
1344 * uvm_pagecopy: copy a page
1345 */
1346 void
1347 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
1348 {
1349 atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
1350 pmap_copy_page(src, dst);
1351 }
1352
1353 /*
1354 * uvm_page_owner_locked_p: return true if object associated with page is
1355 * locked. this is a weak check for runtime assertions only.
1356 */
1357 int
1358 uvm_page_owner_locked_p(struct vm_page *pg, boolean_t exclusive)
1359 {
1360 if (pg->uobject != NULL) {
1361 if (UVM_OBJ_IS_DUMMY(pg->uobject))
1362 return 1;
1363 return exclusive
1364 ? rw_write_held(pg->uobject->vmobjlock)
1365 : rw_lock_held(pg->uobject->vmobjlock);
1366 }
1367 if (pg->uanon != NULL) {
1368 return rw_write_held(pg->uanon->an_lock);
1369 }
1370 return 1;
1371 }
1372
1373 /*
1374 * uvm_pagecount: count the number of physical pages in the address range.
1375 */
1376 psize_t
1377 uvm_pagecount(struct uvm_constraint_range *constraint)
1378 {
1379 int lcv;
1380 psize_t sz;
1381 paddr_t low, high;
1382 paddr_t ps_low, ps_high;
1383
1384 /* Algorithm uses page numbers. */
1385 low = atop(constraint->ucr_low);
1386 high = atop(constraint->ucr_high);
1387
1388 sz = 0;
1389 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
1390 ps_low = MAX(low, vm_physmem[lcv].avail_start);
1391 ps_high = MIN(high, vm_physmem[lcv].avail_end);
1392 if (ps_low < ps_high)
1393 sz += ps_high - ps_low;
1394 }
1395 return sz;
1396 }
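
/*
 * For example, the number of managed pages reachable by DMA-limited
 * devices can be obtained with:
 *
 *	psize_t dma_pages = uvm_pagecount(&dma_constraint);
 */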
1397