1 /* $OpenBSD: uvm_page.c,v 1.177 2024/05/01 12:54:27 mpi Exp $ */
2 /* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
3
4 /*
5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * Copyright (c) 1991, 1993, The Regents of the University of California.
7 *
8 * All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * The Mach Operating System project at Carnegie-Mellon University.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
38 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
39 *
40 *
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 */
64
65 /*
66 * uvm_page.c: page ops.
67 */
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/sched.h>
72 #include <sys/vnode.h>
73 #include <sys/mount.h>
74 #include <sys/proc.h>
75 #include <sys/smr.h>
76
77 #include <uvm/uvm.h>
78
79 /*
80 * for object trees
81 */
82 RBT_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
83
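/*
 * uvm_pagecmp: comparator for the per-object page tree.  Orders pages
 * by their offset within the owning object and returns the usual
 * three-way result (-1, 0 or 1).
 */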
84 int
85 uvm_pagecmp(const struct vm_page *a, const struct vm_page *b)
86 {
87 return a->offset < b->offset ? -1 : a->offset > b->offset;
88 }
89
90 /*
91 * global vars... XXXCDC: move to uvm. structure.
92 */
93 /*
94 * physical memory config is stored in vm_physmem.
95 */
96 struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
97 int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
98
99 /*
100 * Some supported CPUs in a given architecture don't support all
101 * of the things necessary to do idle page zeroing efficiently.
102 * We therefore provide a way to disable it from machdep code here.
103 */
104
105 /*
106 * local variables
107 */
108 /*
109 * these variables record the kernel virtual address range established
110 * during bootstrap (by pmap_virtual_space() or pmap_steal_memory()).
111 * uvm_pageboot_alloc() uses them internally; they are kept for debugging.
112 */
113 static vaddr_t virtual_space_start;
114 static vaddr_t virtual_space_end;
115
116 /*
117 * local prototypes
118 */
119 static void uvm_pageinsert(struct vm_page *);
120 static void uvm_pageremove(struct vm_page *);
121 int uvm_page_owner_locked_p(struct vm_page *);
122
123 /*
124 * inline functions
125 */
126 /*
127 * uvm_pageinsert: insert a page in the object
128 *
129 * => caller must lock object
130 * => caller should have already set pg's object and offset pointers
131 * and bumped the version counter
132 */
133 static inline void
134 uvm_pageinsert(struct vm_page *pg)
135 {
136 struct vm_page *dupe;
137
138 KASSERT(UVM_OBJ_IS_DUMMY(pg->uobject) ||
139 rw_write_held(pg->uobject->vmobjlock));
140 KASSERT((pg->pg_flags & PG_TABLED) == 0);
141
142 dupe = RBT_INSERT(uvm_objtree, &pg->uobject->memt, pg);
143 /* not allowed to insert over another page */
144 KASSERT(dupe == NULL);
145 atomic_setbits_int(&pg->pg_flags, PG_TABLED);
146 pg->uobject->uo_npages++;
147 }
148
149 /*
150 * uvm_pageremove: remove page from object
151 *
152 * => caller must lock object
153 */
154 static inline void
155 uvm_pageremove(struct vm_page *pg)
156 {
157 KASSERT(UVM_OBJ_IS_DUMMY(pg->uobject) ||
158 rw_write_held(pg->uobject->vmobjlock));
159 KASSERT(pg->pg_flags & PG_TABLED);
160
161 RBT_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
162
163 atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
164 pg->uobject->uo_npages--;
165 pg->uobject = NULL;
166 pg->pg_version++;
167 }
168
169 /*
170 * uvm_page_init: init the page system. called from uvm_init().
171 *
172 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
173 */
174 void
175 uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
176 {
177 vsize_t freepages, pagecount, n;
178 vm_page_t pagearray, curpg;
179 int lcv, i;
180 paddr_t paddr, pgno;
181 struct vm_physseg *seg;
182
183 /*
184 * init the page queues and page queue locks
185 */
186
187 TAILQ_INIT(&uvm.page_active);
188 TAILQ_INIT(&uvm.page_inactive);
189 mtx_init(&uvm.pageqlock, IPL_VM);
190 mtx_init(&uvm.fpageqlock, IPL_VM);
191 uvm_pmr_init();
192
193 /*
194 * allocate vm_page structures.
195 */
196
197 /*
198 * sanity check:
199 * before calling this function the MD code is expected to register
200 * some free RAM with the uvm_page_physload() function. our job
201 * now is to allocate vm_page structures for this memory.
202 */
203
204 if (vm_nphysseg == 0)
205 panic("uvm_page_init: no memory pre-allocated");
206
207 /*
208 * first calculate the number of free pages...
209 *
210 * note that we use start/end rather than avail_start/avail_end.
211 * this allows us to allocate extra vm_page structures in case we
212 * want to return some memory to the pool after booting.
213 */
214
215 freepages = 0;
216 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
217 freepages += (seg->end - seg->start);
218
219 /*
220 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
221 * use. for each page of memory we use we need a vm_page structure.
222 * thus, the total number of pages we can use is the total size of
223 * the memory divided by the PAGE_SIZE plus the size of the vm_page
224 * structure. we add one to freepages as a fudge factor to avoid
225 * truncation errors (since we can only allocate in terms of whole
226 * pages).
227 */
228
229 pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
230 (PAGE_SIZE + sizeof(struct vm_page));
231 pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
232 sizeof(struct vm_page));
233 memset(pagearray, 0, pagecount * sizeof(struct vm_page));
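/*
 * Worked example of the calculation above (illustrative numbers only):
 * with 4KB pages and, say, a 128-byte struct vm_page, 262144 free
 * pages give (262145 << 12) / (4096 + 128) =~ 254201 usable vm_page
 * structures; the pages left over hold the vm_page array itself.
 */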
234
235 /* init the vm_page structures and put them in the correct place. */
236 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
237 n = seg->end - seg->start;
238 if (n > pagecount) {
239 panic("uvm_page_init: lost %ld page(s) in init",
240 (long)(n - pagecount));
241 /* XXXCDC: shouldn't happen? */
242 /* n = pagecount; */
243 }
244
245 /* set up page array pointers */
246 seg->pgs = pagearray;
247 pagearray += n;
248 pagecount -= n;
249 seg->lastpg = seg->pgs + (n - 1);
250
251 /* init and free vm_pages (we've already zeroed them) */
252 pgno = seg->start;
253 paddr = ptoa(pgno);
254 for (i = 0, curpg = seg->pgs; i < n;
255 i++, curpg++, pgno++, paddr += PAGE_SIZE) {
256 curpg->phys_addr = paddr;
257 VM_MDPAGE_INIT(curpg);
258 if (pgno >= seg->avail_start &&
259 pgno < seg->avail_end) {
260 uvmexp.npages++;
261 }
262 }
263
264 /* Add pages to free pool. */
265 uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
266 seg->avail_end - seg->avail_start);
267 }
268
269 /*
270 * pass up the values of virtual_space_start and
271 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
272 * layers of the VM.
273 */
274
275 *kvm_startp = round_page(virtual_space_start);
276 *kvm_endp = trunc_page(virtual_space_end);
277
278 /* init locks for kernel threads */
279 mtx_init(&uvm.aiodoned_lock, IPL_BIO);
280
281 /*
282 * init reserve thresholds
283 * XXXCDC - values may need adjusting
284 */
285 uvmexp.reserve_pagedaemon = 4;
286 uvmexp.reserve_kernel = 8;
287 uvmexp.anonminpct = 10;
288 uvmexp.vnodeminpct = 10;
289 uvmexp.vtextminpct = 5;
290 uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
291 uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
292 uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;
293
294 uvm.page_init_done = TRUE;
295 }
296
297 /*
298 * uvm_setpagesize: set the page size
299 *
300 * => sets page_shift and page_mask from uvmexp.pagesize.
301 */
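/*
 * For example, with uvmexp.pagesize = 4096 this computes
 * uvmexp.pagemask = 0xfff and uvmexp.pageshift = 12.
 */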
302 void
303 uvm_setpagesize(void)
304 {
305 if (uvmexp.pagesize == 0)
306 uvmexp.pagesize = DEFAULT_PAGE_SIZE;
307 uvmexp.pagemask = uvmexp.pagesize - 1;
308 if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
309 panic("uvm_setpagesize: page size not a power of two");
310 for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
311 if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
312 break;
313 }
314
315 /*
316 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
317 */
318 vaddr_t
319 uvm_pageboot_alloc(vsize_t size)
320 {
321 #if defined(PMAP_STEAL_MEMORY)
322 vaddr_t addr;
323
324 /*
325 * defer bootstrap allocation to MD code (it may want to allocate
326 * from a direct-mapped segment). pmap_steal_memory should round
327 * off virtual_space_start/virtual_space_end.
328 */
329
330 addr = pmap_steal_memory(size, &virtual_space_start,
331 &virtual_space_end);
332
333 return addr;
334
335 #else /* !PMAP_STEAL_MEMORY */
336
337 static boolean_t initialized = FALSE;
338 vaddr_t addr, vaddr;
339 paddr_t paddr;
340
341 /* round to page size */
342 size = round_page(size);
343
344 /* on first call to this function, initialize ourselves. */
345 if (initialized == FALSE) {
346 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
347
348 /* round it the way we like it */
349 virtual_space_start = round_page(virtual_space_start);
350 virtual_space_end = trunc_page(virtual_space_end);
351
352 initialized = TRUE;
353 }
354
355 /* allocate virtual memory for this request */
356 if (virtual_space_start == virtual_space_end ||
357 (virtual_space_end - virtual_space_start) < size)
358 panic("uvm_pageboot_alloc: out of virtual space");
359
360 addr = virtual_space_start;
361
362 #ifdef PMAP_GROWKERNEL
363 /*
364 * If the kernel pmap can't map the requested space,
365 * then allocate more resources for it.
366 */
367 if (uvm_maxkaddr < (addr + size)) {
368 uvm_maxkaddr = pmap_growkernel(addr + size);
369 if (uvm_maxkaddr < (addr + size))
370 panic("uvm_pageboot_alloc: pmap_growkernel() failed");
371 }
372 #endif
373
374 virtual_space_start += size;
375
376 /* allocate and mapin physical pages to back new virtual pages */
377 for (vaddr = round_page(addr) ; vaddr < addr + size ;
378 vaddr += PAGE_SIZE) {
379 if (!uvm_page_physget(&paddr))
380 panic("uvm_pageboot_alloc: out of memory");
381
382 /*
383 * Note this memory is no longer managed, so using
384 * pmap_kenter is safe.
385 */
386 pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE);
387 }
388 pmap_update(pmap_kernel());
389 return addr;
390 #endif /* PMAP_STEAL_MEMORY */
391 }
392
393 #if !defined(PMAP_STEAL_MEMORY)
394 /*
395 * uvm_page_physget: "steal" one page from the vm_physmem structure.
396 *
397 * => attempt to allocate it off the end of a segment in which the "avail"
398 * values match the start/end values. if we can't do that, then we
399 * will advance both values (making them equal, and removing some
400 * vm_page structures from the non-avail area).
401 * => return false if out of memory.
402 */
403
404 boolean_t
405 uvm_page_physget(paddr_t *paddrp)
406 {
407 int lcv;
408 struct vm_physseg *seg;
409
410 /* pass 1: try allocating from a matching end */
411 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
412 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
413 for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
414 lcv--, seg--)
415 #else
416 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
417 #endif
418 {
419 if (uvm.page_init_done == TRUE)
420 panic("uvm_page_physget: called _after_ bootstrap");
421
422 /* try from front */
423 if (seg->avail_start == seg->start &&
424 seg->avail_start < seg->avail_end) {
425 *paddrp = ptoa(seg->avail_start);
426 seg->avail_start++;
427 seg->start++;
428 /* nothing left? nuke it */
429 if (seg->avail_start == seg->end) {
430 if (vm_nphysseg == 1)
431 panic("uvm_page_physget: out of memory!");
432 vm_nphysseg--;
433 for (; lcv < vm_nphysseg; lcv++, seg++)
434 /* structure copy */
435 seg[0] = seg[1];
436 }
437 return TRUE;
438 }
439
440 /* try from rear */
441 if (seg->avail_end == seg->end &&
442 seg->avail_start < seg->avail_end) {
443 *paddrp = ptoa(seg->avail_end - 1);
444 seg->avail_end--;
445 seg->end--;
446 /* nothing left? nuke it */
447 if (seg->avail_end == seg->start) {
448 if (vm_nphysseg == 1)
449 panic("uvm_page_physget: out of memory!");
450 vm_nphysseg--;
451 for (; lcv < vm_nphysseg ; lcv++, seg++)
452 /* structure copy */
453 seg[0] = seg[1];
454 }
455 return TRUE;
456 }
457 }
458
459 /* pass 2: forget about matching ends, just allocate something */
460 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
461 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
462 for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
463 lcv--, seg--)
464 #else
465 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
466 #endif
467 {
468
469 /* any room in this bank? */
470 if (seg->avail_start >= seg->avail_end)
471 continue; /* nope */
472
473 *paddrp = ptoa(seg->avail_start);
474 seg->avail_start++;
475 /* truncate! */
476 seg->start = seg->avail_start;
477
478 /* nothing left? nuke it */
479 if (seg->avail_start == seg->end) {
480 if (vm_nphysseg == 1)
481 panic("uvm_page_physget: out of memory!");
482 vm_nphysseg--;
483 for (; lcv < vm_nphysseg ; lcv++, seg++)
484 /* structure copy */
485 seg[0] = seg[1];
486 }
487 return TRUE;
488 }
489
490 return FALSE; /* whoops! */
491 }
492
493 #endif /* PMAP_STEAL_MEMORY */
494
495 /*
496 * uvm_page_physload: load physical memory into VM system
497 *
498 * => all args are page frame numbers (PFNs)
499 * => all pages in start/end get vm_page structures
500 * => areas marked by avail_start/avail_end get added to the free page pool
501 * => we are limited to VM_PHYSSEG_MAX physical memory segments
502 */
503
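/*
 * Illustrative call (a sketch of typical MD bootstrap code, not taken
 * from this file): a machine with RAM at [seg_start, seg_end) whose
 * available part is [avail_start, avail_end) would register it with
 *
 *	uvm_page_physload(atop(seg_start), atop(seg_end),
 *	    atop(avail_start), atop(avail_end), 0);
 *
 * where the byte addresses are converted to page frame numbers with
 * atop().
 */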
504 void
505 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
506 paddr_t avail_end, int flags)
507 {
508 int preload, lcv;
509 psize_t npages;
510 struct vm_page *pgs;
511 struct vm_physseg *ps, *seg;
512
513 #ifdef DIAGNOSTIC
514 if (uvmexp.pagesize == 0)
515 panic("uvm_page_physload: page size not set!");
516
517 if (start >= end)
518 panic("uvm_page_physload: start >= end");
519 #endif
520
521 /* do we have room? */
522 if (vm_nphysseg == VM_PHYSSEG_MAX) {
523 printf("uvm_page_physload: unable to load physical memory "
524 "segment\n");
525 printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
526 VM_PHYSSEG_MAX, (long long)start, (long long)end);
527 printf("\tincrease VM_PHYSSEG_MAX\n");
528 return;
529 }
530
531 /*
532 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
533 * called yet, so malloc is not available).
534 */
535 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
536 if (seg->pgs)
537 break;
538 }
539 preload = (lcv == vm_nphysseg);
540
541 /* if VM is already running, attempt to malloc() vm_page structures */
542 if (!preload) {
543 /*
544 * XXXCDC: need some sort of lockout for this case.  Right now it is
545 * only used by devices, so it should be all right.
546 */
547 paddr_t paddr;
548
549 npages = end - start; /* # of pages */
550
551 pgs = km_alloc(round_page(npages * sizeof(*pgs)),
552 &kv_any, &kp_zero, &kd_waitok);
553 if (pgs == NULL) {
554 printf("uvm_page_physload: unable to allocate vm_page "
555 "structs for segment\n");
556 printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
557 return;
558 }
559 /* init phys_addr and free pages, XXX uvmexp.npages */
560 for (lcv = 0, paddr = ptoa(start); lcv < npages;
561 lcv++, paddr += PAGE_SIZE) {
562 pgs[lcv].phys_addr = paddr;
563 VM_MDPAGE_INIT(&pgs[lcv]);
564 if (atop(paddr) >= avail_start &&
565 atop(paddr) < avail_end) {
566 if (flags & PHYSLOAD_DEVICE) {
567 atomic_setbits_int(&pgs[lcv].pg_flags,
568 PG_DEV);
569 pgs[lcv].wire_count = 1;
570 } else {
571 #if defined(VM_PHYSSEG_NOADD)
572 panic("uvm_page_physload: tried to add RAM after vm_mem_init");
573 #endif
574 }
575 }
576 }
577
578 /* Add pages to free pool. */
579 if ((flags & PHYSLOAD_DEVICE) == 0) {
580 uvm_pmr_freepages(&pgs[avail_start - start],
581 avail_end - avail_start);
582 }
583
584 /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
585 } else {
586 /* gcc complains if these don't get init'd */
587 pgs = NULL;
588 npages = 0;
589
590 }
591
592 /* now insert us in the proper place in vm_physmem[] */
593 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
594 /* random: put it at the end (easy!) */
595 ps = &vm_physmem[vm_nphysseg];
596 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
597 {
598 int x;
599 /* sort by address for binary search */
600 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
601 if (start < seg->start)
602 break;
603 ps = seg;
604 /* move back other entries, if necessary ... */
605 for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
606 x--, seg--)
607 /* structure copy */
608 seg[1] = seg[0];
609 }
610 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
611 {
612 int x;
613 /* sort by largest segment first */
614 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
615 if ((end - start) >
616 (seg->end - seg->start))
617 break;
618 ps = &vm_physmem[lcv];
619 /* move back other entries, if necessary ... */
620 for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
621 x--, seg--)
622 /* structure copy */
623 seg[1] = seg[0];
624 }
625 #else
626 panic("uvm_page_physload: unknown physseg strategy selected!");
627 #endif
628
629 ps->start = start;
630 ps->end = end;
631 ps->avail_start = avail_start;
632 ps->avail_end = avail_end;
633 if (preload) {
634 ps->pgs = NULL;
635 } else {
636 ps->pgs = pgs;
637 ps->lastpg = pgs + npages - 1;
638 }
639 vm_nphysseg++;
640
641 return;
642 }
643
644 #ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
645
646 void uvm_page_physdump(void); /* SHUT UP GCC */
647
648 /* call from DDB */
649 void
650 uvm_page_physdump(void)
651 {
652 int lcv;
653 struct vm_physseg *seg;
654
655 printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
656 vm_nphysseg, VM_PHYSSEG_MAX);
657 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
658 printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
659 (long long)seg->start,
660 (long long)seg->end,
661 (long long)seg->avail_start,
662 (long long)seg->avail_end);
663 printf("STRATEGY = ");
664 switch (VM_PHYSSEG_STRAT) {
665 case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
666 case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
667 case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
668 default: printf("<<UNKNOWN>>!!!!\n");
669 }
670 }
671 #endif
672
673 void
674 uvm_shutdown(void)
675 {
676 #ifdef UVM_SWAP_ENCRYPT
677 uvm_swap_finicrypt_all();
678 #endif
679 smr_flush();
680 }
681
682 /*
683 * Insert the given page into the specified anon or obj.
684 * This is basically uvm_pagealloc(), but with the page already given.
685 */
686 void
687 uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
688 struct vm_anon *anon)
689 {
690 int flags;
691
692 KASSERT(obj == NULL || anon == NULL);
693 KASSERT(anon == NULL || off == 0);
694 KASSERT(off == trunc_page(off));
695 KASSERT(obj == NULL || UVM_OBJ_IS_DUMMY(obj) ||
696 rw_write_held(obj->vmobjlock));
697 KASSERT(anon == NULL || anon->an_lock == NULL ||
698 rw_write_held(anon->an_lock));
699
700 flags = PG_BUSY | PG_FAKE;
701 pg->offset = off;
702 pg->uobject = obj;
703 pg->uanon = anon;
704 KASSERT(uvm_page_owner_locked_p(pg));
705 if (anon) {
706 anon->an_page = pg;
707 flags |= PQ_ANON;
708 } else if (obj)
709 uvm_pageinsert(pg);
710 atomic_setbits_int(&pg->pg_flags, flags);
711 #if defined(UVM_PAGE_TRKOWN)
712 pg->owner_tag = NULL;
713 #endif
714 UVM_PAGE_OWN(pg, "new alloc");
715 }
716
717 /*
718 * uvm_pglistalloc: allocate a list of pages
719 *
720 * => allocated pages are placed at the tail of rlist. rlist is
721 * assumed to be properly initialized by caller.
722 * => returns 0 on success or errno on failure
723 * => doesn't take into account clean non-busy pages on inactive list
724 * that could be used(?)
725 * => params:
726 * size the size of the allocation, rounded to page size.
727 * low the low address of the allowed allocation range.
728 * high the high address of the allowed allocation range.
729 * alignment memory must be aligned to this power-of-two boundary.
730 * boundary no segment in the allocation may cross this
731 * power-of-two boundary (relative to zero).
732 * => flags:
733 * UVM_PLA_NOWAIT fail if allocation fails
734 * UVM_PLA_WAITOK wait for memory to become avail
735 * UVM_PLA_ZERO return zeroed memory
736 */
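/*
 * Illustrative use (a sketch, not from this file): allocate 16
 * physically contiguous pages below 16MB, waiting for memory if needed:
 *
 *	struct pglist pl;
 *	int error;
 *
 *	TAILQ_INIT(&pl);
 *	error = uvm_pglistalloc(16 * PAGE_SIZE, 0, 0x1000000 - 1, 0, 0,
 *	    &pl, 1, UVM_PLA_WAITOK);
 *
 * nsegs == 1 asks for a single contiguous segment; on success the pages
 * are on `pl' and are eventually released with uvm_pglistfree(&pl).
 */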
737 int
738 uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
739 paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
740 {
741 KASSERT((alignment & (alignment - 1)) == 0);
742 KASSERT((boundary & (boundary - 1)) == 0);
743 KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));
744
745 if (size == 0)
746 return EINVAL;
747 size = atop(round_page(size));
748
749 /*
750 * XXX uvm_pglistalloc is currently only used for kernel
751 * objects. Unlike the checks in uvm_pagealloc, below, here
752 * we are always allowed to use the kernel reserve.
753 */
754 flags |= UVM_PLA_USERESERVE;
755
756 if ((high & PAGE_MASK) != PAGE_MASK) {
757 printf("uvm_pglistalloc: Upper boundary 0x%lx "
758 "not on pagemask.\n", (unsigned long)high);
759 }
760
761 /*
762 * Our allocations are always page granularity, so our alignment
763 * must be, too.
764 */
765 if (alignment < PAGE_SIZE)
766 alignment = PAGE_SIZE;
767
768 low = atop(roundup(low, alignment));
769 /*
770 * high + 1 may result in overflow, in which case high becomes 0x0,
771 * which is the 'don't care' value.
772 * The only requirement in that case is that low is also 0x0, or the
773 * low<high assert will fail.
774 */
775 high = atop(high + 1);
776 alignment = atop(alignment);
777 if (boundary < PAGE_SIZE && boundary != 0)
778 boundary = PAGE_SIZE;
779 boundary = atop(boundary);
780
781 return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
782 flags, rlist);
783 }
784
785 /*
786 * uvm_pglistfree: free a list of pages
787 *
788 * => pages should already be unmapped
789 */
790 void
791 uvm_pglistfree(struct pglist *list)
792 {
793 uvm_pmr_freepageq(list);
794 }
795
796 /*
797 * interface used by the buffer cache to allocate a buffer at a time.
798 * The pages are allocated wired, in DMA-accessible memory.
799 */
800 int
801 uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
802 int flags)
803 {
804 struct pglist plist;
805 struct vm_page *pg;
806 int i, r;
807
808 KASSERT(UVM_OBJ_IS_BUFCACHE(obj));
809 KERNEL_ASSERT_LOCKED();
810
811 TAILQ_INIT(&plist);
812 r = uvm_pglistalloc(size, dma_constraint.ucr_low,
813 dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
814 flags);
815 if (r == 0) {
816 i = 0;
817 while ((pg = TAILQ_FIRST(&plist)) != NULL) {
818 pg->wire_count = 1;
819 atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
820 KASSERT((pg->pg_flags & PG_DEV) == 0);
821 TAILQ_REMOVE(&plist, pg, pageq);
822 uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
823 }
824 }
825 return r;
826 }
827
828 /*
829 * interface used by the buffer cache to reallocate a buffer at a time.
830 * The pages are reallocated wired, outside the DMA-accessible region.
832 */
833 int
834 uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
835 int flags, struct uvm_constraint_range *where)
836 {
837 struct pglist plist;
838 struct vm_page *pg, *tpg;
839 int i, r;
840 voff_t offset;
841
842 KASSERT(UVM_OBJ_IS_BUFCACHE(obj));
843 KERNEL_ASSERT_LOCKED();
844
845 TAILQ_INIT(&plist);
846 if (size == 0)
847 panic("size 0 uvm_pagerealloc");
848 r = uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
849 0, &plist, atop(round_page(size)), flags);
850 if (r == 0) {
851 i = 0;
852 while((pg = TAILQ_FIRST(&plist)) != NULL) {
853 offset = off + ptoa(i++);
854 tpg = uvm_pagelookup(obj, offset);
855 KASSERT(tpg != NULL);
856 pg->wire_count = 1;
857 atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
858 KASSERT((pg->pg_flags & PG_DEV) == 0);
859 TAILQ_REMOVE(&plist, pg, pageq);
860 uvm_pagecopy(tpg, pg);
861 KASSERT(tpg->wire_count == 1);
862 tpg->wire_count = 0;
863 uvm_lock_pageq();
864 uvm_pagefree(tpg);
865 uvm_unlock_pageq();
866 uvm_pagealloc_pg(pg, obj, offset, NULL);
867 }
868 }
869 return r;
870 }
871
872 /*
873 * uvm_pagealloc: allocate vm_page from a particular free list.
874 *
875 * => return null if no pages free
876 * => wake up pagedaemon if number of free pages drops below low water mark
877 * => only one of obj or anon can be non-null
878 * => caller must activate/deactivate page if it is not wired.
879 */
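/*
 * Illustrative use (a sketch, not from this file): with `uobj' locked,
 * allocate a zeroed page at offset `off', sleeping for memory if the
 * allocation fails (the wait channel name is made up for the example):
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO)) == NULL) {
 *		rw_exit(uobj->vmobjlock);
 *		uvm_wait("uobjget");
 *		rw_enter(uobj->vmobjlock, RW_WRITE);
 *	}
 */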
880 struct vm_page *
881 uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
882 int flags)
883 {
884 struct vm_page *pg = NULL;
885 int pmr_flags;
886
887 KASSERT(obj == NULL || anon == NULL);
888 KASSERT(anon == NULL || off == 0);
889 KASSERT(off == trunc_page(off));
890 KASSERT(obj == NULL || UVM_OBJ_IS_DUMMY(obj) ||
891 rw_write_held(obj->vmobjlock));
892 KASSERT(anon == NULL || anon->an_lock == NULL ||
893 rw_write_held(anon->an_lock));
894
895 pmr_flags = UVM_PLA_NOWAIT;
896
897 /*
898 * We're allowed to use the kernel reserve if the page is
899 * being allocated to a kernel object.
900 */
901 if ((flags & UVM_PGA_USERESERVE) ||
902 (obj != NULL && UVM_OBJ_IS_KERN_OBJECT(obj)))
903 pmr_flags |= UVM_PLA_USERESERVE;
904
905 if (flags & UVM_PGA_ZERO)
906 pmr_flags |= UVM_PLA_ZERO;
907
908 pg = uvm_pmr_cache_get(pmr_flags);
909 if (pg == NULL)
910 return NULL;
911 uvm_pagealloc_pg(pg, obj, off, anon);
912 KASSERT((pg->pg_flags & PG_DEV) == 0);
913 if (flags & UVM_PGA_ZERO)
914 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
915 else
916 atomic_setbits_int(&pg->pg_flags, PG_CLEAN);
917
918 return pg;
919 }
920
921 /*
922 * uvm_pagerealloc: reallocate a page from one object to another
923 */
924
925 void
926 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
927 {
928
929 /* remove it from the old object */
930 if (pg->uobject) {
931 uvm_pageremove(pg);
932 }
933
934 /* put it in the new object */
935 if (newobj) {
936 pg->uobject = newobj;
937 pg->offset = newoff;
938 pg->pg_version++;
939 uvm_pageinsert(pg);
940 }
941 }
942
943 /*
944 * uvm_pageclean: clean page
945 *
946 * => erase page's identity (i.e. remove from object)
947 * => caller must lock page queues if `pg' is managed
948 * => assumes all valid mappings of pg are gone
949 */
950 void
951 uvm_pageclean(struct vm_page *pg)
952 {
953 u_int flags_to_clear = 0;
954
955 if ((pg->pg_flags & (PG_TABLED|PQ_ACTIVE|PQ_INACTIVE)) &&
956 (pg->uobject == NULL || !UVM_OBJ_IS_PMAP(pg->uobject)))
957 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
958
959 #ifdef DEBUG
960 if (pg->uobject == (void *)0xdeadbeef &&
961 pg->uanon == (void *)0xdeadbeef) {
962 panic("uvm_pagefree: freeing free page %p", pg);
963 }
964 #endif
965
966 KASSERT((pg->pg_flags & PG_DEV) == 0);
967 KASSERT(pg->uobject == NULL || UVM_OBJ_IS_DUMMY(pg->uobject) ||
968 rw_write_held(pg->uobject->vmobjlock));
969 KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
970 rw_write_held(pg->uanon->an_lock));
971
972 /*
973 * if the page was an object page (and thus "TABLED"), remove it
974 * from the object.
975 */
976 if (pg->pg_flags & PG_TABLED)
977 uvm_pageremove(pg);
978
979 /*
980 * now remove the page from the queues
981 */
982 uvm_pagedequeue(pg);
983
984 /*
985 * if the page was wired, unwire it now.
986 */
987 if (pg->wire_count) {
988 pg->wire_count = 0;
989 uvmexp.wired--;
990 }
991 if (pg->uanon) {
992 pg->uanon->an_page = NULL;
993 pg->uanon = NULL;
994 }
995
996 /* Clean page state bits. */
997 flags_to_clear |= PQ_ANON|PQ_AOBJ|PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|
998 PG_RELEASED|PG_CLEAN|PG_CLEANCHK;
999 atomic_clearbits_int(&pg->pg_flags, flags_to_clear);
1000
1001 #ifdef DEBUG
1002 pg->uobject = (void *)0xdeadbeef;
1003 pg->offset = 0xdeadbeef;
1004 pg->uanon = (void *)0xdeadbeef;
1005 #endif
1006 }
1007
1008 /*
1009 * uvm_pagefree: free page
1010 *
1011 * => erase page's identity (i.e. remove from object)
1012 * => put page on free list
1013 * => caller must lock page queues if `pg' is managed
1014 * => assumes all valid mappings of pg are gone
1015 */
1016 void
1017 uvm_pagefree(struct vm_page *pg)
1018 {
1019 uvm_pageclean(pg);
1020 uvm_pmr_cache_put(pg);
1021 }
1022
1023 /*
1024 * uvm_page_unbusy: unbusy an array of pages.
1025 *
1026 * => pages must either all belong to the same object, or all belong to anons.
1027 * => if pages are object-owned, object must be locked.
1028 * => if pages are anon-owned, anons must have 0 refcount.
1029 * => caller must make sure that anon-owned pages are not PG_RELEASED.
1030 */
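/*
 * Illustrative use (a sketch, not from this file): after an async pager
 * operation completes, the I/O done handler re-locks the object and
 * unbusies the pages that were handed to it:
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITE);
 *	uvm_page_unbusy(pgs, npages);
 *	rw_exit(uobj->vmobjlock);
 */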
1031 void
1032 uvm_page_unbusy(struct vm_page **pgs, int npgs)
1033 {
1034 struct vm_page *pg;
1035 int i;
1036
1037 for (i = 0; i < npgs; i++) {
1038 pg = pgs[i];
1039
1040 if (pg == NULL || pg == PGO_DONTCARE) {
1041 continue;
1042 }
1043
1044 KASSERT(uvm_page_owner_locked_p(pg));
1045 KASSERT(pg->pg_flags & PG_BUSY);
1046
1047 if (pg->pg_flags & PG_WANTED) {
1048 wakeup(pg);
1049 }
1050 if (pg->pg_flags & PG_RELEASED) {
1051 KASSERT(pg->uobject != NULL ||
1052 (pg->uanon != NULL && pg->uanon->an_ref > 0));
1053 atomic_clearbits_int(&pg->pg_flags, PG_RELEASED);
1054 pmap_page_protect(pg, PROT_NONE);
1055 uvm_pagefree(pg);
1056 } else {
1057 KASSERT((pg->pg_flags & PG_FAKE) == 0);
1058 atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
1059 UVM_PAGE_OWN(pg, NULL);
1060 }
1061 }
1062 }
1063
1064 /*
1065 * uvm_pagewait: wait for a busy page
1066 *
1067 * => page must be known PG_BUSY
1068 * => object must be locked
1069 * => object will be unlocked on return
1070 */
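/*
 * Illustrative use (a sketch, not from this file): waiting for a busy
 * page and retrying the lookup, since the object lock is dropped while
 * sleeping (the wait channel name is made up for the example):
 *
 *	rw_enter(uobj->vmobjlock, RW_WRITE);
 *	while ((pg = uvm_pagelookup(uobj, off)) != NULL &&
 *	    (pg->pg_flags & PG_BUSY)) {
 *		uvm_pagewait(pg, uobj->vmobjlock, "pgwait");
 *		rw_enter(uobj->vmobjlock, RW_WRITE);
 *	}
 */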
1071 void
1072 uvm_pagewait(struct vm_page *pg, struct rwlock *lock, const char *wmesg)
1073 {
1074 KASSERT(rw_lock_held(lock));
1075 KASSERT((pg->pg_flags & PG_BUSY) != 0);
1076
1077 atomic_setbits_int(&pg->pg_flags, PG_WANTED);
1078 rwsleep_nsec(pg, lock, PVM | PNORELOCK, wmesg, INFSLP);
1079 }
1080
1081 #if defined(UVM_PAGE_TRKOWN)
1082 /*
1083 * uvm_page_own: set or release page ownership
1084 *
1085 * => this is a debugging function that keeps track of who sets PG_BUSY
1086 * and where they do it. it can be used to track down problems
1087 * such as a thread setting "PG_BUSY" and never releasing it.
1088 * => if "tag" is NULL then we are releasing page ownership
1089 */
1090 void
1091 uvm_page_own(struct vm_page *pg, char *tag)
1092 {
1093 /* gain ownership? */
1094 if (tag) {
1095 if (pg->owner_tag) {
1096 printf("uvm_page_own: page %p already owned "
1097 "by thread %d [%s]\n", pg,
1098 pg->owner, pg->owner_tag);
1099 panic("uvm_page_own");
1100 }
1101 pg->owner = (curproc) ? curproc->p_tid : (pid_t) -1;
1102 pg->owner_tag = tag;
1103 return;
1104 }
1105
1106 /* drop ownership */
1107 if (pg->owner_tag == NULL) {
1108 printf("uvm_page_own: dropping ownership of a non-owned "
1109 "page (%p)\n", pg);
1110 panic("uvm_page_own");
1111 }
1112 pg->owner_tag = NULL;
1113 return;
1114 }
1115 #endif
1116
1117 /*
1118 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
1119 */
1120
1121 #if VM_PHYSSEG_MAX > 1
1122 /*
1123 * vm_physseg_find: find vm_physseg structure that belongs to a PA
1124 */
1125 int
1126 vm_physseg_find(paddr_t pframe, int *offp)
1127 {
1128 struct vm_physseg *seg;
1129
1130 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
1131 /* binary search for it */
1132 int start, len, try;
1133
1134 /*
1135 * if try is too large (thus target is less than try) we reduce
1136 * the length to trunc(len/2) [i.e. everything smaller than "try"]
1137 *
1138 * if the try is too small (thus target is greater than try) then
1139 * we set the new start to be (try + 1). this means we need to
1140 * reduce the length to (round(len/2) - 1).
1141 *
1142 * note "adjust" below which takes advantage of the fact that
1143 * (round(len/2) - 1) == trunc((len - 1) / 2)
1144 * for any value of len we may have
1145 */
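/*
 * e.g. len == 5: round(5/2) - 1 == 2 == trunc((5 - 1) / 2), and
 *      len == 4: round(4/2) - 1 == 1 == trunc((4 - 1) / 2).
 */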
1146
1147 for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
1148 try = start + (len / 2); /* try in the middle */
1149 seg = vm_physmem + try;
1150
1151 /* start past our try? */
1152 if (pframe >= seg->start) {
1153 /* was try correct? */
1154 if (pframe < seg->end) {
1155 if (offp)
1156 *offp = pframe - seg->start;
1157 return try; /* got it */
1158 }
1159 start = try + 1; /* next time, start here */
1160 len--; /* "adjust" */
1161 } else {
1162 /*
1163 * pframe before try, just reduce length of
1164 * region, done in "for" loop
1165 */
1166 }
1167 }
1168 return -1;
1169
1170 #else
1171 /* linear search for it */
1172 int lcv;
1173
1174 for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
1175 if (pframe >= seg->start && pframe < seg->end) {
1176 if (offp)
1177 *offp = pframe - seg->start;
1178 return lcv; /* got it */
1179 }
1180 }
1181 return -1;
1182
1183 #endif
1184 }
1185
1186 /*
1187 * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
1188 * back from an I/O mapping (ugh!). used in some MD code as well.
1189 */
1190 struct vm_page *
1191 PHYS_TO_VM_PAGE(paddr_t pa)
1192 {
1193 paddr_t pf = atop(pa);
1194 int off;
1195 int psi;
1196
1197 psi = vm_physseg_find(pf, &off);
1198
1199 return (psi == -1) ? NULL : &vm_physmem[psi].pgs[off];
1200 }
1201 #endif /* VM_PHYSSEG_MAX > 1 */
1202
1203 /*
1204 * uvm_pagelookup: look up a page
1205 */
1206 struct vm_page *
1207 uvm_pagelookup(struct uvm_object *obj, voff_t off)
1208 {
1209 /* XXX if stack is too much, handroll */
1210 struct vm_page p, *pg;
1211
1212 p.offset = off;
1213 pg = RBT_FIND(uvm_objtree, &obj->memt, &p);
1214
1215 KASSERT(pg == NULL || obj->uo_npages != 0);
1216 KASSERT(pg == NULL || (pg->pg_flags & PG_RELEASED) == 0 ||
1217 (pg->pg_flags & PG_BUSY) != 0);
1218 return (pg);
1219 }
1220
1221 /*
1222 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
1223 *
1224 * => caller must lock page queues
1225 */
1226 void
1227 uvm_pagewire(struct vm_page *pg)
1228 {
1229 KASSERT(uvm_page_owner_locked_p(pg));
1230 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
1231
1232 if (pg->wire_count == 0) {
1233 uvm_pagedequeue(pg);
1234 uvmexp.wired++;
1235 }
1236 pg->wire_count++;
1237 }
1238
1239 /*
1240 * uvm_pageunwire: unwire the page.
1241 *
1242 * => activate if wire count goes to zero.
1243 * => caller must lock page queues
1244 */
1245 void
1246 uvm_pageunwire(struct vm_page *pg)
1247 {
1248 KASSERT(uvm_page_owner_locked_p(pg));
1249 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
1250
1251 pg->wire_count--;
1252 if (pg->wire_count == 0) {
1253 uvm_pageactivate(pg);
1254 uvmexp.wired--;
1255 }
1256 }
1257
1258 /*
1259 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
1260 *
1261 * => caller must lock page queues
1262 * => caller must check to make sure page is not wired
1263 * => object that page belongs to must be locked (so we can adjust pg->flags)
1264 */
1265 void
1266 uvm_pagedeactivate(struct vm_page *pg)
1267 {
1268 KASSERT(uvm_page_owner_locked_p(pg));
1269 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
1270
1271 if (pg->pg_flags & PQ_ACTIVE) {
1272 TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1273 atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1274 uvmexp.active--;
1275 }
1276 if ((pg->pg_flags & PQ_INACTIVE) == 0) {
1277 KASSERT(pg->wire_count == 0);
1278 TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq);
1279 atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
1280 uvmexp.inactive++;
1281 pmap_clear_reference(pg);
1282 /*
1283 * update the "clean" bit. this isn't 100%
1284 * accurate, and doesn't have to be. we'll
1285 * re-sync it after we zap all mappings when
1286 * scanning the inactive list.
1287 */
1288 if ((pg->pg_flags & PG_CLEAN) != 0 &&
1289 pmap_is_modified(pg))
1290 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1291 }
1292 }
1293
1294 /*
1295 * uvm_pageactivate: activate page
1296 *
1297 * => caller must lock page queues
1298 */
1299 void
1300 uvm_pageactivate(struct vm_page *pg)
1301 {
1302 KASSERT(uvm_page_owner_locked_p(pg));
1303 MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
1304
1305 uvm_pagedequeue(pg);
1306 if (pg->wire_count == 0) {
1307 TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
1308 atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
1309 uvmexp.active++;
1310
1311 }
1312 }
1313
1314 /*
1315 * uvm_pagedequeue: remove a page from any paging queue
1316 */
1317 void
1318 uvm_pagedequeue(struct vm_page *pg)
1319 {
1320 if (pg->pg_flags & PQ_ACTIVE) {
1321 TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1322 atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1323 uvmexp.active--;
1324 }
1325 if (pg->pg_flags & PQ_INACTIVE) {
1326 TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
1327 atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1328 uvmexp.inactive--;
1329 }
1330 }
1331 /*
1332 * uvm_pagezero: zero fill a page
1333 */
1334 void
1335 uvm_pagezero(struct vm_page *pg)
1336 {
1337 atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1338 pmap_zero_page(pg);
1339 }
1340
1341 /*
1342 * uvm_pagecopy: copy a page
1343 */
1344 void
1345 uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
1346 {
1347 atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
1348 pmap_copy_page(src, dst);
1349 }
1350
1351 /*
1352 * uvm_page_owner_locked_p: return true if the object or anon associated
1353 * with the page is locked. this is a weak check for runtime assertions only.
1354 */
1355 int
1356 uvm_page_owner_locked_p(struct vm_page *pg)
1357 {
1358 if (pg->uobject != NULL) {
1359 if (UVM_OBJ_IS_DUMMY(pg->uobject))
1360 return 1;
1361 return rw_write_held(pg->uobject->vmobjlock);
1362 }
1363 if (pg->uanon != NULL) {
1364 return rw_write_held(pg->uanon->an_lock);
1365 }
1366 return 1;
1367 }
1368
1369 /*
1370 * uvm_pagecount: count the number of physical pages in the address range.
1371 */
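/*
 * Illustrative use (a sketch, not from this file): sizing something
 * against DMA-reachable memory:
 *
 *	psize_t dmapages = uvm_pagecount(&dma_constraint);
 *
 * counts the available pages whose physical addresses fall within
 * dma_constraint.ucr_low .. ucr_high.
 */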
1372 psize_t
1373 uvm_pagecount(struct uvm_constraint_range* constraint)
1374 {
1375 int lcv;
1376 psize_t sz;
1377 paddr_t low, high;
1378 paddr_t ps_low, ps_high;
1379
1380 /* Algorithm uses page numbers. */
1381 low = atop(constraint->ucr_low);
1382 high = atop(constraint->ucr_high);
1383
1384 sz = 0;
1385 for (lcv = 0; lcv < vm_nphysseg; lcv++) {
1386 ps_low = MAX(low, vm_physmem[lcv].avail_start);
1387 ps_high = MIN(high, vm_physmem[lcv].avail_end);
1388 if (ps_low < ps_high)
1389 sz += ps_high - ps_low;
1390 }
1391 return sz;
1392 }
1393