/*	$OpenBSD: uvm_page.c,v 1.148 2019/02/26 14:24:21 visa Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
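 * (resident page bookkeeping: page allocation and freeing, wiring, the
 * active/inactive page queues, and the vm_physseg tables.)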
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/smr.h>

#include <uvm/uvm.h>

/*
 * for object trees
 */
RBT_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);

int
uvm_pagecmp(const struct vm_page *a, const struct vm_page *b)
{
	return (a->offset < b->offset ? -1 : a->offset > b->offset);
}

/*
 * global vars... XXXCDC: move to uvm. structure.
 */
/*
 * physical memory config is stored in vm_physmem.
 */
struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * local variables
 */
/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */
static vaddr_t	virtual_space_start;
static vaddr_t	virtual_space_end;

/*
 * local prototypes
 */
static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */
/*
 * uvm_pageinsert: insert a page in the object
 *
 * => caller must lock page queues XXX questionable
 * => call should have already set pg's object and offset pointers
 *    and bumped the version counter
 */
__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct vm_page	*dupe;

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	dupe = RBT_INSERT(uvm_objtree, &pg->uobject->memt, pg);
	/* not allowed to insert over another page */
	KASSERT(dupe == NULL);
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object
 *
 * => caller must lock page queues
 */
static __inline void
uvm_pageremove(struct vm_page *pg)
{
	KASSERT(pg->pg_flags & PG_TABLED);
	RBT_REMOVE(uvm_objtree, &pg->uobject->memt, pg);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */
void
uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray, curpg;
	int lcv, i;
	paddr_t paddr, pgno;
	struct vm_physseg *seg;

	/*
	 * init the page queues and page queue locks
	 */

	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	mtx_init(&uvm.pageqlock, IPL_NONE);
	mtx_init(&uvm.fpageqlock, IPL_VM);
	uvm_pmr_init();

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
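	 * (those uvm_page_physload() calls normally happen from the MD
	 * bootstrap/pmap startup code, well before uvm_init() runs.)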
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		freepages += (seg->end - seg->start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/* init the vm_page structures and put them in the correct place. */
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		n = seg->end - seg->start;
		if (n > pagecount) {
			panic("uvm_page_init: lost %ld page(s) in init",
			    (long)(n - pagecount));
			    /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		seg->pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		seg->lastpg = seg->pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		pgno = seg->start;
		paddr = ptoa(pgno);
		for (i = 0, curpg = seg->pgs; i < n;
		    i++, curpg++, pgno++, paddr += PAGE_SIZE) {
			curpg->phys_addr = paddr;
			VM_MDPAGE_INIT(curpg);
			if (pgno >= seg->avail_start &&
			    pgno < seg->avail_end) {
				uvmexp.npages++;
			}
		}

		/* Add pages to free pool. */
		uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
		    seg->avail_end - seg->avail_start);
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/* init locks for kernel threads */
	mtx_init(&uvm.aiodoned_lock, IPL_BIO);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
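 * => panics if uvmexp.pagesize is not a power of two.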
 */
void
uvm_setpagesize(void)
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */
vaddr_t
uvm_pageboot_alloc(vsize_t size)
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/* on first call to this function, initialize ourselves. */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/* allocate virtual memory for this request */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/* allocate and mapin physical pages to back new virtual pages */
	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {
		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
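 * => must only be used while bootstrapping; panics once uvm.page_init_done
 *    has been set by uvm_page_init().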
 */

boolean_t
uvm_page_physget(paddr_t *paddrp)
{
	int lcv;
	struct vm_physseg *seg;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{
		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (seg->avail_start == seg->start &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_start);
			seg->avail_start++;
			seg->start++;
			/* nothing left?   nuke it */
			if (seg->avail_start == seg->end) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (seg->avail_end == seg->end &&
		    seg->avail_start < seg->avail_end) {
			*paddrp = ptoa(seg->avail_end - 1);
			seg->avail_end--;
			seg->end--;
			/* nothing left?   nuke it */
			if (seg->avail_end == seg->start) {
				if (vm_nphysseg == 1)
					panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (; lcv < vm_nphysseg ; lcv++, seg++)
					/* structure copy */
					seg[0] = seg[1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
	    lcv--, seg--)
#else
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
#endif
	{

		/* any room in this bank? */
		if (seg->avail_start >= seg->avail_end)
			continue;  /* nope */

		*paddrp = ptoa(seg->avail_start);
		seg->avail_start++;
		/* truncate! */
		seg->start = seg->avail_start;

		/* nothing left?   nuke it */
		if (seg->avail_start == seg->end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (; lcv < vm_nphysseg ; lcv++, seg++)
				/* structure copy */
				seg[0] = seg[1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
    paddr_t avail_end, int flags)
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps, *seg;

#ifdef DIAGNOSTIC
	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (start >= end)
		panic("uvm_page_physload: start >= end");
#endif

	/* do we have room? */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
		if (seg->pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/* if VM is already running, attempt to malloc() vm_page structures */
	if (!preload) {
		/*
		 * XXXCDC: need some sort of lockout for this case
		 * right now it is only used by devices so it should be alright.
		 */
		paddr_t paddr;

		npages = end - start;  /* # of pages */

		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
		    npages * sizeof(*pgs));
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* init phys_addr and free pages, XXX uvmexp.npages */
		for (lcv = 0, paddr = ptoa(start); lcv < npages;
		    lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			VM_MDPAGE_INIT(&pgs[lcv]);
			if (atop(paddr) >= avail_start &&
			    atop(paddr) < avail_end) {
				if (flags & PHYSLOAD_DEVICE) {
					atomic_setbits_int(&pgs[lcv].pg_flags,
					    PG_DEV);
					pgs[lcv].wire_count = 1;
				} else {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#endif
				}
			}
		}

		/* Add pages to free pool. */
		if ((flags & PHYSLOAD_DEVICE) == 0) {
			uvm_pmr_freepages(&pgs[avail_start - start],
			    avail_end - avail_start);
		}

		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
	} else {
		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/* now insert us in the proper place in vm_physmem[] */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if (start < seg->start)
				break;
		ps = seg;
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}
#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
			if ((end - start) >
			    (seg->end - seg->start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
		    x--, seg--)
			/* structure copy */
			seg[1] = seg[0];
	}
#else
	panic("uvm_page_physload: unknown physseg strategy selected!");
#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	vm_nphysseg++;

	return;
}

#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump(void)
{
	int lcv;
	struct vm_physseg *seg;

	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)seg->start,
		    (long long)seg->end,
		    (long long)seg->avail_start,
		    (long long)seg->avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
}
#endif

void
uvm_shutdown(void)
{
#ifdef UVM_SWAP_ENCRYPT
	uvm_swap_finicrypt_all();
#endif
	smr_flush();
}

/*
 * Perform insert of a given page in the specified anon or obj.
 * This is basically uvm_pagealloc, but with the page already given.
 */
void
uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
    struct vm_anon *anon)
{
	int	flags;

	flags = PG_BUSY | PG_FAKE;
	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;

	if (anon) {
		anon->an_page = pg;
		flags |= PQ_ANON;
	} else if (obj)
		uvm_pageinsert(pg);
	atomic_setbits_int(&pg->pg_flags, flags);
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");
}

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by caller.
 * => returns 0 on success or errno on failure
 * => doesn't take into account clean non-busy pages on inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 * => flags:
 *	UVM_PLA_NOWAIT	fail if allocation fails
 *	UVM_PLA_WAITOK	wait for memory to become avail
 *	UVM_PLA_ZERO	return zeroed memory
 */
int
uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
{
	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);
	KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));

	if (size == 0)
		return (EINVAL);
	size = atop(round_page(size));

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
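	 * the wakeup is only advisory; the allocation below proceeds
	 * whether or not the pagedaemon runs.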
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * XXX uvm_pglistalloc is currently only used for kernel
	 * objects. Unlike the checks in uvm_pagealloc, below, here
	 * we are always allowed to use the kernel reserve. However, we
	 * have to enforce the pagedaemon reserve here or allocations
	 * via this path could consume everything and we can't
	 * recover in the page daemon.
	 */
again:
	if ((uvmexp.free <= uvmexp.reserve_pagedaemon + size &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc)))) {
		if (flags & UVM_PLA_WAITOK) {
			uvm_wait("uvm_pglistalloc");
			goto again;
		}
		return (ENOMEM);
	}

	if ((high & PAGE_MASK) != PAGE_MASK) {
		printf("uvm_pglistalloc: Upper boundary 0x%lx "
		    "not on pagemask.\n", (unsigned long)high);
	}

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	low = atop(roundup(low, alignment));
	/*
	 * high + 1 may result in overflow, in which case high becomes 0x0,
	 * which is the 'don't care' value.
	 * The only requirement in that case is that low is also 0x0, or the
	 * low<high assert will fail.
	 */
	high = atop(high + 1);
	alignment = atop(alignment);
	if (boundary < PAGE_SIZE && boundary != 0)
		boundary = PAGE_SIZE;
	boundary = atop(boundary);

	return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
	    flags, rlist);
}

/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */
void
uvm_pglistfree(struct pglist *list)
{
	uvm_pmr_freepageq(list);
}

/*
 * interface used by the buffer cache to allocate a buffer at a time.
 * The pages are allocated wired in DMA accessible memory
 */
int
uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags)
{
	struct pglist    plist;
	struct vm_page  *pg;
	int              i, r;


	TAILQ_INIT(&plist);
	r = uvm_pglistalloc(size, dma_constraint.ucr_low,
	    dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
	    flags);
	if (r == 0) {
		i = 0;
		while ((pg = TAILQ_FIRST(&plist)) != NULL) {
			pg->wire_count = 1;
			atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
			KASSERT((pg->pg_flags & PG_DEV) == 0);
			TAILQ_REMOVE(&plist, pg, pageq);
			uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
		}
	}
	return r;
}

/*
 * interface used by the buffer cache to reallocate a buffer at a time.
 * The pages are reallocated wired outside the DMA accessible region.
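 * Each existing page is looked up, copied into its replacement, and freed.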
 *
 */
int
uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
    int flags, struct uvm_constraint_range *where)
{
	struct pglist    plist;
	struct vm_page  *pg, *tpg;
	int              i, r;
	voff_t           offset;


	TAILQ_INIT(&plist);
	if (size == 0)
		panic("size 0 uvm_pagerealloc");
	r = uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
	    0, &plist, atop(round_page(size)), flags);
	if (r == 0) {
		i = 0;
		while((pg = TAILQ_FIRST(&plist)) != NULL) {
			offset = off + ptoa(i++);
			tpg = uvm_pagelookup(obj, offset);
			KASSERT(tpg != NULL);
			pg->wire_count = 1;
			atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
			KASSERT((pg->pg_flags & PG_DEV) == 0);
			TAILQ_REMOVE(&plist, pg, pageq);
			uvm_pagecopy(tpg, pg);
			KASSERT(tpg->wire_count == 1);
			tpg->wire_count = 0;
			uvm_pagefree(tpg);
			uvm_pagealloc_pg(pg, obj, offset, NULL);
		}
	}
	return r;
}

/*
 * uvm_pagealloc: allocate a vm_page from the free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 */

struct vm_page *
uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
    int flags)
{
	struct vm_page *pg;
	struct pglist pgl;
	int pmr_flags;
	boolean_t use_reserve;

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	/*
	 * check to see if we need to generate some free pages waking
	 * the pagedaemon.
	 */
	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
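	 *      (the syncer is given the same exemption as the pagedaemon)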
	 */
	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	    !((curproc == uvm.pagedaemon_proc) ||
	    (curproc == syncerproc))))
		goto fail;

	pmr_flags = UVM_PLA_NOWAIT;
	if (flags & UVM_PGA_ZERO)
		pmr_flags |= UVM_PLA_ZERO;
	TAILQ_INIT(&pgl);
	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
		goto fail;

	pg = TAILQ_FIRST(&pgl);
	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);

	uvm_pagealloc_pg(pg, obj, off, anon);
	KASSERT((pg->pg_flags & PG_DEV) == 0);
	if (flags & UVM_PGA_ZERO)
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	else
		atomic_setbits_int(&pg->pg_flags, PG_CLEAN);

	return(pg);

fail:
	return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 */

void
uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
{

	/* remove it from the old object */
	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/* put it in the new object */
	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from object)
 * => put page on free list
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */
void
uvm_pagefree(struct vm_page *pg)
{
	u_int flags_to_clear = 0;

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	KASSERT((pg->pg_flags & PG_DEV) == 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */
	if (pg->pg_flags & PG_TABLED)
		uvm_pageremove(pg);

	/* now remove the page from the queues */
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		flags_to_clear |= PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		flags_to_clear |= PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/* if the page was wired, unwire it now. */
	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
	}

	/* Clean page state bits. */
	flags_to_clear |= PQ_ANON|PQ_AOBJ|PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|
	    PG_RELEASED|PG_CLEAN|PG_CLEANCHK;
	atomic_clearbits_int(&pg->pg_flags, flags_to_clear);

	/* and put on free queue */
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif

	uvm_pmr_freepages(pg, 1);
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are anon-owned, anons must have 0 refcount.
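 * => pages marked PG_RELEASED are freed here; for anon-owned pages the
 *    anon itself is freed as well.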
 */
void
uvm_page_unbusy(struct vm_page **pgs, int npgs)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			uobj = pg->uobject;
			if (uobj != NULL) {
				uvm_lock_pageq();
				pmap_page_protect(pg, PROT_NONE);
				/* XXX won't happen right now */
				if (pg->pg_flags & PQ_AOBJ)
					uao_dropswap(uobj,
					    pg->offset >> PAGE_SHIFT);
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a thread setting "PG_BUSY" and never releasing it.
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(struct vm_page *pg, char *tag)
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by thread %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_tid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of an non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
 */

#if VM_PHYSSEG_MAX > 1
/*
 * vm_physseg_find: find vm_physseg structure that belongs to a PA
 */
int
vm_physseg_find(paddr_t pframe, int *offp)
{
	struct vm_physseg *seg;

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	/* binary search for it */
	int	start, len, try;

	/*
	 * if try is too large (thus target is less than try) we reduce
	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
	 *
	 * if the try is too small (thus target is greater than try) then
	 * we set the new start to be (try + 1).   this means we need to
	 * reduce the length to (round(len/2) - 1).
	 *
	 * note "adjust" below which takes advantage of the fact that
	 *   (round(len/2) - 1) == trunc((len - 1) / 2)
	 * for any value of len we may have
	 */

	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
		try = start + (len / 2);	/* try in the middle */
		seg = vm_physmem + try;

		/* start past our try? */
		if (pframe >= seg->start) {
			/* was try correct? */
			if (pframe < seg->end) {
				if (offp)
					*offp = pframe - seg->start;
				return(try);		/* got it */
			}
			start = try + 1;	/* next time, start here */
			len--;			/* "adjust" */
		} else {
			/*
			 * pframe before try, just reduce length of
			 * region, done in "for" loop
			 */
		}
	}
	return(-1);

#else
	/* linear search for it */
	int	lcv;

	for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
		if (pframe >= seg->start && pframe < seg->end) {
			if (offp)
				*offp = pframe - seg->start;
			return(lcv);		/* got it */
		}
	}
	return(-1);

#endif
}

/*
 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
 * back from an I/O mapping (ugh!).   used in some MD code as well.
 */
struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
	paddr_t pf = atop(pa);
	int	off;
	int	psi;

	psi = vm_physseg_find(pf, &off);

	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
}
#endif /* VM_PHYSSEG_MAX > 1 */

/*
 * uvm_pagelookup: look up a page
 */
struct vm_page *
uvm_pagelookup(struct uvm_object *obj, voff_t off)
{
	/* XXX if stack is too much, handroll */
	struct vm_page pg;

	pg.offset = off;
	return (RBT_FIND(uvm_objtree, &obj->memt, &pg));
}

/*
 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
 *
 * => caller must lock page queues
 */
void
uvm_pagewire(struct vm_page *pg)
{
	if (pg->wire_count == 0) {
		if (pg->pg_flags & PQ_ACTIVE) {
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active--;
		}
		if (pg->pg_flags & PQ_INACTIVE) {
			if (pg->pg_flags & PQ_SWAPBACKED)
				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
			else
				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
			uvmexp.inactive--;
		}
		uvmexp.wired++;
	}
	pg->wire_count++;
}

/*
 * uvm_pageunwire: unwire the page.
 *
 * => activate if wire count goes to zero.
 * => caller must lock page queues
 */
void
uvm_pageunwire(struct vm_page *pg)
{
	pg->wire_count--;
	if (pg->wire_count == 0) {
		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
		uvmexp.active++;
		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.wired--;
	}
}

/*
 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
 *
 * => caller must lock page queues
 * => caller must check to make sure page is not wired
 * => object that page belongs to must be locked (so we can adjust pg->flags)
 */
void
uvm_pagedeactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
		KASSERT(pg->wire_count == 0);
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive++;
		pmap_clear_reference(pg);
		/*
		 * update the "clean" bit. this isn't 100%
		 * accurate, and doesn't have to be.  we'll
		 * re-sync it after we zap all mappings when
		 * scanning the inactive list.
		 */
		if ((pg->pg_flags & PG_CLEAN) != 0 &&
		    pmap_is_modified(pg))
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	}
}

/*
 * uvm_pageactivate: activate page
 *
 * => caller must lock page queues
 */
void
uvm_pageactivate(struct vm_page *pg)
{
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}
	if (pg->wire_count == 0) {
		/*
		 * if page is already active, remove it from list so we
		 * can put it at tail.  if it wasn't active, then mark
		 * it active and bump active count
		 */
		if (pg->pg_flags & PQ_ACTIVE)
			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		else {
			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
			uvmexp.active++;
		}

		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
	}
}

/*
 * uvm_pagezero: zero fill a page
 */
void
uvm_pagezero(struct vm_page *pg)
{
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
	pmap_zero_page(pg);
}

/*
 * uvm_pagecopy: copy a page
 */
void
uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
{
	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
	pmap_copy_page(src, dst);
}

/*
 * uvm_pagecount: count the number of physical pages in the address range.
 */
psize_t
uvm_pagecount(struct uvm_constraint_range* constraint)
{
	int lcv;
	psize_t sz;
	paddr_t low, high;
	paddr_t ps_low, ps_high;

	/* Algorithm uses page numbers. */
	low = atop(constraint->ucr_low);
	high = atop(constraint->ucr_high);

	sz = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		ps_low = MAX(low, vm_physmem[lcv].avail_start);
		ps_high = MIN(high, vm_physmem[lcv].avail_end);
		if (ps_low < ps_high)
			sz += ps_high - ps_low;
	}
	return sz;
}
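
/*
 * Illustrative sketch (not compiled into the kernel): a caller that wants
 * a few wired, DMA-reachable pages could go through uvm_pglistalloc() and
 * release them again with uvm_pglistfree(), roughly as follows:
 *
 *	struct pglist pgl;
 *
 *	TAILQ_INIT(&pgl);
 *	if (uvm_pglistalloc(4 * PAGE_SIZE, dma_constraint.ucr_low,
 *	    dma_constraint.ucr_high, 0, 0, &pgl, 1, UVM_PLA_WAITOK) == 0) {
 *		(map and use the pages queued on pgl, then)
 *		uvm_pglistfree(&pgl);
 *	}
 */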