1 /* 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_pager.c 8.6 (Berkeley) 1/12/94 37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43 * 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53 * 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. 63 * 64 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $ 65 * $DragonFly: src/sys/vm/vm_pager.c,v 1.8 2003/08/20 08:03:01 rob Exp $ 66 */ 67 68 /* 69 * Paging space routine stubs. Emulates a matchmaker-like interface 70 * for builtin pagers. 71 */ 72 73 #include <sys/param.h> 74 #include <sys/systm.h> 75 #include <sys/kernel.h> 76 #include <sys/vnode.h> 77 #include <sys/buf.h> 78 #include <sys/ucred.h> 79 #include <sys/malloc.h> 80 #include <sys/proc.h> 81 82 #include <vm/vm.h> 83 #include <vm/vm_param.h> 84 #include <vm/vm_object.h> 85 #include <vm/vm_page.h> 86 #include <vm/vm_pager.h> 87 #include <vm/vm_extern.h> 88 89 #include <sys/buf2.h> 90 91 MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data"); 92 93 extern struct pagerops defaultpagerops; 94 extern struct pagerops swappagerops; 95 extern struct pagerops vnodepagerops; 96 extern struct pagerops devicepagerops; 97 extern struct pagerops physpagerops; 98 99 int cluster_pbuf_freecnt = -1; /* unlimited to begin with */ 100 101 static int dead_pager_getpages (vm_object_t, vm_page_t *, int, int); 102 static vm_object_t dead_pager_alloc (void *, vm_ooffset_t, vm_prot_t, 103 vm_ooffset_t); 104 static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *); 105 static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t, int *, int *); 106 static void dead_pager_dealloc (vm_object_t); 107 108 static int 109 dead_pager_getpages(obj, ma, count, req) 110 vm_object_t obj; 111 vm_page_t *ma; 112 int count; 113 int req; 114 { 115 return VM_PAGER_FAIL; 116 } 117 118 static vm_object_t 119 dead_pager_alloc(handle, size, prot, off) 120 void *handle; 121 vm_ooffset_t size; 122 vm_prot_t prot; 123 vm_ooffset_t off; 124 { 125 return NULL; 126 } 127 128 static void 129 dead_pager_putpages(object, m, count, flags, rtvals) 130 vm_object_t object; 131 vm_page_t *m; 132 int count; 133 int flags; 134 int *rtvals; 135 { 136 int i; 137 138 for (i = 0; i < count; i++) { 139 rtvals[i] = VM_PAGER_AGAIN; 140 } 141 } 142 143 static int 144 dead_pager_haspage(object, pindex, prev, next) 145 vm_object_t object; 146 vm_pindex_t pindex; 147 int *prev; 148 int *next; 149 { 150 if (prev) 151 *prev = 0; 152 if (next) 153 *next = 0; 154 return FALSE; 155 } 156 157 static void 158 dead_pager_dealloc(object) 159 vm_object_t object; 160 { 161 return; 162 } 163 164 static struct pagerops deadpagerops = { 165 NULL, 166 dead_pager_alloc, 167 dead_pager_dealloc, 168 dead_pager_getpages, 169 dead_pager_putpages, 170 dead_pager_haspage, 171 NULL 172 }; 173 174 struct pagerops *pagertab[] = { 175 &defaultpagerops, /* OBJT_DEFAULT */ 176 &swappagerops, /* OBJT_SWAP */ 177 &vnodepagerops, /* OBJT_VNODE */ 178 &devicepagerops, /* OBJT_DEVICE */ 179 &physpagerops, /* OBJT_PHYS */ 180 &deadpagerops /* OBJT_DEAD */ 181 }; 182 183 int npagers = sizeof(pagertab) / sizeof(pagertab[0]); 184 185 /* 186 * Kernel address space for mapping pages. 187 * Used by pagers where KVAs are needed for IO. 188 * 189 * XXX needs to be large enough to support the number of pending async 190 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size 191 * (MAXPHYS == 64k) if you want to get the most efficiency. 192 */ 193 #define PAGER_MAP_SIZE (8 * 1024 * 1024) 194 195 int pager_map_size = PAGER_MAP_SIZE; 196 vm_map_t pager_map; 197 static int bswneeded; 198 static vm_offset_t swapbkva; /* swap buffers kva */ 199 200 void 201 vm_pager_init() 202 { 203 struct pagerops **pgops; 204 205 /* 206 * Initialize known pagers 207 */ 208 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) 209 if (pgops && ((*pgops)->pgo_init != NULL)) 210 (*(*pgops)->pgo_init) (); 211 } 212 213 void 214 vm_pager_bufferinit() 215 { 216 struct buf *bp; 217 int i; 218 219 bp = swbuf; 220 /* 221 * Now set up swap and physical I/O buffer headers. 222 */ 223 for (i = 0; i < nswbuf; i++, bp++) { 224 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist); 225 BUF_LOCKINIT(bp); 226 LIST_INIT(&bp->b_dep); 227 bp->b_xflags = 0; 228 } 229 230 cluster_pbuf_freecnt = nswbuf / 2; 231 232 swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS); 233 if (!swapbkva) 234 panic("Not enough pager_map VM space for physical buffers"); 235 } 236 237 /* 238 * Allocate an instance of a pager of the given type. 239 * Size, protection and offset parameters are passed in for pagers that 240 * need to perform page-level validation (e.g. the device pager). 241 */ 242 vm_object_t 243 vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size, vm_prot_t prot, 244 vm_ooffset_t off) 245 { 246 struct pagerops *ops; 247 248 ops = pagertab[type]; 249 if (ops) 250 return ((*ops->pgo_alloc) (handle, size, prot, off)); 251 return (NULL); 252 } 253 254 void 255 vm_pager_deallocate(object) 256 vm_object_t object; 257 { 258 (*pagertab[object->type]->pgo_dealloc) (object); 259 } 260 261 /* 262 * vm_pager_strategy: 263 * 264 * called with no specific spl 265 * Execute strategy routine directly to pager. 266 */ 267 268 void 269 vm_pager_strategy(vm_object_t object, struct buf *bp) 270 { 271 if (pagertab[object->type]->pgo_strategy) { 272 (*pagertab[object->type]->pgo_strategy)(object, bp); 273 } else { 274 bp->b_flags |= B_ERROR; 275 bp->b_error = ENXIO; 276 biodone(bp); 277 } 278 } 279 280 /* 281 * vm_pager_get_pages() - inline, see vm/vm_pager.h 282 * vm_pager_put_pages() - inline, see vm/vm_pager.h 283 * vm_pager_has_page() - inline, see vm/vm_pager.h 284 * vm_pager_page_inserted() - inline, see vm/vm_pager.h 285 * vm_pager_page_removed() - inline, see vm/vm_pager.h 286 */ 287 288 #if 0 289 /* 290 * vm_pager_sync: 291 * 292 * Called by pageout daemon before going back to sleep. 293 * Gives pagers a chance to clean up any completed async pageing 294 * operations. 295 */ 296 void 297 vm_pager_sync() 298 { 299 struct pagerops **pgops; 300 301 for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++) 302 if (pgops && ((*pgops)->pgo_sync != NULL)) 303 (*(*pgops)->pgo_sync) (); 304 } 305 306 #endif 307 308 vm_offset_t 309 vm_pager_map_page(m) 310 vm_page_t m; 311 { 312 vm_offset_t kva; 313 314 kva = kmem_alloc_wait(pager_map, PAGE_SIZE); 315 pmap_kenter(kva, VM_PAGE_TO_PHYS(m)); 316 return (kva); 317 } 318 319 void 320 vm_pager_unmap_page(kva) 321 vm_offset_t kva; 322 { 323 pmap_kremove(kva); 324 kmem_free_wakeup(pager_map, kva, PAGE_SIZE); 325 } 326 327 vm_object_t 328 vm_pager_object_lookup(pg_list, handle) 329 struct pagerlst *pg_list; 330 void *handle; 331 { 332 vm_object_t object; 333 334 for (object = TAILQ_FIRST(pg_list); object != NULL; object = TAILQ_NEXT(object,pager_object_list)) 335 if (object->handle == handle) 336 return (object); 337 return (NULL); 338 } 339 340 /* 341 * initialize a physical buffer 342 */ 343 344 static void 345 initpbuf(struct buf *bp) 346 { 347 bp->b_qindex = QUEUE_NONE; 348 bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva; 349 bp->b_kvabase = bp->b_data; 350 bp->b_kvasize = MAXPHYS; 351 bp->b_xflags = 0; 352 bp->b_flags = 0; 353 bp->b_error = 0; 354 BUF_LOCK(bp, LK_EXCLUSIVE); 355 } 356 357 /* 358 * allocate a physical buffer 359 * 360 * There are a limited number (nswbuf) of physical buffers. We need 361 * to make sure that no single subsystem is able to hog all of them, 362 * so each subsystem implements a counter which is typically initialized 363 * to 1/2 nswbuf. getpbuf() decrements this counter in allocation and 364 * increments it on release, and blocks if the counter hits zero. A 365 * subsystem may initialize the counter to -1 to disable the feature, 366 * but it must still be sure to match up all uses of getpbuf() with 367 * relpbuf() using the same variable. 368 * 369 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed 370 * relatively soon when the rest of the subsystems get smart about it. XXX 371 */ 372 struct buf * 373 getpbuf(pfreecnt) 374 int *pfreecnt; 375 { 376 int s; 377 struct buf *bp; 378 379 s = splvm(); 380 381 for (;;) { 382 if (pfreecnt) { 383 while (*pfreecnt == 0) { 384 tsleep(pfreecnt, 0, "wswbuf0", 0); 385 } 386 } 387 388 /* get a bp from the swap buffer header pool */ 389 if ((bp = TAILQ_FIRST(&bswlist)) != NULL) 390 break; 391 392 bswneeded = 1; 393 tsleep(&bswneeded, 0, "wswbuf1", 0); 394 /* loop in case someone else grabbed one */ 395 } 396 TAILQ_REMOVE(&bswlist, bp, b_freelist); 397 if (pfreecnt) 398 --*pfreecnt; 399 splx(s); 400 401 initpbuf(bp); 402 return bp; 403 } 404 405 /* 406 * allocate a physical buffer, if one is available. 407 * 408 * Note that there is no NULL hack here - all subsystems using this 409 * call understand how to use pfreecnt. 410 */ 411 struct buf * 412 trypbuf(pfreecnt) 413 int *pfreecnt; 414 { 415 int s; 416 struct buf *bp; 417 418 s = splvm(); 419 if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) { 420 splx(s); 421 return NULL; 422 } 423 TAILQ_REMOVE(&bswlist, bp, b_freelist); 424 425 --*pfreecnt; 426 427 splx(s); 428 429 initpbuf(bp); 430 431 return bp; 432 } 433 434 /* 435 * release a physical buffer 436 * 437 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed 438 * relatively soon when the rest of the subsystems get smart about it. XXX 439 */ 440 void 441 relpbuf(bp, pfreecnt) 442 struct buf *bp; 443 int *pfreecnt; 444 { 445 int s; 446 447 s = splvm(); 448 449 if (bp->b_vp) 450 pbrelvp(bp); 451 452 BUF_UNLOCK(bp); 453 454 TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist); 455 456 if (bswneeded) { 457 bswneeded = 0; 458 wakeup(&bswneeded); 459 } 460 if (pfreecnt) { 461 if (++*pfreecnt == 1) 462 wakeup(pfreecnt); 463 } 464 splx(s); 465 } 466 467 /******************************************************** 468 * CHAINING FUNCTIONS * 469 ******************************************************** 470 * 471 * These functions support recursion of I/O operations 472 * on bp's, typically by chaining one or more 'child' bp's 473 * to the parent. Synchronous, asynchronous, and semi-synchronous 474 * chaining is possible. 475 */ 476 477 /* 478 * vm_pager_chain_iodone: 479 * 480 * io completion routine for child bp. Currently we fudge a bit 481 * on dealing with b_resid. Since users of these routines may issue 482 * multiple children simultaniously, sequencing of the error can be lost. 483 */ 484 485 static void 486 vm_pager_chain_iodone(struct buf *nbp) 487 { 488 struct buf *bp; 489 490 if ((bp = nbp->b_chain.parent) != NULL) { 491 if (nbp->b_flags & B_ERROR) { 492 bp->b_flags |= B_ERROR; 493 bp->b_error = nbp->b_error; 494 } else if (nbp->b_resid != 0) { 495 bp->b_flags |= B_ERROR; 496 bp->b_error = EINVAL; 497 } else { 498 bp->b_resid -= nbp->b_bcount; 499 } 500 nbp->b_chain.parent = NULL; 501 --bp->b_chain.count; 502 if (bp->b_flags & B_WANT) { 503 bp->b_flags &= ~B_WANT; 504 wakeup(bp); 505 } 506 if (!bp->b_chain.count && (bp->b_xflags & BX_AUTOCHAINDONE)) { 507 bp->b_xflags &= ~BX_AUTOCHAINDONE; 508 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) { 509 bp->b_flags |= B_ERROR; 510 bp->b_error = EINVAL; 511 } 512 biodone(bp); 513 } 514 } 515 nbp->b_flags |= B_DONE; 516 nbp->b_flags &= ~B_ASYNC; 517 relpbuf(nbp, NULL); 518 } 519 520 /* 521 * getchainbuf: 522 * 523 * Obtain a physical buffer and chain it to its parent buffer. When 524 * I/O completes, the parent buffer will be B_SIGNAL'd. Errors are 525 * automatically propogated to the parent 526 * 527 * Since these are brand new buffers, we do not have to clear B_INVAL 528 * and B_ERROR because they are already clear. 529 */ 530 531 struct buf * 532 getchainbuf(struct buf *bp, struct vnode *vp, int flags) 533 { 534 struct buf *nbp = getpbuf(NULL); 535 536 nbp->b_chain.parent = bp; 537 ++bp->b_chain.count; 538 539 if (bp->b_chain.count > 4) 540 waitchainbuf(bp, 4, 0); 541 542 nbp->b_flags = B_CALL | (bp->b_flags & B_ORDERED) | flags; 543 nbp->b_iodone = vm_pager_chain_iodone; 544 545 if (vp) 546 pbgetvp(vp, nbp); 547 return(nbp); 548 } 549 550 void 551 flushchainbuf(struct buf *nbp) 552 { 553 if (nbp->b_bcount) { 554 nbp->b_bufsize = nbp->b_bcount; 555 if ((nbp->b_flags & B_READ) == 0) 556 nbp->b_dirtyend = nbp->b_bcount; 557 BUF_KERNPROC(nbp); 558 VOP_STRATEGY(nbp->b_vp, nbp); 559 } else { 560 biodone(nbp); 561 } 562 } 563 564 void 565 waitchainbuf(struct buf *bp, int count, int done) 566 { 567 int s; 568 569 s = splbio(); 570 while (bp->b_chain.count > count) { 571 bp->b_flags |= B_WANT; 572 tsleep(bp, 0, "bpchain", 0); 573 } 574 if (done) { 575 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) { 576 bp->b_flags |= B_ERROR; 577 bp->b_error = EINVAL; 578 } 579 biodone(bp); 580 } 581 splx(s); 582 } 583 584 void 585 autochaindone(struct buf *bp) 586 { 587 int s; 588 589 s = splbio(); 590 if (bp->b_chain.count == 0) 591 biodone(bp); 592 else 593 bp->b_xflags |= BX_AUTOCHAINDONE; 594 splx(s); 595 } 596 597