/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 * $DragonFly: src/sys/vm/vm_pager.c,v 1.24 2007/11/06 03:50:01 dillon Exp $
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/dsched.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
static void dead_pager_dealloc (vm_object_t);

/*
 * No requirements.
 */
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
	return VM_PAGER_FAIL;
}

/*
 * No requirements.
 */
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

/*
 * No requirements.
 */
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return FALSE;
}

/*
 * No requirements.
 */
static void
dead_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->swblock_count == 0);
}

/*
 * Positional initialization (pgo_dealloc, pgo_getpage, pgo_putpages,
 * pgo_haspage; see struct pagerops in vm/vm_pager.h).
 */
static struct pagerops deadpagerops = {
	dead_pager_dealloc,
	dead_pager_getpage,
	dead_pager_putpages,
	dead_pager_haspage
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.  That works
 * out to 64 * 64KB = 4MB, so the 8MB map below leaves 2x headroom.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

TAILQ_HEAD(swqueue, buf);

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;

static int bswneeded_raw;		/* raw pbuf shortage flag */
static int bswneeded_kva;		/* kva pbuf shortage flag */
static int nswbuf_raw;			/* number of raw pbufs */
static struct buf *swbuf_raw;		/* raw pbuf array */
static vm_offset_t swapbkva;		/* swap buffers kva */
static struct swqueue bswlist_raw;	/* without kva */
static struct swqueue bswlist_kva;	/* with kva */
static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);
static int pbuf_raw_count;
static int pbuf_kva_count;

SYSCTL_INT(_vfs, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0, "");
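
/*
 * Illustrative sketch (not part of the original source): every pager
 * entry point dispatches through pagertab[] above, using the object's
 * type as the index, so a call on an OBJT_SWAP object lands in
 * swappagerops:
 *
 *	struct pagerops *ops = pagertab[object->type];
 *	(*ops->pgo_dealloc)(object);
 *
 * vm_pager_deallocate() below is the real instance of this pattern;
 * the inline wrappers in vm/vm_pager.h do the same for the other ops.
 */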

/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
	TAILQ_INIT(&bswlist_raw);
	TAILQ_INIT(&bswlist_kva);
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL)

/*
 * Called from the low level boot code only.
 */
void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva = kmem_alloc_pageable(&pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.  These pbufs have KVA reservations; each
	 * one owns the MAXPHYS-sized window at swapbkva + i * MAXPHYS.
	 */
	bp = swbuf;
	for (i = 0; i < nswbuf; ++i, ++bp) {
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva;
		bp->b_kvasize = MAXPHYS;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
		++pbuf_kva_count;
	}

	/*
	 * Initial pbuf setup.  These pbufs do not have KVA reservations,
	 * so we can have a lot more of them.  These are typically used
	 * to massage low level buf/bio requests.
	 */
	nswbuf_raw = nbuf * 2;
	swbuf_raw = (void *)kmem_alloc(&kernel_map,
			round_page(nswbuf_raw * sizeof(struct buf)));
	bp = swbuf_raw;
	for (i = 0; i < nswbuf_raw; ++i, ++bp) {
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_raw, bp, b_freelist);
		++pbuf_raw_count;
	}

	/*
	 * Allow the clustering code to use half of our pbufs.
	 */
	cluster_pbuf_freecnt = nswbuf / 2;
}

/*
 * No requirements.
 */
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

#if 0
/*
 * vm_pager_sync:
 *
 *	Called by pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

#endif

/*
 * Initialize a physical buffer.  The buffer is returned exclusively
 * locked; relpbuf() drops the lock again.
 *
 * No requirements.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;	/* NULL if pbuf sans kva */
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_error = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = MAXPHYS;
	initbufbio(bp);
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}
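
/*
 * Usage sketch (an illustrative assumption, not original code): a pbuf
 * obtained without a KVA reservation comes back from initpbuf() with
 * b_data == NULL, so a caller that needs a data pointer supplies its
 * own memory before issuing I/O:
 *
 *	struct buf *bp = getpbuf(NULL);	raw pbuf, b_data == NULL
 *	bp->b_data = callers_buffer;	hypothetical caller-owned memory
 *	bp->b_bcount = callers_size;
 *	... issue the I/O and wait for completion ...
 *	relpbuf(bp, NULL);
 */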

/*
 * Allocate a physical buffer.
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter in allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * Physical buffers can be with or without KVA space reserved.  There
 * are severe limitations on the ones with KVA reserved, and fewer
 * limitations on the ones without.  getpbuf() gets one without,
 * getpbuf_kva() gets one with.
 *
 * No requirements.
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist_raw)) != NULL)
			break;
		bswneeded_raw = 1;
		ssleep(&bswneeded_raw, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
	--pbuf_raw_count;
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));

	return (bp);
}

struct buf *
getpbuf_kva(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist_kva)) != NULL)
			break;
		bswneeded_kva = 1;
		ssleep(&bswneeded_kva, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
	--pbuf_kva_count;
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));

	return (bp);
}

/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 *
 * No requirements.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_raw)) == NULL) {
		spin_unlock(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
	--pbuf_raw_count;
	--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);

	return bp;
}

struct buf *
trypbuf_kva(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_kva)) == NULL) {
		spin_unlock(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
	--pbuf_kva_count;
	--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);

	return bp;
}
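
/*
 * Usage sketch (illustrative; "mysubsys" is a hypothetical client): a
 * subsystem caps its pbuf consumption with a private counter handed to
 * every getpbuf()/relpbuf() pair, typically initialized to nswbuf / 2:
 *
 *	static int mysubsys_pbuf_freecnt = -1;	-1 disables the cap
 *
 *	mysubsys_pbuf_freecnt = nswbuf / 2;	at initialization time
 *	...
 *	bp = getpbuf_kva(&mysubsys_pbuf_freecnt); may block at zero
 *	... set up and issue the I/O ...
 *	relpbuf(bp, &mysubsys_pbuf_freecnt);	must pass the same counter
 *
 * trypbuf()/trypbuf_kva() are the non-blocking variants; they return
 * NULL instead of sleeping when the counter or free list is exhausted.
 */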

/*
 * Release a physical buffer.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake_bsw_kva = 0;
	int wake_bsw_raw = 0;
	int wake_freecnt = 0;

	KKASSERT(bp->b_flags & B_PAGING);
	dsched_exit_buf(bp);

	BUF_UNLOCK(bp);

	spin_lock(&bswspin);
	if (bp->b_kvabase) {
		TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
		++pbuf_kva_count;
	} else {
		TAILQ_INSERT_HEAD(&bswlist_raw, bp, b_freelist);
		++pbuf_raw_count;
	}
	if (bswneeded_kva) {
		bswneeded_kva = 0;
		wake_bsw_kva = 1;
	}
	if (bswneeded_raw) {
		bswneeded_raw = 0;
		wake_bsw_raw = 1;
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wake_freecnt = 1;
	}
	spin_unlock(&bswspin);

	/*
	 * The wakeup decisions are latched while holding the spinlock but
	 * issued only after releasing it, so woken threads do not
	 * immediately contend on bswspin.
	 */
	if (wake_bsw_kva)
		wakeup(&bswneeded_kva);
	if (wake_bsw_raw)
		wakeup(&bswneeded_raw);
	if (wake_freecnt)
		wakeup(pfreecnt);
}