/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 * $DragonFly: src/sys/vm/vm_pager.c,v 1.24 2007/11/06 03:50:01 dillon Exp $
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpages (vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc (void *, off_t, vm_prot_t, off_t);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc (vm_object_t);

/*
 * The dead pager backs OBJT_DEAD objects, i.e. objects whose real
 * backing store has been torn down (for example, a reclaimed vnode).
 * Every operation fails or is a no-op in a well-defined way.
 */
static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int req)
{
	return VM_PAGER_FAIL;
}

static vm_object_t
dead_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t off)
{
	return NULL;
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev,
		   int *next)
{
	if (prev)
		*prev = 0;
	if (next)
		*next = 0;
	return FALSE;
}

static void
dead_pager_dealloc(vm_object_t object)
{
	return;
}

static struct pagerops deadpagerops = {
	NULL,
	dead_pager_alloc,
	dead_pager_dealloc,
	dead_pager_getpages,
	dead_pager_putpages,
	dead_pager_haspage,
	NULL
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
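
/*
 * An object's type (the OBJT_* constants above) doubles as its index
 * into pagertab[], so pager dispatch is a table lookup followed by an
 * indirect call.  A minimal sketch of the pattern, assuming a valid
 * object pointer and page index (both hypothetical here):
 *
 *	struct pagerops *ops = pagertab[object->type];
 *	int prev, next;
 *
 *	if (ops->pgo_haspage != NULL &&
 *	    (*ops->pgo_haspage)(object, pindex, &prev, &next))
 *		(the pager has backing store for pindex)
 */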

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;

static int bswneeded;
static vm_offset_t swapbkva;		/* swap buffers kva */
static TAILQ_HEAD(swqueue, buf) bswlist;
static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	/*
	 * Initialize the swap buffer list.
	 */
	TAILQ_INIT(&bswlist);

	/*
	 * Initialize the known pagers.
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_init != NULL))
			(*(*pgops)->pgo_init) ();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva = kmem_alloc_pageable(&pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.  Each pbuf permanently owns a MAXPHYS-sized
	 * piece of the reserved KVA.
	 */
	bp = swbuf;
	for (i = 0; i < nswbuf; ++i, ++bp) {
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva;
		bp->b_kvasize = MAXPHYS;
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
	}

	/*
	 * Allow the clustering code to use half of our pbufs.
	 */
	cluster_pbuf_freecnt = nswbuf / 2;
}
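
/*
 * Worked example of the KVA carve-up above (illustrative numbers only,
 * since nswbuf is tuned at boot): with MAXPHYS = 64KB and nswbuf = 128,
 * swapbkva spans 128 * 64KB = 8MB of pager_map, exactly PAGER_MAP_SIZE,
 * and pbuf i permanently owns the 64KB window at swapbkva + i * MAXPHYS.
 * A pbuf therefore never has to allocate KVA at I/O time, which is what
 * makes it usable in low-memory situations.
 */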

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
		  vm_prot_t prot, off_t off)
{
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc) (handle, size, prot, off));
	return (NULL);
}

void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_strategy:
 *
 *	Called with no specific spl.
 *	Execute the strategy routine directly through the pager.
 */
void
vm_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp;

	if (pagertab[object->type]->pgo_strategy) {
		(*pagertab[object->type]->pgo_strategy)(object, bio);
	} else {
		bp = bio->bio_buf;
		bp->b_flags |= B_ERROR;
		bp->b_error = ENXIO;
		biodone(bio);
	}
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

#if 0
/*
 * vm_pager_sync:
 *
 *	Called by the pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}
#endif

/*
 * Look up a VM object by its pager handle on the given pager list.
 * Returns NULL if no object with that handle is found.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle)
			return (object);
	}
	return (NULL);
}

/*
 * Initialize a physical buffer.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_error = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = MAXPHYS;
	initbufbio(bp);
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * Allocate a physical buffer.
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * MPSAFE
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock_wr(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;
		bswneeded = 1;
		ssleep(&bswneeded, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock_wr(&bswspin);

	initpbuf(bp);

	return bp;
}
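
/*
 * A minimal sketch of the intended pairing, with a hypothetical
 * per-subsystem counter (the real requirement is only that getpbuf()
 * and relpbuf() are matched up using the same counter variable):
 *
 *	static int mysubsys_pbuf_freecnt = -1;
 *
 *	At initialization:
 *		mysubsys_pbuf_freecnt = nswbuf / 2;
 *
 *	Per I/O:
 *		struct buf *bp = getpbuf(&mysubsys_pbuf_freecnt);
 *		(set up b_data / b_bcount, issue the I/O, wait for it)
 *		relpbuf(bp, &mysubsys_pbuf_freecnt);
 */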

/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 *
 * MPSAFE
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock_wr(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		spin_unlock_wr(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;

	spin_unlock_wr(&bswspin);

	initpbuf(bp);

	return bp;
}

/*
 * Release a physical buffer.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * MPSAFE
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake_bsw = 0;
	int wake_freecnt = 0;

	KKASSERT(bp->b_flags & B_PAGING);

	spin_lock_wr(&bswspin);

	BUF_UNLOCK(bp);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
	if (bswneeded) {
		bswneeded = 0;
		wake_bsw = 1;
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wake_freecnt = 1;
	}

	spin_unlock_wr(&bswspin);

	/*
	 * Issue the wakeups only after bswspin has been dropped so the
	 * woken threads do not immediately contend on the spinlock.
	 */
	if (wake_bsw)
		wakeup(&bswneeded);
	if (wake_freecnt)
		wakeup(pfreecnt);
}
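
/*
 * Sketch of the non-blocking variant for callers that cannot afford
 * to sleep (counter name hypothetical, as above):
 *
 *	struct buf *bp;
 *
 *	if ((bp = trypbuf(&mysubsys_pbuf_freecnt)) == NULL) {
 *		(no pbuf available; defer or retry the operation later)
 *	} else {
 *		(use bp, then relpbuf(bp, &mysubsys_pbuf_freecnt))
 *	}
 */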