/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $
 * $DragonFly: src/sys/kern/vfs_bio.c,v 1.49 2005/08/25 20:11:18 hmp Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 *
 * See buf(9) for more info.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <vm/vm_page2.h>

/*
 * Buffer queues.
 */
#define BUFFER_QUEUES	6
enum bufq_type {
        BQUEUE_NONE,            /* not on any queue */
        BQUEUE_LOCKED,          /* locked buffers */
        BQUEUE_CLEAN,           /* non-B_DELWRI buffers */
        BQUEUE_DIRTY,           /* B_DELWRI buffers */
        BQUEUE_EMPTYKVA,        /* empty buffer headers with KVA assignment */
        BQUEUE_EMPTY            /* empty buffer headers */
};
TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct bio_ops bioops;          /* I/O operation notification */

struct buf *buf;                /* buffer header pool */

static void vm_hold_free_pages(struct buf *bp, vm_offset_t from,
                vm_offset_t to);
static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
                vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
                int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf *bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
#if 0
static void vfs_backgroundwritedone(struct buf *bp);
#endif
static int flushbufqueues(void);

static int bd_request;

static void buf_daemon(void);

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
int vmiodirenable = TRUE;
int runningbufspace;
struct lwkt_token buftimetoken;  /* Interlock on setting prio and timo */

static int bufspace, maxbufspace,
        bufmallocspace, maxbufmallocspace, lobufspace, hibufspace;
static int bufreusecnt, bufdefragcnt, buffreekvacnt;
static int needsbuffer;
static int lorunningspace, hirunningspace, runningbufreq;
static int numdirtybuffers, lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int getnewbufcalls;
static int getnewbufrestarts;

/*
 * Sysctls for operational control of the buffer cache.
 */
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
    "Number of dirty buffers to flush before bufdaemon becomes inactive");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
    "High watermark used to trigger explicit flushing of dirty buffers");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
    "Low watermark for special reserve in low-memory situations");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
    "High watermark for special reserve in low-memory situations");
SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
    "Minimum amount of buffer space required for active I/O");
SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
    "Maximum amount of buffer space usable for active I/O");
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
    "Use the VM system for performing directory writes");
/*
 * Sysctls determining current state of the buffer cache.
 */
SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
    "Number of pending dirty buffers");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
    "Number of free buffers on the buffer cache free list");
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
    "I/O bytes currently in progress due to asynchronous writes");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
    "Hard limit on maximum amount of memory usable for buffer space");
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
    "Soft limit on maximum amount of memory usable for buffer space");
SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
    "Minimum amount of memory to reserve for system buffer space");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
    "Amount of memory available for buffers");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RD, &maxbufmallocspace,
    0, "Maximum amount of memory reserved for buffers using malloc");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
    "Amount of memory currently used by buffers allocated with malloc");
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD, &getnewbufcalls, 0,
    "New buffer header acquisition requests");
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD, &getnewbufrestarts,
    0, "New buffer header acquisition restarts");
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RD, &bufdefragcnt, 0,
    "Number of buffer acquisition restarts due to a fragmented buffer map");
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RD, &buffreekvacnt, 0,
    "Number of times buffer KVA space was deallocated");
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RD, &bufreusecnt, 0,
    "Number of times buffer re-use operations were successful");
SYSCTL_INT(_debug_sizeof, OID_AUTO, buf, CTLFLAG_RD, 0, sizeof(struct buf),
    "sizeof(struct buf)");

#if 0
/*
 * Disable background writes for now.  There appear to be races in the
 * flags tests and locking operations as well as races in the completion
 * code modifying the original bp (origbp) without holding a lock, assuming
 * critical section protection when there might not be critical section
 * protection.
 *
 * XXX disable also because the RB tree can't handle multiple blocks with
 * the same lblkno.
 */
static int dobkgrdwrite = 0;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");
#endif

static int bufhashmask;
static int bufhashshift;
static LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
char *buf_wmesg = BUF_WMESG;

extern int vm_swap_size;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */

/*
 * Buffer hash table code.  Note that the logical block scans linearly, which
 * gives us some L1 cache locality.
 */

static __inline
struct bufhashhdr *
bufhash(struct vnode *vnp, daddr_t bn)
{
        u_int64_t hashkey64;
        int hashkey;

        /*
         * A variation on the Fibonacci hash that Knuth credits to
         * R. W. Floyd, see Knuth's _Art of Computer Programming,
         * Volume 3 / Sorting and Searching_
         *
         * We reduce the argument to 32 bits before doing the hash to
         * avoid the need for a slow 64x64 multiply on 32 bit platforms.
         *
         * sizeof(struct vnode) is 168 on i386, so toss some of the lower
         * bits of the vnode address to reduce the key range, which
         * improves the distribution of keys across buckets.
         *
         * The file system cylinder group blocks are very heavily
         * used.  They are located at intervals of fpg, which is
         * on the order of 89 to 94 * 2^10, depending on other
         * filesystem parameters, for a 16k block size.  Smaller block
         * sizes will reduce fpg approximately proportionally.  This
         * will cause the cylinder group index to be hashed using the
         * lower bits of the hash multiplier, which will not distribute
         * the keys as uniformly in a classic Fibonacci hash where a
         * relatively small number of the upper bits of the result
         * are used.  Using 2^16 as a close-enough approximation to
         * fpg, split the hash multiplier in half, with the upper 16
         * bits being the inverse of the golden ratio, and the lower
         * 16 bits being a fraction between 1/3 and 3/7 (closer to
         * 3/7 in this case), that gives good experimental results.
         */
        hashkey64 = ((u_int64_t)(uintptr_t)vnp >> 3) + (u_int64_t)bn;
        hashkey = (((u_int32_t)(hashkey64 + (hashkey64 >> 32)) * 0x9E376DB1u) >>
            bufhashshift) & bufhashmask;
        return(&bufhashtbl[hashkey]);
}

/*
 * numdirtywakeup:
 *
 *	If someone is blocked due to there being too many dirty buffers,
 *	and numdirtybuffers is now reasonable, wake them up.
 */
static __inline void
numdirtywakeup(int level)
{
        if (numdirtybuffers <= level) {
                if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
                        needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
                        wakeup(&needsbuffer);
                }
        }
}

/*
 * bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.  Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */
static __inline void
bufspacewakeup(void)
{
        /*
         * If someone is waiting for BUF space, wake them up.  Even
         * though we haven't freed the kva space yet, the waiting
         * process will be able to now.
         */
        if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
                needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
                wakeup(&needsbuffer);
        }
}

/*
 * runningbufwakeup:
 *
 *	Accounting for I/O in progress.
 */
static __inline void
runningbufwakeup(struct buf *bp)
{
        if (bp->b_runningbufspace) {
                runningbufspace -= bp->b_runningbufspace;
                bp->b_runningbufspace = 0;
                if (runningbufreq && runningbufspace <= lorunningspace) {
                        runningbufreq = 0;
                        wakeup(&runningbufreq);
                }
        }
}

/*
 * bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache (else buffer space runs out first, usually).
 */
static __inline void
bufcountwakeup(void)
{
        ++numfreebuffers;
        if (needsbuffer) {
                needsbuffer &= ~VFS_BIO_NEED_ANY;
                if (numfreebuffers >= hifreebuffers)
                        needsbuffer &= ~VFS_BIO_NEED_FREE;
                wakeup(&needsbuffer);
        }
}

/*
 * waitrunningbufspace()
 *
 *	runningbufspace is a measure of the amount of I/O currently
 *	running.  This routine is used in async-write situations to
 *	prevent creating huge backups of pending writes to a device.
 *	Only asynchronous writes are governed by this function.
 *
 *	Reads will adjust runningbufspace, but will not block based on it.
 *	The read load has a side effect of reducing the allowed write load.
 *
 *	This does NOT turn an async write into a sync write.  It waits
 *	for earlier writes to complete and generally returns before the
 *	caller's write has reached the device.
 */
static __inline void
waitrunningbufspace(void)
{
        if (runningbufspace > hirunningspace) {
                crit_enter();
                while (runningbufspace > hirunningspace) {
                        ++runningbufreq;
                        tsleep(&runningbufreq, 0, "wdrain", 0);
                }
                crit_exit();
        }
}

/*
 * vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
                  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
                  vm_page_t m)
{
        if (bp->b_flags & B_CACHE) {
                int base = (foff + off) & PAGE_MASK;
                if (vm_page_is_valid(m, base, size) == 0)
                        bp->b_flags &= ~B_CACHE;
        }
}

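/*
 * Illustrative sketch (not compiled): how the runningbufspace accounting
 * above pairs up in the write path.  bwrite() below charges the buffer
 * before issuing I/O and throttles with waitrunningbufspace(); the
 * completion side credits it back via runningbufwakeup(), waking any
 * thread sleeping on runningbufreq.
 */
#if 0
        /* issue side (see bwrite() below) */
        bp->b_runningbufspace = bp->b_bufsize;
        runningbufspace += bp->b_runningbufspace;
        VOP_STRATEGY(bp->b_vp, bp);
        waitrunningbufspace();          /* async case only */

        /* completion side */
        runningbufwakeup(bp);
#endif
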
/*
 * bd_wakeup:
 *
 *	Wake up the buffer daemon if the number of outstanding dirty buffers
 *	is above the specified threshold 'dirtybuflevel'.
 *
 *	The buffer daemon is explicitly woken up when (a) the pending number
 *	of dirty buffers exceeds the recovery and stall mid-point value,
 *	(b) during bwillwrite() or (c) the buf freelist was exhausted.
 */
static __inline__
void
bd_wakeup(int dirtybuflevel)
{
        if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
                bd_request = 1;
                wakeup(&bd_request);
        }
}

/*
 * bd_speedup:
 *
 *	Speed up the buffer cache flushing process.
 */
static __inline__
void
bd_speedup(void)
{
        bd_wakeup(1);
}

/*
 * bufhashinit:
 *
 *	Initialize buffer headers and related structures.
 */
caddr_t
bufhashinit(caddr_t vaddr)
{
        /* first, make a null hash table */
        bufhashshift = 29;
        for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1)
                bufhashshift--;
        bufhashtbl = (void *)vaddr;
        vaddr = vaddr + sizeof(*bufhashtbl) * bufhashmask;
        --bufhashmask;
        return(vaddr);
}

/*
 * bufinit:
 *
 *	Load time initialisation of the buffer cache, called from machine
 *	dependent initialization code.
 */
void
bufinit(void)
{
        struct buf *bp;
        vm_offset_t bogus_offset;
        int i;

        LIST_INIT(&invalhash);
        lwkt_token_init(&buftimetoken);

        for (i = 0; i <= bufhashmask; i++)
                LIST_INIT(&bufhashtbl[i]);

        /* next, make a null set of free lists */
        for (i = 0; i < BUFFER_QUEUES; i++)
                TAILQ_INIT(&bufqueues[i]);

        /* finally, initialize each buffer header and stick on empty q */
        for (i = 0; i < nbuf; i++) {
                bp = &buf[i];
                bzero(bp, sizeof *bp);
                bp->b_bio.bio_buf = bp; /* back pointer (temporary) */
                bp->b_flags = B_INVAL;  /* we're just an empty header */
                bp->b_dev = NODEV;
                bp->b_qindex = BQUEUE_EMPTY;
                bp->b_xflags = 0;
                bp->b_iodone = NULL;
                xio_init(&bp->b_xio);
                LIST_INIT(&bp->b_dep);
                BUF_LOCKINIT(bp);
                TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_EMPTY], bp, b_freelist);
                LIST_INSERT_HEAD(&invalhash, bp, b_hash);
        }

        /*
         * maxbufspace is the absolute maximum amount of buffer space we are
         * allowed to reserve in KVM and in real terms.  The absolute maximum
         * is nominally used by buf_daemon.  hibufspace is the nominal maximum
         * used by most other processes.  The differential is required to
         * ensure that buf_daemon is able to run when other processes might
         * be blocked waiting for buffer space.
         *
         * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
         * this may result in KVM fragmentation which is not handled optimally
         * by the system.
         */
        maxbufspace = nbuf * BKVASIZE;
        hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
        lobufspace = hibufspace - MAXBSIZE;

        lorunningspace = 512 * 1024;
        hirunningspace = 1024 * 1024;

        /*
         * Limit the amount of malloc memory since it is wired permanently
         * into the kernel space.  Even though this is accounted for in the
         * buffer allocation, we don't want the malloced region to grow
         * uncontrolled.  The malloc scheme improves memory utilization
         * significantly on average (small) directories.
         */
        maxbufmallocspace = hibufspace / 20;

        /*
         * Reduce the chance of a deadlock occurring by limiting the number
         * of delayed-write dirty buffers we allow to stack up.
         */
        hidirtybuffers = nbuf / 4 + 20;
        numdirtybuffers = 0;

        /*
         * To support extreme low-memory systems, make sure hidirtybuffers
         * cannot eat up all available buffer space.  This occurs when our
         * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
         * buffer space assuming BKVASIZE'd (8K) buffers.
         */
        while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
                hidirtybuffers >>= 1;
        }
        lodirtybuffers = hidirtybuffers / 2;

        /*
         * Try to keep the number of free buffers in the specified range,
         * and give special processes (e.g. like buf_daemon) access to an
         * emergency reserve.
         */
        lofreebuffers = nbuf / 18 + 5;
        hifreebuffers = 2 * lofreebuffers;
        numfreebuffers = nbuf;

        /*
         * Maximum number of async ops initiated per buf_daemon loop.  This is
         * somewhat of a hack at the moment, we really need to limit ourselves
         * based on the number of bytes of I/O in-transit that were initiated
         * from buf_daemon.
         */

        bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
        bogus_page = vm_page_alloc(kernel_object,
                    ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
                    VM_ALLOC_NORMAL);
        vmstats.v_wire_count++;
}

/*
 * bfreekva:
 *
 *	Free the KVA allocation for buffer 'bp'.
 *
 *	Must be called from a critical section as this is the only locking for
 *	buffer_map.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
static void
bfreekva(struct buf *bp)
{
        int count;

        if (bp->b_kvasize) {
                ++buffreekvacnt;
                count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
                vm_map_lock(buffer_map);
                bufspace -= bp->b_kvasize;
                vm_map_delete(buffer_map,
                    (vm_offset_t)bp->b_kvabase,
                    (vm_offset_t)bp->b_kvabase + bp->b_kvasize,
                    &count
                );
                vm_map_unlock(buffer_map);
                vm_map_entry_release(count);
                bp->b_kvasize = 0;
                bufspacewakeup();
        }
}

/*
 * bremfree:
 *
 *	Remove the buffer from the appropriate free list.
 */
void
bremfree(struct buf *bp)
{
        int old_qindex;

        crit_enter();
        old_qindex = bp->b_qindex;

        if (bp->b_qindex != BQUEUE_NONE) {
                KASSERT(BUF_REFCNTNB(bp) == 1,
                        ("bremfree: bp %p not locked", bp));
                TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
                bp->b_qindex = BQUEUE_NONE;
        } else {
                if (BUF_REFCNTNB(bp) <= 1)
                        panic("bremfree: removing a buffer not on a queue");
        }

        /*
         * Fixup numfreebuffers count.  If the buffer is invalid or not
         * delayed-write and it was on one of the free queues (CLEAN, DIRTY,
         * EMPTY, or EMPTYKVA), the buffer was counted as free and we must
         * decrement numfreebuffers.
         */
        if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
                switch(old_qindex) {
                case BQUEUE_DIRTY:
                case BQUEUE_CLEAN:
                case BQUEUE_EMPTY:
                case BQUEUE_EMPTYKVA:
                        --numfreebuffers;
                        break;
                default:
                        break;
                }
        }
        crit_exit();
}

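/*
 * Illustrative sketch (not compiled): the canonical lock-then-remove
 * sequence used by getnewbuf() and vfs_bio_awrite() below.  A buffer must
 * be exclusively locked before it is pulled off its free list, and it is
 * handed back via brelse()/bqrelse(), which requeue it.
 */
#if 0
        if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
                bremfree(bp);
                /* ... operate on bp ... */
                brelse(bp);     /* or bqrelse(bp) to keep it cached */
        }
#endif
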
/*
 * bread:
 *
 *	Get a buffer with the specified data.  Look in the cache first.  We
 *	must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 *	is set, the buffer is valid and we do not have to do anything (see
 *	getblk()).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
{
        struct buf *bp;

        bp = getblk(vp, blkno, size, 0, 0);
        *bpp = bp;

        /* if not found in cache, do some I/O */
        if ((bp->b_flags & B_CACHE) == 0) {
                KASSERT(!(bp->b_flags & B_ASYNC),
                        ("bread: illegal async bp %p", bp));
                bp->b_flags |= B_READ;
                bp->b_flags &= ~(B_ERROR | B_INVAL);
                vfs_busy_pages(bp, 0);
                VOP_STRATEGY(vp, bp);
                return (biowait(bp));
        }
        return (0);
}

/*
 * breadn:
 *
 *	Operates like bread, but also starts asynchronous I/O on
 *	read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
 *	to initiating I/O.  If B_CACHE is set, the buffer is valid
 *	and we do not have to do anything.
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
        int *rabsize, int cnt, struct buf **bpp)
{
        struct buf *bp, *rabp;
        int i;
        int rv = 0, readwait = 0;

        *bpp = bp = getblk(vp, blkno, size, 0, 0);

        /* if not found in cache, do some I/O */
        if ((bp->b_flags & B_CACHE) == 0) {
                bp->b_flags |= B_READ;
                bp->b_flags &= ~(B_ERROR | B_INVAL);
                vfs_busy_pages(bp, 0);
                VOP_STRATEGY(vp, bp);
                ++readwait;
        }

        for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
                if (inmem(vp, *rablkno))
                        continue;
                rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

                if ((rabp->b_flags & B_CACHE) == 0) {
                        rabp->b_flags |= B_READ | B_ASYNC;
                        rabp->b_flags &= ~(B_ERROR | B_INVAL);
                        vfs_busy_pages(rabp, 0);
                        BUF_KERNPROC(rabp);
                        VOP_STRATEGY(vp, rabp);
                } else {
                        brelse(rabp);
                }
        }

        if (readwait) {
                rv = biowait(bp);
        }
        return (rv);
}

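/*
 * Example (sketch, hypothetical caller): a filesystem read path using
 * breadn() to fetch a block synchronously while firing one asynchronous
 * read-ahead.  'vp', 'lblkno' and 'fs_bsize' are assumed arguments.
 */
#if 0
static int
example_read(struct vnode *vp, daddr_t lblkno, int fs_bsize)
{
        struct buf *bp;
        daddr_t rablkno = lblkno + 1;
        int rabsize = fs_bsize;
        int error;

        error = breadn(vp, lblkno, fs_bsize, &rablkno, &rabsize, 1, &bp);
        if (error) {
                brelse(bp);
                return (error);
        }
        /* ... consume bp->b_data ... */
        bqrelse(bp);            /* expect to need the data again soon */
        return (0);
}
#endif
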
/*
 * bwrite:
 *
 *	Write, release buffer on completion.  (Done by iodone
 *	if async).  Do not bother writing anything if the buffer
 *	is invalid.
 *
 *	Note that we set B_CACHE here, indicating that buffer is
 *	fully valid and thus cacheable.  This is true even of NFS
 *	now so we set it generally.  This could be set either here
 *	or in biodone() since the I/O is synchronous.  We put it
 *	here.
 */
int
bwrite(struct buf *bp)
{
        int oldflags;
#if 0
        struct buf *newbp;
#endif

        if (bp->b_flags & B_INVAL) {
                brelse(bp);
                return (0);
        }

        oldflags = bp->b_flags;

        if (BUF_REFCNTNB(bp) == 0)
                panic("bwrite: buffer is not busy???");
        crit_enter();
        /*
         * If a background write is already in progress, delay
         * writing this block if it is asynchronous.  Otherwise
         * wait for the background write to complete.
         */
        if (bp->b_xflags & BX_BKGRDINPROG) {
                if (bp->b_flags & B_ASYNC) {
                        crit_exit();
                        bdwrite(bp);
                        return (0);
                }
                bp->b_xflags |= BX_BKGRDWAIT;
                tsleep(&bp->b_xflags, 0, "biord", 0);
                if (bp->b_xflags & BX_BKGRDINPROG)
                        panic("bwrite: still writing");
        }

        /* Mark the buffer clean */
        bundirty(bp);

#if 0
        /*
         * If this buffer is marked for background writing and we
         * do not have to wait for it, make a copy and write the
         * copy so as to leave this buffer ready for further use.
         *
         * This optimization eats a lot of memory.  If we have a page
         * or buffer shortfall we can't do it.
         *
         * XXX DISABLED!  This had to be removed to support the RB_TREE
         * work and, really, this isn't the best place to do this sort
         * of thing anyway.  We really need a device copy-on-write feature.
         */
        if (dobkgrdwrite &&
            (bp->b_xflags & BX_BKGRDWRITE) &&
            (bp->b_flags & B_ASYNC) &&
            !vm_page_count_severe() &&
            !buf_dirty_count_severe()) {
                if (bp->b_iodone)
                        panic("bwrite: need chained iodone");

                /* get a new block */
                newbp = geteblk(bp->b_bufsize);

                /* set it to be identical to the old block */
                memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
                newbp->b_lblkno = bp->b_lblkno;
                newbp->b_blkno = bp->b_blkno;
                newbp->b_offset = bp->b_offset;
                newbp->b_iodone = vfs_backgroundwritedone;
                newbp->b_flags |= B_ASYNC;
                newbp->b_flags &= ~B_INVAL;
                bgetvp(bp->b_vp, newbp);

                /* move over the dependencies */
                if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_movedeps)
                        (*bioops.io_movedeps)(bp, newbp);

                /*
                 * Initiate write on the copy, release the original to
                 * the B_LOCKED queue so that it cannot go away until
                 * the background write completes.  If not locked it could go
                 * away and then be reconstituted while it was being written.
                 * If the reconstituted buffer were written, we could end up
                 * with two background copies being written at the same time.
                 */
                bp->b_xflags |= BX_BKGRDINPROG;
                bp->b_flags |= B_LOCKED;
                bqrelse(bp);
                bp = newbp;
        }
#endif

        bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
        bp->b_flags |= B_CACHE;

        bp->b_vp->v_numoutput++;
        vfs_busy_pages(bp, 1);

        /*
         * Normal bwrites pipeline writes
         */
        bp->b_runningbufspace = bp->b_bufsize;
        runningbufspace += bp->b_runningbufspace;

        crit_exit();
        if (oldflags & B_ASYNC)
                BUF_KERNPROC(bp);
        VOP_STRATEGY(bp->b_vp, bp);

        if ((oldflags & B_ASYNC) == 0) {
                int rtval = biowait(bp);
                brelse(bp);
                return (rtval);
        } else if ((oldflags & B_NOWDRAIN) == 0) {
                /*
                 * Don't allow the async write to saturate the I/O
                 * system.  Deadlocks can occur only if a device strategy
                 * routine (like in VN) turns around and issues another
                 * high-level write, in which case B_NOWDRAIN is expected
                 * to be set.  Otherwise we will not deadlock here because
                 * we are blocking waiting for I/O that is already in-progress
                 * to complete.
                 */
                waitrunningbufspace();
        }

        return (0);
}

#if 0
/*
 * Complete a background write started from bwrite.
 */
static void
vfs_backgroundwritedone(struct buf *bp)
{
        struct buf *origbp;

        /*
         * Find the original buffer that we are writing.
         */
        if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL)
                panic("backgroundwritedone: lost buffer");
        /*
         * Process dependencies then return any unfinished ones.
         */
        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
                (*bioops.io_complete)(bp);
        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_movedeps)
                (*bioops.io_movedeps)(bp, origbp);
        /*
         * Clear the BX_BKGRDINPROG flag in the original buffer
         * and awaken it if it is waiting for the write to complete.
         * If BX_BKGRDINPROG is not set in the original buffer it must
         * have been released and re-instantiated - which is not legal.
         */
        KASSERT((origbp->b_xflags & BX_BKGRDINPROG),
            ("backgroundwritedone: lost buffer2"));
        origbp->b_xflags &= ~BX_BKGRDINPROG;
        if (origbp->b_xflags & BX_BKGRDWAIT) {
                origbp->b_xflags &= ~BX_BKGRDWAIT;
                wakeup(&origbp->b_xflags);
        }
        /*
         * Clear the B_LOCKED flag and remove it from the locked
         * queue if it currently resides there.
         */
        origbp->b_flags &= ~B_LOCKED;
        if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
                bremfree(origbp);
                bqrelse(origbp);
        }
        /*
         * This buffer is marked B_NOCACHE, so when it is released
         * by biodone, it will be tossed.  We mark it with B_READ
         * to avoid biodone doing a second vwakeup.
         */
        bp->b_flags |= B_NOCACHE | B_READ;
        bp->b_flags &= ~(B_CACHE | B_DONE);
        bp->b_iodone = NULL;
        biodone(bp);
}
#endif

/*
 * bdwrite:
 *
 *	Delayed write.  (Buffer is marked dirty).  Do not bother writing
 *	anything if the buffer is marked invalid.
 *
 *	Note that since the buffer must be completely valid, we can safely
 *	set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 *	biodone() in order to prevent getblk from writing the buffer
 *	out synchronously.
 */
void
bdwrite(struct buf *bp)
{
        if (BUF_REFCNTNB(bp) == 0)
                panic("bdwrite: buffer is not busy");

        if (bp->b_flags & B_INVAL) {
                brelse(bp);
                return;
        }
        bdirty(bp);

        /*
         * Set B_CACHE, indicating that the buffer is fully valid.  This is
         * true even of NFS now.
         */
        bp->b_flags |= B_CACHE;

        /*
         * This bmap keeps the system from needing to do the bmap later,
         * perhaps when the system is attempting to do a sync.  Since it
         * is likely that the indirect block -- or whatever other data
         * structure the filesystem needs -- is still in memory now, it
         * is a good thing to do this.  Note also, that if the pageout
         * daemon is requesting a sync -- there might not be enough memory
         * to do the bmap then...  So, this is important to do.
         */
        if (bp->b_lblkno == bp->b_blkno) {
                VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
                         NULL, NULL);
        }

        /*
         * Set the *dirty* buffer range based upon the VM system dirty pages.
         */
        vfs_setdirty(bp);

        /*
         * We need to do this here to satisfy the vnode_pager and the
         * pageout daemon, so that it thinks that the pages have been
         * "cleaned".  Note that since the pages are in a delayed write
         * buffer -- the VFS layer "will" see that the pages get written
         * out on the next sync, or perhaps the cluster will be completed.
         */
        vfs_clean_pages(bp);
        bqrelse(bp);

        /*
         * Wakeup the buffer flushing daemon if we have a lot of dirty
         * buffers (midpoint between our recovery point and our stall
         * point).
         */
        bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);

        /*
         * note: we cannot initiate I/O from a bdwrite even if we wanted to,
         * due to the softdep code.
         */
}

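/*
 * Example (sketch, hypothetical caller): the usual delayed-write cycle.
 * The buffer comes from bread()/getblk(), is modified in place, and
 * bdwrite() marks it dirty and requeues it; buf_daemon or the update
 * daemon flushes it later.
 */
#if 0
        error = bread(vp, lblkno, fs_bsize, &bp);
        if (error == 0) {
                /* ... modify bp->b_data ... */
                bdwrite(bp);    /* queues the write, returns immediately */
        } else {
                brelse(bp);
        }
#endif
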
/*
 * bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear B_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE (else a panic will occur later).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the
 *	numfreebuffers count.
 *
 *	Must be called from a critical section.
 *	The buffer must be on BQUEUE_NONE.
 */
void
bdirty(struct buf *bp)
{
        KASSERT(bp->b_qindex == BQUEUE_NONE,
                ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
        bp->b_flags &= ~(B_READ|B_RELBUF);

        if ((bp->b_flags & B_DELWRI) == 0) {
                bp->b_flags |= B_DONE | B_DELWRI;
                reassignbuf(bp, bp->b_vp);
                ++numdirtybuffers;
                bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
        }
}

/*
 * bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the
 *	numfreebuffers count.
 *
 *	Must be called from a critical section.
 *
 *	The buffer is typically on BQUEUE_NONE but there is one case in
 *	brelse() that calls this function after placing the buffer on
 *	a different queue.
 */
void
bundirty(struct buf *bp)
{
        if (bp->b_flags & B_DELWRI) {
                bp->b_flags &= ~B_DELWRI;
                reassignbuf(bp, bp->b_vp);
                --numdirtybuffers;
                numdirtywakeup(lodirtybuffers);
        }
        /*
         * Since it is now being written, we can clear its deferred write
         * flag.
         */
        bp->b_flags &= ~B_DEFERRED;
}

/*
 * bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() (or the VOP routine anyway) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf *bp)
{
        bp->b_flags |= B_ASYNC;
        (void) VOP_BWRITE(bp->b_vp, bp);
}

/*
 * bowrite:
 *
 *	Ordered write.  Start output on a buffer, and flag it so that the
 *	device will write it in the order it was queued.  The buffer is
 *	released when the output completes.  bwrite() (or the VOP routine
 *	anyway) is responsible for handling B_INVAL buffers.
 */
int
bowrite(struct buf *bp)
{
        bp->b_flags |= B_ORDERED | B_ASYNC;
        return (VOP_BWRITE(bp->b_vp, bp));
}

/*
 * bwillwrite:
 *
 *	Called prior to the locking of any vnodes when we are expecting to
 *	write.  We do not want to starve the buffer cache with too many
 *	dirty buffers so we block here.  By blocking prior to the locking
 *	of any vnodes we attempt to avoid the situation where a locked vnode
 *	prevents the various system daemons from flushing related buffers.
 */
void
bwillwrite(void)
{
        if (numdirtybuffers >= hidirtybuffers) {
                crit_enter();
                while (numdirtybuffers >= hidirtybuffers) {
                        bd_wakeup(1);
                        needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
                        tsleep(&needsbuffer, 0, "flswai", 0);
                }
                crit_exit();
        }
}

/*
 * buf_dirty_count_severe:
 *
 *	Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{
        return(numdirtybuffers >= hidirtybuffers);
}

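/*
 * Example (sketch, hypothetical caller): a write path calls bwillwrite()
 * before acquiring any vnode locks, so a stall waiting for the dirty
 * buffer flush cannot happen while locks are held.  The locking calls
 * shown are placeholders for whatever the caller actually uses.
 */
#if 0
        bwillwrite();           /* may block; no locks held yet */
        /* lock vp, perform the VOP_WRITE()-level work, unlock vp */
#endif
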
/*
 * brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf *bp)
{
#ifdef INVARIANTS
        int saved_flags = bp->b_flags;
#endif

        KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
                ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

        crit_enter();

        if (bp->b_flags & B_LOCKED)
                bp->b_flags &= ~B_ERROR;

        if ((bp->b_flags & (B_READ | B_ERROR | B_INVAL)) == B_ERROR) {
                /*
                 * Failed write, redirty.  Must clear B_ERROR to prevent
                 * pages from being scrapped.  If B_INVAL is set then
                 * this case is not run and the next case is run to
                 * destroy the buffer.  B_INVAL can occur if the buffer
                 * is outside the range supported by the underlying device.
                 */
                bp->b_flags &= ~B_ERROR;
                bdirty(bp);
        } else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
                   (bp->b_bufsize <= 0)) {
                /*
                 * Either a failed I/O or we were asked to free or not
                 * cache the buffer.
                 */
                bp->b_flags |= B_INVAL;
                if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
                        (*bioops.io_deallocate)(bp);
                if (bp->b_flags & B_DELWRI) {
                        --numdirtybuffers;
                        numdirtywakeup(lodirtybuffers);
                }
                bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
        }

        /*
         * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
         * is called with B_DELWRI set, the underlying pages may wind up
         * getting freed causing a previous write (bdwrite()) to get 'lost'
         * because pages associated with a B_DELWRI bp are marked clean.
         *
         * We still allow the B_INVAL case to call vfs_vmio_release(), even
         * if B_DELWRI is set.
         *
         * If B_DELWRI is not set we may have to set B_RELBUF if we are low
         * on pages to return pages to the VM page queues.
         */
        if (bp->b_flags & B_DELWRI)
                bp->b_flags &= ~B_RELBUF;
        else if (vm_page_count_severe() && !(bp->b_xflags & BX_BKGRDINPROG))
                bp->b_flags |= B_RELBUF;

        /*
         * At this point destroying the buffer is governed by the B_INVAL
         * or B_RELBUF flags.
         */

        /*
         * VMIO buffer rundown.  It is not very necessary to keep a VMIO
         * buffer constituted, not even NFS buffers now.  Two flags affect
         * this.  If B_INVAL, the struct buf is invalidated but the VM
         * object is kept around (i.e. so it is trivial to reconstitute
         * the buffer later).
         *
         * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
         * invalidated.  B_ERROR cannot be set for a failed write unless the
         * buffer is also B_INVAL because it hits the re-dirtying code above.
         *
         * Normally we can do this whether a buffer is B_DELWRI or not.  If
         * the buffer is an NFS buffer, it is tracking piecemeal writes or
         * the commit state and we cannot afford to lose the buffer.  If the
         * buffer has a background write in progress, we need to keep it
         * around to prevent it from being reconstituted and starting a second
         * background write.
         */
        if ((bp->b_flags & B_VMIO)
            && !(bp->b_vp->v_tag == VT_NFS &&
                 !vn_isdisk(bp->b_vp, NULL) &&
                 (bp->b_flags & B_DELWRI))
        ) {
                /*
                 * Rundown for VMIO buffers which are not dirty NFS buffers.
                 */
                int i, j, resid;
                vm_page_t m;
                off_t foff;
                vm_pindex_t poff;
                vm_object_t obj;
                struct vnode *vp;

                vp = bp->b_vp;

                /*
                 * Get the base offset and length of the buffer.  Note that
                 * in the VMIO case if the buffer block size is not
                 * page-aligned then the b_data pointer may not be
                 * page-aligned.  But our b_xio.xio_pages array *IS* page
                 * aligned.
                 *
                 * block sizes less than DEV_BSIZE (usually 512) are not
                 * supported due to the page granularity bits (m->valid,
                 * m->dirty, etc...).
                 *
                 * See buf(9) for more information
                 */

                resid = bp->b_bufsize;
                foff = bp->b_offset;

                for (i = 0; i < bp->b_xio.xio_npages; i++) {
                        m = bp->b_xio.xio_pages[i];
                        vm_page_flag_clear(m, PG_ZERO);
                        /*
                         * If we hit a bogus page, fixup *all* of them
                         * now.  Note that we left these pages wired
                         * when we removed them so they had better exist,
                         * and they cannot be ripped out from under us so
                         * no critical section protection is necessary.
                         */
                        if (m == bogus_page) {
                                VOP_GETVOBJECT(vp, &obj);
                                poff = OFF_TO_IDX(bp->b_offset);

                                for (j = i; j < bp->b_xio.xio_npages; j++) {
                                        vm_page_t mtmp;

                                        mtmp = bp->b_xio.xio_pages[j];
                                        if (mtmp == bogus_page) {
                                                mtmp = vm_page_lookup(obj, poff + j);
                                                if (!mtmp) {
                                                        panic("brelse: page missing");
                                                }
                                                bp->b_xio.xio_pages[j] = mtmp;
                                        }
                                }

                                if ((bp->b_flags & B_INVAL) == 0) {
                                        pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
                                                bp->b_xio.xio_pages, bp->b_xio.xio_npages);
                                }
                                m = bp->b_xio.xio_pages[i];
                        }

                        /*
                         * Invalidate the backing store if B_NOCACHE is set
                         * (e.g. used with vinvalbuf()).  If this is NFS
                         * we impose a requirement that the block size be
                         * a multiple of PAGE_SIZE and create a temporary
                         * hack to basically invalidate the whole page.  The
                         * problem is that NFS uses really odd buffer sizes
                         * especially when tracking piecemeal writes and
                         * it also vinvalbuf()'s a lot, which would result
                         * in only partial page validation and invalidation
                         * here.  If the file page is mmap()'d, however,
                         * all the valid bits get set so after we invalidate
                         * here we would end up with weird m->valid values
                         * like 0xfc.  nfs_getpages() can't handle this so
                         * we clear all the valid bits for the NFS case
                         * instead of just some of them.
                         *
                         * The real bug is the VM system having to set m->valid
                         * to VM_PAGE_BITS_ALL for faulted-in pages, which
                         * itself is an artifact of the whole 512-byte
                         * granular mess that exists to support odd block
                         * sizes and UFS meta-data block sizes (e.g. 6144).
                         * A complete rewrite is required.
                         */
                        if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
                                int poffset = foff & PAGE_MASK;
                                int presid;

                                presid = PAGE_SIZE - poffset;
                                if (bp->b_vp->v_tag == VT_NFS &&
                                    bp->b_vp->v_type == VREG) {
                                        ; /* entire page */
                                } else if (presid > resid) {
                                        presid = resid;
                                }
                                KASSERT(presid >= 0, ("brelse: extra page"));
                                vm_page_set_invalid(m, poffset, presid);
                        }
                        resid -= PAGE_SIZE - (foff & PAGE_MASK);
                        foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
                }
                if (bp->b_flags & (B_INVAL | B_RELBUF))
                        vfs_vmio_release(bp);
        } else if (bp->b_flags & B_VMIO) {
                /*
                 * Rundown for VMIO buffers which are dirty NFS buffers.  Such
                 * buffers contain tracking ranges for NFS and cannot normally
                 * be released.  Due to the dirty check above this series of
                 * conditionals, B_RELBUF probably will never be set in this
                 * codepath.
                 */
                if (bp->b_flags & (B_INVAL | B_RELBUF))
                        vfs_vmio_release(bp);
        } else {
                /*
                 * Rundown for non-VMIO buffers.
                 */
                if (bp->b_flags & (B_INVAL | B_RELBUF)) {
#if 0
                        if (bp->b_vp)
                                printf("brelse bp %p %08x/%08lx: Warning, caught and fixed brelvp bug\n", bp, saved_flags, bp->b_flags);
#endif
                        if (bp->b_bufsize)
                                allocbuf(bp, 0);
                        if (bp->b_vp)
                                brelvp(bp);
                }
        }

        if (bp->b_qindex != BQUEUE_NONE)
                panic("brelse: free buffer onto another queue???");
        if (BUF_REFCNTNB(bp) > 1) {
                /* Temporary panic to verify exclusive locking */
                /* This panic goes away when we allow shared refs */
                panic("brelse: multiple refs");
                /* do not release to free list */
                BUF_UNLOCK(bp);
                crit_exit();
                return;
        }

        /*
         * Figure out the correct queue to place the cleaned up buffer on.
         * Buffers placed in the EMPTY or EMPTYKVA had better already be
         * disassociated from their vnode.
         */
        if (bp->b_bufsize == 0) {
                /*
                 * Buffers with no memory.  Due to conditionals near the top
                 * of brelse() such buffers should probably already be
                 * marked B_INVAL and disassociated from their vnode.
                 */
                bp->b_flags |= B_INVAL;
                bp->b_xflags &= ~BX_BKGRDWRITE;
                KASSERT(bp->b_vp == NULL,
                        ("bp1 %p flags %08x/%08lx vnode %p unexpectedly still associated!",
                        bp, saved_flags, bp->b_flags, bp->b_vp));
                if (bp->b_xflags & BX_BKGRDINPROG)
                        panic("losing buffer 1");
                if (bp->b_kvasize) {
                        bp->b_qindex = BQUEUE_EMPTYKVA;
                } else {
                        bp->b_qindex = BQUEUE_EMPTY;
                }
                TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
                LIST_REMOVE(bp, b_hash);
                LIST_INSERT_HEAD(&invalhash, bp, b_hash);
                bp->b_dev = NODEV;
        } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
                /*
                 * Buffers with junk contents.  Again these buffers had better
                 * already be disassociated from their vnode.
                 */
                KASSERT(bp->b_vp == NULL,
                        ("bp2 %p flags %08x/%08lx vnode %p unexpectedly still associated!",
                        bp, saved_flags, bp->b_flags, bp->b_vp));
                bp->b_flags |= B_INVAL;
                bp->b_xflags &= ~BX_BKGRDWRITE;
                if (bp->b_xflags & BX_BKGRDINPROG)
                        panic("losing buffer 2");
                bp->b_qindex = BQUEUE_CLEAN;
                TAILQ_INSERT_HEAD(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
                LIST_REMOVE(bp, b_hash);
                LIST_INSERT_HEAD(&invalhash, bp, b_hash);
                bp->b_dev = NODEV;
        } else if (bp->b_flags & B_LOCKED) {
                /*
                 * Buffers that are locked.
                 */
                bp->b_qindex = BQUEUE_LOCKED;
                TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist);
        } else {
                /*
                 * Remaining buffers.  These buffers are still associated with
                 * their vnode.
                 */
                switch(bp->b_flags & (B_DELWRI|B_AGE)) {
                case B_DELWRI | B_AGE:
                        bp->b_qindex = BQUEUE_DIRTY;
                        TAILQ_INSERT_HEAD(&bufqueues[BQUEUE_DIRTY], bp, b_freelist);
                        break;
                case B_DELWRI:
                        bp->b_qindex = BQUEUE_DIRTY;
                        TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY], bp, b_freelist);
                        break;
                case B_AGE:
                        bp->b_qindex = BQUEUE_CLEAN;
                        TAILQ_INSERT_HEAD(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
                        break;
                default:
                        bp->b_qindex = BQUEUE_CLEAN;
                        TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
                        break;
                }
        }

        /*
         * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
         * on the correct queue.
         */
        if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI))
                bundirty(bp);

        /*
         * Fixup numfreebuffers count.  The bp is on an appropriate queue
         * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
         * We've already handled the B_INVAL case (B_DELWRI will be clear
         * if B_INVAL is set).
         */
        if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
                bufcountwakeup();

        /*
         * Something we can maybe free or reuse
         */
        if (bp->b_bufsize || bp->b_kvasize)
                bufspacewakeup();

        /* unlock */
        BUF_UNLOCK(bp);
        bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF |
                        B_DIRECT | B_NOWDRAIN);
        crit_exit();
}

/*
 * bqrelse:
 *
 *	Release a buffer back to the appropriate queue but do not try to free
 *	it.  The buffer is expected to be used again soon.
 *
 *	bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 *	biodone() to requeue an async I/O on completion.  It is also used when
 *	known good buffers need to be requeued but we think we may need the
 *	data again soon.
 *
 *	XXX we should be able to leave the B_RELBUF hint set on completion.
 */
void
bqrelse(struct buf *bp)
{
        crit_enter();

        KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
                ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

        if (bp->b_qindex != BQUEUE_NONE)
                panic("bqrelse: free buffer onto another queue???");
        if (BUF_REFCNTNB(bp) > 1) {
                /* do not release to free list */
                panic("bqrelse: multiple refs");
                BUF_UNLOCK(bp);
                crit_exit();
                return;
        }
        if (bp->b_flags & B_LOCKED) {
                bp->b_flags &= ~B_ERROR;
                bp->b_qindex = BQUEUE_LOCKED;
                TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist);
                /* buffers with stale but valid contents */
        } else if (bp->b_flags & B_DELWRI) {
                bp->b_qindex = BQUEUE_DIRTY;
                TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY], bp, b_freelist);
        } else if (vm_page_count_severe()) {
                /*
                 * We are too low on memory, we have to try to free the
                 * buffer (most importantly: the wired pages making up its
                 * backing store) *now*.
                 */
                crit_exit();
                brelse(bp);
                return;
        } else {
                bp->b_qindex = BQUEUE_CLEAN;
                TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
        }

        if ((bp->b_flags & B_LOCKED) == 0 &&
            ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))) {
                bufcountwakeup();
        }

        /*
         * Something we can maybe free or reuse.
         */
        if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
                bufspacewakeup();

        /*
         * Final cleanup and unlock.  Clear bits that are only used while a
         * buffer is actively locked.
         */
        bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
        BUF_UNLOCK(bp);
        crit_exit();
}

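/*
 * Illustrative sketch (not compiled): choosing a release routine.  brelse()
 * with B_RELBUF (or B_INVAL) gives the buffer's resources back, while
 * bqrelse() keeps the contents cached for expected re-use.
 */
#if 0
        bp->b_flags |= B_RELBUF;        /* or B_INVAL to toss it entirely */
        brelse(bp);

        /* versus */

        bqrelse(bp);                    /* data likely needed again soon */
#endif
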
/*
 * vfs_vmio_release:
 *
 *	Return backing pages held by the buffer 'bp' back to the VM system
 *	if possible.  The pages are freed if they are no longer valid, or we
 *	attempt to free them if the buffer was used for direct I/O; otherwise
 *	they are sent to the page cache.
 *
 *	Pages that were marked busy are left alone and skipped.
 *
 *	The KVA mapping (b_data) for the underlying pages is removed by
 *	this function.
 */
static void
vfs_vmio_release(struct buf *bp)
{
        int i;
        vm_page_t m;

        crit_enter();
        for (i = 0; i < bp->b_xio.xio_npages; i++) {
                m = bp->b_xio.xio_pages[i];
                bp->b_xio.xio_pages[i] = NULL;
                /*
                 * In order to keep page LRU ordering consistent, put
                 * everything on the inactive queue.
                 */
                vm_page_unwire(m, 0);
                /*
                 * We don't mess with busy pages, it is
                 * the responsibility of the process that
                 * busied the pages to deal with them.
                 */
                if ((m->flags & PG_BUSY) || (m->busy != 0))
                        continue;

                if (m->wire_count == 0) {
                        vm_page_flag_clear(m, PG_ZERO);
                        /*
                         * Might as well free the page if we can and it has
                         * no valid data.  We also free the page if the
                         * buffer was used for direct I/O.
                         */
                        if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
                            m->hold_count == 0) {
                                vm_page_busy(m);
                                vm_page_protect(m, VM_PROT_NONE);
                                vm_page_free(m);
                        } else if (bp->b_flags & B_DIRECT) {
                                vm_page_try_to_free(m);
                        } else if (vm_page_count_severe()) {
                                vm_page_try_to_cache(m);
                        }
                }
        }
        crit_exit();
        pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
                     bp->b_xio.xio_npages);
        if (bp->b_bufsize) {
                bufspacewakeup();
                bp->b_bufsize = 0;
        }
        bp->b_xio.xio_npages = 0;
        bp->b_flags &= ~B_VMIO;
        if (bp->b_vp)
                brelvp(bp);
}

/*
 * gbincore:
 *
 *	Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode *vp, daddr_t blkno)
{
        struct buf *bp;
        struct bufhashhdr *bh;

        bh = bufhash(vp, blkno);
        LIST_FOREACH(bp, bh, b_hash) {
                if (bp->b_vp == vp && bp->b_lblkno == blkno)
                        break;
        }
        return (bp);
}

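/*
 * Example (sketch): probing residency before scheduling read-ahead, in the
 * spirit of breadn() above (which uses inmem() for a stronger validity
 * check).  A NULL return means no buffer for that block is hashed.
 */
#if 0
        if (gbincore(vp, blkno) == NULL) {
                /* block not resident; safe to initiate an async read */
        }
#endif
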
/*
 * vfs_bio_awrite:
 *
 *	Implement clustered async writes for clearing out B_DELWRI buffers.
 *	This is much better than the old way of writing only one buffer at
 *	a time.  Note that we may not be presented with the buffers in the
 *	correct order, so we search for the cluster in both directions.
 */
int
vfs_bio_awrite(struct buf *bp)
{
        int i;
        int j;
        daddr_t lblkno = bp->b_lblkno;
        struct vnode *vp = bp->b_vp;
        int ncl;
        struct buf *bpa;
        int nwritten;
        int size;
        int maxcl;

        crit_enter();
        /*
         * right now we support clustered writing only to regular files.  If
         * we find a clusterable block we could be in the middle of a cluster
         * rather than at the beginning.
         */
        if ((vp->v_type == VREG) &&
            (vp->v_mount != 0) && /* Only on nodes that have the size info */
            (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

                size = vp->v_mount->mnt_stat.f_iosize;
                maxcl = MAXPHYS / size;

                for (i = 1; i < maxcl; i++) {
                        if ((bpa = gbincore(vp, lblkno + i)) &&
                            BUF_REFCNT(bpa) == 0 &&
                            ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
                            (B_DELWRI | B_CLUSTEROK)) &&
                            (bpa->b_bufsize == size)) {
                                if ((bpa->b_blkno == bpa->b_lblkno) ||
                                    (bpa->b_blkno !=
                                     bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
                                        break;
                        } else {
                                break;
                        }
                }
                for (j = 1; i + j <= maxcl && j <= lblkno; j++) {
                        if ((bpa = gbincore(vp, lblkno - j)) &&
                            BUF_REFCNT(bpa) == 0 &&
                            ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
                            (B_DELWRI | B_CLUSTEROK)) &&
                            (bpa->b_bufsize == size)) {
                                if ((bpa->b_blkno == bpa->b_lblkno) ||
                                    (bpa->b_blkno !=
                                     bp->b_blkno - ((j * size) >> DEV_BSHIFT)))
                                        break;
                        } else {
                                break;
                        }
                }
                --j;
                ncl = i + j;
                /*
                 * this is a possible cluster write
                 */
                if (ncl != 1) {
                        nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
                        crit_exit();
                        return nwritten;
                }
        }

        BUF_LOCK(bp, LK_EXCLUSIVE);
        bremfree(bp);
        bp->b_flags |= B_ASYNC;

        crit_exit();
        /*
         * default (old) behavior, writing out only one block
         *
         * XXX returns b_bufsize instead of b_bcount for nwritten?
         */
        nwritten = bp->b_bufsize;
        (void) VOP_BWRITE(bp->b_vp, bp);

        return nwritten;
}

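/*
 * Illustrative sketch (not compiled): how a dirty-queue flusher such as
 * flushbufqueues() (see the end of this file) might hand a B_DELWRI
 * buffer to this routine.  In the single-buffer path vfs_bio_awrite()
 * performs its own BUF_LOCK and bremfree(), so the buffer is passed in
 * still sitting on its queue.
 */
#if 0
        if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
                nwritten = vfs_bio_awrite(bp);  /* may cluster with neighbors */
#endif
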
1715 */ 1716 nqindex = BQUEUE_EMPTYKVA; 1717 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]); 1718 1719 if (nbp == NULL) { 1720 /* 1721 * If no EMPTYKVA buffers and we are either 1722 * defragging or reusing, locate a CLEAN buffer 1723 * to free or reuse. If bufspace useage is low 1724 * skip this step so we can allocate a new buffer. 1725 */ 1726 if (defrag || bufspace >= lobufspace) { 1727 nqindex = BQUEUE_CLEAN; 1728 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]); 1729 } 1730 1731 /* 1732 * If we could not find or were not allowed to reuse a 1733 * CLEAN buffer, check to see if it is ok to use an EMPTY 1734 * buffer. We can only use an EMPTY buffer if allocating 1735 * its KVA would not otherwise run us out of buffer space. 1736 */ 1737 if (nbp == NULL && defrag == 0 && 1738 bufspace + maxsize < hibufspace) { 1739 nqindex = BQUEUE_EMPTY; 1740 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTY]); 1741 } 1742 } 1743 1744 /* 1745 * Run scan, possibly freeing data and/or kva mappings on the fly 1746 * depending. 1747 */ 1748 1749 while ((bp = nbp) != NULL) { 1750 int qindex = nqindex; 1751 1752 /* 1753 * Calculate next bp ( we can only use it if we do not block 1754 * or do other fancy things ). 1755 */ 1756 if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) { 1757 switch(qindex) { 1758 case BQUEUE_EMPTY: 1759 nqindex = BQUEUE_EMPTYKVA; 1760 if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]))) 1761 break; 1762 /* fall through */ 1763 case BQUEUE_EMPTYKVA: 1764 nqindex = BQUEUE_CLEAN; 1765 if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]))) 1766 break; 1767 /* fall through */ 1768 case BQUEUE_CLEAN: 1769 /* 1770 * nbp is NULL. 1771 */ 1772 break; 1773 } 1774 } 1775 1776 /* 1777 * Sanity Checks 1778 */ 1779 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp)); 1780 1781 /* 1782 * Note: we no longer distinguish between VMIO and non-VMIO 1783 * buffers. 1784 */ 1785 1786 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex)); 1787 1788 /* 1789 * If we are defragging then we need a buffer with 1790 * b_kvasize != 0. XXX this situation should no longer 1791 * occur, if defrag is non-zero the buffer's b_kvasize 1792 * should also be non-zero at this point. XXX 1793 */ 1794 if (defrag && bp->b_kvasize == 0) { 1795 printf("Warning: defrag empty buffer %p\n", bp); 1796 continue; 1797 } 1798 1799 /* 1800 * Start freeing the bp. This is somewhat involved. nbp 1801 * remains valid only for BQUEUE_EMPTY[KVA] bp's. Buffers 1802 * on the clean list must be disassociated from their 1803 * current vnode. Buffers on the empty[kva] lists have 1804 * already been disassociated. 1805 */ 1806 1807 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) 1808 panic("getnewbuf: locked buf"); 1809 bremfree(bp); 1810 1811 if (qindex == BQUEUE_CLEAN) { 1812 if (bp->b_flags & B_VMIO) { 1813 bp->b_flags &= ~B_ASYNC; 1814 vfs_vmio_release(bp); 1815 } 1816 if (bp->b_vp) 1817 brelvp(bp); 1818 } 1819 1820 /* 1821 * NOTE: nbp is now entirely invalid. We can only restart 1822 * the scan from this point on. 1823 * 1824 * Get the rest of the buffer freed up. b_kva* is still 1825 * valid after this operation. 
/*
 * buf_daemon:
 *
 *	Buffer flushing daemon.  Buffers are normally flushed by the
 *	update daemon but if it cannot keep up this process starts to
 *	take the load in an attempt to prevent getnewbuf() from blocking.
 */

static struct thread *bufdaemonthread;

static struct kproc_desc buf_kp = {
	"bufdaemon",
	buf_daemon,
	&bufdaemonthread
};
SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)

static void
buf_daemon(void)
{
	/*
	 * This process needs to be suspended prior to shutdown sync.
	 */
	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc,
	    bufdaemonthread, SHUTDOWN_PRI_LAST);

	/*
	 * This process is allowed to take the buffer cache to the limit
	 */
	crit_enter();

	for (;;) {
		kproc_suspend_loop();

		/*
		 * Do the flush.  Limit the amount of in-transit I/O we
		 * allow to build up, otherwise we would completely saturate
		 * the I/O system.  Wakeup any waiting processes before we
		 * normally would so they can run in parallel with our drain.
		 */
		while (numdirtybuffers > lodirtybuffers) {
			if (flushbufqueues() == 0)
				break;
			waitrunningbufspace();
			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
		}

		/*
		 * Only clear bd_request if we have reached our low water
		 * mark.  The buf_daemon normally waits 5 seconds and
		 * then incrementally flushes any dirty buffers that have
		 * built up, within reason.
		 *
		 * If we were unable to hit our low water mark and couldn't
		 * find any flushable buffers, we sleep half a second.
		 * Otherwise we loop immediately.
		 */
		if (numdirtybuffers <= lodirtybuffers) {
			/*
			 * We reached our low water mark, reset the
			 * request and sleep until we are needed again.
			 * The sleep is just so the suspend code works.
			 */
			bd_request = 0;
			tsleep(&bd_request, 0, "psleep", hz);
		} else {
			/*
			 * We couldn't find any flushable dirty buffers but
			 * still have too many dirty buffers, we
			 * have to sleep and try again. (rare)
			 */
			tsleep(&bd_request, 0, "qsleep", hz / 2);
		}
	}
}
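/*
 * Sketch (illustrative, not compiled): the producer half of the
 * bd_request handshake that buf_daemon() sleeps on above.  The real
 * bd_speedup() called from getnewbuf() lives elsewhere in this file;
 * a minimal version would just post the request and wake the daemon:
 */
#if 0
static void
bd_speedup_sketch(void)
{
	if (bd_request == 0) {
		bd_request = 1;
		wakeup(&bd_request);
	}
}
#endif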
/*
 * flushbufqueues:
 *
 *	Try to flush a buffer in the dirty queue.  We must be careful to
 *	free up B_INVAL buffers instead of writing them, which NFS is
 *	particularly sensitive to.
 */

static int
flushbufqueues(void)
{
	struct buf *bp;
	int r = 0;

	bp = TAILQ_FIRST(&bufqueues[BQUEUE_DIRTY]);

	while (bp) {
		KASSERT((bp->b_flags & B_DELWRI), ("unexpected clean buffer %p", bp));
		if ((bp->b_flags & B_DELWRI) != 0 &&
		    (bp->b_xflags & BX_BKGRDINPROG) == 0) {
			if (bp->b_flags & B_INVAL) {
				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
					panic("flushbufqueues: locked buf");
				bremfree(bp);
				brelse(bp);
				++r;
				break;
			}
			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps &&
			    (bp->b_flags & B_DEFERRED) == 0 &&
			    (*bioops.io_countdeps)(bp, 0)) {
				TAILQ_REMOVE(&bufqueues[BQUEUE_DIRTY],
				    bp, b_freelist);
				TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY],
				    bp, b_freelist);
				bp->b_flags |= B_DEFERRED;
				bp = TAILQ_FIRST(&bufqueues[BQUEUE_DIRTY]);
				continue;
			}
			vfs_bio_awrite(bp);
			++r;
			break;
		}
		bp = TAILQ_NEXT(bp, b_freelist);
	}
	return (r);
}
/*
 * incore:
 *
 *	Check to see if a block is currently resident in memory.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	crit_enter();
	bp = gbincore(vp, blkno);
	crit_exit();
	return (bp);
}

/*
 * inmem:
 *
 *	Returns true if no I/O is needed to access the associated VM object.
 *	This is like incore except it also hunts around in the VM system for
 *	the data.
 *
 *	Note that we ignore vm_page_free() races from interrupts against our
 *	lookup, since if the caller is not protected our return value will not
 *	be any more valid than otherwise once we exit the critical section.
 */
int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc, size;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_flag & VOBJBUF) == 0)
		return 0;

	size = PAGE_SIZE;
	if (size > vp->v_mount->mnt_stat.f_iosize)
		size = vp->v_mount->mnt_stat.f_iosize;
	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		tinc = size;
		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
		if (vm_page_is_valid(m,
		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
			return 0;
	}
	return 1;
}
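/*
 * Sketch (hypothetical caller, not compiled): read-ahead heuristics
 * typically combine the two tests above, skipping the I/O when either
 * a buffer is already instantiated or the data is resident in VM:
 */
#if 0
static int
need_readahead_sketch(struct vnode *vp, daddr_t blkno)
{
	if (incore(vp, blkno))		/* buffer already instantiated */
		return (0);
	if (inmem(vp, blkno))		/* data resident in VM pages */
		return (0);
	return (1);			/* a real read is required */
}
#endif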
/*
 * vfs_setdirty:
 *
 *	Sets the dirty range for a buffer based on the status of the dirty
 *	bits in the pages comprising the buffer.
 *
 *	The range is limited to the size of the buffer.
 *
 *	This routine is primarily used by NFS, but is generalized for the
 *	B_VMIO case.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;

	/*
	 * Degenerate case - empty buffer
	 */

	if (bp->b_bufsize == 0)
		return;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */

	if ((bp->b_flags & B_VMIO) == 0)
		return;

	object = bp->b_xio.xio_pages[0]->object;

	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
		printf("Warning: object %p writeable but not mightbedirty\n", object);
	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
		printf("Warning: object %p mightbedirty but not writeable\n", object);

	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
		vm_offset_t boffset;
		vm_offset_t eoffset;

		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_flag_clear(bp->b_xio.xio_pages[i], PG_ZERO);
			vm_page_test_dirty(bp->b_xio.xio_pages[i]);
		}

		/*
		 * Calculate the encompassing dirty range, boffset and eoffset,
		 * (eoffset - boffset) bytes.
		 */

		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			if (bp->b_xio.xio_pages[i]->dirty)
				break;
		}
		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

		for (i = bp->b_xio.xio_npages - 1; i >= 0; --i) {
			if (bp->b_xio.xio_pages[i]->dirty) {
				break;
			}
		}
		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

		/*
		 * Fit it to the buffer.
		 */

		if (eoffset > bp->b_bcount)
			eoffset = bp->b_bcount;

		/*
		 * If we have a good dirty range, merge with the existing
		 * dirty range.
		 */

		if (boffset < eoffset) {
			if (bp->b_dirtyoff > boffset)
				bp->b_dirtyoff = boffset;
			if (bp->b_dirtyend < eoffset)
				bp->b_dirtyend = eoffset;
		}
	}
}
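/*
 * Worked example for the range computation above (illustrative): for a
 * buffer whose b_offset is page-aligned and whose third page is the
 * only dirty one, the forward scan stops at i = 2 giving
 * boffset = 2 * PAGE_SIZE, the backward scan also stops at i = 2
 * giving eoffset = 3 * PAGE_SIZE, so the merged dirty range covers
 * exactly that one page before being clipped to b_bcount.
 */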
/*
 * getblk:
 *
 *	Get a block given a specified block and offset into a file/device.
 *	The buffer's B_DONE bit will be cleared on return, making it almost
 *	ready for an I/O initiation.  B_INVAL may or may not be set on
 *	return.  The caller should clear B_INVAL prior to initiating a
 *	READ.
 *
 *	IT IS IMPORTANT TO UNDERSTAND THAT IF YOU CALL GETBLK() AND B_CACHE
 *	IS NOT SET, YOU MUST INITIALIZE THE RETURNED BUFFER, ISSUE A READ,
 *	OR SET B_INVAL BEFORE RETIRING IT.  If you retire a getblk'd buffer
 *	without doing any of those things the system will likely believe
 *	the buffer to be valid (especially if it is not B_VMIO), and the
 *	next getblk() will return the buffer with B_CACHE set.
 *
 *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 *	an existing buffer.
 *
 *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
 *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 *	and then cleared based on the backing VM.  If the previous buffer is
 *	non-0-sized but invalid, B_CACHE will be cleared.
 *
 *	If getblk() must create a new buffer, the new buffer is returned with
 *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 *	case it is returned with B_INVAL clear and B_CACHE set based on the
 *	backing VM.
 *
 *	getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whose
 *	B_CACHE bit is clear.
 *
 *	What this means, basically, is that the caller should use B_CACHE to
 *	determine whether the buffer is fully valid or not and should clear
 *	B_INVAL prior to issuing a read.  If the caller intends to validate
 *	the buffer by loading its data area with something, the caller needs
 *	to clear B_INVAL.  If the caller does this without issuing an I/O,
 *	the caller should set B_CACHE ( as an optimization ), else the caller
 *	should issue the I/O and biodone() will set B_CACHE if the I/O was
 *	a write attempt or if it was a successful read.  If the caller
 *	intends to issue a READ, the caller must clear B_INVAL and B_ERROR
 *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)", size, MAXBSIZE);

	crit_enter();
loop:
	/*
	 * Block if we are low on buffers.   Certain processes are allowed
	 * to completely exhaust the buffer cache.
	 *
	 * If this check ever becomes a bottleneck it may be better to
	 * move it into the else, when gbincore() fails.  At the moment
	 * it isn't a problem.
	 *
	 * XXX remove, we cannot afford to block anywhere if holding a vnode
	 * lock in low-memory situation, so take it to the max.
	 */
	if (numfreebuffers == 0) {
		if (!curproc)
			return NULL;
		needsbuffer |= VFS_BIO_NEED_ANY;
		tsleep(&needsbuffer, slpflag, "newbuf", slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
		/*
		 * Buffer is in-core.  If the buffer is not busy, it must
		 * be on a queue.
		 */

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
			    "getblk", slpflag, slptimeo) == ENOLCK)
				goto loop;
			crit_exit();
			return (struct buf *) NULL;
		}

		/*
		 * The buffer is locked.  B_CACHE is cleared if the buffer is
		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
		 * and for a VMIO buffer B_CACHE is adjusted according to the
		 * backing VM cache.
		 */
		if (bp->b_flags & B_INVAL)
			bp->b_flags &= ~B_CACHE;
		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
			bp->b_flags |= B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies for non-VMIO case.
		 */

		if (bp->b_bcount != size) {
			if ((bp->b_flags & B_VMIO) == 0 ||
			    (size > bp->b_kvasize)) {
				if (bp->b_flags & B_DELWRI) {
					bp->b_flags |= B_NOCACHE;
					VOP_BWRITE(bp->b_vp, bp);
				} else {
					if ((bp->b_flags & B_VMIO) &&
					    (LIST_FIRST(&bp->b_dep) == NULL)) {
						bp->b_flags |= B_RELBUF;
						brelse(bp);
					} else {
						bp->b_flags |= B_NOCACHE;
						VOP_BWRITE(bp->b_vp, bp);
					}
				}
				goto loop;
			}
		}

		/*
		 * If the size is inconsistent in the VMIO case, we can resize
		 * the buffer.  This might lead to B_CACHE getting set or
		 * cleared.  If the size has not changed, B_CACHE remains
		 * unchanged from its previous state.
		 */

		if (bp->b_bcount != size)
			allocbuf(bp, size);

		KASSERT(bp->b_offset != NOOFFSET,
		    ("getblk: no buffer offset"));

		/*
		 * A buffer with B_DELWRI set and B_CACHE clear must
		 * be committed before we can return the buffer in
		 * order to prevent the caller from issuing a read
		 * ( due to B_CACHE not being set ) and overwriting
		 * it.
		 *
		 * Most callers, including NFS and FFS, need this to
		 * operate properly either because they assume they
		 * can issue a read if B_CACHE is not set, or because
		 * ( for example ) an uncached B_DELWRI might loop due
		 * to softupdates re-dirtying the buffer.  In the latter
		 * case, B_CACHE is set after the first write completes,
		 * preventing further loops.
		 *
		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
		 * above while extending the buffer, we cannot allow the
		 * buffer to remain with B_CACHE set after the write
		 * completes or it will represent a corrupt state.  To
		 * deal with this we set B_NOCACHE to scrap the buffer
		 * after the write.
		 *
		 * We might be able to do something fancy, like setting
		 * B_CACHE in bwrite() except if B_DELWRI is already set,
		 * so the below call doesn't set B_CACHE, but that gets real
		 * confusing.  This is much easier.
		 */

		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
			bp->b_flags |= B_NOCACHE;
			VOP_BWRITE(bp->b_vp, bp);
			goto loop;
		}

		crit_exit();
		bp->b_flags &= ~B_DONE;
	} else {
		/*
		 * Buffer is not in-core, create new buffer.  The buffer
		 * returned by getnewbuf() is locked.  Note that the returned
		 * buffer is also considered valid (not marked B_INVAL).
		 *
		 * Calculating the offset for the I/O requires figuring out
		 * the block size.  We use DEV_BSIZE for VBLK or VCHR and
		 * the mount's f_iosize otherwise.  If the vnode does not
		 * have an associated mount we assume that the passed size is
		 * the block size.
		 *
		 * Note that vn_isdisk() cannot be used here since it may
		 * return a failure for numerous reasons.   Note that the
		 * buffer size may be larger than the block size (the caller
		 * will use block numbers with the proper multiple).  Beware
		 * of using any v_* fields which are part of unions.  In
		 * particular, in DragonFly the mount point overloading
		 * mechanism is such that the underlying directory (with a
		 * non-NULL v_mountedhere) is not a special case.
		 */
		int bsize, maxsize, vmio;
		off_t offset;

		if (vp->v_type == VBLK || vp->v_type == VCHR)
			bsize = DEV_BSIZE;
		else if (vp->v_mount)
			bsize = vp->v_mount->mnt_stat.f_iosize;
		else
			bsize = size;

		offset = (off_t)blkno * bsize;
		vmio = (VOP_GETVOBJECT(vp, NULL) == 0) && (vp->v_flag & VOBJBUF);
		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
		maxsize = imax(maxsize, bsize);

		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
			if (slpflag || slptimeo) {
				crit_exit();
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * This can be a problem whether the vnode is locked or not.
		 * If the buffer is created out from under us, we have to
		 * throw away the one we just created.  There is no window
		 * race because we are safely running in a critical section
		 * from the point of the duplicate buffer creation through
		 * to here, and we've locked the buffer.
		 */
		if (gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.  bgetvp() and bufhash()
		 * must be synchronized with each other.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bp->b_offset = offset;

		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = bufhash(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		/*
		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
		 * buffer size starts out as 0, B_CACHE will be set by
		 * allocbuf() for the VMIO case prior to it testing the
		 * backing store for validity.
		 */

		if (vmio) {
			bp->b_flags |= B_VMIO;
#if defined(VFS_BIO_DEBUG)
			if (vn_canvmio(vp) != TRUE)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}

		allocbuf(bp, size);

		crit_exit();
		bp->b_flags &= ~B_DONE;
	}
	return (bp);
}
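/*
 * Sketch (hypothetical caller, not compiled): the canonical consumer
 * pattern the B_CACHE protocol above describes, in the style of
 * bread() earlier in this file.  The two-argument VOP_STRATEGY(vp, bp)
 * form is assumed here.
 */
#if 0
static struct buf *
read_block_sketch(struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_INVAL | B_ERROR);	/* required for READ */
		vfs_busy_pages(bp, 0);			/* before strategy */
		VOP_STRATEGY(vp, bp);
		if (biowait(bp)) {
			brelse(bp);
			return (NULL);
		}
	}
	return (bp);		/* B_CACHE now set by biodone() */
}
#endif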
/*
 * geteblk:
 *
 *	Get an empty, disassociated buffer of given size.  The buffer is
 *	initially set to B_INVAL.
 *
 *	critical section protection is not required for the allocbuf()
 *	call because races are impossible here.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int maxsize;

	maxsize = (size + BKVAMASK) & ~BKVAMASK;

	crit_enter();
	while ((bp = getnewbuf(0, 0, size, maxsize)) == NULL)
		;
	crit_exit();
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
	return (bp);
}
/*
 * allocbuf:
 *
 *	This code constitutes the buffer memory from either anonymous system
 *	memory (in the case of non-VMIO operations) or from an associated
 *	VM object (in the case of VMIO operations).  This code is able to
 *	resize a buffer up or down.
 *
 *	Note that this code is tricky, and has many complications to resolve
 *	deadlock or inconsistent data situations.  Tread lightly!!!
 *	There are B_CACHE and B_DELWRI interactions that must be dealt with by
 *	the caller.  Calling this code willy nilly can result in the loss of data.
 *
 *	allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 *	B_CACHE for the non-VMIO case.
 *
 *	This routine does not need to be called from a critical section but you
 *	must own the buffer.
 */
int
allocbuf(struct buf *bp, int size)
{
	int newbsize, mbsize;
	int i;

	if (BUF_REFCNT(bp) == 0)
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel.  Don't
		 * mess with B_CACHE.
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					if (bp->b_bufsize) {
						bufmallocspace -= bp->b_bufsize;
						bufspacewakeup();
						bp->b_bufsize = 0;
					}
					bp->b_data = bp->b_kvabase;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufmallocspace += mbsize;
				return 1;
			}
			origbuf = NULL;
			origbufsize = 0;
			/*
			 * If the buffer is growing on its other-than-first
			 * allocation, then we revert to the page-allocation
			 * scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				if (bp->b_bufsize) {
					bufmallocspace -= bp->b_bufsize;
					bufspacewakeup();
					bp->b_bufsize = 0;
				}
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (size == 0) ? 0 :
			num_pages((bp->b_offset & PAGE_MASK) + newbsize);

		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
		/*
		 * Set B_CACHE initially if buffer is 0 length or will become
		 * 0-length.
		 */
		if (size == 0 || bp->b_bufsize == 0)
			bp->b_flags |= B_CACHE;

		if (newbsize < bp->b_bufsize) {
			/*
			 * DEV_BSIZE aligned new buffer size is less than the
			 * DEV_BSIZE aligned existing buffer size.  Figure out
			 * if we have to remove any pages.
			 */
			if (desiredpages < bp->b_xio.xio_npages) {
				for (i = desiredpages; i < bp->b_xio.xio_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of
					 * vnode_pager_setsize
					 */
					m = bp->b_xio.xio_pages[i];
					KASSERT(m != bogus_page,
					    ("allocbuf: bogus page found"));
					while (vm_page_sleep_busy(m, TRUE, "biodep"))
						;

					bp->b_xio.xio_pages[i] = NULL;
					vm_page_unwire(m, 0);
				}
				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_xio.xio_npages - desiredpages));
				bp->b_xio.xio_npages = desiredpages;
			}
		} else if (size > bp->b_bcount) {
			/*
			 * We are growing the buffer, possibly in a
			 * byte-granular fashion.
			 */
			struct vnode *vp;
			vm_object_t obj;
			vm_offset_t toff;
			vm_offset_t tinc;

			/*
			 * Step 1, bring in the VM pages from the object,
			 * allocating them if necessary.  We must clear
			 * B_CACHE if these pages are not valid for the
			 * range covered by the buffer.
			 *
			 * critical section protection is required to protect
			 * against interrupts unbusying and freeing pages
			 * between our vm_page_lookup() and our
			 * busycheck/wiring call.
			 */
			vp = bp->b_vp;
			VOP_GETVOBJECT(vp, &obj);

			crit_enter();
			while (bp->b_xio.xio_npages < desiredpages) {
				vm_page_t m;
				vm_pindex_t pi;

				pi = OFF_TO_IDX(bp->b_offset) + bp->b_xio.xio_npages;
				if ((m = vm_page_lookup(obj, pi)) == NULL) {
					/*
					 * note: must allocate system pages
					 * since blocking here could interfere
					 * with paging I/O, no matter which
					 * process we are.
					 */
					m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM);
					if (m == NULL) {
						vm_wait();
						vm_pageout_deficit += desiredpages -
							bp->b_xio.xio_npages;
					} else {
						vm_page_wire(m);
						vm_page_wakeup(m);
						bp->b_flags &= ~B_CACHE;
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						++bp->b_xio.xio_npages;
					}
					continue;
				}

				/*
				 * We found a page.  If we have to sleep on it,
				 * retry because it might have gotten freed out
				 * from under us.
				 *
				 * We can only test PG_BUSY here.  Blocking on
				 * m->busy might lead to a deadlock:
				 *
				 *  vm_fault->getpages->cluster_read->allocbuf
				 *
				 */

				if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
					continue;

				/*
				 * We have a good page.  Should we wakeup the
				 * page daemon?
				 */
				if ((curthread != pagethread) &&
				    ((m->queue - m->pc) == PQ_CACHE) &&
				    ((vmstats.v_free_count + vmstats.v_cache_count) <
					(vmstats.v_free_min + vmstats.v_cache_min))) {
					pagedaemon_wakeup();
				}
				vm_page_flag_clear(m, PG_ZERO);
				vm_page_wire(m);
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				++bp->b_xio.xio_npages;
			}
			crit_exit();

			/*
			 * Step 2.  We've loaded the pages into the buffer,
			 * we have to figure out if we can still have B_CACHE
			 * set.  Note that B_CACHE is set according to the
			 * byte-granular range ( bcount and size ), not the
			 * aligned range ( newbsize ).
			 *
			 * The VM test is against m->valid, which is DEV_BSIZE
			 * aligned.  Needless to say, the validity of the data
			 * needs to also be DEV_BSIZE aligned.  Note that this
			 * fails with NFS if the server or some other client
			 * extends the file's EOF.  If our buffer is resized,
			 * B_CACHE may remain set! XXX
			 */

			toff = bp->b_bcount;
			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);

			while ((bp->b_flags & B_CACHE) && toff < size) {
				vm_pindex_t pi;

				if (tinc > (size - toff))
					tinc = size - toff;

				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
				    PAGE_SHIFT;

				vfs_buf_test_cache(
				    bp,
				    bp->b_offset,
				    toff,
				    tinc,
				    bp->b_xio.xio_pages[pi]
				);
				toff += tinc;
				tinc = PAGE_SIZE;
			}

			/*
			 * Step 3, fixup the KVM pmap.  Remember that
			 * bp->b_data is relative to bp->b_offset, but
			 * bp->b_offset may be offset into the first page.
			 */

			bp->b_data = (caddr_t)
			    trunc_page((vm_offset_t)bp->b_data);
			pmap_qenter(
			    (vm_offset_t)bp->b_data,
			    bp->b_xio.xio_pages,
			    bp->b_xio.xio_npages
			);
			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
		}
	}
	if (newbsize < bp->b_bufsize)
		bufspacewakeup();
	bp->b_bufsize = newbsize;	/* actual buffer allocation */
	bp->b_bcount = size;		/* requested buffer size */
	return 1;
}
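/*
 * Illustrative numbers for the non-VMIO policy above: a first-time
 * 512 byte request (b_bufsize == 0 and mbsize <= PAGE_SIZE/2, assuming
 * bufmallocspace is under its limit) is served from malloc'd memory
 * and marked B_MALLOC; any later growth of that same buffer fails the
 * b_bufsize == 0 test, so it reverts to page-backed memory via
 * vm_hold_load_pages() and the old contents are carried over with
 * bcopy().
 */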
/*
 * biowait:
 *
 *	Wait for buffer I/O completion, returning error status.  The buffer
 *	is left locked and B_DONE on return.  B_EINTR is converted into an
 *	EINTR error and cleared.
 */
int
biowait(struct buf * bp)
{
	crit_enter();
	while ((bp->b_flags & B_DONE) == 0) {
		if (bp->b_flags & B_READ)
			tsleep(bp, 0, "biord", 0);
		else
			tsleep(bp, 0, "biowr", 0);
	}
	crit_exit();
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
/*
 * biodone:
 *
 *	Finish I/O on a buffer, optionally calling a completion function.
 *	This is usually called from an interrupt so process blocking is
 *	not allowed.
 *
 *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
 *	assuming B_INVAL is clear.
 *
 *	For the VMIO case, we set B_CACHE if the op was a read and no
 *	read error occurred, or if the op was a write.  B_CACHE is never
 *	set if the buffer is invalid or otherwise uncacheable.
 *
 *	biodone does not mess with B_INVAL, allowing the I/O routine or the
 *	initiator to leave B_INVAL set to brelse the buffer out of existence
 *	in the biodone routine.
 *
 *	b_dev is required to be reinitialized prior to the top level strategy
 *	call in a device stack.  To avoid improper reuse, biodone() sets
 *	b_dev to NODEV.
 */
void
biodone(struct buf *bp)
{
	biodone_t *biodone_func;
	int error;

	crit_enter();

	KASSERT(BUF_REFCNTNB(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNTNB(bp)));
	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));

	bp->b_flags |= B_DONE;
	bp->b_dev = NODEV;
	runningbufwakeup(bp);

	if (bp->b_flags & B_FREEBUF) {
		brelse(bp);
		crit_exit();
		return;
	}

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}

	/* call optional completion function if requested */
	if (bp->b_iodone != NULL) {
		biodone_func = bp->b_iodone;
		bp->b_iodone = NULL;
		(*biodone_func) (bp);
		crit_exit();
		return;
	}
	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (bp->b_flags & B_VMIO) {
		int i;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		error = VOP_GETVOBJECT(vp, &obj);

#if defined(VFS_BIO_DEBUG)
		if (vp->v_holdcnt == 0) {
			panic("biodone: zero vnode hold count");
		}

		if (error) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VOBJBUF) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("biodone: no buffer offset"));

		if (error) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_xio.xio_npages) {
			printf("biodone: paging in progress(%d) < bp->b_xio.xio_npages(%d)\n",
			    obj->paging_in_progress, bp->b_xio.xio_npages);
		}
#endif

		/*
		 * Set B_CACHE if the op was a normal read and no error
		 * occurred.  B_CACHE is set for writes in the b*write()
		 * routines.
		 */
		iosize = bp->b_bcount - bp->b_resid;
		if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) {
			bp->b_flags |= B_CACHE;
		}

		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			int bogusflag = 0;
			int resid;

			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
			if (resid > iosize)
				resid = iosize;

			/*
			 * cleanup bogus pages, restoring the originals.  Since
			 * the originals should still be wired, we don't have
			 * to worry about interrupt/freeing races destroying
			 * the VM object association.
			 */
			m = bp->b_xio.xio_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (m == NULL)
					panic("biodone: page disappeared");
				bp->b_xio.xio_pages[i] = m;
				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
				    bp->b_xio.xio_pages, bp->b_xio.xio_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf(
				    "biodone: foff(%lu)/m->pindex(%d) mismatch\n",
				    (unsigned long)foff, m->pindex);
			}
#endif

			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly ( see bdwrite() ), so we
			 * only need to do this here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * when debugging new filesystems or buffer I/O
			 * methods, this is the most common error that pops
			 * up.  if you see this, you have not set the page
			 * busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (!vn_isdisk(vp, NULL))
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_xio.xio_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_xio.xio_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0");
			}
			vm_page_io_finish(m);
			vm_object_pip_subtract(obj, 1);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
			iosize -= resid;
		}
		if (obj)
			vm_object_pip_wakeupn(obj, 0);
	}

	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * will do a wakeup there if necessary - so no need to do a wakeup
	 * here in the async case. The sync case always needs to do a wakeup.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	crit_exit();
}
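/*
 * Illustrative note on the biodone() page loop above: resid is the
 * distance from foff to the next page boundary, so only the first
 * iteration can be partial.  With 4K pages and b_offset = 0x1800, the
 * first pass covers resid = 0x800 bytes (the tail of the first page)
 * and every later pass advances foff a full PAGE_SIZE at a time.
 */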
/*
 * vfs_unbusy_pages:
 *
 *	This routine is called in lieu of iodone in the case of
 *	incomplete I/O.  This keeps the busy status for pages
 *	consistent.
 */
void
vfs_unbusy_pages(struct buf *bp)
{
	int i;

	runningbufwakeup(bp);
	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj;

		VOP_GETVOBJECT(vp, &obj);

		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m = bp->b_xio.xio_pages[i];

			/*
			 * When restoring bogus changes the original pages
			 * should still be wired, so we are in no danger of
			 * losing the object association and do not need
			 * critical section protection particularly.
			 */
			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_xio.xio_pages[i] = m;
				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
				    bp->b_xio.xio_pages, bp->b_xio.xio_npages);
			}
			vm_object_pip_subtract(obj, 1);
			vm_page_flag_clear(m, PG_ZERO);
			vm_page_io_finish(m);
		}
		vm_object_pip_wakeupn(obj, 0);
	}
}

/*
 * vfs_page_set_valid:
 *
 *	Set the valid bits in a page based on the supplied offset.   The
 *	range is restricted to the buffer's size.
 *
 *	This routine is typically called after a read completes.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	vm_ooffset_t soff, eoff;

	/*
	 * Start and end offsets in buffer.  eoff - soff may not cross a
	 * page boundary or cross the end of the buffer.  The end of the
	 * buffer, in this case, is our file EOF, not the allocation size
	 * of the buffer.
	 */
	soff = off;
	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
	if (eoff > bp->b_offset + bp->b_bcount)
		eoff = bp->b_offset + bp->b_bcount;

	/*
	 * Set valid range.  This is typically the entire buffer and thus the
	 * entire page.
	 */
	if (eoff > soff) {
		vm_page_set_validclean(
		    m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff)
		);
	}
}
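/*
 * Worked example for vfs_page_set_valid() above (illustrative, 4K
 * pages): for a 6144 byte buffer at b_offset 0, the call for the
 * second page arrives with off = 4096; eoff starts at 8192 and is
 * clipped down to 6144, so only the first 2048 bytes of that page are
 * marked valid and clean.
 */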
/*
 * vfs_busy_pages:
 *
 *	This routine is called before a device strategy routine.
 *	It is used to tell the VM system that paging I/O is in
 *	progress, and treat the pages associated with the buffer
 *	almost as being PG_BUSY.  Also the object 'paging_in_progress'
 *	flag is handled to make sure that the object doesn't become
 *	inconsistent.
 *
 *	Since I/O has not been initiated yet, certain buffer flags
 *	such as B_ERROR or B_INVAL may be in an inconsistent state
 *	and should be ignored.
 */
void
vfs_busy_pages(struct buf *bp, int clear_modify)
{
	int i, bogus;
	struct proc *p = curthread->td_proc;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj;
		vm_ooffset_t foff;

		VOP_GETVOBJECT(vp, &obj);
		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("vfs_busy_pages: no buffer offset"));
		vfs_setdirty(bp);

retry:
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m = bp->b_xio.xio_pages[i];
			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
				goto retry;
		}

		bogus = 0;
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m = bp->b_xio.xio_pages[i];

			vm_page_flag_clear(m, PG_ZERO);
			if ((bp->b_flags & B_CLUSTER) == 0) {
				vm_object_pip_add(obj, 1);
				vm_page_io_start(m);
			}

			/*
			 * When readying a buffer for a read ( i.e
			 * clear_modify == 0 ), it is important to do
			 * bogus_page replacement for valid pages in
			 * partially instantiated buffers.  Partially
			 * instantiated buffers can, in turn, occur when
			 * reconstituting a buffer from its VM backing store
			 * base.  We only have to do this if B_CACHE is
			 * clear ( which causes the I/O to occur in the
			 * first place ).  The replacement prevents the read
			 * I/O from overwriting potentially dirty VM-backed
			 * pages.  XXX bogus page replacement is, uh, bogus.
			 * It may not work properly with small-block devices.
			 * We need to find a better way.
			 */

			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (m->valid == VM_PAGE_BITS_ALL &&
				(bp->b_flags & B_CACHE) == 0) {
				bp->b_xio.xio_pages[i] = bogus_page;
				bogus++;
			}
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		}
		if (bogus)
			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
			    bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	}

	/*
	 * This is the easiest place to put the process accounting for the I/O
	 * for now.
	 */
	if (p != NULL) {
		if (bp->b_flags & B_READ)
			p->p_stats->p_ru.ru_inblock++;
		else
			p->p_stats->p_ru.ru_oublock++;
	}
}
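/*
 * Sketch (hypothetical write path, not compiled): the ordering the
 * comment above requires.  The two-argument VOP_STRATEGY(vp, bp) form
 * is assumed here; vfs_unbusy_pages() is the undo path if the I/O is
 * aborted before being initiated.
 */
#if 0
static void
start_write_sketch(struct vnode *vp, struct buf *bp)
{
	vfs_busy_pages(bp, 1);		/* writes pass clear_modify != 0 */
	VOP_STRATEGY(vp, bp);		/* hand the buffer to the device */
}
#endif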
/*
 * vfs_clean_pages:
 *
 *	Tell the VM system that the pages associated with this buffer
 *	are clean.  This is used for delayed writes where the data is
 *	going to go to disk eventually without additional VM intervention.
 *
 *	Note that while we only really need to clean through to b_bcount, we
 *	just go ahead and clean through to b_bufsize.
 */
static void
vfs_clean_pages(struct buf *bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;

		foff = bp->b_offset;
		KASSERT(bp->b_offset != NOOFFSET,
		    ("vfs_clean_pages: no buffer offset"));
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m = bp->b_xio.xio_pages[i];
			vm_ooffset_t noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
			vm_ooffset_t eoff = noff;

			if (eoff > bp->b_offset + bp->b_bufsize)
				eoff = bp->b_offset + bp->b_bufsize;
			vfs_page_set_valid(bp, foff, i, m);
			/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
			foff = noff;
		}
	}
}

/*
 * vfs_bio_set_validclean:
 *
 *	Set the range within the buffer to valid and clean.  The range is
 *	relative to the beginning of the buffer, b_offset.  Note that b_offset
 *	itself may be offset from the beginning of the first page.
 */

void
vfs_bio_set_validclean(struct buf *bp, int base, int size)
{
	if (bp->b_flags & B_VMIO) {
		int i;
		int n;

		/*
		 * Fixup base to be relative to beginning of first page.
		 * Set initial n to be the maximum number of bytes in the
		 * first page that can be validated.
		 */

		base += (bp->b_offset & PAGE_MASK);
		n = PAGE_SIZE - (base & PAGE_MASK);

		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_xio.xio_npages; ++i) {
			vm_page_t m = bp->b_xio.xio_pages[i];

			if (n > size)
				n = size;

			vm_page_set_validclean(m, base & PAGE_MASK, n);
			base += n;
			size -= n;
			n = PAGE_SIZE;
		}
	}
}
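/*
 * Worked example for vfs_bio_set_validclean() above (illustrative, 4K
 * pages): with b_offset = 0x200, a call with base = 0 and
 * size = PAGE_SIZE first adjusts base to 0x200, validates the
 * remaining 0xe00 bytes of page 0, and then the final 0x200 bytes
 * spill into page 1 on the next loop iteration.
 */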
/*
 * vfs_bio_clrbuf:
 *
 *	Clear a buffer.  This routine essentially fakes an I/O, so we need
 *	to clear B_ERROR and B_INVAL.
 *
 *	Note that while we only theoretically need to clear through b_bcount,
 *	we go ahead and clear through b_bufsize.
 */

void
vfs_bio_clrbuf(struct buf *bp)
{
	int i, mask = 0;
	caddr_t sa, ea;

	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
		bp->b_flags &= ~(B_INVAL|B_ERROR);
		if ((bp->b_xio.xio_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
		    (bp->b_offset & PAGE_MASK) == 0) {
			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
			if ((bp->b_xio.xio_pages[0]->valid & mask) == mask) {
				bp->b_resid = 0;
				return;
			}
			if (((bp->b_xio.xio_pages[0]->flags & PG_ZERO) == 0) &&
			    ((bp->b_xio.xio_pages[0]->valid & mask) == 0)) {
				bzero(bp->b_data, bp->b_bufsize);
				bp->b_xio.xio_pages[0]->valid |= mask;
				bp->b_resid = 0;
				return;
			}
		}
		ea = sa = bp->b_data;
		for (i = 0; i < bp->b_xio.xio_npages; i++, sa = ea) {
			int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
			ea = (caddr_t)(vm_offset_t)ulmin(
			    (u_long)(vm_offset_t)ea,
			    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
			if ((bp->b_xio.xio_pages[i]->valid & mask) == mask)
				continue;
			if ((bp->b_xio.xio_pages[i]->valid & mask) == 0) {
				if ((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) {
					bzero(sa, ea - sa);
				}
			} else {
				for (; sa < ea; sa += DEV_BSIZE, j++) {
					if (((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) &&
					    (bp->b_xio.xio_pages[i]->valid & (1<<j)) == 0)
						bzero(sa, DEV_BSIZE);
				}
			}
			bp->b_xio.xio_pages[i]->valid |= mask;
			vm_page_flag_clear(bp->b_xio.xio_pages[i], PG_ZERO);
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}
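/*
 * Illustrative mask arithmetic for the single-page fast path above:
 * with 4K pages and DEV_BSIZE 512, a page-aligned 2048 byte buffer
 * yields mask = (1 << 4) - 1 = 0x0f, i.e. one valid bit per 512-byte
 * chunk that must already be set for the buffer to be considered
 * clear without any zeroing.
 */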
/*
 * vm_hold_load_pages:
 *
 *	Load pages into the buffer's address space.  The pages are
 *	allocated from the kernel object in order to reduce interference
 *	with any VM paging I/O activity.  The range of loaded
 *	pages will be wired.
 *
 *	If a page cannot be allocated, the 'pagedaemon' is woken up to
 *	retrieve the full range (to - from) of pages.
 */
void
vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		/*
		 * Note: must allocate system pages since blocking here
		 * could interfere with paging I/O, no matter which
		 * process we are.
		 */
		p = vm_page_alloc(kernel_object,
			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			vm_wait();
			goto tryagain;
		}
		vm_page_wire(p);
		p->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(p, PG_ZERO);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_xio.xio_pages[index] = p;
		vm_page_wakeup(p);
	}
	bp->b_xio.xio_npages = index;
}
/*
 * vm_hold_free_pages:
 *
 *	Return pages associated with the buffer back to the VM system.
 *
 *	The range of pages underlying the buffer's address space will
 *	be unmapped and un-wired.
 */
void
vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_xio.xio_pages[index];
		if (p && (index < bp->b_xio.xio_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
			bp->b_xio.xio_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_busy(p);
			vm_page_unwire(p, 0);
			vm_page_free(p);
		}
	}
	bp->b_xio.xio_npages = newnpages;
}
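/*
 * Usage note (see allocbuf() above): within this file the non-VMIO
 * resize path of allocbuf() is the consumer of the vm_hold_*_pages()
 * pair -- growing a buffer maps fresh wired kernel pages into its KVA,
 * shrinking it unmaps and frees them again.
 */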
/*
 * vmapbuf:
 *
 *	Map an IO request into kernel virtual address space.
 *
 *	All requests are (re)mapped into kernel VA space.
 *	Notice that we use b_bufsize for the size of the buffer
 *	to be mapped.  b_bcount might be modified by the driver.
 */
int
vmapbuf(struct buf *bp)
{
	caddr_t addr, v, kva;
	vm_paddr_t pa;
	int pidx;
	int i;
	struct vm_page *m;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	if (bp->b_bufsize < 0)
		return (-1);
	for (v = bp->b_saveaddr,
		     addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data),
		     pidx = 0;
	     addr < bp->b_data + bp->b_bufsize;
	     addr += PAGE_SIZE, v += PAGE_SIZE, pidx++) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
retry:
		i = vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		if (i < 0) {
			for (i = 0; i < pidx; ++i) {
				vm_page_unhold(bp->b_xio.xio_pages[i]);
				bp->b_xio.xio_pages[i] = NULL;
			}
			return(-1);
		}

		/*
		 * WARNING!  If sparc support is MFCd in the future this will
		 * have to be changed from pmap_kextract() to pmap_extract()
		 * ala -current.
		 */
#ifdef __sparc64__
#error "If MFCing sparc support use pmap_extract"
#endif
		pa = pmap_kextract((vm_offset_t)addr);
		if (pa == 0) {
			printf("vmapbuf: warning, race against user address during I/O");
			goto retry;
		}
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_hold(m);
		bp->b_xio.xio_pages[pidx] = m;
	}
	if (pidx > btoc(MAXPHYS))
		panic("vmapbuf: mapped more than MAXPHYS");
	pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_xio.xio_pages, pidx);

	kva = bp->b_saveaddr;
	bp->b_xio.xio_npages = pidx;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
	return(0);
}
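/*
 * Sketch (hypothetical raw-I/O path, not compiled): vmapbuf() and
 * vunmapbuf() bracket a transfer on a B_PHYS buffer, double-mapping
 * the user pages into kernel VA for the driver; compare physio().
 * The two-argument VOP_STRATEGY(vp, bp) form and the EFAULT return
 * are assumptions of this sketch.
 */
#if 0
static int
raw_io_sketch(struct vnode *vp, struct buf *bp)
{
	int error;

	if (vmapbuf(bp) < 0)		/* wire and double-map user pages */
		return (EFAULT);
	VOP_STRATEGY(vp, bp);
	error = biowait(bp);		/* collect completion status */
	vunmapbuf(bp);			/* unmap and restore b_data */
	return (error);
}
#endif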
/*
 * vunmapbuf:
 *
 *	Free the io map PTEs associated with this IO operation.
 *	We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(struct buf *bp)
{
	int pidx;
	int npages;
	vm_page_t *m;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	npages = bp->b_xio.xio_npages;
	pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
		     npages);
	m = bp->b_xio.xio_pages;
	for (pidx = 0; pidx < npages; pidx++)
		vm_page_unhold(*m++);

	bp->b_data = bp->b_saveaddr;
}

/*
 * print out statistics from the current status of the buffer pool
 * this can be toggled by the system control option debug.syncprt
 */
#ifdef DEBUG
void
vfs_bufstats(void)
{
	int i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[3] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[3]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		crit_enter();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		crit_exit();
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
		  "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, "
		  "b_blkno = %d, b_pblkno = %d\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  major(bp->b_dev), minor(bp->b_dev),
		  bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_xio.xio_npages) {
		int i;
		db_printf("b_xio.xio_npages = %d, pages(OBJ, IDX, PA): ",
			bp->b_xio.xio_npages);
		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			vm_page_t m;
			m = bp->b_xio.xio_pages[i];
			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_xio.xio_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */