1 /* 2 * Copyright (c) 1994,1997 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Absolutely no warranty of function or purpose is made by the author 12 * John S. Dyson. 13 * 14 * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $ 15 */ 16 17 /* 18 * this file contains a new buffer I/O scheme implementing a coherent 19 * VM object and buffer cache scheme. Pains have been taken to make 20 * sure that the performance degradation associated with schemes such 21 * as this is not realized. 22 * 23 * Author: John S. Dyson 24 * Significant help during the development and debugging phases 25 * had been provided by David Greenman, also of the FreeBSD core team. 26 * 27 * see man buf(9) for more info. 28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/buf.h> 33 #include <sys/conf.h> 34 #include <sys/devicestat.h> 35 #include <sys/eventhandler.h> 36 #include <sys/lock.h> 37 #include <sys/malloc.h> 38 #include <sys/mount.h> 39 #include <sys/kernel.h> 40 #include <sys/kthread.h> 41 #include <sys/proc.h> 42 #include <sys/reboot.h> 43 #include <sys/resourcevar.h> 44 #include <sys/sysctl.h> 45 #include <sys/vmmeter.h> 46 #include <sys/vnode.h> 47 #include <sys/dsched.h> 48 #include <vm/vm.h> 49 #include <vm/vm_param.h> 50 #include <vm/vm_kern.h> 51 #include <vm/vm_pageout.h> 52 #include <vm/vm_page.h> 53 #include <vm/vm_object.h> 54 #include <vm/vm_extern.h> 55 #include <vm/vm_map.h> 56 #include <vm/vm_pager.h> 57 #include <vm/swap_pager.h> 58 59 #include <sys/buf2.h> 60 #include <sys/thread2.h> 61 #include <sys/spinlock2.h> 62 #include <sys/mplock2.h> 63 #include <vm/vm_page2.h> 64 65 #include "opt_ddb.h" 66 #ifdef DDB 67 #include <ddb/ddb.h> 68 #endif 69 70 /* 71 * Buffer queues. 
72 */ 73 enum bufq_type { 74 BQUEUE_NONE, /* not on any queue */ 75 BQUEUE_LOCKED, /* locked buffers */ 76 BQUEUE_CLEAN, /* non-B_DELWRI buffers */ 77 BQUEUE_DIRTY, /* B_DELWRI buffers */ 78 BQUEUE_DIRTY_HW, /* B_DELWRI buffers - heavy weight */ 79 BQUEUE_EMPTYKVA, /* empty buffer headers with KVA assignment */ 80 BQUEUE_EMPTY, /* empty buffer headers */ 81 82 BUFFER_QUEUES /* number of buffer queues */ 83 }; 84 85 typedef enum bufq_type bufq_type_t; 86 87 #define BD_WAKE_SIZE 16384 88 #define BD_WAKE_MASK (BD_WAKE_SIZE - 1) 89 90 TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES]; 91 static struct spinlock bufqspin = SPINLOCK_INITIALIZER(&bufqspin); 92 static struct spinlock bufcspin = SPINLOCK_INITIALIZER(&bufcspin); 93 94 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer"); 95 96 struct buf *buf; /* buffer header pool */ 97 98 static void vfs_clean_pages(struct buf *bp); 99 static void vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m); 100 #if 0 101 static void vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m); 102 #endif 103 static void vfs_vmio_release(struct buf *bp); 104 static int flushbufqueues(struct buf *marker, bufq_type_t q); 105 static vm_page_t bio_page_alloc(struct buf *bp, vm_object_t obj, 106 vm_pindex_t pg, int deficit); 107 108 static void bd_signal(long totalspace); 109 static void buf_daemon(void); 110 static void buf_daemon_hw(void); 111 112 /* 113 * bogus page -- for I/O to/from partially complete buffers 114 * this is a temporary solution to the problem, but it is not 115 * really that bad. it would be better to split the buffer 116 * for input in the case of buffers partially already in memory, 117 * but the code is intricate enough already. 118 */ 119 vm_page_t bogus_page; 120 121 /* 122 * These are all static, but make the ones we export globals so we do 123 * not need to use compiler magic. 124 */ 125 long bufspace; /* locked by buffer_map */ 126 long maxbufspace; 127 static long bufmallocspace; /* atomic ops */ 128 long maxbufmallocspace, lobufspace, hibufspace; 129 static long bufreusecnt, bufdefragcnt, buffreekvacnt; 130 static long lorunningspace; 131 static long hirunningspace; 132 static long runningbufreq; /* locked by bufcspin */ 133 static long dirtykvaspace; /* locked by bufcspin */ 134 static long dirtybufspace; /* locked by bufcspin */ 135 static long dirtybufcount; /* locked by bufcspin */ 136 static long dirtybufspacehw; /* locked by bufcspin */ 137 static long dirtybufcounthw; /* locked by bufcspin */ 138 static long runningbufspace; /* locked by bufcspin */ 139 static long runningbufcount; /* locked by bufcspin */ 140 long lodirtybufspace; 141 long hidirtybufspace; 142 static int getnewbufcalls; 143 static int getnewbufrestarts; 144 static int recoverbufcalls; 145 static int needsbuffer; /* locked by bufcspin */ 146 static int bd_request; /* locked by bufcspin */ 147 static int bd_request_hw; /* locked by bufcspin */ 148 static u_int bd_wake_ary[BD_WAKE_SIZE]; 149 static u_int bd_wake_index; 150 static u_int vm_cycle_point = 40; /* 23-36 will migrate more act->inact */ 151 static int debug_commit; 152 153 static struct thread *bufdaemon_td; 154 static struct thread *bufdaemonhw_td; 155 static u_int lowmempgallocs; 156 static u_int lowmempgfails; 157 158 /* 159 * Sysctls for operational control of the buffer cache. 
160 */ 161 SYSCTL_LONG(_vfs, OID_AUTO, lodirtybufspace, CTLFLAG_RW, &lodirtybufspace, 0, 162 "Number of dirty buffers to flush before bufdaemon becomes inactive"); 163 SYSCTL_LONG(_vfs, OID_AUTO, hidirtybufspace, CTLFLAG_RW, &hidirtybufspace, 0, 164 "High watermark used to trigger explicit flushing of dirty buffers"); 165 SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0, 166 "Minimum amount of buffer space required for active I/O"); 167 SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0, 168 "Maximum amount of buffer space to usable for active I/O"); 169 SYSCTL_UINT(_vfs, OID_AUTO, lowmempgallocs, CTLFLAG_RW, &lowmempgallocs, 0, 170 "Page allocations done during periods of very low free memory"); 171 SYSCTL_UINT(_vfs, OID_AUTO, lowmempgfails, CTLFLAG_RW, &lowmempgfails, 0, 172 "Page allocations which failed during periods of very low free memory"); 173 SYSCTL_UINT(_vfs, OID_AUTO, vm_cycle_point, CTLFLAG_RW, &vm_cycle_point, 0, 174 "Recycle pages to active or inactive queue transition pt 0-64"); 175 /* 176 * Sysctls determining current state of the buffer cache. 177 */ 178 SYSCTL_LONG(_vfs, OID_AUTO, nbuf, CTLFLAG_RD, &nbuf, 0, 179 "Total number of buffers in buffer cache"); 180 SYSCTL_LONG(_vfs, OID_AUTO, dirtykvaspace, CTLFLAG_RD, &dirtykvaspace, 0, 181 "KVA reserved by dirty buffers (all)"); 182 SYSCTL_LONG(_vfs, OID_AUTO, dirtybufspace, CTLFLAG_RD, &dirtybufspace, 0, 183 "Pending bytes of dirty buffers (all)"); 184 SYSCTL_LONG(_vfs, OID_AUTO, dirtybufspacehw, CTLFLAG_RD, &dirtybufspacehw, 0, 185 "Pending bytes of dirty buffers (heavy weight)"); 186 SYSCTL_LONG(_vfs, OID_AUTO, dirtybufcount, CTLFLAG_RD, &dirtybufcount, 0, 187 "Pending number of dirty buffers"); 188 SYSCTL_LONG(_vfs, OID_AUTO, dirtybufcounthw, CTLFLAG_RD, &dirtybufcounthw, 0, 189 "Pending number of dirty buffers (heavy weight)"); 190 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0, 191 "I/O bytes currently in progress due to asynchronous writes"); 192 SYSCTL_LONG(_vfs, OID_AUTO, runningbufcount, CTLFLAG_RD, &runningbufcount, 0, 193 "I/O buffers currently in progress due to asynchronous writes"); 194 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0, 195 "Hard limit on maximum amount of memory usable for buffer space"); 196 SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0, 197 "Soft limit on maximum amount of memory usable for buffer space"); 198 SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0, 199 "Minimum amount of memory to reserve for system buffer space"); 200 SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0, 201 "Amount of memory available for buffers"); 202 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RD, &maxbufmallocspace, 203 0, "Maximum amount of memory reserved for buffers using malloc"); 204 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0, 205 "Amount of memory left for buffers using malloc-scheme"); 206 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD, &getnewbufcalls, 0, 207 "New buffer header acquisition requests"); 208 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD, &getnewbufrestarts, 209 0, "New buffer header acquisition restarts"); 210 SYSCTL_INT(_vfs, OID_AUTO, recoverbufcalls, CTLFLAG_RD, &recoverbufcalls, 0, 211 "Recover VM space in an emergency"); 212 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RD, &bufdefragcnt, 0, 213 "Buffer acquisition restarts due to fragmented buffer map"); 214 
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RD, &buffreekvacnt, 0, 215 "Amount of time KVA space was deallocated in an arbitrary buffer"); 216 SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RD, &bufreusecnt, 0, 217 "Amount of time buffer re-use operations were successful"); 218 SYSCTL_INT(_vfs, OID_AUTO, debug_commit, CTLFLAG_RW, &debug_commit, 0, ""); 219 SYSCTL_INT(_debug_sizeof, OID_AUTO, buf, CTLFLAG_RD, 0, sizeof(struct buf), 220 "sizeof(struct buf)"); 221 222 char *buf_wmesg = BUF_WMESG; 223 224 #define VFS_BIO_NEED_ANY 0x01 /* any freeable buffer */ 225 #define VFS_BIO_NEED_UNUSED02 0x02 226 #define VFS_BIO_NEED_UNUSED04 0x04 227 #define VFS_BIO_NEED_BUFSPACE 0x08 /* wait for buf space, lo hysteresis */ 228 229 /* 230 * bufspacewakeup: 231 * 232 * Called when buffer space is potentially available for recovery. 233 * getnewbuf() will block on this flag when it is unable to free 234 * sufficient buffer space. Buffer space becomes recoverable when 235 * bp's get placed back in the queues. 236 */ 237 static __inline void 238 bufspacewakeup(void) 239 { 240 /* 241 * If someone is waiting for BUF space, wake them up. Even 242 * though we haven't freed the kva space yet, the waiting 243 * process will be able to now. 244 */ 245 spin_lock(&bufcspin); 246 if (needsbuffer & VFS_BIO_NEED_BUFSPACE) { 247 needsbuffer &= ~VFS_BIO_NEED_BUFSPACE; 248 spin_unlock(&bufcspin); 249 wakeup(&needsbuffer); 250 } else { 251 spin_unlock(&bufcspin); 252 } 253 } 254 255 /* 256 * runningbufwakeup: 257 * 258 * Accounting for I/O in progress. 259 * 260 */ 261 static __inline void 262 runningbufwakeup(struct buf *bp) 263 { 264 long totalspace; 265 long limit; 266 267 if ((totalspace = bp->b_runningbufspace) != 0) { 268 spin_lock(&bufcspin); 269 runningbufspace -= totalspace; 270 --runningbufcount; 271 bp->b_runningbufspace = 0; 272 273 /* 274 * see waitrunningbufspace() for limit test. 275 */ 276 limit = hirunningspace * 3 / 6; 277 if (runningbufreq && runningbufspace <= limit) { 278 runningbufreq = 0; 279 spin_unlock(&bufcspin); 280 wakeup(&runningbufreq); 281 } else { 282 spin_unlock(&bufcspin); 283 } 284 bd_signal(totalspace); 285 } 286 } 287 288 /* 289 * bufcountwakeup: 290 * 291 * Called when a buffer has been added to one of the free queues to 292 * account for the buffer and to wakeup anyone waiting for free buffers. 293 * This typically occurs when large amounts of metadata are being handled 294 * by the buffer cache ( else buffer space runs out first, usually ). 295 * 296 * MPSAFE 297 */ 298 static __inline void 299 bufcountwakeup(void) 300 { 301 spin_lock(&bufcspin); 302 if (needsbuffer) { 303 needsbuffer &= ~VFS_BIO_NEED_ANY; 304 spin_unlock(&bufcspin); 305 wakeup(&needsbuffer); 306 } else { 307 spin_unlock(&bufcspin); 308 } 309 } 310 311 /* 312 * waitrunningbufspace() 313 * 314 * If runningbufspace exceeds 4/6 hirunningspace we block until 315 * runningbufspace drops to 3/6 hirunningspace. We also block if another 316 * thread blocked here in order to be fair, even if runningbufspace 317 * is now lower than the limit. 318 * 319 * The caller may be using this function to block in a tight loop, we 320 * must block while runningbufspace is greater than at least 321 * hirunningspace * 3 / 6. 
322 */ 323 void 324 waitrunningbufspace(void) 325 { 326 long limit = hirunningspace * 4 / 6; 327 328 if (runningbufspace > limit || runningbufreq) { 329 spin_lock(&bufcspin); 330 while (runningbufspace > limit || runningbufreq) { 331 runningbufreq = 1; 332 ssleep(&runningbufreq, &bufcspin, 0, "wdrn1", 0); 333 } 334 spin_unlock(&bufcspin); 335 } 336 } 337 338 /* 339 * buf_dirty_count_severe: 340 * 341 * Return true if we have too many dirty buffers. 342 */ 343 int 344 buf_dirty_count_severe(void) 345 { 346 return (runningbufspace + dirtykvaspace >= hidirtybufspace || 347 dirtybufcount >= nbuf / 2); 348 } 349 350 /* 351 * Return true if the amount of running I/O is severe and BIOQ should 352 * start bursting. 353 */ 354 int 355 buf_runningbufspace_severe(void) 356 { 357 return (runningbufspace >= hirunningspace * 4 / 6); 358 } 359 360 /* 361 * vfs_buf_test_cache: 362 * 363 * Called when a buffer is extended. This function clears the B_CACHE 364 * bit if the newly extended portion of the buffer does not contain 365 * valid data. 366 * 367 * NOTE! Dirty VM pages are not processed into dirty (B_DELWRI) buffer 368 * cache buffers. The VM pages remain dirty, as someone had mmap()'d 369 * them while a clean buffer was present. 370 */ 371 static __inline__ 372 void 373 vfs_buf_test_cache(struct buf *bp, 374 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 375 vm_page_t m) 376 { 377 if (bp->b_flags & B_CACHE) { 378 int base = (foff + off) & PAGE_MASK; 379 if (vm_page_is_valid(m, base, size) == 0) 380 bp->b_flags &= ~B_CACHE; 381 } 382 } 383 384 /* 385 * bd_speedup() 386 * 387 * Spank the buf_daemon[_hw] if the total dirty buffer space exceeds the 388 * low water mark. 389 * 390 * MPSAFE 391 */ 392 static __inline__ 393 void 394 bd_speedup(void) 395 { 396 if (dirtykvaspace < lodirtybufspace && dirtybufcount < nbuf / 2) 397 return; 398 399 if (bd_request == 0 && 400 (dirtykvaspace > lodirtybufspace / 2 || 401 dirtybufcount - dirtybufcounthw >= nbuf / 2)) { 402 spin_lock(&bufcspin); 403 bd_request = 1; 404 spin_unlock(&bufcspin); 405 wakeup(&bd_request); 406 } 407 if (bd_request_hw == 0 && 408 (dirtykvaspace > lodirtybufspace / 2 || 409 dirtybufcounthw >= nbuf / 2)) { 410 spin_lock(&bufcspin); 411 bd_request_hw = 1; 412 spin_unlock(&bufcspin); 413 wakeup(&bd_request_hw); 414 } 415 } 416 417 /* 418 * bd_heatup() 419 * 420 * Get the buf_daemon heated up when the number of running and dirty 421 * buffers exceeds the mid-point. 422 * 423 * Return the total number of dirty bytes past the second mid point 424 * as a measure of how much excess dirty data there is in the system. 425 * 426 * MPSAFE 427 */ 428 long 429 bd_heatup(void) 430 { 431 long mid1; 432 long mid2; 433 long totalspace; 434 435 mid1 = lodirtybufspace + (hidirtybufspace - lodirtybufspace) / 2; 436 437 totalspace = runningbufspace + dirtykvaspace; 438 if (totalspace >= mid1 || dirtybufcount >= nbuf / 2) { 439 bd_speedup(); 440 mid2 = mid1 + (hidirtybufspace - mid1) / 2; 441 if (totalspace >= mid2) 442 return(totalspace - mid2); 443 } 444 return(0); 445 } 446 447 /* 448 * bd_wait() 449 * 450 * Wait for the buffer cache to flush (totalspace) bytes worth of 451 * buffers, then return. 452 * 453 * Regardless this function blocks while the number of dirty buffers 454 * exceeds hidirtybufspace. 
455 * 456 * MPSAFE 457 */ 458 void 459 bd_wait(long totalspace) 460 { 461 u_int i; 462 int count; 463 464 if (curthread == bufdaemonhw_td || curthread == bufdaemon_td) 465 return; 466 467 while (totalspace > 0) { 468 bd_heatup(); 469 if (totalspace > runningbufspace + dirtykvaspace) 470 totalspace = runningbufspace + dirtykvaspace; 471 count = totalspace / BKVASIZE; 472 if (count >= BD_WAKE_SIZE) 473 count = BD_WAKE_SIZE - 1; 474 475 spin_lock(&bufcspin); 476 i = (bd_wake_index + count) & BD_WAKE_MASK; 477 ++bd_wake_ary[i]; 478 479 /* 480 * This is not a strict interlock, so we play a bit loose 481 * with locking access to dirtybufspace* 482 */ 483 tsleep_interlock(&bd_wake_ary[i], 0); 484 spin_unlock(&bufcspin); 485 tsleep(&bd_wake_ary[i], PINTERLOCKED, "flstik", hz); 486 487 totalspace = runningbufspace + dirtykvaspace - hidirtybufspace; 488 } 489 } 490 491 /* 492 * bd_signal() 493 * 494 * This function is called whenever runningbufspace or dirtykvaspace 495 * is reduced. Track threads waiting for run+dirty buffer I/O 496 * complete. 497 * 498 * MPSAFE 499 */ 500 static void 501 bd_signal(long totalspace) 502 { 503 u_int i; 504 505 if (totalspace > 0) { 506 if (totalspace > BKVASIZE * BD_WAKE_SIZE) 507 totalspace = BKVASIZE * BD_WAKE_SIZE; 508 spin_lock(&bufcspin); 509 while (totalspace > 0) { 510 i = bd_wake_index++; 511 i &= BD_WAKE_MASK; 512 if (bd_wake_ary[i]) { 513 bd_wake_ary[i] = 0; 514 spin_unlock(&bufcspin); 515 wakeup(&bd_wake_ary[i]); 516 spin_lock(&bufcspin); 517 } 518 totalspace -= BKVASIZE; 519 } 520 spin_unlock(&bufcspin); 521 } 522 } 523 524 /* 525 * BIO tracking support routines. 526 * 527 * Release a ref on a bio_track. Wakeup requests are atomically released 528 * along with the last reference so bk_active will never wind up set to 529 * only 0x80000000. 530 * 531 * MPSAFE 532 */ 533 static 534 void 535 bio_track_rel(struct bio_track *track) 536 { 537 int active; 538 int desired; 539 540 /* 541 * Shortcut 542 */ 543 active = track->bk_active; 544 if (active == 1 && atomic_cmpset_int(&track->bk_active, 1, 0)) 545 return; 546 547 /* 548 * Full-on. Note that the wait flag is only atomically released on 549 * the 1->0 count transition. 550 * 551 * We check for a negative count transition using bit 30 since bit 31 552 * has a different meaning. 553 */ 554 for (;;) { 555 desired = (active & 0x7FFFFFFF) - 1; 556 if (desired) 557 desired |= active & 0x80000000; 558 if (atomic_cmpset_int(&track->bk_active, active, desired)) { 559 if (desired & 0x40000000) 560 panic("bio_track_rel: bad count: %p", track); 561 if (active & 0x80000000) 562 wakeup(track); 563 break; 564 } 565 active = track->bk_active; 566 } 567 } 568 569 /* 570 * Wait for the tracking count to reach 0. 571 * 572 * Use atomic ops such that the wait flag is only set atomically when 573 * bk_active is non-zero. 574 * 575 * MPSAFE 576 */ 577 int 578 bio_track_wait(struct bio_track *track, int slp_flags, int slp_timo) 579 { 580 int active; 581 int desired; 582 int error; 583 584 /* 585 * Shortcut 586 */ 587 if (track->bk_active == 0) 588 return(0); 589 590 /* 591 * Full-on. Note that the wait flag may only be atomically set if 592 * the active count is non-zero. 593 * 594 * NOTE: We cannot optimize active == desired since a wakeup could 595 * clear active prior to our tsleep_interlock(). 
596 */ 597 error = 0; 598 while ((active = track->bk_active) != 0) { 599 cpu_ccfence(); 600 desired = active | 0x80000000; 601 tsleep_interlock(track, slp_flags); 602 if (atomic_cmpset_int(&track->bk_active, active, desired)) { 603 error = tsleep(track, slp_flags | PINTERLOCKED, 604 "trwait", slp_timo); 605 if (error) 606 break; 607 } 608 } 609 return (error); 610 } 611 612 /* 613 * bufinit: 614 * 615 * Load time initialisation of the buffer cache, called from machine 616 * dependant initialization code. 617 */ 618 void 619 bufinit(void) 620 { 621 struct buf *bp; 622 vm_offset_t bogus_offset; 623 long i; 624 625 /* next, make a null set of free lists */ 626 for (i = 0; i < BUFFER_QUEUES; i++) 627 TAILQ_INIT(&bufqueues[i]); 628 629 /* finally, initialize each buffer header and stick on empty q */ 630 for (i = 0; i < nbuf; i++) { 631 bp = &buf[i]; 632 bzero(bp, sizeof *bp); 633 bp->b_flags = B_INVAL; /* we're just an empty header */ 634 bp->b_cmd = BUF_CMD_DONE; 635 bp->b_qindex = BQUEUE_EMPTY; 636 initbufbio(bp); 637 xio_init(&bp->b_xio); 638 buf_dep_init(bp); 639 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_EMPTY], bp, b_freelist); 640 } 641 642 /* 643 * maxbufspace is the absolute maximum amount of buffer space we are 644 * allowed to reserve in KVM and in real terms. The absolute maximum 645 * is nominally used by buf_daemon. hibufspace is the nominal maximum 646 * used by most other processes. The differential is required to 647 * ensure that buf_daemon is able to run when other processes might 648 * be blocked waiting for buffer space. 649 * 650 * maxbufspace is based on BKVASIZE. Allocating buffers larger then 651 * this may result in KVM fragmentation which is not handled optimally 652 * by the system. 653 */ 654 maxbufspace = nbuf * BKVASIZE; 655 hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10); 656 lobufspace = hibufspace - MAXBSIZE; 657 658 lorunningspace = 512 * 1024; 659 /* hirunningspace -- see below */ 660 661 /* 662 * Limit the amount of malloc memory since it is wired permanently 663 * into the kernel space. Even though this is accounted for in 664 * the buffer allocation, we don't want the malloced region to grow 665 * uncontrolled. The malloc scheme improves memory utilization 666 * significantly on average (small) directories. 667 */ 668 maxbufmallocspace = hibufspace / 20; 669 670 /* 671 * Reduce the chance of a deadlock occuring by limiting the number 672 * of delayed-write dirty buffers we allow to stack up. 673 * 674 * We don't want too much actually queued to the device at once 675 * (XXX this needs to be per-mount!), because the buffers will 676 * wind up locked for a very long period of time while the I/O 677 * drains. 678 */ 679 hidirtybufspace = hibufspace / 2; /* dirty + running */ 680 hirunningspace = hibufspace / 16; /* locked & queued to device */ 681 if (hirunningspace < 1024 * 1024) 682 hirunningspace = 1024 * 1024; 683 684 dirtykvaspace = 0; 685 dirtybufspace = 0; 686 dirtybufspacehw = 0; 687 688 lodirtybufspace = hidirtybufspace / 2; 689 690 /* 691 * Maximum number of async ops initiated per buf_daemon loop. This is 692 * somewhat of a hack at the moment, we really need to limit ourselves 693 * based on the number of bytes of I/O in-transit that were initiated 694 * from buf_daemon. 
695 */ 696 697 bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE); 698 vm_object_hold(&kernel_object); 699 bogus_page = vm_page_alloc(&kernel_object, 700 (bogus_offset >> PAGE_SHIFT), 701 VM_ALLOC_NORMAL); 702 vm_object_drop(&kernel_object); 703 vmstats.v_wire_count++; 704 705 } 706 707 /* 708 * Initialize the embedded bio structures, typically used by 709 * deprecated code which tries to allocate its own struct bufs. 710 */ 711 void 712 initbufbio(struct buf *bp) 713 { 714 bp->b_bio1.bio_buf = bp; 715 bp->b_bio1.bio_prev = NULL; 716 bp->b_bio1.bio_offset = NOOFFSET; 717 bp->b_bio1.bio_next = &bp->b_bio2; 718 bp->b_bio1.bio_done = NULL; 719 bp->b_bio1.bio_flags = 0; 720 721 bp->b_bio2.bio_buf = bp; 722 bp->b_bio2.bio_prev = &bp->b_bio1; 723 bp->b_bio2.bio_offset = NOOFFSET; 724 bp->b_bio2.bio_next = NULL; 725 bp->b_bio2.bio_done = NULL; 726 bp->b_bio2.bio_flags = 0; 727 728 BUF_LOCKINIT(bp); 729 } 730 731 /* 732 * Reinitialize the embedded bio structures as well as any additional 733 * translation cache layers. 734 */ 735 void 736 reinitbufbio(struct buf *bp) 737 { 738 struct bio *bio; 739 740 for (bio = &bp->b_bio1; bio; bio = bio->bio_next) { 741 bio->bio_done = NULL; 742 bio->bio_offset = NOOFFSET; 743 } 744 } 745 746 /* 747 * Undo the effects of an initbufbio(). 748 */ 749 void 750 uninitbufbio(struct buf *bp) 751 { 752 dsched_exit_buf(bp); 753 BUF_LOCKFREE(bp); 754 } 755 756 /* 757 * Push another BIO layer onto an existing BIO and return it. The new 758 * BIO layer may already exist, holding cached translation data. 759 */ 760 struct bio * 761 push_bio(struct bio *bio) 762 { 763 struct bio *nbio; 764 765 if ((nbio = bio->bio_next) == NULL) { 766 int index = bio - &bio->bio_buf->b_bio_array[0]; 767 if (index >= NBUF_BIO - 1) { 768 panic("push_bio: too many layers bp %p", 769 bio->bio_buf); 770 } 771 nbio = &bio->bio_buf->b_bio_array[index + 1]; 772 bio->bio_next = nbio; 773 nbio->bio_prev = bio; 774 nbio->bio_buf = bio->bio_buf; 775 nbio->bio_offset = NOOFFSET; 776 nbio->bio_done = NULL; 777 nbio->bio_next = NULL; 778 } 779 KKASSERT(nbio->bio_done == NULL); 780 return(nbio); 781 } 782 783 /* 784 * Pop a BIO translation layer, returning the previous layer. The 785 * must have been previously pushed. 786 */ 787 struct bio * 788 pop_bio(struct bio *bio) 789 { 790 return(bio->bio_prev); 791 } 792 793 void 794 clearbiocache(struct bio *bio) 795 { 796 while (bio) { 797 bio->bio_offset = NOOFFSET; 798 bio = bio->bio_next; 799 } 800 } 801 802 /* 803 * bfreekva: 804 * 805 * Free the KVA allocation for buffer 'bp'. 806 * 807 * Must be called from a critical section as this is the only locking for 808 * buffer_map. 809 * 810 * Since this call frees up buffer space, we call bufspacewakeup(). 811 * 812 * MPALMOSTSAFE 813 */ 814 static void 815 bfreekva(struct buf *bp) 816 { 817 int count; 818 819 if (bp->b_kvasize) { 820 ++buffreekvacnt; 821 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 822 vm_map_lock(&buffer_map); 823 bufspace -= bp->b_kvasize; 824 vm_map_delete(&buffer_map, 825 (vm_offset_t) bp->b_kvabase, 826 (vm_offset_t) bp->b_kvabase + bp->b_kvasize, 827 &count 828 ); 829 vm_map_unlock(&buffer_map); 830 vm_map_entry_release(count); 831 bp->b_kvasize = 0; 832 bp->b_kvabase = NULL; 833 bufspacewakeup(); 834 } 835 } 836 837 /* 838 * bremfree: 839 * 840 * Remove the buffer from the appropriate free list. 
841 */ 842 static __inline void 843 _bremfree(struct buf *bp) 844 { 845 if (bp->b_qindex != BQUEUE_NONE) { 846 KASSERT(BUF_REFCNTNB(bp) == 1, 847 ("bremfree: bp %p not locked",bp)); 848 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); 849 bp->b_qindex = BQUEUE_NONE; 850 } else { 851 if (BUF_REFCNTNB(bp) <= 1) 852 panic("bremfree: removing a buffer not on a queue"); 853 } 854 } 855 856 void 857 bremfree(struct buf *bp) 858 { 859 spin_lock(&bufqspin); 860 _bremfree(bp); 861 spin_unlock(&bufqspin); 862 } 863 864 static void 865 bremfree_locked(struct buf *bp) 866 { 867 _bremfree(bp); 868 } 869 870 /* 871 * This version of bread issues any required I/O asyncnronously and 872 * makes a callback on completion. 873 * 874 * The callback must check whether BIO_DONE is set in the bio and issue 875 * the bpdone(bp, 0) if it isn't. The callback is responsible for clearing 876 * BIO_DONE and disposing of the I/O (bqrelse()ing it). 877 */ 878 void 879 breadcb(struct vnode *vp, off_t loffset, int size, 880 void (*func)(struct bio *), void *arg) 881 { 882 struct buf *bp; 883 884 bp = getblk(vp, loffset, size, 0, 0); 885 886 /* if not found in cache, do some I/O */ 887 if ((bp->b_flags & B_CACHE) == 0) { 888 bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL); 889 bp->b_cmd = BUF_CMD_READ; 890 bp->b_bio1.bio_done = func; 891 bp->b_bio1.bio_caller_info1.ptr = arg; 892 vfs_busy_pages(vp, bp); 893 BUF_KERNPROC(bp); 894 vn_strategy(vp, &bp->b_bio1); 895 } else if (func) { 896 /* 897 * Since we are issuing the callback synchronously it cannot 898 * race the BIO_DONE, so no need for atomic ops here. 899 */ 900 /*bp->b_bio1.bio_done = func;*/ 901 bp->b_bio1.bio_caller_info1.ptr = arg; 902 bp->b_bio1.bio_flags |= BIO_DONE; 903 func(&bp->b_bio1); 904 } else { 905 bqrelse(bp); 906 } 907 } 908 909 /* 910 * breadnx() - Terminal function for bread() and breadn(). 911 * 912 * This function will start asynchronous I/O on read-ahead blocks as well 913 * as satisfy the primary request. 914 * 915 * We must clear B_ERROR and B_INVAL prior to initiating I/O. If B_CACHE is 916 * set, the buffer is valid and we do not have to do anything. 917 */ 918 int 919 breadnx(struct vnode *vp, off_t loffset, int size, off_t *raoffset, 920 int *rabsize, int cnt, struct buf **bpp) 921 { 922 struct buf *bp, *rabp; 923 int i; 924 int rv = 0, readwait = 0; 925 926 if (*bpp) 927 bp = *bpp; 928 else 929 *bpp = bp = getblk(vp, loffset, size, 0, 0); 930 931 /* if not found in cache, do some I/O */ 932 if ((bp->b_flags & B_CACHE) == 0) { 933 bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL); 934 bp->b_cmd = BUF_CMD_READ; 935 bp->b_bio1.bio_done = biodone_sync; 936 bp->b_bio1.bio_flags |= BIO_SYNC; 937 vfs_busy_pages(vp, bp); 938 vn_strategy(vp, &bp->b_bio1); 939 ++readwait; 940 } 941 942 for (i = 0; i < cnt; i++, raoffset++, rabsize++) { 943 if (inmem(vp, *raoffset)) 944 continue; 945 rabp = getblk(vp, *raoffset, *rabsize, 0, 0); 946 947 if ((rabp->b_flags & B_CACHE) == 0) { 948 rabp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL); 949 rabp->b_cmd = BUF_CMD_READ; 950 vfs_busy_pages(vp, rabp); 951 BUF_KERNPROC(rabp); 952 vn_strategy(vp, &rabp->b_bio1); 953 } else { 954 brelse(rabp); 955 } 956 } 957 if (readwait) 958 rv = biowait(&bp->b_bio1, "biord"); 959 return (rv); 960 } 961 962 /* 963 * bwrite: 964 * 965 * Synchronous write, waits for completion. 966 * 967 * Write, release buffer on completion. (Done by iodone 968 * if async). Do not bother writing anything if the buffer 969 * is invalid. 
970 * 971 * Note that we set B_CACHE here, indicating that buffer is 972 * fully valid and thus cacheable. This is true even of NFS 973 * now so we set it generally. This could be set either here 974 * or in biodone() since the I/O is synchronous. We put it 975 * here. 976 */ 977 int 978 bwrite(struct buf *bp) 979 { 980 int error; 981 982 if (bp->b_flags & B_INVAL) { 983 brelse(bp); 984 return (0); 985 } 986 if (BUF_REFCNTNB(bp) == 0) 987 panic("bwrite: buffer is not busy???"); 988 989 /* Mark the buffer clean */ 990 bundirty(bp); 991 992 bp->b_flags &= ~(B_ERROR | B_EINTR); 993 bp->b_flags |= B_CACHE; 994 bp->b_cmd = BUF_CMD_WRITE; 995 bp->b_bio1.bio_done = biodone_sync; 996 bp->b_bio1.bio_flags |= BIO_SYNC; 997 vfs_busy_pages(bp->b_vp, bp); 998 999 /* 1000 * Normal bwrites pipeline writes. NOTE: b_bufsize is only 1001 * valid for vnode-backed buffers. 1002 */ 1003 bsetrunningbufspace(bp, bp->b_bufsize); 1004 vn_strategy(bp->b_vp, &bp->b_bio1); 1005 error = biowait(&bp->b_bio1, "biows"); 1006 brelse(bp); 1007 1008 return (error); 1009 } 1010 1011 /* 1012 * bawrite: 1013 * 1014 * Asynchronous write. Start output on a buffer, but do not wait for 1015 * it to complete. The buffer is released when the output completes. 1016 * 1017 * bwrite() ( or the VOP routine anyway ) is responsible for handling 1018 * B_INVAL buffers. Not us. 1019 */ 1020 void 1021 bawrite(struct buf *bp) 1022 { 1023 if (bp->b_flags & B_INVAL) { 1024 brelse(bp); 1025 return; 1026 } 1027 if (BUF_REFCNTNB(bp) == 0) 1028 panic("bwrite: buffer is not busy???"); 1029 1030 /* Mark the buffer clean */ 1031 bundirty(bp); 1032 1033 bp->b_flags &= ~(B_ERROR | B_EINTR); 1034 bp->b_flags |= B_CACHE; 1035 bp->b_cmd = BUF_CMD_WRITE; 1036 KKASSERT(bp->b_bio1.bio_done == NULL); 1037 vfs_busy_pages(bp->b_vp, bp); 1038 1039 /* 1040 * Normal bwrites pipeline writes. NOTE: b_bufsize is only 1041 * valid for vnode-backed buffers. 1042 */ 1043 bsetrunningbufspace(bp, bp->b_bufsize); 1044 BUF_KERNPROC(bp); 1045 vn_strategy(bp->b_vp, &bp->b_bio1); 1046 } 1047 1048 /* 1049 * bowrite: 1050 * 1051 * Ordered write. Start output on a buffer, and flag it so that the 1052 * device will write it in the order it was queued. The buffer is 1053 * released when the output completes. bwrite() ( or the VOP routine 1054 * anyway ) is responsible for handling B_INVAL buffers. 1055 */ 1056 int 1057 bowrite(struct buf *bp) 1058 { 1059 bp->b_flags |= B_ORDERED; 1060 bawrite(bp); 1061 return (0); 1062 } 1063 1064 /* 1065 * bdwrite: 1066 * 1067 * Delayed write. (Buffer is marked dirty). Do not bother writing 1068 * anything if the buffer is marked invalid. 1069 * 1070 * Note that since the buffer must be completely valid, we can safely 1071 * set B_CACHE. In fact, we have to set B_CACHE here rather then in 1072 * biodone() in order to prevent getblk from writing the buffer 1073 * out synchronously. 1074 */ 1075 void 1076 bdwrite(struct buf *bp) 1077 { 1078 if (BUF_REFCNTNB(bp) == 0) 1079 panic("bdwrite: buffer is not busy"); 1080 1081 if (bp->b_flags & B_INVAL) { 1082 brelse(bp); 1083 return; 1084 } 1085 bdirty(bp); 1086 1087 if (dsched_is_clear_buf_priv(bp)) 1088 dsched_new_buf(bp); 1089 1090 /* 1091 * Set B_CACHE, indicating that the buffer is fully valid. This is 1092 * true even of NFS now. 1093 */ 1094 bp->b_flags |= B_CACHE; 1095 1096 /* 1097 * This bmap keeps the system from needing to do the bmap later, 1098 * perhaps when the system is attempting to do a sync. 
Since it 1099 * is likely that the indirect block -- or whatever other datastructure 1100 * that the filesystem needs is still in memory now, it is a good 1101 * thing to do this. Note also, that if the pageout daemon is 1102 * requesting a sync -- there might not be enough memory to do 1103 * the bmap then... So, this is important to do. 1104 */ 1105 if (bp->b_bio2.bio_offset == NOOFFSET) { 1106 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset, 1107 NULL, NULL, BUF_CMD_WRITE); 1108 } 1109 1110 /* 1111 * Because the underlying pages may still be mapped and 1112 * writable trying to set the dirty buffer (b_dirtyoff/end) 1113 * range here will be inaccurate. 1114 * 1115 * However, we must still clean the pages to satisfy the 1116 * vnode_pager and pageout daemon, so theythink the pages 1117 * have been "cleaned". What has really occured is that 1118 * they've been earmarked for later writing by the buffer 1119 * cache. 1120 * 1121 * So we get the b_dirtyoff/end update but will not actually 1122 * depend on it (NFS that is) until the pages are busied for 1123 * writing later on. 1124 */ 1125 vfs_clean_pages(bp); 1126 bqrelse(bp); 1127 1128 /* 1129 * note: we cannot initiate I/O from a bdwrite even if we wanted to, 1130 * due to the softdep code. 1131 */ 1132 } 1133 1134 /* 1135 * Fake write - return pages to VM system as dirty, leave the buffer clean. 1136 * This is used by tmpfs. 1137 * 1138 * It is important for any VFS using this routine to NOT use it for 1139 * IO_SYNC or IO_ASYNC operations which occur when the system really 1140 * wants to flush VM pages to backing store. 1141 */ 1142 void 1143 buwrite(struct buf *bp) 1144 { 1145 vm_page_t m; 1146 int i; 1147 1148 /* 1149 * Only works for VMIO buffers. If the buffer is already 1150 * marked for delayed-write we can't avoid the bdwrite(). 1151 */ 1152 if ((bp->b_flags & B_VMIO) == 0 || (bp->b_flags & B_DELWRI)) { 1153 bdwrite(bp); 1154 return; 1155 } 1156 1157 /* 1158 * Mark as needing a commit. 1159 */ 1160 for (i = 0; i < bp->b_xio.xio_npages; i++) { 1161 m = bp->b_xio.xio_pages[i]; 1162 vm_page_need_commit(m); 1163 } 1164 bqrelse(bp); 1165 } 1166 1167 /* 1168 * bdirty: 1169 * 1170 * Turn buffer into delayed write request by marking it B_DELWRI. 1171 * B_RELBUF and B_NOCACHE must be cleared. 1172 * 1173 * We reassign the buffer to itself to properly update it in the 1174 * dirty/clean lists. 1175 * 1176 * Must be called from a critical section. 1177 * The buffer must be on BQUEUE_NONE. 
1178 */ 1179 void 1180 bdirty(struct buf *bp) 1181 { 1182 KASSERT(bp->b_qindex == BQUEUE_NONE, 1183 ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); 1184 if (bp->b_flags & B_NOCACHE) { 1185 kprintf("bdirty: clearing B_NOCACHE on buf %p\n", bp); 1186 bp->b_flags &= ~B_NOCACHE; 1187 } 1188 if (bp->b_flags & B_INVAL) { 1189 kprintf("bdirty: warning, dirtying invalid buffer %p\n", bp); 1190 } 1191 bp->b_flags &= ~B_RELBUF; 1192 1193 if ((bp->b_flags & B_DELWRI) == 0) { 1194 lwkt_gettoken(&bp->b_vp->v_token); 1195 bp->b_flags |= B_DELWRI; 1196 reassignbuf(bp); 1197 lwkt_reltoken(&bp->b_vp->v_token); 1198 1199 spin_lock(&bufcspin); 1200 ++dirtybufcount; 1201 dirtykvaspace += bp->b_kvasize; 1202 dirtybufspace += bp->b_bufsize; 1203 if (bp->b_flags & B_HEAVY) { 1204 ++dirtybufcounthw; 1205 dirtybufspacehw += bp->b_bufsize; 1206 } 1207 spin_unlock(&bufcspin); 1208 1209 bd_heatup(); 1210 } 1211 } 1212 1213 /* 1214 * Set B_HEAVY, indicating that this is a heavy-weight buffer that 1215 * needs to be flushed with a different buf_daemon thread to avoid 1216 * deadlocks. B_HEAVY also imposes restrictions in getnewbuf(). 1217 */ 1218 void 1219 bheavy(struct buf *bp) 1220 { 1221 if ((bp->b_flags & B_HEAVY) == 0) { 1222 bp->b_flags |= B_HEAVY; 1223 if (bp->b_flags & B_DELWRI) { 1224 spin_lock(&bufcspin); 1225 ++dirtybufcounthw; 1226 dirtybufspacehw += bp->b_bufsize; 1227 spin_unlock(&bufcspin); 1228 } 1229 } 1230 } 1231 1232 /* 1233 * bundirty: 1234 * 1235 * Clear B_DELWRI for buffer. 1236 * 1237 * Must be called from a critical section. 1238 * 1239 * The buffer is typically on BQUEUE_NONE but there is one case in 1240 * brelse() that calls this function after placing the buffer on 1241 * a different queue. 1242 * 1243 * MPSAFE 1244 */ 1245 void 1246 bundirty(struct buf *bp) 1247 { 1248 if (bp->b_flags & B_DELWRI) { 1249 lwkt_gettoken(&bp->b_vp->v_token); 1250 bp->b_flags &= ~B_DELWRI; 1251 reassignbuf(bp); 1252 lwkt_reltoken(&bp->b_vp->v_token); 1253 1254 spin_lock(&bufcspin); 1255 --dirtybufcount; 1256 dirtykvaspace -= bp->b_kvasize; 1257 dirtybufspace -= bp->b_bufsize; 1258 if (bp->b_flags & B_HEAVY) { 1259 --dirtybufcounthw; 1260 dirtybufspacehw -= bp->b_bufsize; 1261 } 1262 spin_unlock(&bufcspin); 1263 1264 bd_signal(bp->b_bufsize); 1265 } 1266 /* 1267 * Since it is now being written, we can clear its deferred write flag. 1268 */ 1269 bp->b_flags &= ~B_DEFERRED; 1270 } 1271 1272 /* 1273 * Set the b_runningbufspace field, used to track how much I/O is 1274 * in progress at any given moment. 1275 */ 1276 void 1277 bsetrunningbufspace(struct buf *bp, int bytes) 1278 { 1279 bp->b_runningbufspace = bytes; 1280 if (bytes) { 1281 spin_lock(&bufcspin); 1282 runningbufspace += bytes; 1283 ++runningbufcount; 1284 spin_unlock(&bufcspin); 1285 } 1286 } 1287 1288 /* 1289 * brelse: 1290 * 1291 * Release a busy buffer and, if requested, free its resources. The 1292 * buffer will be stashed in the appropriate bufqueue[] allowing it 1293 * to be accessed later as a cache entity or reused for other purposes. 1294 * 1295 * MPALMOSTSAFE 1296 */ 1297 void 1298 brelse(struct buf *bp) 1299 { 1300 #ifdef INVARIANTS 1301 int saved_flags = bp->b_flags; 1302 #endif 1303 1304 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1305 1306 /* 1307 * If B_NOCACHE is set we are being asked to destroy the buffer and 1308 * its backing store. Clear B_DELWRI. 
1309 * 1310 * B_NOCACHE is set in two cases: (1) when the caller really wants 1311 * to destroy the buffer and backing store and (2) when the caller 1312 * wants to destroy the buffer and backing store after a write 1313 * completes. 1314 */ 1315 if ((bp->b_flags & (B_NOCACHE|B_DELWRI)) == (B_NOCACHE|B_DELWRI)) { 1316 bundirty(bp); 1317 } 1318 1319 if ((bp->b_flags & (B_INVAL | B_DELWRI)) == B_DELWRI) { 1320 /* 1321 * A re-dirtied buffer is only subject to destruction 1322 * by B_INVAL. B_ERROR and B_NOCACHE are ignored. 1323 */ 1324 /* leave buffer intact */ 1325 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) || 1326 (bp->b_bufsize <= 0)) { 1327 /* 1328 * Either a failed read or we were asked to free or not 1329 * cache the buffer. This path is reached with B_DELWRI 1330 * set only if B_INVAL is already set. B_NOCACHE governs 1331 * backing store destruction. 1332 * 1333 * NOTE: HAMMER will set B_LOCKED in buf_deallocate if the 1334 * buffer cannot be immediately freed. 1335 */ 1336 bp->b_flags |= B_INVAL; 1337 if (LIST_FIRST(&bp->b_dep) != NULL) 1338 buf_deallocate(bp); 1339 if (bp->b_flags & B_DELWRI) { 1340 spin_lock(&bufcspin); 1341 --dirtybufcount; 1342 dirtykvaspace -= bp->b_kvasize; 1343 dirtybufspace -= bp->b_bufsize; 1344 if (bp->b_flags & B_HEAVY) { 1345 --dirtybufcounthw; 1346 dirtybufspacehw -= bp->b_bufsize; 1347 } 1348 spin_unlock(&bufcspin); 1349 1350 bd_signal(bp->b_bufsize); 1351 } 1352 bp->b_flags &= ~(B_DELWRI | B_CACHE); 1353 } 1354 1355 /* 1356 * We must clear B_RELBUF if B_DELWRI or B_LOCKED is set, 1357 * or if b_refs is non-zero. 1358 * 1359 * If vfs_vmio_release() is called with either bit set, the 1360 * underlying pages may wind up getting freed causing a previous 1361 * write (bdwrite()) to get 'lost' because pages associated with 1362 * a B_DELWRI bp are marked clean. Pages associated with a 1363 * B_LOCKED buffer may be mapped by the filesystem. 1364 * 1365 * If we want to release the buffer ourselves (rather then the 1366 * originator asking us to release it), give the originator a 1367 * chance to countermand the release by setting B_LOCKED. 1368 * 1369 * We still allow the B_INVAL case to call vfs_vmio_release(), even 1370 * if B_DELWRI is set. 1371 * 1372 * If B_DELWRI is not set we may have to set B_RELBUF if we are low 1373 * on pages to return pages to the VM page queues. 1374 */ 1375 if ((bp->b_flags & (B_DELWRI | B_LOCKED)) || bp->b_refs) { 1376 bp->b_flags &= ~B_RELBUF; 1377 } else if (vm_page_count_min(0)) { 1378 if (LIST_FIRST(&bp->b_dep) != NULL) 1379 buf_deallocate(bp); /* can set B_LOCKED */ 1380 if (bp->b_flags & (B_DELWRI | B_LOCKED)) 1381 bp->b_flags &= ~B_RELBUF; 1382 else 1383 bp->b_flags |= B_RELBUF; 1384 } 1385 1386 /* 1387 * Make sure b_cmd is clear. It may have already been cleared by 1388 * biodone(). 1389 * 1390 * At this point destroying the buffer is governed by the B_INVAL 1391 * or B_RELBUF flags. 1392 */ 1393 bp->b_cmd = BUF_CMD_DONE; 1394 dsched_exit_buf(bp); 1395 1396 /* 1397 * VMIO buffer rundown. Make sure the VM page array is restored 1398 * after an I/O may have replaces some of the pages with bogus pages 1399 * in order to not destroy dirty pages in a fill-in read. 1400 * 1401 * Note that due to the code above, if a buffer is marked B_DELWRI 1402 * then the B_RELBUF and B_NOCACHE bits will always be clear. 1403 * B_INVAL may still be set, however. 1404 * 1405 * For clean buffers, B_INVAL or B_RELBUF will destroy the buffer 1406 * but not the backing store. B_NOCACHE will destroy the backing 1407 * store. 
1408 * 1409 * Note that dirty NFS buffers contain byte-granular write ranges 1410 * and should not be destroyed w/ B_INVAL even if the backing store 1411 * is left intact. 1412 */ 1413 if (bp->b_flags & B_VMIO) { 1414 /* 1415 * Rundown for VMIO buffers which are not dirty NFS buffers. 1416 */ 1417 int i, j, resid; 1418 vm_page_t m; 1419 off_t foff; 1420 vm_pindex_t poff; 1421 vm_object_t obj; 1422 struct vnode *vp; 1423 1424 vp = bp->b_vp; 1425 1426 /* 1427 * Get the base offset and length of the buffer. Note that 1428 * in the VMIO case if the buffer block size is not 1429 * page-aligned then b_data pointer may not be page-aligned. 1430 * But our b_xio.xio_pages array *IS* page aligned. 1431 * 1432 * block sizes less then DEV_BSIZE (usually 512) are not 1433 * supported due to the page granularity bits (m->valid, 1434 * m->dirty, etc...). 1435 * 1436 * See man buf(9) for more information 1437 */ 1438 1439 resid = bp->b_bufsize; 1440 foff = bp->b_loffset; 1441 1442 for (i = 0; i < bp->b_xio.xio_npages; i++) { 1443 m = bp->b_xio.xio_pages[i]; 1444 vm_page_flag_clear(m, PG_ZERO); 1445 /* 1446 * If we hit a bogus page, fixup *all* of them 1447 * now. Note that we left these pages wired 1448 * when we removed them so they had better exist, 1449 * and they cannot be ripped out from under us so 1450 * no critical section protection is necessary. 1451 */ 1452 if (m == bogus_page) { 1453 obj = vp->v_object; 1454 poff = OFF_TO_IDX(bp->b_loffset); 1455 1456 vm_object_hold(obj); 1457 for (j = i; j < bp->b_xio.xio_npages; j++) { 1458 vm_page_t mtmp; 1459 1460 mtmp = bp->b_xio.xio_pages[j]; 1461 if (mtmp == bogus_page) { 1462 mtmp = vm_page_lookup(obj, poff + j); 1463 if (!mtmp) { 1464 panic("brelse: page missing"); 1465 } 1466 bp->b_xio.xio_pages[j] = mtmp; 1467 } 1468 } 1469 bp->b_flags &= ~B_HASBOGUS; 1470 vm_object_drop(obj); 1471 1472 if ((bp->b_flags & B_INVAL) == 0) { 1473 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 1474 bp->b_xio.xio_pages, bp->b_xio.xio_npages); 1475 } 1476 m = bp->b_xio.xio_pages[i]; 1477 } 1478 1479 /* 1480 * Invalidate the backing store if B_NOCACHE is set 1481 * (e.g. used with vinvalbuf()). If this is NFS 1482 * we impose a requirement that the block size be 1483 * a multiple of PAGE_SIZE and create a temporary 1484 * hack to basically invalidate the whole page. The 1485 * problem is that NFS uses really odd buffer sizes 1486 * especially when tracking piecemeal writes and 1487 * it also vinvalbuf()'s a lot, which would result 1488 * in only partial page validation and invalidation 1489 * here. If the file page is mmap()'d, however, 1490 * all the valid bits get set so after we invalidate 1491 * here we would end up with weird m->valid values 1492 * like 0xfc. nfs_getpages() can't handle this so 1493 * we clear all the valid bits for the NFS case 1494 * instead of just some of them. 1495 * 1496 * The real bug is the VM system having to set m->valid 1497 * to VM_PAGE_BITS_ALL for faulted-in pages, which 1498 * itself is an artifact of the whole 512-byte 1499 * granular mess that exists to support odd block 1500 * sizes and UFS meta-data block sizes (e.g. 6144). 1501 * A complete rewrite is required. 
1502 * 1503 * XXX 1504 */ 1505 if (bp->b_flags & (B_NOCACHE|B_ERROR)) { 1506 int poffset = foff & PAGE_MASK; 1507 int presid; 1508 1509 presid = PAGE_SIZE - poffset; 1510 if (bp->b_vp->v_tag == VT_NFS && 1511 bp->b_vp->v_type == VREG) { 1512 ; /* entire page */ 1513 } else if (presid > resid) { 1514 presid = resid; 1515 } 1516 KASSERT(presid >= 0, ("brelse: extra page")); 1517 vm_page_set_invalid(m, poffset, presid); 1518 1519 /* 1520 * Also make sure any swap cache is removed 1521 * as it is now stale (HAMMER in particular 1522 * uses B_NOCACHE to deal with buffer 1523 * aliasing). 1524 */ 1525 swap_pager_unswapped(m); 1526 } 1527 resid -= PAGE_SIZE - (foff & PAGE_MASK); 1528 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 1529 } 1530 if (bp->b_flags & (B_INVAL | B_RELBUF)) 1531 vfs_vmio_release(bp); 1532 } else { 1533 /* 1534 * Rundown for non-VMIO buffers. 1535 */ 1536 if (bp->b_flags & (B_INVAL | B_RELBUF)) { 1537 if (bp->b_bufsize) 1538 allocbuf(bp, 0); 1539 KKASSERT (LIST_FIRST(&bp->b_dep) == NULL); 1540 if (bp->b_vp) 1541 brelvp(bp); 1542 } 1543 } 1544 1545 if (bp->b_qindex != BQUEUE_NONE) 1546 panic("brelse: free buffer onto another queue???"); 1547 if (BUF_REFCNTNB(bp) > 1) { 1548 /* Temporary panic to verify exclusive locking */ 1549 /* This panic goes away when we allow shared refs */ 1550 panic("brelse: multiple refs"); 1551 /* NOT REACHED */ 1552 return; 1553 } 1554 1555 /* 1556 * Figure out the correct queue to place the cleaned up buffer on. 1557 * Buffers placed in the EMPTY or EMPTYKVA had better already be 1558 * disassociated from their vnode. 1559 */ 1560 spin_lock(&bufqspin); 1561 if (bp->b_flags & B_LOCKED) { 1562 /* 1563 * Buffers that are locked are placed in the locked queue 1564 * immediately, regardless of their state. 1565 */ 1566 bp->b_qindex = BQUEUE_LOCKED; 1567 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist); 1568 } else if (bp->b_bufsize == 0) { 1569 /* 1570 * Buffers with no memory. Due to conditionals near the top 1571 * of brelse() such buffers should probably already be 1572 * marked B_INVAL and disassociated from their vnode. 1573 */ 1574 bp->b_flags |= B_INVAL; 1575 KASSERT(bp->b_vp == NULL, ("bp1 %p flags %08x/%08x vnode %p unexpectededly still associated!", bp, saved_flags, bp->b_flags, bp->b_vp)); 1576 KKASSERT((bp->b_flags & B_HASHED) == 0); 1577 if (bp->b_kvasize) { 1578 bp->b_qindex = BQUEUE_EMPTYKVA; 1579 } else { 1580 bp->b_qindex = BQUEUE_EMPTY; 1581 } 1582 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist); 1583 } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) { 1584 /* 1585 * Buffers with junk contents. Again these buffers had better 1586 * already be disassociated from their vnode. 1587 */ 1588 KASSERT(bp->b_vp == NULL, ("bp2 %p flags %08x/%08x vnode %p unexpectededly still associated!", bp, saved_flags, bp->b_flags, bp->b_vp)); 1589 KKASSERT((bp->b_flags & B_HASHED) == 0); 1590 bp->b_flags |= B_INVAL; 1591 bp->b_qindex = BQUEUE_CLEAN; 1592 TAILQ_INSERT_HEAD(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); 1593 } else { 1594 /* 1595 * Remaining buffers. These buffers are still associated with 1596 * their vnode. 
1597 */ 1598 switch(bp->b_flags & (B_DELWRI|B_HEAVY)) { 1599 case B_DELWRI: 1600 bp->b_qindex = BQUEUE_DIRTY; 1601 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY], bp, b_freelist); 1602 break; 1603 case B_DELWRI | B_HEAVY: 1604 bp->b_qindex = BQUEUE_DIRTY_HW; 1605 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY_HW], bp, 1606 b_freelist); 1607 break; 1608 default: 1609 /* 1610 * NOTE: Buffers are always placed at the end of the 1611 * queue. If B_AGE is not set the buffer will cycle 1612 * through the queue twice. 1613 */ 1614 bp->b_qindex = BQUEUE_CLEAN; 1615 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); 1616 break; 1617 } 1618 } 1619 spin_unlock(&bufqspin); 1620 1621 /* 1622 * If B_INVAL, clear B_DELWRI. We've already placed the buffer 1623 * on the correct queue. 1624 */ 1625 if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) 1626 bundirty(bp); 1627 1628 /* 1629 * The bp is on an appropriate queue unless locked. If it is not 1630 * locked or dirty we can wakeup threads waiting for buffer space. 1631 * 1632 * We've already handled the B_INVAL case ( B_DELWRI will be clear 1633 * if B_INVAL is set ). 1634 */ 1635 if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) 1636 bufcountwakeup(); 1637 1638 /* 1639 * Something we can maybe free or reuse 1640 */ 1641 if (bp->b_bufsize || bp->b_kvasize) 1642 bufspacewakeup(); 1643 1644 /* 1645 * Clean up temporary flags and unlock the buffer. 1646 */ 1647 bp->b_flags &= ~(B_ORDERED | B_NOCACHE | B_RELBUF | B_DIRECT); 1648 BUF_UNLOCK(bp); 1649 } 1650 1651 /* 1652 * bqrelse: 1653 * 1654 * Release a buffer back to the appropriate queue but do not try to free 1655 * it. The buffer is expected to be used again soon. 1656 * 1657 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by 1658 * biodone() to requeue an async I/O on completion. It is also used when 1659 * known good buffers need to be requeued but we think we may need the data 1660 * again soon. 1661 * 1662 * XXX we should be able to leave the B_RELBUF hint set on completion. 1663 * 1664 * MPSAFE 1665 */ 1666 void 1667 bqrelse(struct buf *bp) 1668 { 1669 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1670 1671 if (bp->b_qindex != BQUEUE_NONE) 1672 panic("bqrelse: free buffer onto another queue???"); 1673 if (BUF_REFCNTNB(bp) > 1) { 1674 /* do not release to free list */ 1675 panic("bqrelse: multiple refs"); 1676 return; 1677 } 1678 1679 buf_act_advance(bp); 1680 1681 spin_lock(&bufqspin); 1682 if (bp->b_flags & B_LOCKED) { 1683 /* 1684 * Locked buffers are released to the locked queue. However, 1685 * if the buffer is dirty it will first go into the dirty 1686 * queue and later on after the I/O completes successfully it 1687 * will be released to the locked queue. 1688 */ 1689 bp->b_qindex = BQUEUE_LOCKED; 1690 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist); 1691 } else if (bp->b_flags & B_DELWRI) { 1692 bp->b_qindex = (bp->b_flags & B_HEAVY) ? 1693 BQUEUE_DIRTY_HW : BQUEUE_DIRTY; 1694 TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist); 1695 } else if (vm_page_count_min(0)) { 1696 /* 1697 * We are too low on memory, we have to try to free the 1698 * buffer (most importantly: the wired pages making up its 1699 * backing store) *now*. 
1700 */ 1701 spin_unlock(&bufqspin); 1702 brelse(bp); 1703 return; 1704 } else { 1705 bp->b_qindex = BQUEUE_CLEAN; 1706 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); 1707 } 1708 spin_unlock(&bufqspin); 1709 1710 if ((bp->b_flags & B_LOCKED) == 0 && 1711 ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)) { 1712 bufcountwakeup(); 1713 } 1714 1715 /* 1716 * Something we can maybe free or reuse. 1717 */ 1718 if (bp->b_bufsize && !(bp->b_flags & B_DELWRI)) 1719 bufspacewakeup(); 1720 1721 /* 1722 * Final cleanup and unlock. Clear bits that are only used while a 1723 * buffer is actively locked. 1724 */ 1725 bp->b_flags &= ~(B_ORDERED | B_NOCACHE | B_RELBUF); 1726 dsched_exit_buf(bp); 1727 BUF_UNLOCK(bp); 1728 } 1729 1730 /* 1731 * Hold a buffer, preventing it from being reused. This will prevent 1732 * normal B_RELBUF operations on the buffer but will not prevent B_INVAL 1733 * operations. If a B_INVAL operation occurs the buffer will remain held 1734 * but the underlying pages may get ripped out. 1735 * 1736 * These functions are typically used in VOP_READ/VOP_WRITE functions 1737 * to hold a buffer during a copyin or copyout, preventing deadlocks 1738 * or recursive lock panics when read()/write() is used over mmap()'d 1739 * space. 1740 * 1741 * NOTE: bqhold() requires that the buffer be locked at the time of the 1742 * hold. bqdrop() has no requirements other than the buffer having 1743 * previously been held. 1744 */ 1745 void 1746 bqhold(struct buf *bp) 1747 { 1748 atomic_add_int(&bp->b_refs, 1); 1749 } 1750 1751 void 1752 bqdrop(struct buf *bp) 1753 { 1754 KKASSERT(bp->b_refs > 0); 1755 atomic_add_int(&bp->b_refs, -1); 1756 } 1757 1758 /* 1759 * Return backing pages held by the buffer 'bp' back to the VM system. 1760 * This routine is called when the bp is invalidated, released, or 1761 * reused. 1762 * 1763 * The KVA mapping (b_data) for the underlying pages is removed by 1764 * this function. 1765 * 1766 * WARNING! This routine is integral to the low memory critical path 1767 * when a buffer is B_RELBUF'd. If the system has a severe page 1768 * deficit we need to get the page(s) onto the PQ_FREE or PQ_CACHE 1769 * queues so they can be reused in the current pageout daemon 1770 * pass. 1771 */ 1772 static void 1773 vfs_vmio_release(struct buf *bp) 1774 { 1775 int i; 1776 vm_page_t m; 1777 1778 for (i = 0; i < bp->b_xio.xio_npages; i++) { 1779 m = bp->b_xio.xio_pages[i]; 1780 bp->b_xio.xio_pages[i] = NULL; 1781 1782 /* 1783 * We need to own the page in order to safely unwire it. 1784 */ 1785 vm_page_busy_wait(m, FALSE, "vmiopg"); 1786 1787 /* 1788 * The VFS is telling us this is not a meta-data buffer 1789 * even if it is backed by a block device. 1790 */ 1791 if (bp->b_flags & B_NOTMETA) 1792 vm_page_flag_set(m, PG_NOTMETA); 1793 1794 /* 1795 * This is a very important bit of code. We try to track 1796 * VM page use whether the pages are wired into the buffer 1797 * cache or not. While wired into the buffer cache the 1798 * bp tracks the act_count. 1799 * 1800 * We can choose to place unwired pages on the inactive 1801 * queue (0) or active queue (1). If we place too many 1802 * on the active queue the queue will cycle the act_count 1803 * on pages we'd like to keep, just from single-use pages 1804 * (such as when doing a tar-up or file scan). 
1805 */ 1806 if (bp->b_act_count < vm_cycle_point) 1807 vm_page_unwire(m, 0); 1808 else 1809 vm_page_unwire(m, 1); 1810 1811 /* 1812 * If the wire_count has dropped to 0 we may need to take 1813 * further action before unbusying the page. 1814 * 1815 * WARNING: vm_page_try_*() also checks PG_NEED_COMMIT for us. 1816 */ 1817 if (m->wire_count == 0) { 1818 vm_page_flag_clear(m, PG_ZERO); 1819 1820 if (bp->b_flags & B_DIRECT) { 1821 /* 1822 * Attempt to free the page if B_DIRECT is 1823 * set, the caller does not desire the page 1824 * to be cached. 1825 */ 1826 vm_page_wakeup(m); 1827 vm_page_try_to_free(m); 1828 } else if ((bp->b_flags & B_NOTMETA) || 1829 vm_page_count_min(0)) { 1830 /* 1831 * Attempt to move the page to PQ_CACHE 1832 * if B_NOTMETA is set. This flag is set 1833 * by HAMMER to remove one of the two pages 1834 * present when double buffering is enabled. 1835 * 1836 * Attempt to move the page to PQ_CACHE 1837 * If we have a severe page deficit. This 1838 * will cause buffer cache operations related 1839 * to pageouts to recycle the related pages 1840 * in order to avoid a low memory deadlock. 1841 */ 1842 m->act_count = bp->b_act_count; 1843 vm_page_wakeup(m); 1844 vm_page_try_to_cache(m); 1845 } else { 1846 /* 1847 * Nominal case, leave the page on the 1848 * queue the original unwiring placed it on 1849 * (active or inactive). 1850 */ 1851 m->act_count = bp->b_act_count; 1852 vm_page_wakeup(m); 1853 } 1854 } else { 1855 vm_page_wakeup(m); 1856 } 1857 } 1858 1859 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), 1860 bp->b_xio.xio_npages); 1861 if (bp->b_bufsize) { 1862 bufspacewakeup(); 1863 bp->b_bufsize = 0; 1864 } 1865 bp->b_xio.xio_npages = 0; 1866 bp->b_flags &= ~B_VMIO; 1867 KKASSERT (LIST_FIRST(&bp->b_dep) == NULL); 1868 if (bp->b_vp) 1869 brelvp(bp); 1870 } 1871 1872 /* 1873 * getnewbuf: 1874 * 1875 * Find and initialize a new buffer header, freeing up existing buffers 1876 * in the bufqueues as necessary. The new buffer is returned locked. 1877 * 1878 * Important: B_INVAL is not set. If the caller wishes to throw the 1879 * buffer away, the caller must set B_INVAL prior to calling brelse(). 1880 * 1881 * We block if: 1882 * We have insufficient buffer headers 1883 * We have insufficient buffer space 1884 * buffer_map is too fragmented ( space reservation fails ) 1885 * If we have to flush dirty buffers ( but we try to avoid this ) 1886 * 1887 * To avoid VFS layer recursion we do not flush dirty buffers ourselves. 1888 * Instead we ask the buf daemon to do it for us. We attempt to 1889 * avoid piecemeal wakeups of the pageout daemon. 1890 * 1891 * MPALMOSTSAFE 1892 */ 1893 struct buf * 1894 getnewbuf(int blkflags, int slptimeo, int size, int maxsize) 1895 { 1896 struct buf *bp; 1897 struct buf *nbp; 1898 int defrag = 0; 1899 int nqindex; 1900 int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0; 1901 static int flushingbufs; 1902 1903 /* 1904 * We can't afford to block since we might be holding a vnode lock, 1905 * which may prevent system daemons from running. We deal with 1906 * low-memory situations by proactively returning memory and running 1907 * async I/O rather then sync I/O. 1908 */ 1909 1910 ++getnewbufcalls; 1911 --getnewbufrestarts; 1912 restart: 1913 ++getnewbufrestarts; 1914 1915 /* 1916 * Setup for scan. If we do not have enough free buffers, 1917 * we setup a degenerate case that immediately fails. Note 1918 * that if we are specially marked process, we are allowed to 1919 * dip into our reserves. 
1920 * 1921 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN 1922 * 1923 * We start with EMPTYKVA. If the list is empty we backup to EMPTY. 1924 * However, there are a number of cases (defragging, reusing, ...) 1925 * where we cannot backup. 1926 */ 1927 nqindex = BQUEUE_EMPTYKVA; 1928 spin_lock(&bufqspin); 1929 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]); 1930 1931 if (nbp == NULL) { 1932 /* 1933 * If no EMPTYKVA buffers and we are either 1934 * defragging or reusing, locate a CLEAN buffer 1935 * to free or reuse. If bufspace useage is low 1936 * skip this step so we can allocate a new buffer. 1937 */ 1938 if (defrag || bufspace >= lobufspace) { 1939 nqindex = BQUEUE_CLEAN; 1940 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]); 1941 } 1942 1943 /* 1944 * If we could not find or were not allowed to reuse a 1945 * CLEAN buffer, check to see if it is ok to use an EMPTY 1946 * buffer. We can only use an EMPTY buffer if allocating 1947 * its KVA would not otherwise run us out of buffer space. 1948 */ 1949 if (nbp == NULL && defrag == 0 && 1950 bufspace + maxsize < hibufspace) { 1951 nqindex = BQUEUE_EMPTY; 1952 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTY]); 1953 } 1954 } 1955 1956 /* 1957 * Run scan, possibly freeing data and/or kva mappings on the fly 1958 * depending. 1959 * 1960 * WARNING! bufqspin is held! 1961 */ 1962 while ((bp = nbp) != NULL) { 1963 int qindex = nqindex; 1964 1965 nbp = TAILQ_NEXT(bp, b_freelist); 1966 1967 /* 1968 * BQUEUE_CLEAN - B_AGE special case. If not set the bp 1969 * cycles through the queue twice before being selected. 1970 */ 1971 if (qindex == BQUEUE_CLEAN && 1972 (bp->b_flags & B_AGE) == 0 && nbp) { 1973 bp->b_flags |= B_AGE; 1974 TAILQ_REMOVE(&bufqueues[qindex], bp, b_freelist); 1975 TAILQ_INSERT_TAIL(&bufqueues[qindex], bp, b_freelist); 1976 continue; 1977 } 1978 1979 /* 1980 * Calculate next bp ( we can only use it if we do not block 1981 * or do other fancy things ). 1982 */ 1983 if (nbp == NULL) { 1984 switch(qindex) { 1985 case BQUEUE_EMPTY: 1986 nqindex = BQUEUE_EMPTYKVA; 1987 if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]))) 1988 break; 1989 /* fall through */ 1990 case BQUEUE_EMPTYKVA: 1991 nqindex = BQUEUE_CLEAN; 1992 if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]))) 1993 break; 1994 /* fall through */ 1995 case BQUEUE_CLEAN: 1996 /* 1997 * nbp is NULL. 1998 */ 1999 break; 2000 } 2001 } 2002 2003 /* 2004 * Sanity Checks 2005 */ 2006 KASSERT(bp->b_qindex == qindex, 2007 ("getnewbuf: inconsistent queue %d bp %p", qindex, bp)); 2008 2009 /* 2010 * Note: we no longer distinguish between VMIO and non-VMIO 2011 * buffers. 2012 */ 2013 KASSERT((bp->b_flags & B_DELWRI) == 0, 2014 ("delwri buffer %p found in queue %d", bp, qindex)); 2015 2016 /* 2017 * Do not try to reuse a buffer with a non-zero b_refs. 2018 * This is an unsynchronized test. A synchronized test 2019 * is also performed after we lock the buffer. 2020 */ 2021 if (bp->b_refs) 2022 continue; 2023 2024 /* 2025 * If we are defragging then we need a buffer with 2026 * b_kvasize != 0. XXX this situation should no longer 2027 * occur, if defrag is non-zero the buffer's b_kvasize 2028 * should also be non-zero at this point. XXX 2029 */ 2030 if (defrag && bp->b_kvasize == 0) { 2031 kprintf("Warning: defrag empty buffer %p\n", bp); 2032 continue; 2033 } 2034 2035 /* 2036 * Start freeing the bp. This is somewhat involved. nbp 2037 * remains valid only for BQUEUE_EMPTY[KVA] bp's. Buffers 2038 * on the clean list must be disassociated from their 2039 * current vnode. 
Buffers on the empty[kva] lists have 2040 * already been disassociated. 2041 * 2042 * b_refs is checked after locking along with queue changes. 2043 * We must check here to deal with zero->nonzero transitions 2044 * made by the owner of the buffer lock, which is used by 2045 * VFS's to hold the buffer while issuing an unlocked 2046 * uiomove()s. We cannot invalidate the buffer's pages 2047 * for this case. Once we successfully lock a buffer the 2048 * only 0->1 transitions of b_refs will occur via findblk(). 2049 * 2050 * We must also check for queue changes after successful 2051 * locking as the current lock holder may dispose of the 2052 * buffer and change its queue. 2053 */ 2054 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 2055 spin_unlock(&bufqspin); 2056 tsleep(&bd_request, 0, "gnbxxx", (hz + 99) / 100); 2057 goto restart; 2058 } 2059 if (bp->b_qindex != qindex || bp->b_refs) { 2060 spin_unlock(&bufqspin); 2061 BUF_UNLOCK(bp); 2062 goto restart; 2063 } 2064 bremfree_locked(bp); 2065 spin_unlock(&bufqspin); 2066 2067 /* 2068 * Dependancies must be handled before we disassociate the 2069 * vnode. 2070 * 2071 * NOTE: HAMMER will set B_LOCKED if the buffer cannot 2072 * be immediately disassociated. HAMMER then becomes 2073 * responsible for releasing the buffer. 2074 * 2075 * NOTE: bufqspin is UNLOCKED now. 2076 */ 2077 if (LIST_FIRST(&bp->b_dep) != NULL) { 2078 buf_deallocate(bp); 2079 if (bp->b_flags & B_LOCKED) { 2080 bqrelse(bp); 2081 goto restart; 2082 } 2083 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 2084 } 2085 2086 if (qindex == BQUEUE_CLEAN) { 2087 if (bp->b_flags & B_VMIO) 2088 vfs_vmio_release(bp); 2089 if (bp->b_vp) 2090 brelvp(bp); 2091 } 2092 2093 /* 2094 * NOTE: nbp is now entirely invalid. We can only restart 2095 * the scan from this point on. 2096 * 2097 * Get the rest of the buffer freed up. b_kva* is still 2098 * valid after this operation. 2099 */ 2100 KASSERT(bp->b_vp == NULL, 2101 ("bp3 %p flags %08x vnode %p qindex %d " 2102 "unexpectededly still associated!", 2103 bp, bp->b_flags, bp->b_vp, qindex)); 2104 KKASSERT((bp->b_flags & B_HASHED) == 0); 2105 2106 /* 2107 * critical section protection is not required when 2108 * scrapping a buffer's contents because it is already 2109 * wired. 2110 */ 2111 if (bp->b_bufsize) 2112 allocbuf(bp, 0); 2113 2114 bp->b_flags = B_BNOCLIP; 2115 bp->b_cmd = BUF_CMD_DONE; 2116 bp->b_vp = NULL; 2117 bp->b_error = 0; 2118 bp->b_resid = 0; 2119 bp->b_bcount = 0; 2120 bp->b_xio.xio_npages = 0; 2121 bp->b_dirtyoff = bp->b_dirtyend = 0; 2122 bp->b_act_count = ACT_INIT; 2123 reinitbufbio(bp); 2124 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 2125 buf_dep_init(bp); 2126 if (blkflags & GETBLK_BHEAVY) 2127 bp->b_flags |= B_HEAVY; 2128 2129 /* 2130 * If we are defragging then free the buffer. 2131 */ 2132 if (defrag) { 2133 bp->b_flags |= B_INVAL; 2134 bfreekva(bp); 2135 brelse(bp); 2136 defrag = 0; 2137 goto restart; 2138 } 2139 2140 /* 2141 * If we are overcomitted then recover the buffer and its 2142 * KVM space. This occurs in rare situations when multiple 2143 * processes are blocked in getnewbuf() or allocbuf(). 2144 */ 2145 if (bufspace >= hibufspace) 2146 flushingbufs = 1; 2147 if (flushingbufs && bp->b_kvasize != 0) { 2148 bp->b_flags |= B_INVAL; 2149 bfreekva(bp); 2150 brelse(bp); 2151 goto restart; 2152 } 2153 if (bufspace < lobufspace) 2154 flushingbufs = 0; 2155 2156 /* 2157 * b_refs can transition to a non-zero value while we hold 2158 * the buffer locked due to a findblk(). 
Our brelvp() above 2159 * interlocked any future possible transitions due to 2160 * findblk()s. 2161 * 2162 * If we find b_refs to be non-zero we can destroy the 2163 * buffer's contents but we cannot yet reuse the buffer. 2164 */ 2165 if (bp->b_refs) { 2166 bp->b_flags |= B_INVAL; 2167 bfreekva(bp); 2168 brelse(bp); 2169 goto restart; 2170 } 2171 break; 2172 /* NOT REACHED, bufqspin not held */ 2173 } 2174 2175 /* 2176 * If we exhausted our list, sleep as appropriate. We may have to 2177 * wakeup various daemons and write out some dirty buffers. 2178 * 2179 * Generally we are sleeping due to insufficient buffer space. 2180 * 2181 * NOTE: bufqspin is held if bp is NULL, else it is not held. 2182 */ 2183 if (bp == NULL) { 2184 int flags; 2185 char *waitmsg; 2186 2187 spin_unlock(&bufqspin); 2188 if (defrag) { 2189 flags = VFS_BIO_NEED_BUFSPACE; 2190 waitmsg = "nbufkv"; 2191 } else if (bufspace >= hibufspace) { 2192 waitmsg = "nbufbs"; 2193 flags = VFS_BIO_NEED_BUFSPACE; 2194 } else { 2195 waitmsg = "newbuf"; 2196 flags = VFS_BIO_NEED_ANY; 2197 } 2198 2199 bd_speedup(); /* heeeelp */ 2200 spin_lock(&bufcspin); 2201 needsbuffer |= flags; 2202 while (needsbuffer & flags) { 2203 if (ssleep(&needsbuffer, &bufcspin, 2204 slpflags, waitmsg, slptimeo)) { 2205 spin_unlock(&bufcspin); 2206 return (NULL); 2207 } 2208 } 2209 spin_unlock(&bufcspin); 2210 } else { 2211 /* 2212 * We finally have a valid bp. We aren't quite out of the 2213 * woods, we still have to reserve kva space. In order 2214 * to keep fragmentation sane we only allocate kva in 2215 * BKVASIZE chunks. 2216 * 2217 * (bufqspin is not held) 2218 */ 2219 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK; 2220 2221 if (maxsize != bp->b_kvasize) { 2222 vm_offset_t addr = 0; 2223 int count; 2224 2225 bfreekva(bp); 2226 2227 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 2228 vm_map_lock(&buffer_map); 2229 2230 if (vm_map_findspace(&buffer_map, 2231 vm_map_min(&buffer_map), maxsize, 2232 maxsize, 0, &addr)) { 2233 /* 2234 * Uh oh. Buffer map is too fragmented. We 2235 * must defragment the map. 2236 */ 2237 vm_map_unlock(&buffer_map); 2238 vm_map_entry_release(count); 2239 ++bufdefragcnt; 2240 defrag = 1; 2241 bp->b_flags |= B_INVAL; 2242 brelse(bp); 2243 goto restart; 2244 } 2245 if (addr) { 2246 vm_map_insert(&buffer_map, &count, 2247 NULL, 0, 2248 addr, addr + maxsize, 2249 VM_MAPTYPE_NORMAL, 2250 VM_PROT_ALL, VM_PROT_ALL, 2251 MAP_NOFAULT); 2252 2253 bp->b_kvabase = (caddr_t) addr; 2254 bp->b_kvasize = maxsize; 2255 bufspace += bp->b_kvasize; 2256 ++bufreusecnt; 2257 } 2258 vm_map_unlock(&buffer_map); 2259 vm_map_entry_release(count); 2260 } 2261 bp->b_data = bp->b_kvabase; 2262 } 2263 return(bp); 2264 } 2265 2266 #if 0 2267 /* 2268 * This routine is called in an emergency to recover VM pages from the 2269 * buffer cache by cashing in clean buffers. The idea is to recover 2270 * enough pages to be able to satisfy a stuck bio_page_alloc(). 2271 * 2272 * XXX Currently not implemented. This function can wind up deadlocking 2273 * against another thread holding one or more of the backing pages busy. 2274 */ 2275 static int 2276 recoverbufpages(void) 2277 { 2278 struct buf *bp; 2279 int bytes = 0; 2280 2281 ++recoverbufcalls; 2282 2283 spin_lock(&bufqspin); 2284 while (bytes < MAXBSIZE) { 2285 bp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]); 2286 if (bp == NULL) 2287 break; 2288 2289 /* 2290 * BQUEUE_CLEAN - B_AGE special case. If not set the bp 2291 * cycles through the queue twice before being selected. 
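 *
 * (Restated for clarity: the first pass over a buffer lacking B_AGE
 * merely sets the flag and requeues the buffer at the tail, so a
 * clean buffer must survive one full trip through the queue before
 * it is actually reclaimed here.)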
2292 */ 2293 if ((bp->b_flags & B_AGE) == 0 && TAILQ_NEXT(bp, b_freelist)) { 2294 bp->b_flags |= B_AGE; 2295 TAILQ_REMOVE(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); 2296 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], 2297 bp, b_freelist); 2298 continue; 2299 } 2300 2301 /* 2302 * Sanity Checks 2303 */ 2304 KKASSERT(bp->b_qindex == BQUEUE_CLEAN); 2305 KKASSERT((bp->b_flags & B_DELWRI) == 0); 2306 2307 /* 2308 * Start freeing the bp. This is somewhat involved. 2309 * 2310 * Buffers on the clean list must be disassociated from 2311 * their current vnode 2312 */ 2313 2314 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 2315 kprintf("recoverbufpages: warning, locked buf %p, " 2316 "race corrected\n", 2317 bp); 2318 ssleep(&bd_request, &bufqspin, 0, "gnbxxx", hz / 100); 2319 continue; 2320 } 2321 if (bp->b_qindex != BQUEUE_CLEAN) { 2322 kprintf("recoverbufpages: warning, BUF_LOCK blocked " 2323 "unexpectedly on buf %p index %d, race " 2324 "corrected\n", 2325 bp, bp->b_qindex); 2326 BUF_UNLOCK(bp); 2327 continue; 2328 } 2329 bremfree_locked(bp); 2330 spin_unlock(&bufqspin); 2331 2332 /* 2333 * Sanity check. Only BQUEUE_DIRTY[_HW] employs markers. 2334 */ 2335 KKASSERT((bp->b_flags & B_MARKER) == 0); 2336 2337 /* 2338 * Dependancies must be handled before we disassociate the 2339 * vnode. 2340 * 2341 * NOTE: HAMMER will set B_LOCKED if the buffer cannot 2342 * be immediately disassociated. HAMMER then becomes 2343 * responsible for releasing the buffer. 2344 */ 2345 if (LIST_FIRST(&bp->b_dep) != NULL) { 2346 buf_deallocate(bp); 2347 if (bp->b_flags & B_LOCKED) { 2348 bqrelse(bp); 2349 spin_lock(&bufqspin); 2350 continue; 2351 } 2352 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 2353 } 2354 2355 bytes += bp->b_bufsize; 2356 2357 if (bp->b_flags & B_VMIO) { 2358 bp->b_flags |= B_DIRECT; /* try to free pages */ 2359 vfs_vmio_release(bp); 2360 } 2361 if (bp->b_vp) 2362 brelvp(bp); 2363 2364 KKASSERT(bp->b_vp == NULL); 2365 KKASSERT((bp->b_flags & B_HASHED) == 0); 2366 2367 /* 2368 * critical section protection is not required when 2369 * scrapping a buffer's contents because it is already 2370 * wired. 2371 */ 2372 if (bp->b_bufsize) 2373 allocbuf(bp, 0); 2374 2375 bp->b_flags = B_BNOCLIP; 2376 bp->b_cmd = BUF_CMD_DONE; 2377 bp->b_vp = NULL; 2378 bp->b_error = 0; 2379 bp->b_resid = 0; 2380 bp->b_bcount = 0; 2381 bp->b_xio.xio_npages = 0; 2382 bp->b_dirtyoff = bp->b_dirtyend = 0; 2383 reinitbufbio(bp); 2384 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 2385 buf_dep_init(bp); 2386 bp->b_flags |= B_INVAL; 2387 /* bfreekva(bp); */ 2388 brelse(bp); 2389 spin_lock(&bufqspin); 2390 } 2391 spin_unlock(&bufqspin); 2392 return(bytes); 2393 } 2394 #endif 2395 2396 /* 2397 * buf_daemon: 2398 * 2399 * Buffer flushing daemon. Buffers are normally flushed by the 2400 * update daemon but if it cannot keep up this process starts to 2401 * take the load in an attempt to prevent getnewbuf() from blocking. 2402 * 2403 * Once a flush is initiated it does not stop until the number 2404 * of buffers falls below lodirtybuffers, but we will wake up anyone 2405 * waiting at the mid-point. 
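 *
 * (Summary of the implementation below: the work is split between two
 * kernel threads, "bufdaemon" for normal dirty buffers and
 * "bufdaemon_hw" for B_HEAVY ones. Each thread flushes while
 * runningbufspace + dirtykvaspace exceeds lodirtybufspace / 2, or
 * while too many dirty buffers of its class exist, throttling itself
 * against hirunningspace so in-flight I/O does not build up without
 * bound.)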
2406 */ 2407 static struct kproc_desc buf_kp = { 2408 "bufdaemon", 2409 buf_daemon, 2410 &bufdaemon_td 2411 }; 2412 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, 2413 kproc_start, &buf_kp) 2414 2415 static struct kproc_desc bufhw_kp = { 2416 "bufdaemon_hw", 2417 buf_daemon_hw, 2418 &bufdaemonhw_td 2419 }; 2420 SYSINIT(bufdaemon_hw, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, 2421 kproc_start, &bufhw_kp) 2422 2423 /* 2424 * MPSAFE thread 2425 */ 2426 static void 2427 buf_daemon1(struct thread *td, int queue, int (*buf_limit_fn)(long), 2428 int *bd_req) 2429 { 2430 long limit; 2431 struct buf *marker; 2432 2433 marker = kmalloc(sizeof(*marker), M_BIOBUF, M_WAITOK | M_ZERO); 2434 marker->b_flags |= B_MARKER; 2435 marker->b_qindex = BQUEUE_NONE; 2436 2437 /* 2438 * This process needs to be suspended prior to shutdown sync. 2439 */ 2440 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, 2441 td, SHUTDOWN_PRI_LAST); 2442 curthread->td_flags |= TDF_SYSTHREAD; 2443 2444 /* 2445 * This process is allowed to take the buffer cache to the limit 2446 */ 2447 for (;;) { 2448 kproc_suspend_loop(); 2449 2450 /* 2451 * Do the flush as long as the number of dirty buffers 2452 * (including those running) exceeds lodirtybufspace. 2453 * 2454 * When flushing limit running I/O to hirunningspace 2455 * Do the flush. Limit the amount of in-transit I/O we 2456 * allow to build up, otherwise we would completely saturate 2457 * the I/O system. Wakeup any waiting processes before we 2458 * normally would so they can run in parallel with our drain. 2459 * 2460 * Our aggregate normal+HW lo water mark is lodirtybufspace, 2461 * but because we split the operation into two threads we 2462 * have to cut it in half for each thread. 2463 */ 2464 waitrunningbufspace(); 2465 limit = lodirtybufspace / 2; 2466 while (buf_limit_fn(limit)) { 2467 if (flushbufqueues(marker, queue) == 0) 2468 break; 2469 if (runningbufspace < hirunningspace) 2470 continue; 2471 waitrunningbufspace(); 2472 } 2473 2474 /* 2475 * We reached our low water mark, reset the 2476 * request and sleep until we are needed again. 2477 * The sleep is just so the suspend code works. 2478 */ 2479 spin_lock(&bufcspin); 2480 if (*bd_req == 0) 2481 ssleep(bd_req, &bufcspin, 0, "psleep", hz); 2482 *bd_req = 0; 2483 spin_unlock(&bufcspin); 2484 } 2485 /* NOT REACHED */ 2486 /*kfree(marker, M_BIOBUF);*/ 2487 } 2488 2489 static int 2490 buf_daemon_limit(long limit) 2491 { 2492 return (runningbufspace + dirtykvaspace > limit || 2493 dirtybufcount - dirtybufcounthw >= nbuf / 2); 2494 } 2495 2496 static int 2497 buf_daemon_hw_limit(long limit) 2498 { 2499 return (runningbufspace + dirtykvaspace > limit || 2500 dirtybufcounthw >= nbuf / 2); 2501 } 2502 2503 static void 2504 buf_daemon(void) 2505 { 2506 buf_daemon1(bufdaemon_td, BQUEUE_DIRTY, buf_daemon_limit, 2507 &bd_request); 2508 } 2509 2510 static void 2511 buf_daemon_hw(void) 2512 { 2513 buf_daemon1(bufdaemonhw_td, BQUEUE_DIRTY_HW, buf_daemon_hw_limit, 2514 &bd_request_hw); 2515 } 2516 2517 /* 2518 * flushbufqueues: 2519 * 2520 * Try to flush a buffer in the dirty queue. We must be careful to 2521 * free up B_INVAL buffers instead of write them, which NFS is 2522 * particularly sensitive to. 2523 * 2524 * B_RELBUF may only be set by VFSs. We do set B_AGE to indicate 2525 * that we really want to try to get the buffer out and reuse it 2526 * due to the write load on the machine. 2527 * 2528 * We must lock the buffer in order to check its validity before we 2529 * can mess with its contents. bufqspin isn't enough. 
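 *
 * Rough outline of the marker technique used below (a sketch, not
 * additional code): a B_MARKER buf is inserted into the queue and
 * kept just behind the buffer being worked on, so the scan can drop
 * bufqspin around brelse()/cluster_awrite() and then pick up where it
 * left off by resuming from the marker:
 *
 *	TAILQ_INSERT_HEAD(&bufqueues[q], marker, b_freelist);
 *	bp = marker;
 *	while ((bp = TAILQ_NEXT(bp, b_freelist)) != NULL) {
 *		(lock bp, move marker to just after bp)
 *		(unlock bufqspin, flush or dispose of bp)
 *		(relock bufqspin, bp = marker, continue)
 *	}
 *	TAILQ_REMOVE(&bufqueues[q], marker, b_freelist);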
2530 */ 2531 static int 2532 flushbufqueues(struct buf *marker, bufq_type_t q) 2533 { 2534 struct buf *bp; 2535 int r = 0; 2536 2537 KKASSERT(marker->b_qindex == BQUEUE_NONE); 2538 KKASSERT(marker->b_flags & B_MARKER); 2539 2540 /* 2541 * Spinlock needed to perform operations on the queue and may be 2542 * held through a non-blocking BUF_LOCK(), but cannot be held when 2543 * BUF_UNLOCK()ing or through any other major operation. 2544 */ 2545 spin_lock(&bufqspin); 2546 marker->b_qindex = q; 2547 TAILQ_INSERT_HEAD(&bufqueues[q], marker, b_freelist); 2548 bp = marker; 2549 2550 while ((bp = TAILQ_NEXT(bp, b_freelist)) != NULL) { 2551 /* 2552 * NOTE: spinlock is always held at the top of the loop 2553 */ 2554 if (bp->b_flags & B_MARKER) 2555 continue; 2556 if ((bp->b_flags & B_DELWRI) == 0) { 2557 kprintf("Unexpected clean buffer %p\n", bp); 2558 continue; 2559 } 2560 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 2561 continue; 2562 KKASSERT(bp->b_qindex == q); 2563 2564 /* 2565 * Once the buffer is locked we will have no choice but to 2566 * unlock the spinlock around a later BUF_UNLOCK and re-set 2567 * bp = marker when looping. Move the marker now to make 2568 * things easier. 2569 */ 2570 TAILQ_REMOVE(&bufqueues[q], marker, b_freelist); 2571 TAILQ_INSERT_AFTER(&bufqueues[q], bp, marker, b_freelist); 2572 2573 /* 2574 * Must recheck B_DELWRI after successfully locking 2575 * the buffer. 2576 */ 2577 if ((bp->b_flags & B_DELWRI) == 0) { 2578 spin_unlock(&bufqspin); 2579 BUF_UNLOCK(bp); 2580 spin_lock(&bufqspin); 2581 bp = marker; 2582 continue; 2583 } 2584 2585 /* 2586 * Remove the buffer from its queue. We still own the 2587 * spinlock here. 2588 */ 2589 _bremfree(bp); 2590 2591 /* 2592 * Disposing of an invalid buffer counts as a flush op 2593 */ 2594 if (bp->b_flags & B_INVAL) { 2595 spin_unlock(&bufqspin); 2596 brelse(bp); 2597 spin_lock(&bufqspin); 2598 ++r; 2599 break; 2600 } 2601 2602 /* 2603 * Release the spinlock for the more complex ops we 2604 * are now going to do. 2605 */ 2606 spin_unlock(&bufqspin); 2607 lwkt_yield(); 2608 2609 /* 2610 * This is a bit messy 2611 */ 2612 if (LIST_FIRST(&bp->b_dep) != NULL && 2613 (bp->b_flags & B_DEFERRED) == 0 && 2614 buf_countdeps(bp, 0)) { 2615 spin_lock(&bufqspin); 2616 TAILQ_INSERT_TAIL(&bufqueues[q], bp, b_freelist); 2617 bp->b_qindex = q; 2618 bp->b_flags |= B_DEFERRED; 2619 spin_unlock(&bufqspin); 2620 BUF_UNLOCK(bp); 2621 spin_lock(&bufqspin); 2622 bp = marker; 2623 continue; 2624 } 2625 2626 /* 2627 * spinlock not held here. 2628 * 2629 * If the buffer has a dependancy, buf_checkwrite() must 2630 * also return 0 for us to be able to initate the write. 2631 * 2632 * If the buffer is flagged B_ERROR it may be requeued 2633 * over and over again, we try to avoid a live lock. 2634 * 2635 * NOTE: buf_checkwrite is MPSAFE. 2636 */ 2637 if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) { 2638 brelse(bp); 2639 } else if (bp->b_flags & B_ERROR) { 2640 tsleep(bp, 0, "bioer", 1); 2641 bp->b_flags &= ~B_AGE; 2642 cluster_awrite(bp); 2643 } else { 2644 bp->b_flags |= B_AGE; 2645 cluster_awrite(bp); 2646 } 2647 spin_lock(&bufqspin); 2648 ++r; 2649 break; 2650 } 2651 TAILQ_REMOVE(&bufqueues[q], marker, b_freelist); 2652 marker->b_qindex = BQUEUE_NONE; 2653 spin_unlock(&bufqspin); 2654 2655 return (r); 2656 } 2657 2658 /* 2659 * inmem: 2660 * 2661 * Returns true if no I/O is needed to access the associated VM object. 2662 * This is like findblk except it also hunts around in the VM system for 2663 * the data. 
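 *
 * Illustrative use (a sketch, not taken from any particular caller):
 * a read-ahead heuristic might probe whether the next logical block
 * is already resident before bothering to schedule a cluster read:
 *
 *	if (!inmem(vp, loffset + blksize))
 *		(issue read-ahead for loffset + blksize)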
2664 * 2665 * Note that we ignore vm_page_free() races from interrupts against our 2666 * lookup, since if the caller is not protected our return value will not 2667 * be any more valid then otherwise once we exit the critical section. 2668 */ 2669 int 2670 inmem(struct vnode *vp, off_t loffset) 2671 { 2672 vm_object_t obj; 2673 vm_offset_t toff, tinc, size; 2674 vm_page_t m; 2675 int res = 1; 2676 2677 if (findblk(vp, loffset, FINDBLK_TEST)) 2678 return 1; 2679 if (vp->v_mount == NULL) 2680 return 0; 2681 if ((obj = vp->v_object) == NULL) 2682 return 0; 2683 2684 size = PAGE_SIZE; 2685 if (size > vp->v_mount->mnt_stat.f_iosize) 2686 size = vp->v_mount->mnt_stat.f_iosize; 2687 2688 vm_object_hold(obj); 2689 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 2690 m = vm_page_lookup(obj, OFF_TO_IDX(loffset + toff)); 2691 if (m == NULL) { 2692 res = 0; 2693 break; 2694 } 2695 tinc = size; 2696 if (tinc > PAGE_SIZE - ((toff + loffset) & PAGE_MASK)) 2697 tinc = PAGE_SIZE - ((toff + loffset) & PAGE_MASK); 2698 if (vm_page_is_valid(m, 2699 (vm_offset_t) ((toff + loffset) & PAGE_MASK), tinc) == 0) { 2700 res = 0; 2701 break; 2702 } 2703 } 2704 vm_object_drop(obj); 2705 return (res); 2706 } 2707 2708 /* 2709 * findblk: 2710 * 2711 * Locate and return the specified buffer. Unless flagged otherwise, 2712 * a locked buffer will be returned if it exists or NULL if it does not. 2713 * 2714 * findblk()'d buffers are still on the bufqueues and if you intend 2715 * to use your (locked NON-TEST) buffer you need to bremfree(bp) 2716 * and possibly do other stuff to it. 2717 * 2718 * FINDBLK_TEST - Do not lock the buffer. The caller is responsible 2719 * for locking the buffer and ensuring that it remains 2720 * the desired buffer after locking. 2721 * 2722 * FINDBLK_NBLOCK - Lock the buffer non-blocking. If we are unable 2723 * to acquire the lock we return NULL, even if the 2724 * buffer exists. 2725 * 2726 * FINDBLK_REF - Returns the buffer ref'd, which prevents normal 2727 * reuse by getnewbuf() but does not prevent 2728 * disassociation (B_INVAL). Used to avoid deadlocks 2729 * against random (vp,loffset)s due to reassignment. 2730 * 2731 * (0) - Lock the buffer blocking. 2732 * 2733 * MPSAFE 2734 */ 2735 struct buf * 2736 findblk(struct vnode *vp, off_t loffset, int flags) 2737 { 2738 struct buf *bp; 2739 int lkflags; 2740 2741 lkflags = LK_EXCLUSIVE; 2742 if (flags & FINDBLK_NBLOCK) 2743 lkflags |= LK_NOWAIT; 2744 2745 for (;;) { 2746 /* 2747 * Lookup. Ref the buf while holding v_token to prevent 2748 * reuse (but does not prevent diassociation). 2749 */ 2750 lwkt_gettoken_shared(&vp->v_token); 2751 bp = buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset); 2752 if (bp == NULL) { 2753 lwkt_reltoken(&vp->v_token); 2754 return(NULL); 2755 } 2756 bqhold(bp); 2757 lwkt_reltoken(&vp->v_token); 2758 2759 /* 2760 * If testing only break and return bp, do not lock. 2761 */ 2762 if (flags & FINDBLK_TEST) 2763 break; 2764 2765 /* 2766 * Lock the buffer, return an error if the lock fails. 2767 * (only FINDBLK_NBLOCK can cause the lock to fail). 2768 */ 2769 if (BUF_LOCK(bp, lkflags)) { 2770 atomic_subtract_int(&bp->b_refs, 1); 2771 /* bp = NULL; not needed */ 2772 return(NULL); 2773 } 2774 2775 /* 2776 * Revalidate the locked buf before allowing it to be 2777 * returned. 
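 * The buffer may have been repurposed (brelvp() plus reassignment to
 * another vnode/offset) while we slept on the lock; in that case we
 * drop our ref, unlock, and retry the lookup from the top.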
2778 */ 2779 if (bp->b_vp == vp && bp->b_loffset == loffset) 2780 break; 2781 atomic_subtract_int(&bp->b_refs, 1); 2782 BUF_UNLOCK(bp); 2783 } 2784 2785 /* 2786 * Success 2787 */ 2788 if ((flags & FINDBLK_REF) == 0) 2789 atomic_subtract_int(&bp->b_refs, 1); 2790 return(bp); 2791 } 2792 2793 /* 2794 * getcacheblk: 2795 * 2796 * Similar to getblk() except only returns the buffer if it is 2797 * B_CACHE and requires no other manipulation. Otherwise NULL 2798 * is returned. 2799 * 2800 * If B_RAM is set the buffer might be just fine, but we return 2801 * NULL anyway because we want the code to fall through to the 2802 * cluster read. Otherwise read-ahead breaks. 2803 * 2804 * If blksize is 0 the buffer cache buffer must already be fully 2805 * cached. 2806 * 2807 * If blksize is non-zero getblk() will be used, allowing a buffer 2808 * to be reinstantiated from its VM backing store. The buffer must 2809 * still be fully cached after reinstantiation to be returned. 2810 */ 2811 struct buf * 2812 getcacheblk(struct vnode *vp, off_t loffset, int blksize, int blkflags) 2813 { 2814 struct buf *bp; 2815 int fndflags = (blkflags & GETBLK_NOWAIT) ? FINDBLK_NBLOCK : 0; 2816 2817 if (blksize) { 2818 bp = getblk(vp, loffset, blksize, blkflags, 0); 2819 if (bp) { 2820 if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == 2821 B_CACHE) { 2822 bp->b_flags &= ~B_AGE; 2823 } else { 2824 brelse(bp); 2825 bp = NULL; 2826 } 2827 } 2828 } else { 2829 bp = findblk(vp, loffset, fndflags); 2830 if (bp) { 2831 if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == 2832 B_CACHE) { 2833 bp->b_flags &= ~B_AGE; 2834 bremfree(bp); 2835 } else { 2836 BUF_UNLOCK(bp); 2837 bp = NULL; 2838 } 2839 } 2840 } 2841 return (bp); 2842 } 2843 2844 /* 2845 * getblk: 2846 * 2847 * Get a block given a specified block and offset into a file/device. 2848 * B_INVAL may or may not be set on return. The caller should clear 2849 * B_INVAL prior to initiating a READ. 2850 * 2851 * IT IS IMPORTANT TO UNDERSTAND THAT IF YOU CALL GETBLK() AND B_CACHE 2852 * IS NOT SET, YOU MUST INITIALIZE THE RETURNED BUFFER, ISSUE A READ, 2853 * OR SET B_INVAL BEFORE RETIRING IT. If you retire a getblk'd buffer 2854 * without doing any of those things the system will likely believe 2855 * the buffer to be valid (especially if it is not B_VMIO), and the 2856 * next getblk() will return the buffer with B_CACHE set. 2857 * 2858 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for 2859 * an existing buffer. 2860 * 2861 * For a VMIO buffer, B_CACHE is modified according to the backing VM. 2862 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set 2863 * and then cleared based on the backing VM. If the previous buffer is 2864 * non-0-sized but invalid, B_CACHE will be cleared. 2865 * 2866 * If getblk() must create a new buffer, the new buffer is returned with 2867 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which 2868 * case it is returned with B_INVAL clear and B_CACHE set based on the 2869 * backing VM. 2870 * 2871 * getblk() also forces a bwrite() for any B_DELWRI buffer whos 2872 * B_CACHE bit is clear. 2873 * 2874 * What this means, basically, is that the caller should use B_CACHE to 2875 * determine whether the buffer is fully valid or not and should clear 2876 * B_INVAL prior to issuing a read. If the caller intends to validate 2877 * the buffer by loading its data area with something, the caller needs 2878 * to clear B_INVAL. 
If the caller does this without issuing an I/O, 2879 * the caller should set B_CACHE ( as an optimization ), else the caller 2880 * should issue the I/O and biodone() will set B_CACHE if the I/O was 2881 * a write attempt or if it was a successfull read. If the caller 2882 * intends to issue a READ, the caller must clear B_INVAL and B_ERROR 2883 * prior to issuing the READ. biodone() will *not* clear B_INVAL. 2884 * 2885 * getblk flags: 2886 * 2887 * GETBLK_PCATCH - catch signal if blocked, can cause NULL return 2888 * GETBLK_BHEAVY - heavy-weight buffer cache buffer 2889 * 2890 * MPALMOSTSAFE 2891 */ 2892 struct buf * 2893 getblk(struct vnode *vp, off_t loffset, int size, int blkflags, int slptimeo) 2894 { 2895 struct buf *bp; 2896 int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0; 2897 int error; 2898 int lkflags; 2899 2900 if (size > MAXBSIZE) 2901 panic("getblk: size(%d) > MAXBSIZE(%d)", size, MAXBSIZE); 2902 if (vp->v_object == NULL) 2903 panic("getblk: vnode %p has no object!", vp); 2904 2905 loop: 2906 if ((bp = findblk(vp, loffset, FINDBLK_REF | FINDBLK_TEST)) != NULL) { 2907 /* 2908 * The buffer was found in the cache, but we need to lock it. 2909 * We must acquire a ref on the bp to prevent reuse, but 2910 * this will not prevent disassociation (brelvp()) so we 2911 * must recheck (vp,loffset) after acquiring the lock. 2912 * 2913 * Without the ref the buffer could potentially be reused 2914 * before we acquire the lock and create a deadlock 2915 * situation between the thread trying to reuse the buffer 2916 * and us due to the fact that we would wind up blocking 2917 * on a random (vp,loffset). 2918 */ 2919 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 2920 if (blkflags & GETBLK_NOWAIT) { 2921 bqdrop(bp); 2922 return(NULL); 2923 } 2924 lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL; 2925 if (blkflags & GETBLK_PCATCH) 2926 lkflags |= LK_PCATCH; 2927 error = BUF_TIMELOCK(bp, lkflags, "getblk", slptimeo); 2928 if (error) { 2929 bqdrop(bp); 2930 if (error == ENOLCK) 2931 goto loop; 2932 return (NULL); 2933 } 2934 /* buffer may have changed on us */ 2935 } 2936 bqdrop(bp); 2937 2938 /* 2939 * Once the buffer has been locked, make sure we didn't race 2940 * a buffer recyclement. Buffers that are no longer hashed 2941 * will have b_vp == NULL, so this takes care of that check 2942 * as well. 2943 */ 2944 if (bp->b_vp != vp || bp->b_loffset != loffset) { 2945 kprintf("Warning buffer %p (vp %p loffset %lld) " 2946 "was recycled\n", 2947 bp, vp, (long long)loffset); 2948 BUF_UNLOCK(bp); 2949 goto loop; 2950 } 2951 2952 /* 2953 * If SZMATCH any pre-existing buffer must be of the requested 2954 * size or NULL is returned. The caller absolutely does not 2955 * want getblk() to bwrite() the buffer on a size mismatch. 2956 */ 2957 if ((blkflags & GETBLK_SZMATCH) && size != bp->b_bcount) { 2958 BUF_UNLOCK(bp); 2959 return(NULL); 2960 } 2961 2962 /* 2963 * All vnode-based buffers must be backed by a VM object. 2964 */ 2965 KKASSERT(bp->b_flags & B_VMIO); 2966 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 2967 bp->b_flags &= ~B_AGE; 2968 2969 /* 2970 * Make sure that B_INVAL buffers do not have a cached 2971 * block number translation. 2972 */ 2973 if ((bp->b_flags & B_INVAL) && (bp->b_bio2.bio_offset != NOOFFSET)) { 2974 kprintf("Warning invalid buffer %p (vp %p loffset %lld)" 2975 " did not have cleared bio_offset cache\n", 2976 bp, vp, (long long)loffset); 2977 clearbiocache(&bp->b_bio2); 2978 } 2979 2980 /* 2981 * The buffer is locked. B_CACHE is cleared if the buffer is 2982 * invalid. 
2983 */ 2984 if (bp->b_flags & B_INVAL) 2985 bp->b_flags &= ~B_CACHE; 2986 bremfree(bp); 2987 2988 /* 2989 * Any size inconsistency with a dirty buffer or a buffer 2990 * with a softupdates dependency must be resolved. Resizing 2991 * the buffer in such circumstances can lead to problems. 2992 * 2993 * Dirty or dependent buffers are written synchronously. 2994 * Other types of buffers are simply released and 2995 * reconstituted as they may be backed by valid, dirty VM 2996 * pages (but not marked B_DELWRI). 2997 * 2998 * NFS NOTE: NFS buffers which straddle EOF are oddly-sized 2999 * and may be left over from a prior truncation (and thus 3000 * no longer represent the actual EOF point), so we 3001 * definitely do not want to B_NOCACHE the backing store. 3002 */ 3003 if (size != bp->b_bcount) { 3004 if (bp->b_flags & B_DELWRI) { 3005 bp->b_flags |= B_RELBUF; 3006 bwrite(bp); 3007 } else if (LIST_FIRST(&bp->b_dep)) { 3008 bp->b_flags |= B_RELBUF; 3009 bwrite(bp); 3010 } else { 3011 bp->b_flags |= B_RELBUF; 3012 brelse(bp); 3013 } 3014 goto loop; 3015 } 3016 KKASSERT(size <= bp->b_kvasize); 3017 KASSERT(bp->b_loffset != NOOFFSET, 3018 ("getblk: no buffer offset")); 3019 3020 /* 3021 * A buffer with B_DELWRI set and B_CACHE clear must 3022 * be committed before we can return the buffer in 3023 * order to prevent the caller from issuing a read 3024 * ( due to B_CACHE not being set ) and overwriting 3025 * it. 3026 * 3027 * Most callers, including NFS and FFS, need this to 3028 * operate properly either because they assume they 3029 * can issue a read if B_CACHE is not set, or because 3030 * ( for example ) an uncached B_DELWRI might loop due 3031 * to softupdates re-dirtying the buffer. In the latter 3032 * case, B_CACHE is set after the first write completes, 3033 * preventing further loops. 3034 * 3035 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE 3036 * above while extending the buffer, we cannot allow the 3037 * buffer to remain with B_CACHE set after the write 3038 * completes or it will represent a corrupt state. To 3039 * deal with this we set B_NOCACHE to scrap the buffer 3040 * after the write. 3041 * 3042 * XXX Should this be B_RELBUF instead of B_NOCACHE? 3043 * I'm not even sure this state is still possible 3044 * now that getblk() writes out any dirty buffers 3045 * on size changes. 3046 * 3047 * We might be able to do something fancy, like setting 3048 * B_CACHE in bwrite() except if B_DELWRI is already set, 3049 * so the below call doesn't set B_CACHE, but that gets real 3050 * confusing. This is much easier. 3051 */ 3052 3053 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 3054 kprintf("getblk: Warning, bp %p loff=%jx DELWRI set " 3055 "and CACHE clear, b_flags %08x\n", 3056 bp, (uintmax_t)bp->b_loffset, bp->b_flags); 3057 bp->b_flags |= B_NOCACHE; 3058 bwrite(bp); 3059 goto loop; 3060 } 3061 } else { 3062 /* 3063 * Buffer is not in-core, create new buffer. The buffer 3064 * returned by getnewbuf() is locked. Note that the returned 3065 * buffer is also considered valid (not marked B_INVAL). 3066 * 3067 * Calculating the offset for the I/O requires figuring out 3068 * the block size. We use DEV_BSIZE for VBLK or VCHR and 3069 * the mount's f_iosize otherwise. If the vnode does not 3070 * have an associated mount we assume that the passed size is 3071 * the block size. 3072 * 3073 * Note that vn_isdisk() cannot be used here since it may 3074 * return a failure for numerous reasons.
Note that the 3075 * buffer size may be larger then the block size (the caller 3076 * will use block numbers with the proper multiple). Beware 3077 * of using any v_* fields which are part of unions. In 3078 * particular, in DragonFly the mount point overloading 3079 * mechanism uses the namecache only and the underlying 3080 * directory vnode is not a special case. 3081 */ 3082 int bsize, maxsize; 3083 3084 if (vp->v_type == VBLK || vp->v_type == VCHR) 3085 bsize = DEV_BSIZE; 3086 else if (vp->v_mount) 3087 bsize = vp->v_mount->mnt_stat.f_iosize; 3088 else 3089 bsize = size; 3090 3091 maxsize = size + (loffset & PAGE_MASK); 3092 maxsize = imax(maxsize, bsize); 3093 3094 bp = getnewbuf(blkflags, slptimeo, size, maxsize); 3095 if (bp == NULL) { 3096 if (slpflags || slptimeo) 3097 return NULL; 3098 goto loop; 3099 } 3100 3101 /* 3102 * Atomically insert the buffer into the hash, so that it can 3103 * be found by findblk(). 3104 * 3105 * If bgetvp() returns non-zero a collision occured, and the 3106 * bp will not be associated with the vnode. 3107 * 3108 * Make sure the translation layer has been cleared. 3109 */ 3110 bp->b_loffset = loffset; 3111 bp->b_bio2.bio_offset = NOOFFSET; 3112 /* bp->b_bio2.bio_next = NULL; */ 3113 3114 if (bgetvp(vp, bp, size)) { 3115 bp->b_flags |= B_INVAL; 3116 brelse(bp); 3117 goto loop; 3118 } 3119 3120 /* 3121 * All vnode-based buffers must be backed by a VM object. 3122 */ 3123 KKASSERT(vp->v_object != NULL); 3124 bp->b_flags |= B_VMIO; 3125 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 3126 3127 allocbuf(bp, size); 3128 } 3129 KKASSERT(dsched_is_clear_buf_priv(bp)); 3130 return (bp); 3131 } 3132 3133 /* 3134 * regetblk(bp) 3135 * 3136 * Reacquire a buffer that was previously released to the locked queue, 3137 * or reacquire a buffer which is interlocked by having bioops->io_deallocate 3138 * set B_LOCKED (which handles the acquisition race). 3139 * 3140 * To this end, either B_LOCKED must be set or the dependancy list must be 3141 * non-empty. 3142 * 3143 * MPSAFE 3144 */ 3145 void 3146 regetblk(struct buf *bp) 3147 { 3148 KKASSERT((bp->b_flags & B_LOCKED) || LIST_FIRST(&bp->b_dep) != NULL); 3149 BUF_LOCK(bp, LK_EXCLUSIVE | LK_RETRY); 3150 bremfree(bp); 3151 } 3152 3153 /* 3154 * geteblk: 3155 * 3156 * Get an empty, disassociated buffer of given size. The buffer is 3157 * initially set to B_INVAL. 3158 * 3159 * critical section protection is not required for the allocbuf() 3160 * call because races are impossible here. 3161 * 3162 * MPALMOSTSAFE 3163 */ 3164 struct buf * 3165 geteblk(int size) 3166 { 3167 struct buf *bp; 3168 int maxsize; 3169 3170 maxsize = (size + BKVAMASK) & ~BKVAMASK; 3171 3172 while ((bp = getnewbuf(0, 0, size, maxsize)) == NULL) 3173 ; 3174 allocbuf(bp, size); 3175 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 3176 KKASSERT(dsched_is_clear_buf_priv(bp)); 3177 return (bp); 3178 } 3179 3180 3181 /* 3182 * allocbuf: 3183 * 3184 * This code constitutes the buffer memory from either anonymous system 3185 * memory (in the case of non-VMIO operations) or from an associated 3186 * VM object (in the case of VMIO operations). This code is able to 3187 * resize a buffer up or down. 3188 * 3189 * Note that this code is tricky, and has many complications to resolve 3190 * deadlock or inconsistant data situations. Tread lightly!!! 3191 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 3192 * the caller. Calling this code willy nilly can result in the loss of 3193 * data. 
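 *
 * (Illustrative of the callers in this file: getblk() calls
 * allocbuf(bp, size) to instantiate the backing store of a freshly
 * created buffer, while getnewbuf() calls allocbuf(bp, 0) to strip a
 * buffer's memory before recycling it.)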
3194 * 3195 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 3196 * B_CACHE for the non-VMIO case. 3197 * 3198 * This routine does not need to be called from a critical section but you 3199 * must own the buffer. 3200 * 3201 * MPSAFE 3202 */ 3203 int 3204 allocbuf(struct buf *bp, int size) 3205 { 3206 int newbsize, mbsize; 3207 int i; 3208 3209 if (BUF_REFCNT(bp) == 0) 3210 panic("allocbuf: buffer not busy"); 3211 3212 if (bp->b_kvasize < size) 3213 panic("allocbuf: buffer too small"); 3214 3215 if ((bp->b_flags & B_VMIO) == 0) { 3216 caddr_t origbuf; 3217 int origbufsize; 3218 /* 3219 * Just get anonymous memory from the kernel. Don't 3220 * mess with B_CACHE. 3221 */ 3222 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 3223 if (bp->b_flags & B_MALLOC) 3224 newbsize = mbsize; 3225 else 3226 newbsize = round_page(size); 3227 3228 if (newbsize < bp->b_bufsize) { 3229 /* 3230 * Malloced buffers are not shrunk 3231 */ 3232 if (bp->b_flags & B_MALLOC) { 3233 if (newbsize) { 3234 bp->b_bcount = size; 3235 } else { 3236 kfree(bp->b_data, M_BIOBUF); 3237 if (bp->b_bufsize) { 3238 atomic_subtract_long(&bufmallocspace, bp->b_bufsize); 3239 bufspacewakeup(); 3240 bp->b_bufsize = 0; 3241 } 3242 bp->b_data = bp->b_kvabase; 3243 bp->b_bcount = 0; 3244 bp->b_flags &= ~B_MALLOC; 3245 } 3246 return 1; 3247 } 3248 vm_hold_free_pages( 3249 bp, 3250 (vm_offset_t) bp->b_data + newbsize, 3251 (vm_offset_t) bp->b_data + bp->b_bufsize); 3252 } else if (newbsize > bp->b_bufsize) { 3253 /* 3254 * We only use malloced memory on the first allocation. 3255 * and revert to page-allocated memory when the buffer 3256 * grows. 3257 */ 3258 if ((bufmallocspace < maxbufmallocspace) && 3259 (bp->b_bufsize == 0) && 3260 (mbsize <= PAGE_SIZE/2)) { 3261 3262 bp->b_data = kmalloc(mbsize, M_BIOBUF, M_WAITOK); 3263 bp->b_bufsize = mbsize; 3264 bp->b_bcount = size; 3265 bp->b_flags |= B_MALLOC; 3266 atomic_add_long(&bufmallocspace, mbsize); 3267 return 1; 3268 } 3269 origbuf = NULL; 3270 origbufsize = 0; 3271 /* 3272 * If the buffer is growing on its other-than-first 3273 * allocation, then we revert to the page-allocation 3274 * scheme. 3275 */ 3276 if (bp->b_flags & B_MALLOC) { 3277 origbuf = bp->b_data; 3278 origbufsize = bp->b_bufsize; 3279 bp->b_data = bp->b_kvabase; 3280 if (bp->b_bufsize) { 3281 atomic_subtract_long(&bufmallocspace, 3282 bp->b_bufsize); 3283 bufspacewakeup(); 3284 bp->b_bufsize = 0; 3285 } 3286 bp->b_flags &= ~B_MALLOC; 3287 newbsize = round_page(newbsize); 3288 } 3289 vm_hold_load_pages( 3290 bp, 3291 (vm_offset_t) bp->b_data + bp->b_bufsize, 3292 (vm_offset_t) bp->b_data + newbsize); 3293 if (origbuf) { 3294 bcopy(origbuf, bp->b_data, origbufsize); 3295 kfree(origbuf, M_BIOBUF); 3296 } 3297 } 3298 } else { 3299 vm_page_t m; 3300 int desiredpages; 3301 3302 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 3303 desiredpages = ((int)(bp->b_loffset & PAGE_MASK) + 3304 newbsize + PAGE_MASK) >> PAGE_SHIFT; 3305 KKASSERT(desiredpages <= XIO_INTERNAL_PAGES); 3306 3307 if (bp->b_flags & B_MALLOC) 3308 panic("allocbuf: VMIO buffer can't be malloced"); 3309 /* 3310 * Set B_CACHE initially if buffer is 0 length or will become 3311 * 0-length. 3312 */ 3313 if (size == 0 || bp->b_bufsize == 0) 3314 bp->b_flags |= B_CACHE; 3315 3316 if (newbsize < bp->b_bufsize) { 3317 /* 3318 * DEV_BSIZE aligned new buffer size is less then the 3319 * DEV_BSIZE aligned existing buffer size. Figure out 3320 * if we have to remove any pages. 
3321 */ 3322 if (desiredpages < bp->b_xio.xio_npages) { 3323 for (i = desiredpages; i < bp->b_xio.xio_npages; i++) { 3324 /* 3325 * the page is not freed here -- it 3326 * is the responsibility of 3327 * vnode_pager_setsize 3328 */ 3329 m = bp->b_xio.xio_pages[i]; 3330 KASSERT(m != bogus_page, 3331 ("allocbuf: bogus page found")); 3332 vm_page_busy_wait(m, TRUE, "biodep"); 3333 bp->b_xio.xio_pages[i] = NULL; 3334 vm_page_unwire(m, 0); 3335 vm_page_wakeup(m); 3336 } 3337 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + 3338 (desiredpages << PAGE_SHIFT), (bp->b_xio.xio_npages - desiredpages)); 3339 bp->b_xio.xio_npages = desiredpages; 3340 } 3341 } else if (size > bp->b_bcount) { 3342 /* 3343 * We are growing the buffer, possibly in a 3344 * byte-granular fashion. 3345 */ 3346 struct vnode *vp; 3347 vm_object_t obj; 3348 vm_offset_t toff; 3349 vm_offset_t tinc; 3350 3351 /* 3352 * Step 1, bring in the VM pages from the object, 3353 * allocating them if necessary. We must clear 3354 * B_CACHE if these pages are not valid for the 3355 * range covered by the buffer. 3356 * 3357 * critical section protection is required to protect 3358 * against interrupts unbusying and freeing pages 3359 * between our vm_page_lookup() and our 3360 * busycheck/wiring call. 3361 */ 3362 vp = bp->b_vp; 3363 obj = vp->v_object; 3364 3365 vm_object_hold(obj); 3366 while (bp->b_xio.xio_npages < desiredpages) { 3367 vm_page_t m; 3368 vm_pindex_t pi; 3369 int error; 3370 3371 pi = OFF_TO_IDX(bp->b_loffset) + 3372 bp->b_xio.xio_npages; 3373 3374 /* 3375 * Blocking on m->busy might lead to a 3376 * deadlock: 3377 * 3378 * vm_fault->getpages->cluster_read->allocbuf 3379 */ 3380 m = vm_page_lookup_busy_try(obj, pi, FALSE, 3381 &error); 3382 if (error) { 3383 vm_page_sleep_busy(m, FALSE, "pgtblk"); 3384 continue; 3385 } 3386 if (m == NULL) { 3387 /* 3388 * note: must allocate system pages 3389 * since blocking here could intefere 3390 * with paging I/O, no matter which 3391 * process we are. 3392 */ 3393 m = bio_page_alloc(bp, obj, pi, desiredpages - bp->b_xio.xio_npages); 3394 if (m) { 3395 vm_page_wire(m); 3396 vm_page_flag_clear(m, PG_ZERO); 3397 vm_page_wakeup(m); 3398 bp->b_flags &= ~B_CACHE; 3399 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; 3400 ++bp->b_xio.xio_npages; 3401 } 3402 continue; 3403 } 3404 3405 /* 3406 * We found a page and were able to busy it. 3407 */ 3408 vm_page_flag_clear(m, PG_ZERO); 3409 vm_page_wire(m); 3410 vm_page_wakeup(m); 3411 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; 3412 ++bp->b_xio.xio_npages; 3413 if (bp->b_act_count < m->act_count) 3414 bp->b_act_count = m->act_count; 3415 } 3416 vm_object_drop(obj); 3417 3418 /* 3419 * Step 2. We've loaded the pages into the buffer, 3420 * we have to figure out if we can still have B_CACHE 3421 * set. Note that B_CACHE is set according to the 3422 * byte-granular range ( bcount and size ), not the 3423 * aligned range ( newbsize ). 3424 * 3425 * The VM test is against m->valid, which is DEV_BSIZE 3426 * aligned. Needless to say, the validity of the data 3427 * needs to also be DEV_BSIZE aligned. Note that this 3428 * fails with NFS if the server or some other client 3429 * extends the file's EOF. If our buffer is resized, 3430 * B_CACHE may remain set! 
XXX 3431 */ 3432 3433 toff = bp->b_bcount; 3434 tinc = PAGE_SIZE - ((bp->b_loffset + toff) & PAGE_MASK); 3435 3436 while ((bp->b_flags & B_CACHE) && toff < size) { 3437 vm_pindex_t pi; 3438 3439 if (tinc > (size - toff)) 3440 tinc = size - toff; 3441 3442 pi = ((bp->b_loffset & PAGE_MASK) + toff) >> 3443 PAGE_SHIFT; 3444 3445 vfs_buf_test_cache( 3446 bp, 3447 bp->b_loffset, 3448 toff, 3449 tinc, 3450 bp->b_xio.xio_pages[pi] 3451 ); 3452 toff += tinc; 3453 tinc = PAGE_SIZE; 3454 } 3455 3456 /* 3457 * Step 3, fixup the KVM pmap. Remember that 3458 * bp->b_data is relative to bp->b_loffset, but 3459 * bp->b_loffset may be offset into the first page. 3460 */ 3461 3462 bp->b_data = (caddr_t) 3463 trunc_page((vm_offset_t)bp->b_data); 3464 pmap_qenter( 3465 (vm_offset_t)bp->b_data, 3466 bp->b_xio.xio_pages, 3467 bp->b_xio.xio_npages 3468 ); 3469 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 3470 (vm_offset_t)(bp->b_loffset & PAGE_MASK)); 3471 } 3472 } 3473 3474 /* adjust space use on already-dirty buffer */ 3475 if (bp->b_flags & B_DELWRI) { 3476 spin_lock(&bufcspin); 3477 /* dirtykvaspace unchanged */ 3478 dirtybufspace += newbsize - bp->b_bufsize; 3479 if (bp->b_flags & B_HEAVY) 3480 dirtybufspacehw += newbsize - bp->b_bufsize; 3481 spin_unlock(&bufcspin); 3482 } 3483 if (newbsize < bp->b_bufsize) 3484 bufspacewakeup(); 3485 bp->b_bufsize = newbsize; /* actual buffer allocation */ 3486 bp->b_bcount = size; /* requested buffer size */ 3487 return 1; 3488 } 3489 3490 /* 3491 * biowait: 3492 * 3493 * Wait for buffer I/O completion, returning error status. B_EINTR 3494 * is converted into an EINTR error but not cleared (since a chain 3495 * of biowait() calls may occur). 3496 * 3497 * On return bpdone() will have been called but the buffer will remain 3498 * locked and will not have been brelse()'d. 3499 * 3500 * NOTE! If a timeout is specified and ETIMEDOUT occurs the I/O is 3501 * likely still in progress on return. 3502 * 3503 * NOTE! This operation is on a BIO, not a BUF. 3504 * 3505 * NOTE! BIO_DONE is cleared by vn_strategy() 3506 * 3507 * MPSAFE 3508 */ 3509 static __inline int 3510 _biowait(struct bio *bio, const char *wmesg, int to) 3511 { 3512 struct buf *bp = bio->bio_buf; 3513 u_int32_t flags; 3514 u_int32_t nflags; 3515 int error; 3516 3517 KKASSERT(bio == &bp->b_bio1); 3518 for (;;) { 3519 flags = bio->bio_flags; 3520 if (flags & BIO_DONE) 3521 break; 3522 nflags = flags | BIO_WANT; 3523 tsleep_interlock(bio, 0); 3524 if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) { 3525 if (wmesg) 3526 error = tsleep(bio, PINTERLOCKED, wmesg, to); 3527 else if (bp->b_cmd == BUF_CMD_READ) 3528 error = tsleep(bio, PINTERLOCKED, "biord", to); 3529 else 3530 error = tsleep(bio, PINTERLOCKED, "biowr", to); 3531 if (error) { 3532 kprintf("tsleep error biowait %d\n", error); 3533 return (error); 3534 } 3535 } 3536 } 3537 3538 /* 3539 * Finish up. 3540 */ 3541 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 3542 bio->bio_flags &= ~(BIO_DONE | BIO_SYNC); 3543 if (bp->b_flags & B_EINTR) 3544 return (EINTR); 3545 if (bp->b_flags & B_ERROR) 3546 return (bp->b_error ? bp->b_error : EIO); 3547 return (0); 3548 } 3549 3550 int 3551 biowait(struct bio *bio, const char *wmesg) 3552 { 3553 return(_biowait(bio, wmesg, 0)); 3554 } 3555 3556 int 3557 biowait_timeout(struct bio *bio, const char *wmesg, int to) 3558 { 3559 return(_biowait(bio, wmesg, to)); 3560 } 3561 3562 /* 3563 * This associates a tracking count with an I/O. 
vn_strategy() and 3564 * dev_dstrategy() do this automatically but there are a few cases 3565 * where a vnode or device layer is bypassed when a block translation 3566 * is cached. In such cases bio_start_transaction() may be called on 3567 * the bypassed layers so the system gets an I/O in progress indication 3568 * for those higher layers. 3569 */ 3570 void 3571 bio_start_transaction(struct bio *bio, struct bio_track *track) 3572 { 3573 bio->bio_track = track; 3574 if (dsched_is_clear_buf_priv(bio->bio_buf)) 3575 dsched_new_buf(bio->bio_buf); 3576 bio_track_ref(track); 3577 } 3578 3579 /* 3580 * Initiate I/O on a vnode. 3581 * 3582 * SWAPCACHE OPERATION: 3583 * 3584 * Real buffer cache buffers have a non-NULL bp->b_vp. Unfortunately 3585 * devfs also uses b_vp for fake buffers so we also have to check 3586 * that B_PAGING is 0. In this case the passed 'vp' is probably the 3587 * underlying block device. The swap assignments are related to the 3588 * buffer cache buffer's b_vp, not the passed vp. 3589 * 3590 * The passed vp == bp->b_vp only in the case where the strategy call 3591 * is made on the vp itself for its own buffers (a regular file or 3592 * block device vp). The filesystem usually then re-calls vn_strategy() 3593 * after translating the request to an underlying device. 3594 * 3595 * Cluster buffers set B_CLUSTER and the passed vp is the vp of the 3596 * underlying buffer cache buffers. 3597 * 3598 * We can only deal with page-aligned buffers at the moment, because 3599 * we can't tell what the real dirty state for pages straddling a buffer 3600 * are. 3601 * 3602 * In order to call swap_pager_strategy() we must provide the VM object 3603 * and base offset for the underlying buffer cache pages so it can find 3604 * the swap blocks. 3605 */ 3606 void 3607 vn_strategy(struct vnode *vp, struct bio *bio) 3608 { 3609 struct bio_track *track; 3610 struct buf *bp = bio->bio_buf; 3611 3612 KKASSERT(bp->b_cmd != BUF_CMD_DONE); 3613 3614 /* 3615 * Set when an I/O is issued on the bp. Cleared by consumers 3616 * (aka HAMMER), allowing the consumer to determine if I/O had 3617 * actually occurred. 3618 */ 3619 bp->b_flags |= B_IODEBUG; 3620 3621 /* 3622 * Handle the swap cache intercept. 3623 */ 3624 if (vn_cache_strategy(vp, bio)) 3625 return; 3626 3627 /* 3628 * Otherwise do the operation through the filesystem 3629 */ 3630 if (bp->b_cmd == BUF_CMD_READ) 3631 track = &vp->v_track_read; 3632 else 3633 track = &vp->v_track_write; 3634 KKASSERT((bio->bio_flags & BIO_DONE) == 0); 3635 bio->bio_track = track; 3636 if (dsched_is_clear_buf_priv(bio->bio_buf)) 3637 dsched_new_buf(bio->bio_buf); 3638 bio_track_ref(track); 3639 vop_strategy(*vp->v_ops, vp, bio); 3640 } 3641 3642 static void vn_cache_strategy_callback(struct bio *bio); 3643 3644 int 3645 vn_cache_strategy(struct vnode *vp, struct bio *bio) 3646 { 3647 struct buf *bp = bio->bio_buf; 3648 struct bio *nbio; 3649 vm_object_t object; 3650 vm_page_t m; 3651 int i; 3652 3653 /* 3654 * Is this buffer cache buffer suitable for reading from 3655 * the swap cache? 3656 */ 3657 if (vm_swapcache_read_enable == 0 || 3658 bp->b_cmd != BUF_CMD_READ || 3659 ((bp->b_flags & B_CLUSTER) == 0 && 3660 (bp->b_vp == NULL || (bp->b_flags & B_PAGING))) || 3661 ((int)bp->b_loffset & PAGE_MASK) != 0 || 3662 (bp->b_bcount & PAGE_MASK) != 0) { 3663 return(0); 3664 } 3665 3666 /* 3667 * Figure out the original VM object (it will match the underlying 3668 * VM pages). 
Note that swap cached data uses page indices relative 3669 * to that object, not relative to bio->bio_offset. 3670 */ 3671 if (bp->b_flags & B_CLUSTER) 3672 object = vp->v_object; 3673 else 3674 object = bp->b_vp->v_object; 3675 3676 /* 3677 * In order to be able to use the swap cache all underlying VM 3678 * pages must be marked as such, and we can't have any bogus pages. 3679 */ 3680 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 3681 m = bp->b_xio.xio_pages[i]; 3682 if ((m->flags & PG_SWAPPED) == 0) 3683 break; 3684 if (m == bogus_page) 3685 break; 3686 } 3687 3688 /* 3689 * If we are good then issue the I/O using swap_pager_strategy(). 3690 * 3691 * We can only do this if the buffer actually supports object-backed 3692 * I/O. If it doesn't npages will be 0. 3693 */ 3694 if (i && i == bp->b_xio.xio_npages) { 3695 m = bp->b_xio.xio_pages[0]; 3696 nbio = push_bio(bio); 3697 nbio->bio_done = vn_cache_strategy_callback; 3698 nbio->bio_offset = ptoa(m->pindex); 3699 KKASSERT(m->object == object); 3700 swap_pager_strategy(object, nbio); 3701 return(1); 3702 } 3703 return(0); 3704 } 3705 3706 /* 3707 * This is a bit of a hack but since the vn_cache_strategy() function can 3708 * override a VFS's strategy function we must make sure that the bio, which 3709 * is probably bio2, doesn't leak an unexpected offset value back to the 3710 * filesystem. The filesystem (e.g. UFS) might otherwise assume that the 3711 * bio went through its own file strategy function and the the bio2 offset 3712 * is a cached disk offset when, in fact, it isn't. 3713 */ 3714 static void 3715 vn_cache_strategy_callback(struct bio *bio) 3716 { 3717 bio->bio_offset = NOOFFSET; 3718 biodone(pop_bio(bio)); 3719 } 3720 3721 /* 3722 * bpdone: 3723 * 3724 * Finish I/O on a buffer after all BIOs have been processed. 3725 * Called when the bio chain is exhausted or by biowait. If called 3726 * by biowait, elseit is typically 0. 3727 * 3728 * bpdone is also responsible for setting B_CACHE in a B_VMIO bp. 3729 * In a non-VMIO bp, B_CACHE will be set on the next getblk() 3730 * assuming B_INVAL is clear. 3731 * 3732 * For the VMIO case, we set B_CACHE if the op was a read and no 3733 * read error occured, or if the op was a write. B_CACHE is never 3734 * set if the buffer is invalid or otherwise uncacheable. 3735 * 3736 * bpdone does not mess with B_INVAL, allowing the I/O routine or the 3737 * initiator to leave B_INVAL set to brelse the buffer out of existance 3738 * in the biodone routine. 3739 */ 3740 void 3741 bpdone(struct buf *bp, int elseit) 3742 { 3743 buf_cmd_t cmd; 3744 3745 KASSERT(BUF_REFCNTNB(bp) > 0, 3746 ("biodone: bp %p not busy %d", bp, BUF_REFCNTNB(bp))); 3747 KASSERT(bp->b_cmd != BUF_CMD_DONE, 3748 ("biodone: bp %p already done!", bp)); 3749 3750 /* 3751 * No more BIOs are left. All completion functions have been dealt 3752 * with, now we clean up the buffer. 3753 */ 3754 cmd = bp->b_cmd; 3755 bp->b_cmd = BUF_CMD_DONE; 3756 3757 /* 3758 * Only reads and writes are processed past this point. 3759 */ 3760 if (cmd != BUF_CMD_READ && cmd != BUF_CMD_WRITE) { 3761 if (cmd == BUF_CMD_FREEBLKS) 3762 bp->b_flags |= B_NOCACHE; 3763 if (elseit) 3764 brelse(bp); 3765 return; 3766 } 3767 3768 /* 3769 * Warning: softupdates may re-dirty the buffer, and HAMMER can do 3770 * a lot worse. XXX - move this above the clearing of b_cmd 3771 */ 3772 if (LIST_FIRST(&bp->b_dep) != NULL) 3773 buf_complete(bp); /* MPSAFE */ 3774 3775 /* 3776 * A failed write must re-dirty the buffer unless B_INVAL 3777 * was set. 
Only applicable to normal buffers (with VPs). 3778 * vinum buffers may not have a vp. 3779 */ 3780 if (cmd == BUF_CMD_WRITE && 3781 (bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) { 3782 bp->b_flags &= ~B_NOCACHE; 3783 if (bp->b_vp) 3784 bdirty(bp); 3785 } 3786 3787 if (bp->b_flags & B_VMIO) { 3788 int i; 3789 vm_ooffset_t foff; 3790 vm_page_t m; 3791 vm_object_t obj; 3792 int iosize; 3793 struct vnode *vp = bp->b_vp; 3794 3795 obj = vp->v_object; 3796 3797 #if defined(VFS_BIO_DEBUG) 3798 if (vp->v_auxrefs == 0) 3799 panic("biodone: zero vnode hold count"); 3800 if ((vp->v_flag & VOBJBUF) == 0) 3801 panic("biodone: vnode is not setup for merged cache"); 3802 #endif 3803 3804 foff = bp->b_loffset; 3805 KASSERT(foff != NOOFFSET, ("biodone: no buffer offset")); 3806 KASSERT(obj != NULL, ("biodone: missing VM object")); 3807 3808 #if defined(VFS_BIO_DEBUG) 3809 if (obj->paging_in_progress < bp->b_xio.xio_npages) { 3810 kprintf("biodone: paging in progress(%d) < " 3811 "bp->b_xio.xio_npages(%d)\n", 3812 obj->paging_in_progress, 3813 bp->b_xio.xio_npages); 3814 } 3815 #endif 3816 3817 /* 3818 * Set B_CACHE if the op was a normal read and no error 3819 * occured. B_CACHE is set for writes in the b*write() 3820 * routines. 3821 */ 3822 iosize = bp->b_bcount - bp->b_resid; 3823 if (cmd == BUF_CMD_READ && 3824 (bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR)) == 0) { 3825 bp->b_flags |= B_CACHE; 3826 } 3827 3828 vm_object_hold(obj); 3829 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3830 int bogusflag = 0; 3831 int resid; 3832 3833 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; 3834 if (resid > iosize) 3835 resid = iosize; 3836 3837 /* 3838 * cleanup bogus pages, restoring the originals. Since 3839 * the originals should still be wired, we don't have 3840 * to worry about interrupt/freeing races destroying 3841 * the VM object association. 3842 */ 3843 m = bp->b_xio.xio_pages[i]; 3844 if (m == bogus_page) { 3845 bogusflag = 1; 3846 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 3847 if (m == NULL) 3848 panic("biodone: page disappeared"); 3849 bp->b_xio.xio_pages[i] = m; 3850 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3851 bp->b_xio.xio_pages, bp->b_xio.xio_npages); 3852 } 3853 #if defined(VFS_BIO_DEBUG) 3854 if (OFF_TO_IDX(foff) != m->pindex) { 3855 kprintf("biodone: foff(%lu)/m->pindex(%ld) " 3856 "mismatch\n", 3857 (unsigned long)foff, (long)m->pindex); 3858 } 3859 #endif 3860 3861 /* 3862 * In the write case, the valid and clean bits are 3863 * already changed correctly (see bdwrite()), so we 3864 * only need to do this here in the read case. 3865 */ 3866 vm_page_busy_wait(m, FALSE, "bpdpgw"); 3867 if (cmd == BUF_CMD_READ && !bogusflag && resid > 0) { 3868 vfs_clean_one_page(bp, i, m); 3869 } 3870 vm_page_flag_clear(m, PG_ZERO); 3871 3872 /* 3873 * when debugging new filesystems or buffer I/O 3874 * methods, this is the most common error that pops 3875 * up. if you see this, you have not set the page 3876 * busy flag correctly!!! 
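 *
 * (Typically, and only as an illustration here, the initiator
 * arranges this by calling vfs_busy_pages(vp, bp) before
 * vn_strategy(), which soft-busies each backing page (cluster
 * buffers aside) via vm_page_io_start(); bpdone() then balances that
 * with the vm_page_io_finish() below.)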
3877 */ 3878 if (m->busy == 0) { 3879 kprintf("biodone: page busy < 0, " 3880 "pindex: %d, foff: 0x(%x,%x), " 3881 "resid: %d, index: %d\n", 3882 (int) m->pindex, (int)(foff >> 32), 3883 (int) foff & 0xffffffff, resid, i); 3884 if (!vn_isdisk(vp, NULL)) 3885 kprintf(" iosize: %ld, loffset: %lld, " 3886 "flags: 0x%08x, npages: %d\n", 3887 bp->b_vp->v_mount->mnt_stat.f_iosize, 3888 (long long)bp->b_loffset, 3889 bp->b_flags, bp->b_xio.xio_npages); 3890 else 3891 kprintf(" VDEV, loffset: %lld, flags: 0x%08x, npages: %d\n", 3892 (long long)bp->b_loffset, 3893 bp->b_flags, bp->b_xio.xio_npages); 3894 kprintf(" valid: 0x%x, dirty: 0x%x, " 3895 "wired: %d\n", 3896 m->valid, m->dirty, 3897 m->wire_count); 3898 panic("biodone: page busy < 0"); 3899 } 3900 vm_page_io_finish(m); 3901 vm_page_wakeup(m); 3902 vm_object_pip_wakeup(obj); 3903 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3904 iosize -= resid; 3905 } 3906 bp->b_flags &= ~B_HASBOGUS; 3907 vm_object_drop(obj); 3908 } 3909 3910 /* 3911 * Finish up by releasing the buffer. There are no more synchronous 3912 * or asynchronous completions, those were handled by bio_done 3913 * callbacks. 3914 */ 3915 if (elseit) { 3916 if (bp->b_flags & (B_NOCACHE|B_INVAL|B_ERROR|B_RELBUF)) 3917 brelse(bp); 3918 else 3919 bqrelse(bp); 3920 } 3921 } 3922 3923 /* 3924 * Normal biodone. 3925 */ 3926 void 3927 biodone(struct bio *bio) 3928 { 3929 struct buf *bp = bio->bio_buf; 3930 3931 runningbufwakeup(bp); 3932 3933 /* 3934 * Run up the chain of BIO's. Leave b_cmd intact for the duration. 3935 */ 3936 while (bio) { 3937 biodone_t *done_func; 3938 struct bio_track *track; 3939 3940 /* 3941 * BIO tracking. Most but not all BIOs are tracked. 3942 */ 3943 if ((track = bio->bio_track) != NULL) { 3944 bio_track_rel(track); 3945 bio->bio_track = NULL; 3946 } 3947 3948 /* 3949 * A bio_done function terminates the loop. The function 3950 * will be responsible for any further chaining and/or 3951 * buffer management. 3952 * 3953 * WARNING! The done function can deallocate the buffer! 3954 */ 3955 if ((done_func = bio->bio_done) != NULL) { 3956 bio->bio_done = NULL; 3957 done_func(bio); 3958 return; 3959 } 3960 bio = bio->bio_prev; 3961 } 3962 3963 /* 3964 * If we've run out of bio's do normal [a]synchronous completion. 3965 */ 3966 bpdone(bp, 1); 3967 } 3968 3969 /* 3970 * Synchronous biodone - this terminates a synchronous BIO. 3971 * 3972 * bpdone() is called with elseit=FALSE, leaving the buffer completed 3973 * but still locked. The caller must brelse() the buffer after waiting 3974 * for completion. 3975 */ 3976 void 3977 biodone_sync(struct bio *bio) 3978 { 3979 struct buf *bp = bio->bio_buf; 3980 int flags; 3981 int nflags; 3982 3983 KKASSERT(bio == &bp->b_bio1); 3984 bpdone(bp, 0); 3985 3986 for (;;) { 3987 flags = bio->bio_flags; 3988 nflags = (flags | BIO_DONE) & ~BIO_WANT; 3989 3990 if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) { 3991 if (flags & BIO_WANT) 3992 wakeup(bio); 3993 break; 3994 } 3995 } 3996 } 3997 3998 /* 3999 * vfs_unbusy_pages: 4000 * 4001 * This routine is called in lieu of iodone in the case of 4002 * incomplete I/O. This keeps the busy status for pages 4003 * consistant. 
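 *	Typically this undoes the page and object accounting performed by
 *	vfs_busy_pages() when the strategy call was never issued or could
 *	not be completed.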
4004 */ 4005 void 4006 vfs_unbusy_pages(struct buf *bp) 4007 { 4008 int i; 4009 4010 runningbufwakeup(bp); 4011 4012 if (bp->b_flags & B_VMIO) { 4013 struct vnode *vp = bp->b_vp; 4014 vm_object_t obj; 4015 4016 obj = vp->v_object; 4017 vm_object_hold(obj); 4018 4019 for (i = 0; i < bp->b_xio.xio_npages; i++) { 4020 vm_page_t m = bp->b_xio.xio_pages[i]; 4021 4022 /* 4023 * When restoring bogus changes the original pages 4024 * should still be wired, so we are in no danger of 4025 * losing the object association and do not need 4026 * critical section protection particularly. 4027 */ 4028 if (m == bogus_page) { 4029 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_loffset) + i); 4030 if (!m) { 4031 panic("vfs_unbusy_pages: page missing"); 4032 } 4033 bp->b_xio.xio_pages[i] = m; 4034 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 4035 bp->b_xio.xio_pages, bp->b_xio.xio_npages); 4036 } 4037 vm_page_busy_wait(m, FALSE, "bpdpgw"); 4038 vm_page_flag_clear(m, PG_ZERO); 4039 vm_page_io_finish(m); 4040 vm_page_wakeup(m); 4041 vm_object_pip_wakeup(obj); 4042 } 4043 bp->b_flags &= ~B_HASBOGUS; 4044 vm_object_drop(obj); 4045 } 4046 } 4047 4048 /* 4049 * vfs_busy_pages: 4050 * 4051 * This routine is called before a device strategy routine. 4052 * It is used to tell the VM system that paging I/O is in 4053 * progress, and treat the pages associated with the buffer 4054 * almost as being PG_BUSY. Also the object 'paging_in_progress' 4055 * flag is handled to make sure that the object doesn't become 4056 * inconsistant. 4057 * 4058 * Since I/O has not been initiated yet, certain buffer flags 4059 * such as B_ERROR or B_INVAL may be in an inconsistant state 4060 * and should be ignored. 4061 * 4062 * MPSAFE 4063 */ 4064 void 4065 vfs_busy_pages(struct vnode *vp, struct buf *bp) 4066 { 4067 int i, bogus; 4068 struct lwp *lp = curthread->td_lwp; 4069 4070 /* 4071 * The buffer's I/O command must already be set. If reading, 4072 * B_CACHE must be 0 (double check against callers only doing 4073 * I/O when B_CACHE is 0). 4074 */ 4075 KKASSERT(bp->b_cmd != BUF_CMD_DONE); 4076 KKASSERT(bp->b_cmd == BUF_CMD_WRITE || (bp->b_flags & B_CACHE) == 0); 4077 4078 if (bp->b_flags & B_VMIO) { 4079 vm_object_t obj; 4080 4081 obj = vp->v_object; 4082 KASSERT(bp->b_loffset != NOOFFSET, 4083 ("vfs_busy_pages: no buffer offset")); 4084 4085 /* 4086 * Busy all the pages. We have to busy them all at once 4087 * to avoid deadlocks. 4088 */ 4089 retry: 4090 for (i = 0; i < bp->b_xio.xio_npages; i++) { 4091 vm_page_t m = bp->b_xio.xio_pages[i]; 4092 4093 if (vm_page_busy_try(m, FALSE)) { 4094 vm_page_sleep_busy(m, FALSE, "vbpage"); 4095 while (--i >= 0) 4096 vm_page_wakeup(bp->b_xio.xio_pages[i]); 4097 goto retry; 4098 } 4099 } 4100 4101 /* 4102 * Setup for I/O, soft-busy the page right now because 4103 * the next loop may block. 4104 */ 4105 for (i = 0; i < bp->b_xio.xio_npages; i++) { 4106 vm_page_t m = bp->b_xio.xio_pages[i]; 4107 4108 vm_page_flag_clear(m, PG_ZERO); 4109 if ((bp->b_flags & B_CLUSTER) == 0) { 4110 vm_object_pip_add(obj, 1); 4111 vm_page_io_start(m); 4112 } 4113 } 4114 4115 /* 4116 * Adjust protections for I/O and do bogus-page mapping. 4117 * Assume that vm_page_protect() can block (it can block 4118 * if VM_PROT_NONE, don't take any chances regardless). 4119 * 4120 * In particular note that for writes we must incorporate 4121 * page dirtyness from the VM system into the buffer's 4122 * dirty range. 
4123 		 *
4124 		 * For reads we theoretically must incorporate page dirtiness
4125 		 * from the VM system to determine if the page needs bogus
4126 		 * replacement, but we shortcut the test by simply checking
4127 		 * that all m->valid bits are set, indicating that the page
4128 		 * is fully valid and does not need to be re-read.  For any
4129 		 * VM system dirtiness the page will also be fully valid
4130 		 * since it was mapped at one point.
4131 		 */
4132 		bogus = 0;
4133 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
4134 			vm_page_t m = bp->b_xio.xio_pages[i];
4135 
4136 			vm_page_flag_clear(m, PG_ZERO);	/* XXX */
4137 			if (bp->b_cmd == BUF_CMD_WRITE) {
4138 				/*
4139 				 * When readying a vnode-backed buffer for
4140 				 * a write we must zero-fill any invalid
4141 				 * portions of the backing VM pages, mark
4142 				 * them valid and clear related dirty bits.
4143 				 *
4144 				 * vfs_clean_one_page() incorporates any
4145 				 * VM dirtiness and updates the b_dirtyoff
4146 				 * range (after we've made the page RO).
4147 				 *
4148 				 * It is also expected that the pmap modified
4149 				 * bit has already been cleared by the
4150 				 * vm_page_protect().  We may not be able
4151 				 * to clear all dirty bits for a page if it
4152 				 * was also memory mapped (NFS).
4153 				 *
4154 				 * Finally be sure to unassign any swap-cache
4155 				 * backing store as it is now stale.
4156 				 */
4157 				vm_page_protect(m, VM_PROT_READ);
4158 				vfs_clean_one_page(bp, i, m);
4159 				swap_pager_unswapped(m);
4160 			} else if (m->valid == VM_PAGE_BITS_ALL) {
4161 				/*
4162 				 * When readying a vnode-backed buffer for
4163 				 * read we must replace any dirty pages with
4164 				 * a bogus page so dirty data is not destroyed
4165 				 * when filling gaps.
4166 				 *
4167 				 * To avoid testing whether the page is
4168 				 * dirty we instead test that the page was
4169 				 * at some point mapped (m->valid fully
4170 				 * valid) with the understanding that
4171 				 * this also covers the dirty case.
4172 				 */
4173 				bp->b_xio.xio_pages[i] = bogus_page;
4174 				bp->b_flags |= B_HASBOGUS;
4175 				bogus++;
4176 			} else if (m->valid & m->dirty) {
4177 				/*
4178 				 * This case should not occur as partial
4179 				 * dirtying can only happen if the buffer
4180 				 * is B_CACHE, and this code is not entered
4181 				 * if the buffer is B_CACHE.
4182 				 */
4183 				kprintf("Warning: vfs_busy_pages - page not "
4184 					"fully valid! loff=%jx bpf=%08x "
4185 					"idx=%d val=%02x dir=%02x\n",
4186 					(uintmax_t)bp->b_loffset, bp->b_flags,
4187 					i, m->valid, m->dirty);
4188 				vm_page_protect(m, VM_PROT_NONE);
4189 			} else {
4190 				/*
4191 				 * The page is not valid and can be made
4192 				 * part of the read.
4193 				 */
4194 				vm_page_protect(m, VM_PROT_NONE);
4195 			}
4196 			vm_page_wakeup(m);
4197 		}
4198 		if (bogus) {
4199 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4200 				    bp->b_xio.xio_pages, bp->b_xio.xio_npages);
4201 		}
4202 	}
4203 
4204 	/*
4205 	 * This is the easiest place to put the process accounting for the I/O
4206 	 * for now.
4207 	 */
4208 	if (lp != NULL) {
4209 		if (bp->b_cmd == BUF_CMD_READ)
4210 			lp->lwp_ru.ru_inblock++;
4211 		else
4212 			lp->lwp_ru.ru_oublock++;
4213 	}
4214 }
4215 
4216 /*
4217  * Tell the VM system that the pages associated with this buffer
4218  * are clean.  This is used for delayed writes where the data is
4219  * going to go to disk eventually without additional VM intervention.
4220  *
4221  * NOTE: While we only really need to clean through to b_bcount, we
4222  *	 just go ahead and clean through to b_bufsize.
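 *	 Each page is handed to vfs_clean_one_page() below, which also
 *	 folds any pre-existing VM-level dirtiness into the buffer's
 *	 b_dirtyoff/b_dirtyend range before clearing it.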
4223 */ 4224 static void 4225 vfs_clean_pages(struct buf *bp) 4226 { 4227 vm_page_t m; 4228 int i; 4229 4230 if ((bp->b_flags & B_VMIO) == 0) 4231 return; 4232 4233 KASSERT(bp->b_loffset != NOOFFSET, 4234 ("vfs_clean_pages: no buffer offset")); 4235 4236 for (i = 0; i < bp->b_xio.xio_npages; i++) { 4237 m = bp->b_xio.xio_pages[i]; 4238 vfs_clean_one_page(bp, i, m); 4239 } 4240 } 4241 4242 /* 4243 * vfs_clean_one_page: 4244 * 4245 * Set the valid bits and clear the dirty bits in a page within a 4246 * buffer. The range is restricted to the buffer's size and the 4247 * buffer's logical offset might index into the first page. 4248 * 4249 * The caller has busied or soft-busied the page and it is not mapped, 4250 * test and incorporate the dirty bits into b_dirtyoff/end before 4251 * clearing them. Note that we need to clear the pmap modified bits 4252 * after determining the the page was dirty, vm_page_set_validclean() 4253 * does not do it for us. 4254 * 4255 * This routine is typically called after a read completes (dirty should 4256 * be zero in that case as we are not called on bogus-replace pages), 4257 * or before a write is initiated. 4258 */ 4259 static void 4260 vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m) 4261 { 4262 int bcount; 4263 int xoff; 4264 int soff; 4265 int eoff; 4266 4267 /* 4268 * Calculate offset range within the page but relative to buffer's 4269 * loffset. loffset might be offset into the first page. 4270 */ 4271 xoff = (int)bp->b_loffset & PAGE_MASK; /* loffset offset into pg 0 */ 4272 bcount = bp->b_bcount + xoff; /* offset adjusted */ 4273 4274 if (pageno == 0) { 4275 soff = xoff; 4276 eoff = PAGE_SIZE; 4277 } else { 4278 soff = (pageno << PAGE_SHIFT); 4279 eoff = soff + PAGE_SIZE; 4280 } 4281 if (eoff > bcount) 4282 eoff = bcount; 4283 if (soff >= eoff) 4284 return; 4285 4286 /* 4287 * Test dirty bits and adjust b_dirtyoff/end. 4288 * 4289 * If dirty pages are incorporated into the bp any prior 4290 * B_NEEDCOMMIT state (NFS) must be cleared because the 4291 * caller has not taken into account the new dirty data. 4292 * 4293 * If the page was memory mapped the dirty bits might go beyond the 4294 * end of the buffer, but we can't really make the assumption that 4295 * a file EOF straddles the buffer (even though this is the case for 4296 * NFS if B_NEEDCOMMIT is also set). So for the purposes of clearing 4297 * B_NEEDCOMMIT we only test the dirty bits covered by the buffer. 4298 * This also saves some console spam. 4299 * 4300 * When clearing B_NEEDCOMMIT we must also clear B_CLUSTEROK, 4301 * NFS can handle huge commits but not huge writes. 4302 */ 4303 vm_page_test_dirty(m); 4304 if (m->dirty) { 4305 if ((bp->b_flags & B_NEEDCOMMIT) && 4306 (m->dirty & vm_page_bits(soff & PAGE_MASK, eoff - soff))) { 4307 if (debug_commit) 4308 kprintf("Warning: vfs_clean_one_page: bp %p " 4309 "loff=%jx,%d flgs=%08x clr B_NEEDCOMMIT" 4310 " cmd %d vd %02x/%02x x/s/e %d %d %d " 4311 "doff/end %d %d\n", 4312 bp, (uintmax_t)bp->b_loffset, bp->b_bcount, 4313 bp->b_flags, bp->b_cmd, 4314 m->valid, m->dirty, xoff, soff, eoff, 4315 bp->b_dirtyoff, bp->b_dirtyend); 4316 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 4317 if (debug_commit) 4318 print_backtrace(-1); 4319 } 4320 /* 4321 * Only clear the pmap modified bits if ALL the dirty bits 4322 * are set, otherwise the system might mis-clear portions 4323 * of a page. 
4324 */ 4325 if (m->dirty == VM_PAGE_BITS_ALL && 4326 (bp->b_flags & B_NEEDCOMMIT) == 0) { 4327 pmap_clear_modify(m); 4328 } 4329 if (bp->b_dirtyoff > soff - xoff) 4330 bp->b_dirtyoff = soff - xoff; 4331 if (bp->b_dirtyend < eoff - xoff) 4332 bp->b_dirtyend = eoff - xoff; 4333 } 4334 4335 /* 4336 * Set related valid bits, clear related dirty bits. 4337 * Does not mess with the pmap modified bit. 4338 * 4339 * WARNING! We cannot just clear all of m->dirty here as the 4340 * buffer cache buffers may use a DEV_BSIZE'd aligned 4341 * block size, or have an odd size (e.g. NFS at file EOF). 4342 * The putpages code can clear m->dirty to 0. 4343 * 4344 * If a VOP_WRITE generates a buffer cache buffer which 4345 * covers the same space as mapped writable pages the 4346 * buffer flush might not be able to clear all the dirty 4347 * bits and still require a putpages from the VM system 4348 * to finish it off. 4349 * 4350 * WARNING! vm_page_set_validclean() currently assumes vm_token 4351 * is held. The page might not be busied (bdwrite() case). 4352 * XXX remove this comment once we've validated that this 4353 * is no longer an issue. 4354 */ 4355 vm_page_set_validclean(m, soff & PAGE_MASK, eoff - soff); 4356 } 4357 4358 #if 0 4359 /* 4360 * Similar to vfs_clean_one_page() but sets the bits to valid and dirty. 4361 * The page data is assumed to be valid (there is no zeroing here). 4362 */ 4363 static void 4364 vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m) 4365 { 4366 int bcount; 4367 int xoff; 4368 int soff; 4369 int eoff; 4370 4371 /* 4372 * Calculate offset range within the page but relative to buffer's 4373 * loffset. loffset might be offset into the first page. 4374 */ 4375 xoff = (int)bp->b_loffset & PAGE_MASK; /* loffset offset into pg 0 */ 4376 bcount = bp->b_bcount + xoff; /* offset adjusted */ 4377 4378 if (pageno == 0) { 4379 soff = xoff; 4380 eoff = PAGE_SIZE; 4381 } else { 4382 soff = (pageno << PAGE_SHIFT); 4383 eoff = soff + PAGE_SIZE; 4384 } 4385 if (eoff > bcount) 4386 eoff = bcount; 4387 if (soff >= eoff) 4388 return; 4389 vm_page_set_validdirty(m, soff & PAGE_MASK, eoff - soff); 4390 } 4391 #endif 4392 4393 /* 4394 * vfs_bio_clrbuf: 4395 * 4396 * Clear a buffer. This routine essentially fakes an I/O, so we need 4397 * to clear B_ERROR and B_INVAL. 4398 * 4399 * Note that while we only theoretically need to clear through b_bcount, 4400 * we go ahead and clear through b_bufsize. 
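 *	Only DEV_BSIZE chunks that are not already valid (and not already
 *	known to be zero via PG_ZERO) are actually zeroed; validity is
 *	tracked per chunk in the page's valid bits (e.g. a 2K buffer with
 *	a 512-byte DEV_BSIZE uses the mask 0x0f).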
4401 */ 4402 4403 void 4404 vfs_bio_clrbuf(struct buf *bp) 4405 { 4406 int i, mask = 0; 4407 caddr_t sa, ea; 4408 if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) { 4409 bp->b_flags &= ~(B_INVAL | B_EINTR | B_ERROR); 4410 if ((bp->b_xio.xio_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && 4411 (bp->b_loffset & PAGE_MASK) == 0) { 4412 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; 4413 if ((bp->b_xio.xio_pages[0]->valid & mask) == mask) { 4414 bp->b_resid = 0; 4415 return; 4416 } 4417 if (((bp->b_xio.xio_pages[0]->flags & PG_ZERO) == 0) && 4418 ((bp->b_xio.xio_pages[0]->valid & mask) == 0)) { 4419 bzero(bp->b_data, bp->b_bufsize); 4420 bp->b_xio.xio_pages[0]->valid |= mask; 4421 bp->b_resid = 0; 4422 return; 4423 } 4424 } 4425 sa = bp->b_data; 4426 for(i=0;i<bp->b_xio.xio_npages;i++,sa=ea) { 4427 int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE; 4428 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE); 4429 ea = (caddr_t)(vm_offset_t)ulmin( 4430 (u_long)(vm_offset_t)ea, 4431 (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize); 4432 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; 4433 if ((bp->b_xio.xio_pages[i]->valid & mask) == mask) 4434 continue; 4435 if ((bp->b_xio.xio_pages[i]->valid & mask) == 0) { 4436 if ((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) { 4437 bzero(sa, ea - sa); 4438 } 4439 } else { 4440 for (; sa < ea; sa += DEV_BSIZE, j++) { 4441 if (((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) && 4442 (bp->b_xio.xio_pages[i]->valid & (1<<j)) == 0) 4443 bzero(sa, DEV_BSIZE); 4444 } 4445 } 4446 bp->b_xio.xio_pages[i]->valid |= mask; 4447 vm_page_flag_clear(bp->b_xio.xio_pages[i], PG_ZERO); 4448 } 4449 bp->b_resid = 0; 4450 } else { 4451 clrbuf(bp); 4452 } 4453 } 4454 4455 /* 4456 * vm_hold_load_pages: 4457 * 4458 * Load pages into the buffer's address space. The pages are 4459 * allocated from the kernel object in order to reduce interference 4460 * with the any VM paging I/O activity. The range of loaded 4461 * pages will be wired. 4462 * 4463 * If a page cannot be allocated, the 'pagedaemon' is woken up to 4464 * retrieve the full range (to - from) of pages. 4465 * 4466 * MPSAFE 4467 */ 4468 void 4469 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) 4470 { 4471 vm_offset_t pg; 4472 vm_page_t p; 4473 int index; 4474 4475 to = round_page(to); 4476 from = round_page(from); 4477 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 4478 4479 pg = from; 4480 while (pg < to) { 4481 /* 4482 * Note: must allocate system pages since blocking here 4483 * could intefere with paging I/O, no matter which 4484 * process we are. 4485 */ 4486 vm_object_hold(&kernel_object); 4487 p = bio_page_alloc(bp, &kernel_object, pg >> PAGE_SHIFT, 4488 (vm_pindex_t)((to - pg) >> PAGE_SHIFT)); 4489 vm_object_drop(&kernel_object); 4490 if (p) { 4491 vm_page_wire(p); 4492 p->valid = VM_PAGE_BITS_ALL; 4493 vm_page_flag_clear(p, PG_ZERO); 4494 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 4495 bp->b_xio.xio_pages[index] = p; 4496 vm_page_wakeup(p); 4497 4498 pg += PAGE_SIZE; 4499 ++index; 4500 } 4501 } 4502 bp->b_xio.xio_npages = index; 4503 } 4504 4505 /* 4506 * Allocate a page for a buffer cache buffer. 4507 * 4508 * If NULL is returned the caller is expected to retry (typically check if 4509 * the page already exists on retry before trying to allocate one). 4510 * 4511 * NOTE! Low-memory handling is dealt with in b[q]relse(), not here. 
This 4512 * function will use the system reserve with the hope that the page 4513 * allocations can be returned to PQ_CACHE/PQ_FREE when the caller 4514 * is done with the buffer. 4515 * 4516 * NOTE! However, TMPFS is a special case because flushing a dirty buffer 4517 * to TMPFS doesn't clean the page. For TMPFS, only the pagedaemon 4518 * is capable of retiring pages (to swap). For TMPFS we don't dig 4519 * into the system reserve because doing so could stall out pretty 4520 * much every process running on the system. 4521 */ 4522 static 4523 vm_page_t 4524 bio_page_alloc(struct buf *bp, vm_object_t obj, vm_pindex_t pg, int deficit) 4525 { 4526 int vmflags = VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK; 4527 vm_page_t p; 4528 4529 ASSERT_LWKT_TOKEN_HELD(vm_object_token(obj)); 4530 4531 /* 4532 * Try a normal allocation first. 4533 */ 4534 p = vm_page_alloc(obj, pg, vmflags); 4535 if (p) 4536 return(p); 4537 if (vm_page_lookup(obj, pg)) 4538 return(NULL); 4539 vm_pageout_deficit += deficit; 4540 4541 /* 4542 * Try again, digging into the system reserve. 4543 * 4544 * Trying to recover pages from the buffer cache here can deadlock 4545 * against other threads trying to busy underlying pages so we 4546 * depend on the code in brelse() and bqrelse() to free/cache the 4547 * underlying buffer cache pages when memory is low. 4548 */ 4549 if (curthread->td_flags & TDF_SYSTHREAD) 4550 vmflags |= VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT; 4551 else if (bp->b_vp && bp->b_vp->v_tag == VT_TMPFS) 4552 vmflags |= 0; 4553 else 4554 vmflags |= VM_ALLOC_SYSTEM; 4555 4556 /*recoverbufpages();*/ 4557 p = vm_page_alloc(obj, pg, vmflags); 4558 if (p) 4559 return(p); 4560 if (vm_page_lookup(obj, pg)) 4561 return(NULL); 4562 4563 /* 4564 * Wait for memory to free up and try again 4565 */ 4566 if (vm_page_count_severe()) 4567 ++lowmempgallocs; 4568 vm_wait(hz / 20 + 1); 4569 4570 p = vm_page_alloc(obj, pg, vmflags); 4571 if (p) 4572 return(p); 4573 if (vm_page_lookup(obj, pg)) 4574 return(NULL); 4575 4576 /* 4577 * Ok, now we are really in trouble. 4578 */ 4579 { 4580 static struct krate biokrate = { .freq = 1 }; 4581 krateprintf(&biokrate, 4582 "Warning: bio_page_alloc: memory exhausted " 4583 "during bufcache page allocation from %s\n", 4584 curthread->td_comm); 4585 } 4586 if (curthread->td_flags & TDF_SYSTHREAD) 4587 vm_wait(hz / 20 + 1); 4588 else 4589 vm_wait(hz / 2 + 1); 4590 return (NULL); 4591 } 4592 4593 /* 4594 * vm_hold_free_pages: 4595 * 4596 * Return pages associated with the buffer back to the VM system. 4597 * 4598 * The range of pages underlying the buffer's address space will 4599 * be unmapped and un-wired. 
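 *	This is the inverse of vm_hold_load_pages().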
4600 * 4601 * MPSAFE 4602 */ 4603 void 4604 vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) 4605 { 4606 vm_offset_t pg; 4607 vm_page_t p; 4608 int index, newnpages; 4609 4610 from = round_page(from); 4611 to = round_page(to); 4612 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 4613 newnpages = index; 4614 4615 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 4616 p = bp->b_xio.xio_pages[index]; 4617 if (p && (index < bp->b_xio.xio_npages)) { 4618 if (p->busy) { 4619 kprintf("vm_hold_free_pages: doffset: %lld, " 4620 "loffset: %lld\n", 4621 (long long)bp->b_bio2.bio_offset, 4622 (long long)bp->b_loffset); 4623 } 4624 bp->b_xio.xio_pages[index] = NULL; 4625 pmap_kremove(pg); 4626 vm_page_busy_wait(p, FALSE, "vmhldpg"); 4627 vm_page_unwire(p, 0); 4628 vm_page_free(p); 4629 } 4630 } 4631 bp->b_xio.xio_npages = newnpages; 4632 } 4633 4634 /* 4635 * vmapbuf: 4636 * 4637 * Map a user buffer into KVM via a pbuf. On return the buffer's 4638 * b_data, b_bufsize, and b_bcount will be set, and its XIO page array 4639 * initialized. 4640 */ 4641 int 4642 vmapbuf(struct buf *bp, caddr_t udata, int bytes) 4643 { 4644 caddr_t addr; 4645 vm_offset_t va; 4646 vm_page_t m; 4647 int vmprot; 4648 int error; 4649 int pidx; 4650 int i; 4651 4652 /* 4653 * bp had better have a command and it better be a pbuf. 4654 */ 4655 KKASSERT(bp->b_cmd != BUF_CMD_DONE); 4656 KKASSERT(bp->b_flags & B_PAGING); 4657 KKASSERT(bp->b_kvabase); 4658 4659 if (bytes < 0) 4660 return (-1); 4661 4662 /* 4663 * Map the user data into KVM. Mappings have to be page-aligned. 4664 */ 4665 addr = (caddr_t)trunc_page((vm_offset_t)udata); 4666 pidx = 0; 4667 4668 vmprot = VM_PROT_READ; 4669 if (bp->b_cmd == BUF_CMD_READ) 4670 vmprot |= VM_PROT_WRITE; 4671 4672 while (addr < udata + bytes) { 4673 /* 4674 * Do the vm_fault if needed; do the copy-on-write thing 4675 * when reading stuff off device into memory. 4676 * 4677 * vm_fault_page*() returns a held VM page. 4678 */ 4679 va = (addr >= udata) ? (vm_offset_t)addr : (vm_offset_t)udata; 4680 va = trunc_page(va); 4681 4682 m = vm_fault_page_quick(va, vmprot, &error); 4683 if (m == NULL) { 4684 for (i = 0; i < pidx; ++i) { 4685 vm_page_unhold(bp->b_xio.xio_pages[i]); 4686 bp->b_xio.xio_pages[i] = NULL; 4687 } 4688 return(-1); 4689 } 4690 bp->b_xio.xio_pages[pidx] = m; 4691 addr += PAGE_SIZE; 4692 ++pidx; 4693 } 4694 4695 /* 4696 * Map the page array and set the buffer fields to point to 4697 * the mapped data buffer. 4698 */ 4699 if (pidx > btoc(MAXPHYS)) 4700 panic("vmapbuf: mapped more than MAXPHYS"); 4701 pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_xio.xio_pages, pidx); 4702 4703 bp->b_xio.xio_npages = pidx; 4704 bp->b_data = bp->b_kvabase + ((int)(intptr_t)udata & PAGE_MASK); 4705 bp->b_bcount = bytes; 4706 bp->b_bufsize = bytes; 4707 return(0); 4708 } 4709 4710 /* 4711 * vunmapbuf: 4712 * 4713 * Free the io map PTEs associated with this IO operation. 4714 * We also invalidate the TLB entries and restore the original b_addr. 4715 */ 4716 void 4717 vunmapbuf(struct buf *bp) 4718 { 4719 int pidx; 4720 int npages; 4721 4722 KKASSERT(bp->b_flags & B_PAGING); 4723 4724 npages = bp->b_xio.xio_npages; 4725 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); 4726 for (pidx = 0; pidx < npages; ++pidx) { 4727 vm_page_unhold(bp->b_xio.xio_pages[pidx]); 4728 bp->b_xio.xio_pages[pidx] = NULL; 4729 } 4730 bp->b_xio.xio_npages = 0; 4731 bp->b_data = bp->b_kvabase; 4732 } 4733 4734 /* 4735 * Scan all buffers in the system and issue the callback. 
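 *	The callback is invoked once for every buffer header.  A negative
 *	return value aborts the scan and is returned to the caller as-is;
 *	non-negative return values are summed into the result.  Purely as
 *	an illustration (the callback name is hypothetical):
 *
 *		static int
 *		count_delwri(struct buf *bp, void *info)
 *		{
 *			return ((bp->b_flags & B_DELWRI) ? 1 : 0);
 *		}
 *
 *		n = scan_all_buffers(count_delwri, NULL);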
4736 */ 4737 int 4738 scan_all_buffers(int (*callback)(struct buf *, void *), void *info) 4739 { 4740 int count = 0; 4741 int error; 4742 long n; 4743 4744 for (n = 0; n < nbuf; ++n) { 4745 if ((error = callback(&buf[n], info)) < 0) { 4746 count = error; 4747 break; 4748 } 4749 count += error; 4750 } 4751 return (count); 4752 } 4753 4754 /* 4755 * nestiobuf_iodone: biodone callback for nested buffers and propagate 4756 * completion to the master buffer. 4757 */ 4758 static void 4759 nestiobuf_iodone(struct bio *bio) 4760 { 4761 struct bio *mbio; 4762 struct buf *mbp, *bp; 4763 struct devstat *stats; 4764 int error; 4765 int donebytes; 4766 4767 bp = bio->bio_buf; 4768 mbio = bio->bio_caller_info1.ptr; 4769 stats = bio->bio_caller_info2.ptr; 4770 mbp = mbio->bio_buf; 4771 4772 KKASSERT(bp->b_bcount <= bp->b_bufsize); 4773 KKASSERT(mbp != bp); 4774 4775 error = bp->b_error; 4776 if (bp->b_error == 0 && 4777 (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) { 4778 /* 4779 * Not all got transfered, raise an error. We have no way to 4780 * propagate these conditions to mbp. 4781 */ 4782 error = EIO; 4783 } 4784 4785 donebytes = bp->b_bufsize; 4786 4787 relpbuf(bp, NULL); 4788 4789 nestiobuf_done(mbio, donebytes, error, stats); 4790 } 4791 4792 void 4793 nestiobuf_done(struct bio *mbio, int donebytes, int error, struct devstat *stats) 4794 { 4795 struct buf *mbp; 4796 4797 mbp = mbio->bio_buf; 4798 4799 KKASSERT((int)(intptr_t)mbio->bio_driver_info > 0); 4800 4801 /* 4802 * If an error occured, propagate it to the master buffer. 4803 * 4804 * Several biodone()s may wind up running concurrently so 4805 * use an atomic op to adjust b_flags. 4806 */ 4807 if (error) { 4808 mbp->b_error = error; 4809 atomic_set_int(&mbp->b_flags, B_ERROR); 4810 } 4811 4812 /* 4813 * Decrement the operations in progress counter and terminate the 4814 * I/O if this was the last bit. 4815 */ 4816 if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) { 4817 mbp->b_resid = 0; 4818 if (stats) 4819 devstat_end_transaction_buf(stats, mbp); 4820 biodone(mbio); 4821 } 4822 } 4823 4824 /* 4825 * Initialize a nestiobuf for use. Set an initial count of 1 to prevent 4826 * the mbio from being biodone()'d while we are still adding sub-bios to 4827 * it. 4828 */ 4829 void 4830 nestiobuf_init(struct bio *bio) 4831 { 4832 bio->bio_driver_info = (void *)1; 4833 } 4834 4835 /* 4836 * The BIOs added to the nestedio have already been started, remove the 4837 * count that placeheld our mbio and biodone() it if the count would 4838 * transition to 0. 4839 */ 4840 void 4841 nestiobuf_start(struct bio *mbio) 4842 { 4843 struct buf *mbp = mbio->bio_buf; 4844 4845 /* 4846 * Decrement the operations in progress counter and terminate the 4847 * I/O if this was the last bit. 4848 */ 4849 if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) { 4850 if (mbp->b_flags & B_ERROR) 4851 mbp->b_resid = mbp->b_bcount; 4852 else 4853 mbp->b_resid = 0; 4854 biodone(mbio); 4855 } 4856 } 4857 4858 /* 4859 * Set an intermediate error prior to calling nestiobuf_start() 4860 */ 4861 void 4862 nestiobuf_error(struct bio *mbio, int error) 4863 { 4864 struct buf *mbp = mbio->bio_buf; 4865 4866 if (error) { 4867 mbp->b_error = error; 4868 atomic_set_int(&mbp->b_flags, B_ERROR); 4869 } 4870 } 4871 4872 /* 4873 * nestiobuf_add: setup a "nested" buffer. 4874 * 4875 * => 'mbp' is a "master" buffer which is being divided into sub pieces. 4876 * => 'bp' should be a buffer allocated by getiobuf. 
4877 * => 'offset' is a byte offset in the master buffer. 4878 * => 'size' is a size in bytes of this nested buffer. 4879 */ 4880 void 4881 nestiobuf_add(struct bio *mbio, struct buf *bp, int offset, size_t size, struct devstat *stats) 4882 { 4883 struct buf *mbp = mbio->bio_buf; 4884 struct vnode *vp = mbp->b_vp; 4885 4886 KKASSERT(mbp->b_bcount >= offset + size); 4887 4888 atomic_add_int((int *)&mbio->bio_driver_info, 1); 4889 4890 /* kernel needs to own the lock for it to be released in biodone */ 4891 BUF_KERNPROC(bp); 4892 bp->b_vp = vp; 4893 bp->b_cmd = mbp->b_cmd; 4894 bp->b_bio1.bio_done = nestiobuf_iodone; 4895 bp->b_data = (char *)mbp->b_data + offset; 4896 bp->b_resid = bp->b_bcount = size; 4897 bp->b_bufsize = bp->b_bcount; 4898 4899 bp->b_bio1.bio_track = NULL; 4900 bp->b_bio1.bio_caller_info1.ptr = mbio; 4901 bp->b_bio1.bio_caller_info2.ptr = stats; 4902 } 4903 4904 /* 4905 * print out statistics from the current status of the buffer pool 4906 * this can be toggeled by the system control option debug.syncprt 4907 */ 4908 #ifdef DEBUG 4909 void 4910 vfs_bufstats(void) 4911 { 4912 int i, j, count; 4913 struct buf *bp; 4914 struct bqueues *dp; 4915 int counts[(MAXBSIZE / PAGE_SIZE) + 1]; 4916 static char *bname[3] = { "LOCKED", "LRU", "AGE" }; 4917 4918 for (dp = bufqueues, i = 0; dp < &bufqueues[3]; dp++, i++) { 4919 count = 0; 4920 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++) 4921 counts[j] = 0; 4922 4923 spin_lock(&bufqspin); 4924 TAILQ_FOREACH(bp, dp, b_freelist) { 4925 if (bp->b_flags & B_MARKER) 4926 continue; 4927 counts[bp->b_bufsize/PAGE_SIZE]++; 4928 count++; 4929 } 4930 spin_unlock(&bufqspin); 4931 4932 kprintf("%s: total-%d", bname[i], count); 4933 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++) 4934 if (counts[j] != 0) 4935 kprintf(", %d-%d", j * PAGE_SIZE, counts[j]); 4936 kprintf("\n"); 4937 } 4938 } 4939 #endif 4940 4941 #ifdef DDB 4942 4943 DB_SHOW_COMMAND(buffer, db_show_buffer) 4944 { 4945 /* get args */ 4946 struct buf *bp = (struct buf *)addr; 4947 4948 if (!have_addr) { 4949 db_printf("usage: show buffer <addr>\n"); 4950 return; 4951 } 4952 4953 db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS); 4954 db_printf("b_cmd = %d\n", bp->b_cmd); 4955 db_printf("b_error = %d, b_bufsize = %d, b_bcount = %d, " 4956 "b_resid = %d\n, b_data = %p, " 4957 "bio_offset(disk) = %lld, bio_offset(phys) = %lld\n", 4958 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 4959 bp->b_data, 4960 (long long)bp->b_bio2.bio_offset, 4961 (long long)(bp->b_bio2.bio_next ? 4962 bp->b_bio2.bio_next->bio_offset : (off_t)-1)); 4963 if (bp->b_xio.xio_npages) { 4964 int i; 4965 db_printf("b_xio.xio_npages = %d, pages(OBJ, IDX, PA): ", 4966 bp->b_xio.xio_npages); 4967 for (i = 0; i < bp->b_xio.xio_npages; i++) { 4968 vm_page_t m; 4969 m = bp->b_xio.xio_pages[i]; 4970 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 4971 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 4972 if ((i + 1) < bp->b_xio.xio_npages) 4973 db_printf(","); 4974 } 4975 db_printf("\n"); 4976 } 4977 } 4978 #endif /* DDB */ 4979
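
#if 0
/*
 * Illustrative sketch only (never compiled): one way a consumer might use
 * the nestiobuf API above to split a vnode-backed master buffer into
 * fixed-size sub-I/Os.  The function name and the 64KB chunk size are
 * hypothetical, the sub-buffers are assumed to be pbufs (released by
 * relpbuf() in nestiobuf_iodone()), and no devstat accounting is shown.
 */
static void
example_nested_io(struct bio *mbio)
{
	struct buf *mbp = mbio->bio_buf;
	int chunk = 64 * 1024;			/* hypothetical chunk size */
	int offset;
	int size;

	nestiobuf_init(mbio);			/* placehold the master bio */

	for (offset = 0; offset < mbp->b_bcount; offset += size) {
		struct buf *bp = getpbuf(NULL);	/* sub-buffer */

		size = chunk;
		if (size > mbp->b_bcount - offset)
			size = mbp->b_bcount - offset;
		nestiobuf_add(mbio, bp, offset, size, NULL);
		vn_strategy(mbp->b_vp, &bp->b_bio1);
	}

	/*
	 * Remove the placehold count; biodone() fires on the master bio
	 * once every sub-I/O has completed.
	 */
	nestiobuf_start(mbio);
}
#endif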