/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 *
 * see man buf(9) for more info.  Note that man buf(9) doesn't reflect
 * the actual buf/bio implementation in DragonFly.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/dsched.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Buffer queues.
 */
enum bufq_type {
	BQUEUE_NONE,		/* not on any queue */
	BQUEUE_LOCKED,		/* locked buffers */
	BQUEUE_CLEAN,		/* non-B_DELWRI buffers */
	BQUEUE_DIRTY,		/* B_DELWRI buffers */
	BQUEUE_DIRTY_HW,	/* B_DELWRI buffers - heavy weight */
	BQUEUE_EMPTY,		/* empty buffer headers */

	BUFFER_QUEUES		/* number of buffer queues */
};

typedef enum bufq_type bufq_type_t;

#define BD_WAKE_SIZE	16384
#define BD_WAKE_MASK	(BD_WAKE_SIZE - 1)

TAILQ_HEAD(bqueues, buf);

struct bufpcpu {
	struct spinlock spin;
	struct bqueues bufqueues[BUFFER_QUEUES];
} __cachealign;

struct bufpcpu bufpcpu[MAXCPU];

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct buf *buf;		/* buffer header pool */

static void vfs_clean_pages(struct buf *bp);
static void vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m);
#if 0
static void vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m);
#endif
static void vfs_vmio_release(struct buf *bp);
static int flushbufqueues(struct buf *marker, bufq_type_t q);
static vm_page_t bio_page_alloc(struct buf *bp, vm_object_t obj,
				vm_pindex_t pg, int deficit);

static void bd_signal(long totalspace);
static void buf_daemon(void);
static void buf_daemon_hw(void);

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * These are all static, but make the ones we export globals so we do
 * not need to use compiler magic.
 */
long bufspace;			/* atomic ops */
long maxbufspace;
long maxbufmallocspace, lobufspace, hibufspace;
static long lorunningspace;
static long hirunningspace;
static long dirtykvaspace;		/* atomic */
long dirtybufspace;			/* atomic (global for systat) */
static long dirtybufcount;		/* atomic */
static long dirtybufspacehw;		/* atomic */
static long dirtybufcounthw;		/* atomic */
static long runningbufspace;		/* atomic */
static long runningbufcount;		/* atomic */
long lodirtybufspace;
long hidirtybufspace;
static int getnewbufcalls;
static int needsbuffer;			/* atomic */
static int runningbufreq;		/* atomic */
static int bd_request;			/* atomic */
static int bd_request_hw;		/* atomic */
static u_int bd_wake_ary[BD_WAKE_SIZE];
static u_int bd_wake_index;
static u_int vm_cycle_point = 40;	/* 23-36 will migrate more act->inact */
static int debug_commit;
static int debug_bufbio;
static int debug_kvabio;
static long bufcache_bw = 200 * 1024 * 1024;

static struct thread *bufdaemon_td;
static struct thread *bufdaemonhw_td;
static u_int lowmempgallocs;
static u_int flushperqueue = 1024;

/*
 * Sysctls for operational control of the buffer cache.
 */
SYSCTL_UINT(_vfs, OID_AUTO, flushperqueue, CTLFLAG_RW, &flushperqueue, 0,
	"Number of buffers to flush from each per-cpu queue");
SYSCTL_LONG(_vfs, OID_AUTO, lodirtybufspace, CTLFLAG_RW, &lodirtybufspace, 0,
	"Number of dirty buffers to flush before bufdaemon becomes inactive");
SYSCTL_LONG(_vfs, OID_AUTO, hidirtybufspace, CTLFLAG_RW, &hidirtybufspace, 0,
	"High watermark used to trigger explicit flushing of dirty buffers");
SYSCTL_LONG(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
	"Minimum amount of buffer space required for active I/O");
SYSCTL_LONG(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
	"Maximum amount of buffer space to use for active I/O");
SYSCTL_LONG(_vfs, OID_AUTO, bufcache_bw, CTLFLAG_RW, &bufcache_bw, 0,
	"Buffer-cache -> VM page cache transfer bandwidth");
SYSCTL_UINT(_vfs, OID_AUTO, lowmempgallocs, CTLFLAG_RW, &lowmempgallocs, 0,
	"Page allocations done during periods of very low free memory");
SYSCTL_UINT(_vfs, OID_AUTO, vm_cycle_point, CTLFLAG_RW, &vm_cycle_point, 0,
	"Recycle pages to active or inactive queue transition pt 0-64");
/*
 * Sysctls determining current state of the buffer cache.
 */
SYSCTL_LONG(_vfs, OID_AUTO, nbuf, CTLFLAG_RD, &nbuf, 0,
	"Total number of buffers in buffer cache");
SYSCTL_LONG(_vfs, OID_AUTO, dirtykvaspace, CTLFLAG_RD, &dirtykvaspace, 0,
	"KVA reserved by dirty buffers (all)");
SYSCTL_LONG(_vfs, OID_AUTO, dirtybufspace, CTLFLAG_RD, &dirtybufspace, 0,
	"Pending bytes of dirty buffers (all)");
SYSCTL_LONG(_vfs, OID_AUTO, dirtybufspacehw, CTLFLAG_RD, &dirtybufspacehw, 0,
	"Pending bytes of dirty buffers (heavy weight)");
SYSCTL_LONG(_vfs, OID_AUTO, dirtybufcount, CTLFLAG_RD, &dirtybufcount, 0,
	"Pending number of dirty buffers");
SYSCTL_LONG(_vfs, OID_AUTO, dirtybufcounthw, CTLFLAG_RD, &dirtybufcounthw, 0,
	"Pending number of dirty buffers (heavy weight)");
SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
	"I/O bytes currently in progress due to asynchronous writes");
SYSCTL_LONG(_vfs, OID_AUTO, runningbufcount, CTLFLAG_RD, &runningbufcount, 0,
	"I/O buffers currently in progress due to asynchronous writes");
SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
	"Hard limit on maximum amount of memory usable for buffer space");
SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
	"Soft limit on maximum amount of memory usable for buffer space");
SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
	"Minimum amount of memory to reserve for system buffer space");
SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
	"Amount of memory available for buffers");
SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RD, &maxbufmallocspace,
	0, "Maximum amount of memory reserved for buffers using malloc");
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD, &getnewbufcalls, 0,
	"New buffer header acquisition requests");
SYSCTL_INT(_vfs, OID_AUTO, debug_commit, CTLFLAG_RW, &debug_commit, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, debug_bufbio, CTLFLAG_RW, &debug_bufbio, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, debug_kvabio, CTLFLAG_RW, &debug_kvabio, 0, "");
SYSCTL_INT(_debug_sizeof, OID_AUTO, buf, CTLFLAG_RD, 0, sizeof(struct buf),
	"sizeof(struct buf)");

char *buf_wmesg = BUF_WMESG;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_UNUSED02	0x02
#define VFS_BIO_NEED_UNUSED04	0x04
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */

/*
 * Called when buffer space is potentially available for recovery.
 * getnewbuf() will block on this flag when it is unable to free
 * sufficient buffer space.  Buffer space becomes recoverable when
 * bp's get placed back in the queues.
 */
static __inline void
bufspacewakeup(void)
{
	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	for (;;) {
		int flags = needsbuffer;
		cpu_ccfence();
		if ((flags & VFS_BIO_NEED_BUFSPACE) == 0)
			break;
		if (atomic_cmpset_int(&needsbuffer, flags,
				      flags & ~VFS_BIO_NEED_BUFSPACE)) {
			wakeup(&needsbuffer);
			break;
		}
		/* retry */
	}
}

/*
 * runningbufwakeup:
 *
 *	Accounting for I/O in progress.
 *
 */
static __inline void
runningbufwakeup(struct buf *bp)
{
	long totalspace;
	long flags;

	if ((totalspace = bp->b_runningbufspace) != 0) {
		atomic_add_long(&runningbufspace, -totalspace);
		atomic_add_long(&runningbufcount, -1);
		bp->b_runningbufspace = 0;

		/*
		 * see waitrunningbufspace() for limit test.
		 */
		for (;;) {
			flags = runningbufreq;
			cpu_ccfence();
			if (flags == 0)
				break;
			if (atomic_cmpset_int(&runningbufreq, flags, 0)) {
				wakeup(&runningbufreq);
				break;
			}
			/* retry */
		}
		bd_signal(totalspace);
	}
}

/*
 * bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */
static __inline void
bufcountwakeup(void)
{
	long flags;

	for (;;) {
		flags = needsbuffer;
		if (flags == 0)
			break;
		if (atomic_cmpset_int(&needsbuffer, flags,
				      (flags & ~VFS_BIO_NEED_ANY))) {
			wakeup(&needsbuffer);
			break;
		}
		/* retry */
	}
}

/*
 * waitrunningbufspace()
 *
 * If runningbufspace exceeds 4/6 hirunningspace we block until
 * runningbufspace drops to 3/6 hirunningspace.  We also block if another
 * thread blocked here in order to be fair, even if runningbufspace
 * is now lower than the limit.
 *
 * The caller may be using this function to block in a tight loop, we
 * must block while runningbufspace is greater than at least
 * hirunningspace * 3 / 6.
 */
void
waitrunningbufspace(void)
{
	long limit = hirunningspace * 4 / 6;
	long flags;

	while (runningbufspace > limit || runningbufreq) {
		tsleep_interlock(&runningbufreq, 0);
		flags = atomic_fetchadd_int(&runningbufreq, 1);
		if (runningbufspace > limit || flags)
			tsleep(&runningbufreq, PINTERLOCKED, "wdrn1", hz);
	}
}

/*
 * buf_dirty_count_severe:
 *
 *	Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{
	return (runningbufspace + dirtykvaspace >= hidirtybufspace ||
		dirtybufcount >= nbuf / 2);
}
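
/*
 * Illustrative sketch (not from the original source): how a VOP_WRITE
 * implementation might consult buf_dirty_count_severe() to choose between
 * a delayed write and an immediate asynchronous write.  The surrounding
 * VOP context, the locked buffer 'bp' and error handling are assumed,
 * not shown.
 */
#if 0
	if (buf_dirty_count_severe())
		bawrite(bp);		/* too much dirty data, push it now */
	else
		bdwrite(bp);		/* normal case, delay the write */
#endif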

/*
 * Return true if the amount of running I/O is severe and BIOQ should
 * start bursting.
 */
int
buf_runningbufspace_severe(void)
{
	return (runningbufspace >= hirunningspace * 4 / 6);
}

/*
 * vfs_buf_test_cache:
 *
 * Called when a buffer is extended.  This function clears the B_CACHE
 * bit if the newly extended portion of the buffer does not contain
 * valid data.
 *
 * NOTE! Dirty VM pages are not processed into dirty (B_DELWRI) buffer
 *	 cache buffers.  The VM pages remain dirty, as someone had mmap()'d
 *	 them while a clean buffer was present.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
		   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		   vm_page_t m)
{
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/*
 * bd_speedup()
 *
 * Spank the buf_daemon[_hw] if the total dirty buffer space exceeds the
 * low water mark.
 */
static __inline__
void
bd_speedup(void)
{
	if (dirtykvaspace < lodirtybufspace && dirtybufcount < nbuf / 2)
		return;

	if (bd_request == 0 &&
	    (dirtykvaspace > lodirtybufspace / 2 ||
	     dirtybufcount - dirtybufcounthw >= nbuf / 2)) {
		if (atomic_fetchadd_int(&bd_request, 1) == 0)
			wakeup(&bd_request);
	}
	if (bd_request_hw == 0 &&
	    (dirtykvaspace > lodirtybufspace / 2 ||
	     dirtybufcounthw >= nbuf / 2)) {
		if (atomic_fetchadd_int(&bd_request_hw, 1) == 0)
			wakeup(&bd_request_hw);
	}
}

/*
 * bd_heatup()
 *
 *	Get the buf_daemon heated up when the number of running and dirty
 *	buffers exceeds the mid-point.
 *
 *	Return the total number of dirty bytes past the second mid point
 *	as a measure of how much excess dirty data there is in the system.
 */
long
bd_heatup(void)
{
	long mid1;
	long mid2;
	long totalspace;

	mid1 = lodirtybufspace + (hidirtybufspace - lodirtybufspace) / 2;

	totalspace = runningbufspace + dirtykvaspace;
	if (totalspace >= mid1 || dirtybufcount >= nbuf / 2) {
		bd_speedup();
		mid2 = mid1 + (hidirtybufspace - mid1) / 2;
		if (totalspace >= mid2)
			return(totalspace - mid2);
	}
	return(0);
}
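
/*
 * Illustrative sketch (not from the original source): how a write-heavy
 * caller can participate in the heatup/wait protocol using bd_heatup()
 * above and bd_wait() below.  bd_heatup() returns the number of dirty
 * bytes past the second mid-point; bd_wait() then blocks until roughly
 * that much has been flushed.
 */
#if 0
	long excess;

	excess = bd_heatup();		/* wake buf_daemon[_hw] if needed */
	if (excess > 0)
		bd_wait(excess);	/* throttle until the excess drains */
#endif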

/*
 * bd_wait()
 *
 *	Wait for the buffer cache to flush (totalspace) bytes worth of
 *	buffers, then return.
 *
 *	Regardless this function blocks while the number of dirty buffers
 *	exceeds hidirtybufspace.
 */
void
bd_wait(long totalspace)
{
	u_int i;
	u_int j;
	u_int mi;
	int count;

	if (curthread == bufdaemonhw_td || curthread == bufdaemon_td)
		return;

	while (totalspace > 0) {
		bd_heatup();

		/*
		 * Order is important.  Suppliers adjust bd_wake_index after
		 * updating runningbufspace/dirtykvaspace.  We want to fetch
		 * bd_wake_index before accessing.  Any error should thus
		 * be in our favor.
		 */
		i = atomic_fetchadd_int(&bd_wake_index, 0);
		if (totalspace > runningbufspace + dirtykvaspace)
			totalspace = runningbufspace + dirtykvaspace;
		count = totalspace / MAXBSIZE;
		if (count >= BD_WAKE_SIZE / 2)
			count = BD_WAKE_SIZE / 2;
		i = i + count;
		mi = i & BD_WAKE_MASK;

		/*
		 * This is not a strict interlock, so we play a bit loose
		 * with locking access to dirtybufspace*.  We have to re-check
		 * bd_wake_index to ensure that it hasn't passed us.
		 */
		tsleep_interlock(&bd_wake_ary[mi], 0);
		atomic_add_int(&bd_wake_ary[mi], 1);
		j = atomic_fetchadd_int(&bd_wake_index, 0);
		if ((int)(i - j) >= 0)
			tsleep(&bd_wake_ary[mi], PINTERLOCKED, "flstik", hz);

		totalspace = runningbufspace + dirtykvaspace - hidirtybufspace;
	}
}

/*
 * bd_signal()
 *
 *	This function is called whenever runningbufspace or dirtykvaspace
 *	is reduced.  Track threads waiting for run+dirty buffer I/O
 *	complete.
 */
static void
bd_signal(long totalspace)
{
	u_int i;

	if (totalspace > 0) {
		if (totalspace > MAXBSIZE * BD_WAKE_SIZE)
			totalspace = MAXBSIZE * BD_WAKE_SIZE;
		while (totalspace > 0) {
			i = atomic_fetchadd_int(&bd_wake_index, 1);
			i &= BD_WAKE_MASK;
			if (atomic_readandclear_int(&bd_wake_ary[i]))
				wakeup(&bd_wake_ary[i]);
			totalspace -= MAXBSIZE;
		}
	}
}

/*
 * BIO tracking support routines.
 *
 * Release a ref on a bio_track.  Wakeup requests are atomically released
 * along with the last reference so bk_active will never wind up set to
 * only 0x80000000.
 */
static
void
bio_track_rel(struct bio_track *track)
{
	int	active;
	int	desired;

	/*
	 * Shortcut
	 */
	active = track->bk_active;
	if (active == 1 && atomic_cmpset_int(&track->bk_active, 1, 0))
		return;

	/*
	 * Full-on.  Note that the wait flag is only atomically released on
	 * the 1->0 count transition.
	 *
	 * We check for a negative count transition using bit 30 since bit 31
	 * has a different meaning.
	 */
	for (;;) {
		desired = (active & 0x7FFFFFFF) - 1;
		if (desired)
			desired |= active & 0x80000000;
		if (atomic_cmpset_int(&track->bk_active, active, desired)) {
			if (desired & 0x40000000)
				panic("bio_track_rel: bad count: %p", track);
			if (active & 0x80000000)
				wakeup(track);
			break;
		}
		active = track->bk_active;
	}
}

/*
 * Wait for the tracking count to reach 0.
 *
 * Use atomic ops such that the wait flag is only set atomically when
 * bk_active is non-zero.
 */
int
bio_track_wait(struct bio_track *track, int slp_flags, int slp_timo)
{
	int	active;
	int	desired;
	int	error;

	/*
	 * Shortcut
	 */
	if (track->bk_active == 0)
		return(0);

	/*
	 * Full-on.  Note that the wait flag may only be atomically set if
	 * the active count is non-zero.
	 *
	 * NOTE: We cannot optimize active == desired since a wakeup could
	 *	 clear active prior to our tsleep_interlock().
	 */
	error = 0;
	while ((active = track->bk_active) != 0) {
		cpu_ccfence();
		desired = active | 0x80000000;
		tsleep_interlock(track, slp_flags);
		if (atomic_cmpset_int(&track->bk_active, active, desired)) {
			error = tsleep(track, slp_flags | PINTERLOCKED,
				       "trwait", slp_timo);
			if (error)
				break;
		}
	}
	return (error);
}
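
/*
 * Illustrative sketch (not from the original source): draining write I/O
 * on a vnode with bio_track_wait().  The v_track_write field is assumed
 * here to be the vnode's write-side bio_track, as used by fsync-style
 * paths; check the vnode definition before relying on it.
 */
#if 0
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
#endif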

/*
 * bufinit:
 *
 *	Load time initialisation of the buffer cache, called from machine
 *	dependent initialization code.
 */
static
void
bufinit(void *dummy __unused)
{
	struct bufpcpu *pcpu;
	struct buf *bp;
	vm_offset_t bogus_offset;
	int i;
	int j;
	long n;

	/* next, make a null set of free lists */
	for (i = 0; i < ncpus; ++i) {
		pcpu = &bufpcpu[i];
		spin_init(&pcpu->spin, "bufinit");
		for (j = 0; j < BUFFER_QUEUES; j++)
			TAILQ_INIT(&pcpu->bufqueues[j]);
	}

	/*
	 * Finally, initialize each buffer header and stick on empty q.
	 * Each buffer gets its own KVA reservation.
	 */
	i = 0;
	pcpu = &bufpcpu[i];

	for (n = 0; n < nbuf; n++) {
		bp = &buf[n];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_cmd = BUF_CMD_DONE;
		bp->b_qindex = BQUEUE_EMPTY;
		bp->b_qcpu = i;
		bp->b_kvabase = (void *)(vm_map_min(&buffer_map) +
					 MAXBSIZE * n);
		bp->b_kvasize = MAXBSIZE;
		initbufbio(bp);
		xio_init(&bp->b_xio);
		buf_dep_init(bp);
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);

		i = (i + 1) % ncpus;
		pcpu = &bufpcpu[i];
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * Calculate hysteresis (lobufspace, hibufspace).  Don't make it
	 * too large or we might lockup a cpu for too long a period of
	 * time in our tight loop.
	 */
	maxbufspace = nbuf * NBUFCALCSIZE;
	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace * 7 / 8;
	if (hibufspace - lobufspace > 64 * 1024 * 1024)
		lobufspace = hibufspace - 64 * 1024 * 1024;
	if (lobufspace > hibufspace - MAXBSIZE)
		lobufspace = hibufspace - MAXBSIZE;

	lorunningspace = 512 * 1024;
	/* hirunningspace -- see below */

	/*
	 * Limit the amount of malloc memory since it is wired permanently
	 * into the kernel space.  Even though this is accounted for in
	 * the buffer allocation, we don't want the malloced region to grow
	 * uncontrolled.  The malloc scheme improves memory utilization
	 * significantly on average (small) directories.
	 */
	maxbufmallocspace = hibufspace / 20;

	/*
	 * Reduce the chance of a deadlock occurring by limiting the number
	 * of delayed-write dirty buffers we allow to stack up.
	 *
	 * We don't want too much actually queued to the device at once
	 * (XXX this needs to be per-mount!), because the buffers will
	 * wind up locked for a very long period of time while the I/O
	 * drains.
	 */
	hidirtybufspace = hibufspace / 2;	/* dirty + running */
	hirunningspace = hibufspace / 16;	/* locked & queued to device */
	if (hirunningspace < 1024 * 1024)
		hirunningspace = 1024 * 1024;

	dirtykvaspace = 0;
	dirtybufspace = 0;
	dirtybufspacehw = 0;

	lodirtybufspace = hidirtybufspace / 2;

	/*
	 * Maximum number of async ops initiated per buf_daemon loop.  This is
	 * somewhat of a hack at the moment, we really need to limit ourselves
	 * based on the number of bytes of I/O in-transit that were initiated
	 * from buf_daemon.
	 */

	bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE,
					   VM_SUBSYS_BOGUS);
	vm_object_hold(&kernel_object);
	bogus_page = vm_page_alloc(&kernel_object,
				   (bogus_offset >> PAGE_SHIFT),
				   VM_ALLOC_NORMAL);
	vm_object_drop(&kernel_object);
	vmstats.v_wire_count++;

}

SYSINIT(do_bufinit, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, bufinit, NULL);

/*
 * Initialize the embedded bio structures, typically used by
 * deprecated code which tries to allocate its own struct bufs.
 */
void
initbufbio(struct buf *bp)
{
	bp->b_bio1.bio_buf = bp;
	bp->b_bio1.bio_prev = NULL;
	bp->b_bio1.bio_offset = NOOFFSET;
	bp->b_bio1.bio_next = &bp->b_bio2;
	bp->b_bio1.bio_done = NULL;
	bp->b_bio1.bio_flags = 0;

	bp->b_bio2.bio_buf = bp;
	bp->b_bio2.bio_prev = &bp->b_bio1;
	bp->b_bio2.bio_offset = NOOFFSET;
	bp->b_bio2.bio_next = NULL;
	bp->b_bio2.bio_done = NULL;
	bp->b_bio2.bio_flags = 0;

	BUF_LOCKINIT(bp);
}

/*
 * Reinitialize the embedded bio structures as well as any additional
 * translation cache layers.
 */
void
reinitbufbio(struct buf *bp)
{
	struct bio *bio;

	for (bio = &bp->b_bio1; bio; bio = bio->bio_next) {
		bio->bio_done = NULL;
		bio->bio_offset = NOOFFSET;
	}
}

/*
 * Undo the effects of an initbufbio().
 */
void
uninitbufbio(struct buf *bp)
{
	dsched_buf_exit(bp);
	BUF_LOCKFREE(bp);
}

/*
 * Push another BIO layer onto an existing BIO and return it.  The new
 * BIO layer may already exist, holding cached translation data.
 */
struct bio *
push_bio(struct bio *bio)
{
	struct bio *nbio;

	if ((nbio = bio->bio_next) == NULL) {
		int index = bio - &bio->bio_buf->b_bio_array[0];
		if (index >= NBUF_BIO - 1) {
			panic("push_bio: too many layers %d for bp %p",
			      index, bio->bio_buf);
		}
		nbio = &bio->bio_buf->b_bio_array[index + 1];
		bio->bio_next = nbio;
		nbio->bio_prev = bio;
		nbio->bio_buf = bio->bio_buf;
		nbio->bio_offset = NOOFFSET;
		nbio->bio_done = NULL;
		nbio->bio_next = NULL;
	}
	KKASSERT(nbio->bio_done == NULL);
	return(nbio);
}

/*
 * Pop a BIO translation layer, returning the previous layer.  The
 * layer must have been previously pushed.
 */
struct bio *
pop_bio(struct bio *bio)
{
	return(bio->bio_prev);
}

void
clearbiocache(struct bio *bio)
{
	while (bio) {
		bio->bio_offset = NOOFFSET;
		bio = bio->bio_next;
	}
}
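
/*
 * Illustrative sketch (not from the original source): a filesystem
 * strategy routine using push_bio() to add a translation layer carrying
 * the device offset.  'devvp', 'fsbase' and 'myfs_strategy_done' are
 * hypothetical names; only push_bio() and vn_strategy() come from the
 * kernel proper.
 */
#if 0
static void
myfs_strategy(struct vnode *devvp, struct bio *bio, off_t fsbase)
{
	struct bio *nbio;

	nbio = push_bio(bio);			/* next translation layer */
	nbio->bio_offset = fsbase + bio->bio_offset;
	nbio->bio_done = myfs_strategy_done;	/* completion callback */
	vn_strategy(devvp, nbio);
}
#endif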

/*
 * Remove the buffer from the appropriate free list.
 * (caller must be locked)
 */
static __inline void
_bremfree(struct buf *bp)
{
	struct bufpcpu *pcpu = &bufpcpu[bp->b_qcpu];

	if (bp->b_qindex != BQUEUE_NONE) {
		KASSERT(BUF_LOCKINUSE(bp), ("bremfree: bp %p not locked", bp));
		TAILQ_REMOVE(&pcpu->bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = BQUEUE_NONE;
	} else {
		if (!BUF_LOCKINUSE(bp))
			panic("bremfree: removing a buffer not on a queue");
	}
}

/*
 * bremfree() - must be called with a locked buffer
 */
void
bremfree(struct buf *bp)
{
	struct bufpcpu *pcpu = &bufpcpu[bp->b_qcpu];

	spin_lock(&pcpu->spin);
	_bremfree(bp);
	spin_unlock(&pcpu->spin);
}

/*
 * bremfree_locked - must be called with pcpu->spin locked
 */
static void
bremfree_locked(struct buf *bp)
{
	_bremfree(bp);
}

/*
 * This version of bread issues any required I/O asynchronously and
 * makes a callback on completion.
 *
 * The callback must check whether BIO_DONE is set in the bio and issue
 * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
 */
void
breadcb(struct vnode *vp, off_t loffset, int size, int bflags,
	void (*func)(struct bio *), void *arg)
{
	struct buf *bp;

	bp = getblk(vp, loffset, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL | B_NOTMETA);
		bp->b_flags |= bflags;
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = func;
		bp->b_bio1.bio_caller_info1.ptr = arg;
		vfs_busy_pages(vp, bp);
		BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);
	} else if (func) {
		/*
		 * Since we are issuing the callback synchronously it cannot
		 * race the BIO_DONE, so no need for atomic ops here.
		 */
		/*bp->b_bio1.bio_done = func;*/
		bp->b_bio1.bio_caller_info1.ptr = arg;
		bp->b_bio1.bio_flags |= BIO_DONE;
		func(&bp->b_bio1);
	} else {
		bqrelse(bp);
	}
}
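
/*
 * Illustrative sketch (not from the original source): a breadcb()
 * completion callback following the rules documented above.  The name
 * 'myfs_read_done' and the consumption of b_data are hypothetical.
 */
#if 0
static void
myfs_read_done(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(bp, 0);			/* finish I/O accounting */
	bio->bio_flags &= ~BIO_DONE;
	/* ... consume bp->b_data here ... */
	bqrelse(bp);				/* dispose of the buffer */
}
#endif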

/*
 * breadnx() - Terminal function for bread() and breadn().
 *
 * This function will start asynchronous I/O on read-ahead blocks as well
 * as satisfy the primary request.
 *
 * We must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is
 * set, the buffer is valid and we do not have to do anything.
 */
int
breadnx(struct vnode *vp, off_t loffset, int size, int bflags,
	off_t *raoffset, int *rabsize,
	int cnt, struct buf **bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;
	int blkflags = (bflags & B_KVABIO) ? GETBLK_KVABIO : 0;

	if (*bpp)
		bp = *bpp;
	else
		*bpp = bp = getblk(vp, loffset, size, blkflags, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL | B_NOTMETA);
		bp->b_flags |= bflags;
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;
		vfs_busy_pages(vp, bp);
		vn_strategy(vp, &bp->b_bio1);
		++readwait;
	}

	for (i = 0; i < cnt; i++, raoffset++, rabsize++) {
		if (inmem(vp, *raoffset))
			continue;
		rabp = getblk(vp, *raoffset, *rabsize, GETBLK_KVABIO, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			rabp->b_flags &= ~(B_ERROR | B_EINTR |
					   B_INVAL | B_NOTMETA);
			rabp->b_flags |= (bflags & ~B_KVABIO);
			rabp->b_cmd = BUF_CMD_READ;
			vfs_busy_pages(vp, rabp);
			BUF_KERNPROC(rabp);
			vn_strategy(vp, &rabp->b_bio1);
		} else {
			brelse(rabp);
		}
	}
	if (readwait)
		rv = biowait(&bp->b_bio1, "biord");
	return (rv);
}

/*
 * bwrite:
 *
 *	Synchronous write, waits for completion.
 *
 *	Write, release buffer on completion.  (Done by iodone
 *	if async).  Do not bother writing anything if the buffer
 *	is invalid.
 *
 *	Note that we set B_CACHE here, indicating that buffer is
 *	fully valid and thus cacheable.  This is true even of NFS
 *	now so we set it generally.  This could be set either here
 *	or in biodone() since the I/O is synchronous.  We put it
 *	here.
 */
int
bwrite(struct buf *bp)
{
	int error;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (BUF_LOCKINUSE(bp) == 0)
		panic("bwrite: buffer is not busy???");

	/*
	 * NOTE: We no longer mark the buffer clear prior to the vn_strategy()
	 *	 call because it will remove the buffer from the vnode's
	 *	 dirty buffer list prematurely and possibly cause filesystem
	 *	 checks to race buffer flushes.  This is now handled in
	 *	 bpdone().
	 *
	 *	 bundirty(bp); REMOVED
	 */

	bp->b_flags &= ~(B_ERROR | B_EINTR);
	bp->b_flags |= B_CACHE;
	bp->b_cmd = BUF_CMD_WRITE;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	vfs_busy_pages(bp->b_vp, bp);

	/*
	 * Normal bwrites pipeline writes.  NOTE: b_bufsize is only
	 * valid for vnode-backed buffers.
	 */
	bsetrunningbufspace(bp, bp->b_bufsize);
	vn_strategy(bp->b_vp, &bp->b_bio1);
	error = biowait(&bp->b_bio1, "biows");
	brelse(bp);

	return (error);
}

/*
 * bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf *bp)
{
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (BUF_LOCKINUSE(bp) == 0)
		panic("bawrite: buffer is not busy???");

	/*
	 * NOTE: We no longer mark the buffer clear prior to the vn_strategy()
	 *	 call because it will remove the buffer from the vnode's
	 *	 dirty buffer list prematurely and possibly cause filesystem
	 *	 checks to race buffer flushes.  This is now handled in
	 *	 bpdone().
	 *
	 *	 bundirty(bp); REMOVED
	 */
	bp->b_flags &= ~(B_ERROR | B_EINTR);
	bp->b_flags |= B_CACHE;
	bp->b_cmd = BUF_CMD_WRITE;
	KKASSERT(bp->b_bio1.bio_done == NULL);
	vfs_busy_pages(bp->b_vp, bp);

	/*
	 * Normal bwrites pipeline writes.  NOTE: b_bufsize is only
	 * valid for vnode-backed buffers.
	 */
	bsetrunningbufspace(bp, bp->b_bufsize);
	BUF_KERNPROC(bp);
	vn_strategy(bp->b_vp, &bp->b_bio1);
}

/*
 * bdwrite:
 *
 *	Delayed write. (Buffer is marked dirty).  Do not bother writing
 *	anything if the buffer is marked invalid.
 *
 *	Note that since the buffer must be completely valid, we can safely
 *	set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 *	biodone() in order to prevent getblk from writing the buffer
 *	out synchronously.
 */
void
bdwrite(struct buf *bp)
{
	if (BUF_LOCKINUSE(bp) == 0)
		panic("bdwrite: buffer is not busy");

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bdirty(bp);

	dsched_buf_enter(bp);	/* might stack */

	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}

	/*
	 * Because the underlying pages may still be mapped and
	 * writable trying to set the dirty buffer (b_dirtyoff/end)
	 * range here will be inaccurate.
	 *
	 * However, we must still clean the pages to satisfy the
	 * vnode_pager and pageout daemon, so they think the pages
	 * have been "cleaned".  What has really occurred is that
	 * they've been earmarked for later writing by the buffer
	 * cache.
	 *
	 * So we get the b_dirtyoff/end update but will not actually
	 * depend on it (NFS that is) until the pages are busied for
	 * writing later on.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}
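
/*
 * Illustrative sketch (not from the original source): the common
 * read/modify/delayed-write cycle as seen from a filesystem, assuming
 * the usual bread() wrapper declared in <sys/buf.h>.  'vp', 'loffset'
 * and 'blksize' are hypothetical caller state; error handling trimmed.
 */
#if 0
	struct buf *bp;
	int error;

	error = bread(vp, loffset, blksize, &bp);
	if (error) {
		brelse(bp);		/* bread() still returns a buffer */
		return (error);
	}
	/* ... modify bp->b_data ... */
	bdwrite(bp);			/* mark B_DELWRI and release */
#endif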

/*
 * Fake write - return pages to VM system as dirty, leave the buffer clean.
 * This is used by tmpfs.
 *
 * It is important for any VFS using this routine to NOT use it for
 * IO_SYNC or IO_ASYNC operations which occur when the system really
 * wants to flush VM pages to backing store.
 */
void
buwrite(struct buf *bp)
{
	vm_page_t m;
	int i;

	/*
	 * Only works for VMIO buffers.  If the buffer is already
	 * marked for delayed-write we can't avoid the bdwrite().
	 */
	if ((bp->b_flags & B_VMIO) == 0 || (bp->b_flags & B_DELWRI)) {
		bdwrite(bp);
		return;
	}

	/*
	 * Mark as needing a commit.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; i++) {
		m = bp->b_xio.xio_pages[i];
		vm_page_need_commit(m);
	}
	bqrelse(bp);
}

/*
 * bdirty:
 *
 *	Turn buffer into delayed write request by marking it B_DELWRI.
 *	B_RELBUF and B_NOCACHE must be cleared.
 *
 *	We reassign the buffer to itself to properly update it in the
 *	dirty/clean lists.
 *
 *	Must be called from a critical section.
 *	The buffer must be on BQUEUE_NONE.
 */
void
bdirty(struct buf *bp)
{
	KASSERT(bp->b_qindex == BQUEUE_NONE,
		("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	if (bp->b_flags & B_NOCACHE) {
		kprintf("bdirty: clearing B_NOCACHE on buf %p\n", bp);
		bp->b_flags &= ~B_NOCACHE;
	}
	if (bp->b_flags & B_INVAL) {
		kprintf("bdirty: warning, dirtying invalid buffer %p\n", bp);
	}
	bp->b_flags &= ~B_RELBUF;

	if ((bp->b_flags & B_DELWRI) == 0) {
		lwkt_gettoken(&bp->b_vp->v_token);
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp);
		lwkt_reltoken(&bp->b_vp->v_token);

		atomic_add_long(&dirtybufcount, 1);
		atomic_add_long(&dirtykvaspace, bp->b_kvasize);
		atomic_add_long(&dirtybufspace, bp->b_bufsize);
		if (bp->b_flags & B_HEAVY) {
			atomic_add_long(&dirtybufcounthw, 1);
			atomic_add_long(&dirtybufspacehw, bp->b_bufsize);
		}
		bd_heatup();
	}
}

/*
 * Set B_HEAVY, indicating that this is a heavy-weight buffer that
 * needs to be flushed with a different buf_daemon thread to avoid
 * deadlocks.  B_HEAVY also imposes restrictions in getnewbuf().
 */
void
bheavy(struct buf *bp)
{
	if ((bp->b_flags & B_HEAVY) == 0) {
		bp->b_flags |= B_HEAVY;
		if (bp->b_flags & B_DELWRI) {
			atomic_add_long(&dirtybufcounthw, 1);
			atomic_add_long(&dirtybufspacehw, bp->b_bufsize);
		}
	}
}

/*
 * bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Must be called from a critical section.
 *
 *	The buffer is typically on BQUEUE_NONE but there is one case in
 *	brelse() that calls this function after placing the buffer on
 *	a different queue.
 */
void
bundirty(struct buf *bp)
{
	if (bp->b_flags & B_DELWRI) {
		lwkt_gettoken(&bp->b_vp->v_token);
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp);
		lwkt_reltoken(&bp->b_vp->v_token);

		atomic_add_long(&dirtybufcount, -1);
		atomic_add_long(&dirtykvaspace, -bp->b_kvasize);
		atomic_add_long(&dirtybufspace, -bp->b_bufsize);
		if (bp->b_flags & B_HEAVY) {
			atomic_add_long(&dirtybufcounthw, -1);
			atomic_add_long(&dirtybufspacehw, -bp->b_bufsize);
		}
		bd_signal(bp->b_bufsize);
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 * Set the b_runningbufspace field, used to track how much I/O is
 * in progress at any given moment.
 */
void
bsetrunningbufspace(struct buf *bp, int bytes)
{
	bp->b_runningbufspace = bytes;
	if (bytes) {
		atomic_add_long(&runningbufspace, bytes);
		atomic_add_long(&runningbufcount, 1);
	}
}
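
/*
 * Illustrative sketch (not from the original source): queueing a delayed
 * write that must be flushed by the heavy-weight daemon, e.g. a buffer
 * whose flush can itself block on further buffer-cache work.  'bp' is a
 * locked, valid buffer owned by the caller.
 */
#if 0
	bheavy(bp);		/* route to BQUEUE_DIRTY_HW / buf_daemon_hw */
	bdwrite(bp);		/* mark dirty and release */
#endif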

/*
 * brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf *bp)
{
	struct bufpcpu *pcpu;
#ifdef INVARIANTS
	int saved_flags = bp->b_flags;
#endif

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
		("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	/*
	 * If B_NOCACHE is set we are being asked to destroy the buffer and
	 * its backing store.  Clear B_DELWRI.
	 *
	 * B_NOCACHE is set in two cases: (1) when the caller really wants
	 * to destroy the buffer and backing store and (2) when the caller
	 * wants to destroy the buffer and backing store after a write
	 * completes.
	 */
	if ((bp->b_flags & (B_NOCACHE|B_DELWRI)) == (B_NOCACHE|B_DELWRI)) {
		bundirty(bp);
	}

	if ((bp->b_flags & (B_INVAL | B_DELWRI)) == B_DELWRI) {
		/*
		 * A re-dirtied buffer is only subject to destruction
		 * by B_INVAL.  B_ERROR and B_NOCACHE are ignored.
		 */
		/* leave buffer intact */
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
		   (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed read or we were asked to free or not
		 * cache the buffer.  This path is reached with B_DELWRI
		 * set only if B_INVAL is already set.  B_NOCACHE governs
		 * backing store destruction.
		 *
		 * NOTE: HAMMER will set B_LOCKED in buf_deallocate if the
		 * buffer cannot be immediately freed.
		 */
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI) {
			atomic_add_long(&dirtybufcount, -1);
			atomic_add_long(&dirtykvaspace, -bp->b_kvasize);
			atomic_add_long(&dirtybufspace, -bp->b_bufsize);
			if (bp->b_flags & B_HEAVY) {
				atomic_add_long(&dirtybufcounthw, -1);
				atomic_add_long(&dirtybufspacehw,
						-bp->b_bufsize);
			}
			bd_signal(bp->b_bufsize);
		}
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI or B_LOCKED is set,
	 * or if b_refs is non-zero.
	 *
	 * If vfs_vmio_release() is called with either bit set, the
	 * underlying pages may wind up getting freed causing a previous
	 * write (bdwrite()) to get 'lost' because pages associated with
	 * a B_DELWRI bp are marked clean.  Pages associated with a
	 * B_LOCKED buffer may be mapped by the filesystem.
	 *
	 * If we want to release the buffer ourselves (rather than the
	 * originator asking us to release it), give the originator a
	 * chance to countermand the release by setting B_LOCKED.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if ((bp->b_flags & (B_DELWRI | B_LOCKED)) || bp->b_refs) {
		bp->b_flags &= ~B_RELBUF;
	} else if (vm_page_count_min(0)) {
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);		/* can set B_LOCKED */
		if (bp->b_flags & (B_DELWRI | B_LOCKED))
			bp->b_flags &= ~B_RELBUF;
		else
			bp->b_flags |= B_RELBUF;
	}

	/*
	 * Make sure b_cmd is clear.  It may have already been cleared by
	 * biodone().
	 *
	 * At this point destroying the buffer is governed by the B_INVAL
	 * or B_RELBUF flags.
	 */
	bp->b_cmd = BUF_CMD_DONE;
	dsched_buf_exit(bp);

	/*
	 * VMIO buffer rundown.  Make sure the VM page array is restored
	 * after an I/O may have replaced some of the pages with bogus pages
	 * in order to not destroy dirty pages in a fill-in read.
	 *
	 * Note that due to the code above, if a buffer is marked B_DELWRI
	 * then the B_RELBUF and B_NOCACHE bits will always be clear.
	 * B_INVAL may still be set, however.
	 *
	 * For clean buffers, B_INVAL or B_RELBUF will destroy the buffer
	 * but not the backing store.  B_NOCACHE will destroy the backing
	 * store.
	 *
	 * Note that dirty NFS buffers contain byte-granular write ranges
	 * and should not be destroyed w/ B_INVAL even if the backing store
	 * is left intact.
	 */
	if (bp->b_flags & B_VMIO) {
		/*
		 * Rundown for VMIO buffers which are not dirty NFS buffers.
		 */
		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then b_data pointer may not be page-aligned.
		 * But our b_xio.xio_pages array *IS* page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */

		resid = bp->b_bufsize;
		foff = bp->b_loffset;

		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			m = bp->b_xio.xio_pages[i];

			/*
			 * If we hit a bogus page, fixup *all* of them
			 * now.  Note that we left these pages wired
			 * when we removed them so they had better exist,
			 * and they cannot be ripped out from under us so
			 * no critical section protection is necessary.
			 */
			if (m == bogus_page) {
				obj = vp->v_object;
				poff = OFF_TO_IDX(bp->b_loffset);

				vm_object_hold(obj);
				for (j = i; j < bp->b_xio.xio_npages; j++) {
					vm_page_t mtmp;

					mtmp = bp->b_xio.xio_pages[j];
					if (mtmp == bogus_page) {
						if ((bp->b_flags & B_HASBOGUS) == 0)
							panic("brelse: bp %p corrupt bogus", bp);
						mtmp = vm_page_lookup(obj, poff + j);
						if (!mtmp)
							panic("brelse: bp %p page %d missing", bp, j);
						bp->b_xio.xio_pages[j] = mtmp;
					}
				}
				vm_object_drop(obj);

				if ((bp->b_flags & B_HASBOGUS) ||
				    (bp->b_flags & B_INVAL) == 0) {
					pmap_qenter_noinval(
					    trunc_page((vm_offset_t)bp->b_data),
					    bp->b_xio.xio_pages,
					    bp->b_xio.xio_npages);
					bp->b_flags &= ~B_HASBOGUS;
					bp->b_flags |= B_KVABIO;
					bkvareset(bp);
				}
				m = bp->b_xio.xio_pages[i];
			}

			/*
			 * Invalidate the backing store if B_NOCACHE is set
			 * (e.g. used with vinvalbuf()).  If this is NFS
			 * we impose a requirement that the block size be
			 * a multiple of PAGE_SIZE and create a temporary
			 * hack to basically invalidate the whole page.  The
			 * problem is that NFS uses really odd buffer sizes
			 * especially when tracking piecemeal writes and
			 * it also vinvalbuf()'s a lot, which would result
			 * in only partial page validation and invalidation
			 * here.  If the file page is mmap()'d, however,
			 * all the valid bits get set so after we invalidate
			 * here we would end up with weird m->valid values
			 * like 0xfc.  nfs_getpages() can't handle this so
			 * we clear all the valid bits for the NFS case
			 * instead of just some of them.
			 *
			 * The real bug is the VM system having to set m->valid
			 * to VM_PAGE_BITS_ALL for faulted-in pages, which
			 * itself is an artifact of the whole 512-byte
			 * granular mess that exists to support odd block
			 * sizes and UFS meta-data block sizes (e.g. 6144).
			 * A complete rewrite is required.
			 *
			 * XXX
			 */
			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid;

				presid = PAGE_SIZE - poffset;
				if (bp->b_vp->v_tag == VT_NFS &&
				    bp->b_vp->v_type == VREG) {
					; /* entire page */
				} else if (presid > resid) {
					presid = resid;
				}
				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_set_invalid(m, poffset, presid);

				/*
				 * Also make sure any swap cache is removed
				 * as it is now stale (HAMMER in particular
				 * uses B_NOCACHE to deal with buffer
				 * aliasing).
				 */
				swap_pager_unswapped(m);
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	} else {
		/*
		 * Rundown for non-VMIO buffers.
		 */
		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			KKASSERT (LIST_FIRST(&bp->b_dep) == NULL);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	if (bp->b_qindex != BQUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/*
	 * Figure out the correct queue to place the cleaned up buffer on.
	 * Buffers placed in the EMPTY or EMPTYKVA had better already be
	 * disassociated from their vnode.
	 *
	 * Return the buffer to its original pcpu area
	 */
	pcpu = &bufpcpu[bp->b_qcpu];
	spin_lock(&pcpu->spin);

	if (bp->b_flags & B_LOCKED) {
		/*
		 * Buffers that are locked are placed in the locked queue
		 * immediately, regardless of their state.
		 */
		bp->b_qindex = BQUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else if (bp->b_bufsize == 0) {
		/*
		 * Buffers with no memory.  Due to conditionals near the top
		 * of brelse() such buffers should probably already be
		 * marked B_INVAL and disassociated from their vnode.
		 */
		bp->b_flags |= B_INVAL;
		KASSERT(bp->b_vp == NULL,
			("bp1 %p flags %08x/%08x vnode %p "
			 "unexpectedly still associated!",
			 bp, saved_flags, bp->b_flags, bp->b_vp));
		KKASSERT((bp->b_flags & B_HASHED) == 0);
		bp->b_qindex = BQUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) {
		/*
		 * Buffers with junk contents.  Again these buffers had better
		 * already be disassociated from their vnode.
		 */
		KASSERT(bp->b_vp == NULL,
			("bp2 %p flags %08x/%08x vnode %p unexpectedly "
			 "still associated!",
			 bp, saved_flags, bp->b_flags, bp->b_vp));
		KKASSERT((bp->b_flags & B_HASHED) == 0);
		bp->b_flags |= B_INVAL;
		bp->b_qindex = BQUEUE_CLEAN;
		TAILQ_INSERT_HEAD(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else {
		/*
		 * Remaining buffers.  These buffers are still associated with
		 * their vnode.
		 */
		switch(bp->b_flags & (B_DELWRI|B_HEAVY)) {
		case B_DELWRI:
			bp->b_qindex = BQUEUE_DIRTY;
			TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
					  bp, b_freelist);
			break;
		case B_DELWRI | B_HEAVY:
			bp->b_qindex = BQUEUE_DIRTY_HW;
			TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
					  bp, b_freelist);
			break;
		default:
			/*
			 * NOTE: Buffers are always placed at the end of the
			 * queue.  If B_AGE is not set the buffer will cycle
			 * through the queue twice.
			 */
			bp->b_qindex = BQUEUE_CLEAN;
			TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
					  bp, b_freelist);
			break;
		}
	}
	spin_unlock(&pcpu->spin);

	/*
	 * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
	 * on the correct queue but we have not yet unlocked it.
	 */
	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI))
		bundirty(bp);

	/*
	 * The bp is on an appropriate queue unless locked.  If it is not
	 * locked or dirty we can wakeup threads waiting for buffer space.
	 *
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */
	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0)
		bufcountwakeup();

	/*
	 * Something we can maybe free or reuse
	 */
	if (bp->b_bufsize || bp->b_kvasize)
		bufspacewakeup();

	/*
	 * Clean up temporary flags and unlock the buffer.
	 */
	bp->b_flags &= ~(B_NOCACHE | B_RELBUF | B_DIRECT);
	BUF_UNLOCK(bp);
}

/*
 * bqrelse:
 *
 *	Release a buffer back to the appropriate queue but do not try to free
 *	it.  The buffer is expected to be used again soon.
 *
 *	bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 *	biodone() to requeue an async I/O on completion.  It is also used when
 *	known good buffers need to be requeued but we think we may need the data
 *	again soon.
 *
 *	XXX we should be able to leave the B_RELBUF hint set on completion.
 */
void
bqrelse(struct buf *bp)
{
	struct bufpcpu *pcpu;

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
		("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (bp->b_qindex != BQUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	buf_act_advance(bp);

	pcpu = &bufpcpu[bp->b_qcpu];
	spin_lock(&pcpu->spin);

	if (bp->b_flags & B_LOCKED) {
		/*
		 * Locked buffers are released to the locked queue.  However,
		 * if the buffer is dirty it will first go into the dirty
		 * queue and later on after the I/O completes successfully it
		 * will be released to the locked queue.
		 */
		bp->b_qindex = BQUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else if (bp->b_flags & B_DELWRI) {
		bp->b_qindex = (bp->b_flags & B_HEAVY) ?
			       BQUEUE_DIRTY_HW : BQUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	} else if (vm_page_count_min(0)) {
		/*
		 * We are too low on memory, we have to try to free the
		 * buffer (most importantly: the wired pages making up its
		 * backing store) *now*.
		 */
		spin_unlock(&pcpu->spin);
		brelse(bp);
		return;
	} else {
		bp->b_qindex = BQUEUE_CLEAN;
		TAILQ_INSERT_TAIL(&pcpu->bufqueues[bp->b_qindex],
				  bp, b_freelist);
	}
	spin_unlock(&pcpu->spin);

	/*
	 * We have now placed the buffer on the proper queue, but have yet
	 * to unlock it.
	 */
	if ((bp->b_flags & B_LOCKED) == 0 &&
	    ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)) {
		bufcountwakeup();
	}

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	/*
	 * Final cleanup and unlock.  Clear bits that are only used while a
	 * buffer is actively locked.
	 */
	bp->b_flags &= ~(B_NOCACHE | B_RELBUF);
	dsched_buf_exit(bp);
	BUF_UNLOCK(bp);
}

/*
 * Hold a buffer, preventing it from being reused.  This will prevent
 * normal B_RELBUF operations on the buffer but will not prevent B_INVAL
 * operations.  If a B_INVAL operation occurs the buffer will remain held
 * but the underlying pages may get ripped out.
 *
 * These functions are typically used in VOP_READ/VOP_WRITE functions
 * to hold a buffer during a copyin or copyout, preventing deadlocks
 * or recursive lock panics when read()/write() is used over mmap()'d
 * space.
 *
 * NOTE: bqhold() requires that the buffer be locked at the time of the
 *	 hold.  bqdrop() has no requirements other than the buffer having
 *	 previously been held.
 */
void
bqhold(struct buf *bp)
{
	atomic_add_int(&bp->b_refs, 1);
}

void
bqdrop(struct buf *bp)
{
	KKASSERT(bp->b_refs > 0);
	atomic_add_int(&bp->b_refs, -1);
}
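
/*
 * Illustrative sketch (not from the original source): holding a buffer
 * across a copyout in a VOP_READ path, per the comment above.  'offset',
 * 'n' and 'uio' are hypothetical locals of the caller; the buffer is
 * assumed to be locked when bqhold() is called.
 */
#if 0
	bqhold(bp);			/* pin against B_RELBUF reuse */
	error = uiomove(bp->b_data + offset, n, uio);
	bqdrop(bp);
	bqrelse(bp);
#endif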

/*
 * Return backing pages held by the buffer 'bp' back to the VM system.
 * This routine is called when the bp is invalidated, released, or
 * reused.
 *
 * The KVA mapping (b_data) for the underlying pages is removed by
 * this function.
 *
 * WARNING! This routine is integral to the low memory critical path
 *	    when a buffer is B_RELBUF'd.  If the system has a severe page
 *	    deficit we need to get the page(s) onto the PQ_FREE or PQ_CACHE
 *	    queues so they can be reused in the current pageout daemon
 *	    pass.
 */
static void
vfs_vmio_release(struct buf *bp)
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_xio.xio_npages; i++) {
		m = bp->b_xio.xio_pages[i];
		bp->b_xio.xio_pages[i] = NULL;

		/*
		 * We need to own the page in order to safely unwire it.
		 */
		vm_page_busy_wait(m, FALSE, "vmiopg");

		/*
		 * The VFS is telling us this is not a meta-data buffer
		 * even if it is backed by a block device.
		 */
		if (bp->b_flags & B_NOTMETA)
			vm_page_flag_set(m, PG_NOTMETA);

		/*
		 * This is a very important bit of code.  We try to track
		 * VM page use whether the pages are wired into the buffer
		 * cache or not.  While wired into the buffer cache the
		 * bp tracks the act_count.
		 *
		 * We can choose to place unwired pages on the inactive
		 * queue (0) or active queue (1).  If we place too many
		 * on the active queue the queue will cycle the act_count
		 * on pages we'd like to keep, just from single-use pages
		 * (such as when doing a tar-up or file scan).
		 */
		if (bp->b_act_count < vm_cycle_point)
			vm_page_unwire(m, 0);
		else
			vm_page_unwire(m, 1);

		/*
		 * If the wire_count has dropped to 0 we may need to take
		 * further action before unbusying the page.
		 *
		 * WARNING: vm_page_try_*() also checks PG_NEED_COMMIT for us.
		 */
		if (m->wire_count == 0) {
			if (bp->b_flags & B_DIRECT) {
				/*
				 * Attempt to free the page if B_DIRECT is
				 * set, the caller does not desire the page
				 * to be cached.
				 */
				vm_page_wakeup(m);
				vm_page_try_to_free(m);
			} else if ((bp->b_flags & B_NOTMETA) ||
				   vm_page_count_min(0)) {
				/*
				 * Attempt to move the page to PQ_CACHE
				 * if B_NOTMETA is set.  This flag is set
				 * by HAMMER to remove one of the two pages
				 * present when double buffering is enabled.
				 *
				 * Attempt to move the page to PQ_CACHE
				 * If we have a severe page deficit.  This
				 * will cause buffer cache operations related
				 * to pageouts to recycle the related pages
				 * in order to avoid a low memory deadlock.
				 */
				m->act_count = bp->b_act_count;
				vm_page_try_to_cache(m);
			} else {
				/*
				 * Nominal case, leave the page on the
				 * queue the original unwiring placed it on
				 * (active or inactive).
				 */
				m->act_count = bp->b_act_count;
				vm_page_wakeup(m);
			}
		} else {
			vm_page_wakeup(m);
		}
	}

	/*
	 * Zero out the pmap pte's for the mapping, but don't bother
	 * invalidating the TLB.  The range will be properly invalidated
	 * when new pages are entered into the mapping.
	 *
	 * This in particular reduces tmpfs tear-down overhead and reduces
	 * buffer cache re-use overhead (one invalidation sequence instead
	 * of two per re-use).
	 */
	pmap_qremove_noinval(trunc_page((vm_offset_t)bp->b_data),
			     bp->b_xio.xio_npages);
	CPUMASK_ASSZERO(bp->b_cpumask);
	if (bp->b_bufsize) {
		atomic_add_long(&bufspace, -bp->b_bufsize);
		bp->b_bufsize = 0;
		bufspacewakeup();
	}
	bp->b_xio.xio_npages = 0;
	bp->b_flags &= ~B_VMIO;
	KKASSERT (LIST_FIRST(&bp->b_dep) == NULL);
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Find and initialize a new buffer header, freeing up existing buffers
 * in the bufqueues as necessary.  The new buffer is returned locked.
 *
 * Important:  B_INVAL is not set.  If the caller wishes to throw the
 * buffer away, the caller must set B_INVAL prior to calling brelse().
 *
 * We block if:
 *	We have insufficient buffer headers
 *	We have insufficient buffer space
 *
 * To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 * Instead we ask the buf daemon to do it for us.  We attempt to
 * avoid piecemeal wakeups of the pageout daemon.
 */
struct buf *
getnewbuf(int blkflags, int slptimeo, int size, int maxsize)
{
	struct bufpcpu *pcpu;
	struct buf *bp;
	struct buf *nbp;
	int nqindex;
	int nqcpu;
	int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0;
	int maxloops = 200000;
	int restart_reason = 0;
	struct buf *restart_bp = NULL;
	static char flushingbufs[MAXCPU];
	char *flushingp;

	/*
	 * We can't afford to block since we might be holding a vnode lock,
	 * which may prevent system daemons from running.  We deal with
	 * low-memory situations by proactively returning memory and running
	 * async I/O rather than sync I/O.
1910 */ 1911 1912 ++getnewbufcalls; 1913 nqcpu = mycpu->gd_cpuid; 1914 flushingp = &flushingbufs[nqcpu]; 1915 restart: 1916 if (bufspace < lobufspace) 1917 *flushingp = 0; 1918 1919 if (debug_bufbio && --maxloops == 0) 1920 panic("getnewbuf, excessive loops on cpu %d restart %d (%p)", 1921 mycpu->gd_cpuid, restart_reason, restart_bp); 1922 1923 /* 1924 * Setup for scan. If we do not have enough free buffers, 1925 * we setup a degenerate case that immediately fails. Note 1926 * that if we are specially marked process, we are allowed to 1927 * dip into our reserves. 1928 * 1929 * The scanning sequence is nominally: EMPTY->CLEAN 1930 */ 1931 pcpu = &bufpcpu[nqcpu]; 1932 spin_lock(&pcpu->spin); 1933 1934 /* 1935 * Prime the scan for this cpu. Locate the first buffer to 1936 * check. If we are flushing buffers we must skip the 1937 * EMPTY queue. 1938 */ 1939 nqindex = BQUEUE_EMPTY; 1940 nbp = TAILQ_FIRST(&pcpu->bufqueues[BQUEUE_EMPTY]); 1941 if (nbp == NULL || *flushingp) { 1942 nqindex = BQUEUE_CLEAN; 1943 nbp = TAILQ_FIRST(&pcpu->bufqueues[BQUEUE_CLEAN]); 1944 } 1945 1946 /* 1947 * Run scan, possibly freeing data and/or kva mappings on the fly, 1948 * depending. 1949 * 1950 * WARNING! spin is held! 1951 */ 1952 while ((bp = nbp) != NULL) { 1953 int qindex = nqindex; 1954 1955 nbp = TAILQ_NEXT(bp, b_freelist); 1956 1957 /* 1958 * BQUEUE_CLEAN - B_AGE special case. If not set the bp 1959 * cycles through the queue twice before being selected. 1960 */ 1961 if (qindex == BQUEUE_CLEAN && 1962 (bp->b_flags & B_AGE) == 0 && nbp) { 1963 bp->b_flags |= B_AGE; 1964 TAILQ_REMOVE(&pcpu->bufqueues[qindex], 1965 bp, b_freelist); 1966 TAILQ_INSERT_TAIL(&pcpu->bufqueues[qindex], 1967 bp, b_freelist); 1968 continue; 1969 } 1970 1971 /* 1972 * Calculate next bp ( we can only use it if we do not block 1973 * or do other fancy things ). 1974 */ 1975 if (nbp == NULL) { 1976 switch(qindex) { 1977 case BQUEUE_EMPTY: 1978 nqindex = BQUEUE_CLEAN; 1979 if ((nbp = TAILQ_FIRST(&pcpu->bufqueues[BQUEUE_CLEAN]))) 1980 break; 1981 /* fall through */ 1982 case BQUEUE_CLEAN: 1983 /* 1984 * nbp is NULL. 1985 */ 1986 break; 1987 } 1988 } 1989 1990 /* 1991 * Sanity Checks 1992 */ 1993 KASSERT(bp->b_qindex == qindex, 1994 ("getnewbuf: inconsistent queue %d bp %p", qindex, bp)); 1995 1996 /* 1997 * Note: we no longer distinguish between VMIO and non-VMIO 1998 * buffers. 1999 */ 2000 KASSERT((bp->b_flags & B_DELWRI) == 0, 2001 ("delwri buffer %p found in queue %d", bp, qindex)); 2002 2003 /* 2004 * Do not try to reuse a buffer with a non-zero b_refs. 2005 * This is an unsynchronized test. A synchronized test 2006 * is also performed after we lock the buffer. 2007 */ 2008 if (bp->b_refs) 2009 continue; 2010 2011 /* 2012 * Start freeing the bp. This is somewhat involved. nbp 2013 * remains valid only for BQUEUE_EMPTY bp's. Buffers 2014 * on the clean list must be disassociated from their 2015 * current vnode. Buffers on the empty lists have 2016 * already been disassociated. 2017 * 2018 * b_refs is checked after locking along with queue changes. 2019 * We must check here to deal with zero->nonzero transitions 2020 * made by the owner of the buffer lock, which is used by 2021 * VFS's to hold the buffer while issuing an unlocked 2022 * uiomove()s. We cannot invalidate the buffer's pages 2023 * for this case. Once we successfully lock a buffer the 2024 * only 0->1 transitions of b_refs will occur via findblk(). 
2025 * 2026 * We must also check for queue changes after successful 2027 * locking as the current lock holder may dispose of the 2028 * buffer and change its queue. 2029 */ 2030 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 2031 spin_unlock(&pcpu->spin); 2032 tsleep(&bd_request, 0, "gnbxxx", (hz + 99) / 100); 2033 restart_reason = 1; 2034 restart_bp = bp; 2035 goto restart; 2036 } 2037 if (bp->b_qindex != qindex || bp->b_refs) { 2038 spin_unlock(&pcpu->spin); 2039 BUF_UNLOCK(bp); 2040 restart_reason = 2; 2041 restart_bp = bp; 2042 goto restart; 2043 } 2044 bremfree_locked(bp); 2045 spin_unlock(&pcpu->spin); 2046 2047 /* 2048 * Dependancies must be handled before we disassociate the 2049 * vnode. 2050 * 2051 * NOTE: HAMMER will set B_LOCKED if the buffer cannot 2052 * be immediately disassociated. HAMMER then becomes 2053 * responsible for releasing the buffer. 2054 * 2055 * NOTE: spin is UNLOCKED now. 2056 */ 2057 if (LIST_FIRST(&bp->b_dep) != NULL) { 2058 buf_deallocate(bp); 2059 if (bp->b_flags & B_LOCKED) { 2060 bqrelse(bp); 2061 restart_reason = 3; 2062 restart_bp = bp; 2063 goto restart; 2064 } 2065 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 2066 } 2067 2068 /* 2069 * CLEAN buffers have content or associations that must be 2070 * cleaned out if not repurposing. 2071 */ 2072 if (qindex == BQUEUE_CLEAN) { 2073 if (bp->b_flags & B_VMIO) 2074 vfs_vmio_release(bp); 2075 if (bp->b_vp) 2076 brelvp(bp); 2077 } 2078 2079 /* 2080 * NOTE: nbp is now entirely invalid. We can only restart 2081 * the scan from this point on. 2082 * 2083 * Get the rest of the buffer freed up. b_kva* is still 2084 * valid after this operation. 2085 */ 2086 KASSERT(bp->b_vp == NULL, 2087 ("bp3 %p flags %08x vnode %p qindex %d " 2088 "unexpectededly still associated!", 2089 bp, bp->b_flags, bp->b_vp, qindex)); 2090 KKASSERT((bp->b_flags & B_HASHED) == 0); 2091 2092 if (bp->b_bufsize) 2093 allocbuf(bp, 0); 2094 2095 if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN | B_HASHED)) { 2096 kprintf("getnewbuf: caught bug vp queue " 2097 "%p/%08x qidx %d\n", 2098 bp, bp->b_flags, qindex); 2099 brelvp(bp); 2100 } 2101 bp->b_flags = B_BNOCLIP; 2102 bp->b_cmd = BUF_CMD_DONE; 2103 bp->b_vp = NULL; 2104 bp->b_error = 0; 2105 bp->b_resid = 0; 2106 bp->b_bcount = 0; 2107 bp->b_xio.xio_npages = 0; 2108 bp->b_dirtyoff = bp->b_dirtyend = 0; 2109 bp->b_act_count = ACT_INIT; 2110 reinitbufbio(bp); 2111 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 2112 buf_dep_init(bp); 2113 if (blkflags & GETBLK_BHEAVY) 2114 bp->b_flags |= B_HEAVY; 2115 2116 if (bufspace >= hibufspace) 2117 *flushingp = 1; 2118 if (bufspace < lobufspace) 2119 *flushingp = 0; 2120 if (*flushingp) { 2121 bp->b_flags |= B_INVAL; 2122 brelse(bp); 2123 restart_reason = 5; 2124 restart_bp = bp; 2125 goto restart; 2126 } 2127 2128 /* 2129 * b_refs can transition to a non-zero value while we hold 2130 * the buffer locked due to a findblk(). Our brelvp() above 2131 * interlocked any future possible transitions due to 2132 * findblk()s. 2133 * 2134 * If we find b_refs to be non-zero we can destroy the 2135 * buffer's contents but we cannot yet reuse the buffer. 2136 */ 2137 if (bp->b_refs) { 2138 bp->b_flags |= B_INVAL; 2139 brelse(bp); 2140 restart_reason = 6; 2141 restart_bp = bp; 2142 2143 goto restart; 2144 } 2145 2146 /* 2147 * We found our buffer! 2148 */ 2149 break; 2150 } 2151 2152 /* 2153 * If we exhausted our list, iterate other cpus. If that fails, 2154 * sleep as appropriate. We may have to wakeup various daemons 2155 * and write out some dirty buffers. 
2156 * 2157 * Generally we are sleeping due to insufficient buffer space. 2158 * 2159 * NOTE: spin is held if bp is NULL, else it is not held. 2160 */ 2161 if (bp == NULL) { 2162 int flags; 2163 char *waitmsg; 2164 2165 spin_unlock(&pcpu->spin); 2166 2167 nqcpu = (nqcpu + 1) % ncpus; 2168 if (nqcpu != mycpu->gd_cpuid) { 2169 restart_reason = 7; 2170 restart_bp = bp; 2171 goto restart; 2172 } 2173 2174 if (bufspace >= hibufspace) { 2175 waitmsg = "bufspc"; 2176 flags = VFS_BIO_NEED_BUFSPACE; 2177 } else { 2178 waitmsg = "newbuf"; 2179 flags = VFS_BIO_NEED_ANY; 2180 } 2181 2182 bd_speedup(); /* heeeelp */ 2183 atomic_set_int(&needsbuffer, flags); 2184 while (needsbuffer & flags) { 2185 int value; 2186 2187 tsleep_interlock(&needsbuffer, 0); 2188 value = atomic_fetchadd_int(&needsbuffer, 0); 2189 if (value & flags) { 2190 if (tsleep(&needsbuffer, PINTERLOCKED|slpflags, 2191 waitmsg, slptimeo)) { 2192 return (NULL); 2193 } 2194 } 2195 } 2196 } else { 2197 /* 2198 * We finally have a valid bp. Reset b_data. 2199 * 2200 * (spin is not held) 2201 */ 2202 bp->b_data = bp->b_kvabase; 2203 } 2204 return(bp); 2205 } 2206 2207 /* 2208 * buf_daemon: 2209 * 2210 * Buffer flushing daemon. Buffers are normally flushed by the 2211 * update daemon but if it cannot keep up this process starts to 2212 * take the load in an attempt to prevent getnewbuf() from blocking. 2213 * 2214 * Once a flush is initiated it does not stop until the number 2215 * of buffers falls below lodirtybuffers, but we will wake up anyone 2216 * waiting at the mid-point. 2217 */ 2218 static struct kproc_desc buf_kp = { 2219 "bufdaemon", 2220 buf_daemon, 2221 &bufdaemon_td 2222 }; 2223 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, 2224 kproc_start, &buf_kp); 2225 2226 static struct kproc_desc bufhw_kp = { 2227 "bufdaemon_hw", 2228 buf_daemon_hw, 2229 &bufdaemonhw_td 2230 }; 2231 SYSINIT(bufdaemon_hw, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, 2232 kproc_start, &bufhw_kp); 2233 2234 static void 2235 buf_daemon1(struct thread *td, int queue, int (*buf_limit_fn)(long), 2236 int *bd_req) 2237 { 2238 long limit; 2239 struct buf *marker; 2240 2241 marker = kmalloc(sizeof(*marker), M_BIOBUF, M_WAITOK | M_ZERO); 2242 marker->b_flags |= B_MARKER; 2243 marker->b_qindex = BQUEUE_NONE; 2244 marker->b_qcpu = 0; 2245 2246 /* 2247 * This process needs to be suspended prior to shutdown sync. 2248 */ 2249 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, 2250 td, SHUTDOWN_PRI_LAST); 2251 curthread->td_flags |= TDF_SYSTHREAD; 2252 2253 /* 2254 * This process is allowed to take the buffer cache to the limit 2255 */ 2256 for (;;) { 2257 kproc_suspend_loop(); 2258 2259 /* 2260 * Do the flush as long as the number of dirty buffers 2261 * (including those running) exceeds lodirtybufspace. 2262 * 2263 * When flushing limit running I/O to hirunningspace 2264 * Do the flush. Limit the amount of in-transit I/O we 2265 * allow to build up, otherwise we would completely saturate 2266 * the I/O system. Wakeup any waiting processes before we 2267 * normally would so they can run in parallel with our drain. 2268 * 2269 * Our aggregate normal+HW lo water mark is lodirtybufspace, 2270 * but because we split the operation into two threads we 2271 * have to cut it in half for each thread. 
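		 *
		 * (Hence the "limit = lodirtybufspace / 2" computation
		 * below; each daemon, normal and heavy-weight, flushes
		 * its own queue until its buf_limit_fn() no longer
		 * reports the limit as exceeded.)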
2272 */ 2273 waitrunningbufspace(); 2274 limit = lodirtybufspace / 2; 2275 while (buf_limit_fn(limit)) { 2276 if (flushbufqueues(marker, queue) == 0) 2277 break; 2278 if (runningbufspace < hirunningspace) 2279 continue; 2280 waitrunningbufspace(); 2281 } 2282 2283 /* 2284 * We reached our low water mark, reset the 2285 * request and sleep until we are needed again. 2286 * The sleep is just so the suspend code works. 2287 */ 2288 tsleep_interlock(bd_req, 0); 2289 if (atomic_swap_int(bd_req, 0) == 0) 2290 tsleep(bd_req, PINTERLOCKED, "psleep", hz); 2291 } 2292 /* NOT REACHED */ 2293 /*kfree(marker, M_BIOBUF);*/ 2294 } 2295 2296 static int 2297 buf_daemon_limit(long limit) 2298 { 2299 return (runningbufspace + dirtykvaspace > limit || 2300 dirtybufcount - dirtybufcounthw >= nbuf / 2); 2301 } 2302 2303 static int 2304 buf_daemon_hw_limit(long limit) 2305 { 2306 return (runningbufspace + dirtykvaspace > limit || 2307 dirtybufcounthw >= nbuf / 2); 2308 } 2309 2310 static void 2311 buf_daemon(void) 2312 { 2313 buf_daemon1(bufdaemon_td, BQUEUE_DIRTY, buf_daemon_limit, 2314 &bd_request); 2315 } 2316 2317 static void 2318 buf_daemon_hw(void) 2319 { 2320 buf_daemon1(bufdaemonhw_td, BQUEUE_DIRTY_HW, buf_daemon_hw_limit, 2321 &bd_request_hw); 2322 } 2323 2324 /* 2325 * Flush up to (flushperqueue) buffers in the dirty queue. Each cpu has a 2326 * localized version of the queue. Each call made to this function iterates 2327 * to another cpu. It is desireable to flush several buffers from the same 2328 * cpu's queue at once, as these are likely going to be linear. 2329 * 2330 * We must be careful to free up B_INVAL buffers instead of write them, which 2331 * NFS is particularly sensitive to. 2332 * 2333 * B_RELBUF may only be set by VFSs. We do set B_AGE to indicate that we 2334 * really want to try to get the buffer out and reuse it due to the write 2335 * load on the machine. 2336 * 2337 * We must lock the buffer in order to check its validity before we can mess 2338 * with its contents. spin isn't enough. 2339 */ 2340 static int 2341 flushbufqueues(struct buf *marker, bufq_type_t q) 2342 { 2343 struct bufpcpu *pcpu; 2344 struct buf *bp; 2345 int r = 0; 2346 u_int loops = flushperqueue; 2347 int lcpu = marker->b_qcpu; 2348 2349 KKASSERT(marker->b_qindex == BQUEUE_NONE); 2350 KKASSERT(marker->b_flags & B_MARKER); 2351 2352 again: 2353 /* 2354 * Spinlock needed to perform operations on the queue and may be 2355 * held through a non-blocking BUF_LOCK(), but cannot be held when 2356 * BUF_UNLOCK()ing or through any other major operation. 2357 */ 2358 pcpu = &bufpcpu[marker->b_qcpu]; 2359 spin_lock(&pcpu->spin); 2360 marker->b_qindex = q; 2361 TAILQ_INSERT_HEAD(&pcpu->bufqueues[q], marker, b_freelist); 2362 bp = marker; 2363 2364 while ((bp = TAILQ_NEXT(bp, b_freelist)) != NULL) { 2365 /* 2366 * NOTE: spinlock is always held at the top of the loop 2367 */ 2368 if (bp->b_flags & B_MARKER) 2369 continue; 2370 if ((bp->b_flags & B_DELWRI) == 0) { 2371 kprintf("Unexpected clean buffer %p\n", bp); 2372 continue; 2373 } 2374 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 2375 continue; 2376 KKASSERT(bp->b_qcpu == marker->b_qcpu && bp->b_qindex == q); 2377 2378 /* 2379 * Once the buffer is locked we will have no choice but to 2380 * unlock the spinlock around a later BUF_UNLOCK and re-set 2381 * bp = marker when looping. Move the marker now to make 2382 * things easier. 
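		 *
		 * The marker is a dummy buf flagged B_MARKER; parking it
		 * just after the locked bp keeps our place in the per-cpu
		 * queue across the spin_unlock()/spin_lock() cycles that
		 * follow, so the scan can safely resume from "bp = marker".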
2383 */ 2384 TAILQ_REMOVE(&pcpu->bufqueues[q], marker, b_freelist); 2385 TAILQ_INSERT_AFTER(&pcpu->bufqueues[q], bp, marker, b_freelist); 2386 2387 /* 2388 * Must recheck B_DELWRI after successfully locking 2389 * the buffer. 2390 */ 2391 if ((bp->b_flags & B_DELWRI) == 0) { 2392 spin_unlock(&pcpu->spin); 2393 BUF_UNLOCK(bp); 2394 spin_lock(&pcpu->spin); 2395 bp = marker; 2396 continue; 2397 } 2398 2399 /* 2400 * Remove the buffer from its queue. We still own the 2401 * spinlock here. 2402 */ 2403 _bremfree(bp); 2404 2405 /* 2406 * Disposing of an invalid buffer counts as a flush op 2407 */ 2408 if (bp->b_flags & B_INVAL) { 2409 spin_unlock(&pcpu->spin); 2410 brelse(bp); 2411 goto doloop; 2412 } 2413 2414 /* 2415 * Release the spinlock for the more complex ops we 2416 * are now going to do. 2417 */ 2418 spin_unlock(&pcpu->spin); 2419 lwkt_yield(); 2420 2421 /* 2422 * This is a bit messy 2423 */ 2424 if (LIST_FIRST(&bp->b_dep) != NULL && 2425 (bp->b_flags & B_DEFERRED) == 0 && 2426 buf_countdeps(bp, 0)) { 2427 spin_lock(&pcpu->spin); 2428 TAILQ_INSERT_TAIL(&pcpu->bufqueues[q], bp, b_freelist); 2429 bp->b_qindex = q; 2430 bp->b_flags |= B_DEFERRED; 2431 spin_unlock(&pcpu->spin); 2432 BUF_UNLOCK(bp); 2433 spin_lock(&pcpu->spin); 2434 bp = marker; 2435 continue; 2436 } 2437 2438 /* 2439 * spinlock not held here. 2440 * 2441 * If the buffer has a dependancy, buf_checkwrite() must 2442 * also return 0 for us to be able to initate the write. 2443 * 2444 * If the buffer is flagged B_ERROR it may be requeued 2445 * over and over again, we try to avoid a live lock. 2446 */ 2447 if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) { 2448 brelse(bp); 2449 } else if (bp->b_flags & B_ERROR) { 2450 tsleep(bp, 0, "bioer", 1); 2451 bp->b_flags &= ~B_AGE; 2452 cluster_awrite(bp); 2453 } else { 2454 bp->b_flags |= B_AGE | B_KVABIO; 2455 cluster_awrite(bp); 2456 } 2457 /* bp invalid but needs to be NULL-tested if we break out */ 2458 doloop: 2459 spin_lock(&pcpu->spin); 2460 ++r; 2461 if (--loops == 0) 2462 break; 2463 bp = marker; 2464 } 2465 /* bp is invalid here but can be NULL-tested to advance */ 2466 2467 TAILQ_REMOVE(&pcpu->bufqueues[q], marker, b_freelist); 2468 marker->b_qindex = BQUEUE_NONE; 2469 spin_unlock(&pcpu->spin); 2470 2471 /* 2472 * Advance the marker to be fair. 2473 */ 2474 marker->b_qcpu = (marker->b_qcpu + 1) % ncpus; 2475 if (bp == NULL) { 2476 if (marker->b_qcpu != lcpu) 2477 goto again; 2478 } 2479 2480 return (r); 2481 } 2482 2483 /* 2484 * inmem: 2485 * 2486 * Returns true if no I/O is needed to access the associated VM object. 2487 * This is like findblk except it also hunts around in the VM system for 2488 * the data. 2489 * 2490 * Note that we ignore vm_page_free() races from interrupts against our 2491 * lookup, since if the caller is not protected our return value will not 2492 * be any more valid then otherwise once we exit the critical section. 
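 *
 * A caller deciding whether read-ahead I/O is worthwhile might use it
 * roughly as follows (illustrative sketch only, next_loffset is a
 * hypothetical variable):
 *
 *	if (!inmem(vp, next_loffset))
 *		(queue an asynchronous read for next_loffset)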
2493 */ 2494 int 2495 inmem(struct vnode *vp, off_t loffset) 2496 { 2497 vm_object_t obj; 2498 vm_offset_t toff, tinc, size; 2499 vm_page_t m; 2500 int res = 1; 2501 2502 if (findblk(vp, loffset, FINDBLK_TEST)) 2503 return 1; 2504 if (vp->v_mount == NULL) 2505 return 0; 2506 if ((obj = vp->v_object) == NULL) 2507 return 0; 2508 2509 size = PAGE_SIZE; 2510 if (size > vp->v_mount->mnt_stat.f_iosize) 2511 size = vp->v_mount->mnt_stat.f_iosize; 2512 2513 vm_object_hold(obj); 2514 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 2515 m = vm_page_lookup(obj, OFF_TO_IDX(loffset + toff)); 2516 if (m == NULL) { 2517 res = 0; 2518 break; 2519 } 2520 tinc = size; 2521 if (tinc > PAGE_SIZE - ((toff + loffset) & PAGE_MASK)) 2522 tinc = PAGE_SIZE - ((toff + loffset) & PAGE_MASK); 2523 if (vm_page_is_valid(m, 2524 (vm_offset_t) ((toff + loffset) & PAGE_MASK), tinc) == 0) { 2525 res = 0; 2526 break; 2527 } 2528 } 2529 vm_object_drop(obj); 2530 return (res); 2531 } 2532 2533 /* 2534 * findblk: 2535 * 2536 * Locate and return the specified buffer. Unless flagged otherwise, 2537 * a locked buffer will be returned if it exists or NULL if it does not. 2538 * 2539 * findblk()'d buffers are still on the bufqueues and if you intend 2540 * to use your (locked NON-TEST) buffer you need to bremfree(bp) 2541 * and possibly do other stuff to it. 2542 * 2543 * FINDBLK_TEST - Do not lock the buffer. The caller is responsible 2544 * for locking the buffer and ensuring that it remains 2545 * the desired buffer after locking. 2546 * 2547 * FINDBLK_NBLOCK - Lock the buffer non-blocking. If we are unable 2548 * to acquire the lock we return NULL, even if the 2549 * buffer exists. 2550 * 2551 * FINDBLK_REF - Returns the buffer ref'd, which prevents normal 2552 * reuse by getnewbuf() but does not prevent 2553 * disassociation (B_INVAL). Used to avoid deadlocks 2554 * against random (vp,loffset)s due to reassignment. 2555 * 2556 * FINDBLK_KVABIO - Only applicable when returning a locked buffer. 2557 * Indicates that the caller supports B_KVABIO. 2558 * 2559 * (0) - Lock the buffer blocking. 2560 */ 2561 struct buf * 2562 findblk(struct vnode *vp, off_t loffset, int flags) 2563 { 2564 struct buf *bp; 2565 int lkflags; 2566 2567 lkflags = LK_EXCLUSIVE; 2568 if (flags & FINDBLK_NBLOCK) 2569 lkflags |= LK_NOWAIT; 2570 2571 for (;;) { 2572 /* 2573 * Lookup. Ref the buf while holding v_token to prevent 2574 * reuse (but does not prevent diassociation). 2575 */ 2576 lwkt_gettoken_shared(&vp->v_token); 2577 bp = buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset); 2578 if (bp == NULL) { 2579 lwkt_reltoken(&vp->v_token); 2580 return(NULL); 2581 } 2582 bqhold(bp); 2583 lwkt_reltoken(&vp->v_token); 2584 2585 /* 2586 * If testing only break and return bp, do not lock. 2587 */ 2588 if (flags & FINDBLK_TEST) 2589 break; 2590 2591 /* 2592 * Lock the buffer, return an error if the lock fails. 2593 * (only FINDBLK_NBLOCK can cause the lock to fail). 2594 */ 2595 if (BUF_LOCK(bp, lkflags)) { 2596 atomic_subtract_int(&bp->b_refs, 1); 2597 /* bp = NULL; not needed */ 2598 return(NULL); 2599 } 2600 2601 /* 2602 * Revalidate the locked buf before allowing it to be 2603 * returned. 2604 * 2605 * B_KVABIO is only set/cleared when locking. When 2606 * clearing B_KVABIO, we must ensure that the buffer 2607 * is synchronized to all cpus. 
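	 *
	 * (bkvasync_all() below performs that synchronization when the
	 * caller did not request FINDBLK_KVABIO.)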
2608 */ 2609 if (bp->b_vp == vp && bp->b_loffset == loffset) { 2610 if (flags & FINDBLK_KVABIO) 2611 bp->b_flags |= B_KVABIO; 2612 else 2613 bkvasync_all(bp); 2614 break; 2615 } 2616 atomic_subtract_int(&bp->b_refs, 1); 2617 BUF_UNLOCK(bp); 2618 } 2619 2620 /* 2621 * Success 2622 */ 2623 if ((flags & FINDBLK_REF) == 0) 2624 atomic_subtract_int(&bp->b_refs, 1); 2625 return(bp); 2626 } 2627 2628 /* 2629 * getcacheblk: 2630 * 2631 * Similar to getblk() except only returns the buffer if it is 2632 * B_CACHE and requires no other manipulation. Otherwise NULL 2633 * is returned. NULL is also returned if GETBLK_NOWAIT is set 2634 * and the getblk() would block. 2635 * 2636 * If B_RAM is set the buffer might be just fine, but we return 2637 * NULL anyway because we want the code to fall through to the 2638 * cluster read to issue more read-aheads. Otherwise read-ahead breaks. 2639 * 2640 * If blksize is 0 the buffer cache buffer must already be fully 2641 * cached. 2642 * 2643 * If blksize is non-zero getblk() will be used, allowing a buffer 2644 * to be reinstantiated from its VM backing store. The buffer must 2645 * still be fully cached after reinstantiation to be returned. 2646 */ 2647 struct buf * 2648 getcacheblk(struct vnode *vp, off_t loffset, int blksize, int blkflags) 2649 { 2650 struct buf *bp; 2651 int fndflags = 0; 2652 2653 if (blkflags & GETBLK_NOWAIT) 2654 fndflags |= FINDBLK_NBLOCK; 2655 if (blkflags & GETBLK_KVABIO) 2656 fndflags |= FINDBLK_KVABIO; 2657 2658 if (blksize) { 2659 bp = getblk(vp, loffset, blksize, blkflags, 0); 2660 if (bp) { 2661 if ((bp->b_flags & (B_INVAL | B_CACHE)) == B_CACHE) { 2662 bp->b_flags &= ~B_AGE; 2663 if (bp->b_flags & B_RAM) { 2664 bqrelse(bp); 2665 bp = NULL; 2666 } 2667 } else { 2668 brelse(bp); 2669 bp = NULL; 2670 } 2671 } 2672 } else { 2673 bp = findblk(vp, loffset, fndflags); 2674 if (bp) { 2675 if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == 2676 B_CACHE) { 2677 bp->b_flags &= ~B_AGE; 2678 bremfree(bp); 2679 } else { 2680 BUF_UNLOCK(bp); 2681 bp = NULL; 2682 } 2683 } 2684 } 2685 return (bp); 2686 } 2687 2688 /* 2689 * getblk: 2690 * 2691 * Get a block given a specified block and offset into a file/device. 2692 * B_INVAL may or may not be set on return. The caller should clear 2693 * B_INVAL prior to initiating a READ. 2694 * 2695 * IT IS IMPORTANT TO UNDERSTAND THAT IF YOU CALL GETBLK() AND B_CACHE 2696 * IS NOT SET, YOU MUST INITIALIZE THE RETURNED BUFFER, ISSUE A READ, 2697 * OR SET B_INVAL BEFORE RETIRING IT. If you retire a getblk'd buffer 2698 * without doing any of those things the system will likely believe 2699 * the buffer to be valid (especially if it is not B_VMIO), and the 2700 * next getblk() will return the buffer with B_CACHE set. 2701 * 2702 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for 2703 * an existing buffer. 2704 * 2705 * For a VMIO buffer, B_CACHE is modified according to the backing VM. 2706 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set 2707 * and then cleared based on the backing VM. If the previous buffer is 2708 * non-0-sized but invalid, B_CACHE will be cleared. 2709 * 2710 * If getblk() must create a new buffer, the new buffer is returned with 2711 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which 2712 * case it is returned with B_INVAL clear and B_CACHE set based on the 2713 * backing VM. 2714 * 2715 * getblk() also forces a bwrite() for any B_DELWRI buffer whos 2716 * B_CACHE bit is clear. 
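 *
 * As a rough illustration of the contract spelled out below, a simple
 * bread()-style read path looks something like this (sketch only, not
 * a drop-in implementation; "rdwait" is an illustrative wmesg):
 *
 *	bp = getblk(vp, loffset, size, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_flags &= ~(B_INVAL | B_ERROR);
 *		bp->b_cmd = BUF_CMD_READ;
 *		bp->b_bio1.bio_done = biodone_sync;
 *		bp->b_bio1.bio_flags |= BIO_SYNC;
 *		vfs_busy_pages(vp, bp);
 *		vn_strategy(vp, &bp->b_bio1);
 *		error = biowait(&bp->b_bio1, "rdwait");
 *	}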
2717 * 2718 * What this means, basically, is that the caller should use B_CACHE to 2719 * determine whether the buffer is fully valid or not and should clear 2720 * B_INVAL prior to issuing a read. If the caller intends to validate 2721 * the buffer by loading its data area with something, the caller needs 2722 * to clear B_INVAL. If the caller does this without issuing an I/O, 2723 * the caller should set B_CACHE ( as an optimization ), else the caller 2724 * should issue the I/O and biodone() will set B_CACHE if the I/O was 2725 * a write attempt or if it was a successfull read. If the caller 2726 * intends to issue a READ, the caller must clear B_INVAL and B_ERROR 2727 * prior to issuing the READ. biodone() will *not* clear B_INVAL. 2728 * 2729 * getblk flags: 2730 * 2731 * GETBLK_PCATCH - catch signal if blocked, can cause NULL return 2732 * GETBLK_BHEAVY - heavy-weight buffer cache buffer 2733 */ 2734 struct buf * 2735 getblk(struct vnode *vp, off_t loffset, int size, int blkflags, int slptimeo) 2736 { 2737 struct buf *bp; 2738 int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0; 2739 int error; 2740 int lkflags; 2741 2742 if (size > MAXBSIZE) 2743 panic("getblk: size(%d) > MAXBSIZE(%d)", size, MAXBSIZE); 2744 if (vp->v_object == NULL) 2745 panic("getblk: vnode %p has no object!", vp); 2746 2747 /* 2748 * NOTE: findblk does not try to resolve KVABIO in REF-only mode. 2749 * we still have to handle that ourselves. 2750 */ 2751 loop: 2752 if ((bp = findblk(vp, loffset, FINDBLK_REF | FINDBLK_TEST)) != NULL) { 2753 /* 2754 * The buffer was found in the cache, but we need to lock it. 2755 * We must acquire a ref on the bp to prevent reuse, but 2756 * this will not prevent disassociation (brelvp()) so we 2757 * must recheck (vp,loffset) after acquiring the lock. 2758 * 2759 * Without the ref the buffer could potentially be reused 2760 * before we acquire the lock and create a deadlock 2761 * situation between the thread trying to reuse the buffer 2762 * and us due to the fact that we would wind up blocking 2763 * on a random (vp,loffset). 2764 */ 2765 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 2766 if (blkflags & GETBLK_NOWAIT) { 2767 bqdrop(bp); 2768 return(NULL); 2769 } 2770 lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL; 2771 if (blkflags & GETBLK_PCATCH) 2772 lkflags |= LK_PCATCH; 2773 error = BUF_TIMELOCK(bp, lkflags, "getblk", slptimeo); 2774 if (error) { 2775 bqdrop(bp); 2776 if (error == ENOLCK) 2777 goto loop; 2778 return (NULL); 2779 } 2780 /* buffer may have changed on us */ 2781 } 2782 bqdrop(bp); 2783 2784 /* 2785 * Once the buffer has been locked, make sure we didn't race 2786 * a buffer recyclement. Buffers that are no longer hashed 2787 * will have b_vp == NULL, so this takes care of that check 2788 * as well. 2789 */ 2790 if (bp->b_vp != vp || bp->b_loffset != loffset) { 2791 #if 0 2792 kprintf("Warning buffer %p (vp %p loffset %lld) " 2793 "was recycled\n", 2794 bp, vp, (long long)loffset); 2795 #endif 2796 BUF_UNLOCK(bp); 2797 goto loop; 2798 } 2799 2800 /* 2801 * If SZMATCH any pre-existing buffer must be of the requested 2802 * size or NULL is returned. The caller absolutely does not 2803 * want getblk() to bwrite() the buffer on a size mismatch. 2804 */ 2805 if ((blkflags & GETBLK_SZMATCH) && size != bp->b_bcount) { 2806 BUF_UNLOCK(bp); 2807 return(NULL); 2808 } 2809 2810 /* 2811 * All vnode-based buffers must be backed by a VM object. 2812 * 2813 * Set B_KVABIO for any incidental work, we will fix it 2814 * up later. 
		 */
		KKASSERT(bp->b_flags & B_VMIO);
		KKASSERT(bp->b_cmd == BUF_CMD_DONE);
		bp->b_flags &= ~B_AGE;
		bp->b_flags |= B_KVABIO;

		/*
		 * Make sure that B_INVAL buffers do not have a cached
		 * block number translation.
		 */
		if ((bp->b_flags & B_INVAL) &&
		    (bp->b_bio2.bio_offset != NOOFFSET)) {
			kprintf("Warning invalid buffer %p (vp %p loffset %lld)"
				" did not have cleared bio_offset cache\n",
				bp, vp, (long long)loffset);
			clearbiocache(&bp->b_bio2);
		}

		/*
		 * The buffer is locked. B_CACHE is cleared if the buffer is
		 * invalid.
		 *
		 * After the bremfree(), disposals must use b[q]relse().
		 */
		if (bp->b_flags & B_INVAL)
			bp->b_flags &= ~B_CACHE;
		bremfree(bp);

		/*
		 * Any size inconsistency with a dirty buffer or a buffer
		 * with a softupdates dependency must be resolved. Resizing
		 * the buffer in such circumstances can lead to problems.
		 *
		 * Dirty or dependent buffers are written synchronously.
		 * Other types of buffers are simply released and
		 * reconstituted as they may be backed by valid, dirty VM
		 * pages (but not marked B_DELWRI).
		 *
		 * NFS NOTE: NFS buffers which straddle EOF are oddly-sized
		 * and may be left over from a prior truncation (and thus
		 * no longer represent the actual EOF point), so we
		 * definitely do not want to B_NOCACHE the backing store.
		 */
		if (size != bp->b_bcount) {
			if (bp->b_flags & B_DELWRI) {
				bp->b_flags |= B_RELBUF;
				bwrite(bp);
			} else if (LIST_FIRST(&bp->b_dep)) {
				bp->b_flags |= B_RELBUF;
				bwrite(bp);
			} else {
				bp->b_flags |= B_RELBUF;
				brelse(bp);
			}
			goto loop;
		}
		KKASSERT(size <= bp->b_kvasize);
		KASSERT(bp->b_loffset != NOOFFSET,
			("getblk: no buffer offset"));

		/*
		 * A buffer with B_DELWRI set and B_CACHE clear must
		 * be committed before we can return the buffer in
		 * order to prevent the caller from issuing a read
		 * ( due to B_CACHE not being set ) and overwriting
		 * it.
		 *
		 * Most callers, including NFS and FFS, need this to
		 * operate properly either because they assume they
		 * can issue a read if B_CACHE is not set, or because
		 * ( for example ) an uncached B_DELWRI might loop due
		 * to softupdates re-dirtying the buffer. In the latter
		 * case, B_CACHE is set after the first write completes,
		 * preventing further loops.
		 *
		 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE
		 * above while extending the buffer, we cannot allow the
		 * buffer to remain with B_CACHE set after the write
		 * completes or it will represent a corrupt state. To
		 * deal with this we set B_NOCACHE to scrap the buffer
		 * after the write.
		 *
		 * XXX Should this be B_RELBUF instead of B_NOCACHE?
		 * I'm not even sure this state is still possible
		 * now that getblk() writes out any dirty buffers
		 * on size changes.
		 *
		 * We might be able to do something fancy, like setting
		 * B_CACHE in bwrite() except if B_DELWRI is already set,
		 * so the below call doesn't set B_CACHE, but that gets real
		 * confusing. This is much easier.
2906 */ 2907 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 2908 kprintf("getblk: Warning, bp %p loff=%jx DELWRI set " 2909 "and CACHE clear, b_flags %08x\n", 2910 bp, (uintmax_t)bp->b_loffset, bp->b_flags); 2911 bp->b_flags |= B_NOCACHE; 2912 bwrite(bp); 2913 goto loop; 2914 } 2915 } else { 2916 /* 2917 * Buffer is not in-core, create new buffer. The buffer 2918 * returned by getnewbuf() is locked. Note that the returned 2919 * buffer is also considered valid (not marked B_INVAL). 2920 * 2921 * Calculating the offset for the I/O requires figuring out 2922 * the block size. We use DEV_BSIZE for VBLK or VCHR and 2923 * the mount's f_iosize otherwise. If the vnode does not 2924 * have an associated mount we assume that the passed size is 2925 * the block size. 2926 * 2927 * Note that vn_isdisk() cannot be used here since it may 2928 * return a failure for numerous reasons. Note that the 2929 * buffer size may be larger then the block size (the caller 2930 * will use block numbers with the proper multiple). Beware 2931 * of using any v_* fields which are part of unions. In 2932 * particular, in DragonFly the mount point overloading 2933 * mechanism uses the namecache only and the underlying 2934 * directory vnode is not a special case. 2935 */ 2936 int bsize, maxsize; 2937 2938 if (vp->v_type == VBLK || vp->v_type == VCHR) 2939 bsize = DEV_BSIZE; 2940 else if (vp->v_mount) 2941 bsize = vp->v_mount->mnt_stat.f_iosize; 2942 else 2943 bsize = size; 2944 2945 maxsize = size + (loffset & PAGE_MASK); 2946 maxsize = imax(maxsize, bsize); 2947 2948 bp = getnewbuf(blkflags, slptimeo, size, maxsize); 2949 if (bp == NULL) { 2950 if (slpflags || slptimeo) 2951 return NULL; 2952 goto loop; 2953 } 2954 2955 /* 2956 * Atomically insert the buffer into the hash, so that it can 2957 * be found by findblk(). 2958 * 2959 * If bgetvp() returns non-zero a collision occured, and the 2960 * bp will not be associated with the vnode. 2961 * 2962 * Make sure the translation layer has been cleared. 2963 */ 2964 bp->b_loffset = loffset; 2965 bp->b_bio2.bio_offset = NOOFFSET; 2966 /* bp->b_bio2.bio_next = NULL; */ 2967 2968 if (bgetvp(vp, bp, size)) { 2969 bp->b_flags |= B_INVAL; 2970 brelse(bp); 2971 goto loop; 2972 } 2973 2974 /* 2975 * All vnode-based buffers must be backed by a VM object. 2976 * 2977 * Set B_KVABIO for incidental work 2978 */ 2979 KKASSERT(vp->v_object != NULL); 2980 bp->b_flags |= B_VMIO | B_KVABIO; 2981 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 2982 2983 allocbuf(bp, size); 2984 } 2985 2986 /* 2987 * Do the nasty smp broadcast (if the buffer needs it) when KVABIO 2988 * is not supported. 2989 */ 2990 if (bp && (blkflags & GETBLK_KVABIO) == 0) { 2991 bkvasync_all(bp); 2992 } 2993 return (bp); 2994 } 2995 2996 /* 2997 * regetblk(bp) 2998 * 2999 * Reacquire a buffer that was previously released to the locked queue, 3000 * or reacquire a buffer which is interlocked by having bioops->io_deallocate 3001 * set B_LOCKED (which handles the acquisition race). 3002 * 3003 * To this end, either B_LOCKED must be set or the dependancy list must be 3004 * non-empty. 3005 */ 3006 void 3007 regetblk(struct buf *bp) 3008 { 3009 KKASSERT((bp->b_flags & B_LOCKED) || LIST_FIRST(&bp->b_dep) != NULL); 3010 BUF_LOCK(bp, LK_EXCLUSIVE | LK_RETRY); 3011 bremfree(bp); 3012 } 3013 3014 /* 3015 * allocbuf: 3016 * 3017 * This code constitutes the buffer memory from either anonymous system 3018 * memory (in the case of non-VMIO operations) or from an associated 3019 * VM object (in the case of VMIO operations). 
This code is able to 3020 * resize a buffer up or down. 3021 * 3022 * Note that this code is tricky, and has many complications to resolve 3023 * deadlock or inconsistant data situations. Tread lightly!!! 3024 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 3025 * the caller. Calling this code willy nilly can result in the loss of 3026 * data. 3027 * 3028 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 3029 * B_CACHE for the non-VMIO case. 3030 * 3031 * This routine does not need to be called from a critical section but you 3032 * must own the buffer. 3033 */ 3034 void 3035 allocbuf(struct buf *bp, int size) 3036 { 3037 vm_page_t m; 3038 int newbsize; 3039 int desiredpages; 3040 int i; 3041 3042 if (BUF_LOCKINUSE(bp) == 0) 3043 panic("allocbuf: buffer not busy"); 3044 3045 if (bp->b_kvasize < size) 3046 panic("allocbuf: buffer too small"); 3047 3048 KKASSERT(bp->b_flags & B_VMIO); 3049 3050 newbsize = roundup2(size, DEV_BSIZE); 3051 desiredpages = ((int)(bp->b_loffset & PAGE_MASK) + 3052 newbsize + PAGE_MASK) >> PAGE_SHIFT; 3053 KKASSERT(desiredpages <= XIO_INTERNAL_PAGES); 3054 3055 /* 3056 * Set B_CACHE initially if buffer is 0 length or will become 3057 * 0-length. 3058 */ 3059 if (size == 0 || bp->b_bufsize == 0) 3060 bp->b_flags |= B_CACHE; 3061 3062 if (newbsize < bp->b_bufsize) { 3063 /* 3064 * DEV_BSIZE aligned new buffer size is less then the 3065 * DEV_BSIZE aligned existing buffer size. Figure out 3066 * if we have to remove any pages. 3067 */ 3068 if (desiredpages < bp->b_xio.xio_npages) { 3069 for (i = desiredpages; i < bp->b_xio.xio_npages; i++) { 3070 /* 3071 * the page is not freed here -- it 3072 * is the responsibility of 3073 * vnode_pager_setsize 3074 */ 3075 m = bp->b_xio.xio_pages[i]; 3076 KASSERT(m != bogus_page, 3077 ("allocbuf: bogus page found")); 3078 vm_page_busy_wait(m, TRUE, "biodep"); 3079 bp->b_xio.xio_pages[i] = NULL; 3080 vm_page_unwire(m, 0); 3081 vm_page_wakeup(m); 3082 } 3083 pmap_qremove_noinval((vm_offset_t) 3084 trunc_page((vm_offset_t)bp->b_data) + 3085 (desiredpages << PAGE_SHIFT), 3086 (bp->b_xio.xio_npages - desiredpages)); 3087 bp->b_xio.xio_npages = desiredpages; 3088 3089 /* 3090 * Don't bother invalidating the pmap changes 3091 * (which wastes global SMP invalidation IPIs) 3092 * when setting the size to 0. This case occurs 3093 * when called via getnewbuf() during buffer 3094 * recyclement. 3095 */ 3096 if (desiredpages == 0) { 3097 CPUMASK_ASSZERO(bp->b_cpumask); 3098 } else { 3099 bkvareset(bp); 3100 } 3101 } 3102 } else if (size > bp->b_bcount) { 3103 /* 3104 * We are growing the buffer, possibly in a 3105 * byte-granular fashion. 3106 */ 3107 struct vnode *vp; 3108 vm_object_t obj; 3109 vm_offset_t toff; 3110 vm_offset_t tinc; 3111 3112 /* 3113 * Step 1, bring in the VM pages from the object, 3114 * allocating them if necessary. We must clear 3115 * B_CACHE if these pages are not valid for the 3116 * range covered by the buffer. 
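		 *
		 * (Pages not already resident are allocated via
		 * bio_page_alloc() below; a freshly allocated page is
		 * necessarily invalid, which is why that path clears
		 * B_CACHE.)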
3117 */ 3118 vp = bp->b_vp; 3119 obj = vp->v_object; 3120 3121 vm_object_hold(obj); 3122 while (bp->b_xio.xio_npages < desiredpages) { 3123 vm_page_t m; 3124 vm_pindex_t pi; 3125 int error; 3126 3127 pi = OFF_TO_IDX(bp->b_loffset) + 3128 bp->b_xio.xio_npages; 3129 3130 /* 3131 * Blocking on m->busy_count might lead to a 3132 * deadlock: 3133 * 3134 * vm_fault->getpages->cluster_read->allocbuf 3135 */ 3136 m = vm_page_lookup_busy_try(obj, pi, FALSE, 3137 &error); 3138 if (error) { 3139 vm_page_sleep_busy(m, FALSE, "pgtblk"); 3140 continue; 3141 } 3142 if (m == NULL) { 3143 /* 3144 * note: must allocate system pages 3145 * since blocking here could intefere 3146 * with paging I/O, no matter which 3147 * process we are. 3148 */ 3149 m = bio_page_alloc(bp, obj, pi, 3150 desiredpages - 3151 bp->b_xio.xio_npages); 3152 if (m) { 3153 vm_page_wire(m); 3154 vm_page_wakeup(m); 3155 bp->b_flags &= ~B_CACHE; 3156 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; 3157 ++bp->b_xio.xio_npages; 3158 } 3159 continue; 3160 } 3161 3162 /* 3163 * We found a page and were able to busy it. 3164 */ 3165 vm_page_wire(m); 3166 vm_page_wakeup(m); 3167 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; 3168 ++bp->b_xio.xio_npages; 3169 if (bp->b_act_count < m->act_count) 3170 bp->b_act_count = m->act_count; 3171 } 3172 vm_object_drop(obj); 3173 3174 /* 3175 * Step 2. We've loaded the pages into the buffer, 3176 * we have to figure out if we can still have B_CACHE 3177 * set. Note that B_CACHE is set according to the 3178 * byte-granular range ( bcount and size ), not the 3179 * aligned range ( newbsize ). 3180 * 3181 * The VM test is against m->valid, which is DEV_BSIZE 3182 * aligned. Needless to say, the validity of the data 3183 * needs to also be DEV_BSIZE aligned. Note that this 3184 * fails with NFS if the server or some other client 3185 * extends the file's EOF. If our buffer is resized, 3186 * B_CACHE may remain set! XXX 3187 */ 3188 3189 toff = bp->b_bcount; 3190 tinc = PAGE_SIZE - ((bp->b_loffset + toff) & PAGE_MASK); 3191 3192 while ((bp->b_flags & B_CACHE) && toff < size) { 3193 vm_pindex_t pi; 3194 3195 if (tinc > (size - toff)) 3196 tinc = size - toff; 3197 3198 pi = ((bp->b_loffset & PAGE_MASK) + toff) >> 3199 PAGE_SHIFT; 3200 3201 vfs_buf_test_cache( 3202 bp, 3203 bp->b_loffset, 3204 toff, 3205 tinc, 3206 bp->b_xio.xio_pages[pi] 3207 ); 3208 toff += tinc; 3209 tinc = PAGE_SIZE; 3210 } 3211 3212 /* 3213 * Step 3, fixup the KVM pmap. Remember that 3214 * bp->b_data is relative to bp->b_loffset, but 3215 * bp->b_loffset may be offset into the first page. 3216 */ 3217 bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data); 3218 pmap_qenter_noinval((vm_offset_t)bp->b_data, 3219 bp->b_xio.xio_pages, bp->b_xio.xio_npages); 3220 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 3221 (vm_offset_t)(bp->b_loffset & PAGE_MASK)); 3222 bkvareset(bp); 3223 } 3224 atomic_add_long(&bufspace, newbsize - bp->b_bufsize); 3225 3226 /* adjust space use on already-dirty buffer */ 3227 if (bp->b_flags & B_DELWRI) { 3228 /* dirtykvaspace unchanged */ 3229 atomic_add_long(&dirtybufspace, newbsize - bp->b_bufsize); 3230 if (bp->b_flags & B_HEAVY) { 3231 atomic_add_long(&dirtybufspacehw, 3232 newbsize - bp->b_bufsize); 3233 } 3234 } 3235 bp->b_bufsize = newbsize; /* actual buffer allocation */ 3236 bp->b_bcount = size; /* requested buffer size */ 3237 bufspacewakeup(); 3238 } 3239 3240 /* 3241 * biowait: 3242 * 3243 * Wait for buffer I/O completion, returning error status. 
B_EINTR 3244 * is converted into an EINTR error but not cleared (since a chain 3245 * of biowait() calls may occur). 3246 * 3247 * On return bpdone() will have been called but the buffer will remain 3248 * locked and will not have been brelse()'d. 3249 * 3250 * NOTE! If a timeout is specified and ETIMEDOUT occurs the I/O is 3251 * likely still in progress on return. 3252 * 3253 * NOTE! This operation is on a BIO, not a BUF. 3254 * 3255 * NOTE! BIO_DONE is cleared by vn_strategy() 3256 */ 3257 static __inline int 3258 _biowait(struct bio *bio, const char *wmesg, int to) 3259 { 3260 struct buf *bp = bio->bio_buf; 3261 u_int32_t flags; 3262 u_int32_t nflags; 3263 int error; 3264 3265 KKASSERT(bio == &bp->b_bio1); 3266 for (;;) { 3267 flags = bio->bio_flags; 3268 if (flags & BIO_DONE) 3269 break; 3270 nflags = flags | BIO_WANT; 3271 tsleep_interlock(bio, 0); 3272 if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) { 3273 if (wmesg) 3274 error = tsleep(bio, PINTERLOCKED, wmesg, to); 3275 else if (bp->b_cmd == BUF_CMD_READ) 3276 error = tsleep(bio, PINTERLOCKED, "biord", to); 3277 else 3278 error = tsleep(bio, PINTERLOCKED, "biowr", to); 3279 if (error) { 3280 kprintf("tsleep error biowait %d\n", error); 3281 return (error); 3282 } 3283 } 3284 } 3285 3286 /* 3287 * Finish up. 3288 */ 3289 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 3290 bio->bio_flags &= ~(BIO_DONE | BIO_SYNC); 3291 if (bp->b_flags & B_EINTR) 3292 return (EINTR); 3293 if (bp->b_flags & B_ERROR) 3294 return (bp->b_error ? bp->b_error : EIO); 3295 return (0); 3296 } 3297 3298 int 3299 biowait(struct bio *bio, const char *wmesg) 3300 { 3301 return(_biowait(bio, wmesg, 0)); 3302 } 3303 3304 int 3305 biowait_timeout(struct bio *bio, const char *wmesg, int to) 3306 { 3307 return(_biowait(bio, wmesg, to)); 3308 } 3309 3310 /* 3311 * This associates a tracking count with an I/O. vn_strategy() and 3312 * dev_dstrategy() do this automatically but there are a few cases 3313 * where a vnode or device layer is bypassed when a block translation 3314 * is cached. In such cases bio_start_transaction() may be called on 3315 * the bypassed layers so the system gets an I/O in progress indication 3316 * for those higher layers. 3317 */ 3318 void 3319 bio_start_transaction(struct bio *bio, struct bio_track *track) 3320 { 3321 bio->bio_track = track; 3322 bio_track_ref(track); 3323 dsched_buf_enter(bio->bio_buf); /* might stack */ 3324 } 3325 3326 /* 3327 * Initiate I/O on a vnode. 3328 * 3329 * SWAPCACHE OPERATION: 3330 * 3331 * Real buffer cache buffers have a non-NULL bp->b_vp. Unfortunately 3332 * devfs also uses b_vp for fake buffers so we also have to check 3333 * that B_PAGING is 0. In this case the passed 'vp' is probably the 3334 * underlying block device. The swap assignments are related to the 3335 * buffer cache buffer's b_vp, not the passed vp. 3336 * 3337 * The passed vp == bp->b_vp only in the case where the strategy call 3338 * is made on the vp itself for its own buffers (a regular file or 3339 * block device vp). The filesystem usually then re-calls vn_strategy() 3340 * after translating the request to an underlying device. 3341 * 3342 * Cluster buffers set B_CLUSTER and the passed vp is the vp of the 3343 * underlying buffer cache buffers. 3344 * 3345 * We can only deal with page-aligned buffers at the moment, because 3346 * we can't tell what the real dirty state for pages straddling a buffer 3347 * are. 
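 *
 * (That page-alignment restriction is enforced by vn_cache_strategy()
 * below, which rejects any buffer whose b_loffset or b_bcount is not
 * a multiple of PAGE_SIZE.)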
3348 * 3349 * In order to call swap_pager_strategy() we must provide the VM object 3350 * and base offset for the underlying buffer cache pages so it can find 3351 * the swap blocks. 3352 */ 3353 void 3354 vn_strategy(struct vnode *vp, struct bio *bio) 3355 { 3356 struct bio_track *track; 3357 struct buf *bp = bio->bio_buf; 3358 3359 KKASSERT(bp->b_cmd != BUF_CMD_DONE); 3360 3361 /* 3362 * Set when an I/O is issued on the bp. Cleared by consumers 3363 * (aka HAMMER), allowing the consumer to determine if I/O had 3364 * actually occurred. 3365 */ 3366 bp->b_flags |= B_IOISSUED; 3367 3368 /* 3369 * Handle the swapcache intercept. 3370 * 3371 * NOTE: The swapcache itself always supports KVABIO and will 3372 * do the right thing if its underlying devices do not. 3373 */ 3374 if (vn_cache_strategy(vp, bio)) 3375 return; 3376 3377 /* 3378 * If the vnode does not support KVABIO and the buffer is using 3379 * KVABIO, we must synchronize b_data to all cpus before dispatching. 3380 */ 3381 if ((vp->v_flag & VKVABIO) == 0 && (bp->b_flags & B_KVABIO)) 3382 bkvasync_all(bp); 3383 3384 /* 3385 * Otherwise do the operation through the filesystem 3386 */ 3387 if (bp->b_cmd == BUF_CMD_READ) 3388 track = &vp->v_track_read; 3389 else 3390 track = &vp->v_track_write; 3391 KKASSERT((bio->bio_flags & BIO_DONE) == 0); 3392 bio->bio_track = track; 3393 bio_track_ref(track); 3394 dsched_buf_enter(bp); /* might stack */ 3395 vop_strategy(*vp->v_ops, vp, bio); 3396 } 3397 3398 /* 3399 * vn_cache_strategy() 3400 * 3401 * NOTE: This function supports the KVABIO API wherein b_data might not 3402 * be synchronized to the current cpu. 3403 */ 3404 static void vn_cache_strategy_callback(struct bio *bio); 3405 3406 int 3407 vn_cache_strategy(struct vnode *vp, struct bio *bio) 3408 { 3409 struct buf *bp = bio->bio_buf; 3410 struct bio *nbio; 3411 vm_object_t object; 3412 vm_page_t m; 3413 int i; 3414 3415 /* 3416 * Stop using swapcache if paniced, dumping, or dumped 3417 */ 3418 if (panicstr || dumping) 3419 return(0); 3420 3421 /* 3422 * Is this buffer cache buffer suitable for reading from 3423 * the swap cache? 3424 */ 3425 if (vm_swapcache_read_enable == 0 || 3426 bp->b_cmd != BUF_CMD_READ || 3427 ((bp->b_flags & B_CLUSTER) == 0 && 3428 (bp->b_vp == NULL || (bp->b_flags & B_PAGING))) || 3429 ((int)bp->b_loffset & PAGE_MASK) != 0 || 3430 (bp->b_bcount & PAGE_MASK) != 0) { 3431 return(0); 3432 } 3433 3434 /* 3435 * Figure out the original VM object (it will match the underlying 3436 * VM pages). Note that swap cached data uses page indices relative 3437 * to that object, not relative to bio->bio_offset. 3438 */ 3439 if (bp->b_flags & B_CLUSTER) 3440 object = vp->v_object; 3441 else 3442 object = bp->b_vp->v_object; 3443 3444 /* 3445 * In order to be able to use the swap cache all underlying VM 3446 * pages must be marked as such, and we can't have any bogus pages. 3447 */ 3448 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 3449 m = bp->b_xio.xio_pages[i]; 3450 if ((m->flags & PG_SWAPPED) == 0) 3451 break; 3452 if (m == bogus_page) 3453 break; 3454 } 3455 3456 /* 3457 * If we are good then issue the I/O using swap_pager_strategy(). 3458 * 3459 * We can only do this if the buffer actually supports object-backed 3460 * I/O. If it doesn't npages will be 0. 
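	 *
	 * The test below covers both requirements at once: 'i' only
	 * equals xio_npages when every page scanned above was marked
	 * PG_SWAPPED and none were bogus, and 'i' is only non-zero when
	 * the buffer has object-backed pages at all.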
	 */
	if (i && i == bp->b_xio.xio_npages) {
		m = bp->b_xio.xio_pages[0];
		nbio = push_bio(bio);
		nbio->bio_done = vn_cache_strategy_callback;
		nbio->bio_offset = ptoa(m->pindex);
		KKASSERT(m->object == object);
		swap_pager_strategy(object, nbio);
		return(1);
	}
	return(0);
}

/*
 * This is a bit of a hack but since the vn_cache_strategy() function can
 * override a VFS's strategy function we must make sure that the bio, which
 * is probably bio2, doesn't leak an unexpected offset value back to the
 * filesystem. The filesystem (e.g. UFS) might otherwise assume that the
 * bio went through its own file strategy function and that the bio2 offset
 * is a cached disk offset when, in fact, it isn't.
 */
static void
vn_cache_strategy_callback(struct bio *bio)
{
	bio->bio_offset = NOOFFSET;
	biodone(pop_bio(bio));
}

/*
 * bpdone:
 *
 *	Finish I/O on a buffer after all BIOs have been processed.
 *	Called when the bio chain is exhausted or by biowait. If called
 *	by biowait, elseit is typically 0.
 *
 *	bpdone is also responsible for setting B_CACHE in a B_VMIO bp.
 *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
 *	assuming B_INVAL is clear.
 *
 *	For the VMIO case, we set B_CACHE if the op was a read and no
 *	read error occurred, or if the op was a write. B_CACHE is never
 *	set if the buffer is invalid or otherwise uncacheable.
 *
 *	bpdone does not mess with B_INVAL, allowing the I/O routine or the
 *	initiator to leave B_INVAL set to brelse the buffer out of existence
 *	in the biodone routine.
 *
 *	bpdone is responsible for calling bundirty() on the buffer after a
 *	successful write. We previously did this prior to initiating the
 *	write under the assumption that the buffer might be dirtied again
 *	while the write was in progress, but doing it beforehand creates
 *	a race condition prior to the call to vn_strategy() where the
 *	filesystem may not be aware that a dirty buffer is present.
 *	It should not be possible for the buffer or its underlying pages to
 *	be redirtied prior to bpdone()'s unbusying of the underlying VM
 *	pages.
 */
void
bpdone(struct buf *bp, int elseit)
{
	buf_cmd_t cmd;

	KASSERT(BUF_LOCKINUSE(bp), ("bpdone: bp %p not busy", bp));
	KASSERT(bp->b_cmd != BUF_CMD_DONE,
		("bpdone: bp %p already done!", bp));

	/*
	 * No more BIOs are left. All completion functions have been dealt
	 * with, now we clean up the buffer.
	 */
	cmd = bp->b_cmd;
	bp->b_cmd = BUF_CMD_DONE;

	/*
	 * Only reads and writes are processed past this point.
	 */
	if (cmd != BUF_CMD_READ && cmd != BUF_CMD_WRITE) {
		if (cmd == BUF_CMD_FREEBLKS)
			bp->b_flags |= B_NOCACHE;
		if (elseit)
			brelse(bp);
		return;
	}

	/*
	 * A failed write must re-dirty the buffer unless B_INVAL
	 * was set.
	 *
	 * A successful write must clear the dirty flag. This is done after
	 * the write to ensure that the buffer remains on the vnode's dirty
	 * list for filesystem interlocks / checks until the write is actually
	 * complete. HAMMER2 is sensitive to this issue.
	 *
	 * Only applicable to normal buffers (with VPs). vinum buffers may
	 * not have a vp.
3556 * 3557 * Must be done prior to calling buf_complete() as the callback might 3558 * re-dirty the buffer. 3559 */ 3560 if (cmd == BUF_CMD_WRITE) { 3561 if ((bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) { 3562 bp->b_flags &= ~B_NOCACHE; 3563 if (bp->b_vp) 3564 bdirty(bp); 3565 } else { 3566 if (bp->b_vp) 3567 bundirty(bp); 3568 } 3569 } 3570 3571 /* 3572 * Warning: softupdates may re-dirty the buffer, and HAMMER can do 3573 * a lot worse. XXX - move this above the clearing of b_cmd 3574 */ 3575 if (LIST_FIRST(&bp->b_dep) != NULL) 3576 buf_complete(bp); 3577 3578 if (bp->b_flags & B_VMIO) { 3579 int i; 3580 vm_ooffset_t foff; 3581 vm_page_t m; 3582 vm_object_t obj; 3583 int iosize; 3584 struct vnode *vp = bp->b_vp; 3585 3586 obj = vp->v_object; 3587 3588 #if defined(VFS_BIO_DEBUG) 3589 if (vp->v_auxrefs == 0) 3590 panic("bpdone: zero vnode hold count"); 3591 if ((vp->v_flag & VOBJBUF) == 0) 3592 panic("bpdone: vnode is not setup for merged cache"); 3593 #endif 3594 3595 foff = bp->b_loffset; 3596 KASSERT(foff != NOOFFSET, ("bpdone: no buffer offset")); 3597 KASSERT(obj != NULL, ("bpdone: missing VM object")); 3598 3599 #if defined(VFS_BIO_DEBUG) 3600 if (obj->paging_in_progress < bp->b_xio.xio_npages) { 3601 kprintf("bpdone: paging in progress(%d) < " 3602 "bp->b_xio.xio_npages(%d)\n", 3603 obj->paging_in_progress, 3604 bp->b_xio.xio_npages); 3605 } 3606 #endif 3607 3608 /* 3609 * Set B_CACHE if the op was a normal read and no error 3610 * occured. B_CACHE is set for writes in the b*write() 3611 * routines. 3612 */ 3613 iosize = bp->b_bcount - bp->b_resid; 3614 if (cmd == BUF_CMD_READ && 3615 (bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR)) == 0) { 3616 bp->b_flags |= B_CACHE; 3617 } 3618 3619 vm_object_hold(obj); 3620 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3621 int resid; 3622 int isbogus; 3623 3624 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; 3625 if (resid > iosize) 3626 resid = iosize; 3627 3628 /* 3629 * cleanup bogus pages, restoring the originals. Since 3630 * the originals should still be wired, we don't have 3631 * to worry about interrupt/freeing races destroying 3632 * the VM object association. 3633 */ 3634 m = bp->b_xio.xio_pages[i]; 3635 if (m == bogus_page) { 3636 if ((bp->b_flags & B_HASBOGUS) == 0) 3637 panic("bpdone: bp %p corrupt bogus", bp); 3638 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 3639 if (m == NULL) 3640 panic("bpdone: page disappeared"); 3641 bp->b_xio.xio_pages[i] = m; 3642 isbogus = 1; 3643 } else { 3644 isbogus = 0; 3645 } 3646 #if defined(VFS_BIO_DEBUG) 3647 if (OFF_TO_IDX(foff) != m->pindex) { 3648 kprintf("bpdone: foff(%lu)/m->pindex(%ld) " 3649 "mismatch\n", 3650 (unsigned long)foff, (long)m->pindex); 3651 } 3652 #endif 3653 3654 /* 3655 * In the write case, the valid and clean bits are 3656 * already changed correctly (see bdwrite()), so we 3657 * only need to do this here in the read case. 3658 */ 3659 vm_page_busy_wait(m, FALSE, "bpdpgw"); 3660 if (cmd == BUF_CMD_READ && isbogus == 0 && resid > 0) 3661 vfs_clean_one_page(bp, i, m); 3662 3663 /* 3664 * when debugging new filesystems or buffer I/O 3665 * methods, this is the most common error that pops 3666 * up. if you see this, you have not set the page 3667 * busy flag correctly!!! 
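			 *
			 * (The soft-busy being checked here is normally
			 * acquired via vm_page_io_start(), e.g. from
			 * vfs_busy_pages() before the I/O was initiated,
			 * and is released by the vm_page_io_finish() call
			 * below.)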
3668 */ 3669 if ((m->busy_count & PBUSY_MASK) == 0) { 3670 kprintf("bpdone: page busy < 0, " 3671 "pindex: %d, foff: 0x(%x,%x), " 3672 "resid: %d, index: %d\n", 3673 (int) m->pindex, (int)(foff >> 32), 3674 (int) foff & 0xffffffff, resid, i); 3675 if (!vn_isdisk(vp, NULL)) 3676 kprintf(" iosize: %ld, loffset: %lld, " 3677 "flags: 0x%08x, npages: %d\n", 3678 bp->b_vp->v_mount->mnt_stat.f_iosize, 3679 (long long)bp->b_loffset, 3680 bp->b_flags, bp->b_xio.xio_npages); 3681 else 3682 kprintf(" VDEV, loffset: %lld, flags: 0x%08x, npages: %d\n", 3683 (long long)bp->b_loffset, 3684 bp->b_flags, bp->b_xio.xio_npages); 3685 kprintf(" valid: 0x%x, dirty: 0x%x, " 3686 "wired: %d\n", 3687 m->valid, m->dirty, 3688 m->wire_count); 3689 panic("bpdone: page busy < 0"); 3690 } 3691 vm_page_io_finish(m); 3692 vm_page_wakeup(m); 3693 vm_object_pip_wakeup(obj); 3694 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3695 iosize -= resid; 3696 } 3697 if (bp->b_flags & B_HASBOGUS) { 3698 pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data), 3699 bp->b_xio.xio_pages, 3700 bp->b_xio.xio_npages); 3701 bp->b_flags &= ~B_HASBOGUS; 3702 bkvareset(bp); 3703 } 3704 vm_object_drop(obj); 3705 } 3706 3707 /* 3708 * Finish up by releasing the buffer. There are no more synchronous 3709 * or asynchronous completions, those were handled by bio_done 3710 * callbacks. 3711 */ 3712 if (elseit) { 3713 if (bp->b_flags & (B_NOCACHE|B_INVAL|B_ERROR|B_RELBUF)) 3714 brelse(bp); 3715 else 3716 bqrelse(bp); 3717 } 3718 } 3719 3720 /* 3721 * Normal biodone. 3722 */ 3723 void 3724 biodone(struct bio *bio) 3725 { 3726 struct buf *bp = bio->bio_buf; 3727 3728 runningbufwakeup(bp); 3729 3730 /* 3731 * Run up the chain of BIO's. Leave b_cmd intact for the duration. 3732 */ 3733 while (bio) { 3734 biodone_t *done_func; 3735 struct bio_track *track; 3736 3737 /* 3738 * BIO tracking. Most but not all BIOs are tracked. 3739 */ 3740 if ((track = bio->bio_track) != NULL) { 3741 bio_track_rel(track); 3742 bio->bio_track = NULL; 3743 } 3744 3745 /* 3746 * A bio_done function terminates the loop. The function 3747 * will be responsible for any further chaining and/or 3748 * buffer management. 3749 * 3750 * WARNING! The done function can deallocate the buffer! 3751 */ 3752 if ((done_func = bio->bio_done) != NULL) { 3753 bio->bio_done = NULL; 3754 done_func(bio); 3755 return; 3756 } 3757 bio = bio->bio_prev; 3758 } 3759 3760 /* 3761 * If we've run out of bio's do normal [a]synchronous completion. 3762 */ 3763 bpdone(bp, 1); 3764 } 3765 3766 /* 3767 * Synchronous biodone - this terminates a synchronous BIO. 3768 * 3769 * bpdone() is called with elseit=FALSE, leaving the buffer completed 3770 * but still locked. The caller must brelse() the buffer after waiting 3771 * for completion. 3772 */ 3773 void 3774 biodone_sync(struct bio *bio) 3775 { 3776 struct buf *bp = bio->bio_buf; 3777 int flags; 3778 int nflags; 3779 3780 KKASSERT(bio == &bp->b_bio1); 3781 bpdone(bp, 0); 3782 3783 for (;;) { 3784 flags = bio->bio_flags; 3785 nflags = (flags | BIO_DONE) & ~BIO_WANT; 3786 3787 if (atomic_cmpset_int(&bio->bio_flags, flags, nflags)) { 3788 if (flags & BIO_WANT) 3789 wakeup(bio); 3790 break; 3791 } 3792 } 3793 } 3794 3795 /* 3796 * vfs_unbusy_pages: 3797 * 3798 * This routine is called in lieu of iodone in the case of 3799 * incomplete I/O. This keeps the busy status for pages 3800 * consistant. 
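 *
 * In effect it undoes the soft-busy and paging-in-progress accounting
 * set up by vfs_busy_pages() below when an I/O is aborted before being
 * issued, releasing each page via vm_page_io_finish() and
 * vm_object_pip_wakeup().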
3801  */
3802 void
3803 vfs_unbusy_pages(struct buf *bp)
3804 {
3805 	int i;
3806 
3807 	runningbufwakeup(bp);
3808 
3809 	if (bp->b_flags & B_VMIO) {
3810 		struct vnode *vp = bp->b_vp;
3811 		vm_object_t obj;
3812 
3813 		obj = vp->v_object;
3814 		vm_object_hold(obj);
3815 
3816 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
3817 			vm_page_t m = bp->b_xio.xio_pages[i];
3818 
3819 			/*
3820 			 * When restoring bogus pages, the original pages
3821 			 * should still be wired, so we are in no danger of
3822 			 * losing the object association and do not need
3823 			 * critical section protection particularly.
3824 			 */
3825 			if (m == bogus_page) {
3826 				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_loffset) + i);
3827 				if (!m) {
3828 					panic("vfs_unbusy_pages: page missing");
3829 				}
3830 				bp->b_xio.xio_pages[i] = m;
3831 			}
3832 			vm_page_busy_wait(m, FALSE, "bpdpgw");
3833 			vm_page_io_finish(m);
3834 			vm_page_wakeup(m);
3835 			vm_object_pip_wakeup(obj);
3836 		}
3837 		if (bp->b_flags & B_HASBOGUS) {
3838 			pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data),
3839 					    bp->b_xio.xio_pages,
3840 					    bp->b_xio.xio_npages);
3841 			bp->b_flags &= ~B_HASBOGUS;
3842 			bkvareset(bp);
3843 		}
3844 		vm_object_drop(obj);
3845 	}
3846 }
3847 
3848 /*
3849  * vfs_busy_pages:
3850  *
3851  *	This routine is called before a device strategy routine.
3852  *	It is used to tell the VM system that paging I/O is in
3853  *	progress, and treat the pages associated with the buffer
3854  *	almost as being PBUSY_LOCKED.  Also the object 'paging_in_progress'
3855  *	flag is handled to make sure that the object doesn't become
3856  *	inconsistent.
3857  *
3858  *	Since I/O has not been initiated yet, certain buffer flags
3859  *	such as B_ERROR or B_INVAL may be in an inconsistent state
3860  *	and should be ignored.
3861  */
3862 void
3863 vfs_busy_pages(struct vnode *vp, struct buf *bp)
3864 {
3865 	int i, bogus;
3866 	struct lwp *lp = curthread->td_lwp;
3867 
3868 	/*
3869 	 * The buffer's I/O command must already be set.  If reading,
3870 	 * B_CACHE must be 0 (double check against callers only doing
3871 	 * I/O when B_CACHE is 0).
3872 	 */
3873 	KKASSERT(bp->b_cmd != BUF_CMD_DONE);
3874 	KKASSERT(bp->b_cmd == BUF_CMD_WRITE || (bp->b_flags & B_CACHE) == 0);
3875 
3876 	if (bp->b_flags & B_VMIO) {
3877 		vm_object_t obj;
3878 
3879 		obj = vp->v_object;
3880 		KASSERT(bp->b_loffset != NOOFFSET,
3881 			("vfs_busy_pages: no buffer offset"));
3882 
3883 		/*
3884 		 * Busy all the pages.  We have to busy them all at once
3885 		 * to avoid deadlocks.
3886 		 */
3887 retry:
3888 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
3889 			vm_page_t m = bp->b_xio.xio_pages[i];
3890 
3891 			if (vm_page_busy_try(m, FALSE)) {
3892 				vm_page_sleep_busy(m, FALSE, "vbpage");
3893 				while (--i >= 0)
3894 					vm_page_wakeup(bp->b_xio.xio_pages[i]);
3895 				goto retry;
3896 			}
3897 		}
3898 
3899 		/*
3900 		 * Setup for I/O, soft-busy the page right now because
3901 		 * the next loop may block.
3902 		 */
3903 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
3904 			vm_page_t m = bp->b_xio.xio_pages[i];
3905 
3906 			if ((bp->b_flags & B_CLUSTER) == 0) {
3907 				vm_object_pip_add(obj, 1);
3908 				vm_page_io_start(m);
3909 			}
3910 		}
3911 
3912 		/*
3913 		 * Adjust protections for I/O and do bogus-page mapping.
3914 		 * Assume that vm_page_protect() can block (it can block
3915 		 * if VM_PROT_NONE, don't take any chances regardless).
3916 		 *
3917 		 * In particular note that for writes we must incorporate
3918 		 * page dirtiness from the VM system into the buffer's
3919 		 * dirty range.
3920 		 *
3921 		 * For reads we theoretically must incorporate page dirtiness
3922 		 * from the VM system to determine if the page needs bogus
3923 		 * replacement, but we shortcut the test by simply checking
3924 		 * that all m->valid bits are set, indicating that the page
3925 		 * is fully valid and does not need to be re-read.  For any
3926 		 * VM system dirtiness the page will also be fully valid
3927 		 * since it was mapped at one point.
3928 		 */
3929 		bogus = 0;
3930 		for (i = 0; i < bp->b_xio.xio_npages; i++) {
3931 			vm_page_t m = bp->b_xio.xio_pages[i];
3932 
3933 			if (bp->b_cmd == BUF_CMD_WRITE) {
3934 				/*
3935 				 * When readying a vnode-backed buffer for
3936 				 * a write we must zero-fill any invalid
3937 				 * portions of the backing VM pages, mark
3938 				 * it valid and clear related dirty bits.
3939 				 *
3940 				 * vfs_clean_one_page() incorporates any
3941 				 * VM dirtiness and updates the b_dirtyoff
3942 				 * range (after we've made the page RO).
3943 				 *
3944 				 * It is also expected that the pmap modified
3945 				 * bit has already been cleared by the
3946 				 * vm_page_protect().  We may not be able
3947 				 * to clear all dirty bits for a page if it
3948 				 * was also memory mapped (NFS).
3949 				 *
3950 				 * Finally be sure to unassign any swap-cache
3951 				 * backing store as it is now stale.
3952 				 */
3953 				vm_page_protect(m, VM_PROT_READ);
3954 				vfs_clean_one_page(bp, i, m);
3955 				swap_pager_unswapped(m);
3956 			} else if (m->valid == VM_PAGE_BITS_ALL) {
3957 				/*
3958 				 * When readying a vnode-backed buffer for
3959 				 * read we must replace any dirty pages with
3960 				 * a bogus page so dirty data is not destroyed
3961 				 * when filling gaps.
3962 				 *
3963 				 * To avoid testing whether the page is
3964 				 * dirty we instead test that the page was
3965 				 * at some point mapped (m->valid fully
3966 				 * valid) with the understanding that
3967 				 * this also covers the dirty case.
3968 				 */
3969 				bp->b_xio.xio_pages[i] = bogus_page;
3970 				bp->b_flags |= B_HASBOGUS;
3971 				bogus++;
3972 			} else if (m->valid & m->dirty) {
3973 				/*
3974 				 * This case should not occur as partial
3975 				 * dirtying can only happen if the buffer
3976 				 * is B_CACHE, and this code is not entered
3977 				 * if the buffer is B_CACHE.
3978 				 */
3979 				kprintf("Warning: vfs_busy_pages - page not "
3980 					"fully valid! loff=%jx bpf=%08x "
3981 					"idx=%d val=%02x dir=%02x\n",
3982 					(uintmax_t)bp->b_loffset, bp->b_flags,
3983 					i, m->valid, m->dirty);
3984 				vm_page_protect(m, VM_PROT_NONE);
3985 			} else {
3986 				/*
3987 				 * The page is not valid and can be made
3988 				 * part of the read.
3989 				 */
3990 				vm_page_protect(m, VM_PROT_NONE);
3991 			}
3992 			vm_page_wakeup(m);
3993 		}
3994 		if (bogus) {
3995 			pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data),
3996 					    bp->b_xio.xio_pages,
3997 					    bp->b_xio.xio_npages);
3998 			bkvareset(bp);
3999 		}
4000 	}
4001 
4002 	/*
4003 	 * This is the easiest place to put the process accounting for the I/O
4004 	 * for now.
4005 	 */
4006 	if (lp != NULL) {
4007 		if (bp->b_cmd == BUF_CMD_READ)
4008 			lp->lwp_ru.ru_inblock++;
4009 		else
4010 			lp->lwp_ru.ru_oublock++;
4011 	}
4012 }
4013 
4014 /*
4015  * Tell the VM system that the pages associated with this buffer
4016  * are clean.  This is used for delayed writes where the data is
4017  * going to go to disk eventually without additional VM intervention.
4018  *
4019  * NOTE: While we only really need to clean through to b_bcount, we
4020  *	 just go ahead and clean through to b_bufsize.
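 *
 * A hedged sketch of the delayed-write sequence this supports (see
 * bdwrite() earlier in this file for the authoritative ordering):
 *
 *	bdirty(bp);		mark the buffer B_DELWRI
 *	vfs_clean_pages(bp);	pages no longer appear dirty to the VM system
 *	bqrelse(bp);		buf_daemon flushes the buffer later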
4021 */ 4022 static void 4023 vfs_clean_pages(struct buf *bp) 4024 { 4025 vm_page_t m; 4026 int i; 4027 4028 if ((bp->b_flags & B_VMIO) == 0) 4029 return; 4030 4031 KASSERT(bp->b_loffset != NOOFFSET, 4032 ("vfs_clean_pages: no buffer offset")); 4033 4034 for (i = 0; i < bp->b_xio.xio_npages; i++) { 4035 m = bp->b_xio.xio_pages[i]; 4036 vfs_clean_one_page(bp, i, m); 4037 } 4038 } 4039 4040 /* 4041 * vfs_clean_one_page: 4042 * 4043 * Set the valid bits and clear the dirty bits in a page within a 4044 * buffer. The range is restricted to the buffer's size and the 4045 * buffer's logical offset might index into the first page. 4046 * 4047 * The caller has busied or soft-busied the page and it is not mapped, 4048 * test and incorporate the dirty bits into b_dirtyoff/end before 4049 * clearing them. Note that we need to clear the pmap modified bits 4050 * after determining the the page was dirty, vm_page_set_validclean() 4051 * does not do it for us. 4052 * 4053 * This routine is typically called after a read completes (dirty should 4054 * be zero in that case as we are not called on bogus-replace pages), 4055 * or before a write is initiated. 4056 */ 4057 static void 4058 vfs_clean_one_page(struct buf *bp, int pageno, vm_page_t m) 4059 { 4060 int bcount; 4061 int xoff; 4062 int soff; 4063 int eoff; 4064 4065 /* 4066 * Calculate offset range within the page but relative to buffer's 4067 * loffset. loffset might be offset into the first page. 4068 */ 4069 xoff = (int)bp->b_loffset & PAGE_MASK; /* loffset offset into pg 0 */ 4070 bcount = bp->b_bcount + xoff; /* offset adjusted */ 4071 4072 if (pageno == 0) { 4073 soff = xoff; 4074 eoff = PAGE_SIZE; 4075 } else { 4076 soff = (pageno << PAGE_SHIFT); 4077 eoff = soff + PAGE_SIZE; 4078 } 4079 if (eoff > bcount) 4080 eoff = bcount; 4081 if (soff >= eoff) 4082 return; 4083 4084 /* 4085 * Test dirty bits and adjust b_dirtyoff/end. 4086 * 4087 * If dirty pages are incorporated into the bp any prior 4088 * B_NEEDCOMMIT state (NFS) must be cleared because the 4089 * caller has not taken into account the new dirty data. 4090 * 4091 * If the page was memory mapped the dirty bits might go beyond the 4092 * end of the buffer, but we can't really make the assumption that 4093 * a file EOF straddles the buffer (even though this is the case for 4094 * NFS if B_NEEDCOMMIT is also set). So for the purposes of clearing 4095 * B_NEEDCOMMIT we only test the dirty bits covered by the buffer. 4096 * This also saves some console spam. 4097 * 4098 * When clearing B_NEEDCOMMIT we must also clear B_CLUSTEROK, 4099 * NFS can handle huge commits but not huge writes. 4100 */ 4101 vm_page_test_dirty(m); 4102 if (m->dirty) { 4103 if ((bp->b_flags & B_NEEDCOMMIT) && 4104 (m->dirty & vm_page_bits(soff & PAGE_MASK, eoff - soff))) { 4105 if (debug_commit) 4106 kprintf("Warning: vfs_clean_one_page: bp %p " 4107 "loff=%jx,%d flgs=%08x clr B_NEEDCOMMIT" 4108 " cmd %d vd %02x/%02x x/s/e %d %d %d " 4109 "doff/end %d %d\n", 4110 bp, (uintmax_t)bp->b_loffset, bp->b_bcount, 4111 bp->b_flags, bp->b_cmd, 4112 m->valid, m->dirty, xoff, soff, eoff, 4113 bp->b_dirtyoff, bp->b_dirtyend); 4114 bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK); 4115 if (debug_commit) 4116 print_backtrace(-1); 4117 } 4118 /* 4119 * Only clear the pmap modified bits if ALL the dirty bits 4120 * are set, otherwise the system might mis-clear portions 4121 * of a page. 
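 *
 * For scale (assuming 4KiB pages and a 512-byte DEV_BSIZE) the
 * dirty mask has 8 bits and VM_PAGE_BITS_ALL is 0xff; a value such
 * as 0x0f means only the first four 512-byte chunks are recorded
 * dirty, and clearing the pmap modified bit in that state is
 * exactly the partial-page mis-clear described above.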
4122 */ 4123 if (m->dirty == VM_PAGE_BITS_ALL && 4124 (bp->b_flags & B_NEEDCOMMIT) == 0) { 4125 pmap_clear_modify(m); 4126 } 4127 if (bp->b_dirtyoff > soff - xoff) 4128 bp->b_dirtyoff = soff - xoff; 4129 if (bp->b_dirtyend < eoff - xoff) 4130 bp->b_dirtyend = eoff - xoff; 4131 } 4132 4133 /* 4134 * Set related valid bits, clear related dirty bits. 4135 * Does not mess with the pmap modified bit. 4136 * 4137 * WARNING! We cannot just clear all of m->dirty here as the 4138 * buffer cache buffers may use a DEV_BSIZE'd aligned 4139 * block size, or have an odd size (e.g. NFS at file EOF). 4140 * The putpages code can clear m->dirty to 0. 4141 * 4142 * If a VOP_WRITE generates a buffer cache buffer which 4143 * covers the same space as mapped writable pages the 4144 * buffer flush might not be able to clear all the dirty 4145 * bits and still require a putpages from the VM system 4146 * to finish it off. 4147 * 4148 * WARNING! vm_page_set_validclean() currently assumes vm_token 4149 * is held. The page might not be busied (bdwrite() case). 4150 * XXX remove this comment once we've validated that this 4151 * is no longer an issue. 4152 */ 4153 vm_page_set_validclean(m, soff & PAGE_MASK, eoff - soff); 4154 } 4155 4156 #if 0 4157 /* 4158 * Similar to vfs_clean_one_page() but sets the bits to valid and dirty. 4159 * The page data is assumed to be valid (there is no zeroing here). 4160 */ 4161 static void 4162 vfs_dirty_one_page(struct buf *bp, int pageno, vm_page_t m) 4163 { 4164 int bcount; 4165 int xoff; 4166 int soff; 4167 int eoff; 4168 4169 /* 4170 * Calculate offset range within the page but relative to buffer's 4171 * loffset. loffset might be offset into the first page. 4172 */ 4173 xoff = (int)bp->b_loffset & PAGE_MASK; /* loffset offset into pg 0 */ 4174 bcount = bp->b_bcount + xoff; /* offset adjusted */ 4175 4176 if (pageno == 0) { 4177 soff = xoff; 4178 eoff = PAGE_SIZE; 4179 } else { 4180 soff = (pageno << PAGE_SHIFT); 4181 eoff = soff + PAGE_SIZE; 4182 } 4183 if (eoff > bcount) 4184 eoff = bcount; 4185 if (soff >= eoff) 4186 return; 4187 vm_page_set_validdirty(m, soff & PAGE_MASK, eoff - soff); 4188 } 4189 #endif 4190 4191 /* 4192 * vfs_bio_clrbuf: 4193 * 4194 * Clear a buffer. This routine essentially fakes an I/O, so we need 4195 * to clear B_ERROR and B_INVAL. 4196 * 4197 * Note that while we only theoretically need to clear through b_bcount, 4198 * we go ahead and clear through b_bufsize. 
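 *
 * A worked example of the single-page fast path below (assuming a
 * 512-byte DEV_BSIZE): a 2048-byte buffer at a page-aligned loffset
 * yields mask = (1 << (2048 / 512)) - 1 = 0x0f.  If the low four
 * valid bits of the page are all set there is nothing to do; if
 * none are set we bzero() the 2KiB and set them; otherwise we fall
 * through to the general per-DEV_BSIZE loop.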
4199 */ 4200 void 4201 vfs_bio_clrbuf(struct buf *bp) 4202 { 4203 int i, mask = 0; 4204 caddr_t sa, ea; 4205 KKASSERT(bp->b_flags & B_VMIO); 4206 4207 bp->b_flags &= ~(B_INVAL | B_EINTR | B_ERROR); 4208 bkvasync(bp); 4209 4210 if ((bp->b_xio.xio_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && 4211 (bp->b_loffset & PAGE_MASK) == 0) { 4212 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; 4213 if ((bp->b_xio.xio_pages[0]->valid & mask) == mask) { 4214 bp->b_resid = 0; 4215 return; 4216 } 4217 if ((bp->b_xio.xio_pages[0]->valid & mask) == 0) { 4218 bzero(bp->b_data, bp->b_bufsize); 4219 bp->b_xio.xio_pages[0]->valid |= mask; 4220 bp->b_resid = 0; 4221 return; 4222 } 4223 } 4224 sa = bp->b_data; 4225 for(i = 0; i < bp->b_xio.xio_npages; i++, sa=ea) { 4226 int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE; 4227 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE); 4228 ea = (caddr_t)(vm_offset_t)ulmin( 4229 (u_long)(vm_offset_t)ea, 4230 (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize); 4231 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; 4232 if ((bp->b_xio.xio_pages[i]->valid & mask) == mask) 4233 continue; 4234 if ((bp->b_xio.xio_pages[i]->valid & mask) == 0) { 4235 bzero(sa, ea - sa); 4236 } else { 4237 for (; sa < ea; sa += DEV_BSIZE, j++) { 4238 if ((bp->b_xio.xio_pages[i]->valid & 4239 (1<<j)) == 0) { 4240 bzero(sa, DEV_BSIZE); 4241 } 4242 } 4243 } 4244 bp->b_xio.xio_pages[i]->valid |= mask; 4245 } 4246 bp->b_resid = 0; 4247 } 4248 4249 /* 4250 * Allocate a page for a buffer cache buffer. 4251 * 4252 * If NULL is returned the caller is expected to retry (typically check if 4253 * the page already exists on retry before trying to allocate one). 4254 * 4255 * NOTE! Low-memory handling is dealt with in b[q]relse(), not here. This 4256 * function will use the system reserve with the hope that the page 4257 * allocations can be returned to PQ_CACHE/PQ_FREE when the caller 4258 * is done with the buffer. 4259 * 4260 * NOTE! However, TMPFS is a special case because flushing a dirty buffer 4261 * to TMPFS doesn't clean the page. For TMPFS, only the pagedaemon 4262 * is capable of retiring pages (to swap). For TMPFS we don't dig 4263 * into the system reserve because doing so could stall out pretty 4264 * much every process running on the system. 4265 */ 4266 static 4267 vm_page_t 4268 bio_page_alloc(struct buf *bp, vm_object_t obj, vm_pindex_t pg, int deficit) 4269 { 4270 int vmflags = VM_ALLOC_NORMAL | VM_ALLOC_NULL_OK; 4271 vm_page_t p; 4272 4273 ASSERT_LWKT_TOKEN_HELD(vm_object_token(obj)); 4274 4275 /* 4276 * Try a normal allocation first. 4277 */ 4278 p = vm_page_alloc(obj, pg, vmflags); 4279 if (p) 4280 return(p); 4281 if (vm_page_lookup(obj, pg)) 4282 return(NULL); 4283 vm_pageout_deficit += deficit; 4284 4285 /* 4286 * Try again, digging into the system reserve. 4287 * 4288 * Trying to recover pages from the buffer cache here can deadlock 4289 * against other threads trying to busy underlying pages so we 4290 * depend on the code in brelse() and bqrelse() to free/cache the 4291 * underlying buffer cache pages when memory is low. 
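 *
 * Hedged summary of the escalation below: kernel system threads may
 * dig into both the system and interrupt reserves, TMPFS-backed
 * buffers get no extra privilege (flushing them cannot free the
 * underlying page, per the function header above), and everything
 * else may use the system reserve.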
4292 */ 4293 if (curthread->td_flags & TDF_SYSTHREAD) 4294 vmflags |= VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT; 4295 else if (bp->b_vp && bp->b_vp->v_tag == VT_TMPFS) 4296 vmflags |= 0; 4297 else 4298 vmflags |= VM_ALLOC_SYSTEM; 4299 4300 /*recoverbufpages();*/ 4301 p = vm_page_alloc(obj, pg, vmflags); 4302 if (p) 4303 return(p); 4304 if (vm_page_lookup(obj, pg)) 4305 return(NULL); 4306 4307 /* 4308 * Wait for memory to free up and try again 4309 */ 4310 if (vm_page_count_severe()) 4311 ++lowmempgallocs; 4312 vm_wait(hz / 20 + 1); 4313 4314 p = vm_page_alloc(obj, pg, vmflags); 4315 if (p) 4316 return(p); 4317 if (vm_page_lookup(obj, pg)) 4318 return(NULL); 4319 4320 /* 4321 * Ok, now we are really in trouble. 4322 */ 4323 if (bootverbose) { 4324 static struct krate biokrate = { .freq = 1 }; 4325 krateprintf(&biokrate, 4326 "Warning: bio_page_alloc: memory exhausted " 4327 "during buffer cache page allocation from %s\n", 4328 curthread->td_comm); 4329 } 4330 if (curthread->td_flags & TDF_SYSTHREAD) 4331 vm_wait(hz / 20 + 1); 4332 else 4333 vm_wait(hz / 2 + 1); 4334 return (NULL); 4335 } 4336 4337 /* 4338 * The buffer's mapping has changed. Adjust the buffer's memory 4339 * synchronization. The caller is the exclusive holder of the buffer 4340 * and has set or cleared B_KVABIO according to preference. 4341 * 4342 * WARNING! If the caller is using B_KVABIO mode, this function will 4343 * not map the data to the current cpu. The caller must also 4344 * call bkvasync(bp). 4345 */ 4346 void 4347 bkvareset(struct buf *bp) 4348 { 4349 if (bp->b_flags & B_KVABIO) { 4350 CPUMASK_ASSZERO(bp->b_cpumask); 4351 } else { 4352 CPUMASK_ORMASK(bp->b_cpumask, smp_active_mask); 4353 smp_invltlb(); 4354 cpu_invltlb(); 4355 } 4356 } 4357 4358 /* 4359 * The buffer will be used by the caller on the caller's cpu, synchronize 4360 * its data to the current cpu. 4361 * 4362 * If B_KVABIO is not set, the buffer is already fully synchronized. 4363 */ 4364 void 4365 bkvasync(struct buf *bp) 4366 { 4367 int cpuid = mycpu->gd_cpuid; 4368 char *bdata; 4369 4370 if ((bp->b_flags & B_KVABIO) && 4371 CPUMASK_TESTBIT(bp->b_cpumask, cpuid) == 0) { 4372 bdata = bp->b_data; 4373 while (bdata < bp->b_data + bp->b_bufsize) { 4374 cpu_invlpg(bdata); 4375 bdata += PAGE_SIZE - 4376 ((intptr_t)bdata & PAGE_MASK); 4377 } 4378 ATOMIC_CPUMASK_ORBIT(bp->b_cpumask, cpuid); 4379 } 4380 } 4381 4382 /* 4383 * The buffer will be used by a subsystem that does not understand 4384 * the KVABIO API. Make sure its data is synchronized to all cpus. 4385 * 4386 * If B_KVABIO is not set, the buffer is already fully synchronized. 4387 * 4388 * NOTE! This is the only safe way to clear B_KVABIO on a buffer. 4389 */ 4390 void 4391 bkvasync_all(struct buf *bp) 4392 { 4393 if (debug_kvabio > 0) { 4394 --debug_kvabio; 4395 print_backtrace(10); 4396 } 4397 4398 if ((bp->b_flags & B_KVABIO) && 4399 CPUMASK_CMPMASKNEQ(bp->b_cpumask, smp_active_mask)) { 4400 smp_invltlb(); 4401 cpu_invltlb(); 4402 ATOMIC_CPUMASK_ORMASK(bp->b_cpumask, smp_active_mask); 4403 } 4404 bp->b_flags &= ~B_KVABIO; 4405 } 4406 4407 /* 4408 * Scan all buffers in the system and issue the callback. 
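 *
 * A minimal usage sketch (the callback name and its B_DELWRI test
 * are illustrative only):
 *
 *	static int
 *	count_delwri(struct buf *bp, void *info)
 *	{
 *		return ((bp->b_flags & B_DELWRI) ? 1 : 0);
 *	}
 *
 *	ndirty = scan_all_buffers(count_delwri, NULL);
 *
 * Non-negative callback returns are summed into the result; a
 * negative return aborts the scan and is returned as-is.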
4409 */ 4410 int 4411 scan_all_buffers(int (*callback)(struct buf *, void *), void *info) 4412 { 4413 int count = 0; 4414 int error; 4415 long n; 4416 4417 for (n = 0; n < nbuf; ++n) { 4418 if ((error = callback(&buf[n], info)) < 0) { 4419 count = error; 4420 break; 4421 } 4422 count += error; 4423 } 4424 return (count); 4425 } 4426 4427 /* 4428 * nestiobuf_iodone: biodone callback for nested buffers and propagate 4429 * completion to the master buffer. 4430 */ 4431 static void 4432 nestiobuf_iodone(struct bio *bio) 4433 { 4434 struct bio *mbio; 4435 struct buf *mbp, *bp; 4436 struct devstat *stats; 4437 int error; 4438 int donebytes; 4439 4440 bp = bio->bio_buf; 4441 mbio = bio->bio_caller_info1.ptr; 4442 stats = bio->bio_caller_info2.ptr; 4443 mbp = mbio->bio_buf; 4444 4445 KKASSERT(bp->b_bcount <= bp->b_bufsize); 4446 KKASSERT(mbp != bp); 4447 4448 error = bp->b_error; 4449 if (bp->b_error == 0 && 4450 (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) { 4451 /* 4452 * Not all got transfered, raise an error. We have no way to 4453 * propagate these conditions to mbp. 4454 */ 4455 error = EIO; 4456 } 4457 4458 donebytes = bp->b_bufsize; 4459 4460 relpbuf(bp, NULL); 4461 4462 nestiobuf_done(mbio, donebytes, error, stats); 4463 } 4464 4465 void 4466 nestiobuf_done(struct bio *mbio, int donebytes, int error, struct devstat *stats) 4467 { 4468 struct buf *mbp; 4469 4470 mbp = mbio->bio_buf; 4471 4472 KKASSERT((int)(intptr_t)mbio->bio_driver_info > 0); 4473 4474 /* 4475 * If an error occured, propagate it to the master buffer. 4476 * 4477 * Several biodone()s may wind up running concurrently so 4478 * use an atomic op to adjust b_flags. 4479 */ 4480 if (error) { 4481 mbp->b_error = error; 4482 atomic_set_int(&mbp->b_flags, B_ERROR); 4483 } 4484 4485 /* 4486 * Decrement the operations in progress counter and terminate the 4487 * I/O if this was the last bit. 4488 */ 4489 if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) { 4490 mbp->b_resid = 0; 4491 if (stats) 4492 devstat_end_transaction_buf(stats, mbp); 4493 biodone(mbio); 4494 } 4495 } 4496 4497 /* 4498 * Initialize a nestiobuf for use. Set an initial count of 1 to prevent 4499 * the mbio from being biodone()'d while we are still adding sub-bios to 4500 * it. 4501 */ 4502 void 4503 nestiobuf_init(struct bio *bio) 4504 { 4505 bio->bio_driver_info = (void *)1; 4506 } 4507 4508 /* 4509 * The BIOs added to the nestedio have already been started, remove the 4510 * count that placeheld our mbio and biodone() it if the count would 4511 * transition to 0. 4512 */ 4513 void 4514 nestiobuf_start(struct bio *mbio) 4515 { 4516 struct buf *mbp = mbio->bio_buf; 4517 4518 /* 4519 * Decrement the operations in progress counter and terminate the 4520 * I/O if this was the last bit. 4521 */ 4522 if (atomic_fetchadd_int((int *)&mbio->bio_driver_info, -1) == 1) { 4523 if (mbp->b_flags & B_ERROR) 4524 mbp->b_resid = mbp->b_bcount; 4525 else 4526 mbp->b_resid = 0; 4527 biodone(mbio); 4528 } 4529 } 4530 4531 /* 4532 * Set an intermediate error prior to calling nestiobuf_start() 4533 */ 4534 void 4535 nestiobuf_error(struct bio *mbio, int error) 4536 { 4537 struct buf *mbp = mbio->bio_buf; 4538 4539 if (error) { 4540 mbp->b_error = error; 4541 atomic_set_int(&mbp->b_flags, B_ERROR); 4542 } 4543 } 4544 4545 /* 4546 * nestiobuf_add: setup a "nested" buffer. 4547 * 4548 * => 'mbp' is a "master" buffer which is being divided into sub pieces. 4549 * => 'bp' should be a buffer allocated by getiobuf. 
4550 * => 'offset' is a byte offset in the master buffer. 4551 * => 'size' is a size in bytes of this nested buffer. 4552 */ 4553 void 4554 nestiobuf_add(struct bio *mbio, struct buf *bp, int offset, size_t size, struct devstat *stats) 4555 { 4556 struct buf *mbp = mbio->bio_buf; 4557 struct vnode *vp = mbp->b_vp; 4558 4559 KKASSERT(mbp->b_bcount >= offset + size); 4560 4561 atomic_add_int((int *)&mbio->bio_driver_info, 1); 4562 4563 /* kernel needs to own the lock for it to be released in biodone */ 4564 BUF_KERNPROC(bp); 4565 bp->b_vp = vp; 4566 bp->b_cmd = mbp->b_cmd; 4567 bp->b_bio1.bio_done = nestiobuf_iodone; 4568 bp->b_data = (char *)mbp->b_data + offset; 4569 bp->b_resid = bp->b_bcount = size; 4570 bp->b_bufsize = bp->b_bcount; 4571 4572 bp->b_bio1.bio_track = NULL; 4573 bp->b_bio1.bio_caller_info1.ptr = mbio; 4574 bp->b_bio1.bio_caller_info2.ptr = stats; 4575 } 4576 4577 #ifdef DDB 4578 4579 DB_SHOW_COMMAND(buffer, db_show_buffer) 4580 { 4581 /* get args */ 4582 struct buf *bp = (struct buf *)addr; 4583 4584 if (!have_addr) { 4585 db_printf("usage: show buffer <addr>\n"); 4586 return; 4587 } 4588 4589 db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS); 4590 db_printf("b_cmd = %d\n", bp->b_cmd); 4591 db_printf("b_error = %d, b_bufsize = %d, b_bcount = %d, " 4592 "b_resid = %d\n, b_data = %p, " 4593 "bio_offset(disk) = %lld, bio_offset(phys) = %lld\n", 4594 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 4595 bp->b_data, 4596 (long long)bp->b_bio2.bio_offset, 4597 (long long)(bp->b_bio2.bio_next ? 4598 bp->b_bio2.bio_next->bio_offset : (off_t)-1)); 4599 if (bp->b_xio.xio_npages) { 4600 int i; 4601 db_printf("b_xio.xio_npages = %d, pages(OBJ, IDX, PA): ", 4602 bp->b_xio.xio_npages); 4603 for (i = 0; i < bp->b_xio.xio_npages; i++) { 4604 vm_page_t m; 4605 m = bp->b_xio.xio_pages[i]; 4606 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 4607 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 4608 if ((i + 1) < bp->b_xio.xio_npages) 4609 db_printf(","); 4610 } 4611 db_printf("\n"); 4612 } 4613 } 4614 #endif /* DDB */ 4615
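
/*
 * A hedged end-to-end sketch of the nestiobuf_*() API above.  The
 * getiobuf()/vn_strategy() call sites, the 'chunk' size, and the
 * absence of error handling are illustrative only:
 *
 *	nestiobuf_init(mbio);			placehold count = 1
 *	for (off = 0; off < mbp->b_bcount; off += chunk) {
 *		bp = getiobuf(...);
 *		nestiobuf_add(mbio, bp, off, chunk, stats);
 *		vn_strategy(vp, &bp->b_bio1);
 *	}
 *	nestiobuf_start(mbio);			drop the placehold
 *
 * Each sub-buffer completes via nestiobuf_iodone() and biodone(mbio)
 * fires once the outstanding count reaches zero.
 */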