1 /* 2 * Copyright (c) 1994,1997 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Absolutely no warranty of function or purpose is made by the author 12 * John S. Dyson. 13 * 14 * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $ 15 * $DragonFly: src/sys/kern/vfs_bio.c,v 1.115 2008/08/13 11:02:31 swildner Exp $ 16 */ 17 18 /* 19 * this file contains a new buffer I/O scheme implementing a coherent 20 * VM object and buffer cache scheme. Pains have been taken to make 21 * sure that the performance degradation associated with schemes such 22 * as this is not realized. 23 * 24 * Author: John S. Dyson 25 * Significant help during the development and debugging phases 26 * had been provided by David Greenman, also of the FreeBSD core team. 27 * 28 * see man buf(9) for more info. 29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/buf.h> 34 #include <sys/conf.h> 35 #include <sys/eventhandler.h> 36 #include <sys/lock.h> 37 #include <sys/malloc.h> 38 #include <sys/mount.h> 39 #include <sys/kernel.h> 40 #include <sys/kthread.h> 41 #include <sys/proc.h> 42 #include <sys/reboot.h> 43 #include <sys/resourcevar.h> 44 #include <sys/sysctl.h> 45 #include <sys/vmmeter.h> 46 #include <sys/vnode.h> 47 #include <sys/proc.h> 48 #include <vm/vm.h> 49 #include <vm/vm_param.h> 50 #include <vm/vm_kern.h> 51 #include <vm/vm_pageout.h> 52 #include <vm/vm_page.h> 53 #include <vm/vm_object.h> 54 #include <vm/vm_extern.h> 55 #include <vm/vm_map.h> 56 57 #include <sys/buf2.h> 58 #include <sys/thread2.h> 59 #include <sys/spinlock2.h> 60 #include <vm/vm_page2.h> 61 62 #include "opt_ddb.h" 63 #ifdef DDB 64 #include <ddb/ddb.h> 65 #endif 66 67 /* 68 * Buffer queues. 69 */ 70 enum bufq_type { 71 BQUEUE_NONE, /* not on any queue */ 72 BQUEUE_LOCKED, /* locked buffers */ 73 BQUEUE_CLEAN, /* non-B_DELWRI buffers */ 74 BQUEUE_DIRTY, /* B_DELWRI buffers */ 75 BQUEUE_DIRTY_HW, /* B_DELWRI buffers - heavy weight */ 76 BQUEUE_EMPTYKVA, /* empty buffer headers with KVA assignment */ 77 BQUEUE_EMPTY, /* empty buffer headers */ 78 79 BUFFER_QUEUES /* number of buffer queues */ 80 }; 81 82 typedef enum bufq_type bufq_type_t; 83 84 #define BD_WAKE_SIZE 128 85 #define BD_WAKE_MASK (BD_WAKE_SIZE - 1) 86 87 TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES]; 88 89 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer"); 90 91 struct buf *buf; /* buffer header pool */ 92 93 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, 94 int pageno, vm_page_t m); 95 static void vfs_clean_pages(struct buf *bp); 96 static void vfs_setdirty(struct buf *bp); 97 static void vfs_vmio_release(struct buf *bp); 98 static int flushbufqueues(bufq_type_t q); 99 static vm_page_t bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit); 100 101 static void bd_signal(int totalspace); 102 static void buf_daemon(void); 103 static void buf_daemon_hw(void); 104 105 /* 106 * bogus page -- for I/O to/from partially complete buffers 107 * this is a temporary solution to the problem, but it is not 108 * really that bad. 
it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * These are all static, but make the ones we export globals so we do
 * not need to use compiler magic.
 */
int bufspace, maxbufspace,
	bufmallocspace, maxbufmallocspace, lobufspace, hibufspace;
static int bufreusecnt, bufdefragcnt, buffreekvacnt;
static int lorunningspace, hirunningspace, runningbufreq;
int dirtybufspace, dirtybufspacehw, lodirtybufspace, hidirtybufspace;
int dirtybufcount, dirtybufcounthw;
int runningbufspace, runningbufcount;
static int getnewbufcalls;
static int getnewbufrestarts;
static int recoverbufcalls;
static int needsbuffer;		/* locked by needsbuffer_spin */
static int bd_request;		/* locked by needsbuffer_spin */
static int bd_request_hw;	/* locked by needsbuffer_spin */
static u_int bd_wake_ary[BD_WAKE_SIZE];
static u_int bd_wake_index;
static struct spinlock needsbuffer_spin;

static struct thread *bufdaemon_td;
static struct thread *bufdaemonhw_td;

/*
 * Sysctls for operational control of the buffer cache.
 */
SYSCTL_INT(_vfs, OID_AUTO, lodirtybufspace, CTLFLAG_RW, &lodirtybufspace, 0,
	"Number of dirty buffers to flush before bufdaemon becomes inactive");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybufspace, CTLFLAG_RW, &hidirtybufspace, 0,
	"High watermark used to trigger explicit flushing of dirty buffers");
SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
	"Minimum amount of buffer space required for active I/O");
SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
	"Maximum amount of buffer space to use for active I/O");
/*
 * Sysctls determining current state of the buffer cache.
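 *
 * These nodes are read-only counters and gauges; on a typical build they
 * can be inspected from userland with, for example,
 * "sysctl vfs.dirtybufspace".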
 */
SYSCTL_INT(_vfs, OID_AUTO, nbuf, CTLFLAG_RD, &nbuf, 0,
	"Total number of buffers in buffer cache");
SYSCTL_INT(_vfs, OID_AUTO, dirtybufspace, CTLFLAG_RD, &dirtybufspace, 0,
	"Pending bytes of dirty buffers (all)");
SYSCTL_INT(_vfs, OID_AUTO, dirtybufspacehw, CTLFLAG_RD, &dirtybufspacehw, 0,
	"Pending bytes of dirty buffers (heavy weight)");
SYSCTL_INT(_vfs, OID_AUTO, dirtybufcount, CTLFLAG_RD, &dirtybufcount, 0,
	"Pending number of dirty buffers");
SYSCTL_INT(_vfs, OID_AUTO, dirtybufcounthw, CTLFLAG_RD, &dirtybufcounthw, 0,
	"Pending number of dirty buffers (heavy weight)");
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
	"I/O bytes currently in progress due to asynchronous writes");
SYSCTL_INT(_vfs, OID_AUTO, runningbufcount, CTLFLAG_RD, &runningbufcount, 0,
	"I/O buffers currently in progress due to asynchronous writes");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
	"Hard limit on maximum amount of memory usable for buffer space");
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
	"Soft limit on maximum amount of memory usable for buffer space");
SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
	"Minimum amount of memory to reserve for system buffer space");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
	"Amount of memory available for buffers");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RD, &maxbufmallocspace,
	0, "Maximum amount of memory reserved for buffers using malloc");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
	"Amount of memory left for buffers using malloc-scheme");
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD, &getnewbufcalls, 0,
	"New buffer header acquisition requests");
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD, &getnewbufrestarts,
	0, "New buffer header acquisition restarts");
SYSCTL_INT(_vfs, OID_AUTO, recoverbufcalls, CTLFLAG_RD, &recoverbufcalls, 0,
	"Recover VM space in an emergency");
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RD, &bufdefragcnt, 0,
	"Buffer acquisition restarts due to fragmented buffer map");
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RD, &buffreekvacnt, 0,
	"Number of times KVA space was deallocated from an arbitrary buffer");
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RD, &bufreusecnt, 0,
	"Number of times buffer re-use operations were successful");
SYSCTL_INT(_debug_sizeof, OID_AUTO, buf, CTLFLAG_RD, 0, sizeof(struct buf),
	"sizeof(struct buf)");

char *buf_wmesg = BUF_WMESG;

extern int vm_swap_size;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_UNUSED02	0x02
#define VFS_BIO_NEED_UNUSED04	0x04
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */

/*
 * bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.  Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */
static __inline void
bufspacewakeup(void)
{
	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
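	 *
	 * For reference, the waiting side in getnewbuf() does roughly the
	 * following (a sketch; see getnewbuf() later in this file for the
	 * real code):
	 *
	 *	needsbuffer |= VFS_BIO_NEED_BUFSPACE;
	 *	while (needsbuffer & VFS_BIO_NEED_BUFSPACE)
	 *		tsleep(&needsbuffer, slpflags, "nbufkv", slptimeo);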
	 */
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		spin_lock_wr(&needsbuffer_spin);
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		spin_unlock_wr(&needsbuffer_spin);
		wakeup(&needsbuffer);
	}
}

/*
 * runningbufwakeup:
 *
 *	Accounting for I/O in progress.
 */
static __inline void
runningbufwakeup(struct buf *bp)
{
	int totalspace;

	if ((totalspace = bp->b_runningbufspace) != 0) {
		runningbufspace -= totalspace;
		--runningbufcount;
		bp->b_runningbufspace = 0;
		if (runningbufreq && runningbufspace <= lorunningspace) {
			runningbufreq = 0;
			wakeup(&runningbufreq);
		}
		bd_signal(totalspace);
	}
}

/*
 * bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wake up anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */
static __inline void
bufcountwakeup(void)
{
	if (needsbuffer) {
		spin_lock_wr(&needsbuffer_spin);
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		spin_unlock_wr(&needsbuffer_spin);
		wakeup(&needsbuffer);
	}
}

/*
 * waitrunningbufspace()
 *
 *	Wait for the amount of running I/O to drop to a reasonable level.
 *
 *	The caller may be using this function to block in a tight loop; we
 *	must block while runningbufspace is greater than the passed limit.
 *	Even with that it may not be enough, due to the presence of B_LOCKED
 *	dirty buffers, so we also wait for at least one running buffer to
 *	complete.
 */
static __inline void
waitrunningbufspace(int limit)
{
	int lorun;

	if (lorunningspace < limit)
		lorun = lorunningspace;
	else
		lorun = limit;

	crit_enter();
	if (runningbufspace > lorun) {
		while (runningbufspace > lorun) {
			++runningbufreq;
			tsleep(&runningbufreq, 0, "wdrain", 0);
		}
	} else if (runningbufspace) {
		++runningbufreq;
		tsleep(&runningbufreq, 0, "wdrain2", 1);
	}
	crit_exit();
}

/*
 * vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
		   vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		   vm_page_t m)
{
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/*
 * bd_speedup()
 *
 *	Spank the buf_daemon[_hw] if the total dirty buffer space exceeds the
 *	low water mark.
330 */ 331 static __inline__ 332 void 333 bd_speedup(void) 334 { 335 if (dirtybufspace < lodirtybufspace && dirtybufcount < nbuf / 2) 336 return; 337 338 if (bd_request == 0 && 339 (dirtybufspace - dirtybufspacehw > lodirtybufspace / 2 || 340 dirtybufcount - dirtybufcounthw >= nbuf / 2)) { 341 spin_lock_wr(&needsbuffer_spin); 342 bd_request = 1; 343 spin_unlock_wr(&needsbuffer_spin); 344 wakeup(&bd_request); 345 } 346 if (bd_request_hw == 0 && 347 (dirtybufspacehw > lodirtybufspace / 2 || 348 dirtybufcounthw >= nbuf / 2)) { 349 spin_lock_wr(&needsbuffer_spin); 350 bd_request_hw = 1; 351 spin_unlock_wr(&needsbuffer_spin); 352 wakeup(&bd_request_hw); 353 } 354 } 355 356 /* 357 * bd_heatup() 358 * 359 * Get the buf_daemon heated up when the number of running and dirty 360 * buffers exceeds the mid-point. 361 */ 362 int 363 bd_heatup(void) 364 { 365 int mid1; 366 int mid2; 367 int totalspace; 368 369 mid1 = lodirtybufspace + (hidirtybufspace - lodirtybufspace) / 2; 370 371 totalspace = runningbufspace + dirtybufspace; 372 if (totalspace >= mid1 || dirtybufcount >= nbuf / 2) { 373 bd_speedup(); 374 mid2 = mid1 + (hidirtybufspace - mid1) / 2; 375 if (totalspace >= mid2) 376 return(totalspace - mid2); 377 } 378 return(0); 379 } 380 381 /* 382 * bd_wait() 383 * 384 * Wait for the buffer cache to flush (totalspace) bytes worth of 385 * buffers, then return. 386 * 387 * Regardless this function blocks while the number of dirty buffers 388 * exceeds hidirtybufspace. 389 */ 390 void 391 bd_wait(int totalspace) 392 { 393 u_int i; 394 int count; 395 396 if (curthread == bufdaemonhw_td || curthread == bufdaemon_td) 397 return; 398 399 while (totalspace > 0) { 400 bd_heatup(); 401 crit_enter(); 402 if (totalspace > runningbufspace + dirtybufspace) 403 totalspace = runningbufspace + dirtybufspace; 404 count = totalspace / BKVASIZE; 405 if (count >= BD_WAKE_SIZE) 406 count = BD_WAKE_SIZE - 1; 407 i = (bd_wake_index + count) & BD_WAKE_MASK; 408 ++bd_wake_ary[i]; 409 tsleep(&bd_wake_ary[i], 0, "flstik", hz); 410 crit_exit(); 411 412 totalspace = runningbufspace + dirtybufspace - hidirtybufspace; 413 } 414 } 415 416 /* 417 * bd_signal() 418 * 419 * This function is called whenever runningbufspace or dirtybufspace 420 * is reduced. Track threads waiting for run+dirty buffer I/O 421 * complete. 422 */ 423 static void 424 bd_signal(int totalspace) 425 { 426 u_int i; 427 428 while (totalspace > 0) { 429 i = atomic_fetchadd_int(&bd_wake_index, 1); 430 i &= BD_WAKE_MASK; 431 if (bd_wake_ary[i]) { 432 bd_wake_ary[i] = 0; 433 wakeup(&bd_wake_ary[i]); 434 } 435 totalspace -= BKVASIZE; 436 } 437 } 438 439 /* 440 * bufinit: 441 * 442 * Load time initialisation of the buffer cache, called from machine 443 * dependant initialization code. 
444 */ 445 void 446 bufinit(void) 447 { 448 struct buf *bp; 449 vm_offset_t bogus_offset; 450 int i; 451 452 spin_init(&needsbuffer_spin); 453 454 /* next, make a null set of free lists */ 455 for (i = 0; i < BUFFER_QUEUES; i++) 456 TAILQ_INIT(&bufqueues[i]); 457 458 /* finally, initialize each buffer header and stick on empty q */ 459 for (i = 0; i < nbuf; i++) { 460 bp = &buf[i]; 461 bzero(bp, sizeof *bp); 462 bp->b_flags = B_INVAL; /* we're just an empty header */ 463 bp->b_cmd = BUF_CMD_DONE; 464 bp->b_qindex = BQUEUE_EMPTY; 465 initbufbio(bp); 466 xio_init(&bp->b_xio); 467 buf_dep_init(bp); 468 BUF_LOCKINIT(bp); 469 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_EMPTY], bp, b_freelist); 470 } 471 472 /* 473 * maxbufspace is the absolute maximum amount of buffer space we are 474 * allowed to reserve in KVM and in real terms. The absolute maximum 475 * is nominally used by buf_daemon. hibufspace is the nominal maximum 476 * used by most other processes. The differential is required to 477 * ensure that buf_daemon is able to run when other processes might 478 * be blocked waiting for buffer space. 479 * 480 * maxbufspace is based on BKVASIZE. Allocating buffers larger then 481 * this may result in KVM fragmentation which is not handled optimally 482 * by the system. 483 */ 484 maxbufspace = nbuf * BKVASIZE; 485 hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10); 486 lobufspace = hibufspace - MAXBSIZE; 487 488 lorunningspace = 512 * 1024; 489 hirunningspace = 1024 * 1024; 490 491 /* 492 * Limit the amount of malloc memory since it is wired permanently 493 * into the kernel space. Even though this is accounted for in 494 * the buffer allocation, we don't want the malloced region to grow 495 * uncontrolled. The malloc scheme improves memory utilization 496 * significantly on average (small) directories. 497 */ 498 maxbufmallocspace = hibufspace / 20; 499 500 /* 501 * Reduce the chance of a deadlock occuring by limiting the number 502 * of delayed-write dirty buffers we allow to stack up. 503 */ 504 hidirtybufspace = hibufspace / 2; 505 dirtybufspace = 0; 506 dirtybufspacehw = 0; 507 508 lodirtybufspace = hidirtybufspace / 2; 509 510 /* 511 * Maximum number of async ops initiated per buf_daemon loop. This is 512 * somewhat of a hack at the moment, we really need to limit ourselves 513 * based on the number of bytes of I/O in-transit that were initiated 514 * from buf_daemon. 515 */ 516 517 bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE); 518 bogus_page = vm_page_alloc(&kernel_object, 519 (bogus_offset >> PAGE_SHIFT), 520 VM_ALLOC_NORMAL); 521 vmstats.v_wire_count++; 522 523 } 524 525 /* 526 * Initialize the embedded bio structures 527 */ 528 void 529 initbufbio(struct buf *bp) 530 { 531 bp->b_bio1.bio_buf = bp; 532 bp->b_bio1.bio_prev = NULL; 533 bp->b_bio1.bio_offset = NOOFFSET; 534 bp->b_bio1.bio_next = &bp->b_bio2; 535 bp->b_bio1.bio_done = NULL; 536 537 bp->b_bio2.bio_buf = bp; 538 bp->b_bio2.bio_prev = &bp->b_bio1; 539 bp->b_bio2.bio_offset = NOOFFSET; 540 bp->b_bio2.bio_next = NULL; 541 bp->b_bio2.bio_done = NULL; 542 } 543 544 /* 545 * Reinitialize the embedded bio structures as well as any additional 546 * translation cache layers. 547 */ 548 void 549 reinitbufbio(struct buf *bp) 550 { 551 struct bio *bio; 552 553 for (bio = &bp->b_bio1; bio; bio = bio->bio_next) { 554 bio->bio_done = NULL; 555 bio->bio_offset = NOOFFSET; 556 } 557 } 558 559 /* 560 * Push another BIO layer onto an existing BIO and return it. 
The new
 * BIO layer may already exist, holding cached translation data.
 */
struct bio *
push_bio(struct bio *bio)
{
	struct bio *nbio;

	if ((nbio = bio->bio_next) == NULL) {
		int index = bio - &bio->bio_buf->b_bio_array[0];
		if (index >= NBUF_BIO - 1) {
			panic("push_bio: too many layers bp %p\n",
				bio->bio_buf);
		}
		nbio = &bio->bio_buf->b_bio_array[index + 1];
		bio->bio_next = nbio;
		nbio->bio_prev = bio;
		nbio->bio_buf = bio->bio_buf;
		nbio->bio_offset = NOOFFSET;
		nbio->bio_done = NULL;
		nbio->bio_next = NULL;
	}
	KKASSERT(nbio->bio_done == NULL);
	return(nbio);
}

/*
 * Pop a BIO translation layer, returning the previous layer.  The BIO
 * must have been previously pushed.
 */
struct bio *
pop_bio(struct bio *bio)
{
	return(bio->bio_prev);
}

void
clearbiocache(struct bio *bio)
{
	while (bio) {
		bio->bio_offset = NOOFFSET;
		bio = bio->bio_next;
	}
}

/*
 * bfreekva:
 *
 *	Free the KVA allocation for buffer 'bp'.
 *
 *	Must be called from a critical section as this is the only locking for
 *	buffer_map.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
static void
bfreekva(struct buf *bp)
{
	int count;

	if (bp->b_kvasize) {
		++buffreekvacnt;
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
		vm_map_lock(&buffer_map);
		bufspace -= bp->b_kvasize;
		vm_map_delete(&buffer_map,
			      (vm_offset_t) bp->b_kvabase,
			      (vm_offset_t) bp->b_kvabase + bp->b_kvasize,
			      &count);
		vm_map_unlock(&buffer_map);
		vm_map_entry_release(count);
		bp->b_kvasize = 0;
		bufspacewakeup();
	}
}

/*
 * bremfree:
 *
 *	Remove the buffer from the appropriate free list.
 */
void
bremfree(struct buf *bp)
{
	crit_enter();

	if (bp->b_qindex != BQUEUE_NONE) {
		KASSERT(BUF_REFCNTNB(bp) == 1,
			("bremfree: bp %p not locked", bp));
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = BQUEUE_NONE;
	} else {
		if (BUF_REFCNTNB(bp) <= 1)
			panic("bremfree: removing a buffer not on a queue");
	}

	crit_exit();
}

/*
 * bread:
 *
 *	Get a buffer with the specified data.  Look in the cache first.  We
 *	must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 *	is set, the buffer is valid and we do not have to do anything ( see
 *	getblk() ).
 */
int
bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
{
	struct buf *bp;

	bp = getblk(vp, loffset, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		KASSERT(!(bp->b_flags & B_ASYNC),
			("bread: illegal async bp %p", bp));
		bp->b_flags &= ~(B_ERROR | B_INVAL);
		bp->b_cmd = BUF_CMD_READ;
		vfs_busy_pages(vp, bp);
		vn_strategy(vp, &bp->b_bio1);
		return (biowait(bp));
	}
	return (0);
}

/*
 * breadn:
 *
 *	Operates like bread, but also starts asynchronous I/O on
 *	read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
 *	to initiating I/O.  If B_CACHE is set, the buffer is valid
 *	and we do not have to do anything.
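 *
 *	A typical caller of bread() (and, analogously, breadn()) looks
 *	roughly like the sketch below; exact error handling conventions
 *	vary by filesystem:
 *
 *		struct buf *bp;
 *		int error;
 *
 *		error = bread(vp, loffset, size, &bp);
 *		if (error) {
 *			brelse(bp);
 *			return (error);
 *		}
 *		...examine bp->b_data...
 *		bqrelse(bp);	(or brelse()/bdwrite() as appropriate)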
697 */ 698 int 699 breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset, 700 int *rabsize, int cnt, struct buf **bpp) 701 { 702 struct buf *bp, *rabp; 703 int i; 704 int rv = 0, readwait = 0; 705 706 *bpp = bp = getblk(vp, loffset, size, 0, 0); 707 708 /* if not found in cache, do some I/O */ 709 if ((bp->b_flags & B_CACHE) == 0) { 710 bp->b_flags &= ~(B_ERROR | B_INVAL); 711 bp->b_cmd = BUF_CMD_READ; 712 vfs_busy_pages(vp, bp); 713 vn_strategy(vp, &bp->b_bio1); 714 ++readwait; 715 } 716 717 for (i = 0; i < cnt; i++, raoffset++, rabsize++) { 718 if (inmem(vp, *raoffset)) 719 continue; 720 rabp = getblk(vp, *raoffset, *rabsize, 0, 0); 721 722 if ((rabp->b_flags & B_CACHE) == 0) { 723 rabp->b_flags |= B_ASYNC; 724 rabp->b_flags &= ~(B_ERROR | B_INVAL); 725 rabp->b_cmd = BUF_CMD_READ; 726 vfs_busy_pages(vp, rabp); 727 BUF_KERNPROC(rabp); 728 vn_strategy(vp, &rabp->b_bio1); 729 } else { 730 brelse(rabp); 731 } 732 } 733 734 if (readwait) { 735 rv = biowait(bp); 736 } 737 return (rv); 738 } 739 740 /* 741 * bwrite: 742 * 743 * Write, release buffer on completion. (Done by iodone 744 * if async). Do not bother writing anything if the buffer 745 * is invalid. 746 * 747 * Note that we set B_CACHE here, indicating that buffer is 748 * fully valid and thus cacheable. This is true even of NFS 749 * now so we set it generally. This could be set either here 750 * or in biodone() since the I/O is synchronous. We put it 751 * here. 752 */ 753 int 754 bwrite(struct buf *bp) 755 { 756 int oldflags; 757 758 if (bp->b_flags & B_INVAL) { 759 brelse(bp); 760 return (0); 761 } 762 763 oldflags = bp->b_flags; 764 765 if (BUF_REFCNTNB(bp) == 0) 766 panic("bwrite: buffer is not busy???"); 767 crit_enter(); 768 769 /* Mark the buffer clean */ 770 bundirty(bp); 771 772 bp->b_flags &= ~B_ERROR; 773 bp->b_flags |= B_CACHE; 774 bp->b_cmd = BUF_CMD_WRITE; 775 vfs_busy_pages(bp->b_vp, bp); 776 777 /* 778 * Normal bwrites pipeline writes. NOTE: b_bufsize is only 779 * valid for vnode-backed buffers. 780 */ 781 bp->b_runningbufspace = bp->b_bufsize; 782 if (bp->b_runningbufspace) { 783 runningbufspace += bp->b_runningbufspace; 784 ++runningbufcount; 785 } 786 787 crit_exit(); 788 if (oldflags & B_ASYNC) 789 BUF_KERNPROC(bp); 790 vn_strategy(bp->b_vp, &bp->b_bio1); 791 792 if ((oldflags & B_ASYNC) == 0) { 793 int rtval = biowait(bp); 794 brelse(bp); 795 return (rtval); 796 } 797 return (0); 798 } 799 800 /* 801 * bdwrite: 802 * 803 * Delayed write. (Buffer is marked dirty). Do not bother writing 804 * anything if the buffer is marked invalid. 805 * 806 * Note that since the buffer must be completely valid, we can safely 807 * set B_CACHE. In fact, we have to set B_CACHE here rather then in 808 * biodone() in order to prevent getblk from writing the buffer 809 * out synchronously. 810 */ 811 void 812 bdwrite(struct buf *bp) 813 { 814 if (BUF_REFCNTNB(bp) == 0) 815 panic("bdwrite: buffer is not busy"); 816 817 if (bp->b_flags & B_INVAL) { 818 brelse(bp); 819 return; 820 } 821 bdirty(bp); 822 823 /* 824 * Set B_CACHE, indicating that the buffer is fully valid. This is 825 * true even of NFS now. 826 */ 827 bp->b_flags |= B_CACHE; 828 829 /* 830 * This bmap keeps the system from needing to do the bmap later, 831 * perhaps when the system is attempting to do a sync. Since it 832 * is likely that the indirect block -- or whatever other datastructure 833 * that the filesystem needs is still in memory now, it is a good 834 * thing to do this. 
Note also, that if the pageout daemon is 835 * requesting a sync -- there might not be enough memory to do 836 * the bmap then... So, this is important to do. 837 */ 838 if (bp->b_bio2.bio_offset == NOOFFSET) { 839 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset, 840 NULL, NULL, BUF_CMD_WRITE); 841 } 842 843 /* 844 * Set the *dirty* buffer range based upon the VM system dirty pages. 845 */ 846 vfs_setdirty(bp); 847 848 /* 849 * We need to do this here to satisfy the vnode_pager and the 850 * pageout daemon, so that it thinks that the pages have been 851 * "cleaned". Note that since the pages are in a delayed write 852 * buffer -- the VFS layer "will" see that the pages get written 853 * out on the next sync, or perhaps the cluster will be completed. 854 */ 855 vfs_clean_pages(bp); 856 bqrelse(bp); 857 858 /* 859 * note: we cannot initiate I/O from a bdwrite even if we wanted to, 860 * due to the softdep code. 861 */ 862 } 863 864 /* 865 * bdirty: 866 * 867 * Turn buffer into delayed write request by marking it B_DELWRI. 868 * B_RELBUF and B_NOCACHE must be cleared. 869 * 870 * We reassign the buffer to itself to properly update it in the 871 * dirty/clean lists. 872 * 873 * Must be called from a critical section. 874 * The buffer must be on BQUEUE_NONE. 875 */ 876 void 877 bdirty(struct buf *bp) 878 { 879 KASSERT(bp->b_qindex == BQUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex)); 880 if (bp->b_flags & B_NOCACHE) { 881 kprintf("bdirty: clearing B_NOCACHE on buf %p\n", bp); 882 bp->b_flags &= ~B_NOCACHE; 883 } 884 if (bp->b_flags & B_INVAL) { 885 kprintf("bdirty: warning, dirtying invalid buffer %p\n", bp); 886 } 887 bp->b_flags &= ~B_RELBUF; 888 889 if ((bp->b_flags & B_DELWRI) == 0) { 890 bp->b_flags |= B_DELWRI; 891 reassignbuf(bp); 892 ++dirtybufcount; 893 dirtybufspace += bp->b_bufsize; 894 if (bp->b_flags & B_HEAVY) { 895 ++dirtybufcounthw; 896 dirtybufspacehw += bp->b_bufsize; 897 } 898 bd_heatup(); 899 } 900 } 901 902 /* 903 * Set B_HEAVY, indicating that this is a heavy-weight buffer that 904 * needs to be flushed with a different buf_daemon thread to avoid 905 * deadlocks. B_HEAVY also imposes restrictions in getnewbuf(). 906 */ 907 void 908 bheavy(struct buf *bp) 909 { 910 if ((bp->b_flags & B_HEAVY) == 0) { 911 bp->b_flags |= B_HEAVY; 912 if (bp->b_flags & B_DELWRI) { 913 ++dirtybufcounthw; 914 dirtybufspacehw += bp->b_bufsize; 915 } 916 } 917 } 918 919 /* 920 * bundirty: 921 * 922 * Clear B_DELWRI for buffer. 923 * 924 * Must be called from a critical section. 925 * 926 * The buffer is typically on BQUEUE_NONE but there is one case in 927 * brelse() that calls this function after placing the buffer on 928 * a different queue. 929 */ 930 931 void 932 bundirty(struct buf *bp) 933 { 934 if (bp->b_flags & B_DELWRI) { 935 bp->b_flags &= ~B_DELWRI; 936 reassignbuf(bp); 937 --dirtybufcount; 938 dirtybufspace -= bp->b_bufsize; 939 if (bp->b_flags & B_HEAVY) { 940 --dirtybufcounthw; 941 dirtybufspacehw -= bp->b_bufsize; 942 } 943 bd_signal(bp->b_bufsize); 944 } 945 /* 946 * Since it is now being written, we can clear its deferred write flag. 947 */ 948 bp->b_flags &= ~B_DEFERRED; 949 } 950 951 /* 952 * bawrite: 953 * 954 * Asynchronous write. Start output on a buffer, but do not wait for 955 * it to complete. The buffer is released when the output completes. 956 * 957 * bwrite() ( or the VOP routine anyway ) is responsible for handling 958 * B_INVAL buffers. Not us. 
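 *
 * As a rough guide to the write flavors defined in this file (a summary
 * sketch only; see the individual function comments for details):
 *
 *	bwrite(bp)	synchronous; waits for completion via biowait()
 *	bawrite(bp)	asynchronous; released when the output completes
 *	bdwrite(bp)	delayed write; marked B_DELWRI and requeued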
959 */ 960 void 961 bawrite(struct buf *bp) 962 { 963 bp->b_flags |= B_ASYNC; 964 bwrite(bp); 965 } 966 967 /* 968 * bowrite: 969 * 970 * Ordered write. Start output on a buffer, and flag it so that the 971 * device will write it in the order it was queued. The buffer is 972 * released when the output completes. bwrite() ( or the VOP routine 973 * anyway ) is responsible for handling B_INVAL buffers. 974 */ 975 int 976 bowrite(struct buf *bp) 977 { 978 bp->b_flags |= B_ORDERED | B_ASYNC; 979 return (bwrite(bp)); 980 } 981 982 /* 983 * buf_dirty_count_severe: 984 * 985 * Return true if we have too many dirty buffers. 986 */ 987 int 988 buf_dirty_count_severe(void) 989 { 990 return (runningbufspace + dirtybufspace >= hidirtybufspace || 991 dirtybufcount >= nbuf / 2); 992 } 993 994 /* 995 * brelse: 996 * 997 * Release a busy buffer and, if requested, free its resources. The 998 * buffer will be stashed in the appropriate bufqueue[] allowing it 999 * to be accessed later as a cache entity or reused for other purposes. 1000 */ 1001 void 1002 brelse(struct buf *bp) 1003 { 1004 #ifdef INVARIANTS 1005 int saved_flags = bp->b_flags; 1006 #endif 1007 1008 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1009 1010 crit_enter(); 1011 1012 /* 1013 * If B_NOCACHE is set we are being asked to destroy the buffer and 1014 * its backing store. Clear B_DELWRI. 1015 * 1016 * B_NOCACHE is set in two cases: (1) when the caller really wants 1017 * to destroy the buffer and backing store and (2) when the caller 1018 * wants to destroy the buffer and backing store after a write 1019 * completes. 1020 */ 1021 if ((bp->b_flags & (B_NOCACHE|B_DELWRI)) == (B_NOCACHE|B_DELWRI)) { 1022 bundirty(bp); 1023 } 1024 1025 if ((bp->b_flags & (B_INVAL | B_DELWRI)) == B_DELWRI) { 1026 /* 1027 * A re-dirtied buffer is only subject to destruction 1028 * by B_INVAL. B_ERROR and B_NOCACHE are ignored. 1029 */ 1030 /* leave buffer intact */ 1031 } else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) || 1032 (bp->b_bufsize <= 0)) { 1033 /* 1034 * Either a failed read or we were asked to free or not 1035 * cache the buffer. This path is reached with B_DELWRI 1036 * set only if B_INVAL is already set. B_NOCACHE governs 1037 * backing store destruction. 1038 * 1039 * NOTE: HAMMER will set B_LOCKED in buf_deallocate if the 1040 * buffer cannot be immediately freed. 1041 */ 1042 bp->b_flags |= B_INVAL; 1043 if (LIST_FIRST(&bp->b_dep) != NULL) 1044 buf_deallocate(bp); 1045 if (bp->b_flags & B_DELWRI) { 1046 --dirtybufcount; 1047 dirtybufspace -= bp->b_bufsize; 1048 if (bp->b_flags & B_HEAVY) { 1049 --dirtybufcounthw; 1050 dirtybufspacehw -= bp->b_bufsize; 1051 } 1052 bd_signal(bp->b_bufsize); 1053 } 1054 bp->b_flags &= ~(B_DELWRI | B_CACHE); 1055 } 1056 1057 /* 1058 * We must clear B_RELBUF if B_DELWRI or B_LOCKED is set. 1059 * If vfs_vmio_release() is called with either bit set, the 1060 * underlying pages may wind up getting freed causing a previous 1061 * write (bdwrite()) to get 'lost' because pages associated with 1062 * a B_DELWRI bp are marked clean. Pages associated with a 1063 * B_LOCKED buffer may be mapped by the filesystem. 1064 * 1065 * If we want to release the buffer ourselves (rather then the 1066 * originator asking us to release it), give the originator a 1067 * chance to countermand the release by setting B_LOCKED. 1068 * 1069 * We still allow the B_INVAL case to call vfs_vmio_release(), even 1070 * if B_DELWRI is set. 
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if (bp->b_flags & (B_DELWRI | B_LOCKED)) {
		bp->b_flags &= ~B_RELBUF;
	} else if (vm_page_count_severe()) {
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);		/* can set B_LOCKED */
		if (bp->b_flags & (B_DELWRI | B_LOCKED))
			bp->b_flags &= ~B_RELBUF;
		else
			bp->b_flags |= B_RELBUF;
	}

	/*
	 * Make sure b_cmd is clear.  It may have already been cleared by
	 * biodone().
	 *
	 * At this point destroying the buffer is governed by the B_INVAL
	 * or B_RELBUF flags.
	 */
	bp->b_cmd = BUF_CMD_DONE;

	/*
	 * VMIO buffer rundown.  Make sure the VM page array is restored
	 * after an I/O may have replaced some of the pages with bogus pages
	 * in order to not destroy dirty pages in a fill-in read.
	 *
	 * Note that due to the code above, if a buffer is marked B_DELWRI
	 * then the B_RELBUF and B_NOCACHE bits will always be clear.
	 * B_INVAL may still be set, however.
	 *
	 * For clean buffers, B_INVAL or B_RELBUF will destroy the buffer
	 * but not the backing store.  B_NOCACHE will destroy the backing
	 * store.
	 *
	 * Note that dirty NFS buffers contain byte-granular write ranges
	 * and should not be destroyed w/ B_INVAL even if the backing store
	 * is left intact.
	 */
	if (bp->b_flags & B_VMIO) {
		/*
		 * Rundown for VMIO buffers which are not dirty NFS buffers.
		 */
		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then the b_data pointer may not be
		 * page-aligned.  But our b_xio.xio_pages array *IS*
		 * page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_loffset;

		for (i = 0; i < bp->b_xio.xio_npages; i++) {
			m = bp->b_xio.xio_pages[i];
			vm_page_flag_clear(m, PG_ZERO);
			/*
			 * If we hit a bogus page, fixup *all* of them
			 * now.  Note that we left these pages wired
			 * when we removed them so they had better exist,
			 * and they cannot be ripped out from under us so
			 * no critical section protection is necessary.
			 */
			if (m == bogus_page) {
				obj = vp->v_object;
				poff = OFF_TO_IDX(bp->b_loffset);

				for (j = i; j < bp->b_xio.xio_npages; j++) {
					vm_page_t mtmp;

					mtmp = bp->b_xio.xio_pages[j];
					if (mtmp == bogus_page) {
						mtmp = vm_page_lookup(obj, poff + j);
						if (!mtmp) {
							panic("brelse: page missing");
						}
						bp->b_xio.xio_pages[j] = mtmp;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
						bp->b_xio.xio_pages, bp->b_xio.xio_npages);
				}
				m = bp->b_xio.xio_pages[i];
			}

			/*
			 * Invalidate the backing store if B_NOCACHE is set
			 * (e.g. used with vinvalbuf()).  If this is NFS
			 * we impose a requirement that the block size be
			 * a multiple of PAGE_SIZE and create a temporary
			 * hack to basically invalidate the whole page.
The 1181 * problem is that NFS uses really odd buffer sizes 1182 * especially when tracking piecemeal writes and 1183 * it also vinvalbuf()'s a lot, which would result 1184 * in only partial page validation and invalidation 1185 * here. If the file page is mmap()'d, however, 1186 * all the valid bits get set so after we invalidate 1187 * here we would end up with weird m->valid values 1188 * like 0xfc. nfs_getpages() can't handle this so 1189 * we clear all the valid bits for the NFS case 1190 * instead of just some of them. 1191 * 1192 * The real bug is the VM system having to set m->valid 1193 * to VM_PAGE_BITS_ALL for faulted-in pages, which 1194 * itself is an artifact of the whole 512-byte 1195 * granular mess that exists to support odd block 1196 * sizes and UFS meta-data block sizes (e.g. 6144). 1197 * A complete rewrite is required. 1198 */ 1199 if (bp->b_flags & (B_NOCACHE|B_ERROR)) { 1200 int poffset = foff & PAGE_MASK; 1201 int presid; 1202 1203 presid = PAGE_SIZE - poffset; 1204 if (bp->b_vp->v_tag == VT_NFS && 1205 bp->b_vp->v_type == VREG) { 1206 ; /* entire page */ 1207 } else if (presid > resid) { 1208 presid = resid; 1209 } 1210 KASSERT(presid >= 0, ("brelse: extra page")); 1211 vm_page_set_invalid(m, poffset, presid); 1212 } 1213 resid -= PAGE_SIZE - (foff & PAGE_MASK); 1214 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 1215 } 1216 if (bp->b_flags & (B_INVAL | B_RELBUF)) 1217 vfs_vmio_release(bp); 1218 } else { 1219 /* 1220 * Rundown for non-VMIO buffers. 1221 */ 1222 if (bp->b_flags & (B_INVAL | B_RELBUF)) { 1223 #if 0 1224 if (bp->b_vp) 1225 kprintf("brelse bp %p %08x/%08x: Warning, caught and fixed brelvp bug\n", bp, saved_flags, bp->b_flags); 1226 #endif 1227 if (bp->b_bufsize) 1228 allocbuf(bp, 0); 1229 KKASSERT (LIST_FIRST(&bp->b_dep) == NULL); 1230 if (bp->b_vp) 1231 brelvp(bp); 1232 } 1233 } 1234 1235 if (bp->b_qindex != BQUEUE_NONE) 1236 panic("brelse: free buffer onto another queue???"); 1237 if (BUF_REFCNTNB(bp) > 1) { 1238 /* Temporary panic to verify exclusive locking */ 1239 /* This panic goes away when we allow shared refs */ 1240 panic("brelse: multiple refs"); 1241 /* do not release to free list */ 1242 BUF_UNLOCK(bp); 1243 crit_exit(); 1244 return; 1245 } 1246 1247 /* 1248 * Figure out the correct queue to place the cleaned up buffer on. 1249 * Buffers placed in the EMPTY or EMPTYKVA had better already be 1250 * disassociated from their vnode. 1251 */ 1252 if (bp->b_flags & B_LOCKED) { 1253 /* 1254 * Buffers that are locked are placed in the locked queue 1255 * immediately, regardless of their state. 1256 */ 1257 bp->b_qindex = BQUEUE_LOCKED; 1258 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist); 1259 } else if (bp->b_bufsize == 0) { 1260 /* 1261 * Buffers with no memory. Due to conditionals near the top 1262 * of brelse() such buffers should probably already be 1263 * marked B_INVAL and disassociated from their vnode. 1264 */ 1265 bp->b_flags |= B_INVAL; 1266 KASSERT(bp->b_vp == NULL, ("bp1 %p flags %08x/%08x vnode %p unexpectededly still associated!", bp, saved_flags, bp->b_flags, bp->b_vp)); 1267 KKASSERT((bp->b_flags & B_HASHED) == 0); 1268 if (bp->b_kvasize) { 1269 bp->b_qindex = BQUEUE_EMPTYKVA; 1270 } else { 1271 bp->b_qindex = BQUEUE_EMPTY; 1272 } 1273 TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist); 1274 } else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) { 1275 /* 1276 * Buffers with junk contents. Again these buffers had better 1277 * already be disassociated from their vnode. 
1278 */ 1279 KASSERT(bp->b_vp == NULL, ("bp2 %p flags %08x/%08x vnode %p unexpectededly still associated!", bp, saved_flags, bp->b_flags, bp->b_vp)); 1280 KKASSERT((bp->b_flags & B_HASHED) == 0); 1281 bp->b_flags |= B_INVAL; 1282 bp->b_qindex = BQUEUE_CLEAN; 1283 TAILQ_INSERT_HEAD(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); 1284 } else { 1285 /* 1286 * Remaining buffers. These buffers are still associated with 1287 * their vnode. 1288 */ 1289 switch(bp->b_flags & (B_DELWRI|B_HEAVY)) { 1290 case B_DELWRI: 1291 bp->b_qindex = BQUEUE_DIRTY; 1292 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY], bp, b_freelist); 1293 break; 1294 case B_DELWRI | B_HEAVY: 1295 bp->b_qindex = BQUEUE_DIRTY_HW; 1296 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_DIRTY_HW], bp, 1297 b_freelist); 1298 break; 1299 default: 1300 /* 1301 * NOTE: Buffers are always placed at the end of the 1302 * queue. If B_AGE is not set the buffer will cycle 1303 * through the queue twice. 1304 */ 1305 bp->b_qindex = BQUEUE_CLEAN; 1306 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); 1307 break; 1308 } 1309 } 1310 1311 /* 1312 * If B_INVAL, clear B_DELWRI. We've already placed the buffer 1313 * on the correct queue. 1314 */ 1315 if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) 1316 bundirty(bp); 1317 1318 /* 1319 * The bp is on an appropriate queue unless locked. If it is not 1320 * locked or dirty we can wakeup threads waiting for buffer space. 1321 * 1322 * We've already handled the B_INVAL case ( B_DELWRI will be clear 1323 * if B_INVAL is set ). 1324 */ 1325 if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) 1326 bufcountwakeup(); 1327 1328 /* 1329 * Something we can maybe free or reuse 1330 */ 1331 if (bp->b_bufsize || bp->b_kvasize) 1332 bufspacewakeup(); 1333 1334 /* 1335 * Clean up temporary flags and unlock the buffer. 1336 */ 1337 bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT); 1338 BUF_UNLOCK(bp); 1339 crit_exit(); 1340 } 1341 1342 /* 1343 * bqrelse: 1344 * 1345 * Release a buffer back to the appropriate queue but do not try to free 1346 * it. The buffer is expected to be used again soon. 1347 * 1348 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by 1349 * biodone() to requeue an async I/O on completion. It is also used when 1350 * known good buffers need to be requeued but we think we may need the data 1351 * again soon. 1352 * 1353 * XXX we should be able to leave the B_RELBUF hint set on completion. 1354 */ 1355 void 1356 bqrelse(struct buf *bp) 1357 { 1358 crit_enter(); 1359 1360 KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp)); 1361 1362 if (bp->b_qindex != BQUEUE_NONE) 1363 panic("bqrelse: free buffer onto another queue???"); 1364 if (BUF_REFCNTNB(bp) > 1) { 1365 /* do not release to free list */ 1366 panic("bqrelse: multiple refs"); 1367 BUF_UNLOCK(bp); 1368 crit_exit(); 1369 return; 1370 } 1371 if (bp->b_flags & B_LOCKED) { 1372 /* 1373 * Locked buffers are released to the locked queue. However, 1374 * if the buffer is dirty it will first go into the dirty 1375 * queue and later on after the I/O completes successfully it 1376 * will be released to the locked queue. 1377 */ 1378 bp->b_qindex = BQUEUE_LOCKED; 1379 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_LOCKED], bp, b_freelist); 1380 } else if (bp->b_flags & B_DELWRI) { 1381 bp->b_qindex = (bp->b_flags & B_HEAVY) ? 
				BQUEUE_DIRTY_HW : BQUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	} else if (vm_page_count_severe()) {
		/*
		 * We are too low on memory, we have to try to free the
		 * buffer (most importantly: the wired pages making up its
		 * backing store) *now*.
		 */
		crit_exit();
		brelse(bp);
		return;
	} else {
		bp->b_qindex = BQUEUE_CLEAN;
		TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], bp, b_freelist);
	}

	if ((bp->b_flags & B_LOCKED) == 0 &&
	    ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)) {
		bufcountwakeup();
	}

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	/*
	 * Final cleanup and unlock.  Clear bits that are only used while a
	 * buffer is actively locked.
	 */
	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_RELBUF);
	BUF_UNLOCK(bp);
	crit_exit();
}

/*
 * vfs_vmio_release:
 *
 *	Return backing pages held by the buffer 'bp' back to the VM system
 *	if possible.  The pages are freed if they are no longer valid, or
 *	an attempt is made to free them if the buffer was used for direct
 *	I/O; otherwise they are sent to the page cache.
 *
 *	Pages that were marked busy are left alone and skipped.
 *
 *	The KVA mapping (b_data) for the underlying pages is removed by
 *	this function.
 */
static void
vfs_vmio_release(struct buf *bp)
{
	int i;
	vm_page_t m;

	crit_enter();
	for (i = 0; i < bp->b_xio.xio_npages; i++) {
		m = bp->b_xio.xio_pages[i];
		bp->b_xio.xio_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {
			vm_page_flag_clear(m, PG_ZERO);
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.  We also free the page if the
			 * buffer was used for direct I/O.
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
			    m->hold_count == 0) {
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			} else if (bp->b_flags & B_DIRECT) {
				vm_page_try_to_free(m);
			} else if (vm_page_count_severe()) {
				vm_page_try_to_cache(m);
			}
		}
	}
	crit_exit();
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
	if (bp->b_bufsize) {
		bufspacewakeup();
		bp->b_bufsize = 0;
	}
	bp->b_xio.xio_npages = 0;
	bp->b_flags &= ~B_VMIO;
	KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * vfs_bio_awrite:
 *
 *	Implement clustered async writes for clearing out B_DELWRI buffers.
 *	This is much better than the old way of writing only one buffer at
 *	a time.  Note that we may not be presented with the buffers in the
 *	correct order, so we search for the cluster in both directions.
 *
 *	The buffer is locked on call.
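 *
 *	A caller (such as a dirty-queue flusher) is expected to hold the
 *	buffer lock first, e.g. (sketch only):
 *
 *		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0)
 *			nwritten = vfs_bio_awrite(bp);
 *
 *	The buffer is bremfree()'d and written asynchronously (or handed
 *	to cluster_wbuild()) by this function itself.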
1495 */ 1496 int 1497 vfs_bio_awrite(struct buf *bp) 1498 { 1499 int i; 1500 int j; 1501 off_t loffset = bp->b_loffset; 1502 struct vnode *vp = bp->b_vp; 1503 int nbytes; 1504 struct buf *bpa; 1505 int nwritten; 1506 int size; 1507 1508 crit_enter(); 1509 /* 1510 * right now we support clustered writing only to regular files. If 1511 * we find a clusterable block we could be in the middle of a cluster 1512 * rather then at the beginning. 1513 * 1514 * NOTE: b_bio1 contains the logical loffset and is aliased 1515 * to b_loffset. b_bio2 contains the translated block number. 1516 */ 1517 if ((vp->v_type == VREG) && 1518 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 1519 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 1520 1521 size = vp->v_mount->mnt_stat.f_iosize; 1522 1523 for (i = size; i < MAXPHYS; i += size) { 1524 if ((bpa = findblk(vp, loffset + i)) && 1525 BUF_REFCNT(bpa) == 0 && 1526 ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) == 1527 (B_DELWRI | B_CLUSTEROK)) && 1528 (bpa->b_bufsize == size)) { 1529 if ((bpa->b_bio2.bio_offset == NOOFFSET) || 1530 (bpa->b_bio2.bio_offset != 1531 bp->b_bio2.bio_offset + i)) 1532 break; 1533 } else { 1534 break; 1535 } 1536 } 1537 for (j = size; i + j <= MAXPHYS && j <= loffset; j += size) { 1538 if ((bpa = findblk(vp, loffset - j)) && 1539 BUF_REFCNT(bpa) == 0 && 1540 ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) == 1541 (B_DELWRI | B_CLUSTEROK)) && 1542 (bpa->b_bufsize == size)) { 1543 if ((bpa->b_bio2.bio_offset == NOOFFSET) || 1544 (bpa->b_bio2.bio_offset != 1545 bp->b_bio2.bio_offset - j)) 1546 break; 1547 } else { 1548 break; 1549 } 1550 } 1551 j -= size; 1552 nbytes = (i + j); 1553 /* 1554 * this is a possible cluster write 1555 */ 1556 if (nbytes != size) { 1557 BUF_UNLOCK(bp); 1558 nwritten = cluster_wbuild(vp, size, 1559 loffset - j, nbytes); 1560 crit_exit(); 1561 return nwritten; 1562 } 1563 } 1564 1565 bremfree(bp); 1566 bp->b_flags |= B_ASYNC; 1567 1568 crit_exit(); 1569 /* 1570 * default (old) behavior, writing out only one block 1571 * 1572 * XXX returns b_bufsize instead of b_bcount for nwritten? 1573 */ 1574 nwritten = bp->b_bufsize; 1575 bwrite(bp); 1576 1577 return nwritten; 1578 } 1579 1580 /* 1581 * getnewbuf: 1582 * 1583 * Find and initialize a new buffer header, freeing up existing buffers 1584 * in the bufqueues as necessary. The new buffer is returned locked. 1585 * 1586 * Important: B_INVAL is not set. If the caller wishes to throw the 1587 * buffer away, the caller must set B_INVAL prior to calling brelse(). 1588 * 1589 * We block if: 1590 * We have insufficient buffer headers 1591 * We have insufficient buffer space 1592 * buffer_map is too fragmented ( space reservation fails ) 1593 * If we have to flush dirty buffers ( but we try to avoid this ) 1594 * 1595 * To avoid VFS layer recursion we do not flush dirty buffers ourselves. 1596 * Instead we ask the buf daemon to do it for us. We attempt to 1597 * avoid piecemeal wakeups of the pageout daemon. 1598 */ 1599 1600 static struct buf * 1601 getnewbuf(int blkflags, int slptimeo, int size, int maxsize) 1602 { 1603 struct buf *bp; 1604 struct buf *nbp; 1605 int defrag = 0; 1606 int nqindex; 1607 int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0; 1608 static int flushingbufs; 1609 1610 /* 1611 * We can't afford to block since we might be holding a vnode lock, 1612 * which may prevent system daemons from running. 
We deal with 1613 * low-memory situations by proactively returning memory and running 1614 * async I/O rather then sync I/O. 1615 */ 1616 1617 ++getnewbufcalls; 1618 --getnewbufrestarts; 1619 restart: 1620 ++getnewbufrestarts; 1621 1622 /* 1623 * Setup for scan. If we do not have enough free buffers, 1624 * we setup a degenerate case that immediately fails. Note 1625 * that if we are specially marked process, we are allowed to 1626 * dip into our reserves. 1627 * 1628 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN 1629 * 1630 * We start with EMPTYKVA. If the list is empty we backup to EMPTY. 1631 * However, there are a number of cases (defragging, reusing, ...) 1632 * where we cannot backup. 1633 */ 1634 nqindex = BQUEUE_EMPTYKVA; 1635 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]); 1636 1637 if (nbp == NULL) { 1638 /* 1639 * If no EMPTYKVA buffers and we are either 1640 * defragging or reusing, locate a CLEAN buffer 1641 * to free or reuse. If bufspace useage is low 1642 * skip this step so we can allocate a new buffer. 1643 */ 1644 if (defrag || bufspace >= lobufspace) { 1645 nqindex = BQUEUE_CLEAN; 1646 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]); 1647 } 1648 1649 /* 1650 * If we could not find or were not allowed to reuse a 1651 * CLEAN buffer, check to see if it is ok to use an EMPTY 1652 * buffer. We can only use an EMPTY buffer if allocating 1653 * its KVA would not otherwise run us out of buffer space. 1654 */ 1655 if (nbp == NULL && defrag == 0 && 1656 bufspace + maxsize < hibufspace) { 1657 nqindex = BQUEUE_EMPTY; 1658 nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTY]); 1659 } 1660 } 1661 1662 /* 1663 * Run scan, possibly freeing data and/or kva mappings on the fly 1664 * depending. 1665 */ 1666 1667 while ((bp = nbp) != NULL) { 1668 int qindex = nqindex; 1669 1670 nbp = TAILQ_NEXT(bp, b_freelist); 1671 1672 /* 1673 * BQUEUE_CLEAN - B_AGE special case. If not set the bp 1674 * cycles through the queue twice before being selected. 1675 */ 1676 if (qindex == BQUEUE_CLEAN && 1677 (bp->b_flags & B_AGE) == 0 && nbp) { 1678 bp->b_flags |= B_AGE; 1679 TAILQ_REMOVE(&bufqueues[qindex], bp, b_freelist); 1680 TAILQ_INSERT_TAIL(&bufqueues[qindex], bp, b_freelist); 1681 continue; 1682 } 1683 1684 /* 1685 * Calculate next bp ( we can only use it if we do not block 1686 * or do other fancy things ). 1687 */ 1688 if (nbp == NULL) { 1689 switch(qindex) { 1690 case BQUEUE_EMPTY: 1691 nqindex = BQUEUE_EMPTYKVA; 1692 if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_EMPTYKVA]))) 1693 break; 1694 /* fall through */ 1695 case BQUEUE_EMPTYKVA: 1696 nqindex = BQUEUE_CLEAN; 1697 if ((nbp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]))) 1698 break; 1699 /* fall through */ 1700 case BQUEUE_CLEAN: 1701 /* 1702 * nbp is NULL. 1703 */ 1704 break; 1705 } 1706 } 1707 1708 /* 1709 * Sanity Checks 1710 */ 1711 KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp)); 1712 1713 /* 1714 * Note: we no longer distinguish between VMIO and non-VMIO 1715 * buffers. 1716 */ 1717 1718 KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex)); 1719 1720 /* 1721 * If we are defragging then we need a buffer with 1722 * b_kvasize != 0. XXX this situation should no longer 1723 * occur, if defrag is non-zero the buffer's b_kvasize 1724 * should also be non-zero at this point. XXX 1725 */ 1726 if (defrag && bp->b_kvasize == 0) { 1727 kprintf("Warning: defrag empty buffer %p\n", bp); 1728 continue; 1729 } 1730 1731 /* 1732 * Start freeing the bp. This is somewhat involved. 
nbp
		 * remains valid only for BQUEUE_EMPTY[KVA] bp's.  Buffers
		 * on the clean list must be disassociated from their
		 * current vnode.  Buffers on the empty[kva] lists have
		 * already been disassociated.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
			kprintf("getnewbuf: warning, locked buf %p, race corrected\n", bp);
			tsleep(&bd_request, 0, "gnbxxx", hz / 100);
			goto restart;
		}
		if (bp->b_qindex != qindex) {
			kprintf("getnewbuf: warning, BUF_LOCK blocked unexpectedly on buf %p index %d->%d, race corrected\n", bp, qindex, bp->b_qindex);
			BUF_UNLOCK(bp);
			goto restart;
		}
		bremfree(bp);

		/*
		 * Dependencies must be handled before we disassociate the
		 * vnode.
		 *
		 * NOTE: HAMMER will set B_LOCKED if the buffer cannot
		 * be immediately disassociated.  HAMMER then becomes
		 * responsible for releasing the buffer.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL) {
			buf_deallocate(bp);
			if (bp->b_flags & B_LOCKED) {
				bqrelse(bp);
				goto restart;
			}
			KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		}

		if (qindex == BQUEUE_CLEAN) {
			if (bp->b_flags & B_VMIO) {
				bp->b_flags &= ~B_ASYNC;
				vfs_vmio_release(bp);
			}
			if (bp->b_vp)
				brelvp(bp);
		}

		/*
		 * NOTE: nbp is now entirely invalid.  We can only restart
		 * the scan from this point on.
		 *
		 * Get the rest of the buffer freed up.  b_kva* is still
		 * valid after this operation.
		 */
		KASSERT(bp->b_vp == NULL, ("bp3 %p flags %08x vnode %p qindex %d unexpectedly still associated!", bp, bp->b_flags, bp->b_vp, qindex));
		KKASSERT((bp->b_flags & B_HASHED) == 0);

		/*
		 * critical section protection is not required when
		 * scrapping a buffer's contents because it is already
		 * wired.
		 */
		if (bp->b_bufsize)
			allocbuf(bp, 0);

		bp->b_flags = B_BNOCLIP;
		bp->b_cmd = BUF_CMD_DONE;
		bp->b_vp = NULL;
		bp->b_error = 0;
		bp->b_resid = 0;
		bp->b_bcount = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		reinitbufbio(bp);
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		buf_dep_init(bp);
		if (blkflags & GETBLK_BHEAVY)
			bp->b_flags |= B_HEAVY;

		/*
		 * If we are defragging then free the buffer.
		 */
		if (defrag) {
			bp->b_flags |= B_INVAL;
			bfreekva(bp);
			brelse(bp);
			defrag = 0;
			goto restart;
		}

		/*
		 * If we are overcommitted then recover the buffer and its
		 * KVM space.  This occurs in rare situations when multiple
		 * processes are blocked in getnewbuf() or allocbuf().
		 */
		if (bufspace >= hibufspace)
			flushingbufs = 1;
		if (flushingbufs && bp->b_kvasize != 0) {
			bp->b_flags |= B_INVAL;
			bfreekva(bp);
			brelse(bp);
			goto restart;
		}
		if (bufspace < lobufspace)
			flushingbufs = 0;
		break;
	}

	/*
	 * If we exhausted our list, sleep as appropriate.  We may have to
	 * wakeup various daemons and write out some dirty buffers.
	 *
	 * Generally we are sleeping due to insufficient buffer space.
1844 */ 1845 1846 if (bp == NULL) { 1847 int flags; 1848 char *waitmsg; 1849 1850 if (defrag) { 1851 flags = VFS_BIO_NEED_BUFSPACE; 1852 waitmsg = "nbufkv"; 1853 } else if (bufspace >= hibufspace) { 1854 waitmsg = "nbufbs"; 1855 flags = VFS_BIO_NEED_BUFSPACE; 1856 } else { 1857 waitmsg = "newbuf"; 1858 flags = VFS_BIO_NEED_ANY; 1859 } 1860 1861 needsbuffer |= flags; 1862 bd_speedup(); /* heeeelp */ 1863 while (needsbuffer & flags) { 1864 if (tsleep(&needsbuffer, slpflags, waitmsg, slptimeo)) 1865 return (NULL); 1866 } 1867 } else { 1868 /* 1869 * We finally have a valid bp. We aren't quite out of the 1870 * woods, we still have to reserve kva space. In order 1871 * to keep fragmentation sane we only allocate kva in 1872 * BKVASIZE chunks. 1873 */ 1874 maxsize = (maxsize + BKVAMASK) & ~BKVAMASK; 1875 1876 if (maxsize != bp->b_kvasize) { 1877 vm_offset_t addr = 0; 1878 int count; 1879 1880 bfreekva(bp); 1881 1882 count = vm_map_entry_reserve(MAP_RESERVE_COUNT); 1883 vm_map_lock(&buffer_map); 1884 1885 if (vm_map_findspace(&buffer_map, 1886 vm_map_min(&buffer_map), maxsize, 1887 maxsize, &addr)) { 1888 /* 1889 * Uh oh. Buffer map is too fragmented. We 1890 * must defragment the map. 1891 */ 1892 vm_map_unlock(&buffer_map); 1893 vm_map_entry_release(count); 1894 ++bufdefragcnt; 1895 defrag = 1; 1896 bp->b_flags |= B_INVAL; 1897 brelse(bp); 1898 goto restart; 1899 } 1900 if (addr) { 1901 vm_map_insert(&buffer_map, &count, 1902 NULL, 0, 1903 addr, addr + maxsize, 1904 VM_MAPTYPE_NORMAL, 1905 VM_PROT_ALL, VM_PROT_ALL, 1906 MAP_NOFAULT); 1907 1908 bp->b_kvabase = (caddr_t) addr; 1909 bp->b_kvasize = maxsize; 1910 bufspace += bp->b_kvasize; 1911 ++bufreusecnt; 1912 } 1913 vm_map_unlock(&buffer_map); 1914 vm_map_entry_release(count); 1915 } 1916 bp->b_data = bp->b_kvabase; 1917 } 1918 return(bp); 1919 } 1920 1921 /* 1922 * This routine is called in an emergency to recover VM pages from the 1923 * buffer cache by cashing in clean buffers. The idea is to recover 1924 * enough pages to be able to satisfy a stuck bio_page_alloc(). 1925 */ 1926 static int 1927 recoverbufpages(void) 1928 { 1929 struct buf *bp; 1930 int bytes = 0; 1931 1932 ++recoverbufcalls; 1933 1934 while (bytes < MAXBSIZE) { 1935 bp = TAILQ_FIRST(&bufqueues[BQUEUE_CLEAN]); 1936 if (bp == NULL) 1937 break; 1938 1939 /* 1940 * BQUEUE_CLEAN - B_AGE special case. If not set the bp 1941 * cycles through the queue twice before being selected. 1942 */ 1943 if ((bp->b_flags & B_AGE) == 0 && TAILQ_NEXT(bp, b_freelist)) { 1944 bp->b_flags |= B_AGE; 1945 TAILQ_REMOVE(&bufqueues[BQUEUE_CLEAN], bp, b_freelist); 1946 TAILQ_INSERT_TAIL(&bufqueues[BQUEUE_CLEAN], 1947 bp, b_freelist); 1948 continue; 1949 } 1950 1951 /* 1952 * Sanity Checks 1953 */ 1954 KKASSERT(bp->b_qindex == BQUEUE_CLEAN); 1955 KKASSERT((bp->b_flags & B_DELWRI) == 0); 1956 1957 /* 1958 * Start freeing the bp. This is somewhat involved. 1959 * 1960 * Buffers on the clean list must be disassociated from 1961 * their current vnode 1962 */ 1963 1964 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1965 kprintf("recoverbufpages: warning, locked buf %p, race corrected\n", bp); 1966 tsleep(&bd_request, 0, "gnbxxx", hz / 100); 1967 continue; 1968 } 1969 if (bp->b_qindex != BQUEUE_CLEAN) { 1970 kprintf("recoverbufpages: warning, BUF_LOCK blocked unexpectedly on buf %p index %d, race corrected\n", bp, bp->b_qindex); 1971 BUF_UNLOCK(bp); 1972 continue; 1973 } 1974 bremfree(bp); 1975 1976 /* 1977 * Dependancies must be handled before we disassociate the 1978 * vnode. 
1979 * 1980 * NOTE: HAMMER will set B_LOCKED if the buffer cannot 1981 * be immediately disassociated. HAMMER then becomes 1982 * responsible for releasing the buffer. 1983 */ 1984 if (LIST_FIRST(&bp->b_dep) != NULL) { 1985 buf_deallocate(bp); 1986 if (bp->b_flags & B_LOCKED) { 1987 bqrelse(bp); 1988 continue; 1989 } 1990 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 1991 } 1992 1993 bytes += bp->b_bufsize; 1994 1995 if (bp->b_flags & B_VMIO) { 1996 bp->b_flags &= ~B_ASYNC; 1997 bp->b_flags |= B_DIRECT; /* try to free pages */ 1998 vfs_vmio_release(bp); 1999 } 2000 if (bp->b_vp) 2001 brelvp(bp); 2002 2003 KKASSERT(bp->b_vp == NULL); 2004 KKASSERT((bp->b_flags & B_HASHED) == 0); 2005 2006 /* 2007 * critical section protection is not required when 2008 * scrapping a buffer's contents because it is already 2009 * wired. 2010 */ 2011 if (bp->b_bufsize) 2012 allocbuf(bp, 0); 2013 2014 bp->b_flags = B_BNOCLIP; 2015 bp->b_cmd = BUF_CMD_DONE; 2016 bp->b_vp = NULL; 2017 bp->b_error = 0; 2018 bp->b_resid = 0; 2019 bp->b_bcount = 0; 2020 bp->b_xio.xio_npages = 0; 2021 bp->b_dirtyoff = bp->b_dirtyend = 0; 2022 reinitbufbio(bp); 2023 KKASSERT(LIST_FIRST(&bp->b_dep) == NULL); 2024 buf_dep_init(bp); 2025 bp->b_flags |= B_INVAL; 2026 /* bfreekva(bp); */ 2027 brelse(bp); 2028 } 2029 return(bytes); 2030 } 2031 2032 /* 2033 * buf_daemon: 2034 * 2035 * Buffer flushing daemon. Buffers are normally flushed by the 2036 * update daemon but if it cannot keep up this process starts to 2037 * take the load in an attempt to prevent getnewbuf() from blocking. 2038 * 2039 * Once a flush is initiated it does not stop until the number 2040 * of buffers falls below lodirtybuffers, but we will wake up anyone 2041 * waiting at the mid-point. 2042 */ 2043 2044 static struct kproc_desc buf_kp = { 2045 "bufdaemon", 2046 buf_daemon, 2047 &bufdaemon_td 2048 }; 2049 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, 2050 kproc_start, &buf_kp) 2051 2052 static struct kproc_desc bufhw_kp = { 2053 "bufdaemon_hw", 2054 buf_daemon_hw, 2055 &bufdaemonhw_td 2056 }; 2057 SYSINIT(bufdaemon_hw, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, 2058 kproc_start, &bufhw_kp) 2059 2060 static void 2061 buf_daemon(void) 2062 { 2063 int limit; 2064 2065 /* 2066 * This process needs to be suspended prior to shutdown sync. 2067 */ 2068 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, 2069 bufdaemon_td, SHUTDOWN_PRI_LAST); 2070 curthread->td_flags |= TDF_SYSTHREAD; 2071 2072 /* 2073 * This process is allowed to take the buffer cache to the limit 2074 */ 2075 crit_enter(); 2076 2077 for (;;) { 2078 kproc_suspend_loop(); 2079 2080 /* 2081 * Do the flush. Limit the amount of in-transit I/O we 2082 * allow to build up, otherwise we would completely saturate 2083 * the I/O system. Wakeup any waiting processes before we 2084 * normally would so they can run in parallel with our drain. 2085 * 2086 * Our aggregate normal+HW lo water mark is lodirtybufspace, 2087 * but because we split the operation into two threads we 2088 * have to cut it in half for each thread. 2089 */ 2090 limit = lodirtybufspace / 2; 2091 waitrunningbufspace(limit); 2092 while (runningbufspace + dirtybufspace > limit || 2093 dirtybufcount - dirtybufcounthw >= nbuf / 2) { 2094 if (flushbufqueues(BQUEUE_DIRTY) == 0) 2095 break; 2096 waitrunningbufspace(limit); 2097 } 2098 2099 /* 2100 * We reached our low water mark, reset the 2101 * request and sleep until we are needed again. 2102 * The sleep is just so the suspend code works. 
2103 */ 2104 spin_lock_wr(&needsbuffer_spin); 2105 if (bd_request == 0) { 2106 msleep(&bd_request, &needsbuffer_spin, 0, 2107 "psleep", hz); 2108 } 2109 bd_request = 0; 2110 spin_unlock_wr(&needsbuffer_spin); 2111 } 2112 } 2113 2114 static void 2115 buf_daemon_hw(void) 2116 { 2117 int limit; 2118 2119 /* 2120 * This process needs to be suspended prior to shutdown sync. 2121 */ 2122 EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, 2123 bufdaemonhw_td, SHUTDOWN_PRI_LAST); 2124 curthread->td_flags |= TDF_SYSTHREAD; 2125 2126 /* 2127 * This process is allowed to take the buffer cache to the limit 2128 */ 2129 crit_enter(); 2130 2131 for (;;) { 2132 kproc_suspend_loop(); 2133 2134 /* 2135 * Do the flush. Limit the amount of in-transit I/O we 2136 * allow to build up, otherwise we would completely saturate 2137 * the I/O system. Wakeup any waiting processes before we 2138 * normally would so they can run in parallel with our drain. 2139 * 2140 * Our aggregate normal+HW lo water mark is lodirtybufspace, 2141 * but because we split the operation into two threads we 2142 * have to cut it in half for each thread. 2143 */ 2144 limit = lodirtybufspace / 2; 2145 waitrunningbufspace(limit); 2146 while (runningbufspace + dirtybufspacehw > limit || 2147 dirtybufcounthw >= nbuf / 2) { 2148 if (flushbufqueues(BQUEUE_DIRTY_HW) == 0) 2149 break; 2150 waitrunningbufspace(limit); 2151 } 2152 2153 /* 2154 * We reached our low water mark, reset the 2155 * request and sleep until we are needed again. 2156 * The sleep is just so the suspend code works. 2157 */ 2158 spin_lock_wr(&needsbuffer_spin); 2159 if (bd_request_hw == 0) { 2160 msleep(&bd_request_hw, &needsbuffer_spin, 0, 2161 "psleep", hz); 2162 } 2163 bd_request_hw = 0; 2164 spin_unlock_wr(&needsbuffer_spin); 2165 } 2166 } 2167 2168 /* 2169 * flushbufqueues: 2170 * 2171 * Try to flush a buffer in the dirty queue. We must be careful to 2172 * free up B_INVAL buffers instead of write them, which NFS is 2173 * particularly sensitive to. 2174 * 2175 * B_RELBUF may only be set by VFSs. We do set B_AGE to indicate 2176 * that we really want to try to get the buffer out and reuse it 2177 * due to the write load on the machine. 2178 */ 2179 2180 static int 2181 flushbufqueues(bufq_type_t q) 2182 { 2183 struct buf *bp; 2184 int r = 0; 2185 2186 bp = TAILQ_FIRST(&bufqueues[q]); 2187 while (bp) { 2188 KASSERT((bp->b_flags & B_DELWRI), 2189 ("unexpected clean buffer %p", bp)); 2190 2191 if (bp->b_flags & B_DELWRI) { 2192 if (bp->b_flags & B_INVAL) { 2193 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0) 2194 panic("flushbufqueues: locked buf"); 2195 bremfree(bp); 2196 brelse(bp); 2197 ++r; 2198 break; 2199 } 2200 if (LIST_FIRST(&bp->b_dep) != NULL && 2201 (bp->b_flags & B_DEFERRED) == 0 && 2202 buf_countdeps(bp, 0)) { 2203 TAILQ_REMOVE(&bufqueues[q], bp, b_freelist); 2204 TAILQ_INSERT_TAIL(&bufqueues[q], bp, 2205 b_freelist); 2206 bp->b_flags |= B_DEFERRED; 2207 bp = TAILQ_FIRST(&bufqueues[q]); 2208 continue; 2209 } 2210 2211 /* 2212 * Only write it out if we can successfully lock 2213 * it. If the buffer has a dependancy, 2214 * buf_checkwrite must also return 0 for us to 2215 * be able to initate the write. 2216 * 2217 * If the buffer is flagged B_ERROR it may be 2218 * requeued over and over again, we try to 2219 * avoid a live lock. 
2220 */ 2221 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 2222 if (LIST_FIRST(&bp->b_dep) != NULL && 2223 buf_checkwrite(bp)) { 2224 bremfree(bp); 2225 brelse(bp); 2226 } else if (bp->b_flags & B_ERROR) { 2227 tsleep(bp, 0, "bioer", 1); 2228 bp->b_flags &= ~B_AGE; 2229 vfs_bio_awrite(bp); 2230 } else { 2231 bp->b_flags |= B_AGE; 2232 vfs_bio_awrite(bp); 2233 } 2234 ++r; 2235 break; 2236 } 2237 } 2238 bp = TAILQ_NEXT(bp, b_freelist); 2239 } 2240 return (r); 2241 } 2242 2243 /* 2244 * inmem: 2245 * 2246 * Returns true if no I/O is needed to access the associated VM object. 2247 * This is like findblk except it also hunts around in the VM system for 2248 * the data. 2249 * 2250 * Note that we ignore vm_page_free() races from interrupts against our 2251 * lookup, since if the caller is not protected our return value will not 2252 * be any more valid then otherwise once we exit the critical section. 2253 */ 2254 int 2255 inmem(struct vnode *vp, off_t loffset) 2256 { 2257 vm_object_t obj; 2258 vm_offset_t toff, tinc, size; 2259 vm_page_t m; 2260 2261 if (findblk(vp, loffset)) 2262 return 1; 2263 if (vp->v_mount == NULL) 2264 return 0; 2265 if ((obj = vp->v_object) == NULL) 2266 return 0; 2267 2268 size = PAGE_SIZE; 2269 if (size > vp->v_mount->mnt_stat.f_iosize) 2270 size = vp->v_mount->mnt_stat.f_iosize; 2271 2272 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 2273 m = vm_page_lookup(obj, OFF_TO_IDX(loffset + toff)); 2274 if (m == NULL) 2275 return 0; 2276 tinc = size; 2277 if (tinc > PAGE_SIZE - ((toff + loffset) & PAGE_MASK)) 2278 tinc = PAGE_SIZE - ((toff + loffset) & PAGE_MASK); 2279 if (vm_page_is_valid(m, 2280 (vm_offset_t) ((toff + loffset) & PAGE_MASK), tinc) == 0) 2281 return 0; 2282 } 2283 return 1; 2284 } 2285 2286 /* 2287 * vfs_setdirty: 2288 * 2289 * Sets the dirty range for a buffer based on the status of the dirty 2290 * bits in the pages comprising the buffer. 2291 * 2292 * The range is limited to the size of the buffer. 2293 * 2294 * This routine is primarily used by NFS, but is generalized for the 2295 * B_VMIO case. 2296 */ 2297 static void 2298 vfs_setdirty(struct buf *bp) 2299 { 2300 int i; 2301 vm_object_t object; 2302 2303 /* 2304 * Degenerate case - empty buffer 2305 */ 2306 2307 if (bp->b_bufsize == 0) 2308 return; 2309 2310 /* 2311 * We qualify the scan for modified pages on whether the 2312 * object has been flushed yet. The OBJ_WRITEABLE flag 2313 * is not cleared simply by protecting pages off. 2314 */ 2315 2316 if ((bp->b_flags & B_VMIO) == 0) 2317 return; 2318 2319 object = bp->b_xio.xio_pages[0]->object; 2320 2321 if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY)) 2322 kprintf("Warning: object %p writeable but not mightbedirty\n", object); 2323 if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY)) 2324 kprintf("Warning: object %p mightbedirty but not writeable\n", object); 2325 2326 if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) { 2327 vm_offset_t boffset; 2328 vm_offset_t eoffset; 2329 2330 /* 2331 * test the pages to see if they have been modified directly 2332 * by users through the VM system. 2333 */ 2334 for (i = 0; i < bp->b_xio.xio_npages; i++) { 2335 vm_page_flag_clear(bp->b_xio.xio_pages[i], PG_ZERO); 2336 vm_page_test_dirty(bp->b_xio.xio_pages[i]); 2337 } 2338 2339 /* 2340 * Calculate the encompassing dirty range, boffset and eoffset, 2341 * (eoffset - boffset) bytes. 
2342 */ 2343 2344 for (i = 0; i < bp->b_xio.xio_npages; i++) { 2345 if (bp->b_xio.xio_pages[i]->dirty) 2346 break; 2347 } 2348 boffset = (i << PAGE_SHIFT) - (bp->b_loffset & PAGE_MASK); 2349 2350 for (i = bp->b_xio.xio_npages - 1; i >= 0; --i) { 2351 if (bp->b_xio.xio_pages[i]->dirty) { 2352 break; 2353 } 2354 } 2355 eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_loffset & PAGE_MASK); 2356 2357 /* 2358 * Fit it to the buffer. 2359 */ 2360 2361 if (eoffset > bp->b_bcount) 2362 eoffset = bp->b_bcount; 2363 2364 /* 2365 * If we have a good dirty range, merge with the existing 2366 * dirty range. 2367 */ 2368 2369 if (boffset < eoffset) { 2370 if (bp->b_dirtyoff > boffset) 2371 bp->b_dirtyoff = boffset; 2372 if (bp->b_dirtyend < eoffset) 2373 bp->b_dirtyend = eoffset; 2374 } 2375 } 2376 } 2377 2378 /* 2379 * findblk: 2380 * 2381 * Locate and return the specified buffer, or NULL if the buffer does 2382 * not exist. Do not attempt to lock the buffer or manipulate it in 2383 * any way. The caller must validate that the correct buffer has been 2384 * obtain after locking it. 2385 */ 2386 struct buf * 2387 findblk(struct vnode *vp, off_t loffset) 2388 { 2389 struct buf *bp; 2390 2391 crit_enter(); 2392 bp = buf_rb_hash_RB_LOOKUP(&vp->v_rbhash_tree, loffset); 2393 crit_exit(); 2394 return(bp); 2395 } 2396 2397 /* 2398 * getblk: 2399 * 2400 * Get a block given a specified block and offset into a file/device. 2401 * B_INVAL may or may not be set on return. The caller should clear 2402 * B_INVAL prior to initiating a READ. 2403 * 2404 * IT IS IMPORTANT TO UNDERSTAND THAT IF YOU CALL GETBLK() AND B_CACHE 2405 * IS NOT SET, YOU MUST INITIALIZE THE RETURNED BUFFER, ISSUE A READ, 2406 * OR SET B_INVAL BEFORE RETIRING IT. If you retire a getblk'd buffer 2407 * without doing any of those things the system will likely believe 2408 * the buffer to be valid (especially if it is not B_VMIO), and the 2409 * next getblk() will return the buffer with B_CACHE set. 2410 * 2411 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for 2412 * an existing buffer. 2413 * 2414 * For a VMIO buffer, B_CACHE is modified according to the backing VM. 2415 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set 2416 * and then cleared based on the backing VM. If the previous buffer is 2417 * non-0-sized but invalid, B_CACHE will be cleared. 2418 * 2419 * If getblk() must create a new buffer, the new buffer is returned with 2420 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which 2421 * case it is returned with B_INVAL clear and B_CACHE set based on the 2422 * backing VM. 2423 * 2424 * getblk() also forces a bwrite() for any B_DELWRI buffer whos 2425 * B_CACHE bit is clear. 2426 * 2427 * What this means, basically, is that the caller should use B_CACHE to 2428 * determine whether the buffer is fully valid or not and should clear 2429 * B_INVAL prior to issuing a read. If the caller intends to validate 2430 * the buffer by loading its data area with something, the caller needs 2431 * to clear B_INVAL. If the caller does this without issuing an I/O, 2432 * the caller should set B_CACHE ( as an optimization ), else the caller 2433 * should issue the I/O and biodone() will set B_CACHE if the I/O was 2434 * a write attempt or if it was a successfull read. If the caller 2435 * intends to issue a READ, the caller must clear B_INVAL and B_ERROR 2436 * prior to issuing the READ. biodone() will *not* clear B_INVAL. 
2437 * 2438 * getblk flags: 2439 * 2440 * GETBLK_PCATCH - catch signal if blocked, can cause NULL return 2441 * GETBLK_BHEAVY - heavy-weight buffer cache buffer 2442 */ 2443 struct buf * 2444 getblk(struct vnode *vp, off_t loffset, int size, int blkflags, int slptimeo) 2445 { 2446 struct buf *bp; 2447 int slpflags = (blkflags & GETBLK_PCATCH) ? PCATCH : 0; 2448 int error; 2449 2450 if (size > MAXBSIZE) 2451 panic("getblk: size(%d) > MAXBSIZE(%d)", size, MAXBSIZE); 2452 if (vp->v_object == NULL) 2453 panic("getblk: vnode %p has no object!", vp); 2454 2455 crit_enter(); 2456 loop: 2457 if ((bp = findblk(vp, loffset))) { 2458 /* 2459 * The buffer was found in the cache, but we need to lock it. 2460 * Even with LK_NOWAIT the lockmgr may break our critical 2461 * section, so double-check the validity of the buffer 2462 * once the lock has been obtained. 2463 */ 2464 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 2465 if (blkflags & GETBLK_NOWAIT) { 2466 crit_exit(); 2467 return(NULL); 2468 } 2469 int lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL; 2470 if (blkflags & GETBLK_PCATCH) 2471 lkflags |= LK_PCATCH; 2472 error = BUF_TIMELOCK(bp, lkflags, "getblk", slptimeo); 2473 if (error) { 2474 if (error == ENOLCK) 2475 goto loop; 2476 crit_exit(); 2477 return (NULL); 2478 } 2479 } 2480 2481 /* 2482 * Once the buffer has been locked, make sure we didn't race 2483 * a buffer recyclement. Buffers that are no longer hashed 2484 * will have b_vp == NULL, so this takes care of that check 2485 * as well. 2486 */ 2487 if (bp->b_vp != vp || bp->b_loffset != loffset) { 2488 kprintf("Warning buffer %p (vp %p loffset %lld) was recycled\n", bp, vp, loffset); 2489 BUF_UNLOCK(bp); 2490 goto loop; 2491 } 2492 2493 /* 2494 * If SZMATCH any pre-existing buffer must be of the requested 2495 * size or NULL is returned. The caller absolutely does not 2496 * want getblk() to bwrite() the buffer on a size mismatch. 2497 */ 2498 if ((blkflags & GETBLK_SZMATCH) && size != bp->b_bcount) { 2499 BUF_UNLOCK(bp); 2500 crit_exit(); 2501 return(NULL); 2502 } 2503 2504 /* 2505 * All vnode-based buffers must be backed by a VM object. 2506 */ 2507 KKASSERT(bp->b_flags & B_VMIO); 2508 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 2509 bp->b_flags &= ~B_AGE; 2510 2511 /* 2512 * Make sure that B_INVAL buffers do not have a cached 2513 * block number translation. 2514 */ 2515 if ((bp->b_flags & B_INVAL) && (bp->b_bio2.bio_offset != NOOFFSET)) { 2516 kprintf("Warning invalid buffer %p (vp %p loffset %lld) did not have cleared bio_offset cache\n", bp, vp, loffset); 2517 clearbiocache(&bp->b_bio2); 2518 } 2519 2520 /* 2521 * The buffer is locked. B_CACHE is cleared if the buffer is 2522 * invalid. 2523 */ 2524 if (bp->b_flags & B_INVAL) 2525 bp->b_flags &= ~B_CACHE; 2526 bremfree(bp); 2527 2528 /* 2529 * Any size inconsistancy with a dirty buffer or a buffer 2530 * with a softupdates dependancy must be resolved. Resizing 2531 * the buffer in such circumstances can lead to problems. 
2532 */ 2533 if (size != bp->b_bcount) { 2534 if (bp->b_flags & B_DELWRI) { 2535 bp->b_flags |= B_NOCACHE; 2536 bwrite(bp); 2537 } else if (LIST_FIRST(&bp->b_dep)) { 2538 bp->b_flags |= B_NOCACHE; 2539 bwrite(bp); 2540 } else { 2541 bp->b_flags |= B_RELBUF; 2542 brelse(bp); 2543 } 2544 goto loop; 2545 } 2546 KKASSERT(size <= bp->b_kvasize); 2547 KASSERT(bp->b_loffset != NOOFFSET, 2548 ("getblk: no buffer offset")); 2549 2550 /* 2551 * A buffer with B_DELWRI set and B_CACHE clear must 2552 * be committed before we can return the buffer in 2553 * order to prevent the caller from issuing a read 2554 * ( due to B_CACHE not being set ) and overwriting 2555 * it. 2556 * 2557 * Most callers, including NFS and FFS, need this to 2558 * operate properly either because they assume they 2559 * can issue a read if B_CACHE is not set, or because 2560 * ( for example ) an uncached B_DELWRI might loop due 2561 * to softupdates re-dirtying the buffer. In the latter 2562 * case, B_CACHE is set after the first write completes, 2563 * preventing further loops. 2564 * 2565 * NOTE! b*write() sets B_CACHE. If we cleared B_CACHE 2566 * above while extending the buffer, we cannot allow the 2567 * buffer to remain with B_CACHE set after the write 2568 * completes or it will represent a corrupt state. To 2569 * deal with this we set B_NOCACHE to scrap the buffer 2570 * after the write. 2571 * 2572 * We might be able to do something fancy, like setting 2573 * B_CACHE in bwrite() except if B_DELWRI is already set, 2574 * so the below call doesn't set B_CACHE, but that gets real 2575 * confusing. This is much easier. 2576 */ 2577 2578 if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) { 2579 bp->b_flags |= B_NOCACHE; 2580 bwrite(bp); 2581 goto loop; 2582 } 2583 crit_exit(); 2584 } else { 2585 /* 2586 * Buffer is not in-core, create new buffer. The buffer 2587 * returned by getnewbuf() is locked. Note that the returned 2588 * buffer is also considered valid (not marked B_INVAL). 2589 * 2590 * Calculating the offset for the I/O requires figuring out 2591 * the block size. We use DEV_BSIZE for VBLK or VCHR and 2592 * the mount's f_iosize otherwise. If the vnode does not 2593 * have an associated mount we assume that the passed size is 2594 * the block size. 2595 * 2596 * Note that vn_isdisk() cannot be used here since it may 2597 * return a failure for numerous reasons. Note that the 2598 * buffer size may be larger then the block size (the caller 2599 * will use block numbers with the proper multiple). Beware 2600 * of using any v_* fields which are part of unions. In 2601 * particular, in DragonFly the mount point overloading 2602 * mechanism uses the namecache only and the underlying 2603 * directory vnode is not a special case. 2604 */ 2605 int bsize, maxsize; 2606 2607 if (vp->v_type == VBLK || vp->v_type == VCHR) 2608 bsize = DEV_BSIZE; 2609 else if (vp->v_mount) 2610 bsize = vp->v_mount->mnt_stat.f_iosize; 2611 else 2612 bsize = size; 2613 2614 maxsize = size + (loffset & PAGE_MASK); 2615 maxsize = imax(maxsize, bsize); 2616 2617 if ((bp = getnewbuf(blkflags, slptimeo, size, maxsize)) == NULL) { 2618 if (slpflags || slptimeo) { 2619 crit_exit(); 2620 return NULL; 2621 } 2622 goto loop; 2623 } 2624 2625 /* 2626 * This code is used to make sure that a buffer is not 2627 * created while the getnewbuf routine is blocked. 2628 * This can be a problem whether the vnode is locked or not. 2629 * If the buffer is created out from under us, we have to 2630 * throw away the one we just created. 
There is no window 2631 * race because we are safely running in a critical section 2632 * from the point of the duplicate buffer creation through 2633 * to here, and we've locked the buffer. 2634 */ 2635 if (findblk(vp, loffset)) { 2636 bp->b_flags |= B_INVAL; 2637 brelse(bp); 2638 goto loop; 2639 } 2640 2641 /* 2642 * Insert the buffer into the hash, so that it can 2643 * be found by findblk(). 2644 * 2645 * Make sure the translation layer has been cleared. 2646 */ 2647 bp->b_loffset = loffset; 2648 bp->b_bio2.bio_offset = NOOFFSET; 2649 /* bp->b_bio2.bio_next = NULL; */ 2650 2651 bgetvp(vp, bp); 2652 2653 /* 2654 * All vnode-based buffers must be backed by a VM object. 2655 */ 2656 KKASSERT(vp->v_object != NULL); 2657 bp->b_flags |= B_VMIO; 2658 KKASSERT(bp->b_cmd == BUF_CMD_DONE); 2659 2660 allocbuf(bp, size); 2661 2662 crit_exit(); 2663 } 2664 return (bp); 2665 } 2666 2667 /* 2668 * regetblk(bp) 2669 * 2670 * Reacquire a buffer that was previously released to the locked queue, 2671 * or reacquire a buffer which is interlocked by having bioops->io_deallocate 2672 * set B_LOCKED (which handles the acquisition race). 2673 * 2674 * To this end, either B_LOCKED must be set or the dependancy list must be 2675 * non-empty. 2676 */ 2677 void 2678 regetblk(struct buf *bp) 2679 { 2680 KKASSERT((bp->b_flags & B_LOCKED) || LIST_FIRST(&bp->b_dep) != NULL); 2681 BUF_LOCK(bp, LK_EXCLUSIVE | LK_RETRY); 2682 crit_enter(); 2683 bremfree(bp); 2684 crit_exit(); 2685 } 2686 2687 /* 2688 * geteblk: 2689 * 2690 * Get an empty, disassociated buffer of given size. The buffer is 2691 * initially set to B_INVAL. 2692 * 2693 * critical section protection is not required for the allocbuf() 2694 * call because races are impossible here. 2695 */ 2696 struct buf * 2697 geteblk(int size) 2698 { 2699 struct buf *bp; 2700 int maxsize; 2701 2702 maxsize = (size + BKVAMASK) & ~BKVAMASK; 2703 2704 crit_enter(); 2705 while ((bp = getnewbuf(0, 0, size, maxsize)) == 0) 2706 ; 2707 crit_exit(); 2708 allocbuf(bp, size); 2709 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 2710 return (bp); 2711 } 2712 2713 2714 /* 2715 * allocbuf: 2716 * 2717 * This code constitutes the buffer memory from either anonymous system 2718 * memory (in the case of non-VMIO operations) or from an associated 2719 * VM object (in the case of VMIO operations). This code is able to 2720 * resize a buffer up or down. 2721 * 2722 * Note that this code is tricky, and has many complications to resolve 2723 * deadlock or inconsistant data situations. Tread lightly!!! 2724 * There are B_CACHE and B_DELWRI interactions that must be dealt with by 2725 * the caller. Calling this code willy nilly can result in the loss of data. 2726 * 2727 * allocbuf() only adjusts B_CACHE for VMIO buffers. getblk() deals with 2728 * B_CACHE for the non-VMIO case. 2729 * 2730 * This routine does not need to be called from a critical section but you 2731 * must own the buffer. 2732 */ 2733 int 2734 allocbuf(struct buf *bp, int size) 2735 { 2736 int newbsize, mbsize; 2737 int i; 2738 2739 if (BUF_REFCNT(bp) == 0) 2740 panic("allocbuf: buffer not busy"); 2741 2742 if (bp->b_kvasize < size) 2743 panic("allocbuf: buffer too small"); 2744 2745 if ((bp->b_flags & B_VMIO) == 0) { 2746 caddr_t origbuf; 2747 int origbufsize; 2748 /* 2749 * Just get anonymous memory from the kernel. Don't 2750 * mess with B_CACHE. 
2751 */ 2752 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2753 if (bp->b_flags & B_MALLOC) 2754 newbsize = mbsize; 2755 else 2756 newbsize = round_page(size); 2757 2758 if (newbsize < bp->b_bufsize) { 2759 /* 2760 * Malloced buffers are not shrunk 2761 */ 2762 if (bp->b_flags & B_MALLOC) { 2763 if (newbsize) { 2764 bp->b_bcount = size; 2765 } else { 2766 kfree(bp->b_data, M_BIOBUF); 2767 if (bp->b_bufsize) { 2768 bufmallocspace -= bp->b_bufsize; 2769 bufspacewakeup(); 2770 bp->b_bufsize = 0; 2771 } 2772 bp->b_data = bp->b_kvabase; 2773 bp->b_bcount = 0; 2774 bp->b_flags &= ~B_MALLOC; 2775 } 2776 return 1; 2777 } 2778 vm_hold_free_pages( 2779 bp, 2780 (vm_offset_t) bp->b_data + newbsize, 2781 (vm_offset_t) bp->b_data + bp->b_bufsize); 2782 } else if (newbsize > bp->b_bufsize) { 2783 /* 2784 * We only use malloced memory on the first allocation. 2785 * and revert to page-allocated memory when the buffer 2786 * grows. 2787 */ 2788 if ((bufmallocspace < maxbufmallocspace) && 2789 (bp->b_bufsize == 0) && 2790 (mbsize <= PAGE_SIZE/2)) { 2791 2792 bp->b_data = kmalloc(mbsize, M_BIOBUF, M_WAITOK); 2793 bp->b_bufsize = mbsize; 2794 bp->b_bcount = size; 2795 bp->b_flags |= B_MALLOC; 2796 bufmallocspace += mbsize; 2797 return 1; 2798 } 2799 origbuf = NULL; 2800 origbufsize = 0; 2801 /* 2802 * If the buffer is growing on its other-than-first 2803 * allocation, then we revert to the page-allocation 2804 * scheme. 2805 */ 2806 if (bp->b_flags & B_MALLOC) { 2807 origbuf = bp->b_data; 2808 origbufsize = bp->b_bufsize; 2809 bp->b_data = bp->b_kvabase; 2810 if (bp->b_bufsize) { 2811 bufmallocspace -= bp->b_bufsize; 2812 bufspacewakeup(); 2813 bp->b_bufsize = 0; 2814 } 2815 bp->b_flags &= ~B_MALLOC; 2816 newbsize = round_page(newbsize); 2817 } 2818 vm_hold_load_pages( 2819 bp, 2820 (vm_offset_t) bp->b_data + bp->b_bufsize, 2821 (vm_offset_t) bp->b_data + newbsize); 2822 if (origbuf) { 2823 bcopy(origbuf, bp->b_data, origbufsize); 2824 kfree(origbuf, M_BIOBUF); 2825 } 2826 } 2827 } else { 2828 vm_page_t m; 2829 int desiredpages; 2830 2831 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 2832 desiredpages = ((int)(bp->b_loffset & PAGE_MASK) + 2833 newbsize + PAGE_MASK) >> PAGE_SHIFT; 2834 KKASSERT(desiredpages <= XIO_INTERNAL_PAGES); 2835 2836 if (bp->b_flags & B_MALLOC) 2837 panic("allocbuf: VMIO buffer can't be malloced"); 2838 /* 2839 * Set B_CACHE initially if buffer is 0 length or will become 2840 * 0-length. 2841 */ 2842 if (size == 0 || bp->b_bufsize == 0) 2843 bp->b_flags |= B_CACHE; 2844 2845 if (newbsize < bp->b_bufsize) { 2846 /* 2847 * DEV_BSIZE aligned new buffer size is less then the 2848 * DEV_BSIZE aligned existing buffer size. Figure out 2849 * if we have to remove any pages. 2850 */ 2851 if (desiredpages < bp->b_xio.xio_npages) { 2852 for (i = desiredpages; i < bp->b_xio.xio_npages; i++) { 2853 /* 2854 * the page is not freed here -- it 2855 * is the responsibility of 2856 * vnode_pager_setsize 2857 */ 2858 m = bp->b_xio.xio_pages[i]; 2859 KASSERT(m != bogus_page, 2860 ("allocbuf: bogus page found")); 2861 while (vm_page_sleep_busy(m, TRUE, "biodep")) 2862 ; 2863 2864 bp->b_xio.xio_pages[i] = NULL; 2865 vm_page_unwire(m, 0); 2866 } 2867 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + 2868 (desiredpages << PAGE_SHIFT), (bp->b_xio.xio_npages - desiredpages)); 2869 bp->b_xio.xio_npages = desiredpages; 2870 } 2871 } else if (size > bp->b_bcount) { 2872 /* 2873 * We are growing the buffer, possibly in a 2874 * byte-granular fashion. 
2875 */ 2876 struct vnode *vp; 2877 vm_object_t obj; 2878 vm_offset_t toff; 2879 vm_offset_t tinc; 2880 2881 /* 2882 * Step 1, bring in the VM pages from the object, 2883 * allocating them if necessary. We must clear 2884 * B_CACHE if these pages are not valid for the 2885 * range covered by the buffer. 2886 * 2887 * critical section protection is required to protect 2888 * against interrupts unbusying and freeing pages 2889 * between our vm_page_lookup() and our 2890 * busycheck/wiring call. 2891 */ 2892 vp = bp->b_vp; 2893 obj = vp->v_object; 2894 2895 crit_enter(); 2896 while (bp->b_xio.xio_npages < desiredpages) { 2897 vm_page_t m; 2898 vm_pindex_t pi; 2899 2900 pi = OFF_TO_IDX(bp->b_loffset) + bp->b_xio.xio_npages; 2901 if ((m = vm_page_lookup(obj, pi)) == NULL) { 2902 /* 2903 * note: must allocate system pages 2904 * since blocking here could intefere 2905 * with paging I/O, no matter which 2906 * process we are. 2907 */ 2908 m = bio_page_alloc(obj, pi, desiredpages - bp->b_xio.xio_npages); 2909 if (m) { 2910 vm_page_wire(m); 2911 vm_page_wakeup(m); 2912 bp->b_flags &= ~B_CACHE; 2913 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; 2914 ++bp->b_xio.xio_npages; 2915 } 2916 continue; 2917 } 2918 2919 /* 2920 * We found a page. If we have to sleep on it, 2921 * retry because it might have gotten freed out 2922 * from under us. 2923 * 2924 * We can only test PG_BUSY here. Blocking on 2925 * m->busy might lead to a deadlock: 2926 * 2927 * vm_fault->getpages->cluster_read->allocbuf 2928 * 2929 */ 2930 2931 if (vm_page_sleep_busy(m, FALSE, "pgtblk")) 2932 continue; 2933 vm_page_flag_clear(m, PG_ZERO); 2934 vm_page_wire(m); 2935 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m; 2936 ++bp->b_xio.xio_npages; 2937 } 2938 crit_exit(); 2939 2940 /* 2941 * Step 2. We've loaded the pages into the buffer, 2942 * we have to figure out if we can still have B_CACHE 2943 * set. Note that B_CACHE is set according to the 2944 * byte-granular range ( bcount and size ), not the 2945 * aligned range ( newbsize ). 2946 * 2947 * The VM test is against m->valid, which is DEV_BSIZE 2948 * aligned. Needless to say, the validity of the data 2949 * needs to also be DEV_BSIZE aligned. Note that this 2950 * fails with NFS if the server or some other client 2951 * extends the file's EOF. If our buffer is resized, 2952 * B_CACHE may remain set! XXX 2953 */ 2954 2955 toff = bp->b_bcount; 2956 tinc = PAGE_SIZE - ((bp->b_loffset + toff) & PAGE_MASK); 2957 2958 while ((bp->b_flags & B_CACHE) && toff < size) { 2959 vm_pindex_t pi; 2960 2961 if (tinc > (size - toff)) 2962 tinc = size - toff; 2963 2964 pi = ((bp->b_loffset & PAGE_MASK) + toff) >> 2965 PAGE_SHIFT; 2966 2967 vfs_buf_test_cache( 2968 bp, 2969 bp->b_loffset, 2970 toff, 2971 tinc, 2972 bp->b_xio.xio_pages[pi] 2973 ); 2974 toff += tinc; 2975 tinc = PAGE_SIZE; 2976 } 2977 2978 /* 2979 * Step 3, fixup the KVM pmap. Remember that 2980 * bp->b_data is relative to bp->b_loffset, but 2981 * bp->b_loffset may be offset into the first page. 
2982 */ 2983 2984 bp->b_data = (caddr_t) 2985 trunc_page((vm_offset_t)bp->b_data); 2986 pmap_qenter( 2987 (vm_offset_t)bp->b_data, 2988 bp->b_xio.xio_pages, 2989 bp->b_xio.xio_npages 2990 ); 2991 bp->b_data = (caddr_t)((vm_offset_t)bp->b_data | 2992 (vm_offset_t)(bp->b_loffset & PAGE_MASK)); 2993 } 2994 } 2995 2996 /* adjust space use on already-dirty buffer */ 2997 if (bp->b_flags & B_DELWRI) { 2998 dirtybufspace += newbsize - bp->b_bufsize; 2999 if (bp->b_flags & B_HEAVY) 3000 dirtybufspacehw += newbsize - bp->b_bufsize; 3001 } 3002 if (newbsize < bp->b_bufsize) 3003 bufspacewakeup(); 3004 bp->b_bufsize = newbsize; /* actual buffer allocation */ 3005 bp->b_bcount = size; /* requested buffer size */ 3006 return 1; 3007 } 3008 3009 /* 3010 * biowait: 3011 * 3012 * Wait for buffer I/O completion, returning error status. The buffer 3013 * is left locked on return. B_EINTR is converted into an EINTR error 3014 * and cleared. 3015 * 3016 * NOTE! The original b_cmd is lost on return, since b_cmd will be 3017 * set to BUF_CMD_DONE. 3018 */ 3019 int 3020 biowait(struct buf *bp) 3021 { 3022 crit_enter(); 3023 while (bp->b_cmd != BUF_CMD_DONE) { 3024 if (bp->b_cmd == BUF_CMD_READ) 3025 tsleep(bp, 0, "biord", 0); 3026 else 3027 tsleep(bp, 0, "biowr", 0); 3028 } 3029 crit_exit(); 3030 if (bp->b_flags & B_EINTR) { 3031 bp->b_flags &= ~B_EINTR; 3032 return (EINTR); 3033 } 3034 if (bp->b_flags & B_ERROR) { 3035 return (bp->b_error ? bp->b_error : EIO); 3036 } else { 3037 return (0); 3038 } 3039 } 3040 3041 /* 3042 * This associates a tracking count with an I/O. vn_strategy() and 3043 * dev_dstrategy() do this automatically but there are a few cases 3044 * where a vnode or device layer is bypassed when a block translation 3045 * is cached. In such cases bio_start_transaction() may be called on 3046 * the bypassed layers so the system gets an I/O in progress indication 3047 * for those higher layers. 3048 */ 3049 void 3050 bio_start_transaction(struct bio *bio, struct bio_track *track) 3051 { 3052 bio->bio_track = track; 3053 atomic_add_int(&track->bk_active, 1); 3054 } 3055 3056 /* 3057 * Initiate I/O on a vnode. 3058 */ 3059 void 3060 vn_strategy(struct vnode *vp, struct bio *bio) 3061 { 3062 struct bio_track *track; 3063 3064 KKASSERT(bio->bio_buf->b_cmd != BUF_CMD_DONE); 3065 if (bio->bio_buf->b_cmd == BUF_CMD_READ) 3066 track = &vp->v_track_read; 3067 else 3068 track = &vp->v_track_write; 3069 bio->bio_track = track; 3070 atomic_add_int(&track->bk_active, 1); 3071 vop_strategy(*vp->v_ops, vp, bio); 3072 } 3073 3074 3075 /* 3076 * biodone: 3077 * 3078 * Finish I/O on a buffer, optionally calling a completion function. 3079 * This is usually called from an interrupt so process blocking is 3080 * not allowed. 3081 * 3082 * biodone is also responsible for setting B_CACHE in a B_VMIO bp. 3083 * In a non-VMIO bp, B_CACHE will be set on the next getblk() 3084 * assuming B_INVAL is clear. 3085 * 3086 * For the VMIO case, we set B_CACHE if the op was a read and no 3087 * read error occured, or if the op was a write. B_CACHE is never 3088 * set if the buffer is invalid or otherwise uncacheable. 3089 * 3090 * biodone does not mess with B_INVAL, allowing the I/O routine or the 3091 * initiator to leave B_INVAL set to brelse the buffer out of existance 3092 * in the biodone routine. 
3093 */ 3094 void 3095 biodone(struct bio *bio) 3096 { 3097 struct buf *bp = bio->bio_buf; 3098 buf_cmd_t cmd; 3099 3100 crit_enter(); 3101 3102 KASSERT(BUF_REFCNTNB(bp) > 0, 3103 ("biodone: bp %p not busy %d", bp, BUF_REFCNTNB(bp))); 3104 KASSERT(bp->b_cmd != BUF_CMD_DONE, 3105 ("biodone: bp %p already done!", bp)); 3106 3107 runningbufwakeup(bp); 3108 3109 /* 3110 * Run up the chain of BIO's. Leave b_cmd intact for the duration. 3111 */ 3112 while (bio) { 3113 biodone_t *done_func; 3114 struct bio_track *track; 3115 3116 /* 3117 * BIO tracking. Most but not all BIOs are tracked. 3118 */ 3119 if ((track = bio->bio_track) != NULL) { 3120 atomic_subtract_int(&track->bk_active, 1); 3121 if (track->bk_active < 0) { 3122 panic("biodone: bad active count bio %p\n", 3123 bio); 3124 } 3125 if (track->bk_waitflag) { 3126 track->bk_waitflag = 0; 3127 wakeup(track); 3128 } 3129 bio->bio_track = NULL; 3130 } 3131 3132 /* 3133 * A bio_done function terminates the loop. The function 3134 * will be responsible for any further chaining and/or 3135 * buffer management. 3136 * 3137 * WARNING! The done function can deallocate the buffer! 3138 */ 3139 if ((done_func = bio->bio_done) != NULL) { 3140 bio->bio_done = NULL; 3141 done_func(bio); 3142 crit_exit(); 3143 return; 3144 } 3145 bio = bio->bio_prev; 3146 } 3147 3148 cmd = bp->b_cmd; 3149 bp->b_cmd = BUF_CMD_DONE; 3150 3151 /* 3152 * Only reads and writes are processed past this point. 3153 */ 3154 if (cmd != BUF_CMD_READ && cmd != BUF_CMD_WRITE) { 3155 if (cmd == BUF_CMD_FREEBLKS) 3156 bp->b_flags |= B_NOCACHE; 3157 brelse(bp); 3158 crit_exit(); 3159 return; 3160 } 3161 3162 /* 3163 * Warning: softupdates may re-dirty the buffer, and HAMMER can do 3164 * a lot worse. XXX - move this above the clearing of b_cmd 3165 */ 3166 if (LIST_FIRST(&bp->b_dep) != NULL) 3167 buf_complete(bp); 3168 3169 /* 3170 * A failed write must re-dirty the buffer unless B_INVAL 3171 * was set. 3172 */ 3173 if (cmd == BUF_CMD_WRITE && 3174 (bp->b_flags & (B_ERROR | B_INVAL)) == B_ERROR) { 3175 bp->b_flags &= ~B_NOCACHE; 3176 bdirty(bp); 3177 } 3178 3179 3180 if (bp->b_flags & B_VMIO) { 3181 int i; 3182 vm_ooffset_t foff; 3183 vm_page_t m; 3184 vm_object_t obj; 3185 int iosize; 3186 struct vnode *vp = bp->b_vp; 3187 3188 obj = vp->v_object; 3189 3190 #if defined(VFS_BIO_DEBUG) 3191 if (vp->v_auxrefs == 0) 3192 panic("biodone: zero vnode hold count"); 3193 if ((vp->v_flag & VOBJBUF) == 0) 3194 panic("biodone: vnode is not setup for merged cache"); 3195 #endif 3196 3197 foff = bp->b_loffset; 3198 KASSERT(foff != NOOFFSET, ("biodone: no buffer offset")); 3199 KASSERT(obj != NULL, ("biodone: missing VM object")); 3200 3201 #if defined(VFS_BIO_DEBUG) 3202 if (obj->paging_in_progress < bp->b_xio.xio_npages) { 3203 kprintf("biodone: paging in progress(%d) < bp->b_xio.xio_npages(%d)\n", 3204 obj->paging_in_progress, bp->b_xio.xio_npages); 3205 } 3206 #endif 3207 3208 /* 3209 * Set B_CACHE if the op was a normal read and no error 3210 * occured. B_CACHE is set for writes in the b*write() 3211 * routines. 3212 */ 3213 iosize = bp->b_bcount - bp->b_resid; 3214 if (cmd == BUF_CMD_READ && (bp->b_flags & (B_INVAL|B_NOCACHE|B_ERROR)) == 0) { 3215 bp->b_flags |= B_CACHE; 3216 } 3217 3218 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3219 int bogusflag = 0; 3220 int resid; 3221 3222 resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff; 3223 if (resid > iosize) 3224 resid = iosize; 3225 3226 /* 3227 * cleanup bogus pages, restoring the originals. 
Since 3228 * the originals should still be wired, we don't have 3229 * to worry about interrupt/freeing races destroying 3230 * the VM object association. 3231 */ 3232 m = bp->b_xio.xio_pages[i]; 3233 if (m == bogus_page) { 3234 bogusflag = 1; 3235 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 3236 if (m == NULL) 3237 panic("biodone: page disappeared"); 3238 bp->b_xio.xio_pages[i] = m; 3239 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3240 bp->b_xio.xio_pages, bp->b_xio.xio_npages); 3241 } 3242 #if defined(VFS_BIO_DEBUG) 3243 if (OFF_TO_IDX(foff) != m->pindex) { 3244 kprintf( 3245 "biodone: foff(%lu)/m->pindex(%d) mismatch\n", 3246 (unsigned long)foff, m->pindex); 3247 } 3248 #endif 3249 3250 /* 3251 * In the write case, the valid and clean bits are 3252 * already changed correctly ( see bdwrite() ), so we 3253 * only need to do this here in the read case. 3254 */ 3255 if (cmd == BUF_CMD_READ && !bogusflag && resid > 0) { 3256 vfs_page_set_valid(bp, foff, i, m); 3257 } 3258 vm_page_flag_clear(m, PG_ZERO); 3259 3260 /* 3261 * when debugging new filesystems or buffer I/O methods, this 3262 * is the most common error that pops up. if you see this, you 3263 * have not set the page busy flag correctly!!! 3264 */ 3265 if (m->busy == 0) { 3266 kprintf("biodone: page busy < 0, " 3267 "pindex: %d, foff: 0x(%x,%x), " 3268 "resid: %d, index: %d\n", 3269 (int) m->pindex, (int)(foff >> 32), 3270 (int) foff & 0xffffffff, resid, i); 3271 if (!vn_isdisk(vp, NULL)) 3272 kprintf(" iosize: %ld, loffset: %lld, flags: 0x%08x, npages: %d\n", 3273 bp->b_vp->v_mount->mnt_stat.f_iosize, 3274 bp->b_loffset, 3275 bp->b_flags, bp->b_xio.xio_npages); 3276 else 3277 kprintf(" VDEV, loffset: %lld, flags: 0x%08x, npages: %d\n", 3278 bp->b_loffset, 3279 bp->b_flags, bp->b_xio.xio_npages); 3280 kprintf(" valid: 0x%x, dirty: 0x%x, wired: %d\n", 3281 m->valid, m->dirty, m->wire_count); 3282 panic("biodone: page busy < 0"); 3283 } 3284 vm_page_io_finish(m); 3285 vm_object_pip_subtract(obj, 1); 3286 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3287 iosize -= resid; 3288 } 3289 if (obj) 3290 vm_object_pip_wakeupn(obj, 0); 3291 } 3292 3293 /* 3294 * For asynchronous completions, release the buffer now. The brelse 3295 * will do a wakeup there if necessary - so no need to do a wakeup 3296 * here in the async case. The sync case always needs to do a wakeup. 3297 */ 3298 3299 if (bp->b_flags & B_ASYNC) { 3300 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 3301 brelse(bp); 3302 else 3303 bqrelse(bp); 3304 } else { 3305 wakeup(bp); 3306 } 3307 crit_exit(); 3308 } 3309 3310 /* 3311 * vfs_unbusy_pages: 3312 * 3313 * This routine is called in lieu of iodone in the case of 3314 * incomplete I/O. This keeps the busy status for pages 3315 * consistant. 3316 */ 3317 void 3318 vfs_unbusy_pages(struct buf *bp) 3319 { 3320 int i; 3321 3322 runningbufwakeup(bp); 3323 if (bp->b_flags & B_VMIO) { 3324 struct vnode *vp = bp->b_vp; 3325 vm_object_t obj; 3326 3327 obj = vp->v_object; 3328 3329 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3330 vm_page_t m = bp->b_xio.xio_pages[i]; 3331 3332 /* 3333 * When restoring bogus changes the original pages 3334 * should still be wired, so we are in no danger of 3335 * losing the object association and do not need 3336 * critical section protection particularly. 
3337 */ 3338 if (m == bogus_page) { 3339 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_loffset) + i); 3340 if (!m) { 3341 panic("vfs_unbusy_pages: page missing"); 3342 } 3343 bp->b_xio.xio_pages[i] = m; 3344 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3345 bp->b_xio.xio_pages, bp->b_xio.xio_npages); 3346 } 3347 vm_object_pip_subtract(obj, 1); 3348 vm_page_flag_clear(m, PG_ZERO); 3349 vm_page_io_finish(m); 3350 } 3351 vm_object_pip_wakeupn(obj, 0); 3352 } 3353 } 3354 3355 /* 3356 * vfs_page_set_valid: 3357 * 3358 * Set the valid bits in a page based on the supplied offset. The 3359 * range is restricted to the buffer's size. 3360 * 3361 * This routine is typically called after a read completes. 3362 */ 3363 static void 3364 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m) 3365 { 3366 vm_ooffset_t soff, eoff; 3367 3368 /* 3369 * Start and end offsets in buffer. eoff - soff may not cross a 3370 * page boundry or cross the end of the buffer. The end of the 3371 * buffer, in this case, is our file EOF, not the allocation size 3372 * of the buffer. 3373 */ 3374 soff = off; 3375 eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3376 if (eoff > bp->b_loffset + bp->b_bcount) 3377 eoff = bp->b_loffset + bp->b_bcount; 3378 3379 /* 3380 * Set valid range. This is typically the entire buffer and thus the 3381 * entire page. 3382 */ 3383 if (eoff > soff) { 3384 vm_page_set_validclean( 3385 m, 3386 (vm_offset_t) (soff & PAGE_MASK), 3387 (vm_offset_t) (eoff - soff) 3388 ); 3389 } 3390 } 3391 3392 /* 3393 * vfs_busy_pages: 3394 * 3395 * This routine is called before a device strategy routine. 3396 * It is used to tell the VM system that paging I/O is in 3397 * progress, and treat the pages associated with the buffer 3398 * almost as being PG_BUSY. Also the object 'paging_in_progress' 3399 * flag is handled to make sure that the object doesn't become 3400 * inconsistant. 3401 * 3402 * Since I/O has not been initiated yet, certain buffer flags 3403 * such as B_ERROR or B_INVAL may be in an inconsistant state 3404 * and should be ignored. 3405 */ 3406 void 3407 vfs_busy_pages(struct vnode *vp, struct buf *bp) 3408 { 3409 int i, bogus; 3410 struct lwp *lp = curthread->td_lwp; 3411 3412 /* 3413 * The buffer's I/O command must already be set. If reading, 3414 * B_CACHE must be 0 (double check against callers only doing 3415 * I/O when B_CACHE is 0). 3416 */ 3417 KKASSERT(bp->b_cmd != BUF_CMD_DONE); 3418 KKASSERT(bp->b_cmd == BUF_CMD_WRITE || (bp->b_flags & B_CACHE) == 0); 3419 3420 if (bp->b_flags & B_VMIO) { 3421 vm_object_t obj; 3422 vm_ooffset_t foff; 3423 3424 obj = vp->v_object; 3425 foff = bp->b_loffset; 3426 KASSERT(bp->b_loffset != NOOFFSET, 3427 ("vfs_busy_pages: no buffer offset")); 3428 vfs_setdirty(bp); 3429 3430 /* 3431 * Loop until none of the pages are busy. 3432 */ 3433 retry: 3434 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3435 vm_page_t m = bp->b_xio.xio_pages[i]; 3436 3437 if (vm_page_sleep_busy(m, FALSE, "vbpage")) 3438 goto retry; 3439 } 3440 3441 /* 3442 * Setup for I/O, soft-busy the page right now because 3443 * the next loop may block. 3444 */ 3445 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3446 vm_page_t m = bp->b_xio.xio_pages[i]; 3447 3448 vm_page_flag_clear(m, PG_ZERO); 3449 if ((bp->b_flags & B_CLUSTER) == 0) { 3450 vm_object_pip_add(obj, 1); 3451 vm_page_io_start(m); 3452 } 3453 } 3454 3455 /* 3456 * Adjust protections for I/O and do bogus-page mapping. 
3457 * Assume that vm_page_protect() can block (it can block 3458 * if VM_PROT_NONE, don't take any chances regardless). 3459 */ 3460 bogus = 0; 3461 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3462 vm_page_t m = bp->b_xio.xio_pages[i]; 3463 3464 /* 3465 * When readying a vnode-backed buffer for a write 3466 * we must zero-fill any invalid portions of the 3467 * backing VM pages. 3468 * 3469 * When readying a vnode-backed buffer for a read 3470 * we must replace any dirty pages with a bogus 3471 * page so we do not destroy dirty data when 3472 * filling in gaps. Dirty pages might not 3473 * necessarily be marked dirty yet, so use m->valid 3474 * as a reasonable test. 3475 * 3476 * Bogus page replacement is, uh, bogus. We need 3477 * to find a better way. 3478 */ 3479 if (bp->b_cmd == BUF_CMD_WRITE) { 3480 vm_page_protect(m, VM_PROT_READ); 3481 vfs_page_set_valid(bp, foff, i, m); 3482 } else if (m->valid == VM_PAGE_BITS_ALL) { 3483 bp->b_xio.xio_pages[i] = bogus_page; 3484 bogus++; 3485 } else { 3486 vm_page_protect(m, VM_PROT_NONE); 3487 } 3488 foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3489 } 3490 if (bogus) 3491 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), 3492 bp->b_xio.xio_pages, bp->b_xio.xio_npages); 3493 } 3494 3495 /* 3496 * This is the easiest place to put the process accounting for the I/O 3497 * for now. 3498 */ 3499 if (lp != NULL) { 3500 if (bp->b_cmd == BUF_CMD_READ) 3501 lp->lwp_ru.ru_inblock++; 3502 else 3503 lp->lwp_ru.ru_oublock++; 3504 } 3505 } 3506 3507 /* 3508 * vfs_clean_pages: 3509 * 3510 * Tell the VM system that the pages associated with this buffer 3511 * are clean. This is used for delayed writes where the data is 3512 * going to go to disk eventually without additional VM intevention. 3513 * 3514 * Note that while we only really need to clean through to b_bcount, we 3515 * just go ahead and clean through to b_bufsize. 3516 */ 3517 static void 3518 vfs_clean_pages(struct buf *bp) 3519 { 3520 int i; 3521 3522 if (bp->b_flags & B_VMIO) { 3523 vm_ooffset_t foff; 3524 3525 foff = bp->b_loffset; 3526 KASSERT(foff != NOOFFSET, ("vfs_clean_pages: no buffer offset")); 3527 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3528 vm_page_t m = bp->b_xio.xio_pages[i]; 3529 vm_ooffset_t noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; 3530 3531 vfs_page_set_valid(bp, foff, i, m); 3532 foff = noff; 3533 } 3534 } 3535 } 3536 3537 /* 3538 * vfs_bio_set_validclean: 3539 * 3540 * Set the range within the buffer to valid and clean. The range is 3541 * relative to the beginning of the buffer, b_loffset. Note that 3542 * b_loffset itself may be offset from the beginning of the first page. 3543 */ 3544 3545 void 3546 vfs_bio_set_validclean(struct buf *bp, int base, int size) 3547 { 3548 if (bp->b_flags & B_VMIO) { 3549 int i; 3550 int n; 3551 3552 /* 3553 * Fixup base to be relative to beginning of first page. 3554 * Set initial n to be the maximum number of bytes in the 3555 * first page that can be validated. 3556 */ 3557 3558 base += (bp->b_loffset & PAGE_MASK); 3559 n = PAGE_SIZE - (base & PAGE_MASK); 3560 3561 for (i = base / PAGE_SIZE; size > 0 && i < bp->b_xio.xio_npages; ++i) { 3562 vm_page_t m = bp->b_xio.xio_pages[i]; 3563 3564 if (n > size) 3565 n = size; 3566 3567 vm_page_set_validclean(m, base & PAGE_MASK, n); 3568 base += n; 3569 size -= n; 3570 n = PAGE_SIZE; 3571 } 3572 } 3573 } 3574 3575 /* 3576 * vfs_bio_clrbuf: 3577 * 3578 * Clear a buffer. This routine essentially fakes an I/O, so we need 3579 * to clear B_ERROR and B_INVAL. 
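 *
 * A worked example (illustrative, assuming DEV_BSIZE is 512): for a
 * single-page, page-aligned 2048-byte buffer the code below computes
 *
 *	mask = (1 << (2048 / 512)) - 1 = 0x0f
 *
 * i.e. the four low DEV_BSIZE-sized valid bits of the first page.
 * If all four bits are already set nothing needs to be cleared;
 * otherwise only the missing DEV_BSIZE chunks are zeroed.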
3580 * 3581 * Note that while we only theoretically need to clear through b_bcount, 3582 * we go ahead and clear through b_bufsize. 3583 */ 3584 3585 void 3586 vfs_bio_clrbuf(struct buf *bp) 3587 { 3588 int i, mask = 0; 3589 caddr_t sa, ea; 3590 if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) { 3591 bp->b_flags &= ~(B_INVAL|B_ERROR); 3592 if ((bp->b_xio.xio_npages == 1) && (bp->b_bufsize < PAGE_SIZE) && 3593 (bp->b_loffset & PAGE_MASK) == 0) { 3594 mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1; 3595 if ((bp->b_xio.xio_pages[0]->valid & mask) == mask) { 3596 bp->b_resid = 0; 3597 return; 3598 } 3599 if (((bp->b_xio.xio_pages[0]->flags & PG_ZERO) == 0) && 3600 ((bp->b_xio.xio_pages[0]->valid & mask) == 0)) { 3601 bzero(bp->b_data, bp->b_bufsize); 3602 bp->b_xio.xio_pages[0]->valid |= mask; 3603 bp->b_resid = 0; 3604 return; 3605 } 3606 } 3607 sa = bp->b_data; 3608 for(i=0;i<bp->b_xio.xio_npages;i++,sa=ea) { 3609 int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE; 3610 ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE); 3611 ea = (caddr_t)(vm_offset_t)ulmin( 3612 (u_long)(vm_offset_t)ea, 3613 (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize); 3614 mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j; 3615 if ((bp->b_xio.xio_pages[i]->valid & mask) == mask) 3616 continue; 3617 if ((bp->b_xio.xio_pages[i]->valid & mask) == 0) { 3618 if ((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) { 3619 bzero(sa, ea - sa); 3620 } 3621 } else { 3622 for (; sa < ea; sa += DEV_BSIZE, j++) { 3623 if (((bp->b_xio.xio_pages[i]->flags & PG_ZERO) == 0) && 3624 (bp->b_xio.xio_pages[i]->valid & (1<<j)) == 0) 3625 bzero(sa, DEV_BSIZE); 3626 } 3627 } 3628 bp->b_xio.xio_pages[i]->valid |= mask; 3629 vm_page_flag_clear(bp->b_xio.xio_pages[i], PG_ZERO); 3630 } 3631 bp->b_resid = 0; 3632 } else { 3633 clrbuf(bp); 3634 } 3635 } 3636 3637 /* 3638 * vm_hold_load_pages: 3639 * 3640 * Load pages into the buffer's address space. The pages are 3641 * allocated from the kernel object in order to reduce interference 3642 * with the any VM paging I/O activity. The range of loaded 3643 * pages will be wired. 3644 * 3645 * If a page cannot be allocated, the 'pagedaemon' is woken up to 3646 * retrieve the full range (to - from) of pages. 3647 * 3648 */ 3649 void 3650 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) 3651 { 3652 vm_offset_t pg; 3653 vm_page_t p; 3654 int index; 3655 3656 to = round_page(to); 3657 from = round_page(from); 3658 index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 3659 3660 pg = from; 3661 while (pg < to) { 3662 /* 3663 * Note: must allocate system pages since blocking here 3664 * could intefere with paging I/O, no matter which 3665 * process we are. 3666 */ 3667 p = bio_page_alloc(&kernel_object, pg >> PAGE_SHIFT, 3668 (vm_pindex_t)((to - pg) >> PAGE_SHIFT)); 3669 if (p) { 3670 vm_page_wire(p); 3671 p->valid = VM_PAGE_BITS_ALL; 3672 vm_page_flag_clear(p, PG_ZERO); 3673 pmap_kenter(pg, VM_PAGE_TO_PHYS(p)); 3674 bp->b_xio.xio_pages[index] = p; 3675 vm_page_wakeup(p); 3676 3677 pg += PAGE_SIZE; 3678 ++index; 3679 } 3680 } 3681 bp->b_xio.xio_npages = index; 3682 } 3683 3684 /* 3685 * Allocate pages for a buffer cache buffer. 3686 * 3687 * Under extremely severe memory conditions even allocating out of the 3688 * system reserve can fail. If this occurs we must allocate out of the 3689 * interrupt reserve to avoid a deadlock with the pageout daemon. 3690 * 3691 * The pageout daemon can run (putpages -> VOP_WRITE -> getblk -> allocbuf). 
3692 * If the buffer cache's vm_page_alloc() fails a vm_wait() can deadlock 3693 * against the pageout daemon if pages are not freed from other sources. 3694 */ 3695 static 3696 vm_page_t 3697 bio_page_alloc(vm_object_t obj, vm_pindex_t pg, int deficit) 3698 { 3699 vm_page_t p; 3700 3701 /* 3702 * Try a normal allocation, allow use of system reserve. 3703 */ 3704 p = vm_page_alloc(obj, pg, VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM); 3705 if (p) 3706 return(p); 3707 3708 /* 3709 * The normal allocation failed and we clearly have a page 3710 * deficit. Try to reclaim some clean VM pages directly 3711 * from the buffer cache. 3712 */ 3713 vm_pageout_deficit += deficit; 3714 recoverbufpages(); 3715 3716 /* 3717 * We may have blocked, the caller will know what to do if the 3718 * page now exists. 3719 */ 3720 if (vm_page_lookup(obj, pg)) 3721 return(NULL); 3722 3723 /* 3724 * Allocate and allow use of the interrupt reserve. 3725 * 3726 * If after all that we still can't allocate a VM page we are 3727 * in real trouble, but we slog on anyway hoping that the system 3728 * won't deadlock. 3729 */ 3730 p = vm_page_alloc(obj, pg, VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM | 3731 VM_ALLOC_INTERRUPT); 3732 if (p) { 3733 if (vm_page_count_severe()) { 3734 kprintf("bio_page_alloc: WARNING emergency page " 3735 "allocation\n"); 3736 vm_wait(hz / 20); 3737 } 3738 } else { 3739 kprintf("bio_page_alloc: WARNING emergency page " 3740 "allocation failed\n"); 3741 vm_wait(hz * 5); 3742 } 3743 return(p); 3744 } 3745 3746 /* 3747 * vm_hold_free_pages: 3748 * 3749 * Return pages associated with the buffer back to the VM system. 3750 * 3751 * The range of pages underlying the buffer's address space will 3752 * be unmapped and un-wired. 3753 */ 3754 void 3755 vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to) 3756 { 3757 vm_offset_t pg; 3758 vm_page_t p; 3759 int index, newnpages; 3760 3761 from = round_page(from); 3762 to = round_page(to); 3763 newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT; 3764 3765 for (pg = from; pg < to; pg += PAGE_SIZE, index++) { 3766 p = bp->b_xio.xio_pages[index]; 3767 if (p && (index < bp->b_xio.xio_npages)) { 3768 if (p->busy) { 3769 kprintf("vm_hold_free_pages: doffset: %lld, loffset: %lld\n", 3770 bp->b_bio2.bio_offset, bp->b_loffset); 3771 } 3772 bp->b_xio.xio_pages[index] = NULL; 3773 pmap_kremove(pg); 3774 vm_page_busy(p); 3775 vm_page_unwire(p, 0); 3776 vm_page_free(p); 3777 } 3778 } 3779 bp->b_xio.xio_npages = newnpages; 3780 } 3781 3782 /* 3783 * vmapbuf: 3784 * 3785 * Map a user buffer into KVM via a pbuf. On return the buffer's 3786 * b_data, b_bufsize, and b_bcount will be set, and its XIO page array 3787 * initialized. 3788 */ 3789 int 3790 vmapbuf(struct buf *bp, caddr_t udata, int bytes) 3791 { 3792 caddr_t addr; 3793 vm_offset_t va; 3794 vm_page_t m; 3795 int vmprot; 3796 int error; 3797 int pidx; 3798 int i; 3799 3800 /* 3801 * bp had better have a command and it better be a pbuf. 3802 */ 3803 KKASSERT(bp->b_cmd != BUF_CMD_DONE); 3804 KKASSERT(bp->b_flags & B_PAGING); 3805 3806 if (bytes < 0) 3807 return (-1); 3808 3809 /* 3810 * Map the user data into KVM. Mappings have to be page-aligned. 3811 */ 3812 addr = (caddr_t)trunc_page((vm_offset_t)udata); 3813 pidx = 0; 3814 3815 vmprot = VM_PROT_READ; 3816 if (bp->b_cmd == BUF_CMD_READ) 3817 vmprot |= VM_PROT_WRITE; 3818 3819 while (addr < udata + bytes) { 3820 /* 3821 * Do the vm_fault if needed; do the copy-on-write thing 3822 * when reading stuff off device into memory. 
3823 * 3824 * vm_fault_page*() returns a held VM page. 3825 */ 3826 va = (addr >= udata) ? (vm_offset_t)addr : (vm_offset_t)udata; 3827 va = trunc_page(va); 3828 3829 m = vm_fault_page_quick(va, vmprot, &error); 3830 if (m == NULL) { 3831 for (i = 0; i < pidx; ++i) { 3832 vm_page_unhold(bp->b_xio.xio_pages[i]); 3833 bp->b_xio.xio_pages[i] = NULL; 3834 } 3835 return(-1); 3836 } 3837 bp->b_xio.xio_pages[pidx] = m; 3838 addr += PAGE_SIZE; 3839 ++pidx; 3840 } 3841 3842 /* 3843 * Map the page array and set the buffer fields to point to 3844 * the mapped data buffer. 3845 */ 3846 if (pidx > btoc(MAXPHYS)) 3847 panic("vmapbuf: mapped more than MAXPHYS"); 3848 pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_xio.xio_pages, pidx); 3849 3850 bp->b_xio.xio_npages = pidx; 3851 bp->b_data = bp->b_kvabase + ((int)(intptr_t)udata & PAGE_MASK); 3852 bp->b_bcount = bytes; 3853 bp->b_bufsize = bytes; 3854 return(0); 3855 } 3856 3857 /* 3858 * vunmapbuf: 3859 * 3860 * Free the io map PTEs associated with this IO operation. 3861 * We also invalidate the TLB entries and restore the original b_addr. 3862 */ 3863 void 3864 vunmapbuf(struct buf *bp) 3865 { 3866 int pidx; 3867 int npages; 3868 3869 KKASSERT(bp->b_flags & B_PAGING); 3870 3871 npages = bp->b_xio.xio_npages; 3872 pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); 3873 for (pidx = 0; pidx < npages; ++pidx) { 3874 vm_page_unhold(bp->b_xio.xio_pages[pidx]); 3875 bp->b_xio.xio_pages[pidx] = NULL; 3876 } 3877 bp->b_xio.xio_npages = 0; 3878 bp->b_data = bp->b_kvabase; 3879 } 3880 3881 /* 3882 * Scan all buffers in the system and issue the callback. 3883 */ 3884 int 3885 scan_all_buffers(int (*callback)(struct buf *, void *), void *info) 3886 { 3887 int count = 0; 3888 int error; 3889 int n; 3890 3891 for (n = 0; n < nbuf; ++n) { 3892 if ((error = callback(&buf[n], info)) < 0) { 3893 count = error; 3894 break; 3895 } 3896 count += error; 3897 } 3898 return (count); 3899 } 3900 3901 /* 3902 * print out statistics from the current status of the buffer pool 3903 * this can be toggeled by the system control option debug.syncprt 3904 */ 3905 #ifdef DEBUG 3906 void 3907 vfs_bufstats(void) 3908 { 3909 int i, j, count; 3910 struct buf *bp; 3911 struct bqueues *dp; 3912 int counts[(MAXBSIZE / PAGE_SIZE) + 1]; 3913 static char *bname[3] = { "LOCKED", "LRU", "AGE" }; 3914 3915 for (dp = bufqueues, i = 0; dp < &bufqueues[3]; dp++, i++) { 3916 count = 0; 3917 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++) 3918 counts[j] = 0; 3919 crit_enter(); 3920 TAILQ_FOREACH(bp, dp, b_freelist) { 3921 counts[bp->b_bufsize/PAGE_SIZE]++; 3922 count++; 3923 } 3924 crit_exit(); 3925 kprintf("%s: total-%d", bname[i], count); 3926 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++) 3927 if (counts[j] != 0) 3928 kprintf(", %d-%d", j * PAGE_SIZE, counts[j]); 3929 kprintf("\n"); 3930 } 3931 } 3932 #endif 3933 3934 #ifdef DDB 3935 3936 DB_SHOW_COMMAND(buffer, db_show_buffer) 3937 { 3938 /* get args */ 3939 struct buf *bp = (struct buf *)addr; 3940 3941 if (!have_addr) { 3942 db_printf("usage: show buffer <addr>\n"); 3943 return; 3944 } 3945 3946 db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS); 3947 db_printf("b_cmd = %d\n", bp->b_cmd); 3948 db_printf("b_error = %d, b_bufsize = %d, b_bcount = %d, " 3949 "b_resid = %d\n, b_data = %p, " 3950 "bio_offset(disk) = %lld, bio_offset(phys) = %lld\n", 3951 bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid, 3952 bp->b_data, bp->b_bio2.bio_offset, (bp->b_bio2.bio_next ? 
bp->b_bio2.bio_next->bio_offset : (off_t)-1)); 3953 if (bp->b_xio.xio_npages) { 3954 int i; 3955 db_printf("b_xio.xio_npages = %d, pages(OBJ, IDX, PA): ", 3956 bp->b_xio.xio_npages); 3957 for (i = 0; i < bp->b_xio.xio_npages; i++) { 3958 vm_page_t m; 3959 m = bp->b_xio.xio_pages[i]; 3960 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object, 3961 (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m)); 3962 if ((i + 1) < bp->b_xio.xio_npages) 3963 db_printf(","); 3964 } 3965 db_printf("\n"); 3966 } 3967 } 3968 #endif /* DDB */ 3969
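
/*
 * A minimal sketch of the scan_all_buffers() callback convention
 * (the helper name and its use are hypothetical; only the
 * return-value convention is taken from scan_all_buffers() itself):
 * a callback returns a negative value to abort the scan, which is
 * then passed back to the caller, or a non-negative count that is
 * accumulated into the result.
 *
 *	static int
 *	count_delwri(struct buf *bp, void *info)
 *	{
 *		return ((bp->b_flags & B_DELWRI) ? 1 : 0);
 *	}
 *
 *	ndirty = scan_all_buffers(count_delwri, NULL);
 */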