/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.30 2007/08/13 17:31:51 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			    int lblocksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			    off_t doffset, int size, int run,
			    struct buf *fbp, int doasync);
static void cluster_callback (struct bio *);


static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.
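 *
 * cluster_read() acquires the requested block and, when the access
 * pattern looks sequential, builds a single large cluster buffer for
 * the contiguous blocks reported by VOP_BMAP() and issues asynchronous
 * read-ahead for the blocks that follow.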
 */
int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int size, int totread, int seqcount, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int error;
	int i;
	int maxra, racluster;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = 2 * racluster + (totread / size);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf/8)
		maxra = nbuf/8;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, size, 0, 0);
	origoffset = loffset;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the crit here so that there is no window
			 * between the findblk and the b_usecount increment
			 * below.  We opt to keep the crit out of the loop
			 * for efficiency.
			 */
			crit_enter();
			for (i = 1; i < maxra; i++) {
				if (!(tbp = findblk(vp, loffset + i * size))) {
					break;
				}

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
					(i == (maxra - 1)))
					tbp->b_flags |= B_RAM;
			}
			crit_exit();
			if (i >= maxra) {
				return 0;
			}
			loffset += i * size;
		}
		reqbp = bp = NULL;
	} else {
		off_t firstread = bp->b_loffset;
		int nblks;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = (int)(filesize - firstread);
		nblks = totread / size;
		if (nblks) {
			int burstbytes;

			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, loffset,
					 &doffset, &burstbytes, NULL);
			if (error)
				goto single_block_read;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (burstbytes < size * 2)
				goto single_block_read;
			if (nblks > burstbytes / size)
				nblks = burstbytes / size;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, size, nblks, bp, 0);
			loffset += bp->b_bufsize;
		} else {
single_block_read:
			/*
			 * if it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			loffset += size;
		}
	}

	/*
	 * Handle the synchronous read.  This only occurs if B_CACHE was
	 * not set.  bp (and rbp) could be either a cluster bp or a normal
	 * bp depending on what cluster_rbuild() decided to do.  If
	 * it is a cluster bp, vfs_busy_pages() has already been called.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%lld,%d,%d) ",
			    bp->b_loffset, bp->b_bcount, seqcount);
#endif
		bp->b_cmd = BUF_CMD_READ;
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		if ((bp->b_flags & B_ASYNC) || bp->b_bio1.bio_done != NULL)
			BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);
		error = bp->b_error;
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
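	 * The read-ahead buffer (rbp) is always issued asynchronously and
	 * is marked B_RAM so that a later cache hit on it re-triggers
	 * read-ahead from cluster_read().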
	 */
	rbp = NULL;
	if (!error &&
	    seqcount &&
	    loffset < origoffset + seqcount * size &&
	    loffset + size <= filesize
	) {
		int nblksread;
		int ntoread;
		int burstbytes;

		rbp = getblk(vp, loffset, size, 0, 0);
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			goto no_read_ahead;
		}

		error = VOP_BMAP(vp, loffset,
				 &doffset, &burstbytes, NULL);
		if (error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;
			brelse(rbp);
			rbp = NULL;
			goto no_read_ahead;
		}
		ntoread = burstbytes / size;
		nblksread = (totread + size - 1) / size;
		if (seqcount < nblksread)
			seqcount = nblksread;
		if (seqcount < ntoread)
			ntoread = seqcount;

		rbp->b_flags |= B_RAM;
		if (burstbytes) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, size,
					     ntoread, rbp, 1);
		} else {
			rbp->b_bio2.bio_offset = doffset;
		}
#if defined(CLUSTERDEBUG)
		if (rcluster) {
			if (bp)
				kprintf("A+(%lld,%d,%lld,%d) ",
				    rbp->b_loffset, rbp->b_bcount,
				    rbp->b_loffset - origoffset,
				    seqcount);
			else
				kprintf("A(%lld,%d,%lld,%d) ",
				    rbp->b_loffset, rbp->b_bcount,
				    rbp->b_loffset - origoffset,
				    seqcount);
		}
#endif
		rbp->b_flags &= ~(B_ERROR|B_INVAL);
		rbp->b_flags |= B_ASYNC;
		rbp->b_cmd = BUF_CMD_READ;

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		BUF_KERNPROC(rbp);			/* B_ASYNC */
		vn_strategy(vp, &rbp->b_bio1);
	}
no_read_ahead:

	if (reqbp)
		return (biowait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset,
	off_t doffset, int size, int run, struct buf *fbp, int doasync)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %d != f_iosize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while (loffset + run * size > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
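	 * PAGE_MASK extracts the sub-page offset of the original buffer's
	 * mapping so the pbuf's b_data starts at the same offset within
	 * its first page.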
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = NOOFFSET;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;

	for (boffset = doffset, i = 0; i < run; ++i, boffset += size) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			if ((tbp = findblk(vp, loffset + i * size)) != NULL) {
				if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT))
					break;
				BUF_UNLOCK(tbp);

				for (j = 0; j < tbp->b_xio.xio_npages; j++) {
					if (tbp->b_xio.xio_pages[j]->valid)
						break;
				}

				if (j != tbp->b_xio.xio_npages)
					break;

				if (tbp->b_bcount != size)
					break;
			}

			tbp = getblk(vp, loffset + i * size, size, 0, 0);

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if (i == 1 || i == (run - 1))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * The first buffer is set up async only if doasync is
		 * specified.  All other buffers in the cluster are set up
		 * async.  This way the caller can decide how to deal with
		 * the requested buffer.
		 */
		if (i || doasync)
			tbp->b_flags |= B_ASYNC;
		tbp->b_cmd = BUF_CMD_READ;
		BUF_KERNPROC(tbp);
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;
			m = tbp->b_xio.xio_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
				(bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		    bp->b_bufsize, bp->b_kvasize);
	}

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, int size, off_t start_loffset, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, size, start_loffset, len);
		/* fall through */
	default:
		/* fall through */
		break;
	}
	return(r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct buf *bp, off_t filesize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
	    ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || loffset != vp->v_lastw + lblocksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + lblocksize)) {
		maxclen = vp->v_mount->mnt_iosize_max;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + lblocksize;
			if (bp->b_loffset + lblocksize != filesize ||
			    loffset != vp->v_lastw + lblocksize || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp,
							      lblocksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
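		 * maxclen is the contiguous run length reported by the
		 * VOP_BMAP() below; v_clen is derived from it and bounds how
		 * far the new cluster may grow before it is pushed out.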
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + lblocksize != filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + lblocksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > lblocksize)
			vp->v_clen = maxclen - lblocksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) { /* I/O not contiguous */
			vp->v_cstart = loffset + lblocksize;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + lblocksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + lblocksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(struct vnode *vp, int size, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;

	while (bytes > 0) {
		crit_enter();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		if (((tbp = findblk(vp, start_loffset)) == NULL) ||
		    ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI) ||
		    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
			start_loffset += size;
			bytes -= size;
			crit_exit();
			continue;
		}
		bremfree(tbp);
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
		crit_exit();

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		  (tbp->b_bcount != tbp->b_bufsize) ||
		  (tbp->b_bcount != size) ||
		  (bytes == size) ||
		  ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += size;
			bytes -= size;
			continue;
		}

		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
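		 *
		 * The pbuf aggregates the adjacent delayed-write buffers
		 * collected below so they can be pushed to the device as a
		 * single write; cluster_callback() later fans the completion
		 * back out to each component buffer.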
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN));
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < bytes; (i += size), (start_loffset += size)) {
			if (i != 0) { /* If not the first buffer */
				crit_enter();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				if ((tbp = findblk(vp, start_loffset)) == NULL) {
					crit_exit();
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				  != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
					crit_exit();
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				  ((bp->b_bio2.bio_offset + i) !=
				    tbp->b_bio2.bio_offset) ||
				  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					crit_exit();
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
				crit_exit();
			} /* end of code for non-first buffers only */

			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
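			 *
			 * A page already appended as the last entry of the
			 * pbuf's page list is skipped so a shared page is not
			 * entered twice.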
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					  (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			crit_enter();
			bundirty(tbp);
			tbp->b_flags &= ~B_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_cmd = BUF_CMD_WRITE;
			crit_exit();
			BUF_KERNPROC(tbp);
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL && bioops.io_start)
				(*bioops.io_start)(tbp);

		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			(vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic(
			    "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_flags |= B_ASYNC;
		bp->b_cmd = BUF_CMD_WRITE;
		vfs_busy_pages(vp, bp);
		bp->b_runningbufspace = bp->b_bufsize;
		runningbufspace += bp->b_runningbufspace;
		BUF_KERNPROC(bp);	/* B_ASYNC */
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int lblocksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

	len = (int)(vp->v_lastw - vp->v_cstart + lblocksize) / lblocksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			 M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += lblocksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset, NULL, NULL);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset,
			 &bp->b_bio2.bio_offset, NULL, NULL);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}

void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}