/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block, check codes, and buffer cache operations
 * for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * WARNING: The strategy code cannot safely use hammer2 transactions
 *	    as this can deadlock against vfs_sync's vfsync() call
 *	    if multiple flushes are queued.  All H2 structures must
 *	    already be present and ready for the DIO.
 *
 *	    Reads can be initiated asynchronously, writes have to be
 *	    spooled to a separate thread for action to avoid deadlocks.
 */
static void hammer2_strategy_xop_read(hammer2_xop_t *arg, int clindex);
static void hammer2_strategy_xop_write(hammer2_xop_t *arg, int clindex);
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *chain,
				char *data, struct bio *bio);

static void hammer2_dedup_record(hammer2_chain_t *chain, char *data);
static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
				char **datap, int pblksize);

int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}

/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/
/*
 * Callback used in read path in case that a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	      crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	compressed_size = *(const int *)data;
	KKASSERT(compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression, "
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

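/*
 * Illustrative sketch (not compiled in): both the LZ4 read callback above
 * and the LZ4 write path below frame a compressed block as a native "int"
 * length prefix followed by the compressed payload.  The helper name and
 * its use here are hypothetical; only the framing itself is taken from
 * the surrounding code.
 */
#if 0
static int
hammer2_lz4_unframe_example(const char *block, u_int bytes,
			    char *out, int outsize)
{
	int clen;

	/* first sizeof(int) bytes hold the compressed payload length */
	clen = *(const int *)block;
	if (clen <= 0 || clen > (int)(bytes - sizeof(int)))
		return (-1);

	/* the payload itself starts immediately after the prefix */
	return (LZ4_decompress_safe(__DECONST(char *, block + sizeof(int)),
				    out, clen, outsize));
}
#endif
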
/*
 * Callback used in read path in case that a block is compressed with ZLIB.
 * It is almost identical to LZ4 callback, so in theory they can be unified,
 * but we didn't want to make changes in bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	xop = hammer2_xop_alloc(ip, 0);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bio");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_read);
	/* asynchronous completion */

	return(0);
}

/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
static
void
hammer2_strategy_xop_read(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct buf *bp;
	int cache_index = -1;
	int error;

	lbase = xop->lbase;
	bio = xop->bio;
	bp = bio->bio_buf;

	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &cache_index,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		error = chain ? chain->error : 0;
	} else {
		error = EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		chain = xop->head.cluster.focus;
		hammer2_strategy_read_completion(chain, (char *)chain->data,
						 xop->bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		biodone(bio);
		break;
	case ENOENT:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}

static
void
hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data,
				 struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Data is embedded in the inode (copy from inode).
		 */
		bcopy(((hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.
		 */
		hammer2_dedup_record(chain, data);

		/*
		 * Decompression and copy.
		 */
		switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, chain->bytes,
							bio);
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, chain->bytes,
							 bio);
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(chain->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, chain->bytes);
			if (chain->bytes < bp->b_bcount) {
				bzero(bp->b_data + chain->bytes,
				      bp->b_bcount - chain->bytes);
			}
			bp->b_flags |= B_NOTMETA;
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read: unknown bref type");
	}
}

/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_write);
	/* asynchronous completion */

	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

	return(0);
}

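/*
 * Note on throttling (added commentary, inferred from the calls above and
 * below rather than from the lwinprog implementation itself): the frontend
 * takes a reference with hammer2_lwinprog_ref() before queueing the XOP and
 * then waits in hammer2_lwinprog_wait(pmp, hammer2_flush_pipe), while the
 * backend drops the reference when it completes the buffer.  The apparent
 * effect is that only about hammer2_flush_pipe logical write buffers are in
 * flight per PFS at any time; hammer2_bioq_sync() below waits with a limit
 * of 0 to drain them all.
 */
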
/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 */
static
void
hammer2_strategy_xop_write(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct buf *bp;
	int error;
	int lblksize;
	int pblksize;

	lbase = xop->lbase;
	bio = xop->bio;
	bp = bio->bio_buf;
	ip = xop->head.ip1;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);
	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bp, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	error = hammer2_xop_feed(&xop->head, NULL, clindex, error);

	/*
	 * Race to finish the frontend
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case ENOENT:
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_lwinprog_drop(ip->pmp);
		break;
	case EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_lwinprog_drop(ip->pmp);
		break;
	}
}

/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}

/*
 * Create a new cluster at (cparent, lbase) and assign physical storage,
 * returning a cluster suitable for I/O.  The cluster will be in a modified
 * state.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);
	int cache_index = -1;

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	*errorp = 0;
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);
retry:
	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     &cache_index,
				     HAMMER2_LOOKUP_NODATA);
	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp = hammer2_chain_create(parentp, &chain, ip->pmp,
					       lbase, HAMMER2_PBUFRADIX,
					       HAMMER2_BREF_TYPE_DATA,
					       pblksize, mtid,
					       dedup_off, 0);
		if (chain == NULL) {
			panic("hammer2_chain_create: par=%p error=%d\n",
			      *parentp, *errorp);
			goto retry;
		}
		/*ip->delta_dcount += pblksize;*/
	} else {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * a bit more finesse.
			 */
			hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				hammer2_chain_resize(ip, *parentp, chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			hammer2_chain_modify(chain, mtid, dedup_off,
					     HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	}
	return (chain);
}

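/*
 * Illustrative caller-side sketch (not compiled in) of the *datap
 * convention described above: if hammer2_assign_physical() NULLs out the
 * data pointer a live dedup hit was found and the caller must not copy
 * the data again, only set the check code.  Variable names here are
 * hypothetical; the real callers are hammer2_write_file_core() and the
 * helpers below.
 */
#if 0
	data = bp->b_data;
	chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
					mtid, &data, &error);
	if (data == NULL) {
		/* dedup hit: storage already holds this data */
		hammer2_chain_setcheck(chain, bp->b_data);
	} else {
		/* no dedup: copy the logical buffer out to the media */
		hammer2_write_bp(chain, bp, ioflag, pblksize,
				 mtid, &error, check_algo);
	}
#endif
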
/*
 * hammer2_write_file_core() - hammer2_write_thread() helper
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(struct buf *bp, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *data = bp->b_data;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &data, errorp);
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			KKASSERT(bp->b_loffset == 0);
			bcopy(bp->b_data, wipdata->u.data,
			      HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (data == NULL) {
			/*
			 * Copy of data already present on-media.
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, bp->b_data);
		} else {
			hammer2_write_bp(chain, bp, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(bp, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(bp, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}

/*
 * Helper
 *
 * Generic function that will perform the compression in compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from inode.
 */
static
void
hammer2_compress_and_write(struct buf *bp, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *data;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, ip, parentp, lbase, mtid, errorp);
		return;
	}

	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);

	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					bp->b_data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int));
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 */
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = bp->b_data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}

	/*
	 * Assign physical storage, data will be set to NULL if a live-dedup
	 * was successful.
	 */
	data = comp_size ? comp_buffer : bp->b_data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &data, errorp);

	if (*errorp) {
		kprintf("WRITE PATH: An error occurred while "
			"assigning physical space.\n");
		KKASSERT(chain == NULL);
		goto done;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		hammer2_chain_modify_ip(ip, chain, mtid, 0);
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		KKASSERT(bp->b_loffset == 0);
		bcopy(bp->b_data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		++hammer2_iod_file_wembed;
	} else if (data == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on the media.
		 */
		char *bdata;

		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : bp->b_data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		hammer2_io_t *dio;
		char *bdata;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_write_bp: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(bp->b_data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 *
			 * Record for dedup only after the DIO's buffer cache
			 * buffer has been updated.
			 */
			hammer2_chain_setcheck(chain, bdata);
			hammer2_dedup_record(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}

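/*
 * Worked example (illustrative only): compression output above is capped
 * at pblksize / 2, so a 16384-byte logical block that deflates to 3000
 * bytes is stored in the next power-of-2 physical block, 4096 bytes, with
 * the tail zeroed so dedup comparisons on whole physical blocks still
 * match.  The helper below is a hypothetical restatement of that rounding,
 * not code used by the write path.
 */
#if 0
static int
hammer2_comp_block_size_example(int comp_size)
{
	int bsize;

	/* smallest supported power-of-2 block (>= 1024) holding comp_size */
	for (bsize = 1024; bsize < comp_size; bsize <<= 1)
		;
	return (bsize);		/* comp_size <= 32768 is asserted above */
}
#endif
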
/*
 * Helper
 *
 * Function that performs zero-checking and writing without compression;
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(struct buf *bp, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp,
	int check_algo)
{
	hammer2_chain_t *chain;
	char *data = bp->b_data;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, ip, parentp, lbase, mtid, errorp);
	} else {
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &data, errorp);
		if (data) {
			hammer2_write_bp(chain, bp, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} /* else dedup occurred */
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}

/*
 * Helper
 *
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}

/*
 * Helper
 *
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(struct buf *bp, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp __unused)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	int cache_index = -1;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     &cache_index,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			hammer2_chain_modify_ip(ip, chain, mtid, 0);
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			KKASSERT(bp->b_loffset == 0);
			bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else {
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}

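/*
 * Note (added commentary, derived from the code above): a zero "write" of
 * a normal data block is implemented by permanently deleting the chain,
 * i.e. by punching a hole.  A later read of that offset finds no chain,
 * is handled by the ENOENT case in hammer2_strategy_xop_read(), and
 * returns a zero-filled buffer, so no zero block is ever stored on media.
 */
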
/*
 * Helper
 *
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the path without compression
 * and in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		KKASSERT(bp->b_loffset == 0);
		bcopy(bp->b_data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		error = 0;
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(bp->b_data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 *
		 * Record for dedup only after the DIO's buffer cache
		 * buffer has been updated.
		 */
		hammer2_chain_setcheck(chain, bdata);
		hammer2_dedup_record(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		error = 0;
		break;
	}
	KKASSERT(error == 0);	/* XXX TODO */
	*errorp = error;
}

/*
 * LIVE DEDUP HEURISTIC
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 */
static
void
hammer2_dedup_record(hammer2_chain_t *chain, char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	int32_t crc;
	int best = 0;
	int i;
	int dticks;

	hmp = chain->hmp;
	crc = hammer2_icrc32(data, chain->bytes);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %08x %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc,
			chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUP);
}

static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint32_t crc;
	char *data;
	int i;

	data = *datap;
	if (data == NULL)
		return 0;

	crc = hammer2_icrc32(data, pblksize);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %08x\n",
			(int)(dedup - hmp->heur_dedup),
			crc);
	}

	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		cpu_ccfence();
		if (dedup[i].data_crc != crc)
			continue;
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio &&
		    bcmp(data, hammer2_io_data(dio, off), pblksize) == 0) {
			if (hammer2_debug & 0x40000) {
				kprintf("DEDUP SUCCESS %016jx\n",
					(intmax_t)off);
			}
			hammer2_io_putblk(&dio);
			*datap = NULL;
			dedup[i].ticks = ticks;	/* update use */
			++hammer2_iod_file_wdedup;
			return off;		/* RETURN */
		}
		if (dio)
			hammer2_io_putblk(&dio);
	}
	return 0;
}

/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 * for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}
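
/*
 * Illustrative sketch (not compiled in): the dedup heuristic above is a
 * small 4-way set-associative table.  The CRC of the physical block,
 * masked down to a multiple of four, selects a bucket of four
 * hammer2_dedup_t slots which record and lookup then scan linearly;
 * non-matching entries are recycled by age.  The helper name below is
 * hypothetical.
 */
#if 0
static hammer2_dedup_t *
hammer2_dedup_bucket_example(hammer2_dev_t *hmp, char *data, int bytes)
{
	uint32_t crc;

	crc = hammer2_icrc32(data, bytes);
	/* four consecutive slots share the masked index */
	return (&hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)]);
}
#endif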