/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block detection, check codes, and buffer cache
 * operations for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * WARNING: The strategy code cannot safely use hammer2 transactions
 *	    as this can deadlock against vfs_sync's vfsync() call
 *	    if multiple flushes are queued.  All H2 structures must
 *	    already be present and ready for the DIO.
 *
 *	    Reads can be initiated asynchronously; writes have to be
 *	    spooled to a separate thread for action to avoid deadlocks.
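 *
 *	    The XOP backend threads provide that separation; see
 *	    hammer2_strategy_xop_read() and hammer2_strategy_xop_write()
 *	    below.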
 */
static void hammer2_strategy_xop_read(hammer2_xop_t *arg, int clindex);
static void hammer2_strategy_xop_write(hammer2_xop_t *arg, int clindex);
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *chain,
				char *data, struct bio *bio);

static void hammer2_dedup_record(hammer2_chain_t *chain, char *data);
static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
				char **datap, int pblksize);

int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}

/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/
/*
 * Callback used in the read path when a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	      crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	compressed_size = *(const int *)data;
	KKASSERT(compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression."
			" bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

/*
 * Callback used in the read path when a block is compressed with ZLIB.
 * It is almost identical to the LZ4 callback; in theory they could be
 * unified, but we did not want to change the bio structure for that.
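 *
 * Unlike the LZ4 path there is no inline size prefix; a zlib stream is
 * self-terminating, so inflate() simply consumes the stream until it ends.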
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	xop = hammer2_xop_alloc(ip, 0);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bior");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_read);
	/* asynchronous completion */

	return(0);
}

/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
static
void
hammer2_strategy_xop_read(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct buf *bp;
	int cache_index = -1;
	int error;

	lbase = xop->lbase;
	bio = xop->bio;
	bp = bio->bio_buf;

	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &cache_index,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		error = chain ? chain->error : 0;
	} else {
		error = EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		chain = xop->head.cluster.focus;
		hammer2_strategy_read_completion(chain, (char *)chain->data,
						 xop->bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		biodone(bio);
		break;
	case ENOENT:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}

static
void
hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data,
				 struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Data is embedded in the inode (copy from inode).
		 */
		bcopy(((hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.
		 */
		hammer2_dedup_record(chain, data);

		/*
		 * Decompression and copy.
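		 *
		 * The decompression callbacks zero-fill any remainder of
		 * the logical buffer, so a short result cannot leak stale
		 * data to the reader.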
		 */
		switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, chain->bytes,
							bio);
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, chain->bytes,
							 bio);
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(chain->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, chain->bytes);
			if (chain->bytes < bp->b_bcount) {
				bzero(bp->b_data + chain->bytes,
				      bp->b_bcount - chain->bytes);
			}
			bp->b_flags |= B_NOTMETA;
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read: unknown bref type");
	}
}

/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(struct buf *bp, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2biow");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_write);
	/* asynchronous completion */

	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

	return(0);
}

/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
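 *
 * The backend nodes race to finish the frontend BIO once the collection
 * completes; see the xop->lock / xop->finished handling below.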
 */
static
void
hammer2_strategy_xop_write(hammer2_xop_t *arg, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct buf *bp;
	int error;
	int lblksize;
	int pblksize;

	lbase = xop->lbase;
	bio = xop->bio;
	bp = bio->bio_buf;
	ip = xop->head.ip1;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);
	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bp, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	error = hammer2_xop_feed(&xop->head, NULL, clindex, error);

	/*
	 * Race to finish the frontend
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case ENOENT:
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_lwinprog_drop(ip->pmp);
		break;
	case EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		hammer2_lwinprog_drop(ip->pmp);
		break;
	}
}

/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}

/*
 * Create a new cluster at (cparent, lbase) and assign physical storage,
 * returning a cluster suitable for I/O.  The cluster will be in a modified
 * state.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);
	int cache_index = -1;

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
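	 *
	 * (The HAMMER2_LOOKUP_NODATA flag passed to the lookup below is
	 *  what keeps the device buffer from being instantiated.)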
	 */
	*errorp = 0;
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);
retry:
	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     &cache_index,
				     HAMMER2_LOOKUP_NODATA);
	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp = hammer2_chain_create(parentp, &chain, ip->pmp,
					       lbase, HAMMER2_PBUFRADIX,
					       HAMMER2_BREF_TYPE_DATA,
					       pblksize, mtid,
					       dedup_off, 0);
		if (chain == NULL) {
			panic("hammer2_chain_create: par=%p error=%d\n",
			      *parentp, *errorp);
			goto retry;
		}
		/*ip->delta_dcount += pblksize;*/
	} else {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * a bit more finesse.
			 */
			hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				hammer2_chain_resize(ip, *parentp, chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			hammer2_chain_modify(chain, mtid, dedup_off,
					     HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	}
	return (chain);
}

/*
 * hammer2_write_file_core() - hammer2_write_thread() helper
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(struct buf *bp, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *data = bp->b_data;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &data, errorp);
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			KKASSERT(bp->b_loffset == 0);
			bcopy(bp->b_data, wipdata->u.data,
			      HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (data == NULL) {
			/*
			 * Copy of data already present on-media.
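			 *
			 * (hammer2_assign_physical() cleared *datap on a
			 *  dedup hit, so only the blockref methods and
			 *  check code need to be set here.)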
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, bp->b_data);
		} else {
			hammer2_write_bp(chain, bp, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(bp, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(bp, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}

/*
 * Helper
 *
 * Generic function that performs the compression in the compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(struct buf *bp, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *data;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, ip, parentp, lbase, mtid, errorp);
		return;
	}

	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);

	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					bp->b_data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int));
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
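			 *
			 * (The read-path callback reads this size prefix
			 *  back before calling LZ4_decompress_safe().)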
			 */
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = bp->b_data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}

	/*
	 * Assign physical storage, data will be set to NULL if a live-dedup
	 * was successful.
	 */
	data = comp_size ? comp_buffer : bp->b_data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &data, errorp);

	if (*errorp) {
		kprintf("WRITE PATH: An error occurred while "
			"assigning physical space.\n");
		KKASSERT(chain == NULL);
		goto done;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		hammer2_chain_modify_ip(ip, chain, mtid, 0);
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		KKASSERT(bp->b_loffset == 0);
		bcopy(bp->b_data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		++hammer2_iod_file_wembed;
	} else if (data == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on the media.
		 */
		char *bdata;

		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : bp->b_data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		hammer2_io_t *dio;
		char *bdata;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_write_bp: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(bp->b_data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 *
			 * Record for dedup only after the DIO's buffer cache
			 * buffer has been updated.
			 */
			hammer2_chain_setcheck(chain, bdata);
			hammer2_dedup_record(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}

/*
 * Helper
 *
 * Function that performs zero-checking and writing without compression;
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(struct buf *bp, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp,
	int check_algo)
{
	hammer2_chain_t *chain;
	char *data = bp->b_data;

	if (test_block_zeros(bp->b_data, pblksize)) {
		zero_write(bp, ip, parentp, lbase, mtid, errorp);
	} else {
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &data, errorp);
		if (data) {
			hammer2_write_bp(chain, bp, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} /* else dedup occurred */
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}

/*
 * Helper
 *
 * A function to test whether a block of data contains only zeros;
 * returns TRUE (non-zero) if the block is all zeros.
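 *
 * The scan advances sizeof(long) bytes at a time; callers pass power-of-two
 * physical block sizes, so bytes is always a multiple of sizeof(long).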
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}

/*
 * Helper
 *
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(struct buf *bp, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp __unused)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	int cache_index = -1;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     &cache_index,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			hammer2_chain_modify_ip(ip, chain, mtid, 0);
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			KKASSERT(bp->b_loffset == 0);
			bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else {
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}

/*
 * Helper
 *
 * Function to write the data as-is, without performing any sort of
 * compression.  This function is used in the no-compression path and
 * in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		KKASSERT(bp->b_loffset == 0);
		bcopy(bp->b_data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		error = 0;
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(bp->b_data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 *
		 * Record for dedup only after the DIO's buffer cache
		 * buffer has been updated.
		 */
		hammer2_chain_setcheck(chain, bdata);
		hammer2_dedup_record(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		error = 0;
		break;
	}
	KKASSERT(error == 0);	/* XXX TODO */
	*errorp = error;
}

/*
 * LIVE DEDUP HEURISTIC
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 */
static
void
hammer2_dedup_record(hammer2_chain_t *chain, char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	int32_t crc;
	int best = 0;
	int i;
	int dticks;

	hmp = chain->hmp;
	crc = hammer2_icrc32(data, chain->bytes);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %08x %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc,
			chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;
	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUP);
}

static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint32_t crc;
	char *data;
	int i;

	data = *datap;
	if (data == NULL)
		return 0;

	crc = hammer2_icrc32(data, pblksize);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %08x\n",
			(int)(dedup - hmp->heur_dedup),
			crc);
	}

	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		cpu_ccfence();
		if (dedup[i].data_crc != crc)
			continue;
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio &&
		    bcmp(data, hammer2_io_data(dio, off), pblksize) == 0) {
			if (hammer2_debug & 0x40000) {
				kprintf("DEDUP SUCCESS %016jx\n",
					(intmax_t)off);
			}
			hammer2_io_putblk(&dio);
			*datap = NULL;
			dedup[i].ticks = ticks;	/* update use */
			++hammer2_iod_file_wdedup;
			return off;		/* RETURN */
		}
		if (dio)
			hammer2_io_putblk(&dio);
	}
	return 0;
}

/*
 * Poof.  Races are ok; if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 * for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}