1 /* 2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression) 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 */ 36 /* 37 * This module handles low level logical file I/O (strategy) which backs 38 * the logical buffer cache. 39 * 40 * [De]compression, zero-block, check codes, and buffer cache operations 41 * for file data is handled here. 42 * 43 * Live dedup makes its home here as well. 44 */ 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/kernel.h> 49 #include <sys/fcntl.h> 50 #include <sys/buf.h> 51 #include <sys/proc.h> 52 #include <sys/namei.h> 53 #include <sys/mount.h> 54 #include <sys/vnode.h> 55 #include <sys/mountctl.h> 56 #include <sys/dirent.h> 57 #include <sys/uio.h> 58 #include <sys/objcache.h> 59 #include <sys/event.h> 60 #include <sys/file.h> 61 #include <vfs/fifofs/fifo.h> 62 63 #include "hammer2.h" 64 #include "hammer2_lz4.h" 65 66 #include "zlib/hammer2_zlib.h" 67 68 struct objcache *cache_buffer_read; 69 struct objcache *cache_buffer_write; 70 71 /* 72 * Strategy code (async logical file buffer I/O from system) 73 * 74 * It should only be possible for this to be called outside of a flush, 75 * or during the PREFLUSH stage of a flush. A transaction must be used 76 * to interlock against a new flush starting up to avoid corrupting the 77 * flush. 78 * 79 * Except for the transaction init (which should normally not block), 80 * we essentially run the strategy operation asynchronously via a XOP. 81 * 82 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync() 83 * calls but it has in the past when multiple flushes are queued. 84 * 85 * XXX We currently terminate the transaction once we get a quorum, otherwise 86 * the frontend can stall, but this can leave the remaining nodes with 87 * a potential flush conflict. We need to delay flushes on those nodes 88 * until running transactions complete separately from the normal 89 * transaction sequencing. FIXME TODO. 90 */ 91 static void hammer2_strategy_xop_read(hammer2_xop_t *arg, int clindex); 92 static void hammer2_strategy_xop_write(hammer2_xop_t *arg, int clindex); 93 static int hammer2_strategy_read(struct vop_strategy_args *ap); 94 static int hammer2_strategy_write(struct vop_strategy_args *ap); 95 static void hammer2_strategy_read_completion(hammer2_chain_t *chain, 96 char *data, struct bio *bio); 97 98 static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp, 99 char **datap, int pblksize); 100 101 int h2timer[32]; 102 int h2last; 103 int h2lid; 104 105 #define TIMER(which) do { \ 106 if (h2last) \ 107 h2timer[h2lid] += (int)(ticks - h2last);\ 108 h2last = ticks; \ 109 h2lid = which; \ 110 } while(0) 111 112 int 113 hammer2_vop_strategy(struct vop_strategy_args *ap) 114 { 115 struct bio *biop; 116 struct buf *bp; 117 int error; 118 119 biop = ap->a_bio; 120 bp = biop->bio_buf; 121 122 switch(bp->b_cmd) { 123 case BUF_CMD_READ: 124 error = hammer2_strategy_read(ap); 125 ++hammer2_iod_file_read; 126 break; 127 case BUF_CMD_WRITE: 128 error = hammer2_strategy_write(ap); 129 ++hammer2_iod_file_write; 130 break; 131 default: 132 bp->b_error = error = EINVAL; 133 bp->b_flags |= B_ERROR; 134 biodone(biop); 135 break; 136 } 137 return (error); 138 } 139 140 /* 141 * Return the largest contiguous physical disk range for the logical 142 * request, in bytes. 143 * 144 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb) 145 * 146 * Basically disabled, the logical buffer write thread has to deal with 147 * buffers one-at-a-time. Note that this should not prevent cluster_read() 148 * from reading-ahead, it simply prevents it from trying form a single 149 * cluster buffer for the logical request. H2 already uses 64KB buffers! 150 */ 151 int 152 hammer2_vop_bmap(struct vop_bmap_args *ap) 153 { 154 *ap->a_doffsetp = NOOFFSET; 155 if (ap->a_runp) 156 *ap->a_runp = 0; 157 if (ap->a_runb) 158 *ap->a_runb = 0; 159 return (EOPNOTSUPP); 160 } 161 162 /**************************************************************************** 163 * READ SUPPORT * 164 ****************************************************************************/ 165 /* 166 * Callback used in read path in case that a block is compressed with LZ4. 167 */ 168 static 169 void 170 hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio) 171 { 172 struct buf *bp; 173 char *compressed_buffer; 174 int compressed_size; 175 int result; 176 177 bp = bio->bio_buf; 178 179 #if 0 180 if bio->bio_caller_info2.index && 181 bio->bio_caller_info1.uvalue32 != 182 crc32(bp->b_data, bp->b_bufsize) --- return error 183 #endif 184 185 KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE); 186 compressed_size = *(const int *)data; 187 KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int)); 188 189 compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT); 190 result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]), 191 compressed_buffer, 192 compressed_size, 193 bp->b_bufsize); 194 if (result < 0) { 195 kprintf("READ PATH: Error during decompression." 196 "bio %016jx/%d\n", 197 (intmax_t)bio->bio_offset, bytes); 198 /* make sure it isn't random garbage */ 199 bzero(compressed_buffer, bp->b_bufsize); 200 } 201 KKASSERT(result <= bp->b_bufsize); 202 bcopy(compressed_buffer, bp->b_data, bp->b_bufsize); 203 if (result < bp->b_bufsize) 204 bzero(bp->b_data + result, bp->b_bufsize - result); 205 objcache_put(cache_buffer_read, compressed_buffer); 206 bp->b_resid = 0; 207 bp->b_flags |= B_AGE; 208 } 209 210 /* 211 * Callback used in read path in case that a block is compressed with ZLIB. 212 * It is almost identical to LZ4 callback, so in theory they can be unified, 213 * but we didn't want to make changes in bio structure for that. 214 */ 215 static 216 void 217 hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio) 218 { 219 struct buf *bp; 220 char *compressed_buffer; 221 z_stream strm_decompress; 222 int result; 223 int ret; 224 225 bp = bio->bio_buf; 226 227 KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE); 228 strm_decompress.avail_in = 0; 229 strm_decompress.next_in = Z_NULL; 230 231 ret = inflateInit(&strm_decompress); 232 233 if (ret != Z_OK) 234 kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n"); 235 236 compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT); 237 strm_decompress.next_in = __DECONST(char *, data); 238 239 /* XXX supply proper size, subset of device bp */ 240 strm_decompress.avail_in = bytes; 241 strm_decompress.next_out = compressed_buffer; 242 strm_decompress.avail_out = bp->b_bufsize; 243 244 ret = inflate(&strm_decompress, Z_FINISH); 245 if (ret != Z_STREAM_END) { 246 kprintf("HAMMER2 ZLIB: Fatar error during decompression.\n"); 247 bzero(compressed_buffer, bp->b_bufsize); 248 } 249 bcopy(compressed_buffer, bp->b_data, bp->b_bufsize); 250 result = bp->b_bufsize - strm_decompress.avail_out; 251 if (result < bp->b_bufsize) 252 bzero(bp->b_data + result, strm_decompress.avail_out); 253 objcache_put(cache_buffer_read, compressed_buffer); 254 ret = inflateEnd(&strm_decompress); 255 256 bp->b_resid = 0; 257 bp->b_flags |= B_AGE; 258 } 259 260 /* 261 * Logical buffer I/O, async read. 262 */ 263 static 264 int 265 hammer2_strategy_read(struct vop_strategy_args *ap) 266 { 267 hammer2_xop_strategy_t *xop; 268 struct buf *bp; 269 struct bio *bio; 270 struct bio *nbio; 271 hammer2_inode_t *ip; 272 hammer2_key_t lbase; 273 274 bio = ap->a_bio; 275 bp = bio->bio_buf; 276 ip = VTOI(ap->a_vp); 277 nbio = push_bio(bio); 278 279 lbase = bio->bio_offset; 280 KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0); 281 282 if (bp->b_bio1.bio_flags & BIO_SYNC) { 283 xop = hammer2_xop_alloc(ip, 0); 284 } else { 285 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_ITERATOR); 286 } 287 xop->finished = 0; 288 xop->bio = bio; 289 xop->lbase = lbase; 290 hammer2_mtx_init(&xop->lock, "h2bior"); 291 hammer2_xop_start(&xop->head, hammer2_strategy_xop_read); 292 /* asynchronous completion */ 293 294 return(0); 295 } 296 297 /* 298 * Per-node XOP (threaded), do a synchronous lookup of the chain and 299 * its data. The frontend is asynchronous, so we are also responsible 300 * for racing to terminate the frontend. 301 */ 302 static 303 void 304 hammer2_strategy_xop_read(hammer2_xop_t *arg, int clindex) 305 { 306 hammer2_xop_strategy_t *xop = &arg->xop_strategy; 307 hammer2_chain_t *parent; 308 hammer2_chain_t *chain; 309 hammer2_key_t key_dummy; 310 hammer2_key_t lbase; 311 struct bio *bio; 312 struct buf *bp; 313 int cache_index = -1; 314 int error; 315 316 TIMER(0); 317 lbase = xop->lbase; 318 bio = xop->bio; 319 bp = bio->bio_buf; 320 321 /* 322 * This is difficult to optimize. The logical buffer might be 323 * partially dirty (contain dummy zero-fill pages), which would 324 * mess up our crc calculation if we were to try a direct read. 325 * So for now we always double-buffer through the underlying 326 * storage. 327 * 328 * If not for the above problem we could conditionalize on 329 * (1) 64KB buffer, (2) one chain (not multi-master) and 330 * (3) !hammer2_double_buffer, and issue a direct read into the 331 * logical buffer. 332 */ 333 parent = hammer2_inode_chain(xop->head.ip1, clindex, 334 HAMMER2_RESOLVE_ALWAYS | 335 HAMMER2_RESOLVE_SHARED); 336 TIMER(1); 337 if (parent) { 338 chain = hammer2_chain_lookup(&parent, &key_dummy, 339 lbase, lbase, 340 &cache_index, 341 HAMMER2_LOOKUP_ALWAYS | 342 HAMMER2_LOOKUP_SHARED); 343 error = chain ? chain->error : 0; 344 } else { 345 error = EIO; 346 chain = NULL; 347 } 348 TIMER(2); 349 error = hammer2_xop_feed(&xop->head, chain, clindex, error); 350 TIMER(3); 351 if (chain) { 352 hammer2_chain_unlock(chain); 353 hammer2_chain_drop(chain); 354 } 355 if (parent) { 356 hammer2_chain_unlock(parent); 357 hammer2_chain_drop(parent); 358 } 359 chain = NULL; /* safety */ 360 parent = NULL; /* safety */ 361 TIMER(4); 362 363 /* 364 * Race to finish the frontend 365 */ 366 if (xop->finished) 367 return; 368 hammer2_mtx_ex(&xop->lock); 369 if (xop->finished) { 370 hammer2_mtx_unlock(&xop->lock); 371 return; 372 } 373 374 /* 375 * Async operation has not completed and we now own the lock. 376 * Determine if we can complete the operation by issuing the 377 * frontend collection non-blocking. 378 * 379 * H2 double-buffers the data, setting B_NOTMETA on the logical 380 * buffer hints to the OS that the logical buffer should not be 381 * swapcached (since the device buffer can be). 382 * 383 * Also note that even for compressed data we would rather the 384 * kernel cache/swapcache device buffers more and (decompressed) 385 * logical buffers less, since that will significantly improve 386 * the amount of end-user data that can be cached. 387 */ 388 error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT); 389 TIMER(5); 390 391 switch(error) { 392 case 0: 393 xop->finished = 1; 394 hammer2_mtx_unlock(&xop->lock); 395 bp->b_flags |= B_NOTMETA; 396 chain = xop->head.cluster.focus; 397 hammer2_strategy_read_completion(chain, (char *)chain->data, 398 xop->bio); 399 biodone(bio); 400 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 401 break; 402 case ENOENT: 403 xop->finished = 1; 404 hammer2_mtx_unlock(&xop->lock); 405 bp->b_flags |= B_NOTMETA; 406 bp->b_resid = 0; 407 bp->b_error = 0; 408 bzero(bp->b_data, bp->b_bcount); 409 biodone(bio); 410 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 411 break; 412 case EINPROGRESS: 413 hammer2_mtx_unlock(&xop->lock); 414 break; 415 default: 416 kprintf("strategy_xop_read: error %d loff=%016jx\n", 417 error, bp->b_loffset); 418 xop->finished = 1; 419 hammer2_mtx_unlock(&xop->lock); 420 bp->b_flags |= B_ERROR; 421 bp->b_error = EIO; 422 biodone(bio); 423 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 424 break; 425 } 426 TIMER(6); 427 } 428 429 static 430 void 431 hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data, 432 struct bio *bio) 433 { 434 struct buf *bp = bio->bio_buf; 435 436 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) { 437 /* 438 * Data is embedded in the inode (copy from inode). 439 */ 440 bcopy(((hammer2_inode_data_t *)data)->u.data, 441 bp->b_data, HAMMER2_EMBEDDED_BYTES); 442 bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES, 443 bp->b_bcount - HAMMER2_EMBEDDED_BYTES); 444 bp->b_resid = 0; 445 bp->b_error = 0; 446 } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) { 447 /* 448 * Data is on-media, record for live dedup. Release the 449 * chain (try to free it) when done. The data is still 450 * cached by both the buffer cache in front and the 451 * block device behind us. This leaves more room in the 452 * LRU chain cache for meta-data chains which we really 453 * want to retain. 454 */ 455 hammer2_dedup_record(chain, data); 456 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 457 458 /* 459 * Decompression and copy. 460 */ 461 switch (HAMMER2_DEC_COMP(chain->bref.methods)) { 462 case HAMMER2_COMP_LZ4: 463 hammer2_decompress_LZ4_callback(data, chain->bytes, 464 bio); 465 /* b_resid set by call */ 466 break; 467 case HAMMER2_COMP_ZLIB: 468 hammer2_decompress_ZLIB_callback(data, chain->bytes, 469 bio); 470 /* b_resid set by call */ 471 break; 472 case HAMMER2_COMP_NONE: 473 KKASSERT(chain->bytes <= bp->b_bcount); 474 bcopy(data, bp->b_data, chain->bytes); 475 if (chain->bytes < bp->b_bcount) { 476 bzero(bp->b_data + chain->bytes, 477 bp->b_bcount - chain->bytes); 478 } 479 bp->b_resid = 0; 480 bp->b_error = 0; 481 break; 482 default: 483 panic("hammer2_strategy_read: " 484 "unknown compression type"); 485 } 486 } else { 487 panic("hammer2_strategy_read: unknown bref type"); 488 } 489 } 490 491 /**************************************************************************** 492 * WRITE SUPPORT * 493 ****************************************************************************/ 494 495 /* 496 * Functions for compression in threads, 497 * from hammer2_vnops.c 498 */ 499 static void hammer2_write_file_core(struct buf *bp, hammer2_inode_t *ip, 500 hammer2_chain_t **parentp, 501 hammer2_key_t lbase, int ioflag, int pblksize, 502 hammer2_tid_t mtid, int *errorp); 503 static void hammer2_compress_and_write(struct buf *bp, hammer2_inode_t *ip, 504 hammer2_chain_t **parentp, 505 hammer2_key_t lbase, int ioflag, int pblksize, 506 hammer2_tid_t mtid, int *errorp, 507 int comp_algo, int check_algo); 508 static void hammer2_zero_check_and_write(struct buf *bp, hammer2_inode_t *ip, 509 hammer2_chain_t **parentp, 510 hammer2_key_t lbase, int ioflag, int pblksize, 511 hammer2_tid_t mtid, int *errorp, 512 int check_algo); 513 static int test_block_zeros(const char *buf, size_t bytes); 514 static void zero_write(struct buf *bp, hammer2_inode_t *ip, 515 hammer2_chain_t **parentp, 516 hammer2_key_t lbase, 517 hammer2_tid_t mtid, int *errorp); 518 static void hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp, 519 int ioflag, int pblksize, 520 hammer2_tid_t mtid, int *errorp, 521 int check_algo); 522 523 static 524 int 525 hammer2_strategy_write(struct vop_strategy_args *ap) 526 { 527 hammer2_xop_strategy_t *xop; 528 hammer2_pfs_t *pmp; 529 struct bio *bio; 530 struct buf *bp; 531 hammer2_inode_t *ip; 532 533 bio = ap->a_bio; 534 bp = bio->bio_buf; 535 ip = VTOI(ap->a_vp); 536 pmp = ip->pmp; 537 538 hammer2_lwinprog_ref(pmp); 539 hammer2_trans_assert_strategy(pmp); 540 hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE); 541 542 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING); 543 xop->finished = 0; 544 xop->bio = bio; 545 xop->lbase = bio->bio_offset; 546 hammer2_mtx_init(&xop->lock, "h2biow"); 547 hammer2_xop_start(&xop->head, hammer2_strategy_xop_write); 548 /* asynchronous completion */ 549 550 hammer2_lwinprog_wait(pmp, hammer2_flush_pipe); 551 552 return(0); 553 } 554 555 /* 556 * Per-node XOP (threaded). Write the logical buffer to the media. 557 */ 558 static 559 void 560 hammer2_strategy_xop_write(hammer2_xop_t *arg, int clindex) 561 { 562 hammer2_xop_strategy_t *xop = &arg->xop_strategy; 563 hammer2_chain_t *parent; 564 hammer2_key_t lbase; 565 hammer2_inode_t *ip; 566 struct bio *bio; 567 struct buf *bp; 568 int error; 569 int lblksize; 570 int pblksize; 571 572 lbase = xop->lbase; 573 bio = xop->bio; 574 bp = bio->bio_buf; 575 ip = xop->head.ip1; 576 577 /* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */ 578 579 lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL); 580 pblksize = hammer2_calc_physical(ip, lbase); 581 parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS); 582 hammer2_write_file_core(bp, ip, &parent, 583 lbase, IO_ASYNC, pblksize, 584 xop->head.mtid, &error); 585 if (parent) { 586 hammer2_chain_unlock(parent); 587 hammer2_chain_drop(parent); 588 parent = NULL; /* safety */ 589 } 590 hammer2_xop_feed(&xop->head, NULL, clindex, error); 591 592 /* 593 * Race to finish the frontend 594 */ 595 if (xop->finished) 596 return; 597 hammer2_mtx_ex(&xop->lock); 598 if (xop->finished) { 599 hammer2_mtx_unlock(&xop->lock); 600 return; 601 } 602 603 /* 604 * Async operation has not completed and we now own the lock. 605 * Determine if we can complete the operation by issuing the 606 * frontend collection non-blocking. 607 * 608 * H2 double-buffers the data, setting B_NOTMETA on the logical 609 * buffer hints to the OS that the logical buffer should not be 610 * swapcached (since the device buffer can be). 611 */ 612 error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT); 613 614 if (error == EINPROGRESS) { 615 hammer2_mtx_unlock(&xop->lock); 616 return; 617 } 618 619 /* 620 * Async operation has completed. 621 */ 622 xop->finished = 1; 623 hammer2_mtx_unlock(&xop->lock); 624 625 if (error == ENOENT || error == 0) { 626 bp->b_flags |= B_NOTMETA; 627 bp->b_resid = 0; 628 bp->b_error = 0; 629 biodone(bio); 630 } else { 631 kprintf("strategy_xop_write: error %d loff=%016jx\n", 632 error, bp->b_loffset); 633 bp->b_flags |= B_ERROR; 634 bp->b_error = EIO; 635 biodone(bio); 636 } 637 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 638 hammer2_trans_assert_strategy(ip->pmp); 639 hammer2_lwinprog_drop(ip->pmp); 640 hammer2_trans_done(ip->pmp); 641 } 642 643 /* 644 * Wait for pending I/O to complete 645 */ 646 void 647 hammer2_bioq_sync(hammer2_pfs_t *pmp) 648 { 649 hammer2_lwinprog_wait(pmp, 0); 650 } 651 652 /* 653 * Create a new cluster at (cparent, lbase) and assign physical storage, 654 * returning a cluster suitable for I/O. The cluster will be in a modified 655 * state. 656 * 657 * cparent can wind up being anything. 658 * 659 * If datap is not NULL, *datap points to the real data we intend to write. 660 * If we can dedup the storage location we set *datap to NULL to indicate 661 * to the caller that a dedup occurred. 662 * 663 * NOTE: Special case for data embedded in inode. 664 */ 665 static 666 hammer2_chain_t * 667 hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp, 668 hammer2_key_t lbase, int pblksize, 669 hammer2_tid_t mtid, char **datap, int *errorp) 670 { 671 hammer2_chain_t *chain; 672 hammer2_key_t key_dummy; 673 hammer2_off_t dedup_off; 674 int pradix = hammer2_getradix(pblksize); 675 int cache_index = -1; 676 677 /* 678 * Locate the chain associated with lbase, return a locked chain. 679 * However, do not instantiate any data reference (which utilizes a 680 * device buffer) because we will be using direct IO via the 681 * logical buffer cache buffer. 682 */ 683 *errorp = 0; 684 KKASSERT(pblksize >= HAMMER2_ALLOC_MIN); 685 retry: 686 TIMER(30); 687 chain = hammer2_chain_lookup(parentp, &key_dummy, 688 lbase, lbase, 689 &cache_index, 690 HAMMER2_LOOKUP_NODATA); 691 692 /* 693 * The lookup code should not return a DELETED chain to us, unless 694 * its a short-file embedded in the inode. Then it is possible for 695 * the lookup to return a deleted inode. 696 */ 697 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) && 698 chain->bref.type != HAMMER2_BREF_TYPE_INODE) { 699 kprintf("assign physical deleted chain @ " 700 "%016jx (%016jx.%02x) ip %016jx\n", 701 lbase, chain->bref.data_off, chain->bref.type, 702 ip->meta.inum); 703 Debugger("bleh"); 704 } 705 706 if (chain == NULL) { 707 /* 708 * We found a hole, create a new chain entry. 709 * 710 * NOTE: DATA chains are created without device backing 711 * store (nor do we want any). 712 */ 713 dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap, 714 pblksize); 715 *errorp = hammer2_chain_create(parentp, &chain, 716 ip->pmp, 717 HAMMER2_ENC_CHECK(ip->meta.check_algo) | 718 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE), 719 lbase, HAMMER2_PBUFRADIX, 720 HAMMER2_BREF_TYPE_DATA, 721 pblksize, mtid, 722 dedup_off, 0); 723 if (chain == NULL) { 724 panic("hammer2_chain_create: par=%p error=%d\n", 725 *parentp, *errorp); 726 goto retry; 727 } 728 /*ip->delta_dcount += pblksize;*/ 729 } else { 730 switch (chain->bref.type) { 731 case HAMMER2_BREF_TYPE_INODE: 732 /* 733 * The data is embedded in the inode, which requires 734 * a bit more finess. 735 */ 736 hammer2_chain_modify_ip(ip, chain, mtid, 0); 737 break; 738 case HAMMER2_BREF_TYPE_DATA: 739 dedup_off = hammer2_dedup_lookup(chain->hmp, datap, 740 pblksize); 741 if (chain->bytes != pblksize) { 742 hammer2_chain_resize(ip, *parentp, chain, 743 mtid, dedup_off, 744 pradix, 745 HAMMER2_MODIFY_OPTDATA); 746 } 747 748 /* 749 * DATA buffers must be marked modified whether the 750 * data is in a logical buffer or not. We also have 751 * to make this call to fixup the chain data pointers 752 * after resizing in case this is an encrypted or 753 * compressed buffer. 754 */ 755 hammer2_chain_modify(chain, mtid, dedup_off, 756 HAMMER2_MODIFY_OPTDATA); 757 break; 758 default: 759 panic("hammer2_assign_physical: bad type"); 760 /* NOT REACHED */ 761 break; 762 } 763 } 764 TIMER(31); 765 return (chain); 766 } 767 768 /* 769 * hammer2_write_file_core() - hammer2_write_thread() helper 770 * 771 * The core write function which determines which path to take 772 * depending on compression settings. We also have to locate the 773 * related chains so we can calculate and set the check data for 774 * the blockref. 775 */ 776 static 777 void 778 hammer2_write_file_core(struct buf *bp, hammer2_inode_t *ip, 779 hammer2_chain_t **parentp, 780 hammer2_key_t lbase, int ioflag, int pblksize, 781 hammer2_tid_t mtid, int *errorp) 782 { 783 hammer2_chain_t *chain; 784 char *data = bp->b_data; 785 786 *errorp = 0; 787 788 switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) { 789 case HAMMER2_COMP_NONE: 790 /* 791 * We have to assign physical storage to the buffer 792 * we intend to dirty or write now to avoid deadlocks 793 * in the strategy code later. 794 * 795 * This can return NOOFFSET for inode-embedded data. 796 * The strategy code will take care of it in that case. 797 */ 798 chain = hammer2_assign_physical(ip, parentp, lbase, pblksize, 799 mtid, &data, errorp); 800 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) { 801 hammer2_inode_data_t *wipdata; 802 803 wipdata = &chain->data->ipdata; 804 KKASSERT(wipdata->meta.op_flags & 805 HAMMER2_OPFLAG_DIRECTDATA); 806 KKASSERT(bp->b_loffset == 0); 807 bcopy(bp->b_data, wipdata->u.data, 808 HAMMER2_EMBEDDED_BYTES); 809 ++hammer2_iod_file_wembed; 810 } else if (data == NULL) { 811 /* 812 * Copy of data already present on-media. 813 */ 814 chain->bref.methods = 815 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) + 816 HAMMER2_ENC_CHECK(ip->meta.check_algo); 817 hammer2_chain_setcheck(chain, bp->b_data); 818 } else { 819 hammer2_write_bp(chain, bp, ioflag, pblksize, 820 mtid, errorp, ip->meta.check_algo); 821 } 822 if (chain) { 823 hammer2_chain_unlock(chain); 824 hammer2_chain_drop(chain); 825 } 826 break; 827 case HAMMER2_COMP_AUTOZERO: 828 /* 829 * Check for zero-fill only 830 */ 831 hammer2_zero_check_and_write(bp, ip, parentp, 832 lbase, ioflag, pblksize, 833 mtid, errorp, 834 ip->meta.check_algo); 835 break; 836 case HAMMER2_COMP_LZ4: 837 case HAMMER2_COMP_ZLIB: 838 default: 839 /* 840 * Check for zero-fill and attempt compression. 841 */ 842 hammer2_compress_and_write(bp, ip, parentp, 843 lbase, ioflag, pblksize, 844 mtid, errorp, 845 ip->meta.comp_algo, 846 ip->meta.check_algo); 847 break; 848 } 849 } 850 851 /* 852 * Helper 853 * 854 * Generic function that will perform the compression in compression 855 * write path. The compression algorithm is determined by the settings 856 * obtained from inode. 857 */ 858 static 859 void 860 hammer2_compress_and_write(struct buf *bp, hammer2_inode_t *ip, 861 hammer2_chain_t **parentp, 862 hammer2_key_t lbase, int ioflag, int pblksize, 863 hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo) 864 { 865 hammer2_chain_t *chain; 866 int comp_size; 867 int comp_block_size; 868 char *comp_buffer; 869 char *data; 870 871 if (test_block_zeros(bp->b_data, pblksize)) { 872 zero_write(bp, ip, parentp, lbase, mtid, errorp); 873 return; 874 } 875 876 comp_size = 0; 877 comp_buffer = NULL; 878 879 KKASSERT(pblksize / 2 <= 32768); 880 881 if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) { 882 z_stream strm_compress; 883 int comp_level; 884 int ret; 885 886 switch(HAMMER2_DEC_ALGO(comp_algo)) { 887 case HAMMER2_COMP_LZ4: 888 comp_buffer = objcache_get(cache_buffer_write, 889 M_INTWAIT); 890 comp_size = LZ4_compress_limitedOutput( 891 bp->b_data, 892 &comp_buffer[sizeof(int)], 893 pblksize, 894 pblksize / 2 - sizeof(int)); 895 /* 896 * We need to prefix with the size, LZ4 897 * doesn't do it for us. Add the related 898 * overhead. 899 */ 900 *(int *)comp_buffer = comp_size; 901 if (comp_size) 902 comp_size += sizeof(int); 903 break; 904 case HAMMER2_COMP_ZLIB: 905 comp_level = HAMMER2_DEC_LEVEL(comp_algo); 906 if (comp_level == 0) 907 comp_level = 6; /* default zlib compression */ 908 else if (comp_level < 6) 909 comp_level = 6; 910 else if (comp_level > 9) 911 comp_level = 9; 912 ret = deflateInit(&strm_compress, comp_level); 913 if (ret != Z_OK) { 914 kprintf("HAMMER2 ZLIB: fatal error " 915 "on deflateInit.\n"); 916 } 917 918 comp_buffer = objcache_get(cache_buffer_write, 919 M_INTWAIT); 920 strm_compress.next_in = bp->b_data; 921 strm_compress.avail_in = pblksize; 922 strm_compress.next_out = comp_buffer; 923 strm_compress.avail_out = pblksize / 2; 924 ret = deflate(&strm_compress, Z_FINISH); 925 if (ret == Z_STREAM_END) { 926 comp_size = pblksize / 2 - 927 strm_compress.avail_out; 928 } else { 929 comp_size = 0; 930 } 931 ret = deflateEnd(&strm_compress); 932 break; 933 default: 934 kprintf("Error: Unknown compression method.\n"); 935 kprintf("Comp_method = %d.\n", comp_algo); 936 break; 937 } 938 } 939 940 if (comp_size == 0) { 941 /* 942 * compression failed or turned off 943 */ 944 comp_block_size = pblksize; /* safety */ 945 if (++ip->comp_heuristic > 128) 946 ip->comp_heuristic = 8; 947 } else { 948 /* 949 * compression succeeded 950 */ 951 ip->comp_heuristic = 0; 952 if (comp_size <= 1024) { 953 comp_block_size = 1024; 954 } else if (comp_size <= 2048) { 955 comp_block_size = 2048; 956 } else if (comp_size <= 4096) { 957 comp_block_size = 4096; 958 } else if (comp_size <= 8192) { 959 comp_block_size = 8192; 960 } else if (comp_size <= 16384) { 961 comp_block_size = 16384; 962 } else if (comp_size <= 32768) { 963 comp_block_size = 32768; 964 } else { 965 panic("hammer2: WRITE PATH: " 966 "Weird comp_size value."); 967 /* NOT REACHED */ 968 comp_block_size = pblksize; 969 } 970 971 /* 972 * Must zero the remainder or dedup (which operates on a 973 * physical block basis) will not find matches. 974 */ 975 if (comp_size < comp_block_size) { 976 bzero(comp_buffer + comp_size, 977 comp_block_size - comp_size); 978 } 979 } 980 981 /* 982 * Assign physical storage, data will be set to NULL if a live-dedup 983 * was successful. 984 */ 985 data = comp_size ? comp_buffer : bp->b_data; 986 chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size, 987 mtid, &data, errorp); 988 989 if (*errorp) { 990 kprintf("WRITE PATH: An error occurred while " 991 "assigning physical space.\n"); 992 KKASSERT(chain == NULL); 993 goto done; 994 } 995 996 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) { 997 hammer2_inode_data_t *wipdata; 998 999 hammer2_chain_modify_ip(ip, chain, mtid, 0); 1000 wipdata = &chain->data->ipdata; 1001 KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA); 1002 KKASSERT(bp->b_loffset == 0); 1003 bcopy(bp->b_data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES); 1004 ++hammer2_iod_file_wembed; 1005 } else if (data == NULL) { 1006 /* 1007 * Live deduplication, a copy of the data is already present 1008 * on the media. 1009 */ 1010 char *bdata; 1011 1012 if (comp_size) { 1013 chain->bref.methods = 1014 HAMMER2_ENC_COMP(comp_algo) + 1015 HAMMER2_ENC_CHECK(check_algo); 1016 } else { 1017 chain->bref.methods = 1018 HAMMER2_ENC_COMP( 1019 HAMMER2_COMP_NONE) + 1020 HAMMER2_ENC_CHECK(check_algo); 1021 } 1022 bdata = comp_size ? comp_buffer : bp->b_data; 1023 hammer2_chain_setcheck(chain, bdata); 1024 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL); 1025 } else { 1026 hammer2_io_t *dio; 1027 char *bdata; 1028 1029 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED); 1030 1031 switch(chain->bref.type) { 1032 case HAMMER2_BREF_TYPE_INODE: 1033 panic("hammer2_write_bp: unexpected inode\n"); 1034 break; 1035 case HAMMER2_BREF_TYPE_DATA: 1036 /* 1037 * Optimize out the read-before-write 1038 * if possible. 1039 */ 1040 *errorp = hammer2_io_newnz(chain->hmp, 1041 chain->bref.type, 1042 chain->bref.data_off, 1043 chain->bytes, 1044 &dio); 1045 if (*errorp) { 1046 hammer2_io_brelse(&dio); 1047 kprintf("hammer2: WRITE PATH: " 1048 "dbp bread error\n"); 1049 break; 1050 } 1051 bdata = hammer2_io_data(dio, chain->bref.data_off); 1052 1053 /* 1054 * When loading the block make sure we don't 1055 * leave garbage after the compressed data. 1056 */ 1057 if (comp_size) { 1058 chain->bref.methods = 1059 HAMMER2_ENC_COMP(comp_algo) + 1060 HAMMER2_ENC_CHECK(check_algo); 1061 bcopy(comp_buffer, bdata, comp_size); 1062 } else { 1063 chain->bref.methods = 1064 HAMMER2_ENC_COMP( 1065 HAMMER2_COMP_NONE) + 1066 HAMMER2_ENC_CHECK(check_algo); 1067 bcopy(bp->b_data, bdata, pblksize); 1068 } 1069 1070 /* 1071 * The flush code doesn't calculate check codes for 1072 * file data (doing so can result in excessive I/O), 1073 * so we do it here. 1074 */ 1075 hammer2_chain_setcheck(chain, bdata); 1076 hammer2_dedup_record(chain, bdata); 1077 1078 /* 1079 * Device buffer is now valid, chain is no longer in 1080 * the initial state. 1081 * 1082 * (No blockref table worries with file data) 1083 */ 1084 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL); 1085 1086 /* Now write the related bdp. */ 1087 if (ioflag & IO_SYNC) { 1088 /* 1089 * Synchronous I/O requested. 1090 */ 1091 hammer2_io_bwrite(&dio); 1092 /* 1093 } else if ((ioflag & IO_DIRECT) && 1094 loff + n == pblksize) { 1095 hammer2_io_bdwrite(&dio); 1096 */ 1097 } else if (ioflag & IO_ASYNC) { 1098 hammer2_io_bawrite(&dio); 1099 } else { 1100 hammer2_io_bdwrite(&dio); 1101 } 1102 break; 1103 default: 1104 panic("hammer2_write_bp: bad chain type %d\n", 1105 chain->bref.type); 1106 /* NOT REACHED */ 1107 break; 1108 } 1109 } 1110 done: 1111 if (chain) { 1112 hammer2_chain_unlock(chain); 1113 hammer2_chain_drop(chain); 1114 } 1115 if (comp_buffer) 1116 objcache_put(cache_buffer_write, comp_buffer); 1117 } 1118 1119 /* 1120 * Helper 1121 * 1122 * Function that performs zero-checking and writing without compression, 1123 * it corresponds to default zero-checking path. 1124 */ 1125 static 1126 void 1127 hammer2_zero_check_and_write(struct buf *bp, hammer2_inode_t *ip, 1128 hammer2_chain_t **parentp, 1129 hammer2_key_t lbase, int ioflag, int pblksize, 1130 hammer2_tid_t mtid, int *errorp, 1131 int check_algo) 1132 { 1133 hammer2_chain_t *chain; 1134 char *data = bp->b_data; 1135 1136 if (test_block_zeros(bp->b_data, pblksize)) { 1137 zero_write(bp, ip, parentp, lbase, mtid, errorp); 1138 } else { 1139 chain = hammer2_assign_physical(ip, parentp, lbase, pblksize, 1140 mtid, &data, errorp); 1141 if (data) { 1142 hammer2_write_bp(chain, bp, ioflag, pblksize, 1143 mtid, errorp, check_algo); 1144 } /* else dedup occurred */ 1145 if (chain) { 1146 hammer2_chain_unlock(chain); 1147 hammer2_chain_drop(chain); 1148 } 1149 } 1150 } 1151 1152 /* 1153 * Helper 1154 * 1155 * A function to test whether a block of data contains only zeros, 1156 * returns TRUE (non-zero) if the block is all zeros. 1157 */ 1158 static 1159 int 1160 test_block_zeros(const char *buf, size_t bytes) 1161 { 1162 size_t i; 1163 1164 for (i = 0; i < bytes; i += sizeof(long)) { 1165 if (*(const long *)(buf + i) != 0) 1166 return (0); 1167 } 1168 return (1); 1169 } 1170 1171 /* 1172 * Helper 1173 * 1174 * Function to "write" a block that contains only zeros. 1175 */ 1176 static 1177 void 1178 zero_write(struct buf *bp, hammer2_inode_t *ip, 1179 hammer2_chain_t **parentp, 1180 hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp) 1181 { 1182 hammer2_chain_t *chain; 1183 hammer2_key_t key_dummy; 1184 int cache_index = -1; 1185 1186 *errorp = 0; 1187 chain = hammer2_chain_lookup(parentp, &key_dummy, 1188 lbase, lbase, 1189 &cache_index, 1190 HAMMER2_LOOKUP_NODATA); 1191 if (chain) { 1192 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) { 1193 hammer2_inode_data_t *wipdata; 1194 1195 hammer2_chain_modify_ip(ip, chain, mtid, 0); 1196 wipdata = &chain->data->ipdata; 1197 KKASSERT(wipdata->meta.op_flags & 1198 HAMMER2_OPFLAG_DIRECTDATA); 1199 KKASSERT(bp->b_loffset == 0); 1200 bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES); 1201 ++hammer2_iod_file_wembed; 1202 } else { 1203 hammer2_chain_delete(*parentp, chain, 1204 mtid, HAMMER2_DELETE_PERMANENT); 1205 ++hammer2_iod_file_wzero; 1206 } 1207 hammer2_chain_unlock(chain); 1208 hammer2_chain_drop(chain); 1209 } else { 1210 ++hammer2_iod_file_wzero; 1211 } 1212 } 1213 1214 /* 1215 * Helper 1216 * 1217 * Function to write the data as it is, without performing any sort of 1218 * compression. This function is used in path without compression and 1219 * default zero-checking path. 1220 */ 1221 static 1222 void 1223 hammer2_write_bp(hammer2_chain_t *chain, struct buf *bp, int ioflag, 1224 int pblksize, 1225 hammer2_tid_t mtid, int *errorp, int check_algo) 1226 { 1227 hammer2_inode_data_t *wipdata; 1228 hammer2_io_t *dio; 1229 char *bdata; 1230 int error; 1231 1232 error = 0; /* XXX TODO below */ 1233 1234 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED); 1235 1236 switch(chain->bref.type) { 1237 case HAMMER2_BREF_TYPE_INODE: 1238 wipdata = &chain->data->ipdata; 1239 KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA); 1240 KKASSERT(bp->b_loffset == 0); 1241 bcopy(bp->b_data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES); 1242 error = 0; 1243 ++hammer2_iod_file_wembed; 1244 break; 1245 case HAMMER2_BREF_TYPE_DATA: 1246 error = hammer2_io_newnz(chain->hmp, 1247 chain->bref.type, 1248 chain->bref.data_off, 1249 chain->bytes, &dio); 1250 if (error) { 1251 hammer2_io_bqrelse(&dio); 1252 kprintf("hammer2: WRITE PATH: " 1253 "dbp bread error\n"); 1254 break; 1255 } 1256 bdata = hammer2_io_data(dio, chain->bref.data_off); 1257 1258 chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) + 1259 HAMMER2_ENC_CHECK(check_algo); 1260 bcopy(bp->b_data, bdata, chain->bytes); 1261 1262 /* 1263 * The flush code doesn't calculate check codes for 1264 * file data (doing so can result in excessive I/O), 1265 * so we do it here. 1266 */ 1267 hammer2_chain_setcheck(chain, bdata); 1268 hammer2_dedup_record(chain, bdata); 1269 1270 /* 1271 * Device buffer is now valid, chain is no longer in 1272 * the initial state. 1273 * 1274 * (No blockref table worries with file data) 1275 */ 1276 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL); 1277 1278 if (ioflag & IO_SYNC) { 1279 /* 1280 * Synchronous I/O requested. 1281 */ 1282 hammer2_io_bwrite(&dio); 1283 /* 1284 } else if ((ioflag & IO_DIRECT) && 1285 loff + n == pblksize) { 1286 hammer2_io_bdwrite(&dio); 1287 */ 1288 } else if (ioflag & IO_ASYNC) { 1289 hammer2_io_bawrite(&dio); 1290 } else { 1291 hammer2_io_bdwrite(&dio); 1292 } 1293 break; 1294 default: 1295 panic("hammer2_write_bp: bad chain type %d\n", 1296 chain->bref.type); 1297 /* NOT REACHED */ 1298 error = 0; 1299 break; 1300 } 1301 KKASSERT(error == 0); /* XXX TODO */ 1302 *errorp = error; 1303 } 1304 1305 /* 1306 * LIVE DEDUP HEURISTIC 1307 * 1308 * WARNING! This code is SMP safe but the heuristic allows SMP collisions. 1309 * All fields must be loaded into locals and validated. 1310 * 1311 * WARNING! Should only be used for file data, hammer2_chain_modify() only 1312 * checks for the dedup case on data chains. Also, dedup data can 1313 * only be recorded for committed chains (so NOT strategy writes 1314 * which can undergo further modification after the fact!). 1315 */ 1316 void 1317 hammer2_dedup_record(hammer2_chain_t *chain, char *data) 1318 { 1319 hammer2_dev_t *hmp; 1320 hammer2_dedup_t *dedup; 1321 uint64_t crc; 1322 int best = 0; 1323 int i; 1324 int dticks; 1325 1326 if (hammer2_dedup_enable == 0) 1327 return; 1328 1329 /* 1330 * Only committed data can be recorded for de-duplication, otherwise 1331 * the contents may change out from under us. So, on read if the 1332 * chain is not modified, and on flush when the chain is committed. 1333 */ 1334 if ((chain->flags & 1335 (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_INITIAL)) == 0) { 1336 return; 1337 } 1338 1339 1340 hmp = chain->hmp; 1341 1342 switch(HAMMER2_DEC_CHECK(chain->bref.methods)) { 1343 case HAMMER2_CHECK_ISCSI32: 1344 /* 1345 * XXX use the built-in crc (the dedup lookup sequencing 1346 * needs to be fixed so the check code is already present 1347 * when dedup_lookup is called) 1348 */ 1349 #if 0 1350 crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value; 1351 #endif 1352 crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED); 1353 break; 1354 case HAMMER2_CHECK_XXHASH64: 1355 crc = chain->bref.check.xxhash64.value; 1356 break; 1357 case HAMMER2_CHECK_SHA192: 1358 /* 1359 * XXX use the built-in crc (the dedup lookup sequencing 1360 * needs to be fixed so the check code is already present 1361 * when dedup_lookup is called) 1362 */ 1363 #if 0 1364 crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^ 1365 ((uint64_t *)chain->bref.check.sha192.data)[1] ^ 1366 ((uint64_t *)chain->bref.check.sha192.data)[2]; 1367 #endif 1368 crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED); 1369 break; 1370 default: 1371 /* 1372 * Cannot dedup without a check code 1373 * 1374 * NOTE: In particular, CHECK_NONE allows a sector to be 1375 * overwritten without copy-on-write, recording 1376 * a dedup block for a CHECK_NONE object would be 1377 * a disaster! 1378 */ 1379 return; 1380 } 1381 dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)]; 1382 for (i = 0; i < 4; ++i) { 1383 if (dedup[i].data_crc == crc) { 1384 best = i; 1385 break; 1386 } 1387 dticks = (int)(dedup[i].ticks - dedup[best].ticks); 1388 if (dticks < 0 || dticks > hz * 60 * 30) 1389 best = i; 1390 } 1391 dedup += best; 1392 if (hammer2_debug & 0x40000) { 1393 kprintf("REC %04x %016jx %016jx\n", 1394 (int)(dedup - hmp->heur_dedup), 1395 crc, 1396 chain->bref.data_off); 1397 } 1398 dedup->ticks = ticks; 1399 dedup->data_off = chain->bref.data_off; 1400 dedup->data_crc = crc; 1401 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUP); 1402 } 1403 1404 static 1405 hammer2_off_t 1406 hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize) 1407 { 1408 hammer2_dedup_t *dedup; 1409 hammer2_io_t *dio; 1410 hammer2_off_t off; 1411 uint64_t crc; 1412 char *data; 1413 int i; 1414 1415 if (hammer2_dedup_enable == 0) 1416 return 0; 1417 data = *datap; 1418 if (data == NULL) 1419 return 0; 1420 1421 /* 1422 * XXX use the built-in crc (the dedup lookup sequencing 1423 * needs to be fixed so the check code is already present 1424 * when dedup_lookup is called) 1425 */ 1426 crc = XXH64(data, pblksize, XXH_HAMMER2_SEED); 1427 dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)]; 1428 1429 if (hammer2_debug & 0x40000) { 1430 kprintf("LOC %04x/4 %016jx\n", 1431 (int)(dedup - hmp->heur_dedup), 1432 crc); 1433 } 1434 1435 for (i = 0; i < 4; ++i) { 1436 off = dedup[i].data_off; 1437 cpu_ccfence(); 1438 if (dedup[i].data_crc != crc) 1439 continue; 1440 if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize) 1441 continue; 1442 dio = hammer2_io_getquick(hmp, off, pblksize); 1443 if (dio && 1444 bcmp(data, hammer2_io_data(dio, off), pblksize) == 0) { 1445 /* 1446 * Make sure the INVALOK flag is cleared to prevent 1447 * the possibly-dirty bp from being invalidated now 1448 * that we are using it as part of a de-dup operation. 1449 */ 1450 if (hammer2_debug & 0x40000) { 1451 kprintf("DEDUP SUCCESS %016jx\n", 1452 (intmax_t)off); 1453 } 1454 atomic_clear_64(&dio->refs, HAMMER2_DIO_INVALOK); 1455 hammer2_io_putblk(&dio); 1456 *datap = NULL; 1457 dedup[i].ticks = ticks; /* update use */ 1458 ++hammer2_iod_file_wdedup; 1459 1460 return off; /* RETURN */ 1461 } 1462 if (dio) 1463 hammer2_io_putblk(&dio); 1464 } 1465 return 0; 1466 } 1467 1468 /* 1469 * Poof. Races are ok, if someone gets in and reuses a dedup offset 1470 * before or while we are clearing it they will also recover the freemap 1471 * entry (set it to fully allocated), so a bulkfree race can only set it 1472 * to a possibly-free state. 1473 * 1474 * XXX ok, well, not really sure races are ok but going to run with it 1475 * for the moment. 1476 */ 1477 void 1478 hammer2_dedup_clear(hammer2_dev_t *hmp) 1479 { 1480 int i; 1481 1482 for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) { 1483 hmp->heur_dedup[i].data_off = 0; 1484 hmp->heur_dedup[i].ticks = ticks - 1; 1485 } 1486 } 1487