1 /* 2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression) 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 */ 36 /* 37 * This module handles low level logical file I/O (strategy) which backs 38 * the logical buffer cache. 39 * 40 * [De]compression, zero-block, check codes, and buffer cache operations 41 * for file data is handled here. 42 * 43 * Live dedup makes its home here as well. 44 */ 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/kernel.h> 49 #include <sys/fcntl.h> 50 #include <sys/buf.h> 51 #include <sys/proc.h> 52 #include <sys/namei.h> 53 #include <sys/mount.h> 54 #include <sys/vnode.h> 55 #include <sys/mountctl.h> 56 #include <sys/dirent.h> 57 #include <sys/uio.h> 58 #include <sys/objcache.h> 59 #include <sys/event.h> 60 #include <sys/file.h> 61 #include <vfs/fifofs/fifo.h> 62 63 #include "hammer2.h" 64 #include "hammer2_lz4.h" 65 66 #include "zlib/hammer2_zlib.h" 67 68 struct objcache *cache_buffer_read; 69 struct objcache *cache_buffer_write; 70 71 /* 72 * Strategy code (async logical file buffer I/O from system) 73 * 74 * Except for the transaction init (which should normally not block), 75 * we essentially run the strategy operation asynchronously via a XOP. 76 * 77 * WARNING! The XOP deals with buffer synchronization. It is not synchronized 78 * to the current cpu. 79 * 80 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync() 81 * calls but it has in the past when multiple flushes are queued. 82 * 83 * XXX We currently terminate the transaction once we get a quorum, otherwise 84 * the frontend can stall, but this can leave the remaining nodes with 85 * a potential flush conflict. We need to delay flushes on those nodes 86 * until running transactions complete separately from the normal 87 * transaction sequencing. FIXME TODO. 88 */ 89 static void hammer2_strategy_xop_read(hammer2_thread_t *thr, 90 hammer2_xop_t *arg); 91 static void hammer2_strategy_xop_write(hammer2_thread_t *thr, 92 hammer2_xop_t *arg); 93 static int hammer2_strategy_read(struct vop_strategy_args *ap); 94 static int hammer2_strategy_write(struct vop_strategy_args *ap); 95 static void hammer2_strategy_read_completion(hammer2_chain_t *chain, 96 char *data, struct bio *bio); 97 98 static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp, 99 char **datap, int pblksize); 100 101 int 102 hammer2_vop_strategy(struct vop_strategy_args *ap) 103 { 104 struct bio *biop; 105 struct buf *bp; 106 int error; 107 108 biop = ap->a_bio; 109 bp = biop->bio_buf; 110 111 switch(bp->b_cmd) { 112 case BUF_CMD_READ: 113 error = hammer2_strategy_read(ap); 114 ++hammer2_iod_file_read; 115 break; 116 case BUF_CMD_WRITE: 117 error = hammer2_strategy_write(ap); 118 ++hammer2_iod_file_write; 119 break; 120 default: 121 bp->b_error = error = EINVAL; 122 bp->b_flags |= B_ERROR; 123 biodone(biop); 124 break; 125 } 126 return (error); 127 } 128 129 /* 130 * Return the largest contiguous physical disk range for the logical 131 * request, in bytes. 132 * 133 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb) 134 * 135 * Basically disabled, the logical buffer write thread has to deal with 136 * buffers one-at-a-time. Note that this should not prevent cluster_read() 137 * from reading-ahead, it simply prevents it from trying form a single 138 * cluster buffer for the logical request. H2 already uses 64KB buffers! 139 */ 140 int 141 hammer2_vop_bmap(struct vop_bmap_args *ap) 142 { 143 *ap->a_doffsetp = NOOFFSET; 144 if (ap->a_runp) 145 *ap->a_runp = 0; 146 if (ap->a_runb) 147 *ap->a_runb = 0; 148 return (EOPNOTSUPP); 149 } 150 151 /**************************************************************************** 152 * READ SUPPORT * 153 ****************************************************************************/ 154 /* 155 * Callback used in read path in case that a block is compressed with LZ4. 156 */ 157 static 158 void 159 hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio) 160 { 161 struct buf *bp; 162 char *compressed_buffer; 163 int compressed_size; 164 int result; 165 166 bp = bio->bio_buf; 167 168 #if 0 169 if bio->bio_caller_info2.index && 170 bio->bio_caller_info1.uvalue32 != 171 crc32(bp->b_data, bp->b_bufsize) --- return error 172 #endif 173 174 KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE); 175 compressed_size = *(const int *)data; 176 KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int)); 177 178 compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT); 179 result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]), 180 compressed_buffer, 181 compressed_size, 182 bp->b_bufsize); 183 if (result < 0) { 184 kprintf("READ PATH: Error during decompression." 185 "bio %016jx/%d\n", 186 (intmax_t)bio->bio_offset, bytes); 187 /* make sure it isn't random garbage */ 188 bzero(compressed_buffer, bp->b_bufsize); 189 } 190 KKASSERT(result <= bp->b_bufsize); 191 bcopy(compressed_buffer, bp->b_data, bp->b_bufsize); 192 if (result < bp->b_bufsize) 193 bzero(bp->b_data + result, bp->b_bufsize - result); 194 objcache_put(cache_buffer_read, compressed_buffer); 195 bp->b_resid = 0; 196 bp->b_flags |= B_AGE; 197 } 198 199 /* 200 * Callback used in read path in case that a block is compressed with ZLIB. 201 * It is almost identical to LZ4 callback, so in theory they can be unified, 202 * but we didn't want to make changes in bio structure for that. 203 */ 204 static 205 void 206 hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio) 207 { 208 struct buf *bp; 209 char *compressed_buffer; 210 z_stream strm_decompress; 211 int result; 212 int ret; 213 214 bp = bio->bio_buf; 215 216 KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE); 217 strm_decompress.avail_in = 0; 218 strm_decompress.next_in = Z_NULL; 219 220 ret = inflateInit(&strm_decompress); 221 222 if (ret != Z_OK) 223 kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n"); 224 225 compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT); 226 strm_decompress.next_in = __DECONST(char *, data); 227 228 /* XXX supply proper size, subset of device bp */ 229 strm_decompress.avail_in = bytes; 230 strm_decompress.next_out = compressed_buffer; 231 strm_decompress.avail_out = bp->b_bufsize; 232 233 ret = inflate(&strm_decompress, Z_FINISH); 234 if (ret != Z_STREAM_END) { 235 kprintf("HAMMER2 ZLIB: Fatar error during decompression.\n"); 236 bzero(compressed_buffer, bp->b_bufsize); 237 } 238 bcopy(compressed_buffer, bp->b_data, bp->b_bufsize); 239 result = bp->b_bufsize - strm_decompress.avail_out; 240 if (result < bp->b_bufsize) 241 bzero(bp->b_data + result, strm_decompress.avail_out); 242 objcache_put(cache_buffer_read, compressed_buffer); 243 ret = inflateEnd(&strm_decompress); 244 245 bp->b_resid = 0; 246 bp->b_flags |= B_AGE; 247 } 248 249 /* 250 * Logical buffer I/O, async read. 251 */ 252 static 253 int 254 hammer2_strategy_read(struct vop_strategy_args *ap) 255 { 256 hammer2_xop_strategy_t *xop; 257 struct buf *bp; 258 struct bio *bio; 259 struct bio *nbio; 260 hammer2_inode_t *ip; 261 hammer2_key_t lbase; 262 263 bio = ap->a_bio; 264 bp = bio->bio_buf; 265 ip = VTOI(ap->a_vp); 266 nbio = push_bio(bio); 267 268 lbase = bio->bio_offset; 269 KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0); 270 271 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY); 272 xop->finished = 0; 273 xop->bio = bio; 274 xop->lbase = lbase; 275 hammer2_mtx_init(&xop->lock, "h2bior"); 276 hammer2_xop_start(&xop->head, hammer2_strategy_xop_read); 277 /* asynchronous completion */ 278 279 return(0); 280 } 281 282 /* 283 * Per-node XOP (threaded), do a synchronous lookup of the chain and 284 * its data. The frontend is asynchronous, so we are also responsible 285 * for racing to terminate the frontend. 286 */ 287 static 288 void 289 hammer2_strategy_xop_read(hammer2_thread_t *thr, hammer2_xop_t *arg) 290 { 291 hammer2_xop_strategy_t *xop = &arg->xop_strategy; 292 hammer2_chain_t *parent; 293 hammer2_chain_t *chain; 294 hammer2_key_t key_dummy; 295 hammer2_key_t lbase; 296 struct bio *bio; 297 struct buf *bp; 298 int error; 299 300 /* 301 * Note that we can race completion of the bio supplied by 302 * the front-end so we cannot access it until we determine 303 * that we are the ones finishing it up. 304 */ 305 lbase = xop->lbase; 306 307 /* 308 * This is difficult to optimize. The logical buffer might be 309 * partially dirty (contain dummy zero-fill pages), which would 310 * mess up our crc calculation if we were to try a direct read. 311 * So for now we always double-buffer through the underlying 312 * storage. 313 * 314 * If not for the above problem we could conditionalize on 315 * (1) 64KB buffer, (2) one chain (not multi-master) and 316 * (3) !hammer2_double_buffer, and issue a direct read into the 317 * logical buffer. 318 */ 319 parent = hammer2_inode_chain(xop->head.ip1, thr->clindex, 320 HAMMER2_RESOLVE_ALWAYS | 321 HAMMER2_RESOLVE_SHARED); 322 if (parent) { 323 chain = hammer2_chain_lookup(&parent, &key_dummy, 324 lbase, lbase, 325 &error, 326 HAMMER2_LOOKUP_ALWAYS | 327 HAMMER2_LOOKUP_SHARED); 328 if (chain) 329 error = chain->error; 330 } else { 331 error = HAMMER2_ERROR_EIO; 332 chain = NULL; 333 } 334 error = hammer2_xop_feed(&xop->head, chain, thr->clindex, error); 335 if (chain) { 336 hammer2_chain_unlock(chain); 337 hammer2_chain_drop(chain); 338 } 339 if (parent) { 340 hammer2_chain_unlock(parent); 341 hammer2_chain_drop(parent); 342 } 343 chain = NULL; /* safety */ 344 parent = NULL; /* safety */ 345 346 /* 347 * Race to finish the frontend. First-to-complete. bio is only 348 * valid if we are determined to be the ones able to complete 349 * the operation. 350 */ 351 if (xop->finished) 352 return; 353 hammer2_mtx_ex(&xop->lock); 354 if (xop->finished) { 355 hammer2_mtx_unlock(&xop->lock); 356 return; 357 } 358 bio = xop->bio; 359 bp = bio->bio_buf; 360 bkvasync(bp); 361 362 /* 363 * Async operation has not completed and we now own the lock. 364 * Determine if we can complete the operation by issuing the 365 * frontend collection non-blocking. 366 * 367 * H2 double-buffers the data, setting B_NOTMETA on the logical 368 * buffer hints to the OS that the logical buffer should not be 369 * swapcached (since the device buffer can be). 370 * 371 * Also note that even for compressed data we would rather the 372 * kernel cache/swapcache device buffers more and (decompressed) 373 * logical buffers less, since that will significantly improve 374 * the amount of end-user data that can be cached. 375 * 376 * NOTE: The chain->data for xop->head.cluster.focus will be 377 * synchronized to the current cpu by xop_collect(), 378 * but other chains in the cluster might not be. 379 */ 380 error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT); 381 382 switch(error) { 383 case 0: 384 xop->finished = 1; 385 hammer2_mtx_unlock(&xop->lock); 386 bp->b_flags |= B_NOTMETA; 387 chain = xop->head.cluster.focus; 388 hammer2_strategy_read_completion(chain, (char *)chain->data, 389 xop->bio); 390 biodone(bio); 391 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 392 break; 393 case HAMMER2_ERROR_ENOENT: 394 xop->finished = 1; 395 hammer2_mtx_unlock(&xop->lock); 396 bp->b_flags |= B_NOTMETA; 397 bp->b_resid = 0; 398 bp->b_error = 0; 399 bzero(bp->b_data, bp->b_bcount); 400 biodone(bio); 401 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 402 break; 403 case HAMMER2_ERROR_EINPROGRESS: 404 hammer2_mtx_unlock(&xop->lock); 405 break; 406 default: 407 kprintf("strategy_xop_read: error %08x loff=%016jx\n", 408 error, bp->b_loffset); 409 xop->finished = 1; 410 hammer2_mtx_unlock(&xop->lock); 411 bp->b_flags |= B_ERROR; 412 bp->b_error = EIO; 413 biodone(bio); 414 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 415 break; 416 } 417 } 418 419 static 420 void 421 hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data, 422 struct bio *bio) 423 { 424 struct buf *bp = bio->bio_buf; 425 426 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) { 427 /* 428 * Copy from in-memory inode structure. 429 */ 430 bcopy(((hammer2_inode_data_t *)data)->u.data, 431 bp->b_data, HAMMER2_EMBEDDED_BYTES); 432 bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES, 433 bp->b_bcount - HAMMER2_EMBEDDED_BYTES); 434 bp->b_resid = 0; 435 bp->b_error = 0; 436 } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) { 437 /* 438 * Data is on-media, record for live dedup. Release the 439 * chain (try to free it) when done. The data is still 440 * cached by both the buffer cache in front and the 441 * block device behind us. This leaves more room in the 442 * LRU chain cache for meta-data chains which we really 443 * want to retain. 444 * 445 * NOTE: Deduplication cannot be safely recorded for 446 * records without a check code. 447 */ 448 hammer2_dedup_record(chain, NULL, data); 449 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE); 450 451 /* 452 * Decompression and copy. 453 */ 454 switch (HAMMER2_DEC_COMP(chain->bref.methods)) { 455 case HAMMER2_COMP_LZ4: 456 hammer2_decompress_LZ4_callback(data, chain->bytes, 457 bio); 458 /* b_resid set by call */ 459 break; 460 case HAMMER2_COMP_ZLIB: 461 hammer2_decompress_ZLIB_callback(data, chain->bytes, 462 bio); 463 /* b_resid set by call */ 464 break; 465 case HAMMER2_COMP_NONE: 466 KKASSERT(chain->bytes <= bp->b_bcount); 467 bcopy(data, bp->b_data, chain->bytes); 468 if (chain->bytes < bp->b_bcount) { 469 bzero(bp->b_data + chain->bytes, 470 bp->b_bcount - chain->bytes); 471 } 472 bp->b_resid = 0; 473 bp->b_error = 0; 474 break; 475 default: 476 panic("hammer2_strategy_read: " 477 "unknown compression type"); 478 } 479 } else { 480 panic("hammer2_strategy_read: unknown bref type"); 481 } 482 } 483 484 /**************************************************************************** 485 * WRITE SUPPORT * 486 ****************************************************************************/ 487 488 /* 489 * Functions for compression in threads, 490 * from hammer2_vnops.c 491 */ 492 static void hammer2_write_file_core(char *data, hammer2_inode_t *ip, 493 hammer2_chain_t **parentp, 494 hammer2_key_t lbase, int ioflag, int pblksize, 495 hammer2_tid_t mtid, int *errorp); 496 static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip, 497 hammer2_chain_t **parentp, 498 hammer2_key_t lbase, int ioflag, int pblksize, 499 hammer2_tid_t mtid, int *errorp, 500 int comp_algo, int check_algo); 501 static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip, 502 hammer2_chain_t **parentp, 503 hammer2_key_t lbase, int ioflag, int pblksize, 504 hammer2_tid_t mtid, int *errorp, 505 int check_algo); 506 static int test_block_zeros(const char *buf, size_t bytes); 507 static void zero_write(char *data, hammer2_inode_t *ip, 508 hammer2_chain_t **parentp, 509 hammer2_key_t lbase, 510 hammer2_tid_t mtid, int *errorp); 511 static void hammer2_write_bp(hammer2_chain_t *chain, char *data, 512 int ioflag, int pblksize, 513 hammer2_tid_t mtid, int *errorp, 514 int check_algo); 515 516 static 517 int 518 hammer2_strategy_write(struct vop_strategy_args *ap) 519 { 520 hammer2_xop_strategy_t *xop; 521 hammer2_pfs_t *pmp; 522 struct bio *bio; 523 struct buf *bp; 524 hammer2_inode_t *ip; 525 526 bio = ap->a_bio; 527 bp = bio->bio_buf; 528 ip = VTOI(ap->a_vp); 529 pmp = ip->pmp; 530 531 hammer2_lwinprog_ref(pmp); 532 hammer2_trans_assert_strategy(pmp); 533 hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE); 534 535 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | 536 HAMMER2_XOP_STRATEGY); 537 xop->finished = 0; 538 xop->bio = bio; 539 xop->lbase = bio->bio_offset; 540 hammer2_mtx_init(&xop->lock, "h2biow"); 541 hammer2_xop_start(&xop->head, hammer2_strategy_xop_write); 542 /* asynchronous completion */ 543 544 hammer2_lwinprog_wait(pmp, hammer2_flush_pipe); 545 546 return(0); 547 } 548 549 /* 550 * Per-node XOP (threaded). Write the logical buffer to the media. 551 * 552 * This is a bit problematic because there may be multiple target and 553 * any of them may be able to release the bp. In addition, if our 554 * particulr target is offline we don't want to block the bp (and thus 555 * the frontend). To accomplish this we copy the data to the per-thr 556 * scratch buffer. 557 */ 558 static 559 void 560 hammer2_strategy_xop_write(hammer2_thread_t *thr, hammer2_xop_t *arg) 561 { 562 hammer2_xop_strategy_t *xop = &arg->xop_strategy; 563 hammer2_chain_t *parent; 564 hammer2_key_t lbase; 565 hammer2_inode_t *ip; 566 struct bio *bio; 567 struct buf *bp; 568 int error; 569 int lblksize; 570 int pblksize; 571 hammer2_off_t bio_offset; 572 char *bio_data; 573 574 /* 575 * We can only access the bp/bio if the frontend has not yet 576 * completed. 577 */ 578 if (xop->finished) 579 return; 580 hammer2_mtx_sh(&xop->lock); 581 if (xop->finished) { 582 hammer2_mtx_unlock(&xop->lock); 583 return; 584 } 585 586 lbase = xop->lbase; 587 bio = xop->bio; /* ephermal */ 588 bp = bio->bio_buf; /* ephermal */ 589 ip = xop->head.ip1; /* retained by ref */ 590 bio_offset = bio->bio_offset; 591 bio_data = thr->scratch; 592 593 /* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */ 594 595 lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL); 596 pblksize = hammer2_calc_physical(ip, lbase); 597 bkvasync(bp); 598 bcopy(bp->b_data, bio_data, lblksize); 599 600 hammer2_mtx_unlock(&xop->lock); 601 bp = NULL; /* safety, illegal to access after unlock */ 602 bio = NULL; /* safety, illegal to access after unlock */ 603 604 /* 605 * Actual operation 606 */ 607 parent = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS); 608 hammer2_write_file_core(bio_data, ip, &parent, 609 lbase, IO_ASYNC, pblksize, 610 xop->head.mtid, &error); 611 if (parent) { 612 hammer2_chain_unlock(parent); 613 hammer2_chain_drop(parent); 614 parent = NULL; /* safety */ 615 } 616 hammer2_xop_feed(&xop->head, NULL, thr->clindex, error); 617 618 /* 619 * Try to complete the operation on behalf of the front-end. 620 */ 621 if (xop->finished) 622 return; 623 hammer2_mtx_ex(&xop->lock); 624 if (xop->finished) { 625 hammer2_mtx_unlock(&xop->lock); 626 return; 627 } 628 629 /* 630 * Async operation has not completed and we now own the lock. 631 * Determine if we can complete the operation by issuing the 632 * frontend collection non-blocking. 633 * 634 * H2 double-buffers the data, setting B_NOTMETA on the logical 635 * buffer hints to the OS that the logical buffer should not be 636 * swapcached (since the device buffer can be). 637 */ 638 error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT); 639 640 if (error == HAMMER2_ERROR_EINPROGRESS) { 641 hammer2_mtx_unlock(&xop->lock); 642 return; 643 } 644 645 /* 646 * Async operation has completed. 647 */ 648 xop->finished = 1; 649 hammer2_mtx_unlock(&xop->lock); 650 651 bio = xop->bio; /* now owned by us */ 652 bp = bio->bio_buf; /* now owned by us */ 653 654 if (error == HAMMER2_ERROR_ENOENT || error == 0) { 655 bp->b_flags |= B_NOTMETA; 656 bp->b_resid = 0; 657 bp->b_error = 0; 658 biodone(bio); 659 } else { 660 kprintf("strategy_xop_write: error %d loff=%016jx\n", 661 error, bp->b_loffset); 662 bp->b_flags |= B_ERROR; 663 bp->b_error = EIO; 664 biodone(bio); 665 } 666 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 667 hammer2_trans_assert_strategy(ip->pmp); 668 hammer2_lwinprog_drop(ip->pmp); 669 hammer2_trans_done(ip->pmp); 670 } 671 672 /* 673 * Wait for pending I/O to complete 674 */ 675 void 676 hammer2_bioq_sync(hammer2_pfs_t *pmp) 677 { 678 hammer2_lwinprog_wait(pmp, 0); 679 } 680 681 /* 682 * Assign physical storage at (cparent, lbase), returning a suitable chain 683 * and setting *errorp appropriately. 684 * 685 * If no error occurs, the returned chain will be in a modified state. 686 * 687 * If an error occurs, the returned chain may or may not be NULL. If 688 * not-null any chain->error (if not 0) will also be rolled up into *errorp. 689 * So the caller only needs to test *errorp. 690 * 691 * cparent can wind up being anything. 692 * 693 * If datap is not NULL, *datap points to the real data we intend to write. 694 * If we can dedup the storage location we set *datap to NULL to indicate 695 * to the caller that a dedup occurred. 696 * 697 * NOTE: Special case for data embedded in inode. 698 */ 699 static 700 hammer2_chain_t * 701 hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp, 702 hammer2_key_t lbase, int pblksize, 703 hammer2_tid_t mtid, char **datap, int *errorp) 704 { 705 hammer2_chain_t *chain; 706 hammer2_key_t key_dummy; 707 hammer2_off_t dedup_off; 708 int pradix = hammer2_getradix(pblksize); 709 710 /* 711 * Locate the chain associated with lbase, return a locked chain. 712 * However, do not instantiate any data reference (which utilizes a 713 * device buffer) because we will be using direct IO via the 714 * logical buffer cache buffer. 715 */ 716 KKASSERT(pblksize >= HAMMER2_ALLOC_MIN); 717 718 chain = hammer2_chain_lookup(parentp, &key_dummy, 719 lbase, lbase, 720 errorp, 721 HAMMER2_LOOKUP_NODATA); 722 723 /* 724 * The lookup code should not return a DELETED chain to us, unless 725 * its a short-file embedded in the inode. Then it is possible for 726 * the lookup to return a deleted inode. 727 */ 728 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) && 729 chain->bref.type != HAMMER2_BREF_TYPE_INODE) { 730 kprintf("assign physical deleted chain @ " 731 "%016jx (%016jx.%02x) ip %016jx\n", 732 lbase, chain->bref.data_off, chain->bref.type, 733 ip->meta.inum); 734 Debugger("bleh"); 735 } 736 737 if (chain == NULL) { 738 /* 739 * We found a hole, create a new chain entry. 740 * 741 * NOTE: DATA chains are created without device backing 742 * store (nor do we want any). 743 */ 744 dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap, 745 pblksize); 746 *errorp |= hammer2_chain_create(parentp, &chain, 747 ip->pmp, 748 HAMMER2_ENC_CHECK(ip->meta.check_algo) | 749 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE), 750 lbase, HAMMER2_PBUFRADIX, 751 HAMMER2_BREF_TYPE_DATA, 752 pblksize, mtid, 753 dedup_off, 0); 754 if (chain == NULL) 755 goto failed; 756 /*ip->delta_dcount += pblksize;*/ 757 } else if (chain->error == 0) { 758 switch (chain->bref.type) { 759 case HAMMER2_BREF_TYPE_INODE: 760 /* 761 * The data is embedded in the inode, which requires 762 * a bit more finess. 763 */ 764 *errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0); 765 break; 766 case HAMMER2_BREF_TYPE_DATA: 767 dedup_off = hammer2_dedup_lookup(chain->hmp, datap, 768 pblksize); 769 if (chain->bytes != pblksize) { 770 *errorp |= hammer2_chain_resize(chain, 771 mtid, dedup_off, 772 pradix, 773 HAMMER2_MODIFY_OPTDATA); 774 if (*errorp) 775 break; 776 } 777 778 /* 779 * DATA buffers must be marked modified whether the 780 * data is in a logical buffer or not. We also have 781 * to make this call to fixup the chain data pointers 782 * after resizing in case this is an encrypted or 783 * compressed buffer. 784 */ 785 *errorp |= hammer2_chain_modify(chain, mtid, dedup_off, 786 HAMMER2_MODIFY_OPTDATA); 787 break; 788 default: 789 panic("hammer2_assign_physical: bad type"); 790 /* NOT REACHED */ 791 break; 792 } 793 } else { 794 *errorp = chain->error; 795 } 796 failed: 797 return (chain); 798 } 799 800 /* 801 * hammer2_write_file_core() - hammer2_write_thread() helper 802 * 803 * The core write function which determines which path to take 804 * depending on compression settings. We also have to locate the 805 * related chains so we can calculate and set the check data for 806 * the blockref. 807 */ 808 static 809 void 810 hammer2_write_file_core(char *data, hammer2_inode_t *ip, 811 hammer2_chain_t **parentp, 812 hammer2_key_t lbase, int ioflag, int pblksize, 813 hammer2_tid_t mtid, int *errorp) 814 { 815 hammer2_chain_t *chain; 816 char *bdata; 817 818 *errorp = 0; 819 820 switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) { 821 case HAMMER2_COMP_NONE: 822 /* 823 * We have to assign physical storage to the buffer 824 * we intend to dirty or write now to avoid deadlocks 825 * in the strategy code later. 826 * 827 * This can return NOOFFSET for inode-embedded data. 828 * The strategy code will take care of it in that case. 829 */ 830 bdata = data; 831 chain = hammer2_assign_physical(ip, parentp, lbase, pblksize, 832 mtid, &bdata, errorp); 833 if (*errorp) { 834 /* skip modifications */ 835 } else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) { 836 hammer2_inode_data_t *wipdata; 837 838 wipdata = &chain->data->ipdata; 839 KKASSERT(wipdata->meta.op_flags & 840 HAMMER2_OPFLAG_DIRECTDATA); 841 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES); 842 ++hammer2_iod_file_wembed; 843 } else if (bdata == NULL) { 844 /* 845 * Copy of data already present on-media. 846 */ 847 chain->bref.methods = 848 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) + 849 HAMMER2_ENC_CHECK(ip->meta.check_algo); 850 hammer2_chain_setcheck(chain, data); 851 } else { 852 hammer2_write_bp(chain, data, ioflag, pblksize, 853 mtid, errorp, ip->meta.check_algo); 854 } 855 if (chain) { 856 hammer2_chain_unlock(chain); 857 hammer2_chain_drop(chain); 858 } 859 break; 860 case HAMMER2_COMP_AUTOZERO: 861 /* 862 * Check for zero-fill only 863 */ 864 hammer2_zero_check_and_write(data, ip, parentp, 865 lbase, ioflag, pblksize, 866 mtid, errorp, 867 ip->meta.check_algo); 868 break; 869 case HAMMER2_COMP_LZ4: 870 case HAMMER2_COMP_ZLIB: 871 default: 872 /* 873 * Check for zero-fill and attempt compression. 874 */ 875 hammer2_compress_and_write(data, ip, parentp, 876 lbase, ioflag, pblksize, 877 mtid, errorp, 878 ip->meta.comp_algo, 879 ip->meta.check_algo); 880 break; 881 } 882 } 883 884 /* 885 * Helper 886 * 887 * Generic function that will perform the compression in compression 888 * write path. The compression algorithm is determined by the settings 889 * obtained from inode. 890 */ 891 static 892 void 893 hammer2_compress_and_write(char *data, hammer2_inode_t *ip, 894 hammer2_chain_t **parentp, 895 hammer2_key_t lbase, int ioflag, int pblksize, 896 hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo) 897 { 898 hammer2_chain_t *chain; 899 int comp_size; 900 int comp_block_size; 901 char *comp_buffer; 902 char *bdata; 903 904 /* 905 * An all-zeros write creates a hole unless the check code 906 * is disabled. When the check code is disabled all writes 907 * are done in-place, including any all-zeros writes. 908 * 909 * NOTE: A snapshot will still force a copy-on-write 910 * (see the HAMMER2_CHECK_NONE in hammer2_chain.c). 911 */ 912 if (check_algo != HAMMER2_CHECK_NONE && 913 test_block_zeros(data, pblksize)) { 914 zero_write(data, ip, parentp, lbase, mtid, errorp); 915 return; 916 } 917 918 /* 919 * Compression requested. Try to compress the block. We store 920 * the data normally if we cannot sufficiently compress it. 921 * 922 * We have a heuristic to detect files which are mostly 923 * uncompressable and avoid the compression attempt in that 924 * case. If the compression heuristic is turned off, we always 925 * try to compress. 926 */ 927 comp_size = 0; 928 comp_buffer = NULL; 929 930 KKASSERT(pblksize / 2 <= 32768); 931 932 if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 || 933 hammer2_always_compress) { 934 z_stream strm_compress; 935 int comp_level; 936 int ret; 937 938 switch(HAMMER2_DEC_ALGO(comp_algo)) { 939 case HAMMER2_COMP_LZ4: 940 /* 941 * We need to prefix with the size, LZ4 942 * doesn't do it for us. Add the related 943 * overhead. 944 * 945 * NOTE: The LZ4 code seems to assume at least an 946 * 8-byte buffer size granularity and may 947 * overrun the buffer if given a 4-byte 948 * granularity. 949 */ 950 comp_buffer = objcache_get(cache_buffer_write, 951 M_INTWAIT); 952 comp_size = LZ4_compress_limitedOutput( 953 data, 954 &comp_buffer[sizeof(int)], 955 pblksize, 956 pblksize / 2 - sizeof(int64_t)); 957 *(int *)comp_buffer = comp_size; 958 if (comp_size) 959 comp_size += sizeof(int); 960 break; 961 case HAMMER2_COMP_ZLIB: 962 comp_level = HAMMER2_DEC_LEVEL(comp_algo); 963 if (comp_level == 0) 964 comp_level = 6; /* default zlib compression */ 965 else if (comp_level < 6) 966 comp_level = 6; 967 else if (comp_level > 9) 968 comp_level = 9; 969 ret = deflateInit(&strm_compress, comp_level); 970 if (ret != Z_OK) { 971 kprintf("HAMMER2 ZLIB: fatal error " 972 "on deflateInit.\n"); 973 } 974 975 comp_buffer = objcache_get(cache_buffer_write, 976 M_INTWAIT); 977 strm_compress.next_in = data; 978 strm_compress.avail_in = pblksize; 979 strm_compress.next_out = comp_buffer; 980 strm_compress.avail_out = pblksize / 2; 981 ret = deflate(&strm_compress, Z_FINISH); 982 if (ret == Z_STREAM_END) { 983 comp_size = pblksize / 2 - 984 strm_compress.avail_out; 985 } else { 986 comp_size = 0; 987 } 988 ret = deflateEnd(&strm_compress); 989 break; 990 default: 991 kprintf("Error: Unknown compression method.\n"); 992 kprintf("Comp_method = %d.\n", comp_algo); 993 break; 994 } 995 } 996 997 if (comp_size == 0) { 998 /* 999 * compression failed or turned off 1000 */ 1001 comp_block_size = pblksize; /* safety */ 1002 if (++ip->comp_heuristic > 128) 1003 ip->comp_heuristic = 8; 1004 } else { 1005 /* 1006 * compression succeeded 1007 */ 1008 ip->comp_heuristic = 0; 1009 if (comp_size <= 1024) { 1010 comp_block_size = 1024; 1011 } else if (comp_size <= 2048) { 1012 comp_block_size = 2048; 1013 } else if (comp_size <= 4096) { 1014 comp_block_size = 4096; 1015 } else if (comp_size <= 8192) { 1016 comp_block_size = 8192; 1017 } else if (comp_size <= 16384) { 1018 comp_block_size = 16384; 1019 } else if (comp_size <= 32768) { 1020 comp_block_size = 32768; 1021 } else { 1022 panic("hammer2: WRITE PATH: " 1023 "Weird comp_size value."); 1024 /* NOT REACHED */ 1025 comp_block_size = pblksize; 1026 } 1027 1028 /* 1029 * Must zero the remainder or dedup (which operates on a 1030 * physical block basis) will not find matches. 1031 */ 1032 if (comp_size < comp_block_size) { 1033 bzero(comp_buffer + comp_size, 1034 comp_block_size - comp_size); 1035 } 1036 } 1037 1038 /* 1039 * Assign physical storage, data will be set to NULL if a live-dedup 1040 * was successful. 1041 */ 1042 bdata = comp_size ? comp_buffer : data; 1043 chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size, 1044 mtid, &bdata, errorp); 1045 1046 if (*errorp) { 1047 goto done; 1048 } 1049 1050 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) { 1051 hammer2_inode_data_t *wipdata; 1052 1053 *errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0); 1054 if (*errorp == 0) { 1055 wipdata = &chain->data->ipdata; 1056 KKASSERT(wipdata->meta.op_flags & 1057 HAMMER2_OPFLAG_DIRECTDATA); 1058 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES); 1059 ++hammer2_iod_file_wembed; 1060 } 1061 } else if (bdata == NULL) { 1062 /* 1063 * Live deduplication, a copy of the data is already present 1064 * on the media. 1065 */ 1066 if (comp_size) { 1067 chain->bref.methods = 1068 HAMMER2_ENC_COMP(comp_algo) + 1069 HAMMER2_ENC_CHECK(check_algo); 1070 } else { 1071 chain->bref.methods = 1072 HAMMER2_ENC_COMP( 1073 HAMMER2_COMP_NONE) + 1074 HAMMER2_ENC_CHECK(check_algo); 1075 } 1076 bdata = comp_size ? comp_buffer : data; 1077 hammer2_chain_setcheck(chain, bdata); 1078 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL); 1079 } else { 1080 hammer2_io_t *dio; 1081 1082 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED); 1083 1084 switch(chain->bref.type) { 1085 case HAMMER2_BREF_TYPE_INODE: 1086 panic("hammer2_write_bp: unexpected inode\n"); 1087 break; 1088 case HAMMER2_BREF_TYPE_DATA: 1089 /* 1090 * Optimize out the read-before-write 1091 * if possible. 1092 */ 1093 *errorp = hammer2_io_newnz(chain->hmp, 1094 chain->bref.type, 1095 chain->bref.data_off, 1096 chain->bytes, 1097 &dio); 1098 if (*errorp) { 1099 hammer2_io_brelse(&dio); 1100 kprintf("hammer2: WRITE PATH: " 1101 "dbp bread error\n"); 1102 break; 1103 } 1104 bdata = hammer2_io_data(dio, chain->bref.data_off); 1105 1106 /* 1107 * When loading the block make sure we don't 1108 * leave garbage after the compressed data. 1109 */ 1110 if (comp_size) { 1111 chain->bref.methods = 1112 HAMMER2_ENC_COMP(comp_algo) + 1113 HAMMER2_ENC_CHECK(check_algo); 1114 bcopy(comp_buffer, bdata, comp_size); 1115 } else { 1116 chain->bref.methods = 1117 HAMMER2_ENC_COMP( 1118 HAMMER2_COMP_NONE) + 1119 HAMMER2_ENC_CHECK(check_algo); 1120 bcopy(data, bdata, pblksize); 1121 } 1122 1123 /* 1124 * The flush code doesn't calculate check codes for 1125 * file data (doing so can result in excessive I/O), 1126 * so we do it here. 1127 */ 1128 hammer2_chain_setcheck(chain, bdata); 1129 1130 /* 1131 * Device buffer is now valid, chain is no longer in 1132 * the initial state. 1133 * 1134 * (No blockref table worries with file data) 1135 */ 1136 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL); 1137 hammer2_dedup_record(chain, dio, bdata); 1138 1139 /* Now write the related bdp. */ 1140 if (ioflag & IO_SYNC) { 1141 /* 1142 * Synchronous I/O requested. 1143 */ 1144 hammer2_io_bwrite(&dio); 1145 /* 1146 } else if ((ioflag & IO_DIRECT) && 1147 loff + n == pblksize) { 1148 hammer2_io_bdwrite(&dio); 1149 */ 1150 } else if (ioflag & IO_ASYNC) { 1151 hammer2_io_bawrite(&dio); 1152 } else { 1153 hammer2_io_bdwrite(&dio); 1154 } 1155 break; 1156 default: 1157 panic("hammer2_write_bp: bad chain type %d\n", 1158 chain->bref.type); 1159 /* NOT REACHED */ 1160 break; 1161 } 1162 } 1163 done: 1164 if (chain) { 1165 hammer2_chain_unlock(chain); 1166 hammer2_chain_drop(chain); 1167 } 1168 if (comp_buffer) 1169 objcache_put(cache_buffer_write, comp_buffer); 1170 } 1171 1172 /* 1173 * Helper 1174 * 1175 * Function that performs zero-checking and writing without compression, 1176 * it corresponds to default zero-checking path. 1177 */ 1178 static 1179 void 1180 hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip, 1181 hammer2_chain_t **parentp, 1182 hammer2_key_t lbase, int ioflag, int pblksize, 1183 hammer2_tid_t mtid, int *errorp, 1184 int check_algo) 1185 { 1186 hammer2_chain_t *chain; 1187 char *bdata; 1188 1189 if (check_algo != HAMMER2_CHECK_NONE && 1190 test_block_zeros(data, pblksize)) { 1191 /* 1192 * An all-zeros write creates a hole unless the check code 1193 * is disabled. When the check code is disabled all writes 1194 * are done in-place, including any all-zeros writes. 1195 * 1196 * NOTE: A snapshot will still force a copy-on-write 1197 * (see the HAMMER2_CHECK_NONE in hammer2_chain.c). 1198 */ 1199 zero_write(data, ip, parentp, lbase, mtid, errorp); 1200 } else { 1201 /* 1202 * Normal write 1203 */ 1204 bdata = data; 1205 chain = hammer2_assign_physical(ip, parentp, lbase, pblksize, 1206 mtid, &bdata, errorp); 1207 if (*errorp) { 1208 /* do nothing */ 1209 } else if (bdata) { 1210 hammer2_write_bp(chain, data, ioflag, pblksize, 1211 mtid, errorp, check_algo); 1212 } else { 1213 /* dedup occurred */ 1214 chain->bref.methods = 1215 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) + 1216 HAMMER2_ENC_CHECK(check_algo); 1217 hammer2_chain_setcheck(chain, data); 1218 } 1219 if (chain) { 1220 hammer2_chain_unlock(chain); 1221 hammer2_chain_drop(chain); 1222 } 1223 } 1224 } 1225 1226 /* 1227 * Helper 1228 * 1229 * A function to test whether a block of data contains only zeros, 1230 * returns TRUE (non-zero) if the block is all zeros. 1231 */ 1232 static 1233 int 1234 test_block_zeros(const char *buf, size_t bytes) 1235 { 1236 size_t i; 1237 1238 for (i = 0; i < bytes; i += sizeof(long)) { 1239 if (*(const long *)(buf + i) != 0) 1240 return (0); 1241 } 1242 return (1); 1243 } 1244 1245 /* 1246 * Helper 1247 * 1248 * Function to "write" a block that contains only zeros. 1249 */ 1250 static 1251 void 1252 zero_write(char *data, hammer2_inode_t *ip, 1253 hammer2_chain_t **parentp, 1254 hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp) 1255 { 1256 hammer2_chain_t *chain; 1257 hammer2_key_t key_dummy; 1258 1259 chain = hammer2_chain_lookup(parentp, &key_dummy, 1260 lbase, lbase, 1261 errorp, 1262 HAMMER2_LOOKUP_NODATA); 1263 if (chain) { 1264 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) { 1265 hammer2_inode_data_t *wipdata; 1266 1267 if (*errorp == 0) { 1268 *errorp = hammer2_chain_modify_ip(ip, chain, 1269 mtid, 0); 1270 } 1271 if (*errorp == 0) { 1272 wipdata = &chain->data->ipdata; 1273 KKASSERT(wipdata->meta.op_flags & 1274 HAMMER2_OPFLAG_DIRECTDATA); 1275 bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES); 1276 ++hammer2_iod_file_wembed; 1277 } 1278 } else { 1279 /* chain->error ok for deletion */ 1280 hammer2_chain_delete(*parentp, chain, 1281 mtid, HAMMER2_DELETE_PERMANENT); 1282 ++hammer2_iod_file_wzero; 1283 } 1284 hammer2_chain_unlock(chain); 1285 hammer2_chain_drop(chain); 1286 } else { 1287 ++hammer2_iod_file_wzero; 1288 } 1289 } 1290 1291 /* 1292 * Helper 1293 * 1294 * Function to write the data as it is, without performing any sort of 1295 * compression. This function is used in path without compression and 1296 * default zero-checking path. 1297 */ 1298 static 1299 void 1300 hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag, 1301 int pblksize, 1302 hammer2_tid_t mtid, int *errorp, int check_algo) 1303 { 1304 hammer2_inode_data_t *wipdata; 1305 hammer2_io_t *dio; 1306 char *bdata; 1307 int error; 1308 1309 error = 0; /* XXX TODO below */ 1310 1311 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED); 1312 1313 switch(chain->bref.type) { 1314 case HAMMER2_BREF_TYPE_INODE: 1315 wipdata = &chain->data->ipdata; 1316 KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA); 1317 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES); 1318 error = 0; 1319 ++hammer2_iod_file_wembed; 1320 break; 1321 case HAMMER2_BREF_TYPE_DATA: 1322 error = hammer2_io_newnz(chain->hmp, 1323 chain->bref.type, 1324 chain->bref.data_off, 1325 chain->bytes, &dio); 1326 if (error) { 1327 hammer2_io_bqrelse(&dio); 1328 kprintf("hammer2: WRITE PATH: " 1329 "dbp bread error\n"); 1330 break; 1331 } 1332 bdata = hammer2_io_data(dio, chain->bref.data_off); 1333 1334 chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) + 1335 HAMMER2_ENC_CHECK(check_algo); 1336 bcopy(data, bdata, chain->bytes); 1337 1338 /* 1339 * The flush code doesn't calculate check codes for 1340 * file data (doing so can result in excessive I/O), 1341 * so we do it here. 1342 */ 1343 hammer2_chain_setcheck(chain, bdata); 1344 1345 /* 1346 * Device buffer is now valid, chain is no longer in 1347 * the initial state. 1348 * 1349 * (No blockref table worries with file data) 1350 */ 1351 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL); 1352 hammer2_dedup_record(chain, dio, bdata); 1353 1354 if (ioflag & IO_SYNC) { 1355 /* 1356 * Synchronous I/O requested. 1357 */ 1358 hammer2_io_bwrite(&dio); 1359 /* 1360 } else if ((ioflag & IO_DIRECT) && 1361 loff + n == pblksize) { 1362 hammer2_io_bdwrite(&dio); 1363 */ 1364 } else if (ioflag & IO_ASYNC) { 1365 hammer2_io_bawrite(&dio); 1366 } else { 1367 hammer2_io_bdwrite(&dio); 1368 } 1369 break; 1370 default: 1371 panic("hammer2_write_bp: bad chain type %d\n", 1372 chain->bref.type); 1373 /* NOT REACHED */ 1374 error = 0; 1375 break; 1376 } 1377 *errorp = error; 1378 } 1379 1380 /* 1381 * LIVE DEDUP HEURISTICS 1382 * 1383 * Record media and crc information for possible dedup operation. Note 1384 * that the dedup mask bits must also be set in the related DIO for a dedup 1385 * to be fully validated (which is handled in the freemap allocation code). 1386 * 1387 * WARNING! This code is SMP safe but the heuristic allows SMP collisions. 1388 * All fields must be loaded into locals and validated. 1389 * 1390 * WARNING! Should only be used for file data and directory entries, 1391 * hammer2_chain_modify() only checks for the dedup case on data 1392 * chains. Also, dedup data can only be recorded for committed 1393 * chains (so NOT strategy writes which can undergo further 1394 * modification after the fact!). 1395 */ 1396 void 1397 hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio, char *data) 1398 { 1399 hammer2_dev_t *hmp; 1400 hammer2_dedup_t *dedup; 1401 uint64_t crc; 1402 uint64_t mask; 1403 int best = 0; 1404 int i; 1405 int dticks; 1406 1407 /* 1408 * We can only record a dedup if we have media data to test against. 1409 * If dedup is not enabled, return early, which allows a chain to 1410 * remain marked MODIFIED (which might have benefits in special 1411 * situations, though typically it does not). 1412 */ 1413 if (hammer2_dedup_enable == 0) 1414 return; 1415 if (dio == NULL) { 1416 dio = chain->dio; 1417 if (dio == NULL) 1418 return; 1419 } 1420 1421 hmp = chain->hmp; 1422 1423 switch(HAMMER2_DEC_CHECK(chain->bref.methods)) { 1424 case HAMMER2_CHECK_ISCSI32: 1425 /* 1426 * XXX use the built-in crc (the dedup lookup sequencing 1427 * needs to be fixed so the check code is already present 1428 * when dedup_lookup is called) 1429 */ 1430 #if 0 1431 crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value; 1432 #endif 1433 crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED); 1434 break; 1435 case HAMMER2_CHECK_XXHASH64: 1436 crc = chain->bref.check.xxhash64.value; 1437 break; 1438 case HAMMER2_CHECK_SHA192: 1439 /* 1440 * XXX use the built-in crc (the dedup lookup sequencing 1441 * needs to be fixed so the check code is already present 1442 * when dedup_lookup is called) 1443 */ 1444 #if 0 1445 crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^ 1446 ((uint64_t *)chain->bref.check.sha192.data)[1] ^ 1447 ((uint64_t *)chain->bref.check.sha192.data)[2]; 1448 #endif 1449 crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED); 1450 break; 1451 default: 1452 /* 1453 * Cannot dedup without a check code 1454 * 1455 * NOTE: In particular, CHECK_NONE allows a sector to be 1456 * overwritten without copy-on-write, recording 1457 * a dedup block for a CHECK_NONE object would be 1458 * a disaster! 1459 */ 1460 return; 1461 } 1462 1463 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE); 1464 1465 dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)]; 1466 for (i = 0; i < 4; ++i) { 1467 if (dedup[i].data_crc == crc) { 1468 best = i; 1469 break; 1470 } 1471 dticks = (int)(dedup[i].ticks - dedup[best].ticks); 1472 if (dticks < 0 || dticks > hz * 60 * 30) 1473 best = i; 1474 } 1475 dedup += best; 1476 if (hammer2_debug & 0x40000) { 1477 kprintf("REC %04x %016jx %016jx\n", 1478 (int)(dedup - hmp->heur_dedup), 1479 crc, 1480 chain->bref.data_off); 1481 } 1482 dedup->ticks = ticks; 1483 dedup->data_off = chain->bref.data_off; 1484 dedup->data_crc = crc; 1485 1486 /* 1487 * Set the valid bits for the dedup only after we know the data 1488 * buffer has been updated. The alloc bits were set (and the valid 1489 * bits cleared) when the media was allocated. 1490 * 1491 * This is done in two stages becuase the bulkfree code can race 1492 * the gap between allocation and data population. Both masks must 1493 * be set before a bcmp/dedup operation is able to use the block. 1494 */ 1495 mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes); 1496 atomic_set_64(&dio->dedup_valid, mask); 1497 1498 #if 0 1499 /* 1500 * XXX removed. MODIFIED is an integral part of the flush code, 1501 * lets not just clear it 1502 */ 1503 /* 1504 * Once we record the dedup the chain must be marked clean to 1505 * prevent reuse of the underlying block. Remember that this 1506 * write occurs when the buffer cache is flushed (i.e. on sync(), 1507 * fsync(), filesystem periodic sync, or when the kernel needs to 1508 * flush a buffer), and not whenever the user write()s. 1509 */ 1510 if (chain->flags & HAMMER2_CHAIN_MODIFIED) { 1511 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED); 1512 atomic_add_long(&hammer2_count_modified_chains, -1); 1513 if (chain->pmp) 1514 hammer2_pfs_memory_wakeup(chain->pmp); 1515 } 1516 #endif 1517 } 1518 1519 static 1520 hammer2_off_t 1521 hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize) 1522 { 1523 hammer2_dedup_t *dedup; 1524 hammer2_io_t *dio; 1525 hammer2_off_t off; 1526 uint64_t crc; 1527 uint64_t mask; 1528 char *data; 1529 char *dtmp; 1530 int i; 1531 1532 if (hammer2_dedup_enable == 0) 1533 return 0; 1534 data = *datap; 1535 if (data == NULL) 1536 return 0; 1537 1538 /* 1539 * XXX use the built-in crc (the dedup lookup sequencing 1540 * needs to be fixed so the check code is already present 1541 * when dedup_lookup is called) 1542 */ 1543 crc = XXH64(data, pblksize, XXH_HAMMER2_SEED); 1544 dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)]; 1545 1546 if (hammer2_debug & 0x40000) { 1547 kprintf("LOC %04x/4 %016jx\n", 1548 (int)(dedup - hmp->heur_dedup), 1549 crc); 1550 } 1551 1552 for (i = 0; i < 4; ++i) { 1553 off = dedup[i].data_off; 1554 cpu_ccfence(); 1555 if (dedup[i].data_crc != crc) 1556 continue; 1557 if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize) 1558 continue; 1559 dio = hammer2_io_getquick(hmp, off, pblksize); 1560 if (dio) { 1561 dtmp = hammer2_io_data(dio, off), 1562 mask = hammer2_dedup_mask(dio, off, pblksize); 1563 if ((dio->dedup_alloc & mask) == mask && 1564 (dio->dedup_valid & mask) == mask && 1565 bcmp(data, dtmp, pblksize) == 0) { 1566 if (hammer2_debug & 0x40000) { 1567 kprintf("DEDUP SUCCESS %016jx\n", 1568 (intmax_t)off); 1569 } 1570 hammer2_io_putblk(&dio); 1571 *datap = NULL; 1572 dedup[i].ticks = ticks; /* update use */ 1573 atomic_add_long(&hammer2_iod_file_wdedup, 1574 pblksize); 1575 1576 return off; /* RETURN */ 1577 } 1578 hammer2_io_putblk(&dio); 1579 } 1580 } 1581 return 0; 1582 } 1583 1584 /* 1585 * Poof. Races are ok, if someone gets in and reuses a dedup offset 1586 * before or while we are clearing it they will also recover the freemap 1587 * entry (set it to fully allocated), so a bulkfree race can only set it 1588 * to a possibly-free state. 1589 * 1590 * XXX ok, well, not really sure races are ok but going to run with it 1591 * for the moment. 1592 */ 1593 void 1594 hammer2_dedup_clear(hammer2_dev_t *hmp) 1595 { 1596 int i; 1597 1598 for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) { 1599 hmp->heur_dedup[i].data_off = 0; 1600 hmp->heur_dedup[i].ticks = ticks - 1; 1601 } 1602 } 1603