/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block detection, check codes, and buffer cache
 * operations for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */

/*
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/objcache.h>
*/

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

/*
struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
*/

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * Except for the transaction init (which should normally not block),
 * we essentially run the strategy operation asynchronously via a XOP.
 *
 * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
 *	    to the current cpu.
 *
 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
 *     calls but it has in the past when multiple flushes are queued.
 *
 * XXX We currently terminate the transaction once we get a quorum, otherwise
 *     the frontend can stall, but this can leave the remaining nodes with
 *     a potential flush conflict.  We need to delay flushes on those nodes
 *     until running transactions complete separately from the normal
 *     transaction sequencing.  FIXME TODO.
 */
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *focus,
				const char *data, struct bio *bio);

static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
			char **datap, int pblksize);
int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct m_buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		break;
	default:
		assert(0);
		/*
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		*/
		break;
	}
	return (error);
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct m_vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}

/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/
/*
 * Callback used in the read path when a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct m_buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	      crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
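	/*
	 * On-media LZ4 blocks are prefixed with a native int holding the
	 * compressed payload size (written by the compression path in
	 * hammer2_compress_and_write()), so the payload itself begins at
	 * data[sizeof(int)].
	 */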
	compressed_size = *(const int *)data;
	KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));

	compressed_buffer = ecalloc(1, 65536);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression. "
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	free(compressed_buffer);
	/*
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
	*/
}

/*
 * Callback used in the read path when a block is compressed with ZLIB.
 * It is almost identical to the LZ4 callback, so in theory the two could
 * be unified, but we didn't want to change the bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct m_buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

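	/* note: despite its name, this buffer receives the decompressed output */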
	compressed_buffer = ecalloc(1, 65536);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	free(compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	/*
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
	*/
}

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct bio *bio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	ip = VTOI(ap->a_vp);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
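	/* logical strategy I/O is always issued in 64KB (HAMMER2_PBUFSIZE) aligned units */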

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bior");
	hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);
	/* asynchronous completion */

	return(0);
}

/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
void
hammer2_xop_strategy_read(hammer2_xop_t *arg, void *scratch, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_chain_t *focus;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct m_buf *bp;
	const char *data;
	int error;

	/*
	 * Note that we can race completion of the bio supplied by
	 * the front-end so we cannot access it until we determine
	 * that we are the ones finishing it up.
	 */
	lbase = xop->lbase;

	/*
	 * This is difficult to optimize.  The logical buffer might be
	 * partially dirty (contain dummy zero-fill pages), which would
	 * mess up our crc calculation if we were to try a direct read.
	 * So for now we always double-buffer through the underlying
	 * storage.
	 *
	 * If not for the above problem we could conditionalize on
	 * (1) 64KB buffer, (2) one chain (not multi-master) and
	 * (3) !hammer2_double_buffer, and issue a direct read into the
	 * logical buffer.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &error,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		if (chain)
			error = chain->error;
	} else {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend.  First-to-complete.  bio is only
	 * valid if we are determined to be the ones able to complete
	 * the operation.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}
	bio = xop->bio;
	bp = bio->bio_buf;
	bkvasync(bp);

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 *
	 * Also note that even for compressed data we would rather the
	 * kernel cache/swapcache device buffers more and (decompressed)
	 * logical buffers less, since that will significantly improve
	 * the amount of end-user data that can be cached.
	 *
	 * NOTE: The chain->data for xop->head.cluster.focus will be
	 *	 synchronized to the current cpu by xop_collect(),
	 *	 but other chains in the cluster might not be.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		//bp->b_flags |= B_NOTMETA;
		focus = xop->head.cluster.focus;
		data = hammer2_xop_gdata(&xop->head)->buf;
		hammer2_strategy_read_completion(focus, data, xop->bio);
		hammer2_xop_pdata(&xop->head);
		//biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_ENOENT:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
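		/*
		 * No media block exists at the requested offset (a hole
		 * or sparse region); a logical read of a hole returns
		 * zeros.
		 */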
		/*
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		*/
		bzero(bp->b_data, bp->b_bcount);
		//biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		kprintf("xop_strategy_read: error %08x loff=%016jx\n",
			error, (intmax_t)bp->b_loffset);
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		assert(0);
		/*
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		*/
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}

static
void
hammer2_strategy_read_completion(hammer2_chain_t *focus, const char *data,
				 struct bio *bio)
{
	struct m_buf *bp = bio->bio_buf;

	if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Copy from in-memory inode structure.
		 */
		bcopy(((const hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		/*
		bp->b_resid = 0;
		bp->b_error = 0;
		*/
	} else if (focus->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.  Release the
		 * chain (try to free it) when done.  The data is still
		 * cached by both the buffer cache in front and the
		 * block device behind us.  This leaves more room in the
		 * LRU chain cache for meta-data chains which we really
		 * want to retain.
		 *
		 * NOTE: Deduplication cannot be safely recorded for
		 *	 records without a check code.
		 */
		hammer2_dedup_record(focus, NULL, data);
		atomic_set_int(&focus->flags, HAMMER2_CHAIN_RELEASE);

		/*
		 * Decompression and copy.
		 */
		switch (HAMMER2_DEC_COMP(focus->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, focus->bytes,
							bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, focus->bytes,
							 bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(focus->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, focus->bytes);
			if (focus->bytes < bp->b_bcount) {
				bzero(bp->b_data + focus->bytes,
				      bp->b_bcount - focus->bytes);
			}
			/*
			bp->b_resid = 0;
			bp->b_error = 0;
			*/
			break;
		default:
			panic("hammer2_strategy_read_completion: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read_completion: unknown bref type");
	}
}

/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);
	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
				    HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2biow");
	hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
	/* asynchronous completion */

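	/*
	 * Throttle the frontend: hammer2_lwinprog_wait() blocks while more
	 * than hammer2_flush_pipe logical writes remain in progress in the
	 * backend, preventing the frontend from racing too far ahead.
	 */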
	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

	return(0);
}

/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thread
 * scratch buffer.
 */
void
hammer2_xop_strategy_write(hammer2_xop_t *arg, void *scratch, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct m_buf *bp;
	int error;
	int lblksize;
	int pblksize;
	char *bio_data;

	/*
	 * We can only access the bp/bio if the frontend has not yet
	 * completed.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_sh(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	lbase = xop->lbase;
	bio = xop->bio;			/* ephemeral */
	bp = bio->bio_buf;		/* ephemeral */
	ip = xop->head.ip1;		/* retained by ref */
	bio_data = scratch;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);
	bkvasync(bp);
	KKASSERT(lblksize <= MAXPHYS);
	bcopy(bp->b_data, bio_data, lblksize);

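	/*
	 * The copy above decouples us from the frontend buffer; once the
	 * shared lock is released below, another node (or the frontend
	 * itself) may complete the bio at any time.
	 */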
	hammer2_mtx_unlock(&xop->lock);
	bp = NULL;	/* safety, illegal to access after unlock */
	bio = NULL;	/* safety, illegal to access after unlock */

	/*
	 * Actual operation
	 */
	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bio_data, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	hammer2_xop_feed(&xop->head, NULL, clindex, error);

	/*
	 * Try to complete the operation on behalf of the front-end.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	if (error == HAMMER2_ERROR_EINPROGRESS) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has completed.
	 */
	xop->finished = 1;
	hammer2_mtx_unlock(&xop->lock);

	bio = xop->bio;		/* now owned by us */
	bp = bio->bio_buf;	/* now owned by us */

	if (error == HAMMER2_ERROR_ENOENT || error == 0) {
		/*
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
		*/
	} else {
		kprintf("xop_strategy_write: error %d loff=%016jx\n",
			error, (intmax_t)bp->b_loffset);
		assert(0);
		/*
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		*/
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_lwinprog_drop(ip->pmp);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE);
}

/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}

/*
 * Assign physical storage at (cparent, lbase), returning a suitable chain
 * and setting *errorp appropriately.
 *
 * If no error occurs, the returned chain will be in a modified state.
 *
 * If an error occurs, the returned chain may or may not be NULL.  If
 * not-null any chain->error (if not 0) will also be rolled up into *errorp.
 * So the caller only needs to test *errorp.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);
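	/* pradix is log2(pblksize); used if an existing chain must be resized */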

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);

	/*
	 * The lookup code should not return a DELETED chain to us, unless
	 * it's a short file embedded in the inode.  Then it is possible for
	 * the lookup to return a deleted inode.
	 */
	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
		kprintf("assign physical deleted chain @ "
			"%016jx (%016jx.%02x) ip %016jx\n",
			lbase, chain->bref.data_off, chain->bref.type,
			ip->meta.inum);
		Debugger("bleh");
	}

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp |= hammer2_chain_create(parentp, &chain, NULL, ip->pmp,
				       HAMMER2_ENC_CHECK(ip->meta.check_algo) |
				       HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
					     lbase, HAMMER2_PBUFRADIX,
					     HAMMER2_BREF_TYPE_DATA,
					     pblksize, mtid,
					     dedup_off, 0);
		if (chain == NULL)
			goto failed;
		/*ip->delta_dcount += pblksize;*/
	} else if (chain->error == 0) {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * a bit more finesse.
			 */
			*errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				*errorp |= hammer2_chain_resize(chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
				if (*errorp)
					break;
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			*errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
							HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	} else {
		*errorp = chain->error;
	}
	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
failed:
	return (chain);
}

/*
 * hammer2_write_file_core()
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(char *data, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *bdata;

	*errorp = 0;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* skip modifications */
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (bdata == NULL) {
			/*
			 * Copy of data already present on-media.
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, data);
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		} else {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(data, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(data, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}

/*
 * Helper
 *
 * Generic function that performs the compression in the compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *bdata;

	/*
	 * An all-zeros write creates a hole unless the check code
	 * is disabled.  When the check code is disabled all writes
	 * are done in-place, including any all-zeros writes.
	 *
	 * NOTE: A snapshot will still force a copy-on-write
	 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
	 */
	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		zero_write(data, ip, parentp, lbase, mtid, errorp);
		return;
	}

	/*
	 * Compression requested.  Try to compress the block.  We store
	 * the data normally if we cannot sufficiently compress it.
	 *
	 * We have a heuristic to detect files which are mostly
	 * incompressible and avoid the compression attempt in that
	 * case.  If the compression heuristic is turned off, we always
	 * try to compress.
	 */
	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);

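	/*
	 * ip->comp_heuristic is reset to 0 on success and incremented on
	 * failure (capped back to 8 after 128), so values below 8 mean
	 * compression recently worked; otherwise we only retry on every
	 * 8th write ((comp_heuristic & 7) == 0).
	 */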
	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
	    hammer2_always_compress) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 *
			 * NOTE: The LZ4 code seems to assume at least an
			 *	 8-byte buffer size granularity and may
			 *	 overrun the buffer if given a 4-byte
			 *	 granularity.
			 */
			comp_buffer = ecalloc(1, 32768);
			comp_size = LZ4_compress_limitedOutput(
					data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int64_t));
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = ecalloc(1, 32768);
			strm_compress.next_in = data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
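		/*
		 * Round the compressed size up to the nearest power-of-2
		 * allocation bucket (1KB..32KB); HAMMER2 block allocations
		 * are always power-of-2 sized.
		 */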
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}

	/*
	 * Assign physical storage, bdata will be set to NULL if a live-dedup
	 * was successful.
	 */
	bdata = comp_size ? comp_buffer : data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &bdata, errorp);

	if (*errorp) {
		goto done;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		*errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
		if (*errorp == 0) {
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		}
	} else if (bdata == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on the media.
		 */
		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		hammer2_io_t *dio;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_compress_and_write: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.type,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_block_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 */
			hammer2_chain_setcheck(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
			hammer2_dedup_record(chain, dio, bdata);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
				/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
				*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_compress_and_write: bad chain type %d\n",
				chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		free(comp_buffer);
}

/*
 * Helper
 *
 * Function that performs zero-checking and writing without compression;
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp,
	int check_algo)
{
	hammer2_chain_t *chain;
	char *bdata;

	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		/*
		 * An all-zeros write creates a hole unless the check code
		 * is disabled.  When the check code is disabled all writes
		 * are done in-place, including any all-zeros writes.
		 *
		 * NOTE: A snapshot will still force a copy-on-write
		 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
		 */
		zero_write(data, ip, parentp, lbase, mtid, errorp);
	} else {
		/*
		 * Normal write (bdata set to NULL if de-duplicated)
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* do nothing */
		} else if (bdata) {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} else {
			/* dedup occurred */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
			hammer2_chain_setcheck(chain, data);
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}

/*
 * Helper
 *
 * A function to test whether a block of data contains only zeros;
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

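	/*
	 * Scan one long at a time.  Callers pass power-of-2 physical block
	 * sizes (>= HAMMER2_ALLOC_MIN), so bytes is assumed to be a multiple
	 * of sizeof(long) and the buffer suitably aligned.
	 */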
	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}

/*
 * Helper
 *
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(char *data, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			if (*errorp == 0) {
				*errorp = hammer2_chain_modify_ip(ip, chain,
								  mtid, 0);
			}
			if (*errorp == 0) {
				wipdata = &chain->data->ipdata;
				KKASSERT(wipdata->meta.op_flags &
					 HAMMER2_OPFLAG_DIRECTDATA);
				bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
				++hammer2_iod_file_wembed;
			}
		} else {
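			/*
			 * A zero-fill block is represented as a hole:
			 * physically delete any existing block at this
			 * logical offset instead of writing zeros.
			 */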
			/* chain->error ok for deletion */
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}

/*
 * Helper
 *
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the no-compression path and
 * in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		error = 0;
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.type,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 */
		hammer2_chain_setcheck(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		hammer2_dedup_record(chain, dio, bdata);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
			/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
			*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		error = 0;
		break;
	}
	*errorp = error;
}

/*
 * LIVE DEDUP HEURISTICS
 *
 * Record media and crc information for possible dedup operation.  Note
 * that the dedup mask bits must also be set in the related DIO for a dedup
 * to be fully validated (which is handled in the freemap allocation code).
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 *
 * WARNING! Should only be used for file data and directory entries,
 *	    hammer2_chain_modify() only checks for the dedup case on data
 *	    chains.  Also, dedup data can only be recorded for committed
 *	    chains (so NOT strategy writes which can undergo further
 *	    modification after the fact!).
 */
void
hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
		     const char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	uint64_t crc;
	uint64_t mask;
	int best = 0;
	int i;
	int dticks;

	/*
	 * We can only record a dedup if we have media data to test against.
	 * If dedup is not enabled, return early, which allows a chain to
	 * remain marked MODIFIED (which might have benefits in special
	 * situations, though typically it does not).
	 */
	if (hammer2_dedup_enable == 0)
		return;
	if (dio == NULL) {
		dio = chain->dio;
		if (dio == NULL)
			return;
	}

	hmp = chain->hmp;

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_ISCSI32:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	case HAMMER2_CHECK_XXHASH64:
		crc = chain->bref.check.xxhash64.value;
		break;
	case HAMMER2_CHECK_SHA192:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[2];
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	default:
		/*
		 * Cannot dedup without a check code
		 *
		 * NOTE: In particular, CHECK_NONE allows a sector to be
		 *	 overwritten without copy-on-write, recording
		 *	 a dedup block for a CHECK_NONE object would be
		 *	 a disaster!
		 */
		return;
	}

	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);

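	/*
	 * The heuristic table is effectively 4-way set associative: the
	 * crc selects a bucket of four entries.  We replace a matching
	 * entry if one exists, otherwise a roughly least-recently-used
	 * entry (the hz * 60 * 30 test below also treats entries whose
	 * tick delta is implausibly large, e.g. due to wrap, as stale).
	 */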
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %016jx %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc,
			chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;

	/*
	 * Set the valid bits for the dedup only after we know the data
	 * buffer has been updated.  The alloc bits were set (and the valid
	 * bits cleared) when the media was allocated.
	 *
	 * This is done in two stages because the bulkfree code can race
	 * the gap between allocation and data population.  Both masks must
	 * be set before a bcmp/dedup operation is able to use the block.
	 */
	mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
	atomic_set_64(&dio->dedup_valid, mask);

#if 0
	/*
	 * XXX removed.  MODIFIED is an integral part of the flush code,
	 * lets not just clear it
	 */
	/*
	 * Once we record the dedup the chain must be marked clean to
	 * prevent reuse of the underlying block.  Remember that this
	 * write occurs when the buffer cache is flushed (i.e. on sync(),
	 * fsync(), filesystem periodic sync, or when the kernel needs to
	 * flush a buffer), and not whenever the user write()s.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp, -1);
	}
#endif
}

static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint64_t crc;
	uint64_t mask;
	char *data;
	char *dtmp;
	int i;

	if (hammer2_dedup_enable == 0)
		return 0;
	data = *datap;
	if (data == NULL)
		return 0;

	/*
	 * XXX use the built-in crc (the dedup lookup sequencing
	 * needs to be fixed so the check code is already present
	 * when dedup_lookup is called)
	 */
	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc);
	}

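	/*
	 * Scan all four entries in the bucket.  A candidate must match
	 * both the crc and the physical block size (encoded as a radix
	 * in the low bits of data_off), and must pass a full bcmp()
	 * against the media data before it can be used.
	 */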
	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		cpu_ccfence();
		if (dedup[i].data_crc != crc)
			continue;
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio) {
			dtmp = hammer2_io_data(dio, off);
			mask = hammer2_dedup_mask(dio, off, pblksize);
			if ((dio->dedup_alloc & mask) == mask &&
			    (dio->dedup_valid & mask) == mask &&
			    bcmp(data, dtmp, pblksize) == 0) {
				if (hammer2_debug & 0x40000) {
					kprintf("DEDUP SUCCESS %016jx\n",
						(intmax_t)off);
				}
				hammer2_io_putblk(&dio);
				*datap = NULL;
				dedup[i].ticks = ticks;	/* update use */
				atomic_add_long(&hammer2_iod_file_wdedup,
						pblksize);

				return off;		/* RETURN */
			}
			hammer2_io_putblk(&dio);
		}
	}
	return 0;
}

/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 * for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}