1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
5  * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
6  *
7  * This code is derived from software contributed to The DragonFly Project
8  * by Matthew Dillon <dillon@dragonflybsd.org>
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  *
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in
18  *    the documentation and/or other materials provided with the
19  *    distribution.
20  * 3. Neither the name of The DragonFly Project nor the names of its
21  *    contributors may be used to endorse or promote products derived
22  *    from this software without specific, prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
28  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 /*
38  * This module handles low level logical file I/O (strategy) which backs
39  * the logical buffer cache.
40  *
 * [De]compression, zero-block, check codes, and buffer cache operations
 * for file data are handled here.
43  *
44  * Live dedup makes its home here as well.
45  */
46 
47 /*
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/buf.h>
52 #include <sys/proc.h>
53 #include <sys/mount.h>
54 #include <sys/vnode.h>
55 #include <sys/objcache.h>
56 */
57 
58 #include "hammer2.h"
59 #include "hammer2_lz4.h"
60 
61 #include "zlib/hammer2_zlib.h"
62 
63 /*
64 struct objcache *cache_buffer_read;
65 struct objcache *cache_buffer_write;
66 */
67 
68 /*
69  * Strategy code (async logical file buffer I/O from system)
70  *
71  * Except for the transaction init (which should normally not block),
72  * we essentially run the strategy operation asynchronously via a XOP.
73  *
74  * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
75  *	    to the current cpu.
76  *
77  * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
78  *     calls but it has in the past when multiple flushes are queued.
79  *
80  * XXX We currently terminate the transaction once we get a quorum, otherwise
81  *     the frontend can stall, but this can leave the remaining nodes with
82  *     a potential flush conflict.  We need to delay flushes on those nodes
83  *     until running transactions complete separately from the normal
84  *     transaction sequencing.  FIXME TODO.
85  */
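/*
 * Illustrative summary (not compiled) of the frontend/backend split used
 * by the read and write paths below.  The frontend allocates an XOP,
 * points it at the bio, and returns immediately; one backend thread per
 * cluster node then runs the per-node function, feeds its result back,
 * and the threads race on xop->lock / xop->finished so that exactly one
 * of them completes the bio.
 *
 *	frontend (hammer2_vop_strategy)      backend (per-node XOP thread)
 *	-------------------------------      -----------------------------
 *	xop = hammer2_xop_alloc(ip, ...);    look up chain/data for lbase
 *	xop->bio = bio; xop->lbase = ...;    hammer2_xop_feed(..., error);
 *	hammer2_xop_start(&xop->head, ...);  lock xop, xop_collect(NOWAIT)
 *	return 0;  (bio finished later)      first finisher -> biodone(bio)
 */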
86 static int hammer2_strategy_read(struct vop_strategy_args *ap);
87 static int hammer2_strategy_write(struct vop_strategy_args *ap);
88 
89 /*
90 static void hammer2_strategy_read_completion(hammer2_chain_t *focus,
91 				const char *data, struct bio *bio);
92 */
93 static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
94 			char **datap, int pblksize);
95 
96 int
97 hammer2_vop_strategy(struct vop_strategy_args *ap)
98 {
99 	struct bio *biop;
100 	struct m_buf *bp;
101 	int error;
102 
103 	biop = ap->a_bio;
104 	bp = biop->bio_buf;
105 
106 	switch(bp->b_cmd) {
107 	case BUF_CMD_READ:
108 		error = hammer2_strategy_read(ap);
109 		++hammer2_iod_file_read;
110 		break;
111 	case BUF_CMD_WRITE:
112 		error = hammer2_strategy_write(ap);
113 		++hammer2_iod_file_write;
114 		break;
115 	default:
116 		assert(0);
117 		/*
118 		bp->b_error = error = EINVAL;
119 		bp->b_flags |= B_ERROR;
120 		biodone(biop);
121 		*/
122 		break;
123 	}
124 	return (error);
125 }
126 
127 /*
128  * Return the largest contiguous physical disk range for the logical
129  * request, in bytes.
130  *
131  * (struct m_vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
132  *
133  * Basically disabled, the logical buffer write thread has to deal with
134  * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading ahead; it simply prevents it from trying to form a single
136  * cluster buffer for the logical request.  H2 already uses 64KB buffers!
137  */
138 int
139 hammer2_vop_bmap(struct vop_bmap_args *ap)
140 {
141 	*ap->a_doffsetp = NOOFFSET;
142 	if (ap->a_runp)
143 		*ap->a_runp = 0;
144 	if (ap->a_runb)
145 		*ap->a_runb = 0;
146 	return (EOPNOTSUPP);
147 }
148 
149 /****************************************************************************
150  *				READ SUPPORT				    *
151  ****************************************************************************/
152 #if 0
153 /*
 * Callback used in the read path when a block is compressed with LZ4.
155  */
156 static
157 void
158 hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
159 {
160 	struct m_buf *bp;
161 	char *compressed_buffer;
162 	int compressed_size;
163 	int result;
164 
165 	bp = bio->bio_buf;
166 
167 #if 0
168 	if bio->bio_caller_info2.index &&
169 	      bio->bio_caller_info1.uvalue32 !=
170 	      crc32(bp->b_data, bp->b_bufsize) --- return error
171 #endif
172 
173 	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
174 	compressed_size = *(const int *)data;
175 	KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));
176 
177 	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
178 	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
179 				     compressed_buffer,
180 				     compressed_size,
181 				     bp->b_bufsize);
182 	if (result < 0) {
		kprintf("READ PATH: Error during decompression. "
184 			"bio %016jx/%d\n",
185 			(intmax_t)bio->bio_offset, bytes);
186 		/* make sure it isn't random garbage */
187 		bzero(compressed_buffer, bp->b_bufsize);
188 	}
189 	KKASSERT(result <= bp->b_bufsize);
190 	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
191 	if (result < bp->b_bufsize)
192 		bzero((char *)bp->b_data + result, bp->b_bufsize - result);
193 	objcache_put(cache_buffer_read, compressed_buffer);
194 	bp->b_resid = 0;
195 	bp->b_flags |= B_AGE;
196 }
197 
198 /*
 * Callback used in the read path when a block is compressed with ZLIB.
 * It is almost identical to the LZ4 callback, so in theory they could be
 * unified, but we didn't want to change the bio structure for that.
202  */
203 static
204 void
205 hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
206 {
207 	struct m_buf *bp;
208 	char *compressed_buffer;
209 	z_stream strm_decompress;
210 	int result;
211 	int ret;
212 
213 	bp = bio->bio_buf;
214 
215 	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
216 	strm_decompress.avail_in = 0;
217 	strm_decompress.next_in = Z_NULL;
218 
219 	ret = inflateInit(&strm_decompress);
220 
221 	if (ret != Z_OK)
222 		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");
223 
224 	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
225 	strm_decompress.next_in = __DECONST(char *, data);
226 
227 	/* XXX supply proper size, subset of device bp */
228 	strm_decompress.avail_in = bytes;
229 	strm_decompress.next_out = compressed_buffer;
230 	strm_decompress.avail_out = bp->b_bufsize;
231 
232 	ret = inflate(&strm_decompress, Z_FINISH);
233 	if (ret != Z_STREAM_END) {
234 		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
235 		bzero(compressed_buffer, bp->b_bufsize);
236 	}
237 	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
238 	result = bp->b_bufsize - strm_decompress.avail_out;
239 	if (result < bp->b_bufsize)
240 		bzero((char *)bp->b_data + result, strm_decompress.avail_out);
241 	objcache_put(cache_buffer_read, compressed_buffer);
242 	ret = inflateEnd(&strm_decompress);
243 
244 	bp->b_resid = 0;
245 	bp->b_flags |= B_AGE;
246 }
247 #endif
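
/*
 * Minimal userspace-style sketch (an assumption, not part of this port) of
 * how the size-prefixed LZ4 format used by the disabled callbacks above is
 * decoded: the first sizeof(int) bytes of the physical block hold the
 * compressed length, the compressed payload follows, and the output is
 * zero-filled out to the logical buffer size.  Kept under #if 0 like the
 * callbacks themselves.
 */
#if 0
static int
example_decode_lz4_block(const char *pdata, int pbytes, char *lbuf, int lbytes)
{
	int clen = *(const int *)pdata;		/* size prefix */
	int result;

	if (clen <= 0 || clen > pbytes - (int)sizeof(int))
		return (EINVAL);
	result = LZ4_decompress_safe(__DECONST(char *, pdata + sizeof(int)),
				     lbuf, clen, lbytes);
	if (result < 0)
		return (EIO);			/* corrupt block */
	if (result < lbytes)
		bzero(lbuf + result, lbytes - result);
	return (0);
}
#endif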
248 
249 /*
250  * Logical buffer I/O, async read.
251  */
252 static
253 int
254 hammer2_strategy_read(struct vop_strategy_args *ap)
255 {
256 	hammer2_xop_strategy_t *xop;
257 	struct bio *bio;
258 	hammer2_inode_t *ip;
259 	hammer2_key_t lbase;
260 
261 	bio = ap->a_bio;
262 	ip = VTOI(ap->a_vp);
263 
264 	lbase = bio->bio_offset;
265 	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
266 
267 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
268 	xop->finished = 0;
269 	xop->bio = bio;
270 	xop->lbase = lbase;
271 	hammer2_mtx_init(&xop->lock, "h2bior");
272 	hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);
273 	/* asynchronous completion */
274 
275 	return(0);
276 }
277 
278 /*
279  * Per-node XOP (threaded), do a synchronous lookup of the chain and
280  * its data.  The frontend is asynchronous, so we are also responsible
281  * for racing to terminate the frontend.
282  */
283 void
284 hammer2_xop_strategy_read(hammer2_xop_t *arg, void *scratch, int clindex)
285 {
286 	assert(0);
287 #if 0
288 	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
289 	hammer2_chain_t *parent;
290 	hammer2_chain_t *chain;
291 	hammer2_chain_t *focus;
292 	hammer2_key_t key_dummy;
293 	hammer2_key_t lbase;
294 	struct bio *bio;
295 	struct m_buf *bp;
296 	const char *data;
297 	int error;
298 
299 	/*
300 	 * Note that we can race completion of the bio supplied by
301 	 * the front-end so we cannot access it until we determine
302 	 * that we are the ones finishing it up.
303 	 */
304 	lbase = xop->lbase;
305 
306 	/*
307 	 * This is difficult to optimize.  The logical buffer might be
308 	 * partially dirty (contain dummy zero-fill pages), which would
309 	 * mess up our crc calculation if we were to try a direct read.
310 	 * So for now we always double-buffer through the underlying
311 	 * storage.
312 	 *
313 	 * If not for the above problem we could conditionalize on
314 	 * (1) 64KB buffer, (2) one chain (not multi-master) and
315 	 * (3) !hammer2_double_buffer, and issue a direct read into the
316 	 * logical buffer.
317 	 */
318 	parent = hammer2_inode_chain(xop->head.ip1, clindex,
319 				     HAMMER2_RESOLVE_ALWAYS |
320 				     HAMMER2_RESOLVE_SHARED);
321 	if (parent) {
322 		chain = hammer2_chain_lookup(&parent, &key_dummy,
323 					     lbase, lbase,
324 					     &error,
325 					     HAMMER2_LOOKUP_ALWAYS |
326 					     HAMMER2_LOOKUP_SHARED);
327 		if (chain)
328 			error = chain->error;
329 	} else {
330 		error = HAMMER2_ERROR_EIO;
331 		chain = NULL;
332 	}
333 	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
334 	if (chain) {
335 		hammer2_chain_unlock(chain);
336 		hammer2_chain_drop(chain);
337 	}
338 	if (parent) {
339 		hammer2_chain_unlock(parent);
340 		hammer2_chain_drop(parent);
341 	}
342 	chain = NULL;	/* safety */
343 	parent = NULL;	/* safety */
344 
345 	/*
346 	 * Race to finish the frontend.  First-to-complete.  bio is only
347 	 * valid if we are determined to be the ones able to complete
348 	 * the operation.
349 	 */
350 	if (xop->finished)
351 		return;
352 	hammer2_mtx_ex(&xop->lock);
353 	if (xop->finished) {
354 		hammer2_mtx_unlock(&xop->lock);
355 		return;
356 	}
357 	bio = xop->bio;
358 	bp = bio->bio_buf;
359 	bkvasync(bp);
360 
361 	/*
362 	 * Async operation has not completed and we now own the lock.
363 	 * Determine if we can complete the operation by issuing the
364 	 * frontend collection non-blocking.
365 	 *
366 	 * H2 double-buffers the data, setting B_NOTMETA on the logical
367 	 * buffer hints to the OS that the logical buffer should not be
368 	 * swapcached (since the device buffer can be).
369 	 *
370 	 * Also note that even for compressed data we would rather the
371 	 * kernel cache/swapcache device buffers more and (decompressed)
372 	 * logical buffers less, since that will significantly improve
373 	 * the amount of end-user data that can be cached.
374 	 *
375 	 * NOTE: The chain->data for xop->head.cluster.focus will be
376 	 *	 synchronized to the current cpu by xop_collect(),
377 	 *	 but other chains in the cluster might not be.
378 	 */
379 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);
380 
381 	switch(error) {
382 	case 0:
383 		xop->finished = 1;
384 		hammer2_mtx_unlock(&xop->lock);
385 		bp->b_flags |= B_NOTMETA;
386 		focus = xop->head.cluster.focus;
387 		data = hammer2_xop_gdata(&xop->head)->buf;
388 		hammer2_strategy_read_completion(focus, data, xop->bio);
389 		hammer2_xop_pdata(&xop->head);
390 		biodone(bio);
391 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
392 		break;
393 	case HAMMER2_ERROR_ENOENT:
394 		xop->finished = 1;
395 		hammer2_mtx_unlock(&xop->lock);
396 		bp->b_flags |= B_NOTMETA;
397 		bp->b_resid = 0;
398 		bp->b_error = 0;
399 		bzero(bp->b_data, bp->b_bcount);
400 		biodone(bio);
401 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
402 		break;
403 	case HAMMER2_ERROR_EINPROGRESS:
404 		hammer2_mtx_unlock(&xop->lock);
405 		break;
406 	default:
407 		kprintf("xop_strategy_read: error %08x loff=%016jx\n",
408 			error, (intmax_t)bp->b_loffset);
409 		xop->finished = 1;
410 		hammer2_mtx_unlock(&xop->lock);
411 		bp->b_flags |= B_ERROR;
412 		bp->b_error = EIO;
413 		biodone(bio);
414 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
415 		break;
416 	}
417 #endif
418 }
419 
420 #if 0
421 static
422 void
423 hammer2_strategy_read_completion(hammer2_chain_t *focus, const char *data,
424 				 struct bio *bio)
425 {
426 	struct m_buf *bp = bio->bio_buf;
427 
428 	if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
429 		/*
430 		 * Copy from in-memory inode structure.
431 		 */
432 		bcopy(((const hammer2_inode_data_t *)data)->u.data,
433 		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
434 		bzero((char *)bp->b_data + HAMMER2_EMBEDDED_BYTES,
435 		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
436 		bp->b_resid = 0;
437 		bp->b_error = 0;
438 	} else if (focus->bref.type == HAMMER2_BREF_TYPE_DATA) {
439 		/*
440 		 * Data is on-media, record for live dedup.  Release the
441 		 * chain (try to free it) when done.  The data is still
442 		 * cached by both the buffer cache in front and the
443 		 * block device behind us.  This leaves more room in the
444 		 * LRU chain cache for meta-data chains which we really
445 		 * want to retain.
446 		 *
447 		 * NOTE: Deduplication cannot be safely recorded for
448 		 *	 records without a check code.
449 		 */
450 		hammer2_dedup_record(focus, NULL, data);
451 		atomic_set_int(&focus->flags, HAMMER2_CHAIN_RELEASE);
452 
453 		/*
454 		 * Decompression and copy.
455 		 */
456 		switch (HAMMER2_DEC_COMP(focus->bref.methods)) {
457 		case HAMMER2_COMP_LZ4:
458 			hammer2_decompress_LZ4_callback(data, focus->bytes,
459 							bio);
460 			/* b_resid set by call */
461 			break;
462 		case HAMMER2_COMP_ZLIB:
463 			hammer2_decompress_ZLIB_callback(data, focus->bytes,
464 							 bio);
465 			/* b_resid set by call */
466 			break;
467 		case HAMMER2_COMP_NONE:
468 			KKASSERT(focus->bytes <= bp->b_bcount);
469 			bcopy(data, bp->b_data, focus->bytes);
470 			if (focus->bytes < bp->b_bcount) {
471 				bzero((char *)bp->b_data + focus->bytes,
472 				      bp->b_bcount - focus->bytes);
473 			}
474 			bp->b_resid = 0;
475 			bp->b_error = 0;
476 			break;
477 		default:
478 			panic("hammer2_strategy_read_completion: "
479 			      "unknown compression type");
480 		}
481 	} else {
482 		panic("hammer2_strategy_read_completion: unknown bref type");
483 	}
484 }
485 #endif
486 
487 /****************************************************************************
488  *				WRITE SUPPORT				    *
489  ****************************************************************************/
490 
491 /*
492  * Functions for compression in threads,
493  * from hammer2_vnops.c
494  */
495 static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
496 				hammer2_chain_t **parentp,
497 				hammer2_key_t lbase, int ioflag, int pblksize,
498 				hammer2_tid_t mtid, int *errorp);
499 static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
500 				hammer2_chain_t **parentp,
501 				hammer2_key_t lbase, int ioflag, int pblksize,
502 				hammer2_tid_t mtid, int *errorp,
503 				int comp_algo, int check_algo);
504 static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
505 				hammer2_chain_t **parentp,
506 				hammer2_key_t lbase, int ioflag, int pblksize,
507 				hammer2_tid_t mtid, int *errorp,
508 				int check_algo);
509 static int test_block_zeros(const char *buf, size_t bytes);
510 static void zero_write(char *data, hammer2_inode_t *ip,
511 				hammer2_chain_t **parentp,
512 				hammer2_key_t lbase,
513 				hammer2_tid_t mtid, int *errorp);
514 static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
515 				int ioflag, int pblksize,
516 				hammer2_tid_t mtid, int *errorp,
517 				int check_algo);
518 
519 int
520 hammer2_strategy_write(struct vop_strategy_args *ap)
521 {
522 	hammer2_xop_strategy_t *xop;
523 	hammer2_pfs_t *pmp;
524 	struct bio *bio;
525 	hammer2_inode_t *ip;
526 
527 	bio = ap->a_bio;
528 	ip = VTOI(ap->a_vp);
529 	pmp = ip->pmp;
530 
531 	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
532 	hammer2_lwinprog_ref(pmp);
533 	hammer2_trans_assert_strategy(pmp);
534 	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);
535 
536 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
537 				    HAMMER2_XOP_STRATEGY);
538 	xop->finished = 0;
539 	xop->bio = bio;
540 	xop->lbase = bio->bio_offset;
541 	hammer2_mtx_init(&xop->lock, "h2biow");
542 	hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
543 	/* asynchronous completion */
544 
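	/*
	 * Back-pressure (descriptive note): the hammer2_lwinprog_ref()
	 * taken above counts this bio as a logical write in progress;
	 * the matching drop happens in the backend once the XOP finishes.
	 * Waiting here until the in-progress count drops to the
	 * hammer2_flush_pipe limit keeps the frontend from queueing
	 * unbounded write work ahead of the backend threads.
	 */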
545 	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
546 
547 	return(0);
548 }
549 
550 /*
551  * Per-node XOP (threaded).  Write the logical buffer to the media.
552  *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thread
 * scratch buffer.
558  */
559 void
560 hammer2_xop_strategy_write(hammer2_xop_t *arg, void *scratch, int clindex)
561 {
562 	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
563 	hammer2_chain_t *parent;
564 	hammer2_key_t lbase;
565 	hammer2_inode_t *ip;
566 	struct bio *bio;
567 	struct m_buf *bp;
568 	int error;
569 	int lblksize;
570 	int pblksize;
571 	char *bio_data;
572 
573 	/*
574 	 * We can only access the bp/bio if the frontend has not yet
575 	 * completed.
576 	 */
577 	if (xop->finished)
578 		return;
579 	hammer2_mtx_sh(&xop->lock);
580 	if (xop->finished) {
581 		hammer2_mtx_unlock(&xop->lock);
582 		return;
583 	}
584 
585 	lbase = xop->lbase;
	bio = xop->bio;			/* ephemeral */
	bp = bio->bio_buf;		/* ephemeral */
588 	ip = xop->head.ip1;		/* retained by ref */
589 	bio_data = scratch;
590 
591 	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */
592 
593 	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
594 	pblksize = hammer2_calc_physical(ip, lbase);
595 	bkvasync(bp);
596 	KKASSERT(lblksize <= MAXPHYS);
597 	bcopy(bp->b_data, bio_data, lblksize);
598 
599 	hammer2_mtx_unlock(&xop->lock);
600 	bp = NULL;	/* safety, illegal to access after unlock */
601 	bio = NULL;	/* safety, illegal to access after unlock */
602 
603 	/*
604 	 * Actual operation
605 	 */
606 	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
607 	hammer2_write_file_core(bio_data, ip, &parent,
608 				lbase, IO_ASYNC, pblksize,
609 				xop->head.mtid, &error);
610 	if (parent) {
611 		hammer2_chain_unlock(parent);
612 		hammer2_chain_drop(parent);
613 		parent = NULL;	/* safety */
614 	}
615 	hammer2_xop_feed(&xop->head, NULL, clindex, error);
616 
617 	/*
618 	 * Try to complete the operation on behalf of the front-end.
619 	 */
620 	if (xop->finished)
621 		return;
622 	hammer2_mtx_ex(&xop->lock);
623 	if (xop->finished) {
624 		hammer2_mtx_unlock(&xop->lock);
625 		return;
626 	}
627 
628 	/*
629 	 * Async operation has not completed and we now own the lock.
630 	 * Determine if we can complete the operation by issuing the
631 	 * frontend collection non-blocking.
632 	 *
633 	 * H2 double-buffers the data, setting B_NOTMETA on the logical
634 	 * buffer hints to the OS that the logical buffer should not be
635 	 * swapcached (since the device buffer can be).
636 	 */
637 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);
638 
639 	if (error == HAMMER2_ERROR_EINPROGRESS) {
640 		hammer2_mtx_unlock(&xop->lock);
641 		return;
642 	}
643 
644 	/*
645 	 * Async operation has completed.
646 	 */
647 	xop->finished = 1;
648 	hammer2_mtx_unlock(&xop->lock);
649 
650 	bio = xop->bio;		/* now owned by us */
651 	bp = bio->bio_buf;	/* now owned by us */
652 
653 	if (error == HAMMER2_ERROR_ENOENT || error == 0) {
654 		/*
655 		bp->b_flags |= B_NOTMETA;
656 		bp->b_resid = 0;
657 		bp->b_error = 0;
658 		biodone(bio);
659 		*/
660 	} else {
661 		kprintf("xop_strategy_write: error %d loff=%016jx\n",
662 			error, (intmax_t)bp->b_loffset);
663 		assert(0);
664 		/*
665 		bp->b_flags |= B_ERROR;
666 		bp->b_error = EIO;
667 		biodone(bio);
668 		*/
669 	}
670 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
671 	hammer2_trans_assert_strategy(ip->pmp);
672 	hammer2_lwinprog_drop(ip->pmp);
673 	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE);
674 }
675 
676 /*
677  * Wait for pending I/O to complete
678  */
679 void
680 hammer2_bioq_sync(hammer2_pfs_t *pmp)
681 {
682 	hammer2_lwinprog_wait(pmp, 0);
683 }
684 
685 /*
686  * Assign physical storage at (cparent, lbase), returning a suitable chain
687  * and setting *errorp appropriately.
688  *
689  * If no error occurs, the returned chain will be in a modified state.
690  *
691  * If an error occurs, the returned chain may or may not be NULL.  If
692  * not-null any chain->error (if not 0) will also be rolled up into *errorp.
693  * So the caller only needs to test *errorp.
694  *
695  * cparent can wind up being anything.
696  *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred (see the illustrative caller sketch
 * after this function).
700  *
701  * NOTE: Special case for data embedded in inode.
702  */
703 static
704 hammer2_chain_t *
705 hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
706 			hammer2_key_t lbase, int pblksize,
707 			hammer2_tid_t mtid, char **datap, int *errorp)
708 {
709 	hammer2_chain_t *chain;
710 	hammer2_key_t key_dummy;
711 	hammer2_off_t dedup_off;
712 	int pradix = hammer2_getradix(pblksize);
713 
714 	/*
715 	 * Locate the chain associated with lbase, return a locked chain.
716 	 * However, do not instantiate any data reference (which utilizes a
717 	 * device buffer) because we will be using direct IO via the
718 	 * logical buffer cache buffer.
719 	 */
720 	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);
721 
722 	chain = hammer2_chain_lookup(parentp, &key_dummy,
723 				     lbase, lbase,
724 				     errorp,
725 				     HAMMER2_LOOKUP_NODATA);
726 
727 	/*
728 	 * The lookup code should not return a DELETED chain to us, unless
 * it's a short file embedded in the inode.  Then it is possible for
730 	 * the lookup to return a deleted inode.
731 	 */
732 	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
733 	    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
734 		kprintf("assign physical deleted chain @ "
735 			"%016jx (%016jx.%02x) ip %016jx\n",
736 			lbase, chain->bref.data_off, chain->bref.type,
737 			ip->meta.inum);
738 		Debugger("bleh");
739 	}
740 
741 	if (chain == NULL) {
742 		/*
743 		 * We found a hole, create a new chain entry.
744 		 *
745 		 * NOTE: DATA chains are created without device backing
746 		 *	 store (nor do we want any).
747 		 */
748 		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
749 						 pblksize);
750 		*errorp |= hammer2_chain_create(parentp, &chain, NULL, ip->pmp,
751 				       HAMMER2_ENC_CHECK(ip->meta.check_algo) |
752 				       HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
753 					        lbase, HAMMER2_PBUFRADIX,
754 					        HAMMER2_BREF_TYPE_DATA,
755 					        pblksize, mtid,
756 					        dedup_off, 0);
757 		if (chain == NULL)
758 			goto failed;
759 		/*ip->delta_dcount += pblksize;*/
760 	} else if (chain->error == 0) {
761 		switch (chain->bref.type) {
762 		case HAMMER2_BREF_TYPE_INODE:
763 			/*
764 			 * The data is embedded in the inode, which requires
			 * a bit more finesse.
766 			 */
767 			*errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
768 			break;
769 		case HAMMER2_BREF_TYPE_DATA:
770 			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
771 							 pblksize);
772 			if (chain->bytes != pblksize) {
773 				*errorp |= hammer2_chain_resize(chain,
774 						     mtid, dedup_off,
775 						     pradix,
776 						     HAMMER2_MODIFY_OPTDATA);
777 				if (*errorp)
778 					break;
779 			}
780 
781 			/*
782 			 * DATA buffers must be marked modified whether the
783 			 * data is in a logical buffer or not.  We also have
784 			 * to make this call to fixup the chain data pointers
785 			 * after resizing in case this is an encrypted or
786 			 * compressed buffer.
787 			 */
788 			*errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
789 						        HAMMER2_MODIFY_OPTDATA);
790 			break;
791 		default:
792 			panic("hammer2_assign_physical: bad type");
793 			/* NOT REACHED */
794 			break;
795 		}
796 	} else {
797 		*errorp = chain->error;
798 	}
799 	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
800 failed:
801 	return (chain);
802 }
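
/*
 * Illustrative caller sketch for hammer2_assign_physical() (a summary of
 * how the helpers below use it, not additional API): callers pass the data
 * they intend to write via *datap and must re-check it on return, because
 * a successful live-dedup lookup points the chain at existing media and
 * sets *datap to NULL.
 *
 *	bdata = data;
 *	chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
 *					mtid, &bdata, errorp);
 *	if (*errorp == 0 && bdata == NULL) {
 *		(dedup hit: set bref.methods and the check code only,
 *		 do not rewrite the media)
 *	}
 */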
803 
804 /*
805  * hammer2_write_file_core()
806  *
807  * The core write function which determines which path to take
808  * depending on compression settings.  We also have to locate the
809  * related chains so we can calculate and set the check data for
810  * the blockref.
811  */
812 static
813 void
814 hammer2_write_file_core(char *data, hammer2_inode_t *ip,
815 			hammer2_chain_t **parentp,
816 			hammer2_key_t lbase, int ioflag, int pblksize,
817 			hammer2_tid_t mtid, int *errorp)
818 {
819 	hammer2_chain_t *chain;
820 	char *bdata;
821 
822 	*errorp = 0;
823 
824 	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
825 	case HAMMER2_COMP_NONE:
826 		/*
827 		 * We have to assign physical storage to the buffer
828 		 * we intend to dirty or write now to avoid deadlocks
829 		 * in the strategy code later.
830 		 *
831 		 * This can return NOOFFSET for inode-embedded data.
832 		 * The strategy code will take care of it in that case.
833 		 */
834 		bdata = data;
835 		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
836 						mtid, &bdata, errorp);
837 		if (*errorp) {
838 			/* skip modifications */
839 		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
840 			hammer2_inode_data_t *wipdata;
841 
842 			wipdata = &chain->data->ipdata;
843 			KKASSERT(wipdata->meta.op_flags &
844 				 HAMMER2_OPFLAG_DIRECTDATA);
845 			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
846 			++hammer2_iod_file_wembed;
847 		} else if (bdata == NULL) {
848 			/*
849 			 * Copy of data already present on-media.
850 			 */
851 			chain->bref.methods =
852 				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
853 				HAMMER2_ENC_CHECK(ip->meta.check_algo);
854 			hammer2_chain_setcheck(chain, data);
855 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
856 		} else {
857 			hammer2_write_bp(chain, data, ioflag, pblksize,
858 					 mtid, errorp, ip->meta.check_algo);
859 		}
860 		if (chain) {
861 			hammer2_chain_unlock(chain);
862 			hammer2_chain_drop(chain);
863 		}
864 		break;
865 	case HAMMER2_COMP_AUTOZERO:
866 		/*
867 		 * Check for zero-fill only
868 		 */
869 		hammer2_zero_check_and_write(data, ip, parentp,
870 					     lbase, ioflag, pblksize,
871 					     mtid, errorp,
872 					     ip->meta.check_algo);
873 		break;
874 	case HAMMER2_COMP_LZ4:
875 	case HAMMER2_COMP_ZLIB:
876 	default:
877 		/*
878 		 * Check for zero-fill and attempt compression.
879 		 */
880 		hammer2_compress_and_write(data, ip, parentp,
881 					   lbase, ioflag, pblksize,
882 					   mtid, errorp,
883 					   ip->meta.comp_algo,
884 					   ip->meta.check_algo);
885 		break;
886 	}
887 }
888 
889 /*
890  * Helper
891  *
 * Generic function that performs the compression in the compressed
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
895  */
896 static
897 void
898 hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
899 	hammer2_chain_t **parentp,
900 	hammer2_key_t lbase, int ioflag, int pblksize,
901 	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
902 {
903 	hammer2_chain_t *chain;
904 	int comp_size;
905 	int comp_block_size;
906 	char *comp_buffer;
907 	char *bdata;
908 
909 	/*
910 	 * An all-zeros write creates a hole unless the check code
911 	 * is disabled.  When the check code is disabled all writes
912 	 * are done in-place, including any all-zeros writes.
913 	 *
914 	 * NOTE: A snapshot will still force a copy-on-write
915 	 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
916 	 */
917 	if (check_algo != HAMMER2_CHECK_NONE &&
918 	    test_block_zeros(data, pblksize)) {
919 		zero_write(data, ip, parentp, lbase, mtid, errorp);
920 		return;
921 	}
922 
923 	/*
924 	 * Compression requested.  Try to compress the block.  We store
925 	 * the data normally if we cannot sufficiently compress it.
926 	 *
927 	 * We have a heuristic to detect files which are mostly
	 * incompressible and avoid the compression attempt in that
929 	 * case.  If the compression heuristic is turned off, we always
930 	 * try to compress.
931 	 */
932 	comp_size = 0;
933 	comp_buffer = NULL;
934 
935 	KKASSERT(pblksize / 2 <= 32768);
936 
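	/*
	 * Heuristic decode (descriptive note): ip->comp_heuristic is reset
	 * to 0 whenever a block compresses, incremented each time a block
	 * is stored uncompressed, and clamped back to 8 after exceeding
	 * 128.  Counts 0-7 always attempt compression; beyond that only
	 * counts whose low three bits are zero retry, so a mostly
	 * incompressible file re-attempts roughly one block in eight.
	 * hammer2_always_compress bypasses the heuristic entirely.
	 */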
937 	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
938 	    hammer2_always_compress) {
939 		z_stream strm_compress;
940 		int comp_level;
941 		int ret;
942 
943 		switch(HAMMER2_DEC_ALGO(comp_algo)) {
944 		case HAMMER2_COMP_LZ4:
945 			/*
946 			 * We need to prefix with the size, LZ4
947 			 * doesn't do it for us.  Add the related
948 			 * overhead.
949 			 *
950 			 * NOTE: The LZ4 code seems to assume at least an
951 			 *	 8-byte buffer size granularity and may
952 			 *	 overrun the buffer if given a 4-byte
953 			 *	 granularity.
954 			 */
955 			comp_buffer = ecalloc(1, 32768);
956 			comp_size = LZ4_compress_limitedOutput(
957 					data,
958 					&comp_buffer[sizeof(int)],
959 					pblksize,
960 					pblksize / 2 - sizeof(int64_t));
961 			*(int *)comp_buffer = comp_size;
962 			if (comp_size)
963 				comp_size += sizeof(int);
964 			break;
965 		case HAMMER2_COMP_ZLIB:
966 			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
967 			if (comp_level == 0)
968 				comp_level = 6;	/* default zlib compression */
969 			else if (comp_level < 6)
970 				comp_level = 6;
971 			else if (comp_level > 9)
972 				comp_level = 9;
973 			ret = deflateInit(&strm_compress, comp_level);
974 			if (ret != Z_OK) {
975 				kprintf("HAMMER2 ZLIB: fatal error "
976 					"on deflateInit.\n");
977 			}
978 
979 			comp_buffer = ecalloc(1, 32768);
980 			strm_compress.next_in = data;
981 			strm_compress.avail_in = pblksize;
982 			strm_compress.next_out = comp_buffer;
983 			strm_compress.avail_out = pblksize / 2;
984 			ret = deflate(&strm_compress, Z_FINISH);
985 			if (ret == Z_STREAM_END) {
986 				comp_size = pblksize / 2 -
987 					    strm_compress.avail_out;
988 			} else {
989 				comp_size = 0;
990 			}
991 			ret = deflateEnd(&strm_compress);
992 			break;
993 		default:
994 			kprintf("Error: Unknown compression method.\n");
995 			kprintf("Comp_method = %d.\n", comp_algo);
996 			break;
997 		}
998 	}
999 
1000 	if (comp_size == 0) {
1001 		/*
1002 		 * compression failed or turned off
1003 		 */
1004 		comp_block_size = pblksize;	/* safety */
1005 		if (++ip->comp_heuristic > 128)
1006 			ip->comp_heuristic = 8;
1007 	} else {
1008 		/*
1009 		 * compression succeeded
1010 		 */
1011 		ip->comp_heuristic = 0;
1012 		if (comp_size <= 1024) {
1013 			comp_block_size = 1024;
1014 		} else if (comp_size <= 2048) {
1015 			comp_block_size = 2048;
1016 		} else if (comp_size <= 4096) {
1017 			comp_block_size = 4096;
1018 		} else if (comp_size <= 8192) {
1019 			comp_block_size = 8192;
1020 		} else if (comp_size <= 16384) {
1021 			comp_block_size = 16384;
1022 		} else if (comp_size <= 32768) {
1023 			comp_block_size = 32768;
1024 		} else {
1025 			panic("hammer2: WRITE PATH: "
1026 			      "Weird comp_size value.");
1027 			/* NOT REACHED */
1028 			comp_block_size = pblksize;
1029 		}
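		/*
		 * (Descriptive note) The final branch is defensive only:
		 * comp_size is bounded by pblksize / 2, and pblksize / 2
		 * is asserted to be <= 32768 above, so a larger value
		 * implies corruption.
		 */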
1030 
1031 		/*
1032 		 * Must zero the remainder or dedup (which operates on a
1033 		 * physical block basis) will not find matches.
1034 		 */
1035 		if (comp_size < comp_block_size) {
1036 			bzero(comp_buffer + comp_size,
1037 			      comp_block_size - comp_size);
1038 		}
1039 	}
1040 
1041 	/*
1042 	 * Assign physical storage, bdata will be set to NULL if a live-dedup
1043 	 * was successful.
1044 	 */
1045 	bdata = comp_size ? comp_buffer : data;
1046 	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
1047 					mtid, &bdata, errorp);
1048 
1049 	if (*errorp) {
1050 		goto done;
1051 	}
1052 
1053 	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1054 		hammer2_inode_data_t *wipdata;
1055 
1056 		*errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
1057 		if (*errorp == 0) {
1058 			wipdata = &chain->data->ipdata;
1059 			KKASSERT(wipdata->meta.op_flags &
1060 				 HAMMER2_OPFLAG_DIRECTDATA);
1061 			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1062 			++hammer2_iod_file_wembed;
1063 		}
1064 	} else if (bdata == NULL) {
1065 		/*
1066 		 * Live deduplication, a copy of the data is already present
1067 		 * on the media.
1068 		 */
1069 		if (comp_size) {
1070 			chain->bref.methods =
1071 				HAMMER2_ENC_COMP(comp_algo) +
1072 				HAMMER2_ENC_CHECK(check_algo);
1073 		} else {
1074 			chain->bref.methods =
1075 				HAMMER2_ENC_COMP(
1076 					HAMMER2_COMP_NONE) +
1077 				HAMMER2_ENC_CHECK(check_algo);
1078 		}
1079 		bdata = comp_size ? comp_buffer : data;
1080 		hammer2_chain_setcheck(chain, bdata);
1081 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1082 	} else {
1083 		hammer2_io_t *dio;
1084 
1085 		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1086 
1087 		switch(chain->bref.type) {
1088 		case HAMMER2_BREF_TYPE_INODE:
1089 			panic("hammer2_compress_and_write: unexpected inode\n");
1090 			break;
1091 		case HAMMER2_BREF_TYPE_DATA:
1092 			/*
1093 			 * Optimize out the read-before-write
1094 			 * if possible.
1095 			 */
1096 			*errorp = hammer2_io_newnz(chain->hmp,
1097 						   chain->bref.type,
1098 						   chain->bref.data_off,
1099 						   chain->bytes,
1100 						   &dio);
1101 			if (*errorp) {
1102 				hammer2_io_brelse(&dio);
1103 				kprintf("hammer2: WRITE PATH: "
1104 					"dbp bread error\n");
1105 				break;
1106 			}
1107 			bdata = hammer2_io_data(dio, chain->bref.data_off);
1108 
1109 			/*
1110 			 * When loading the block make sure we don't
1111 			 * leave garbage after the compressed data.
1112 			 */
1113 			if (comp_size) {
1114 				chain->bref.methods =
1115 					HAMMER2_ENC_COMP(comp_algo) +
1116 					HAMMER2_ENC_CHECK(check_algo);
1117 				bcopy(comp_buffer, bdata, comp_block_size);
1118 			} else {
1119 				chain->bref.methods =
1120 					HAMMER2_ENC_COMP(
1121 						HAMMER2_COMP_NONE) +
1122 					HAMMER2_ENC_CHECK(check_algo);
1123 				bcopy(data, bdata, pblksize);
1124 			}
1125 
1126 			/*
1127 			 * The flush code doesn't calculate check codes for
1128 			 * file data (doing so can result in excessive I/O),
1129 			 * so we do it here.
1130 			 */
1131 			hammer2_chain_setcheck(chain, bdata);
1132 
1133 			/*
1134 			 * Device buffer is now valid, chain is no longer in
1135 			 * the initial state.
1136 			 *
1137 			 * (No blockref table worries with file data)
1138 			 */
1139 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1140 			hammer2_dedup_record(chain, dio, bdata);
1141 
			/* Now write the related dbp. */
1143 			if (ioflag & IO_SYNC) {
1144 				/*
1145 				 * Synchronous I/O requested.
1146 				 */
1147 				hammer2_io_bwrite(&dio);
1148 			/*
1149 			} else if ((ioflag & IO_DIRECT) &&
1150 				   loff + n == pblksize) {
1151 				hammer2_io_bdwrite(&dio);
1152 			*/
1153 			} else if (ioflag & IO_ASYNC) {
1154 				hammer2_io_bawrite(&dio);
1155 			} else {
1156 				hammer2_io_bdwrite(&dio);
1157 			}
1158 			break;
1159 		default:
1160 			panic("hammer2_compress_and_write: bad chain type %d\n",
1161 				chain->bref.type);
1162 			/* NOT REACHED */
1163 			break;
1164 		}
1165 	}
1166 done:
1167 	if (chain) {
1168 		hammer2_chain_unlock(chain);
1169 		hammer2_chain_drop(chain);
1170 	}
1171 	if (comp_buffer)
1172 		free(comp_buffer);
1173 }
1174 
1175 /*
1176  * Helper
1177  *
 * Function that performs zero-checking and writing without compression.
 * It corresponds to the default zero-checking path.
1180  */
1181 static
1182 void
1183 hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
1184 	hammer2_chain_t **parentp,
1185 	hammer2_key_t lbase, int ioflag, int pblksize,
1186 	hammer2_tid_t mtid, int *errorp,
1187 	int check_algo)
1188 {
1189 	hammer2_chain_t *chain;
1190 	char *bdata;
1191 
1192 	if (check_algo != HAMMER2_CHECK_NONE &&
1193 	    test_block_zeros(data, pblksize)) {
1194 		/*
1195 		 * An all-zeros write creates a hole unless the check code
1196 		 * is disabled.  When the check code is disabled all writes
1197 		 * are done in-place, including any all-zeros writes.
1198 		 *
1199 		 * NOTE: A snapshot will still force a copy-on-write
1200 		 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
1201 		 */
1202 		zero_write(data, ip, parentp, lbase, mtid, errorp);
1203 	} else {
1204 		/*
1205 		 * Normal write (bdata set to NULL if de-duplicated)
1206 		 */
1207 		bdata = data;
1208 		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
1209 						mtid, &bdata, errorp);
1210 		if (*errorp) {
1211 			/* do nothing */
1212 		} else if (bdata) {
1213 			hammer2_write_bp(chain, data, ioflag, pblksize,
1214 					 mtid, errorp, check_algo);
1215 		} else {
1216 			/* dedup occurred */
1217 			chain->bref.methods =
1218 				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
1219 				HAMMER2_ENC_CHECK(check_algo);
1220 			hammer2_chain_setcheck(chain, data);
1221 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1222 		}
1223 		if (chain) {
1224 			hammer2_chain_unlock(chain);
1225 			hammer2_chain_drop(chain);
1226 		}
1227 	}
1228 }
1229 
1230 /*
1231  * Helper
1232  *
 * A function to test whether a block of data contains only zeros.  It
 * returns TRUE (non-zero) if the block is all zeros.  The scan reads one
 * long at a time, which is safe because physical block sizes are always
 * a multiple of sizeof(long).
1235  */
1236 static
1237 int
1238 test_block_zeros(const char *buf, size_t bytes)
1239 {
1240 	size_t i;
1241 
1242 	for (i = 0; i < bytes; i += sizeof(long)) {
1243 		if (*(const long *)(buf + i) != 0)
1244 			return (0);
1245 	}
1246 	return (1);
1247 }
1248 
1249 /*
1250  * Helper
1251  *
1252  * Function to "write" a block that contains only zeros.
1253  */
1254 static
1255 void
1256 zero_write(char *data, hammer2_inode_t *ip,
1257 	   hammer2_chain_t **parentp,
1258 	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
1259 {
1260 	hammer2_chain_t *chain;
1261 	hammer2_key_t key_dummy;
1262 
1263 	chain = hammer2_chain_lookup(parentp, &key_dummy,
1264 				     lbase, lbase,
1265 				     errorp,
1266 				     HAMMER2_LOOKUP_NODATA);
1267 	if (chain) {
1268 		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1269 			hammer2_inode_data_t *wipdata;
1270 
1271 			if (*errorp == 0) {
1272 				*errorp = hammer2_chain_modify_ip(ip, chain,
1273 								  mtid, 0);
1274 			}
1275 			if (*errorp == 0) {
1276 				wipdata = &chain->data->ipdata;
1277 				KKASSERT(wipdata->meta.op_flags &
1278 					 HAMMER2_OPFLAG_DIRECTDATA);
1279 				bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1280 				++hammer2_iod_file_wembed;
1281 			}
1282 		} else {
1283 			/* chain->error ok for deletion */
1284 			hammer2_chain_delete(*parentp, chain,
1285 					     mtid, HAMMER2_DELETE_PERMANENT);
1286 			++hammer2_iod_file_wzero;
1287 		}
1288 		atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
1289 		hammer2_chain_unlock(chain);
1290 		hammer2_chain_drop(chain);
1291 	} else {
1292 		++hammer2_iod_file_wzero;
1293 	}
1294 }
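
/*
 * (Descriptive note) Deleting the data chain in zero_write() is what makes
 * the file sparse: a later read of that offset finds no chain, takes the
 * HAMMER2_ERROR_ENOENT path in the strategy-read completion, and returns a
 * zero-filled buffer without touching media.
 */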
1295 
1296 /*
1297  * Helper
1298  *
 * Function to write the data as-is, without performing any sort of
 * compression.  This function is used in the no-compression path and in
 * the default zero-checking path.
1302  */
1303 static
1304 void
1305 hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
1306 		 int pblksize,
1307 		 hammer2_tid_t mtid, int *errorp, int check_algo)
1308 {
1309 	hammer2_inode_data_t *wipdata;
1310 	hammer2_io_t *dio;
1311 	char *bdata;
1312 	int error;
1313 
1314 	error = 0;	/* XXX TODO below */
1315 
1316 	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1317 
1318 	switch(chain->bref.type) {
1319 	case HAMMER2_BREF_TYPE_INODE:
1320 		wipdata = &chain->data->ipdata;
1321 		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
1322 		bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1323 		error = 0;
1324 		++hammer2_iod_file_wembed;
1325 		break;
1326 	case HAMMER2_BREF_TYPE_DATA:
1327 		error = hammer2_io_newnz(chain->hmp,
1328 					 chain->bref.type,
1329 					 chain->bref.data_off,
1330 					 chain->bytes, &dio);
1331 		if (error) {
1332 			hammer2_io_bqrelse(&dio);
1333 			kprintf("hammer2: WRITE PATH: "
1334 				"dbp bread error\n");
1335 			break;
1336 		}
1337 		bdata = hammer2_io_data(dio, chain->bref.data_off);
1338 
1339 		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
1340 				      HAMMER2_ENC_CHECK(check_algo);
1341 		bcopy(data, bdata, chain->bytes);
1342 
1343 		/*
1344 		 * The flush code doesn't calculate check codes for
1345 		 * file data (doing so can result in excessive I/O),
1346 		 * so we do it here.
1347 		 */
1348 		hammer2_chain_setcheck(chain, bdata);
1349 
1350 		/*
1351 		 * Device buffer is now valid, chain is no longer in
1352 		 * the initial state.
1353 		 *
1354 		 * (No blockref table worries with file data)
1355 		 */
1356 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1357 		hammer2_dedup_record(chain, dio, bdata);
1358 
1359 		if (ioflag & IO_SYNC) {
1360 			/*
1361 			 * Synchronous I/O requested.
1362 			 */
1363 			hammer2_io_bwrite(&dio);
1364 		/*
1365 		} else if ((ioflag & IO_DIRECT) &&
1366 			   loff + n == pblksize) {
1367 			hammer2_io_bdwrite(&dio);
1368 		*/
1369 		} else if (ioflag & IO_ASYNC) {
1370 			hammer2_io_bawrite(&dio);
1371 		} else {
1372 			hammer2_io_bdwrite(&dio);
1373 		}
1374 		break;
1375 	default:
1376 		panic("hammer2_write_bp: bad chain type %d\n",
1377 		      chain->bref.type);
1378 		/* NOT REACHED */
1379 		error = 0;
1380 		break;
1381 	}
1382 	*errorp = error;
1383 }
1384 
1385 /*
1386  * LIVE DEDUP HEURISTICS
1387  *
1388  * Record media and crc information for possible dedup operation.  Note
1389  * that the dedup mask bits must also be set in the related DIO for a dedup
1390  * to be fully validated (which is handled in the freemap allocation code).
1391  *
1392  * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
1393  *	    All fields must be loaded into locals and validated.
1394  *
1395  * WARNING! Should only be used for file data and directory entries,
1396  *	    hammer2_chain_modify() only checks for the dedup case on data
1397  *	    chains.  Also, dedup data can only be recorded for committed
1398  *	    chains (so NOT strategy writes which can undergo further
1399  *	    modification after the fact!).
1400  */
1401 void
1402 hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
1403 		     const char *data)
1404 {
1405 	hammer2_dev_t *hmp;
1406 	hammer2_dedup_t *dedup;
1407 	uint64_t crc;
1408 	uint64_t mask;
1409 	int best = 0;
1410 	int i;
1411 	int dticks;
1412 
1413 	/*
1414 	 * We can only record a dedup if we have media data to test against.
1415 	 * If dedup is not enabled, return early, which allows a chain to
1416 	 * remain marked MODIFIED (which might have benefits in special
1417 	 * situations, though typically it does not).
1418 	 */
1419 	if (hammer2_dedup_enable == 0)
1420 		return;
1421 	if (dio == NULL) {
1422 		dio = chain->dio;
1423 		if (dio == NULL)
1424 			return;
1425 	}
1426 
1427 	hmp = chain->hmp;
1428 
1429 	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
1430 	case HAMMER2_CHECK_ISCSI32:
1431 		/*
1432 		 * XXX use the built-in crc (the dedup lookup sequencing
1433 		 * needs to be fixed so the check code is already present
1434 		 * when dedup_lookup is called)
1435 		 */
1436 #if 0
1437 		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
1438 #endif
1439 		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
1440 		break;
1441 	case HAMMER2_CHECK_XXHASH64:
1442 		crc = chain->bref.check.xxhash64.value;
1443 		break;
1444 	case HAMMER2_CHECK_SHA192:
1445 		/*
1446 		 * XXX use the built-in crc (the dedup lookup sequencing
1447 		 * needs to be fixed so the check code is already present
1448 		 * when dedup_lookup is called)
1449 		 */
1450 #if 0
1451 		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
1452 		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
1453 		      ((uint64_t *)chain->bref.check.sha192.data)[2];
1454 #endif
1455 		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
1456 		break;
1457 	default:
1458 		/*
1459 		 * Cannot dedup without a check code
1460 		 *
1461 		 * NOTE: In particular, CHECK_NONE allows a sector to be
1462 		 *	 overwritten without copy-on-write, recording
1463 		 *	 a dedup block for a CHECK_NONE object would be
1464 		 *	 a disaster!
1465 		 */
1466 		return;
1467 	}
1468 
1469 	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);
1470 
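	/*
	 * (Descriptive note) The heuristic table behaves like a small
	 * 4-way set-associative cache: the crc selects a 4-entry bucket
	 * below, and the slot overwritten is either an existing entry with
	 * the same crc or the stalest entry in the bucket (the dticks test
	 * also copes with tick wrap-around).
	 */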
1471 	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
1472 	for (i = 0; i < 4; ++i) {
1473 		if (dedup[i].data_crc == crc) {
1474 			best = i;
1475 			break;
1476 		}
1477 		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
1478 		if (dticks < 0 || dticks > hz * 60 * 30)
1479 			best = i;
1480 	}
1481 	dedup += best;
1482 	if (hammer2_debug & 0x40000) {
1483 		kprintf("REC %04x %016jx %016jx\n",
1484 			(int)(dedup - hmp->heur_dedup),
1485 			crc,
1486 			chain->bref.data_off);
1487 	}
1488 	dedup->ticks = ticks;
1489 	dedup->data_off = chain->bref.data_off;
1490 	dedup->data_crc = crc;
1491 
1492 	/*
1493 	 * Set the valid bits for the dedup only after we know the data
1494 	 * buffer has been updated.  The alloc bits were set (and the valid
1495 	 * bits cleared) when the media was allocated.
1496 	 *
	 * This is done in two stages because the bulkfree code can race
1498 	 * the gap between allocation and data population.  Both masks must
1499 	 * be set before a bcmp/dedup operation is able to use the block.
1500 	 */
1501 	mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
1502 	atomic_set_64(&dio->dedup_valid, mask);
1503 
1504 #if 0
1505 	/*
1506 	 * XXX removed. MODIFIED is an integral part of the flush code,
1507 	 * lets not just clear it
1508 	 */
1509 	/*
1510 	 * Once we record the dedup the chain must be marked clean to
1511 	 * prevent reuse of the underlying block.   Remember that this
1512 	 * write occurs when the buffer cache is flushed (i.e. on sync(),
1513 	 * fsync(), filesystem periodic sync, or when the kernel needs to
1514 	 * flush a buffer), and not whenever the user write()s.
1515 	 */
1516 	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
1517 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1518 		atomic_add_long(&hammer2_count_modified_chains, -1);
1519 		if (chain->pmp)
1520 			hammer2_pfs_memory_wakeup(chain->pmp, -1);
1521 	}
1522 #endif
1523 }
1524 
1525 static
1526 hammer2_off_t
1527 hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
1528 {
1529 	hammer2_dedup_t *dedup;
1530 	hammer2_io_t *dio;
1531 	hammer2_off_t off;
1532 	uint64_t crc;
1533 	uint64_t mask;
1534 	char *data;
1535 	char *dtmp;
1536 	int i;
1537 
1538 	if (hammer2_dedup_enable == 0)
1539 		return 0;
1540 	data = *datap;
1541 	if (data == NULL)
1542 		return 0;
1543 
1544 	/*
1545 	 * XXX use the built-in crc (the dedup lookup sequencing
1546 	 * needs to be fixed so the check code is already present
1547 	 * when dedup_lookup is called)
1548 	 */
1549 	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
1550 	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
1551 
1552 	if (hammer2_debug & 0x40000) {
1553 		kprintf("LOC %04x/4 %016jx\n",
1554 			(int)(dedup - hmp->heur_dedup),
1555 			crc);
1556 	}
1557 
1558 	for (i = 0; i < 4; ++i) {
1559 		off = dedup[i].data_off;
1560 		cpu_ccfence();
1561 		if (dedup[i].data_crc != crc)
1562 			continue;
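		/*
		 * The low bits of a HAMMER2 data offset encode the block
		 * radix; the check below rejects candidates whose recorded
		 * size differs from the size we are writing.
		 */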
1563 		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
1564 			continue;
1565 		dio = hammer2_io_getquick(hmp, off, pblksize);
1566 		if (dio) {
1567 			dtmp = hammer2_io_data(dio, off),
1568 			mask = hammer2_dedup_mask(dio, off, pblksize);
1569 			if ((dio->dedup_alloc & mask) == mask &&
1570 			    (dio->dedup_valid & mask) == mask &&
1571 			    bcmp(data, dtmp, pblksize) == 0) {
1572 				if (hammer2_debug & 0x40000) {
1573 					kprintf("DEDUP SUCCESS %016jx\n",
1574 						(intmax_t)off);
1575 				}
1576 				hammer2_io_putblk(&dio);
1577 				*datap = NULL;
1578 				dedup[i].ticks = ticks;   /* update use */
1579 				atomic_add_long(&hammer2_iod_file_wdedup,
1580 						pblksize);
1581 
1582 				return off;		/* RETURN */
1583 			}
1584 			hammer2_io_putblk(&dio);
1585 		}
1586 	}
1587 	return 0;
1588 }
1589 
1590 /*
 * Poof.  Races are ok: if someone gets in and reuses a dedup offset
 * before or while we are clearing it, they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
1595  *
1596  * XXX ok, well, not really sure races are ok but going to run with it
1597  *     for the moment.
1598  */
1599 void
1600 hammer2_dedup_clear(hammer2_dev_t *hmp)
1601 {
1602 	int i;
1603 
1604 	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
1605 		hmp->heur_dedup[i].data_off = 0;
1606 		hmp->heur_dedup[i].ticks = ticks - 1;
1607 	}
1608 }
1609