/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block detection, check codes, and buffer cache
 * operations for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * Except for the transaction init (which should normally not block),
 * we essentially run the strategy operation asynchronously via an XOP.
 *
 * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
 *	    to the current cpu.
 *
 * XXX This isn't supposed to be able to deadlock against vfs_sync's
 *     vfsync() calls, but it has in the past when multiple flushes are
 *     queued.
 *
 * XXX We currently terminate the transaction once we get a quorum, otherwise
 *     the frontend can stall, but this can leave the remaining nodes with
 *     a potential flush conflict.  We need to delay flushes on those nodes
 *     until running transactions complete separately from the normal
 *     transaction sequencing.  FIXME TODO.
 */
static void hammer2_strategy_xop_read(hammer2_thread_t *thr,
				hammer2_xop_t *arg);
static void hammer2_strategy_xop_write(hammer2_thread_t *thr,
				hammer2_xop_t *arg);
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *chain,
				char *data, struct bio *bio);

static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
			char **datap, int pblksize);

int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}

/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/
/*
 * Callback used in the read path when a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	      crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	compressed_size = *(const int *)data;
	KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression. "
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
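
/*
 * For reference, the on-media layout assumed by the LZ4 callback above
 * (implied by the size checks it performs) is a native-endian length
 * prefix followed by the compressed payload:
 *
 *	+----------------------+------------------------------------+
 *	| int compressed_size  | LZ4 payload (compressed_size bytes) |
 *	+----------------------+------------------------------------+
 *
 * compressed_size excludes the prefix itself, so it must be at most
 * (bytes - sizeof(int)).  The write path in hammer2_compress_and_write()
 * produces exactly this framing.
 */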

/*
 * Callback used in the read path when a block is compressed with ZLIB.
 * It is almost identical to the LZ4 callback, so in theory the two could
 * be unified, but we didn't want to change the bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
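
/*
 * Note on the single-call inflate() above: with Z_FINISH, all input
 * available up front, and an output buffer large enough for the whole
 * decompressed block (avail_out is the full logical buffer size), zlib
 * is expected to finish in one call and return Z_STREAM_END.  Anything
 * else indicates a truncated or corrupt stream, which is why the
 * callback zeros the buffer rather than retrying.
 */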

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bior");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_read);
	/* asynchronous completion */

	return(0);
}

/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
static
void
hammer2_strategy_xop_read(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct buf *bp;
	int error;

	/*
	 * Note that we can race completion of the bio supplied by
	 * the front-end so we cannot access it until we determine
	 * that we are the ones finishing it up.
	 */
	lbase = xop->lbase;

	/*
	 * This is difficult to optimize.  The logical buffer might be
	 * partially dirty (contain dummy zero-fill pages), which would
	 * mess up our crc calculation if we were to try a direct read.
	 * So for now we always double-buffer through the underlying
	 * storage.
	 *
	 * If not for the above problem we could conditionalize on
	 * (1) 64KB buffer, (2) one chain (not multi-master) and
	 * (3) !hammer2_double_buffer, and issue a direct read into the
	 * logical buffer.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &error,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		if (chain)
			error = chain->error;
	} else {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend.  First-to-complete.  bio is only
	 * valid if we are determined to be the ones able to complete
	 * the operation.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}
	bio = xop->bio;
	bp = bio->bio_buf;
	bkvasync(bp);

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 *
	 * Also note that even for compressed data we would rather the
	 * kernel cache/swapcache device buffers more and (decompressed)
	 * logical buffers less, since that will significantly improve
	 * the amount of end-user data that can be cached.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		chain = xop->head.cluster.focus;
		hammer2_strategy_read_completion(chain, (char *)chain->data,
						 xop->bio);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_ENOENT:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		kprintf("strategy_xop_read: error %08x loff=%016jx\n",
			error, bp->b_loffset);
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}

static
void
hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data,
				 struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Copy from in-memory inode structure.
		 */
		bcopy(((hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.  Release the
		 * chain (try to free it) when done.  The data is still
		 * cached by both the buffer cache in front and the
		 * block device behind us.  This leaves more room in the
		 * LRU chain cache for meta-data chains which we really
		 * want to retain.
		 *
		 * NOTE: Deduplication cannot be safely recorded for
		 *	 records without a check code.
		 */
		hammer2_dedup_record(chain, NULL, data);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);

		/*
		 * Decompression and copy.
		 */
		switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, chain->bytes,
							bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, chain->bytes,
							 bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(chain->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, chain->bytes);
			if (chain->bytes < bp->b_bcount) {
				bzero(bp->b_data + chain->bytes,
				      bp->b_bcount - chain->bytes);
			}
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read: unknown bref type");
	}
}

/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);
	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
				    HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2biow");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_write);
	/* asynchronous completion */

	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

	return(0);
}
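
/*
 * Note that the write path above is throttled rather than fully
 * asynchronous: hammer2_lwinprog_ref() accounts the logical write as
 * in-progress and hammer2_lwinprog_wait() blocks the frontend while
 * more than hammer2_flush_pipe logical writes remain queued to the
 * backend, providing backpressure against runaway dirty-buffer
 * accumulation.  The matching hammer2_lwinprog_drop() is issued by
 * hammer2_strategy_xop_write() when the operation completes.
 */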

/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thread
 * scratch buffer.
 */
static
void
hammer2_strategy_xop_write(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct buf *bp;
	int error;
	int lblksize;
	int pblksize;
	hammer2_off_t bio_offset;
	char *bio_data;

	/*
	 * We can only access the bp/bio if the frontend has not yet
	 * completed.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_sh(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	lbase = xop->lbase;
	bio = xop->bio;			/* ephemeral */
	bp = bio->bio_buf;		/* ephemeral */
	ip = xop->head.ip1;		/* retained by ref */
	bio_offset = bio->bio_offset;
	bio_data = thr->scratch;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);
	bkvasync(bp);
	bcopy(bp->b_data, bio_data, lblksize);

	hammer2_mtx_unlock(&xop->lock);
	bp = NULL;	/* safety, illegal to access after unlock */
	bio = NULL;	/* safety, illegal to access after unlock */

	/*
	 * Actual operation
	 */
	parent = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bio_data, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);

	/*
	 * Try to complete the operation on behalf of the front-end.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	if (error == HAMMER2_ERROR_EINPROGRESS) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has completed.
	 */
	xop->finished = 1;
	hammer2_mtx_unlock(&xop->lock);

	bio = xop->bio;		/* now owned by us */
	bp = bio->bio_buf;	/* now owned by us */

	if (error == HAMMER2_ERROR_ENOENT || error == 0) {
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		kprintf("strategy_xop_write: error %d loff=%016jx\n",
			error, bp->b_loffset);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_lwinprog_drop(ip->pmp);
	hammer2_trans_done(ip->pmp);
}

/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}

/*
 * Assign physical storage at (cparent, lbase), returning a suitable chain
 * and setting *errorp appropriately.
 *
 * If no error occurs, the returned chain will be in a modified state.
 *
 * If an error occurs, the returned chain may or may not be NULL.  If
 * not-null any chain->error (if not 0) will also be rolled up into *errorp.
 * So the caller only needs to test *errorp.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);

	/*
	 * The lookup code should not return a DELETED chain to us, unless
	 * it's a short file embedded in the inode.  Then it is possible for
	 * the lookup to return a deleted inode.
	 */
	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
		kprintf("assign physical deleted chain @ "
			"%016jx (%016jx.%02x) ip %016jx\n",
			lbase, chain->bref.data_off, chain->bref.type,
			ip->meta.inum);
		Debugger("bleh");
	}

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp |= hammer2_chain_create(parentp, &chain,
					        ip->pmp,
				       HAMMER2_ENC_CHECK(ip->meta.check_algo) |
				       HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
					        lbase, HAMMER2_PBUFRADIX,
					        HAMMER2_BREF_TYPE_DATA,
					        pblksize, mtid,
					        dedup_off, 0);
		if (chain == NULL)
			goto failed;
		/*ip->delta_dcount += pblksize;*/
	} else if (chain->error == 0) {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * a bit more finesse.
			 */
			*errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				*errorp |= hammer2_chain_resize(chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
				if (*errorp)
					break;
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			*errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
						        HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	} else {
		*errorp = chain->error;
	}
failed:
	return (chain);
}
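
/*
 * Summary of the cases handled by hammer2_assign_physical() above:
 *
 *	lookup result		action
 *	-------------------	-------------------------------------------
 *	no chain (hole)		hammer2_chain_create() a new DATA chain,
 *				possibly at a deduplicated offset
 *	INODE chain		hammer2_chain_modify_ip(); data is embedded
 *				directly in the inode
 *	DATA chain		hammer2_chain_resize() if pblksize changed,
 *				then hammer2_chain_modify()
 *
 * In the dedup cases *datap is set to NULL so callers know not to copy
 * the data again; they only need to set the blockref methods and the
 * check code.
 */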

/*
 * hammer2_write_file_core() - hammer2_write_thread() helper
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(char *data, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *bdata;

	*errorp = 0;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* skip modifications */
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (bdata == NULL) {
			/*
			 * Copy of data already present on-media.
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, data);
		} else {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(data, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(data, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}

/*
 * Helper
 *
 * Generic function that performs the compression in the compressed
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *bdata;

	/*
	 * An all-zeros write creates a hole unless the check code
	 * is disabled.  When the check code is disabled all writes
	 * are done in-place, including any all-zeros writes.
	 *
	 * NOTE: A snapshot will still force a copy-on-write
	 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
	 */
	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		zero_write(data, ip, parentp, lbase, mtid, errorp);
		return;
	}

	/*
	 * Compression requested.  Try to compress the block.  We store
	 * the data normally if we cannot sufficiently compress it.
	 *
	 * We have a heuristic to detect files which are mostly
	 * incompressible and avoid the compression attempt in that
	 * case.  If the compression heuristic is turned off, we always
	 * try to compress.
	 */
	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);

	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
	    hammer2_always_compress) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 *
			 * NOTE: The LZ4 code seems to assume at least an
			 *	 8-byte buffer size granularity and may
			 *	 overrun the buffer if given a 4-byte
			 *	 granularity.
			 */
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int64_t));
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}
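
	/*
	 * Example of the sizing above: a 65536-byte logical block that
	 * deflates to 3000 bytes is rounded up to a 4096-byte physical
	 * block (comp_block_size = 4096) and the trailing 1096 bytes are
	 * zeroed so dedup comparisons on the physical block are stable.
	 * A block that only compresses to, say, 40000 bytes never gets
	 * here with comp_size != 0 because both compressors are capped
	 * at pblksize / 2 of output and report failure beyond that.
	 */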

	/*
	 * Assign physical storage, data will be set to NULL if a live-dedup
	 * was successful.
	 */
	bdata = comp_size ? comp_buffer : data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &bdata, errorp);

	if (*errorp) {
		goto done;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		*errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
		if (*errorp == 0) {
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		}
	} else if (bdata == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on the media.
		 */
		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		hammer2_io_t *dio;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_write_bp: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.type,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 */
			hammer2_chain_setcheck(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
			hammer2_dedup_record(chain, dio, bdata);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
				chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}

/*
 * Helper
 *
 * Function that performs zero-checking and writing without compression;
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp,
	int check_algo)
{
	hammer2_chain_t *chain;
	char *bdata;

	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		/*
		 * An all-zeros write creates a hole unless the check code
		 * is disabled.  When the check code is disabled all writes
		 * are done in-place, including any all-zeros writes.
		 *
		 * NOTE: A snapshot will still force a copy-on-write
		 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
		 */
		zero_write(data, ip, parentp, lbase, mtid, errorp);
	} else {
		/*
		 * Normal write
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* do nothing */
		} else if (bdata) {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} else {
			/* dedup occurred */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
			hammer2_chain_setcheck(chain, data);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}

/*
 * Helper
 *
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}
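
/*
 * Note that test_block_zeros() scans a long at a time and thus assumes
 * the buffer is long-aligned and that bytes is a multiple of
 * sizeof(long).  Callers pass pblksize, which is a power of two of at
 * least HAMMER2_ALLOC_MIN (see the KKASSERT in hammer2_assign_physical),
 * so the assumption holds on this path.
 */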

/*
 * Helper
 *
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(char *data, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			if (*errorp == 0) {
				*errorp = hammer2_chain_modify_ip(ip, chain,
								  mtid, 0);
			}
			if (*errorp == 0) {
				wipdata = &chain->data->ipdata;
				KKASSERT(wipdata->meta.op_flags &
					 HAMMER2_OPFLAG_DIRECTDATA);
				bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
				++hammer2_iod_file_wembed;
			}
		} else {
			/* chain->error ok for deletion */
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}

/*
 * Helper
 *
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the no-compression path and in
 * the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		error = 0;
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.type,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 */
		hammer2_chain_setcheck(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		hammer2_dedup_record(chain, dio, bdata);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		error = 0;
		break;
	}
	*errorp = error;
}

/*
 * LIVE DEDUP HEURISTICS
 *
 * Record media and crc information for possible dedup operation.  Note
 * that the dedup mask bits must also be set in the related DIO for a dedup
 * to be fully validated (which is handled in the freemap allocation code).
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 *
 * WARNING! Should only be used for file data and directory entries,
 *	    hammer2_chain_modify() only checks for the dedup case on data
 *	    chains.  Also, dedup data can only be recorded for committed
 *	    chains (so NOT strategy writes which can undergo further
 *	    modification after the fact!).
 */
void
hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio, char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	uint64_t crc;
	uint64_t mask;
	int best = 0;
	int i;
	int dticks;

	/*
	 * We can only record a dedup if we have media data to test against.
	 * If dedup is not enabled, return early, which allows a chain to
	 * remain marked MODIFIED (which might have benefits in special
	 * situations, though typically it does not).
	 */
	if (hammer2_dedup_enable == 0)
		return;
	if (dio == NULL) {
		dio = chain->dio;
		if (dio == NULL)
			return;
	}

	hmp = chain->hmp;

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_ISCSI32:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	case HAMMER2_CHECK_XXHASH64:
		crc = chain->bref.check.xxhash64.value;
		break;
	case HAMMER2_CHECK_SHA192:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[2];
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	default:
		/*
		 * Cannot dedup without a check code
		 *
		 * NOTE: In particular, CHECK_NONE allows a sector to be
		 *	 overwritten without copy-on-write, recording
		 *	 a dedup block for a CHECK_NONE object would be
		 *	 a disaster!
		 */
		return;
	}

	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);

	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %016jx %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc,
			chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;

	/*
	 * Set the valid bits for the dedup only after we know the data
	 * buffer has been updated.  The alloc bits were set (and the valid
	 * bits cleared) when the media was allocated.
	 *
	 * This is done in two stages because the bulkfree code can race
	 * the gap between allocation and data population.  Both masks must
	 * be set before a bcmp/dedup operation is able to use the block.
	 */
	mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
	atomic_set_64(&dio->dedup_valid, mask);

#if 0
	/*
	 * XXX removed. MODIFIED is an integral part of the flush code,
	 * lets not just clear it
	 */
	/*
	 * Once we record the dedup the chain must be marked clean to
	 * prevent reuse of the underlying block.   Remember that this
	 * write occurs when the buffer cache is flushed (i.e. on sync(),
	 * fsync(), filesystem periodic sync, or when the kernel needs to
	 * flush a buffer), and not whenever the user write()s.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);
	}
#endif
}
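
/*
 * Layout of the heuristic table used above: heur_dedup[] behaves as a
 * 4-way set-associative cache.  The crc selects a bucket of four
 * consecutive entries via (crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)), and
 * the recording loop prefers an exact crc match; otherwise it evicts
 * the oldest entry in the bucket (or one stale by more than 30 minutes
 * worth of ticks, which also handles tick wrap).
 */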

static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint64_t crc;
	uint64_t mask;
	char *data;
	char *dtmp;
	int i;

	if (hammer2_dedup_enable == 0)
		return 0;
	data = *datap;
	if (data == NULL)
		return 0;

	/*
	 * XXX use the built-in crc (the dedup lookup sequencing
	 * needs to be fixed so the check code is already present
	 * when dedup_lookup is called)
	 */
	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc);
	}

	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		cpu_ccfence();
		if (dedup[i].data_crc != crc)
			continue;
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio) {
			dtmp = hammer2_io_data(dio, off);
			mask = hammer2_dedup_mask(dio, off, pblksize);
			if ((dio->dedup_alloc & mask) == mask &&
			    (dio->dedup_valid & mask) == mask &&
			    bcmp(data, dtmp, pblksize) == 0) {
				if (hammer2_debug & 0x40000) {
					kprintf("DEDUP SUCCESS %016jx\n",
						(intmax_t)off);
				}
				hammer2_io_putblk(&dio);
				*datap = NULL;
				dedup[i].ticks = ticks;   /* update use */
				atomic_add_long(&hammer2_iod_file_wdedup,
						pblksize);

				return off;		/* RETURN */
			}
			hammer2_io_putblk(&dio);
		}
	}
	return 0;
}
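
/*
 * For a lookup to return a dedup offset, all of the following must hold:
 * the recorded crc matches the XXH64 of the candidate data, the recorded
 * block's radix matches pblksize, the DIO can be acquired, both the
 * dedup_alloc and dedup_valid mask bits cover the block, and a full
 * bcmp() of the media data matches.  The heuristic table is advisory
 * only; the bcmp() is what actually guarantees correctness.
 */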

/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 *     for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}