1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * This module handles low level logical file I/O (strategy) which backs
38  * the logical buffer cache.
39  *
40  * [De]compression, zero-block, check codes, and buffer cache operations
41  * for file data are handled here.
42  *
43  * Live dedup makes its home here as well.
44  */
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/buf.h>
50 #include <sys/proc.h>
51 #include <sys/mount.h>
52 #include <sys/vnode.h>
53 #include <sys/objcache.h>
54 
55 #include "hammer2.h"
56 #include "hammer2_lz4.h"
57 
58 #include "zlib/hammer2_zlib.h"
59 
60 struct objcache *cache_buffer_read;
61 struct objcache *cache_buffer_write;
62 
63 /*
64  * Strategy code (async logical file buffer I/O from system)
65  *
66  * Except for the transaction init (which should normally not block),
67  * we essentially run the strategy operation asynchronously via an XOP.
68  *
69  * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
70  *	    to the current cpu.
71  *
72  * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
73  *     calls but it has in the past when multiple flushes are queued.
74  *
75  * XXX We currently terminate the transaction once we get a quorum, otherwise
76  *     the frontend can stall, but this can leave the remaining nodes with
77  *     a potential flush conflict.  We need to delay flushes on those nodes
78  *     until running transactions complete separately from the normal
79  *     transaction sequencing.  FIXME TODO.
80  */
81 static int hammer2_strategy_read(struct vop_strategy_args *ap);
82 static int hammer2_strategy_write(struct vop_strategy_args *ap);
83 static void hammer2_strategy_read_completion(hammer2_chain_t *focus,
84 				const char *data, struct bio *bio);
85 
86 static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
87 			char **datap, int pblksize);
88 
89 int
90 hammer2_vop_strategy(struct vop_strategy_args *ap)
91 {
92 	struct bio *biop;
93 	struct buf *bp;
94 	int error;
95 
96 	biop = ap->a_bio;
97 	bp = biop->bio_buf;
98 
99 	switch(bp->b_cmd) {
100 	case BUF_CMD_READ:
101 		error = hammer2_strategy_read(ap);
102 		break;
103 	case BUF_CMD_WRITE:
104 		error = hammer2_strategy_write(ap);
105 		break;
106 	default:
107 		bp->b_error = error = EINVAL;
108 		bp->b_flags |= B_ERROR;
109 		biodone(biop);
110 		break;
111 	}
112 	return (error);
113 }
114 
115 /*
116  * Return the largest contiguous physical disk range for the logical
117  * request, in bytes.
118  *
119  * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
120  *
121  * Basically disabled, the logical buffer write thread has to deal with
122  * buffers one-at-a-time.  Note that this should not prevent cluster_read()
123  * from reading ahead; it simply prevents it from trying to form a single
124  * cluster buffer for the logical request.  H2 already uses 64KB buffers!
125  */
126 int
127 hammer2_vop_bmap(struct vop_bmap_args *ap)
128 {
129 	*ap->a_doffsetp = NOOFFSET;
130 	if (ap->a_runp)
131 		*ap->a_runp = 0;
132 	if (ap->a_runb)
133 		*ap->a_runb = 0;
134 	return (EOPNOTSUPP);
135 }
136 
137 /****************************************************************************
138  *				READ SUPPORT				    *
139  ****************************************************************************/
140 /*
141  * Callback used in the read path when a block is compressed with LZ4.
142  */
143 static
144 void
145 hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
146 {
147 	struct buf *bp;
148 	char *compressed_buffer;
149 	int compressed_size;
150 	int result;
151 
152 	bp = bio->bio_buf;
153 
154 #if 0
155 	if (bio->bio_caller_info2.index &&
156 	    bio->bio_caller_info1.uvalue32 !=
157 	    crc32(bp->b_data, bp->b_bufsize)) { /* return error */ }
158 #endif
159 
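	/*
	 * The on-media block begins with a sizeof(int) length prefix
	 * followed by the LZ4 payload.  Decompress into an objcache
	 * scratch buffer, then copy the result into the logical buffer
	 * and zero-fill any remainder.
	 */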
160 	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
161 	compressed_size = *(const int *)data;
162 	KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));
163 
164 	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
165 	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
166 				     compressed_buffer,
167 				     compressed_size,
168 				     bp->b_bufsize);
169 	if (result < 0) {
170 		kprintf("READ PATH: Error during decompression. "
171 			"bio %016jx/%d\n",
172 			(intmax_t)bio->bio_offset, bytes);
173 		/* make sure it isn't random garbage */
174 		bzero(compressed_buffer, bp->b_bufsize);
175 	}
176 	KKASSERT(result <= bp->b_bufsize);
177 	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
178 	if (result < bp->b_bufsize)
179 		bzero(bp->b_data + result, bp->b_bufsize - result);
180 	objcache_put(cache_buffer_read, compressed_buffer);
181 	bp->b_resid = 0;
182 	bp->b_flags |= B_AGE;
183 }
184 
185 /*
186  * Callback used in the read path when a block is compressed with ZLIB.
187  * It is almost identical to the LZ4 callback; in theory the two could be
188  * unified, but we didn't want to make changes to the bio structure for that.
189  */
190 static
191 void
192 hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
193 {
194 	struct buf *bp;
195 	char *compressed_buffer;
196 	z_stream strm_decompress;
197 	int result;
198 	int ret;
199 
200 	bp = bio->bio_buf;
201 
202 	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
203 	strm_decompress.avail_in = 0;
204 	strm_decompress.next_in = Z_NULL;
205 
206 	ret = inflateInit(&strm_decompress);
207 
208 	if (ret != Z_OK)
209 		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");
210 
211 	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
212 	strm_decompress.next_in = __DECONST(char *, data);
213 
214 	/* XXX supply proper size, subset of device bp */
215 	strm_decompress.avail_in = bytes;
216 	strm_decompress.next_out = compressed_buffer;
217 	strm_decompress.avail_out = bp->b_bufsize;
218 
219 	ret = inflate(&strm_decompress, Z_FINISH);
220 	if (ret != Z_STREAM_END) {
221 		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
222 		bzero(compressed_buffer, bp->b_bufsize);
223 	}
224 	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
225 	result = bp->b_bufsize - strm_decompress.avail_out;
226 	if (result < bp->b_bufsize)
227 		bzero(bp->b_data + result, strm_decompress.avail_out);
228 	objcache_put(cache_buffer_read, compressed_buffer);
229 	ret = inflateEnd(&strm_decompress);
230 
231 	bp->b_resid = 0;
232 	bp->b_flags |= B_AGE;
233 }
234 
235 /*
236  * Logical buffer I/O, async read.
237  */
238 static
239 int
240 hammer2_strategy_read(struct vop_strategy_args *ap)
241 {
242 	hammer2_xop_strategy_t *xop;
243 	struct bio *bio;
244 	hammer2_inode_t *ip;
245 	hammer2_key_t lbase;
246 
247 	bio = ap->a_bio;
248 	ip = VTOI(ap->a_vp);
249 
250 	lbase = bio->bio_offset;
251 	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
252 
253 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
254 	xop->finished = 0;
255 	xop->bio = bio;
256 	xop->lbase = lbase;
257 	hammer2_mtx_init(&xop->lock, "h2bior");
258 	hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);
259 	/* asynchronous completion */
260 
261 	return(0);
262 }
263 
264 /*
265  * Per-node XOP (threaded).  Do a synchronous lookup of the chain and
266  * its data.  The frontend is asynchronous, so we are also responsible
267  * for racing to terminate the frontend.
268  */
269 void
270 hammer2_xop_strategy_read(hammer2_xop_t *arg, void *scratch, int clindex)
271 {
272 	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
273 	hammer2_chain_t *parent;
274 	hammer2_chain_t *chain;
275 	hammer2_chain_t *focus;
276 	hammer2_key_t key_dummy;
277 	hammer2_key_t lbase;
278 	struct bio *bio;
279 	struct buf *bp;
280 	const char *data;
281 	int error;
282 
283 	/*
284 	 * Note that we can race completion of the bio supplied by
285 	 * the front-end so we cannot access it until we determine
286 	 * that we are the ones finishing it up.
287 	 */
288 	lbase = xop->lbase;
289 
290 	/*
291 	 * This is difficult to optimize.  The logical buffer might be
292 	 * partially dirty (contain dummy zero-fill pages), which would
293 	 * mess up our crc calculation if we were to try a direct read.
294 	 * So for now we always double-buffer through the underlying
295 	 * storage.
296 	 *
297 	 * If not for the above problem we could conditionalize on
298 	 * (1) 64KB buffer, (2) one chain (not multi-master) and
299 	 * (3) !hammer2_double_buffer, and issue a direct read into the
300 	 * logical buffer.
301 	 */
302 	parent = hammer2_inode_chain(xop->head.ip1, clindex,
303 				     HAMMER2_RESOLVE_ALWAYS |
304 				     HAMMER2_RESOLVE_SHARED);
305 	if (parent) {
306 		chain = hammer2_chain_lookup(&parent, &key_dummy,
307 					     lbase, lbase,
308 					     &error,
309 					     HAMMER2_LOOKUP_ALWAYS |
310 					     HAMMER2_LOOKUP_SHARED);
311 		if (chain)
312 			error = chain->error;
313 	} else {
314 		error = HAMMER2_ERROR_EIO;
315 		chain = NULL;
316 	}
317 	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
318 	if (chain) {
319 		hammer2_chain_unlock(chain);
320 		hammer2_chain_drop(chain);
321 	}
322 	if (parent) {
323 		hammer2_chain_unlock(parent);
324 		hammer2_chain_drop(parent);
325 	}
326 	chain = NULL;	/* safety */
327 	parent = NULL;	/* safety */
328 
329 	/*
330 	 * Race to finish the frontend.  First-to-complete wins.  The bio is
331 	 * only valid if we turn out to be the one able to complete
332 	 * the operation.
333 	 */
334 	if (xop->finished)
335 		return;
336 	hammer2_mtx_ex(&xop->lock);
337 	if (xop->finished) {
338 		hammer2_mtx_unlock(&xop->lock);
339 		return;
340 	}
341 	bio = xop->bio;
342 	bp = bio->bio_buf;
343 	bkvasync(bp);
344 
345 	/*
346 	 * Async operation has not completed and we now own the lock.
347 	 * Determine if we can complete the operation by issuing the
348 	 * frontend collection non-blocking.
349 	 *
350 	 * H2 double-buffers the data; setting B_NOTMETA on the logical
351 	 * buffer hints to the OS that the logical buffer should not be
352 	 * swapcached (since the device buffer can be).
353 	 *
354 	 * Also note that even for compressed data we would rather the
355 	 * kernel cache/swapcache device buffers more and (decompressed)
356 	 * logical buffers less, since that will significantly improve
357 	 * the amount of end-user data that can be cached.
358 	 *
359 	 * NOTE: The chain->data for xop->head.cluster.focus will be
360 	 *	 synchronized to the current cpu by xop_collect(),
361 	 *	 but other chains in the cluster might not be.
362 	 */
363 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);
364 
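	/*
	 * error == 0			data collected, complete the read
	 * HAMMER2_ERROR_ENOENT		no backing store, return zero-fill
	 * HAMMER2_ERROR_EINPROGRESS	collection incomplete, another
	 *				node's thread will finish the bio
	 * anything else		hard error, fail the bio
	 */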
365 	switch(error) {
366 	case 0:
367 		xop->finished = 1;
368 		hammer2_mtx_unlock(&xop->lock);
369 		bp->b_flags |= B_NOTMETA;
370 		focus = xop->head.cluster.focus;
371 		data = hammer2_xop_gdata(&xop->head)->buf;
372 		hammer2_strategy_read_completion(focus, data, xop->bio);
373 		hammer2_xop_pdata(&xop->head);
374 		biodone(bio);
375 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
376 		break;
377 	case HAMMER2_ERROR_ENOENT:
378 		xop->finished = 1;
379 		hammer2_mtx_unlock(&xop->lock);
380 		bp->b_flags |= B_NOTMETA;
381 		bp->b_resid = 0;
382 		bp->b_error = 0;
383 		bzero(bp->b_data, bp->b_bcount);
384 		biodone(bio);
385 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
386 		break;
387 	case HAMMER2_ERROR_EINPROGRESS:
388 		hammer2_mtx_unlock(&xop->lock);
389 		break;
390 	default:
391 		kprintf("xop_strategy_read: error %08x loff=%016jx\n",
392 			error, (intmax_t)bp->b_loffset);
393 		xop->finished = 1;
394 		hammer2_mtx_unlock(&xop->lock);
395 		bp->b_flags |= B_ERROR;
396 		bp->b_error = EIO;
397 		biodone(bio);
398 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
399 		break;
400 	}
401 }
402 
403 static
404 void
405 hammer2_strategy_read_completion(hammer2_chain_t *focus, const char *data,
406 				 struct bio *bio)
407 {
408 	struct buf *bp = bio->bio_buf;
409 
410 	if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
411 		/*
412 		 * Copy from in-memory inode structure.
413 		 */
414 		bcopy(((const hammer2_inode_data_t *)data)->u.data,
415 		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
416 		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
417 		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
418 		bp->b_resid = 0;
419 		bp->b_error = 0;
420 	} else if (focus->bref.type == HAMMER2_BREF_TYPE_DATA) {
421 		/*
422 		 * Data is on-media, record for live dedup.  Release the
423 		 * chain (try to free it) when done.  The data is still
424 		 * cached by both the buffer cache in front and the
425 		 * block device behind us.
426 		 *
427 		 * NOTE: Deduplication cannot be safely recorded for
428 		 *	 records without a check code.
429 		 */
430 		hammer2_dedup_record(focus, NULL, data);
431 		atomic_set_int(&focus->flags, HAMMER2_CHAIN_RELEASE);
432 
433 		/*
434 		 * Decompression and copy.
435 		 */
436 		switch (HAMMER2_DEC_COMP(focus->bref.methods)) {
437 		case HAMMER2_COMP_LZ4:
438 			hammer2_decompress_LZ4_callback(data, focus->bytes,
439 							bio);
440 			/* b_resid set by call */
441 			break;
442 		case HAMMER2_COMP_ZLIB:
443 			hammer2_decompress_ZLIB_callback(data, focus->bytes,
444 							 bio);
445 			/* b_resid set by call */
446 			break;
447 		case HAMMER2_COMP_NONE:
448 			KKASSERT(focus->bytes <= bp->b_bcount);
449 			bcopy(data, bp->b_data, focus->bytes);
450 			if (focus->bytes < bp->b_bcount) {
451 				bzero(bp->b_data + focus->bytes,
452 				      bp->b_bcount - focus->bytes);
453 			}
454 			bp->b_resid = 0;
455 			bp->b_error = 0;
456 			break;
457 		default:
458 			panic("hammer2_strategy_read_completion: "
459 			      "unknown compression type");
460 		}
461 	} else {
462 		panic("hammer2_strategy_read_completion: unknown bref type");
463 	}
464 }
465 
466 /****************************************************************************
467  *				WRITE SUPPORT				    *
468  ****************************************************************************/
469 
470 /*
471  * Functions for compression in threads,
472  * from hammer2_vnops.c
473  */
474 static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
475 				hammer2_chain_t **parentp,
476 				hammer2_key_t lbase, int ioflag, int pblksize,
477 				hammer2_tid_t mtid, int *errorp);
478 static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
479 				hammer2_chain_t **parentp,
480 				hammer2_key_t lbase, int ioflag, int pblksize,
481 				hammer2_tid_t mtid, int *errorp,
482 				int comp_algo, int check_algo);
483 static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
484 				hammer2_chain_t **parentp,
485 				hammer2_key_t lbase, int ioflag, int pblksize,
486 				hammer2_tid_t mtid, int *errorp,
487 				int check_algo);
488 static int test_block_zeros(const char *buf, size_t bytes);
489 static void zero_write(char *data, hammer2_inode_t *ip,
490 				hammer2_chain_t **parentp,
491 				hammer2_key_t lbase,
492 				hammer2_tid_t mtid, int *errorp);
493 static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
494 				int ioflag, int pblksize,
495 				hammer2_tid_t mtid, int *errorp,
496 				int check_algo);
497 
498 int
499 hammer2_strategy_write(struct vop_strategy_args *ap)
500 {
501 	hammer2_xop_strategy_t *xop;
502 	hammer2_pfs_t *pmp;
503 	struct bio *bio;
504 	hammer2_inode_t *ip;
505 
506 	bio = ap->a_bio;
507 	ip = VTOI(ap->a_vp);
508 	pmp = ip->pmp;
509 
510 	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
511 	hammer2_lwinprog_ref(pmp);
512 	hammer2_trans_assert_strategy(pmp);
513 	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);
514 
515 	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
516 				    HAMMER2_XOP_STRATEGY);
517 	xop->finished = 0;
518 	xop->bio = bio;
519 	xop->lbase = bio->bio_offset;
520 	hammer2_mtx_init(&xop->lock, "h2biow");
521 	hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
522 	/* asynchronous completion */
523 
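	/*
	 * Throttle the frontend: the logical-writes-in-progress reference
	 * taken above is waited on here against hammer2_flush_pipe so
	 * writes do not pile up faster than the backend can flush them.
	 */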
524 	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
525 
526 	return(0);
527 }
528 
529 /*
530  * Per-node XOP (threaded).  Write the logical buffer to the media.
531  *
532  * This is a bit problematic because there may be multiple targets and
533  * any of them may be able to release the bp.  In addition, if our
534  * particular target is offline we don't want to block the bp (and thus
535  * the frontend).  To accomplish this we copy the data to the per-thread
536  * scratch buffer.
537  */
538 void
539 hammer2_xop_strategy_write(hammer2_xop_t *arg, void *scratch, int clindex)
540 {
541 	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
542 	hammer2_chain_t *parent;
543 	hammer2_key_t lbase;
544 	hammer2_inode_t *ip;
545 	struct bio *bio;
546 	struct buf *bp;
547 	int error;
548 	int lblksize;
549 	int pblksize;
550 	char *bio_data;
551 
552 	/*
553 	 * We can only access the bp/bio if the frontend has not yet
554 	 * completed.
555 	 */
556 	if (xop->finished)
557 		return;
558 	hammer2_mtx_sh(&xop->lock);
559 	if (xop->finished) {
560 		hammer2_mtx_unlock(&xop->lock);
561 		return;
562 	}
563 
564 	lbase = xop->lbase;
565 	bio = xop->bio;			/* ephemeral */
566 	bp = bio->bio_buf;		/* ephemeral */
567 	ip = xop->head.ip1;		/* retained by ref */
568 	bio_data = scratch;
569 
570 	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */
571 
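	/*
	 * Copy the logical buffer into the per-thread scratch buffer so
	 * the frontend bp can be completed (possibly by another node)
	 * while this node continues with the write.
	 */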
572 	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
573 	pblksize = hammer2_calc_physical(ip, lbase);
574 	bkvasync(bp);
575 	KKASSERT(lblksize <= MAXPHYS);
576 	bcopy(bp->b_data, bio_data, lblksize);
577 
578 	hammer2_mtx_unlock(&xop->lock);
579 	bp = NULL;	/* safety, illegal to access after unlock */
580 	bio = NULL;	/* safety, illegal to access after unlock */
581 
582 	/*
583 	 * Actual operation
584 	 */
585 	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
586 	hammer2_write_file_core(bio_data, ip, &parent,
587 				lbase, IO_ASYNC, pblksize,
588 				xop->head.mtid, &error);
589 	if (parent) {
590 		hammer2_chain_unlock(parent);
591 		hammer2_chain_drop(parent);
592 		parent = NULL;	/* safety */
593 	}
594 	hammer2_xop_feed(&xop->head, NULL, clindex, error);
595 
596 	/*
597 	 * Try to complete the operation on behalf of the front-end.
598 	 */
599 	if (xop->finished)
600 		return;
601 	hammer2_mtx_ex(&xop->lock);
602 	if (xop->finished) {
603 		hammer2_mtx_unlock(&xop->lock);
604 		return;
605 	}
606 
607 	/*
608 	 * Async operation has not completed and we now own the lock.
609 	 * Determine if we can complete the operation by issuing the
610 	 * frontend collection non-blocking.
611 	 *
612 	 * H2 double-buffers the data; setting B_NOTMETA on the logical
613 	 * buffer hints to the OS that the logical buffer should not be
614 	 * swapcached (since the device buffer can be).
615 	 */
616 	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);
617 
618 	if (error == HAMMER2_ERROR_EINPROGRESS) {
619 		hammer2_mtx_unlock(&xop->lock);
620 		return;
621 	}
622 
623 	/*
624 	 * Async operation has completed.
625 	 */
626 	xop->finished = 1;
627 	hammer2_mtx_unlock(&xop->lock);
628 
629 	bio = xop->bio;		/* now owned by us */
630 	bp = bio->bio_buf;	/* now owned by us */
631 
632 	if (error == HAMMER2_ERROR_ENOENT || error == 0) {
633 		bp->b_flags |= B_NOTMETA;
634 		bp->b_resid = 0;
635 		bp->b_error = 0;
636 		biodone(bio);
637 	} else {
638 		kprintf("xop_strategy_write: error %d loff=%016jx\n",
639 			error, (intmax_t)bp->b_loffset);
640 		bp->b_flags |= B_ERROR;
641 		bp->b_error = EIO;
642 		biodone(bio);
643 	}
644 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
645 	hammer2_trans_assert_strategy(ip->pmp);
646 	hammer2_lwinprog_drop(ip->pmp);
647 	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE);
648 }
649 
650 /*
651  * Wait for pending I/O to complete
652  */
653 void
654 hammer2_bioq_sync(hammer2_pfs_t *pmp)
655 {
656 	hammer2_lwinprog_wait(pmp, 0);
657 }
658 
659 /*
660  * Assign physical storage at (cparent, lbase), returning a suitable chain
661  * and setting *errorp appropriately.
662  *
663  * If no error occurs, the returned chain will be in a modified state.
664  *
665  * If an error occurs, the returned chain may or may not be NULL.  If
666  * not-null any chain->error (if not 0) will also be rolled up into *errorp.
667  * So the caller only needs to test *errorp.
668  *
669  * cparent can wind up being anything.
670  *
671  * If datap is not NULL, *datap points to the real data we intend to write.
672  * If we can dedup the storage location we set *datap to NULL to indicate
673  * to the caller that a dedup occurred.
674  *
675  * NOTE: Special case for data embedded in inode.
676  */
677 static
678 hammer2_chain_t *
679 hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
680 			hammer2_key_t lbase, int pblksize,
681 			hammer2_tid_t mtid, char **datap, int *errorp)
682 {
683 	hammer2_chain_t *chain;
684 	hammer2_key_t key_dummy;
685 	hammer2_off_t dedup_off;
686 	int pradix = hammer2_getradix(pblksize);
687 
688 	/*
689 	 * Locate the chain associated with lbase, return a locked chain.
690 	 * However, do not instantiate any data reference (which utilizes a
691 	 * device buffer) because we will be using direct IO via the
692 	 * logical buffer cache buffer.
693 	 */
694 	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);
695 
696 	chain = hammer2_chain_lookup(parentp, &key_dummy,
697 				     lbase, lbase,
698 				     errorp,
699 				     HAMMER2_LOOKUP_NODATA);
700 
701 	/*
702 	 * The lookup code should not return a DELETED chain to us, unless
703  * it's a short file embedded in the inode.  Then it is possible for
704 	 * the lookup to return a deleted inode.
705 	 */
706 	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
707 	    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
708 		kprintf("assign physical deleted chain @ "
709 			"%016jx (%016jx.%02x) ip %016jx\n",
710 			lbase, chain->bref.data_off, chain->bref.type,
711 			ip->meta.inum);
712 		Debugger("bleh");
713 	}
714 
715 	if (chain == NULL) {
716 		/*
717 		 * We found a hole, create a new chain entry.
718 		 *
719 		 * NOTE: DATA chains are created without device backing
720 		 *	 store (nor do we want any).
721 		 */
722 		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
723 						 pblksize);
724 		*errorp |= hammer2_chain_create(parentp, &chain, NULL, ip->pmp,
725 				       HAMMER2_ENC_CHECK(ip->meta.check_algo) |
726 				       HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
727 					        lbase, HAMMER2_PBUFRADIX,
728 					        HAMMER2_BREF_TYPE_DATA,
729 					        pblksize, mtid,
730 					        dedup_off, 0);
731 		if (chain == NULL)
732 			goto failed;
733 		/*ip->delta_dcount += pblksize;*/
734 	} else if (chain->error == 0) {
735 		switch (chain->bref.type) {
736 		case HAMMER2_BREF_TYPE_INODE:
737 			/*
738 			 * The data is embedded in the inode, which requires
739 			 * a bit more finesse.
740 			 */
741 			*errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
742 			break;
743 		case HAMMER2_BREF_TYPE_DATA:
744 			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
745 							 pblksize);
746 			if (chain->bytes != pblksize) {
747 				*errorp |= hammer2_chain_resize(chain,
748 						     mtid, dedup_off,
749 						     pradix,
750 						     HAMMER2_MODIFY_OPTDATA);
751 				if (*errorp)
752 					break;
753 			}
754 
755 			/*
756 			 * DATA buffers must be marked modified whether the
757 			 * data is in a logical buffer or not.  We also have
758 			 * to make this call to fixup the chain data pointers
759 			 * after resizing in case this is an encrypted or
760 			 * compressed buffer.
761 			 */
762 			*errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
763 						        HAMMER2_MODIFY_OPTDATA);
764 			break;
765 		default:
766 			panic("hammer2_assign_physical: bad type");
767 			/* NOT REACHED */
768 			break;
769 		}
770 	} else {
771 		*errorp = chain->error;
772 	}
773 	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
774 failed:
775 	return (chain);
776 }
777 
778 /*
779  * hammer2_write_file_core()
780  *
781  * The core write function which determines which path to take
782  * depending on compression settings.  We also have to locate the
783  * related chains so we can calculate and set the check data for
784  * the blockref.
785  */
786 static
787 void
788 hammer2_write_file_core(char *data, hammer2_inode_t *ip,
789 			hammer2_chain_t **parentp,
790 			hammer2_key_t lbase, int ioflag, int pblksize,
791 			hammer2_tid_t mtid, int *errorp)
792 {
793 	hammer2_chain_t *chain;
794 	char *bdata;
795 
796 	*errorp = 0;
797 
798 	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
799 	case HAMMER2_COMP_NONE:
800 		/*
801 		 * We have to assign physical storage to the buffer
802 		 * we intend to dirty or write now to avoid deadlocks
803 		 * in the strategy code later.
804 		 *
805 		 * This can return NOOFFSET for inode-embedded data.
806 		 * The strategy code will take care of it in that case.
807 		 */
808 		bdata = data;
809 		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
810 						mtid, &bdata, errorp);
811 		if (*errorp) {
812 			/* skip modifications */
813 		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
814 			hammer2_inode_data_t *wipdata;
815 
816 			wipdata = &chain->data->ipdata;
817 			KKASSERT(wipdata->meta.op_flags &
818 				 HAMMER2_OPFLAG_DIRECTDATA);
819 			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
820 			++hammer2_iod_file_wembed;
821 		} else if (bdata == NULL) {
822 			/*
823 			 * Copy of data already present on-media.
824 			 */
825 			chain->bref.methods =
826 				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
827 				HAMMER2_ENC_CHECK(ip->meta.check_algo);
828 			hammer2_chain_setcheck(chain, data);
829 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
830 		} else {
831 			hammer2_write_bp(chain, data, ioflag, pblksize,
832 					 mtid, errorp, ip->meta.check_algo);
833 		}
834 		if (chain) {
835 			hammer2_chain_unlock(chain);
836 			hammer2_chain_drop(chain);
837 		}
838 		break;
839 	case HAMMER2_COMP_AUTOZERO:
840 		/*
841 		 * Check for zero-fill only
842 		 */
843 		hammer2_zero_check_and_write(data, ip, parentp,
844 					     lbase, ioflag, pblksize,
845 					     mtid, errorp,
846 					     ip->meta.check_algo);
847 		break;
848 	case HAMMER2_COMP_LZ4:
849 	case HAMMER2_COMP_ZLIB:
850 	default:
851 		/*
852 		 * Check for zero-fill and attempt compression.
853 		 */
854 		hammer2_compress_and_write(data, ip, parentp,
855 					   lbase, ioflag, pblksize,
856 					   mtid, errorp,
857 					   ip->meta.comp_algo,
858 					   ip->meta.check_algo);
859 		break;
860 	}
861 }
862 
863 /*
864  * Helper
865  *
866  * Generic function that will perform the compression in compression
867  * write path. The compression algorithm is determined by the settings
868  * obtained from inode.
869  */
870 static
871 void
872 hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
873 	hammer2_chain_t **parentp,
874 	hammer2_key_t lbase, int ioflag, int pblksize,
875 	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
876 {
877 	hammer2_chain_t *chain;
878 	int comp_size;
879 	int comp_block_size;
880 	char *comp_buffer;
881 	char *bdata;
882 
883 	/*
884 	 * An all-zeros write creates a hole unless the check code
885 	 * is disabled.  When the check code is disabled all writes
886 	 * are done in-place, including any all-zeros writes.
887 	 *
888 	 * NOTE: A snapshot will still force a copy-on-write
889 	 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
890 	 */
891 	if (check_algo != HAMMER2_CHECK_NONE &&
892 	    test_block_zeros(data, pblksize)) {
893 		zero_write(data, ip, parentp, lbase, mtid, errorp);
894 		return;
895 	}
896 
897 	/*
898 	 * Compression requested.  Try to compress the block.  We store
899 	 * the data normally if we cannot sufficiently compress it.
900 	 *
901 	 * We have a heuristic to detect files which are mostly
902 	 * incompressible and avoid the compression attempt in that
903 	 * case.  If the compression heuristic is turned off, we always
904 	 * try to compress.
905 	 */
906 	comp_size = 0;
907 	comp_buffer = NULL;
908 
909 	KKASSERT(pblksize / 2 <= 32768);
910 
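	/*
	 * ip->comp_heuristic counts consecutive failed compression
	 * attempts (reset to 0 on success, wrapped back to 8 once it
	 * exceeds 128).  After the first 8 failures compression is only
	 * retried on every 8th block, unless hammer2_always_compress
	 * overrides the heuristic.
	 */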
911 	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
912 	    hammer2_always_compress) {
913 		z_stream strm_compress;
914 		int comp_level;
915 		int ret;
916 
917 		switch(HAMMER2_DEC_ALGO(comp_algo)) {
918 		case HAMMER2_COMP_LZ4:
919 			/*
920 			 * We need to prefix with the size, LZ4
921 			 * doesn't do it for us.  Add the related
922 			 * overhead.
923 			 *
924 			 * NOTE: The LZ4 code seems to assume at least an
925 			 *	 8-byte buffer size granularity and may
926 			 *	 overrun the buffer if given a 4-byte
927 			 *	 granularity.
928 			 */
929 			comp_buffer = objcache_get(cache_buffer_write,
930 						   M_INTWAIT);
931 			comp_size = LZ4_compress_limitedOutput(
932 					data,
933 					&comp_buffer[sizeof(int)],
934 					pblksize,
935 					pblksize / 2 - sizeof(int64_t));
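			/*
			 * A zero return means the data did not fit within
			 * the output budget (i.e. could not be compressed
			 * to roughly half size); otherwise account for the
			 * length prefix stored in front of the payload.
			 */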
936 			*(int *)comp_buffer = comp_size;
937 			if (comp_size)
938 				comp_size += sizeof(int);
939 			break;
940 		case HAMMER2_COMP_ZLIB:
941 			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
942 			if (comp_level == 0)
943 				comp_level = 6;	/* default zlib compression */
944 			else if (comp_level < 6)
945 				comp_level = 6;
946 			else if (comp_level > 9)
947 				comp_level = 9;
948 			ret = deflateInit(&strm_compress, comp_level);
949 			if (ret != Z_OK) {
950 				kprintf("HAMMER2 ZLIB: fatal error "
951 					"on deflateInit.\n");
952 			}
953 
954 			comp_buffer = objcache_get(cache_buffer_write,
955 						   M_INTWAIT);
956 			strm_compress.next_in = data;
957 			strm_compress.avail_in = pblksize;
958 			strm_compress.next_out = comp_buffer;
959 			strm_compress.avail_out = pblksize / 2;
960 			ret = deflate(&strm_compress, Z_FINISH);
961 			if (ret == Z_STREAM_END) {
962 				comp_size = pblksize / 2 -
963 					    strm_compress.avail_out;
964 			} else {
965 				comp_size = 0;
966 			}
967 			ret = deflateEnd(&strm_compress);
968 			break;
969 		default:
970 			kprintf("Error: Unknown compression method.\n");
971 			kprintf("Comp_method = %d.\n", comp_algo);
972 			break;
973 		}
974 	}
975 
976 	if (comp_size == 0) {
977 		/*
978 		 * compression failed or turned off
979 		 */
980 		comp_block_size = pblksize;	/* safety */
981 		if (++ip->comp_heuristic > 128)
982 			ip->comp_heuristic = 8;
983 	} else {
984 		/*
985 		 * compression succeeded
986 		 */
987 		ip->comp_heuristic = 0;
988 		if (comp_size <= 1024) {
989 			comp_block_size = 1024;
990 		} else if (comp_size <= 2048) {
991 			comp_block_size = 2048;
992 		} else if (comp_size <= 4096) {
993 			comp_block_size = 4096;
994 		} else if (comp_size <= 8192) {
995 			comp_block_size = 8192;
996 		} else if (comp_size <= 16384) {
997 			comp_block_size = 16384;
998 		} else if (comp_size <= 32768) {
999 			comp_block_size = 32768;
1000 		} else {
1001 			panic("hammer2: WRITE PATH: "
1002 			      "Weird comp_size value.");
1003 			/* NOT REACHED */
1004 			comp_block_size = pblksize;
1005 		}
1006 
1007 		/*
1008 		 * Must zero the remainder or dedup (which operates on a
1009 		 * physical block basis) will not find matches.
1010 		 */
1011 		if (comp_size < comp_block_size) {
1012 			bzero(comp_buffer + comp_size,
1013 			      comp_block_size - comp_size);
1014 		}
1015 	}
1016 
1017 	/*
1018 	 * Assign physical storage, bdata will be set to NULL if a live-dedup
1019 	 * was successful.
1020 	 */
1021 	bdata = comp_size ? comp_buffer : data;
1022 	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
1023 					mtid, &bdata, errorp);
1024 
1025 	if (*errorp) {
1026 		goto done;
1027 	}
1028 
1029 	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1030 		hammer2_inode_data_t *wipdata;
1031 
1032 		*errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
1033 		if (*errorp == 0) {
1034 			wipdata = &chain->data->ipdata;
1035 			KKASSERT(wipdata->meta.op_flags &
1036 				 HAMMER2_OPFLAG_DIRECTDATA);
1037 			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1038 			++hammer2_iod_file_wembed;
1039 		}
1040 	} else if (bdata == NULL) {
1041 		/*
1042 		 * Live deduplication, a copy of the data is already present
1043 		 * on the media.
1044 		 */
1045 		if (comp_size) {
1046 			chain->bref.methods =
1047 				HAMMER2_ENC_COMP(comp_algo) +
1048 				HAMMER2_ENC_CHECK(check_algo);
1049 		} else {
1050 			chain->bref.methods =
1051 				HAMMER2_ENC_COMP(
1052 					HAMMER2_COMP_NONE) +
1053 				HAMMER2_ENC_CHECK(check_algo);
1054 		}
1055 		bdata = comp_size ? comp_buffer : data;
1056 		hammer2_chain_setcheck(chain, bdata);
1057 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1058 	} else {
1059 		hammer2_io_t *dio;
1060 
1061 		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1062 
1063 		switch(chain->bref.type) {
1064 		case HAMMER2_BREF_TYPE_INODE:
1065 			panic("hammer2_compress_and_write: unexpected inode\n");
1066 			break;
1067 		case HAMMER2_BREF_TYPE_DATA:
1068 			/*
1069 			 * Optimize out the read-before-write
1070 			 * if possible.
1071 			 */
1072 			*errorp = hammer2_io_newnz(chain->hmp,
1073 						   chain->bref.type,
1074 						   chain->bref.data_off,
1075 						   chain->bytes,
1076 						   &dio);
1077 			if (*errorp) {
1078 				hammer2_io_brelse(&dio);
1079 				kprintf("hammer2: WRITE PATH: "
1080 					"dbp bread error\n");
1081 				break;
1082 			}
1083 			bdata = hammer2_io_data(dio, chain->bref.data_off);
1084 
1085 			/*
1086 			 * When loading the block make sure we don't
1087 			 * leave garbage after the compressed data.
1088 			 */
1089 			if (comp_size) {
1090 				chain->bref.methods =
1091 					HAMMER2_ENC_COMP(comp_algo) +
1092 					HAMMER2_ENC_CHECK(check_algo);
1093 				bcopy(comp_buffer, bdata, comp_block_size);
1094 			} else {
1095 				chain->bref.methods =
1096 					HAMMER2_ENC_COMP(
1097 						HAMMER2_COMP_NONE) +
1098 					HAMMER2_ENC_CHECK(check_algo);
1099 				bcopy(data, bdata, pblksize);
1100 			}
1101 
1102 			/*
1103 			 * The flush code doesn't calculate check codes for
1104 			 * file data (doing so can result in excessive I/O),
1105 			 * so we do it here.
1106 			 */
1107 			hammer2_chain_setcheck(chain, bdata);
1108 
1109 			/*
1110 			 * Device buffer is now valid, chain is no longer in
1111 			 * the initial state.
1112 			 *
1113 			 * (No blockref table worries with file data)
1114 			 */
1115 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1116 			hammer2_dedup_record(chain, dio, bdata);
1117 
1118 			/* Now write the related bdp. */
1119 			if (ioflag & IO_SYNC) {
1120 				/*
1121 				 * Synchronous I/O requested.
1122 				 */
1123 				hammer2_io_bwrite(&dio);
1124 			/*
1125 			} else if ((ioflag & IO_DIRECT) &&
1126 				   loff + n == pblksize) {
1127 				hammer2_io_bdwrite(&dio);
1128 			*/
1129 			} else if (ioflag & IO_ASYNC) {
1130 				hammer2_io_bawrite(&dio);
1131 			} else {
1132 				hammer2_io_bdwrite(&dio);
1133 			}
1134 			break;
1135 		default:
1136 			panic("hammer2_compress_and_write: bad chain type %d\n",
1137 				chain->bref.type);
1138 			/* NOT REACHED */
1139 			break;
1140 		}
1141 	}
1142 done:
1143 	if (chain) {
1144 		hammer2_chain_unlock(chain);
1145 		hammer2_chain_drop(chain);
1146 	}
1147 	if (comp_buffer)
1148 		objcache_put(cache_buffer_write, comp_buffer);
1149 }
1150 
1151 /*
1152  * Helper
1153  *
1154  * Function that performs zero-checking and writing without compression;
1155  * it corresponds to the default zero-checking path.
1156  */
1157 static
1158 void
1159 hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
1160 	hammer2_chain_t **parentp,
1161 	hammer2_key_t lbase, int ioflag, int pblksize,
1162 	hammer2_tid_t mtid, int *errorp,
1163 	int check_algo)
1164 {
1165 	hammer2_chain_t *chain;
1166 	char *bdata;
1167 
1168 	if (check_algo != HAMMER2_CHECK_NONE &&
1169 	    test_block_zeros(data, pblksize)) {
1170 		/*
1171 		 * An all-zeros write creates a hole unless the check code
1172 		 * is disabled.  When the check code is disabled all writes
1173 		 * are done in-place, including any all-zeros writes.
1174 		 *
1175 		 * NOTE: A snapshot will still force a copy-on-write
1176 		 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
1177 		 */
1178 		zero_write(data, ip, parentp, lbase, mtid, errorp);
1179 	} else {
1180 		/*
1181 		 * Normal write (bdata set to NULL if de-duplicated)
1182 		 */
1183 		bdata = data;
1184 		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
1185 						mtid, &bdata, errorp);
1186 		if (*errorp) {
1187 			/* do nothing */
1188 		} else if (bdata) {
1189 			hammer2_write_bp(chain, data, ioflag, pblksize,
1190 					 mtid, errorp, check_algo);
1191 		} else {
1192 			/* dedup occurred */
1193 			chain->bref.methods =
1194 				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
1195 				HAMMER2_ENC_CHECK(check_algo);
1196 			hammer2_chain_setcheck(chain, data);
1197 			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1198 		}
1199 		if (chain) {
1200 			hammer2_chain_unlock(chain);
1201 			hammer2_chain_drop(chain);
1202 		}
1203 	}
1204 }
1205 
1206 /*
1207  * Helper
1208  *
1209  * A function to test whether a block of data contains only zeros.
1210  * Returns TRUE (non-zero) if the block is all zeros.
1211  */
1212 static
1213 int
1214 test_block_zeros(const char *buf, size_t bytes)
1215 {
1216 	size_t i;
1217 
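	/*
	 * Callers pass power-of-2 physical block sizes, so scanning one
	 * long at a time requires no partial-word tail check.
	 */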
1218 	for (i = 0; i < bytes; i += sizeof(long)) {
1219 		if (*(const long *)(buf + i) != 0)
1220 			return (0);
1221 	}
1222 	return (1);
1223 }
1224 
1225 /*
1226  * Helper
1227  *
1228  * Function to "write" a block that contains only zeros.
1229  */
1230 static
1231 void
1232 zero_write(char *data, hammer2_inode_t *ip,
1233 	   hammer2_chain_t **parentp,
1234 	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
1235 {
1236 	hammer2_chain_t *chain;
1237 	hammer2_key_t key_dummy;
1238 
1239 	chain = hammer2_chain_lookup(parentp, &key_dummy,
1240 				     lbase, lbase,
1241 				     errorp,
1242 				     HAMMER2_LOOKUP_NODATA);
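	/*
	 * If a chain exists it is either inode-embedded data (zero the
	 * embedded area in place) or a regular data chain (delete it,
	 * turning the block into a hole).  If no chain exists the block
	 * is already a hole and only the zero-write statistic is bumped.
	 */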
1243 	if (chain) {
1244 		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1245 			hammer2_inode_data_t *wipdata;
1246 
1247 			if (*errorp == 0) {
1248 				*errorp = hammer2_chain_modify_ip(ip, chain,
1249 								  mtid, 0);
1250 			}
1251 			if (*errorp == 0) {
1252 				wipdata = &chain->data->ipdata;
1253 				KKASSERT(wipdata->meta.op_flags &
1254 					 HAMMER2_OPFLAG_DIRECTDATA);
1255 				bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1256 				++hammer2_iod_file_wembed;
1257 			}
1258 		} else {
1259 			/* chain->error ok for deletion */
1260 			hammer2_chain_delete(*parentp, chain,
1261 					     mtid, HAMMER2_DELETE_PERMANENT);
1262 			++hammer2_iod_file_wzero;
1263 		}
1264 		atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
1265 		hammer2_chain_unlock(chain);
1266 		hammer2_chain_drop(chain);
1267 	} else {
1268 		++hammer2_iod_file_wzero;
1269 	}
1270 }
1271 
1272 /*
1273  * Helper
1274  *
1275  * Function to write the data as-is, without performing any sort of
1276  * compression.  This function is used in the no-compression path and
1277  * in the default zero-checking path.
1278  */
1279 static
1280 void
1281 hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
1282 		 int pblksize,
1283 		 hammer2_tid_t mtid, int *errorp, int check_algo)
1284 {
1285 	hammer2_inode_data_t *wipdata;
1286 	hammer2_io_t *dio;
1287 	char *bdata;
1288 	int error;
1289 
1290 	error = 0;	/* XXX TODO below */
1291 
1292 	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1293 
1294 	switch(chain->bref.type) {
1295 	case HAMMER2_BREF_TYPE_INODE:
1296 		wipdata = &chain->data->ipdata;
1297 		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
1298 		bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1299 		error = 0;
1300 		++hammer2_iod_file_wembed;
1301 		break;
1302 	case HAMMER2_BREF_TYPE_DATA:
1303 		error = hammer2_io_newnz(chain->hmp,
1304 					 chain->bref.type,
1305 					 chain->bref.data_off,
1306 					 chain->bytes, &dio);
1307 		if (error) {
1308 			hammer2_io_bqrelse(&dio);
1309 			kprintf("hammer2: WRITE PATH: "
1310 				"dbp bread error\n");
1311 			break;
1312 		}
1313 		bdata = hammer2_io_data(dio, chain->bref.data_off);
1314 
1315 		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
1316 				      HAMMER2_ENC_CHECK(check_algo);
1317 		bcopy(data, bdata, chain->bytes);
1318 
1319 		/*
1320 		 * The flush code doesn't calculate check codes for
1321 		 * file data (doing so can result in excessive I/O),
1322 		 * so we do it here.
1323 		 */
1324 		hammer2_chain_setcheck(chain, bdata);
1325 
1326 		/*
1327 		 * Device buffer is now valid, chain is no longer in
1328 		 * the initial state.
1329 		 *
1330 		 * (No blockref table worries with file data)
1331 		 */
1332 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1333 		hammer2_dedup_record(chain, dio, bdata);
1334 
1335 		if (ioflag & IO_SYNC) {
1336 			/*
1337 			 * Synchronous I/O requested.
1338 			 */
1339 			hammer2_io_bwrite(&dio);
1340 		/*
1341 		} else if ((ioflag & IO_DIRECT) &&
1342 			   loff + n == pblksize) {
1343 			hammer2_io_bdwrite(&dio);
1344 		*/
1345 		} else if (ioflag & IO_ASYNC) {
1346 			hammer2_io_bawrite(&dio);
1347 		} else {
1348 			hammer2_io_bdwrite(&dio);
1349 		}
1350 		break;
1351 	default:
1352 		panic("hammer2_write_bp: bad chain type %d\n",
1353 		      chain->bref.type);
1354 		/* NOT REACHED */
1355 		error = 0;
1356 		break;
1357 	}
1358 	*errorp = error;
1359 }
1360 
1361 /*
1362  * LIVE DEDUP HEURISTICS
1363  *
1364  * Record media and crc information for possible dedup operation.  Note
1365  * that the dedup mask bits must also be set in the related DIO for a dedup
1366  * to be fully validated (which is handled in the freemap allocation code).
1367  *
1368  * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
1369  *	    All fields must be loaded into locals and validated.
1370  *
1371  * WARNING! Should only be used for file data and directory entries;
1372  *	    hammer2_chain_modify() only checks for the dedup case on data
1373  *	    chains.  Also, dedup data can only be recorded for committed
1374  *	    chains (so NOT strategy writes which can undergo further
1375  *	    modification after the fact!).
1376  */
1377 void
1378 hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
1379 		     const char *data)
1380 {
1381 	hammer2_dev_t *hmp;
1382 	hammer2_dedup_t *dedup;
1383 	uint64_t crc;
1384 	uint64_t mask;
1385 	int best = 0;
1386 	int i;
1387 	int dticks;
1388 
1389 	/*
1390 	 * We can only record a dedup if we have media data to test against.
1391 	 * If dedup is not enabled, return early, which allows a chain to
1392 	 * remain marked MODIFIED (which might have benefits in special
1393 	 * situations, though typically it does not).
1394 	 */
1395 	if (hammer2_dedup_enable == 0)
1396 		return;
1397 	if (dio == NULL) {
1398 		dio = chain->dio;
1399 		if (dio == NULL)
1400 			return;
1401 	}
1402 
1403 	hmp = chain->hmp;
1404 
1405 	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
1406 	case HAMMER2_CHECK_ISCSI32:
1407 		/*
1408 		 * XXX use the built-in crc (the dedup lookup sequencing
1409 		 * needs to be fixed so the check code is already present
1410 		 * when dedup_lookup is called)
1411 		 */
1412 #if 0
1413 		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
1414 #endif
1415 		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
1416 		break;
1417 	case HAMMER2_CHECK_XXHASH64:
1418 		crc = chain->bref.check.xxhash64.value;
1419 		break;
1420 	case HAMMER2_CHECK_SHA192:
1421 		/*
1422 		 * XXX use the built-in crc (the dedup lookup sequencing
1423 		 * needs to be fixed so the check code is already present
1424 		 * when dedup_lookup is called)
1425 		 */
1426 #if 0
1427 		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
1428 		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
1429 		      ((uint64_t *)chain->bref.check.sha192.data)[2];
1430 #endif
1431 		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
1432 		break;
1433 	default:
1434 		/*
1435 		 * Cannot dedup without a check code
1436 		 *
1437 		 * NOTE: In particular, CHECK_NONE allows a sector to be
1438 		 *	 overwritten without copy-on-write, recording
1439 		 *	 a dedup block for a CHECK_NONE object would be
1440 		 *	 a disaster!
1441 		 */
1442 		return;
1443 	}
1444 
1445 	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);
1446 
1447 	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
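	/*
	 * The heuristic table acts as a 4-way set indexed by the low bits
	 * of the crc.  Prefer a slot already holding this crc, otherwise
	 * replace the stalest slot (with protection against tick wrap).
	 */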
1448 	for (i = 0; i < 4; ++i) {
1449 		if (dedup[i].data_crc == crc) {
1450 			best = i;
1451 			break;
1452 		}
1453 		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
1454 		if (dticks < 0 || dticks > hz * 60 * 30)
1455 			best = i;
1456 	}
1457 	dedup += best;
1458 	if (hammer2_debug & 0x40000) {
1459 		kprintf("REC %04x %016jx %016jx\n",
1460 			(int)(dedup - hmp->heur_dedup),
1461 			crc,
1462 			chain->bref.data_off);
1463 	}
1464 	dedup->ticks = ticks;
1465 	dedup->data_off = chain->bref.data_off;
1466 	dedup->data_crc = crc;
1467 
1468 	/*
1469 	 * Set the valid bits for the dedup only after we know the data
1470 	 * buffer has been updated.  The alloc bits were set (and the valid
1471 	 * bits cleared) when the media was allocated.
1472 	 *
1473 	 * This is done in two stages because the bulkfree code can race
1474 	 * the gap between allocation and data population.  Both masks must
1475 	 * be set before a bcmp/dedup operation is able to use the block.
1476 	 */
1477 	mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
1478 	atomic_set_64(&dio->dedup_valid, mask);
1479 
1480 #if 0
1481 	/*
1482 	 * XXX removed. MODIFIED is an integral part of the flush code,
1483 	 * lets not just clear it
1484 	 */
1485 	/*
1486 	 * Once we record the dedup the chain must be marked clean to
1487 	 * prevent reuse of the underlying block.   Remember that this
1488 	 * write occurs when the buffer cache is flushed (i.e. on sync(),
1489 	 * fsync(), filesystem periodic sync, or when the kernel needs to
1490 	 * flush a buffer), and not whenever the user write()s.
1491 	 */
1492 	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
1493 		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
1494 		atomic_add_long(&hammer2_count_modified_chains, -1);
1495 		if (chain->pmp)
1496 			hammer2_pfs_memory_wakeup(chain->pmp, -1);
1497 	}
1498 #endif
1499 }
1500 
1501 static
1502 hammer2_off_t
1503 hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
1504 {
1505 	hammer2_dedup_t *dedup;
1506 	hammer2_io_t *dio;
1507 	hammer2_off_t off;
1508 	uint64_t crc;
1509 	uint64_t mask;
1510 	char *data;
1511 	char *dtmp;
1512 	int i;
1513 
1514 	if (hammer2_dedup_enable == 0)
1515 		return 0;
1516 	data = *datap;
1517 	if (data == NULL)
1518 		return 0;
1519 
1520 	/*
1521 	 * XXX use the built-in crc (the dedup lookup sequencing
1522 	 * needs to be fixed so the check code is already present
1523 	 * when dedup_lookup is called)
1524 	 */
1525 	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
1526 	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
1527 
1528 	if (hammer2_debug & 0x40000) {
1529 		kprintf("LOC %04x/4 %016jx\n",
1530 			(int)(dedup - hmp->heur_dedup),
1531 			crc);
1532 	}
1533 
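	/*
	 * Scan the 4-entry set for a crc match with the correct block
	 * size radix, then verify the candidate by checking the DIO's
	 * dedup alloc/valid masks and doing a full bcmp of the media
	 * data before returning the offset.
	 */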
1534 	for (i = 0; i < 4; ++i) {
1535 		off = dedup[i].data_off;
1536 		cpu_ccfence();
1537 		if (dedup[i].data_crc != crc)
1538 			continue;
1539 		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
1540 			continue;
1541 		dio = hammer2_io_getquick(hmp, off, pblksize);
1542 		if (dio) {
1543 			dtmp = hammer2_io_data(dio, off);
1544 			mask = hammer2_dedup_mask(dio, off, pblksize);
1545 			if ((dio->dedup_alloc & mask) == mask &&
1546 			    (dio->dedup_valid & mask) == mask &&
1547 			    bcmp(data, dtmp, pblksize) == 0) {
1548 				if (hammer2_debug & 0x40000) {
1549 					kprintf("DEDUP SUCCESS %016jx\n",
1550 						(intmax_t)off);
1551 				}
1552 				hammer2_io_putblk(&dio);
1553 				*datap = NULL;
1554 				dedup[i].ticks = ticks;   /* update use */
1555 				atomic_add_long(&hammer2_iod_file_wdedup,
1556 						pblksize);
1557 
1558 				return off;		/* RETURN */
1559 			}
1560 			hammer2_io_putblk(&dio);
1561 		}
1562 	}
1563 	return 0;
1564 }
1565 
1566 /*
1567  * Poof.  Races are ok; if someone gets in and reuses a dedup offset
1568  * before or while we are clearing it, they will also recover the freemap
1569  * entry (set it to fully allocated), so a bulkfree race can only set it
1570  * to a possibly-free state.
1571  *
1572  * XXX ok, well, not really sure races are ok but going to run with it
1573  *     for the moment.
1574  */
1575 void
1576 hammer2_dedup_clear(hammer2_dev_t *hmp)
1577 {
1578 	int i;
1579 
1580 	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
1581 		hmp->heur_dedup[i].data_off = 0;
1582 		hmp->heur_dedup[i].ticks = ticks - 1;
1583 	}
1584 }
1585