/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 */

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);
static void hammer_io_flush_sync_done(struct bio *bio);


/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The buffer is unlocked and marked appropriate for reclamation.
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * The caller must own the buffer and the IO must indicate that the
 * structure no longer owns it (io.released != 0).
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	}
}

/*
 * Wait for any physical IO to complete
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		crit_enter();
		tsleep_interlock(io);
		io->waiting = 1;
		for (;;) {
			tsleep(io, 0, "hmrflw", 0);
			if (io->running == 0)
				break;
			tsleep_interlock(io);
			io->waiting = 1;
			if (io->running == 0)
				break;
		}
		crit_exit();
	}
}
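
/*
 * A note on the pattern above: tsleep_interlock()/tsleep() closes the
 * race between testing io->running and going to sleep.
 * hammer_io_complete() clears io->running and only wakeup()s the io
 * when io->waiting is set, so the loop re-tests io->running after
 * every wakeup rather than assuming a single wakeup suffices.
 */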

/*
 * Wait for all hammer_io-initiated write I/O's to complete.  This is not
 * supposed to count direct I/O's but some can leak through (for
 * non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
{
	hammer_io_flush_sync(hmp);
	crit_enter();
	while (hmp->io_running_space)
		tsleep(&hmp->io_running_space, 0, ident, 0);
	crit_exit();
}

#define HAMMER_MAXRA	4

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster
 * a 64K read.
 *
 * Note that clustering occurs at the device layer, not the logical layer.
 * If the buffers do not apply to the current operation they may apply to
 * some other.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
{
	struct buf *bp;
	int   error;

	if ((bp = io->bp) == NULL) {
		hammer_count_io_running_read += io->bytes;
		if (hammer_cluster_enable) {
			error = cluster_read(devvp, limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_BUFS, &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		hammer_count_io_running_read -= io->bytes;

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
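
/*
 * Illustrative caller sketch (hypothetical, not from this file): a
 * consumer holding the io exclusively locked would typically do
 * something like
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io,
 *			       volume->maxbuf_off);
 *	if (error == 0)
 *		buffer->ondisk = (void *)buffer->io.bp->b_data;
 *
 * where buffer and volume are assumed names for the enclosing
 * structures.
 */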

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_off_t phys_offset;
	struct buf *bp;
	int error;

	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	crit_enter();
	if ((bp = findblk(volume->devvp, phys_offset)) != NULL)
		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
#if 0
		hammer_ref(&iou->io.lock);
		hammer_io_clear_modify(&iou->io, 1);
		bundirty(bp);
		iou->io.released = 0;
		BUF_KERNPROC(bp);
		iou->io.reclaim = 1;
		iou->io.waitdep = 1;
		KKASSERT(iou->io.lock.refs == 1);
		hammer_rel_buffer(&iou->buffer, 0);
		/*hammer_io_deallocate(bp);*/
#endif
		bqrelse(bp);
		error = EAGAIN;
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		brelse(bp);
		error = 0;
	}
	crit_exit();
	return(error);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io is usually interlocked with io.loading and io.refs must be 1.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * Disposal simply means the caller finishes decrementing the ref-count on
 * the IO structure then brelse()'s the bp.  The bp may or may not still be
 * passively associated with the IO.
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * Meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io);
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regards to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
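
/*
 * Summary of the hammer_io_release() cases above: an explicit flush or
 * reclaim of a clean, idle io disassociates the bp and returns it for
 * the caller to brelse(); a still-modified io keeps the bp (data/undo
 * types may be bdwrite()n first) and returns NULL; a clean io that was
 * never released hands ownership to the kernel, leaving the bp
 * passively associated unless B_LOCKED or reclaim forces a
 * disassociation; an already-released io is re-acquired only to honor
 * B_LOCKED/reclaim or to refresh its position in the kernel's LRU.
 */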

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 */
void
hammer_io_flush(struct hammer_io *io)
{
	struct buf *bp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0) {
		return;
	}

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 */
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	}
	io->released = 1;

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_unref(&io->lock);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 */
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	bawrite(bp);
	hammer_io_flush_mark(io->volume);
}
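
/*
 * Note that hammer_io_flush() hands the bp to the kernel via bawrite();
 * completion arrives asynchronously in hammer_io_complete(), which
 * decrements io_running_space, clears io->running, and wakes any
 * hammer_io_wait()ers.
 */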

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them; pure data buffers
 * can be written out.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	hammer_lock_ex(&io->lock);
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
		KKASSERT(io->modified != 0);
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

void
hammer_io_write_interlock(hammer_io_t io)
{
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
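
/*
 * Illustrative pairing (hypothetical caller): the write interlock
 * excludes hammer_io_modify() while the ondisk image is being worked
 * on, e.g.
 *
 *	hammer_io_write_interlock(&buffer->io);
 *	... operate on buffer->ondisk with modifications locked out ...
 *	hammer_io_done_interlock(&buffer->io);
 *
 * modify_refs == -1 marks the interlocked state and hammer_io_modify()
 * sleeps in its initial while-loop until it is released.
 */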

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &volume->io,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans, &buffer->io,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}
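
/*
 * Illustrative modification sequence (hypothetical caller): on-disk
 * changes are bracketed so undo generation and modify_refs accounting
 * stay balanced, e.g.
 *
 *	hammer_modify_buffer(trans, buffer, &ondisk->field,
 *			     sizeof(ondisk->field));
 *	ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 *
 * Passing len == 0 marks the buffer modified without generating undo.
 */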

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	if (io->modified == 0)
		return;

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_list != NULL);
	if (io->mod_list == &io->hmp->volu_list ||
	    io->mod_list == &io->hmp->meta_list) {
		io->hmp->locked_dirty_space -= io->bytes;
		hammer_count_dirtybufspace -= io->bytes;
	}
	TAILQ_REMOVE(io->mod_list, io, mod_entry);
	io->mod_list = NULL;
	io->modified = 0;

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(io->lock.refs > 0);
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_list.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	KKASSERT(io->modified == 0);
	if (io->mod_list) {
		crit_enter();	/* biodone race against list */
		KKASSERT(io->mod_list == &io->hmp->lose_list);
		TAILQ_REMOVE(io->mod_list, io, mod_entry);
		io->mod_list = NULL;
		crit_exit();
	}
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	KKASSERT(io->mod_list == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_list = &hmp->volu_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_list = &hmp->meta_list;
		hmp->locked_dirty_space += io->bytes;
		hammer_count_dirtybufspace += io->bytes;
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_list = &hmp->undo_list;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_list = &hmp->data_list;
		break;
	}
	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
}
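
/*
 * Note the asymmetry above: only volume and meta-data buffers are
 * charged against locked_dirty_space / hammer_count_dirtybufspace,
 * matching the credit in hammer_io_clear_modify().  Undo and data
 * buffers may be flushed by the kernel and are tracked on their own
 * lists without the locked-space accounting.
 */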

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 */
static void
hammer_io_start(struct buf *bp)
{
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
 * may also be set if we were marking a cluster header open.  Only remove
 * our dependency if the modified bit is clear.
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					if (iou->io.lock.refs == 0)
						++hammer_count_refedbufs;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		hammer_count_io_running_write -= iou->io.bytes;
		iou->io.hmp->io_running_space -= iou->io.bytes;
		if (iou->io.hmp->io_running_space == 0)
			wakeup(&iou->io.hmp->io_running_space);
		KKASSERT(iou->io.hmp->io_running_space >= 0);
		iou->io.running = 0;
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, do it now if refs has become zero.
	 */
	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
		KKASSERT(iou->io.modified == 0);
		--hammer_count_io_locked;
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (iou->io.lock.refs > 0 || iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		++hammer_count_io_locked;
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_list == NULL);
			crit_enter();	/* biodone race against list */
			iou->io.mod_list = &iou->io.hmp->lose_list;
			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
			crit_exit();
		}
	}
}

static int
hammer_io_fsync(struct vnode *vp)
{
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 */
static int
hammer_io_sync(struct mount *mp)
{
	return(0);
}

static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 */
static int
hammer_io_checkread(struct buf *bp)
{
	return(0);
}

static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);

	/*
	 * This shouldn't happen under normal operation.
	 */
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			++hammer_count_io_locked;
		}
		return(1);
	}

	/*
	 * We can only clear the modified bit if the IO is not currently
	 * undergoing modification.  Otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_unref(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	io->hmp->io_running_space += io->bytes;
	hammer_count_io_running_write += io->bytes;
	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
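
/*
 * The kernel reaches these bioops through the b_dep worklist entry
 * installed by hammer_io_read()/hammer_io_new(); a bp with an empty
 * b_dep list is an ordinary buffer and never enters these callbacks.
 */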

/************************************************************************
 *				DIRECT IO OPS 				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * 3rd level bio
		 */
		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}
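
/*
 * To summarize the translation above: the zone-X (large-data) offset in
 * the original bio is resolved to a zone-2 offset by
 * hammer_blockmap_lookup(), and the pushed bio is aimed at the raw
 * volume offset vol_buf_beg + (zone2_offset & HAMMER_OFF_SHORT_MASK).
 * Compare hammer_io_direct_write(), which additionally caches the
 * zone-2 offset in an intermediate bio level.
 */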

#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	bp = nbio->bio_buf;
	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_debug)
			Debugger("");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}
#endif

/*
 * Write a buffer associated with a front-end vnode directly to the
 * disk media.  The bio may be issued asynchronously.
 *
 * The BIO is associated with the specified record and RECF_DIRECT_IO
 * is set.  The record is added to its object.
 */
int
hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
		       struct bio *bio)
{
	hammer_btree_leaf_elm_t leaf = &record->leaf;
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	hammer_buffer_t buffer;
	struct buf *bp;
	struct bio *nbio;
	char *ptr;
	int vol_no;
	int error;

	buf_offset = leaf->data_offset;

	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);

	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
	    leaf->data_len >= HAMMER_BUFSIZE) {
		/*
		 * We are using the vnode's bio to write directly to the
		 * media; any hammer_buffer at the same zone-X offset will
		 * now have stale data.
		 */
		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
		vol_no = HAMMER_VOL_DECODE(zone2_offset);
		volume = hammer_get_volume(hmp, vol_no, &error);

		if (error == 0 && zone2_offset >= volume->maxbuf_off)
			error = EIO;
		if (error == 0) {
			bp = bio->bio_buf;
			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
			/*
			hammer_del_buffers(hmp, buf_offset,
					   zone2_offset, bp->b_bufsize);
			*/

			/*
			 * Second level bio - cached zone2 offset.
			 *
			 * (We can put our bio_done function in either the
			 *  2nd or 3rd level).
			 */
			nbio = push_bio(bio);
			nbio->bio_offset = zone2_offset;
			nbio->bio_done = hammer_io_direct_write_complete;
			nbio->bio_caller_info1.ptr = record;
			record->zone2_offset = zone2_offset;
			record->flags |= HAMMER_RECF_DIRECT_IO |
					 HAMMER_RECF_DIRECT_INVAL;

			/*
			 * Third level bio - raw offset specific to the
			 * correct volume.
			 */
			zone2_offset &= HAMMER_OFF_SHORT_MASK;
			nbio = push_bio(nbio);
			nbio->bio_offset = volume->ondisk->vol_buf_beg +
					   zone2_offset;
			hammer_stats_disk_write += bp->b_bufsize;
			vn_strategy(volume->devvp, nbio);
			hammer_io_flush_mark(volume);
		}
		hammer_rel_volume(volume, 0);
	} else {
		/*
		 * Must fit in a standard HAMMER buffer.  In this case all
		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
		 * does not need to be set-up.
		 */
		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
		buffer = NULL;
		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
		if (error == 0) {
			bp = bio->bio_buf;
			bp->b_flags |= B_AGE;
			hammer_io_modify(&buffer->io, 1);
			bcopy(bp->b_data, ptr, leaf->data_len);
			hammer_io_modify_done(&buffer->io);
			hammer_rel_buffer(buffer, 0);
			bp->b_resid = 0;
			biodone(bio);
		}
	}
	if (error == 0) {
		/*
		 * The record is all setup now, add it.  Potential conflicts
		 * have already been dealt with.
		 */
		error = hammer_mem_add(record);
		KKASSERT(error == 0);
	} else {
		/*
		 * Major suckage occurred.
		 */
		kprintf("hammer_direct_write: failed @ %016llx\n",
			leaf->data_offset);
		bp = bio->bio_buf;
		bp->b_resid = 0;
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
	}
	return(error);
}

/*
 * On completion of the BIO this callback must disconnect
 * it from the hammer_record and chain to the previous bio.
 *
 * An I/O error forces the mount to read-only.  Data buffers
 * are not B_LOCKED like meta-data buffers are, so we have to
 * throw the buffer away to prevent the kernel from retrying.
 */
static
void
hammer_io_direct_write_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	hammer_record_t record = nbio->bio_caller_info1.ptr;

	bp = nbio->bio_buf;
	obio = pop_bio(nbio);
	if (bp->b_flags & B_ERROR) {
		hammer_critical_error(record->ip->hmp, record->ip,
				      bp->b_error,
				      "while writing bulk data");
		bp->b_flags |= B_INVAL;
	}
	biodone(obio);

	KKASSERT(record != NULL);
	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
	record->flags &= ~HAMMER_RECF_DIRECT_IO;
	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
		record->flags &= ~HAMMER_RECF_DIRECT_WAIT;
		wakeup(&record->flags);
	}
}


/*
 * This is called before a record is either committed to the B-Tree
 * or destroyed, to resolve any associated direct-IO.
 *
 * (1) We must wait for any direct-IO related to the record to complete.
 *
 * (2) We must remove any buffer cache aliases for data accessed via
 *     leaf->data_offset or zone2_offset so non-direct-IO consumers
 *     (the mirroring and reblocking code) do not see stale data.
 */
void
hammer_io_direct_wait(hammer_record_t record)
{
	/*
	 * Wait for I/O to complete
	 */
	if (record->flags & HAMMER_RECF_DIRECT_IO) {
		crit_enter();
		while (record->flags & HAMMER_RECF_DIRECT_IO) {
			record->flags |= HAMMER_RECF_DIRECT_WAIT;
			tsleep(&record->flags, 0, "hmdiow", 0);
		}
		crit_exit();
	}

	/*
	 * Invalidate any related buffer cache aliases associated with the
	 * backing device.  This is needed because the buffer cache buffer
	 * for file data is associated with the file vnode, not the backing
	 * device vnode.
	 *
	 * XXX I do not think this case can occur any more now that
	 * reservations ensure that all such buffers are removed before
	 * an area can be reused.
	 */
	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
		KKASSERT(record->leaf.data_offset);
		hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
				   record->zone2_offset, record->leaf.data_len,
				   1);
		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
	}
}

/*
 * This is called to remove the second-level cached zone-2 offset from
 * frontend buffer cache buffers, now stale due to a data relocation.
 * These offsets are generated by cluster_read() via VOP_BMAP, or directly
 * by hammer_vop_strategy_read().
 *
 * This is rather nasty because here we have something like the reblocker
 * scanning the raw B-Tree with no held references on anything, really,
 * other than a shared lock on the B-Tree node, and we have to access the
 * frontend's buffer cache to check for and clean out the association.
 * Specifically, if the reblocker is moving data on the disk, these cached
 * offsets will become invalid.
 *
 * Only data record types associated with the large-data zone are subject
 * to direct-io and need to be checked.
 */
void
hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
{
	struct hammer_inode_info iinfo;
	int zone;

	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
		return;
	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
		return;
	iinfo.obj_id = leaf->base.obj_id;
	iinfo.obj_asof = 0;	/* unused */
	iinfo.obj_localization = leaf->base.localization &
				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
	iinfo.u.leaf = leaf;
	hammer_scan_inode_snapshots(hmp, &iinfo,
				    hammer_io_direct_uncache_callback,
				    leaf);
}

static int
hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;
	hammer_off_t data_offset;
	hammer_off_t file_offset;
	struct vnode *vp;
	struct buf *bp;
	int blksize;

	if (ip->vp == NULL)
		return(0);
	data_offset = iinfo->u.leaf->data_offset;
	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
	blksize = iinfo->u.leaf->data_len;
	KKASSERT((blksize & HAMMER_BUFMASK) == 0);

	hammer_ref(&ip->lock);
	if (hammer_get_vnode(ip, &vp) == 0) {
		if ((bp = findblk(ip->vp, file_offset)) != NULL &&
		    bp->b_bio2.bio_offset != NOOFFSET) {
			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
			bp->b_bio2.bio_offset = NOOFFSET;
			brelse(bp);
		}
		vput(vp);
	}
	hammer_rel_inode(ip, 0);
	return(0);
}


/*
 * This function is called when writes may have occurred on the volume,
 * indicating that the device may be holding cached writes.
 */
static void
hammer_io_flush_mark(hammer_volume_t volume)
{
	volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
}

/*
 * This function ensures that the device has flushed any cached writes out.
 */
void
hammer_io_flush_sync(hammer_mount_t hmp)
{
	hammer_volume_t volume;
	struct buf *bp_base = NULL;
	struct buf *bp;

	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
			volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
			bp = getpbuf(NULL);
			bp->b_bio1.bio_offset = 0;
			bp->b_bufsize = 0;
			bp->b_bcount = 0;
			bp->b_cmd = BUF_CMD_FLUSH;
			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
			bp->b_bio1.bio_done = hammer_io_flush_sync_done;
			bp->b_flags |= B_ASYNC;
			bp_base = bp;
			vn_strategy(volume->devvp, &bp->b_bio1);
		}
	}
	while ((bp = bp_base) != NULL) {
		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
		while (bp->b_cmd != BUF_CMD_DONE) {
			crit_enter();
			tsleep_interlock(&bp->b_cmd);
			if (bp->b_cmd != BUF_CMD_DONE)
				tsleep(&bp->b_cmd, 0, "hmrFLS", 0);
			crit_exit();
		}
		bp->b_flags &= ~B_ASYNC;
		relpbuf(bp, NULL);
	}
}
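
/*
 * The function above fans out one BUF_CMD_FLUSH pbuf per volume that
 * needs it, chaining the pbufs through bio_caller_info1.cluster_head,
 * then collects them with the same tsleep_interlock() pattern used in
 * hammer_io_wait().  The flush command asks the device to empty its
 * write cache, making previously initiated writes durable.
 */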

/*
 * Callback to deal with completed flush commands to the device.
 */
static void
hammer_io_flush_sync_done(struct bio *bio)
{
	struct buf *bp;

	bp = bio->bio_buf;
	bp->b_cmd = BUF_CMD_DONE;
	wakeup(&bp->b_cmd);
}
