xref: /dragonfly/sys/vfs/hammer/hammer_io.c (revision c1543a89)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_io.c,v 1.55 2008/09/15 17:02:49 dillon Exp $
35  */
36 /*
37  * IO Primitives and buffer cache management
38  *
39  * All major data-tracking structures in HAMMER contain a struct hammer_io
40  * which is used to manage their backing store.  We use filesystem buffers
41  * for backing store and we leave them passively associated with their
42  * HAMMER structures.
43  *
44  * If the kernel tries to destroy a passively associated buf which we cannot
45  * yet let go of, we set B_LOCKED in the buffer and then actively release it
46  * later when we can.
47  */
48 
49 #include "hammer.h"
50 #include <sys/fcntl.h>
51 #include <sys/nlookup.h>
52 #include <sys/buf.h>
53 #include <sys/buf2.h>
54 
55 static void hammer_io_modify(hammer_io_t io, int count);
56 static void hammer_io_deallocate(struct buf *bp);
57 #if 0
58 static void hammer_io_direct_read_complete(struct bio *nbio);
59 #endif
60 static void hammer_io_direct_write_complete(struct bio *nbio);
61 static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
62 static void hammer_io_set_modlist(struct hammer_io *io);
63 static void hammer_io_flush_mark(hammer_volume_t volume);
64 
65 
66 /*
67  * Initialize a new, already-zero'd hammer_io structure, or reinitialize
68  * an existing hammer_io structure which may have switched to another type.
69  */
70 void
71 hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
72 {
73 	io->volume = volume;
74 	io->hmp = volume->io.hmp;
75 	io->type = type;
76 }
77 
78 /*
79  * Helper routine to disassociate a buffer cache buffer from an I/O
80  * structure.  The buffer is unlocked and marked appropriate for reclamation.
81  *
82  * The io may have 0 or 1 references depending on who called us.  The
83  * caller is responsible for dealing with the refs.
84  *
85  * This call can only be made when no action is required on the buffer.
86  *
87  * The caller must own the buffer and the IO must indicate that the
88  * structure no longer owns it (io.released != 0).
89  */
90 static void
91 hammer_io_disassociate(hammer_io_structure_t iou)
92 {
93 	struct buf *bp = iou->io.bp;
94 
95 	KKASSERT(iou->io.released);
96 	KKASSERT(iou->io.modified == 0);
97 	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
98 	buf_dep_init(bp);
99 	iou->io.bp = NULL;
100 
101 	/*
102 	 * If the buffer was locked someone wanted to get rid of it.
103 	 */
104 	if (bp->b_flags & B_LOCKED) {
105 		--hammer_count_io_locked;
106 		bp->b_flags &= ~B_LOCKED;
107 	}
108 	if (iou->io.reclaim) {
109 		bp->b_flags |= B_NOCACHE|B_RELBUF;
110 		iou->io.reclaim = 0;
111 	}
112 
113 	switch(iou->io.type) {
114 	case HAMMER_STRUCTURE_VOLUME:
115 		iou->volume.ondisk = NULL;
116 		break;
117 	case HAMMER_STRUCTURE_DATA_BUFFER:
118 	case HAMMER_STRUCTURE_META_BUFFER:
119 	case HAMMER_STRUCTURE_UNDO_BUFFER:
120 		iou->buffer.ondisk = NULL;
121 		break;
122 	}
123 }
124 
125 /*
126  * Wait for any physical IO to complete
127  *
128  * XXX we aren't interlocked against a spinlock or anything so there
129  *     is a small window in the interlock / io->running == 0 test.
130  */
131 void
132 hammer_io_wait(hammer_io_t io)
133 {
134 	if (io->running) {
135 		for (;;) {
136 			io->waiting = 1;
137 			tsleep_interlock(io, 0);
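			/*
			 * Editorial note: tsleep_interlock() queues us on the
			 * sleep queue before the re-test of io->running, so a
			 * wakeup(io) issued between the test and the
			 * PINTERLOCKED tsleep below is not lost.
			 */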
138 			if (io->running == 0)
139 				break;
140 			tsleep(io, PINTERLOCKED, "hmrflw", hz);
141 			if (io->running == 0)
142 				break;
143 		}
144 	}
145 }
146 
147 /*
148  * Wait for all hammer_io-initiated write I/Os to complete.  This is not
149  * supposed to count direct I/Os but some can leak through (for
150  * non-full-sized direct I/Os).
151  */
152 void
153 hammer_io_wait_all(hammer_mount_t hmp, const char *ident)
154 {
155 	hammer_io_flush_sync(hmp);
156 	crit_enter();
157 	while (hmp->io_running_space)
158 		tsleep(&hmp->io_running_space, 0, ident, 0);
159 	crit_exit();
160 }
161 
162 /*
163  * Clear a flagged error condition on an I/O buffer.  The caller must hold
164  * its own ref on the buffer.
165  */
166 void
167 hammer_io_clear_error(struct hammer_io *io)
168 {
169 	if (io->ioerror) {
170 		io->ioerror = 0;
171 		hammer_unref(&io->lock);
172 		KKASSERT(io->lock.refs > 0);
173 	}
174 }
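
/*
 * Editorial note: the reference dropped above is the one acquired in
 * hammer_io_complete() when a write error was first flagged on the io
 * (that ref is what kept the buffer from being thrown away), so the
 * caller's own reference must still remain, which the KKASSERT verifies.
 */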
175 
176 
177 #define HAMMER_MAXRA	4
178 
179 /*
180  * Load bp for a HAMMER structure.  The io must be exclusively locked by
181  * the caller.
182  *
183  * This routine is mostly used on meta-data and small-data blocks.  Generally
184  * speaking HAMMER assumes some locality of reference and will cluster
185  * a 64K read.
186  *
187  * Note that clustering occurs at the device layer, not the logical layer.
188  * If the buffers do not apply to the current operation they may apply to
189  * some other.
190  */
191 int
192 hammer_io_read(struct vnode *devvp, struct hammer_io *io, hammer_off_t limit)
193 {
194 	struct buf *bp;
195 	int   error;
196 
197 	if ((bp = io->bp) == NULL) {
198 		hammer_count_io_running_read += io->bytes;
199 		if (hammer_cluster_enable) {
200 			error = cluster_read(devvp, limit,
201 					     io->offset, io->bytes,
202 					     HAMMER_CLUSTER_SIZE,
203 					     HAMMER_CLUSTER_BUFS, &io->bp);
204 		} else {
205 			error = bread(devvp, io->offset, io->bytes, &io->bp);
206 		}
207 		hammer_stats_disk_read += io->bytes;
208 		hammer_count_io_running_read -= io->bytes;
209 
210 		/*
211 		 * The code generally assumes b_ops/b_dep have been set up,
212 		 * even if we error out here.
213 		 */
214 		bp = io->bp;
215 		bp->b_ops = &hammer_bioops;
216 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
217 		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
218 		BUF_KERNPROC(bp);
219 		KKASSERT(io->modified == 0);
220 		KKASSERT(io->running == 0);
221 		KKASSERT(io->waiting == 0);
222 		io->released = 0;	/* we hold an active lock on bp */
223 	} else {
224 		error = 0;
225 	}
226 	return(error);
227 }
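
/*
 * Illustrative caller sketch (editorial, not part of the original file).
 * A typical buffer load elsewhere in HAMMER is assumed to look like:
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io,
 *			       volume->maxbuf_off);
 *	if (error == 0)
 *		buffer->ondisk = (void *)buffer->io.bp->b_data;
 *
 * with the io exclusively locked by the caller as required above.
 */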
228 
229 /*
230  * Similar to hammer_io_read() but returns a zero'd out buffer instead.
231  * Must be called with the IO exclusively locked.
232  *
233  * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
234  * I/O by forcing the buffer to not be in a released state before calling
235  * it.
236  *
237  * This function will also mark the IO as modified but it will not
238  * increment the modify_refs count.
239  */
240 int
241 hammer_io_new(struct vnode *devvp, struct hammer_io *io)
242 {
243 	struct buf *bp;
244 
245 	if ((bp = io->bp) == NULL) {
246 		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
247 		bp = io->bp;
248 		bp->b_ops = &hammer_bioops;
249 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
250 		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
251 		io->released = 0;
252 		KKASSERT(io->running == 0);
253 		io->waiting = 0;
254 		BUF_KERNPROC(bp);
255 	} else {
256 		if (io->released) {
257 			regetblk(bp);
258 			BUF_KERNPROC(bp);
259 			io->released = 0;
260 		}
261 	}
262 	hammer_io_modify(io, 0);
263 	vfs_bio_clrbuf(bp);
264 	return(0);
265 }
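
/*
 * Illustrative sketch (editorial): hammer_io_new() is the formatting
 * analogue of hammer_io_read(), assumed to be used when a block is about
 * to be completely overwritten:
 *
 *	error = hammer_io_new(volume->devvp, &buffer->io);
 *
 * The caller then fills in buffer->ondisk; the buffer comes back zero'd
 * and already marked modified (modify_refs unchanged) as noted above.
 */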
266 
267 /*
268  * Advance the activity count on the underlying buffer because
269  * HAMMER does not getblk/brelse on every access.
270  */
271 void
272 hammer_io_advance(struct hammer_io *io)
273 {
274 	if (io->bp)
275 		buf_act_advance(io->bp);
276 }
277 
278 /*
279  * Remove potential device level aliases against buffers managed by high level
280  * vnodes.  Aliases can also be created due to mixed buffer sizes or via
281  * direct access to the backing store device.
282  *
283  * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
284  * does not exist its backing VM pages might, and we have to invalidate
285  * those as well or a getblk() will reinstate them.
286  *
287  * Buffer cache buffers associated with hammer_buffers cannot be
288  * invalidated.
289  */
290 int
291 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
292 {
293 	hammer_io_structure_t iou;
294 	hammer_off_t phys_offset;
295 	struct buf *bp;
296 	int error;
297 
298 	phys_offset = volume->ondisk->vol_buf_beg +
299 		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
300 	crit_enter();
301 	if ((bp = findblk(volume->devvp, phys_offset, FINDBLK_TEST)) != NULL)
302 		bp = getblk(volume->devvp, phys_offset, bp->b_bufsize, 0, 0);
303 	else
304 		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
305 	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
306 #if 0
307 		hammer_ref(&iou->io.lock);
308 		hammer_io_clear_modify(&iou->io, 1);
309 		bundirty(bp);
310 		iou->io.released = 0;
311 		BUF_KERNPROC(bp);
312 		iou->io.reclaim = 1;
313 		iou->io.waitdep = 1;
314 		KKASSERT(iou->io.lock.refs == 1);
315 		hammer_rel_buffer(&iou->buffer, 0);
316 		/*hammer_io_deallocate(bp);*/
317 #endif
318 		bqrelse(bp);
319 		error = EAGAIN;
320 	} else {
321 		KKASSERT((bp->b_flags & B_LOCKED) == 0);
322 		bundirty(bp);
323 		bp->b_flags |= B_NOCACHE|B_RELBUF;
324 		brelse(bp);
325 		error = 0;
326 	}
327 	crit_exit();
328 	return(error);
329 }
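
/*
 * Editorial note: the EAGAIN above signals that a live hammer_buffer
 * still owns the device-level alias; the caller (assumed to be
 * hammer_del_buffers()) must dispose of that buffer and retry before
 * the alias can actually be invalidated.
 */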
330 
331 /*
332  * This routine is called on the last reference to a hammer structure.
333  * The io is usually interlocked with io.loading and io.refs must be 1.
334  *
335  * This routine may return a non-NULL bp to the caller for disposal.  Disposal
336  * simply means the caller finishes decrementing the ref-count on the
337  * IO structure then brelse()'s the bp.  The bp may or may not still be
338  * passively associated with the IO.
339  *
340  * The only requirement here is that modified meta-data and volume-header
341  * buffers may NOT be disassociated from the IO structure, and consequently
342  * we also leave such buffers actively associated with the IO if they already
343  * are (since the kernel can't do anything with them anyway).  Only the
344  * flusher is allowed to write such buffers out.  Modified pure-data and
345  * undo buffers are returned to the kernel but left passively associated
346  * so we can track when the kernel writes the bp out.
347  */
348 struct buf *
349 hammer_io_release(struct hammer_io *io, int flush)
350 {
351 	union hammer_io_structure *iou = (void *)io;
352 	struct buf *bp;
353 
354 	if ((bp = io->bp) == NULL)
355 		return(NULL);
356 
357 	/*
358 	 * Try to flush a dirty IO to disk if asked to by the
359 	 * caller or if the kernel tried to flush the buffer in the past.
360 	 *
361 	 * Kernel-initiated flushes are only allowed for pure-data buffers.
362 	 * meta-data and volume buffers can only be flushed explicitly
363 	 * Meta-data and volume buffers can only be flushed explicitly
364 	 */
365 	if (io->modified) {
366 		if (flush) {
367 			hammer_io_flush(io, 0);
368 		} else if (bp->b_flags & B_LOCKED) {
369 			switch(io->type) {
370 			case HAMMER_STRUCTURE_DATA_BUFFER:
371 				hammer_io_flush(io, 0);
372 				break;
373 			case HAMMER_STRUCTURE_UNDO_BUFFER:
374 				hammer_io_flush(io, hammer_undo_reclaim(io));
375 				break;
376 			default:
377 				break;
378 			}
379 		} /* else no explicit request to flush the buffer */
380 	}
381 
382 	/*
383 	 * Wait for the IO to complete if asked to.  This occurs when
384 	 * the buffer must be disposed of definitively during an umount
385 	 * or buffer invalidation.
386 	 */
387 	if (io->waitdep && io->running) {
388 		hammer_io_wait(io);
389 	}
390 
391 	/*
392 	 * Return control of the buffer to the kernel (with the proviso
393 	 * that our bioops can override kernel decisions with regard to
394 	 * the buffer).
395 	 */
396 	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
397 		/*
398 		 * Always disassociate the bp if an explicit flush
399 		 * was requested and the IO completed with no error
400 		 * (so unmount can really clean up the structure).
401 		 */
402 		if (io->released) {
403 			regetblk(bp);
404 			BUF_KERNPROC(bp);
405 		} else {
406 			io->released = 1;
407 		}
408 		hammer_io_disassociate((hammer_io_structure_t)io);
409 		/* return the bp */
410 	} else if (io->modified) {
411 		/*
412 		 * Only certain IO types can be released to the kernel if
413 		 * the buffer has been modified.
414 		 *
415 		 * volume and meta-data IO types may only be explicitly
416 		 * flushed by HAMMER.
417 		 */
418 		switch(io->type) {
419 		case HAMMER_STRUCTURE_DATA_BUFFER:
420 		case HAMMER_STRUCTURE_UNDO_BUFFER:
421 			if (io->released == 0) {
422 				io->released = 1;
423 				bdwrite(bp);
424 			}
425 			break;
426 		default:
427 			break;
428 		}
429 		bp = NULL;	/* bp left associated */
430 	} else if (io->released == 0) {
431 		/*
432 		 * Clean buffers can be generally released to the kernel.
433 		 * We leave the bp passively associated with the HAMMER
434 		 * structure and use bioops to disconnect it later on
435 		 * if the kernel wants to discard the buffer.
436 		 *
437 		 * We can steal the structure's ownership of the bp.
438 		 */
439 		io->released = 1;
440 		if (bp->b_flags & B_LOCKED) {
441 			hammer_io_disassociate(iou);
442 			/* return the bp */
443 		} else {
444 			if (io->reclaim) {
445 				hammer_io_disassociate(iou);
446 				/* return the bp */
447 			} else {
448 				/* return the bp (bp passively associated) */
449 			}
450 		}
451 	} else {
452 		/*
453 	 * A released buffer is passively associated with our
454 		 * hammer_io structure.  The kernel cannot destroy it
455 		 * without making a bioops call.  If the kernel (B_LOCKED)
456 		 * or we (reclaim) requested that the buffer be destroyed
457 		 * we destroy it, otherwise we do a quick get/release to
458 		 * reset its position in the kernel's LRU list.
459 		 *
460 		 * Leaving the buffer passively associated allows us to
461 		 * use the kernel's LRU buffer flushing mechanisms rather
462 	 * than rolling our own.
463 		 *
464 		 * XXX there are two ways of doing this.  We can re-acquire
465 		 * and passively release to reset the LRU, or not.
466 		 */
467 		if (io->running == 0) {
468 			regetblk(bp);
469 			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
470 				hammer_io_disassociate(iou);
471 				/* return the bp */
472 			} else {
473 				/* return the bp (bp passively associated) */
474 			}
475 		} else {
476 			/*
477 			 * bp is left passively associated but we do not
478 			 * try to reacquire it.  Interactions with the io
479 			 * structure will occur on completion of the bp's
480 			 * I/O.
481 			 */
482 			bp = NULL;
483 		}
484 	}
485 	return(bp);
486 }
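
/*
 * Editorial summary of the dispositions above:
 *
 *	modified, flush requested     -> hammer_io_flush()
 *	modified, B_LOCKED            -> flush DATA/UNDO types only
 *	clean, flush or reclaim       -> disassociate, bp returned
 *	clean, never released         -> release passively, bp returned
 *	clean, released, not running  -> regetblk to reset the LRU, or
 *	                                 disassociate on B_LOCKED/reclaim
 *	clean, released, running      -> leave passively associated (NULL)
 */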
487 
488 /*
489  * This routine is called with a locked IO when a flush is desired and
490  * no other references to the structure exist other than ours.  This
491  * routine is ONLY called when HAMMER believes it is safe to flush a
492  * potentially modified buffer out.
493  */
494 void
495 hammer_io_flush(struct hammer_io *io, int reclaim)
496 {
497 	struct buf *bp;
498 
499 	/*
500 	 * Degenerate case - nothing to flush if nothing is dirty.
501 	 */
502 	if (io->modified == 0) {
503 		return;
504 	}
505 
506 	KKASSERT(io->bp);
507 	KKASSERT(io->modify_refs <= 0);
508 
509 	/*
510 	 * Acquire ownership of the bp, particularly before we clear our
511 	 * modified flag.
512 	 *
513 	 * We are going to bawrite() this bp.  Don't leave a window where
514 	 * io->released is set, we actually own the bp rather than our
515 	 * buffer.
516 	 */
517 	bp = io->bp;
518 	if (io->released) {
519 		regetblk(bp);
520 		/* BUF_KERNPROC(io->bp); */
521 		/* io->released = 0; */
522 		KKASSERT(io->released);
523 		KKASSERT(io->bp == bp);
524 	}
525 	io->released = 1;
526 
527 	if (reclaim) {
528 		io->reclaim = 1;
529 		if ((bp->b_flags & B_LOCKED) == 0) {
530 			bp->b_flags |= B_LOCKED;
531 			++hammer_count_io_locked;
532 		}
533 	}
534 
535 	/*
536 	 * Acquire exclusive access to the bp and then clear the modified
537 	 * state of the buffer prior to issuing I/O to interlock any
538 	 * modifications made while the I/O is in progress.  This shouldn't
539 	 * happen anyway but losing data would be worse.  The modified bit
540 	 * will be rechecked after the IO completes.
541 	 *
542 	 * NOTE: This call also finalizes the buffer's content (inval == 0).
543 	 *
544 	 * This is only legal when lock.refs == 1 (otherwise we might clear
545 	 * the modified bit while there are still users of the cluster
546 	 * modifying the data).
547 	 *
548 	 * Do this before potentially blocking so any attempt to modify the
549 	 * ondisk while we are blocked blocks waiting for us.
550 	 */
551 	hammer_ref(&io->lock);
552 	hammer_io_clear_modify(io, 0);
553 	hammer_unref(&io->lock);
554 
555 	/*
556 	 * Transfer ownership to the kernel and initiate I/O.
557 	 */
558 	io->running = 1;
559 	io->hmp->io_running_space += io->bytes;
560 	hammer_count_io_running_write += io->bytes;
561 	bawrite(bp);
562 	hammer_io_flush_mark(io->volume);
563 }
564 
565 /************************************************************************
566  *				BUFFER DIRTYING				*
567  ************************************************************************
568  *
569  * These routines deal with dependencies created when IO buffers get
570  * modified.  The caller must call hammer_modify_*() on a referenced
571  * HAMMER structure prior to modifying its on-disk data.
572  *
573  * Any intent to modify an IO buffer acquires the related bp and imposes
574  * various write ordering dependencies.
575  */
576 
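/*
 * Illustrative modify sequence (editorial sketch; vol0_next_tid is
 * assumed from HAMMER's on-disk volume header):
 *
 *	hammer_modify_volume(trans, volume, &ondisk->vol0_next_tid,
 *			     sizeof(ondisk->vol0_next_tid));
 *	ondisk->vol0_next_tid = next_tid;
 *	hammer_modify_volume_done(volume);
 *
 * The modify call generates the UNDO record *before* the data changes;
 * the done call drops modify_refs and wakes any write-interlock waiter.
 */
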
577 /*
578  * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
579  * are locked until the flusher can deal with them, pure data buffers
580  * can be written out.
581  */
582 static
583 void
584 hammer_io_modify(hammer_io_t io, int count)
585 {
586 	/*
587 	 * io->modify_refs must be >= 0
588 	 */
589 	while (io->modify_refs < 0) {
590 		io->waitmod = 1;
591 		tsleep(io, 0, "hmrmod", 0);
592 	}
593 
594 	/*
595 	 * Shortcut if nothing to do.
596 	 */
597 	KKASSERT(io->lock.refs != 0 && io->bp != NULL);
598 	io->modify_refs += count;
599 	if (io->modified && io->released == 0)
600 		return;
601 
602 	hammer_lock_ex(&io->lock);
603 	if (io->modified == 0) {
604 		hammer_io_set_modlist(io);
605 		io->modified = 1;
606 	}
607 	if (io->released) {
608 		regetblk(io->bp);
609 		BUF_KERNPROC(io->bp);
610 		io->released = 0;
611 		KKASSERT(io->modified != 0);
612 	}
613 	hammer_unlock(&io->lock);
614 }
615 
616 static __inline
617 void
618 hammer_io_modify_done(hammer_io_t io)
619 {
620 	KKASSERT(io->modify_refs > 0);
621 	--io->modify_refs;
622 	if (io->modify_refs == 0 && io->waitmod) {
623 		io->waitmod = 0;
624 		wakeup(io);
625 	}
626 }
627 
628 void
629 hammer_io_write_interlock(hammer_io_t io)
630 {
631 	while (io->modify_refs != 0) {
632 		io->waitmod = 1;
633 		tsleep(io, 0, "hmrmod", 0);
634 	}
635 	io->modify_refs = -1;
636 }
637 
638 void
639 hammer_io_done_interlock(hammer_io_t io)
640 {
641 	KKASSERT(io->modify_refs == -1);
642 	io->modify_refs = 0;
643 	if (io->waitmod) {
644 		io->waitmod = 0;
645 		wakeup(io);
646 	}
647 }
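
/*
 * Illustrative interlock bracket (editorial sketch): direct-write style
 * paths are assumed to wrap their copy into the on-disk image with
 *
 *	hammer_io_write_interlock(io);
 *	bcopy(src, dst, len);
 *	hammer_io_done_interlock(io);
 *
 * which parks modify_refs at -1 so that hammer_io_modify() callers block
 * for the duration of the copy.
 */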
648 
649 /*
650  * Caller intends to modify a volume's ondisk structure.
651  *
652  * This is only allowed if we are the flusher or we have a ref on the
653  * sync_lock.
654  */
655 void
656 hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
657 		     void *base, int len)
658 {
659 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
660 
661 	hammer_io_modify(&volume->io, 1);
662 	if (len) {
663 		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
664 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
665 		hammer_generate_undo(trans,
666 			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
667 			 base, len);
668 	}
669 }
670 
671 /*
672  * Caller intends to modify a buffer's ondisk structure.
673  *
674  * This is only allowed if we are the flusher or we have a ref on the
675  * sync_lock.
676  */
677 void
678 hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
679 		     void *base, int len)
680 {
681 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
682 
683 	hammer_io_modify(&buffer->io, 1);
684 	if (len) {
685 		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
686 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
687 		hammer_generate_undo(trans,
688 				     buffer->zone2_offset + rel_offset,
689 				     base, len);
690 	}
691 }
692 
693 void
694 hammer_modify_volume_done(hammer_volume_t volume)
695 {
696 	hammer_io_modify_done(&volume->io);
697 }
698 
699 void
700 hammer_modify_buffer_done(hammer_buffer_t buffer)
701 {
702 	hammer_io_modify_done(&buffer->io);
703 }
704 
705 /*
706  * Mark an entity as not being dirty any more and finalize any
707  * delayed adjustments to the buffer.
708  *
709  * Delayed adjustments are an important performance enhancement, allowing
710  * us to avoid recalculating B-Tree node CRCs over and over again when
711  * making bulk-modifications to the B-Tree.
712  *
713  * If inval is non-zero delayed adjustments are ignored.
714  *
715  * This routine may dereference related btree nodes and cause the
716  * buffer to be dereferenced.  The caller must own a reference on io.
717  */
718 void
719 hammer_io_clear_modify(struct hammer_io *io, int inval)
720 {
721 	if (io->modified == 0)
722 		return;
723 
724 	/*
725 	 * Take us off the mod-list and clear the modified bit.
726 	 */
727 	KKASSERT(io->mod_list != NULL);
728 	if (io->mod_list == &io->hmp->volu_list ||
729 	    io->mod_list == &io->hmp->meta_list) {
730 		io->hmp->locked_dirty_space -= io->bytes;
731 		hammer_count_dirtybufspace -= io->bytes;
732 	}
733 	TAILQ_REMOVE(io->mod_list, io, mod_entry);
734 	io->mod_list = NULL;
735 	io->modified = 0;
736 
737 	/*
738 	 * If this bit is not set there are no delayed adjustments.
739 	 */
740 	if (io->gencrc == 0)
741 		return;
742 	io->gencrc = 0;
743 
744 	/*
745 	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
746 	 * on the node (& underlying buffer).  Release the node after clearing
747 	 * the flag.
748 	 */
749 	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
750 		hammer_buffer_t buffer = (void *)io;
751 		hammer_node_t node;
752 
753 restart:
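		/*
		 * Editorial note: hammer_rel_node() below can remove the
		 * node from buffer->clist, so the scan restarts from the
		 * head rather than continuing a possibly stale iterator.
		 */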
754 		TAILQ_FOREACH(node, &buffer->clist, entry) {
755 			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
756 				continue;
757 			node->flags &= ~HAMMER_NODE_NEEDSCRC;
758 			KKASSERT(node->ondisk);
759 			if (inval == 0)
760 				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
761 			hammer_rel_node(node);
762 			goto restart;
763 		}
764 	}
765 	/* caller must still have ref on io */
766 	KKASSERT(io->lock.refs > 0);
767 }
768 
769 /*
770  * Clear the IO's modify list.  Even though the IO is no longer modified
771  * it may still be on the lose_list.  This routine is called just before
772  * the governing hammer_buffer is destroyed.
773  */
774 void
775 hammer_io_clear_modlist(struct hammer_io *io)
776 {
777 	KKASSERT(io->modified == 0);
778 	if (io->mod_list) {
779 		crit_enter();	/* biodone race against list */
780 		KKASSERT(io->mod_list == &io->hmp->lose_list);
781 		TAILQ_REMOVE(io->mod_list, io, mod_entry);
782 		io->mod_list = NULL;
783 		crit_exit();
784 	}
785 }
786 
787 static void
788 hammer_io_set_modlist(struct hammer_io *io)
789 {
790 	struct hammer_mount *hmp = io->hmp;
791 
792 	KKASSERT(io->mod_list == NULL);
793 
794 	switch(io->type) {
795 	case HAMMER_STRUCTURE_VOLUME:
796 		io->mod_list = &hmp->volu_list;
797 		hmp->locked_dirty_space += io->bytes;
798 		hammer_count_dirtybufspace += io->bytes;
799 		break;
800 	case HAMMER_STRUCTURE_META_BUFFER:
801 		io->mod_list = &hmp->meta_list;
802 		hmp->locked_dirty_space += io->bytes;
803 		hammer_count_dirtybufspace += io->bytes;
804 		break;
805 	case HAMMER_STRUCTURE_UNDO_BUFFER:
806 		io->mod_list = &hmp->undo_list;
807 		break;
808 	case HAMMER_STRUCTURE_DATA_BUFFER:
809 		io->mod_list = &hmp->data_list;
810 		break;
811 	}
812 	TAILQ_INSERT_TAIL(io->mod_list, io, mod_entry);
813 }
814 
815 /************************************************************************
816  *				HAMMER_BIOOPS				*
817  ************************************************************************
818  *
819  */
820 
821 /*
822  * Pre-IO initiation kernel callback - cluster build only
823  */
824 static void
825 hammer_io_start(struct buf *bp)
826 {
827 }
828 
829 /*
830  * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
831  *
832  * NOTE: HAMMER may modify a buffer after initiating I/O.  The modified bit
833  * may also be set if we were marking a cluster header open.  Only remove
834  * our dependency if the modified bit is clear.
835  */
836 static void
837 hammer_io_complete(struct buf *bp)
838 {
839 	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
840 
841 	KKASSERT(iou->io.released == 1);
842 
843 	/*
844 	 * Deal with people waiting for I/O to drain
845 	 */
846 	if (iou->io.running) {
847 		/*
848 		 * Deal with critical write errors.  Once a critical error
849 		 * has been flagged in hmp, the UNDO FIFO will not be updated.
850 		 * That way crash recovery will give us a consistent
851 		 * filesystem.
852 		 *
853 		 * Because of this we can throw away failed UNDO buffers.  If
854 		 * we throw away META or DATA buffers we risk corrupting
855 		 * the now read-only version of the filesystem visible to
856 		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
857 		 * by the kernel and ref the io so it doesn't get thrown
858 		 * away.
859 		 */
860 		if (bp->b_flags & B_ERROR) {
861 			hammer_critical_error(iou->io.hmp, NULL, bp->b_error,
862 					      "while flushing meta-data");
863 			switch(iou->io.type) {
864 			case HAMMER_STRUCTURE_UNDO_BUFFER:
865 				break;
866 			default:
867 				if (iou->io.ioerror == 0) {
868 					iou->io.ioerror = 1;
869 					if (iou->io.lock.refs == 0)
870 						++hammer_count_refedbufs;
871 					hammer_ref(&iou->io.lock);
872 				}
873 				break;
874 			}
875 			bp->b_flags &= ~B_ERROR;
876 			bundirty(bp);
877 #if 0
878 			hammer_io_set_modlist(&iou->io);
879 			iou->io.modified = 1;
880 #endif
881 		}
882 		hammer_stats_disk_write += iou->io.bytes;
883 		hammer_count_io_running_write -= iou->io.bytes;
884 		iou->io.hmp->io_running_space -= iou->io.bytes;
885 		if (iou->io.hmp->io_running_space == 0)
886 			wakeup(&iou->io.hmp->io_running_space);
887 		KKASSERT(iou->io.hmp->io_running_space >= 0);
888 		iou->io.running = 0;
889 	} else {
890 		hammer_stats_disk_read += iou->io.bytes;
891 	}
892 
893 	if (iou->io.waiting) {
894 		iou->io.waiting = 0;
895 		wakeup(iou);
896 	}
897 
898 	/*
899 	 * If B_LOCKED is set someone wanted to deallocate the bp at some
900 	 * point, do it now if refs has become zero.
901 	 */
902 	if ((bp->b_flags & B_LOCKED) && iou->io.lock.refs == 0) {
903 		KKASSERT(iou->io.modified == 0);
904 		--hammer_count_io_locked;
905 		bp->b_flags &= ~B_LOCKED;
906 		hammer_io_deallocate(bp);
907 		/* structure may be dead now */
908 	}
909 }
910 
911 /*
912  * Callback from kernel when it wishes to deallocate a passively
913  * associated structure.  This mostly occurs with clean buffers
914  * but it may be possible for a holding structure to be marked dirty
915  * while its buffer is passively associated.  The caller owns the bp.
916  *
917  * If we cannot disassociate we set B_LOCKED to prevent the buffer
918  * from getting reused.
919  *
920  * WARNING: Because this can be called directly by getnewbuf we cannot
921  * recurse into the tree.  If a bp cannot be immediately disassociated
922  * our only recourse is to set B_LOCKED.
923  *
924  * WARNING: This may be called from an interrupt via hammer_io_complete()
925  */
926 static void
927 hammer_io_deallocate(struct buf *bp)
928 {
929 	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
930 
931 	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
932 	if (iou->io.lock.refs > 0 || iou->io.modified) {
933 		/*
934 		 * It is not legal to disassociate a modified buffer.  This
935 		 * case really shouldn't ever occur.
936 		 */
937 		bp->b_flags |= B_LOCKED;
938 		++hammer_count_io_locked;
939 	} else {
940 		/*
941 		 * Disassociate the BP.  If the io has no refs left we
942 		 * have to add it to the loose list.
943 		 */
944 		hammer_io_disassociate(iou);
945 		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
946 			KKASSERT(iou->io.bp == NULL);
947 			KKASSERT(iou->io.mod_list == NULL);
948 			crit_enter();	/* biodone race against list */
949 			iou->io.mod_list = &iou->io.hmp->lose_list;
950 			TAILQ_INSERT_TAIL(iou->io.mod_list, &iou->io, mod_entry);
951 			crit_exit();
952 		}
953 	}
954 }
955 
956 static int
957 hammer_io_fsync(struct vnode *vp)
958 {
959 	return(0);
960 }
961 
962 /*
963  * NOTE: will not be called unless we tell the kernel about the
964  * bioops.  Unused... we use the mount's VFS_SYNC instead.
965  */
966 static int
967 hammer_io_sync(struct mount *mp)
968 {
969 	return(0);
970 }
971 
972 static void
973 hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
974 {
975 }
976 
977 /*
978  * I/O pre-check for reading and writing.  HAMMER only uses this for
979  * B_CACHE buffers so checkread just shouldn't happen, but if it does
980  * allow it.
981  *
982  * Writing is a different case.  We don't want the kernel to try to write
983  * out a buffer that HAMMER may be modifying passively or which has a
984  * dependency.  In addition, kernel-demanded writes can only proceed for
985  * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
986  * buffer types can only be explicitly written by the flusher.
987  *
988  * checkwrite will only be called for bdwrite()n buffers.  If we return
989  * success the kernel is guaranteed to initiate the buffer write.
990  */
991 static int
992 hammer_io_checkread(struct buf *bp)
993 {
994 	return(0);
995 }
996 
997 static int
998 hammer_io_checkwrite(struct buf *bp)
999 {
1000 	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
1001 
1002 	/*
1003 	 * This shouldn't happen under normal operation.
1004 	 */
1005 	if (io->type == HAMMER_STRUCTURE_VOLUME ||
1006 	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
1007 		if (!panicstr)
1008 			panic("hammer_io_checkwrite: illegal buffer");
1009 		if ((bp->b_flags & B_LOCKED) == 0) {
1010 			bp->b_flags |= B_LOCKED;
1011 			++hammer_count_io_locked;
1012 		}
1013 		return(1);
1014 	}
1015 
1016 	/*
1017 	 * We can only clear the modified bit if the IO is not currently
1018 	 * undergoing modification.  Otherwise we may miss changes.
1019 	 *
1020 	 * Only data and undo buffers can reach here.  These buffers do
1021 	 * not have terminal crc functions but we temporarily reference
1022 	 * the IO anyway, just in case.
1023 	 */
1024 	if (io->modify_refs == 0 && io->modified) {
1025 		hammer_ref(&io->lock);
1026 		hammer_io_clear_modify(io, 0);
1027 		hammer_unref(&io->lock);
1028 	} else if (io->modified) {
1029 		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
1030 	}
1031 
1032 	/*
1033 	 * The kernel is going to start the IO, set io->running.
1034 	 */
1035 	KKASSERT(io->running == 0);
1036 	io->running = 1;
1037 	io->hmp->io_running_space += io->bytes;
1038 	hammer_count_io_running_write += io->bytes;
1039 	return(0);
1040 }
1041 
1042 /*
1043  * Return non-zero if we wish to delay the kernel's attempt to flush
1044  * this buffer to disk.
1045  */
1046 static int
1047 hammer_io_countdeps(struct buf *bp, int n)
1048 {
1049 	return(0);
1050 }
1051 
1052 struct bio_ops hammer_bioops = {
1053 	.io_start	= hammer_io_start,
1054 	.io_complete	= hammer_io_complete,
1055 	.io_deallocate	= hammer_io_deallocate,
1056 	.io_fsync	= hammer_io_fsync,
1057 	.io_sync	= hammer_io_sync,
1058 	.io_movedeps	= hammer_io_movedeps,
1059 	.io_countdeps	= hammer_io_countdeps,
1060 	.io_checkread	= hammer_io_checkread,
1061 	.io_checkwrite	= hammer_io_checkwrite,
1062 };
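
/*
 * Editorial note: these callbacks only take effect once hammer_io_read()
 * or hammer_io_new() has pointed bp->b_ops at hammer_bioops and linked
 * io->worklist onto bp->b_dep; that linkage is how the kernel finds the
 * owning hammer_io again in hammer_io_deallocate() and friends.
 */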
1063 
1064 /************************************************************************
1065  *				DIRECT IO OPS 				*
1066  ************************************************************************
1067  *
1068  * These functions operate directly on the buffer cache buffer associated
1069  * with a front-end vnode rather than a back-end device vnode.
1070  */
1071 
1072 /*
1073  * Read a buffer associated with a front-end vnode directly from the
1074  * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
1075  * we validate the CRC.
1076  *
1077  * We must check for the presence of a HAMMER buffer to handle the case
1078  * where the reblocker has rewritten the data (which it does via the HAMMER
1079  * buffer system, not via the high-level vnode buffer cache), but not yet
1080  * committed the buffer to the media.
1081  */
1082 int
1083 hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
1084 		      hammer_btree_leaf_elm_t leaf)
1085 {
1086 	hammer_off_t buf_offset;
1087 	hammer_off_t zone2_offset;
1088 	hammer_volume_t volume;
1089 	struct buf *bp;
1090 	struct bio *nbio;
1091 	int vol_no;
1092 	int error;
1093 
1094 	buf_offset = bio->bio_offset;
1095 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
1096 		 HAMMER_ZONE_LARGE_DATA);
1097 
1098 	/*
1099 	 * The buffer cache may have an aliased buffer (the reblocker can
1100 	 * write them).  If it does we have to sync any dirty data before
1101 	 * we can build our direct-read.  This is a non-critical code path.
1102 	 */
1103 	bp = bio->bio_buf;
1104 	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
1105 
1106 	/*
1107 	 * Resolve to a zone-2 offset.  The conversion just requires
1108 	 * munging the top 4 bits but we want to abstract it anyway
1109 	 * so the blockmap code can verify the zone assignment.
1110 	 */
1111 	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1112 	if (error)
1113 		goto done;
1114 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
1115 		 HAMMER_ZONE_RAW_BUFFER);
1116 
1117 	/*
1118 	 * Resolve volume and raw-offset for 3rd level bio.  The
1119 	 * offset will be specific to the volume.
1120 	 */
1121 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
1122 	volume = hammer_get_volume(hmp, vol_no, &error);
1123 	if (error == 0 && zone2_offset >= volume->maxbuf_off)
1124 		error = EIO;
1125 
1126 	if (error == 0) {
1127 		/*
1128 		 * 3rd level bio
1129 		 */
1130 		nbio = push_bio(bio);
1131 		nbio->bio_offset = volume->ondisk->vol_buf_beg +
1132 				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
1133 #if 0
1134 		/*
1135 		 * XXX disabled - our CRC check doesn't work if the OS
1136 		 * does bogus_page replacement on the direct-read.
1137 		 */
1138 		if (leaf && hammer_verify_data) {
1139 			nbio->bio_done = hammer_io_direct_read_complete;
1140 			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
1141 		}
1142 #endif
1143 		hammer_stats_disk_read += bp->b_bufsize;
1144 		vn_strategy(volume->devvp, nbio);
1145 	}
1146 	hammer_rel_volume(volume, 0);
1147 done:
1148 	if (error) {
1149 		kprintf("hammer_direct_read: failed @ %016llx\n",
1150 			(long long)zone2_offset);
1151 		bp->b_error = error;
1152 		bp->b_flags |= B_ERROR;
1153 		biodone(bio);
1154 	}
1155 	return(error);
1156 }
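
/*
 * Editorial sketch of the offset translation used above: a zone-2
 * (RAW_BUFFER) offset becomes a volume-relative byte offset via
 *
 *	raw_offset = volume->ondisk->vol_buf_beg +
 *		     (zone2_offset & HAMMER_OFF_SHORT_MASK);
 *
 * The masked-off top bits encode only the zone and volume number; the
 * same arithmetic appears in hammer_io_inval().
 */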
1157 
1158 #if 0
1159 /*
1160  * On completion of the BIO this callback must check the data CRC
1161  * and chain to the previous bio.
1162  */
1163 static
1164 void
1165 hammer_io_direct_read_complete(struct bio *nbio)
1166 {
1167 	struct bio *obio;
1168 	struct buf *bp;
1169 	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;
1170 
1171 	bp = nbio->bio_buf;
1172 	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
1173 		kprintf("HAMMER: data_crc error @%016llx/%d\n",
1174 			nbio->bio_offset, bp->b_bufsize);
1175 		if (hammer_debug_critical)
1176 			Debugger("data_crc on read");
1177 		bp->b_flags |= B_ERROR;
1178 		bp->b_error = EIO;
1179 	}
1180 	obio = pop_bio(nbio);
1181 	biodone(obio);
1182 }
1183 #endif
1184 
1185 /*
1186  * Write a buffer associated with a front-end vnode directly to the
1187  * disk media.  The bio may be issued asynchronously.
1188  *
1189  * The BIO is associated with the specified record and RECF_DIRECT_IO
1190  * is set.  The record is added to its object.
1191  */
1192 int
1193 hammer_io_direct_write(hammer_mount_t hmp, hammer_record_t record,
1194 		       struct bio *bio)
1195 {
1196 	hammer_btree_leaf_elm_t leaf = &record->leaf;
1197 	hammer_off_t buf_offset;
1198 	hammer_off_t zone2_offset;
1199 	hammer_volume_t volume;
1200 	hammer_buffer_t buffer;
1201 	struct buf *bp;
1202 	struct bio *nbio;
1203 	char *ptr;
1204 	int vol_no;
1205 	int error;
1206 
1207 	buf_offset = leaf->data_offset;
1208 
1209 	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
1210 	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
1211 
1212 	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
1213 	    leaf->data_len >= HAMMER_BUFSIZE) {
1214 		/*
1215 		 * We are using the vnode's bio to write directly to the
1216 		 * media, any hammer_buffer at the same zone-X offset will
1217 		 * now have stale data.
1218 		 */
1219 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1220 		vol_no = HAMMER_VOL_DECODE(zone2_offset);
1221 		volume = hammer_get_volume(hmp, vol_no, &error);
1222 
1223 		if (error == 0 && zone2_offset >= volume->maxbuf_off)
1224 			error = EIO;
1225 		if (error == 0) {
1226 			bp = bio->bio_buf;
1227 			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
1228 			/*
1229 			hammer_del_buffers(hmp, buf_offset,
1230 					   zone2_offset, bp->b_bufsize);
1231 			*/
1232 
1233 			/*
1234 			 * Second level bio - cached zone2 offset.
1235 			 *
1236 			 * (We can put our bio_done function in either the
1237 			 *  2nd or 3rd level).
1238 			 */
1239 			nbio = push_bio(bio);
1240 			nbio->bio_offset = zone2_offset;
1241 			nbio->bio_done = hammer_io_direct_write_complete;
1242 			nbio->bio_caller_info1.ptr = record;
1243 			record->zone2_offset = zone2_offset;
1244 			record->flags |= HAMMER_RECF_DIRECT_IO |
1245 					 HAMMER_RECF_DIRECT_INVAL;
1246 
1247 			/*
1248 			 * Third level bio - raw offset specific to the
1249 			 * correct volume.
1250 			 */
1251 			zone2_offset &= HAMMER_OFF_SHORT_MASK;
1252 			nbio = push_bio(nbio);
1253 			nbio->bio_offset = volume->ondisk->vol_buf_beg +
1254 					   zone2_offset;
1255 			hammer_stats_disk_write += bp->b_bufsize;
1256 			vn_strategy(volume->devvp, nbio);
1257 			hammer_io_flush_mark(volume);
1258 		}
1259 		hammer_rel_volume(volume, 0);
1260 	} else {
1261 		/*
1262 		 * Must fit in a standard HAMMER buffer.  In this case all
1263 		 * consumers use the HAMMER buffer system and RECF_DIRECT_IO
1264 		 * does not need to be set-up.
1265 		 */
1266 		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
1267 		buffer = NULL;
1268 		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
1269 		if (error == 0) {
1270 			bp = bio->bio_buf;
1271 			bp->b_flags |= B_AGE;
1272 			hammer_io_modify(&buffer->io, 1);
1273 			bcopy(bp->b_data, ptr, leaf->data_len);
1274 			hammer_io_modify_done(&buffer->io);
1275 			hammer_rel_buffer(buffer, 0);
1276 			bp->b_resid = 0;
1277 			biodone(bio);
1278 		}
1279 	}
1280 	if (error == 0) {
1281 		/*
1282 		 * The record is all set up now, add it.  Potential conflicts
1283 		 * have already been dealt with.
1284 		 */
1285 		error = hammer_mem_add(record);
1286 		KKASSERT(error == 0);
1287 	} else {
1288 		/*
1289 		 * Major suckage occurred.  Also note: the record was never added
1290 		 * to the tree so we do not have to worry about the backend.
1291 		 */
1292 		kprintf("hammer_direct_write: failed @ %016llx\n",
1293 			(long long)leaf->data_offset);
1294 		bp = bio->bio_buf;
1295 		bp->b_resid = 0;
1296 		bp->b_error = EIO;
1297 		bp->b_flags |= B_ERROR;
1298 		biodone(bio);
1299 		record->flags |= HAMMER_RECF_DELETED_FE;
1300 		hammer_rel_mem_record(record);
1301 	}
1302 	return(error);
1303 }
1304 
1305 /*
1306  * On completion of the BIO this callback must disconnect
1307  * it from the hammer_record and chain to the previous bio.
1308  *
1309  * An I/O error forces the mount to read-only.  Data buffers
1310  * are not B_LOCKED like meta-data buffers are, so we have to
1311  * throw the buffer away to prevent the kernel from retrying.
1312  */
1313 static
1314 void
1315 hammer_io_direct_write_complete(struct bio *nbio)
1316 {
1317 	struct bio *obio;
1318 	struct buf *bp;
1319 	hammer_record_t record = nbio->bio_caller_info1.ptr;
1320 
1321 	bp = nbio->bio_buf;
1322 	obio = pop_bio(nbio);
1323 	if (bp->b_flags & B_ERROR) {
1324 		hammer_critical_error(record->ip->hmp, record->ip,
1325 				      bp->b_error,
1326 				      "while writing bulk data");
1327 		bp->b_flags |= B_INVAL;
1328 	}
1329 	biodone(obio);
1330 
1331 	KKASSERT(record != NULL);
1332 	KKASSERT(record->flags & HAMMER_RECF_DIRECT_IO);
1333 	if (record->flags & HAMMER_RECF_DIRECT_WAIT) {
1334 		record->flags &= ~(HAMMER_RECF_DIRECT_IO |
1335 				   HAMMER_RECF_DIRECT_WAIT);
1336 		/* record can disappear once DIRECT_IO flag is cleared */
1337 		wakeup(&record->flags);
1338 	} else {
1339 		record->flags &= ~HAMMER_RECF_DIRECT_IO;
1340 		/* record can disappear once DIRECT_IO flag is cleared */
1341 	}
1342 }
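
/*
 * Editorial note: the DIRECT_IO/DIRECT_WAIT handshake above pairs with
 * hammer_io_direct_wait() below; the record may be freed the moment
 * DIRECT_IO is cleared, hence the careful flag ordering and the wakeup
 * on &record->flags.
 */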
1343 
1344 
1345 /*
1346  * This is called before a record is either committed to the B-Tree
1347  * or destroyed, to resolve any associated direct-IO.
1348  *
1349  * (1) We must wait for any direct-IO related to the record to complete.
1350  *
1351  * (2) We must remove any buffer cache aliases for data accessed via
1352  *     leaf->data_offset or zone2_offset so non-direct-IO consumers
1353  *     (the mirroring and reblocking code) do not see stale data.
1354  */
1355 void
1356 hammer_io_direct_wait(hammer_record_t record)
1357 {
1358 	/*
1359 	 * Wait for I/O to complete
1360 	 */
1361 	if (record->flags & HAMMER_RECF_DIRECT_IO) {
1362 		crit_enter();
1363 		while (record->flags & HAMMER_RECF_DIRECT_IO) {
1364 			record->flags |= HAMMER_RECF_DIRECT_WAIT;
1365 			tsleep(&record->flags, 0, "hmdiow", 0);
1366 		}
1367 		crit_exit();
1368 	}
1369 
1370 	/*
1371 	 * Invalidate any related buffer cache aliases associated with the
1372 	 * backing device.  This is needed because the buffer cache buffer
1373 	 * for file data is associated with the file vnode, not the backing
1374 	 * device vnode.
1375 	 *
1376 	 * XXX I do not think this case can occur any more now that
1377 	 * reservations ensure that all such buffers are removed before
1378 	 * an area can be reused.
1379 	 */
1380 	if (record->flags & HAMMER_RECF_DIRECT_INVAL) {
1381 		KKASSERT(record->leaf.data_offset);
1382 		hammer_del_buffers(record->ip->hmp, record->leaf.data_offset,
1383 				   record->zone2_offset, record->leaf.data_len,
1384 				   1);
1385 		record->flags &= ~HAMMER_RECF_DIRECT_INVAL;
1386 	}
1387 }
1388 
1389 /*
1390  * This is called to remove the second-level cached zone-2 offset from
1391  * frontend buffer cache buffers, now stale due to a data relocation.
1392  * These offsets are generated by cluster_read() via VOP_BMAP, or directly
1393  * by hammer_vop_strategy_read().
1394  *
1395  * This is rather nasty because here we have something like the reblocker
1396  * scanning the raw B-Tree with no held references on anything, really,
1397  * other than a shared lock on the B-Tree node, and we have to access the
1398  * frontend's buffer cache to check for and clean out the association.
1399  * Specifically, if the reblocker is moving data on the disk, these cached
1400  * offsets will become invalid.
1401  *
1402  * Only data record types associated with the large-data zone are subject
1403  * to direct-io and need to be checked.
1404  *
1405  */
1406 void
1407 hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
1408 {
1409 	struct hammer_inode_info iinfo;
1410 	int zone;
1411 
1412 	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
1413 		return;
1414 	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
1415 	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
1416 		return;
1417 	iinfo.obj_id = leaf->base.obj_id;
1418 	iinfo.obj_asof = 0;	/* unused */
1419 	iinfo.obj_localization = leaf->base.localization &
1420 				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
1421 	iinfo.u.leaf = leaf;
1422 	hammer_scan_inode_snapshots(hmp, &iinfo,
1423 				    hammer_io_direct_uncache_callback,
1424 				    leaf);
1425 }
1426 
1427 static int
1428 hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
1429 {
1430 	hammer_inode_info_t iinfo = data;
1431 	hammer_off_t data_offset;
1432 	hammer_off_t file_offset;
1433 	struct vnode *vp;
1434 	struct buf *bp;
1435 	int blksize;
1436 
1437 	if (ip->vp == NULL)
1438 		return(0);
1439 	data_offset = iinfo->u.leaf->data_offset;
1440 	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
1441 	blksize = iinfo->u.leaf->data_len;
1442 	KKASSERT((blksize & HAMMER_BUFMASK) == 0);
1443 
1444 	hammer_ref(&ip->lock);
1445 	if (hammer_get_vnode(ip, &vp) == 0) {
1446 		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
1447 		    bp->b_bio2.bio_offset != NOOFFSET) {
1448 			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
1449 			bp->b_bio2.bio_offset = NOOFFSET;
1450 			brelse(bp);
1451 		}
1452 		vput(vp);
1453 	}
1454 	hammer_rel_inode(ip, 0);
1455 	return(0);
1456 }
1457 
1458 
1459 /*
1460  * This function is called when writes may have occurred on the volume,
1461  * indicating that the device may be holding cached writes.
1462  */
1463 static void
1464 hammer_io_flush_mark(hammer_volume_t volume)
1465 {
1466 	volume->vol_flags |= HAMMER_VOLF_NEEDFLUSH;
1467 }
1468 
1469 /*
1470  * This function ensures that the device has flushed any cached writes out.
1471  */
1472 void
1473 hammer_io_flush_sync(hammer_mount_t hmp)
1474 {
1475 	hammer_volume_t volume;
1476 	struct buf *bp_base = NULL;
1477 	struct buf *bp;
1478 
1479 	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1480 		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
1481 			volume->vol_flags &= ~HAMMER_VOLF_NEEDFLUSH;
1482 			bp = getpbuf(NULL);
1483 			bp->b_bio1.bio_offset = 0;
1484 			bp->b_bufsize = 0;
1485 			bp->b_bcount = 0;
1486 			bp->b_cmd = BUF_CMD_FLUSH;
1487 			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
1488 			bp->b_bio1.bio_done = biodone_sync;
1489 			bp->b_bio1.bio_flags |= BIO_SYNC;
1490 			bp_base = bp;
1491 			vn_strategy(volume->devvp, &bp->b_bio1);
1492 		}
1493 	}
1494 	while ((bp = bp_base) != NULL) {
1495 		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
1496 		biowait(&bp->b_bio1, "hmrFLS");
1497 		relpbuf(bp, NULL);
1498 	}
1499 }
1500