xref: /dragonfly/sys/vfs/hammer/hammer_io.c (revision c6f73aab)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * IO Primitives and buffer cache management
36  *
37  * All major data-tracking structures in HAMMER contain a struct hammer_io
38  * which is used to manage their backing store.  We use filesystem buffers
39  * for backing store and we leave them passively associated with their
40  * HAMMER structures.
41  *
42  * If the kernel tries to destroy a passively associated buf which we cannot
43  * yet let go we set B_LOCKED in the buffer and then actively release it
44  * later when we can.
45  *
46  * The io_token is required for anything which might race bioops and bio_done
47  * callbacks, with one exception: a successful hammer_try_interlock_norefs().
48  * The fs_token will be held in all other cases.
49  */
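
/*
 * A minimal sketch of that token discipline (illustrative only): paths
 * which can race bioops or bio_done callbacks bracket their work with
 * the io_token, everything else runs under the fs_token.
 */
#if 0
	lwkt_gettoken(&hmp->io_token);	/* racing bioops/bio_done */
	/* ... touch io->running, io->waiting, io->mod_root, etc ... */
	lwkt_reltoken(&hmp->io_token);
#endif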
50 
51 #include <sys/fcntl.h>
52 #include <sys/nlookup.h>
53 
54 #include "hammer.h"
55 
56 static void hammer_io_modify(hammer_io_t io, int count);
57 static void hammer_io_deallocate(struct buf *bp);
58 static void hammer_indirect_callback(struct bio *bio);
59 static void hammer_io_direct_write_complete(struct bio *nbio);
60 static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
61 static void hammer_io_set_modlist(struct hammer_io *io);
62 static void hammer_io_flush_mark(hammer_volume_t volume);
63 
64 static int
65 hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2)
66 {
67 	hammer_off_t io1_offset;
68 	hammer_off_t io2_offset;
69 
70 	io1_offset = ((io1->offset & HAMMER_OFF_SHORT_MASK) << 8) |
71 		     io1->volume->vol_no;
72 	io2_offset = ((io2->offset & HAMMER_OFF_SHORT_MASK) << 8) |
73 		     io2->volume->vol_no;
74 
75 	if (io1_offset < io2_offset)
76 		return(-1);
77 	if (io1_offset > io2_offset)
78 		return(1);
79 	return(0);
80 }
81 
82 RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare);
83 
84 /*
85  * Initialize a new, already-zero'd hammer_io structure, or reinitialize
86  * an existing hammer_io structure which may have switched to another type.
87  */
88 void
89 hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
90 {
91 	io->volume = volume;
92 	io->hmp = volume->io.hmp;
93 	io->type = type;
94 }
95 
96 /*
97  * Helper routine to disassociate a buffer cache buffer from an I/O
98  * structure.  The io must be interlocked and marked appropriately for
99  * reclamation.
100  *
101  * The io must be in a released state with the io->bp owned and
102  * locked by the caller of this function.  When not called from an
103  * io_deallocate() this cannot race an io_deallocate() since the
104  * kernel would be unable to get the buffer lock in that case.
105  * (The released state in this case means we own the bp, not the
106  * hammer_io structure).
107  *
108  * The io may have 0 or 1 references depending on who called us.  The
109  * caller is responsible for dealing with the refs.
110  *
111  * This call can only be made when no action is required on the buffer.
112  *
113  * This function is guaranteed not to race against anything because we
114  * own both the io lock and the bp lock and are interlocked with no
115  * references.
116  */
117 static void
118 hammer_io_disassociate(hammer_io_structure_t iou)
119 {
120 	struct buf *bp = iou->io.bp;
121 
122 	KKASSERT(iou->io.released);
123 	KKASSERT(iou->io.modified == 0);
124 	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
125 	buf_dep_init(bp);
126 	iou->io.bp = NULL;
127 
128 	/*
129 	 * If the buffer was locked someone wanted to get rid of it.
130 	 */
131 	if (bp->b_flags & B_LOCKED) {
132 		atomic_add_int(&hammer_count_io_locked, -1);
133 		bp->b_flags &= ~B_LOCKED;
134 	}
135 	if (iou->io.reclaim) {
136 		bp->b_flags |= B_NOCACHE|B_RELBUF;
137 		iou->io.reclaim = 0;
138 	}
139 
140 	switch(iou->io.type) {
141 	case HAMMER_STRUCTURE_VOLUME:
142 		iou->volume.ondisk = NULL;
143 		break;
144 	case HAMMER_STRUCTURE_DATA_BUFFER:
145 	case HAMMER_STRUCTURE_META_BUFFER:
146 	case HAMMER_STRUCTURE_UNDO_BUFFER:
147 		iou->buffer.ondisk = NULL;
148 		break;
149 	case HAMMER_STRUCTURE_DUMMY:
150 		panic("hammer_io_disassociate: bad io type");
151 		break;
152 	}
153 }
154 
155 /*
156  * Wait for any physical IO to complete
157  *
158  * XXX we aren't interlocked against a spinlock or anything so there
159  *     is a small window in the interlock / io->running == 0 test.
160  */
161 void
162 hammer_io_wait(hammer_io_t io)
163 {
164 	if (io->running) {
165 		hammer_mount_t hmp = io->hmp;
166 
167 		lwkt_gettoken(&hmp->io_token);
168 		while (io->running) {
169 			io->waiting = 1;
170 			tsleep_interlock(io, 0);
171 			if (io->running)
172 				tsleep(io, PINTERLOCKED, "hmrflw", hz);
173 		}
174 		lwkt_reltoken(&hmp->io_token);
175 	}
176 }
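
/*
 * The interlocked sleep above pairs with the waker in hammer_io_complete()
 * later in this file.  A condensed sketch of the waker side:
 */
#if 0
	io->running = 0;
	if (io->waiting) {
		io->waiting = 0;
		wakeup(io);
	}
#endif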
177 
178 /*
179  * Wait for all currently queued HAMMER-initiated I/Os to complete.
180  *
181  * This is not supposed to count direct I/Os but some can leak
182  * through (for non-full-sized direct I/Os).
183  */
184 void
185 hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
186 {
187 	struct hammer_io iodummy;
188 	hammer_io_t io;
189 
190 	/*
191 	 * Degenerate case, no I/O is running
192 	 */
193 	lwkt_gettoken(&hmp->io_token);
194 	if (TAILQ_EMPTY(&hmp->iorun_list)) {
195 		lwkt_reltoken(&hmp->io_token);
196 		if (doflush)
197 			hammer_io_flush_sync(hmp);
198 		return;
199 	}
200 	bzero(&iodummy, sizeof(iodummy));
201 	iodummy.type = HAMMER_STRUCTURE_DUMMY;
202 
203 	/*
204  * Add a placemarker and then wait until it becomes the head of
205 	 * the list.
206 	 */
207 	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
208 	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
209 		tsleep(&iodummy, 0, ident, 0);
210 	}
211 
212 	/*
213 	 * Chain in case several placemarkers are present.
214 	 */
215 	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
216 	io = TAILQ_FIRST(&hmp->iorun_list);
217 	if (io && io->type == HAMMER_STRUCTURE_DUMMY)
218 		wakeup(io);
219 	lwkt_reltoken(&hmp->io_token);
220 
221 	if (doflush)
222 		hammer_io_flush_sync(hmp);
223 }
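
/*
 * Hypothetical caller sketch (the ident shown is illustrative): drain
 * all queued HAMMER I/O, then force the device write caches out.
 */
#if 0
	hammer_io_wait_all(hmp, "hmrflx", 1);
#endif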
224 
225 /*
226  * Clear a flagged error condition on an I/O buffer.  The caller must hold
227  * its own ref on the buffer.
228  */
229 void
230 hammer_io_clear_error(struct hammer_io *io)
231 {
232 	hammer_mount_t hmp = io->hmp;
233 
234 	lwkt_gettoken(&hmp->io_token);
235 	if (io->ioerror) {
236 		io->ioerror = 0;
237 		hammer_rel(&io->lock);
238 		KKASSERT(hammer_isactive(&io->lock));
239 	}
240 	lwkt_reltoken(&hmp->io_token);
241 }
242 
243 void
244 hammer_io_clear_error_noassert(struct hammer_io *io)
245 {
246 	hammer_mount_t hmp = io->hmp;
247 
248 	lwkt_gettoken(&hmp->io_token);
249 	if (io->ioerror) {
250 		io->ioerror = 0;
251 		hammer_rel(&io->lock);
252 	}
253 	lwkt_reltoken(&hmp->io_token);
254 }
255 
256 /*
257  * This is an advisory function only which tells the buffer cache
258  * the bp is not a meta-data buffer, even though it is backed by
259  * a block device.
260  *
261  * This is used by HAMMER's reblocking code to avoid trying to
262  * swapcache the filesystem's data when it is read or written
263  * by the reblocking code.
264  *
265  * The caller has a ref on the buffer preventing the bp from
266  * being disassociated from it.
267  */
268 void
269 hammer_io_notmeta(hammer_buffer_t buffer)
270 {
271 	if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
272 		hammer_mount_t hmp = buffer->io.hmp;
273 
274 		lwkt_gettoken(&hmp->io_token);
275 		buffer->io.bp->b_flags |= B_NOTMETA;
276 		lwkt_reltoken(&hmp->io_token);
277 	}
278 }
279 
280 /*
281  * Load bp for a HAMMER structure.  The io must be exclusively locked by
282  * the caller.
283  *
284  * This routine is mostly used on meta-data and small-data blocks.  Generally
285  * speaking HAMMER assumes some locality of reference and will cluster.
286  *
287  * Note that the caller (hammer_ondisk.c) may place further restrictions
288  * on clusterability via the limit (in bytes).  Typically large-data
289  * zones cannot be clustered due to their mixed buffer sizes.  This is
290  * not an issue since such clustering occurs in hammer_vnops at the
291  * regular file layer, whereas this is the buffered block device layer.
292  *
293  * No I/O callbacks can occur while we hold the buffer locked.
294  */
295 int
296 hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
297 {
298 	struct buf *bp;
299 	int   error;
300 
301 	if ((bp = io->bp) == NULL) {
302 		atomic_add_long(&hammer_count_io_running_read, io->bytes);
303 		if (hammer_cluster_enable && limit > io->bytes) {
304 			error = cluster_read(devvp, io->offset + limit,
305 					     io->offset, io->bytes,
306 					     HAMMER_CLUSTER_SIZE,
307 					     HAMMER_CLUSTER_SIZE,
308 					     &io->bp);
309 		} else {
310 			error = bread(devvp, io->offset, io->bytes, &io->bp);
311 		}
312 		hammer_stats_disk_read += io->bytes;
313 		atomic_add_long(&hammer_count_io_running_read, -io->bytes);
314 
315 		/*
316 		 * The code generally assumes b_ops/b_dep has been set-up,
317 		 * even if we error out here.
318 		 */
319 		bp = io->bp;
320 		if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
321 			const char *metatype;
322 
323 			switch(io->type) {
324 			case HAMMER_STRUCTURE_VOLUME:
325 				metatype = "volume";
326 				break;
327 			case HAMMER_STRUCTURE_META_BUFFER:
328 				switch(((struct hammer_buffer *)io)->
329 					zoneX_offset & HAMMER_OFF_ZONE_MASK) {
330 				case HAMMER_ZONE_BTREE:
331 					metatype = "btree";
332 					break;
333 				case HAMMER_ZONE_META:
334 					metatype = "meta";
335 					break;
336 				case HAMMER_ZONE_FREEMAP:
337 					metatype = "freemap";
338 					break;
339 				default:
340 					metatype = "meta?";
341 					break;
342 				}
343 				break;
344 			case HAMMER_STRUCTURE_DATA_BUFFER:
345 				metatype = "data";
346 				break;
347 			case HAMMER_STRUCTURE_UNDO_BUFFER:
348 				metatype = "undo";
349 				break;
350 			default:
351 				metatype = "unknown";
352 				break;
353 			}
354 			kprintf("doff %016jx %s\n",
355 				(intmax_t)bp->b_bio2.bio_offset,
356 				metatype);
357 		}
358 		bp->b_flags &= ~B_IODEBUG;
359 		bp->b_ops = &hammer_bioops;
360 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
361 
362 		/* io->worklist is locked by the io lock */
363 		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
364 		BUF_KERNPROC(bp);
365 		KKASSERT(io->modified == 0);
366 		KKASSERT(io->running == 0);
367 		KKASSERT(io->waiting == 0);
368 		io->released = 0;	/* we hold an active lock on bp */
369 	} else {
370 		error = 0;
371 	}
372 	return(error);
373 }
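
/*
 * Sketch of the caller side (hammer_ondisk.c drives this in practice;
 * the ondisk hookup shown is a simplified assumption):
 */
#if 0
	error = hammer_io_read(volume->devvp, &buffer->io, limit);
	if (error == 0)
		buffer->ondisk = (void *)buffer->io.bp->b_data;
#endif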
374 
375 /*
376  * Similar to hammer_io_read() but returns a zero'd out buffer instead.
377  * Must be called with the IO exclusively locked.
378  *
379  * vfs_bio_clrbuf() is kinda nasty; enforce serialization against background
380  * I/O by forcing the buffer to not be in a released state before calling
381  * it.
382  *
383  * This function will also mark the IO as modified but it will not
384  * increment the modify_refs count.
385  *
386  * No I/O callbacks can occur while we hold the buffer locked.
387  */
388 int
389 hammer_io_new(struct vnode *devvp, struct hammer_io *io)
390 {
391 	struct buf *bp;
392 
393 	if ((bp = io->bp) == NULL) {
394 		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
395 		bp = io->bp;
396 		bp->b_ops = &hammer_bioops;
397 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
398 
399 		/* io->worklist is locked by the io lock */
400 		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
401 		io->released = 0;
402 		KKASSERT(io->running == 0);
403 		io->waiting = 0;
404 		BUF_KERNPROC(bp);
405 	} else {
406 		if (io->released) {
407 			regetblk(bp);
408 			BUF_KERNPROC(bp);
409 			io->released = 0;
410 		}
411 	}
412 	hammer_io_modify(io, 0);
413 	vfs_bio_clrbuf(bp);
414 	return(0);
415 }
416 
417 /*
418  * Advance the activity count on the underlying buffer because
419  * HAMMER does not getblk/brelse on every access.
420  *
421  * The io->bp cannot go away while the buffer is referenced.
422  */
423 void
424 hammer_io_advance(struct hammer_io *io)
425 {
426 	if (io->bp)
427 		buf_act_advance(io->bp);
428 }
429 
430 /*
431  * Remove potential device level aliases against buffers managed by high level
432  * vnodes.  Aliases can also be created due to mixed buffer sizes or via
433  * direct access to the backing store device.
434  *
435  * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
436  * does not exist its backing VM pages might, and we have to invalidate
437  * those as well or a getblk() will reinstate them.
438  *
439  * Buffer cache buffers associated with hammer_buffers cannot be
440  * invalidated.
441  */
442 int
443 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
444 {
445 	hammer_io_structure_t iou;
446 	hammer_mount_t hmp;
447 	hammer_off_t phys_offset;
448 	struct buf *bp;
449 	int error;
450 
451 	hmp = volume->io.hmp;
452 	lwkt_gettoken(&hmp->io_token);
453 
454 	/*
455 	 * If a device buffer already exists for the specified physical
456 	 * offset use that, otherwise instantiate a buffer to cover any
457  * related VM pages, set B_NOCACHE, and brelse().
458 	 */
459 	phys_offset = volume->ondisk->vol_buf_beg +
460 		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
461 	if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL)
462 		bremfree(bp);
463 	else
464 		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
465 
466 	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
467 #if 0
468 		hammer_ref(&iou->io.lock);
469 		hammer_io_clear_modify(&iou->io, 1);
470 		bundirty(bp);
471 		iou->io.released = 0;
472 		BUF_KERNPROC(bp);
473 		iou->io.reclaim = 1;
474 		iou->io.waitdep = 1;	/* XXX this is a fs_token field */
475 		KKASSERT(hammer_isactive(&iou->io.lock) == 1);
476 		hammer_rel_buffer(&iou->buffer, 0);
477 		/*hammer_io_deallocate(bp);*/
478 #endif
479 		bqrelse(bp);
480 		error = EAGAIN;
481 	} else {
482 		KKASSERT((bp->b_flags & B_LOCKED) == 0);
483 		bundirty(bp);
484 		bp->b_flags |= B_NOCACHE|B_RELBUF;
485 		brelse(bp);
486 		error = 0;
487 	}
488 	lwkt_reltoken(&hmp->io_token);
489 	return(error);
490 }
491 
492 /*
493  * This routine is called on the last reference to a hammer structure.
494  * The io must be interlocked with a refcount of zero.  The hammer structure
495  * will remain interlocked on return.
496  *
497  * This routine may return a non-NULL bp to the caller for disposal.
498  * The caller typically brelse()'s the bp.
499  *
500  * The bp may or may not still be passively associated with the IO.  It
501  * will remain passively associated if it is unreleasable (e.g. a modified
502  * meta-data buffer).
503  *
504  * The only requirement here is that modified meta-data and volume-header
505  * buffers may NOT be disassociated from the IO structure, and consequently
506  * we also leave such buffers actively associated with the IO if they already
507  * are (since the kernel can't do anything with them anyway).  Only the
508  * flusher is allowed to write such buffers out.  Modified pure-data and
509  * undo buffers are returned to the kernel but left passively associated
510  * so we can track when the kernel writes the bp out.
511  */
512 struct buf *
513 hammer_io_release(struct hammer_io *io, int flush)
514 {
515 	union hammer_io_structure *iou = (void *)io;
516 	struct buf *bp;
517 
518 	if ((bp = io->bp) == NULL)
519 		return(NULL);
520 
521 	/*
522 	 * Try to flush a dirty IO to disk if asked to by the
523 	 * caller or if the kernel tried to flush the buffer in the past.
524 	 *
525 	 * Kernel-initiated flushes are only allowed for pure-data buffers.
526 	 * meta-data and volume buffers can only be flushed explicitly
527 	 * by HAMMER.
528 	 */
529 	if (io->modified) {
530 		if (flush) {
531 			hammer_io_flush(io, 0);
532 		} else if (bp->b_flags & B_LOCKED) {
533 			switch(io->type) {
534 			case HAMMER_STRUCTURE_DATA_BUFFER:
535 				hammer_io_flush(io, 0);
536 				break;
537 			case HAMMER_STRUCTURE_UNDO_BUFFER:
538 				hammer_io_flush(io, hammer_undo_reclaim(io));
539 				break;
540 			default:
541 				break;
542 			}
543 		} /* else no explicit request to flush the buffer */
544 	}
545 
546 	/*
547 	 * Wait for the IO to complete if asked to.  This occurs when
548 	 * the buffer must be disposed of definitively during an umount
549 	 * or buffer invalidation.
550 	 */
551 	if (io->waitdep && io->running) {
552 		hammer_io_wait(io);
553 	}
554 
555 	/*
556  * Return control of the buffer to the kernel (with the proviso
557  * that our bioops can override kernel decisions with regard to
558 	 * the buffer).
559 	 */
560 	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
561 		/*
562 		 * Always disassociate the bp if an explicit flush
563 		 * was requested and the IO completed with no error
564 		 * (so unmount can really clean up the structure).
565 		 */
566 		if (io->released) {
567 			regetblk(bp);
568 			BUF_KERNPROC(bp);
569 		} else {
570 			io->released = 1;
571 		}
572 		hammer_io_disassociate((hammer_io_structure_t)io);
573 		/* return the bp */
574 	} else if (io->modified) {
575 		/*
576 		 * Only certain IO types can be released to the kernel if
577 		 * the buffer has been modified.
578 		 *
579 		 * volume and meta-data IO types may only be explicitly
580 		 * flushed by HAMMER.
581 		 */
582 		switch(io->type) {
583 		case HAMMER_STRUCTURE_DATA_BUFFER:
584 		case HAMMER_STRUCTURE_UNDO_BUFFER:
585 			if (io->released == 0) {
586 				io->released = 1;
587 				bp->b_flags |= B_CLUSTEROK;
588 				bdwrite(bp);
589 			}
590 			break;
591 		default:
592 			break;
593 		}
594 		bp = NULL;	/* bp left associated */
595 	} else if (io->released == 0) {
596 		/*
597 		 * Clean buffers can be generally released to the kernel.
598 		 * We leave the bp passively associated with the HAMMER
599 		 * structure and use bioops to disconnect it later on
600 		 * if the kernel wants to discard the buffer.
601 		 *
602 		 * We can steal the structure's ownership of the bp.
603 		 */
604 		io->released = 1;
605 		if (bp->b_flags & B_LOCKED) {
606 			hammer_io_disassociate(iou);
607 			/* return the bp */
608 		} else {
609 			if (io->reclaim) {
610 				hammer_io_disassociate(iou);
611 				/* return the bp */
612 			} else {
613 				/* return the bp (bp passively associated) */
614 			}
615 		}
616 	} else {
617 		/*
618  * A released buffer is passively associated with our
619 		 * hammer_io structure.  The kernel cannot destroy it
620 		 * without making a bioops call.  If the kernel (B_LOCKED)
621 		 * or we (reclaim) requested that the buffer be destroyed
622 		 * we destroy it, otherwise we do a quick get/release to
623 		 * reset its position in the kernel's LRU list.
624 		 *
625 		 * Leaving the buffer passively associated allows us to
626 		 * use the kernel's LRU buffer flushing mechanisms rather
627  * than rolling our own.
628 		 *
629 		 * XXX there are two ways of doing this.  We can re-acquire
630 		 * and passively release to reset the LRU, or not.
631 		 */
632 		if (io->running == 0) {
633 			regetblk(bp);
634 			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
635 				hammer_io_disassociate(iou);
636 				/* return the bp */
637 			} else {
638 				/* return the bp (bp passively associated) */
639 			}
640 		} else {
641 			/*
642 			 * bp is left passively associated but we do not
643 			 * try to reacquire it.  Interactions with the io
644 			 * structure will occur on completion of the bp's
645 			 * I/O.
646 			 */
647 			bp = NULL;
648 		}
649 	}
650 	return(bp);
651 }
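
/*
 * Summary of the dispositions above: a clean io being flushed or
 * reclaimed is disassociated and its bp returned; a modified data or
 * undo buffer is bdwrite()n and left passively associated (NULL is
 * returned); modified meta-data and volume buffers are left for the
 * flusher; an already-released clean buffer is re-acquired only to
 * honor B_LOCKED/reclaim or to refresh its position in the LRU.
 */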
652 
653 /*
654  * This routine is called with a locked IO when a flush is desired and
655  * no other references to the structure exist other than ours.  This
656  * routine is ONLY called when HAMMER believes it is safe to flush a
657  * potentially modified buffer out.
658  *
659  * The locked io or io reference prevents a flush from being initiated
660  * by the kernel.
661  */
662 void
663 hammer_io_flush(struct hammer_io *io, int reclaim)
664 {
665 	struct buf *bp;
666 	hammer_mount_t hmp;
667 
668 	/*
669 	 * Degenerate case - nothing to flush if nothing is dirty.
670 	 */
671 	if (io->modified == 0)
672 		return;
673 
674 	KKASSERT(io->bp);
675 	KKASSERT(io->modify_refs <= 0);
676 
677 	/*
678 	 * Acquire ownership of the bp, particularly before we clear our
679 	 * modified flag.
680 	 *
681 	 * We are going to bawrite() this bp.  Don't leave a window where
682  * io->released is set, we actually own the bp rather than our
683 	 * buffer.
684 	 *
685  * The io_token should not be required here as we hold the only reference.
686 	 */
687 	hmp = io->hmp;
688 	bp = io->bp;
689 	if (io->released) {
690 		regetblk(bp);
691 		/* BUF_KERNPROC(io->bp); */
692 		/* io->released = 0; */
693 		KKASSERT(io->released);
694 		KKASSERT(io->bp == bp);
695 	} else {
696 		io->released = 1;
697 	}
698 
699 	if (reclaim) {
700 		io->reclaim = 1;
701 		if ((bp->b_flags & B_LOCKED) == 0) {
702 			bp->b_flags |= B_LOCKED;
703 			atomic_add_int(&hammer_count_io_locked, 1);
704 		}
705 	}
706 
707 	/*
708 	 * Acquire exclusive access to the bp and then clear the modified
709 	 * state of the buffer prior to issuing I/O to interlock any
710 	 * modifications made while the I/O is in progress.  This shouldn't
711 	 * happen anyway but losing data would be worse.  The modified bit
712 	 * will be rechecked after the IO completes.
713 	 *
714 	 * NOTE: This call also finalizes the buffer's content (inval == 0).
715 	 *
716 	 * This is only legal when lock.refs == 1 (otherwise we might clear
717 	 * the modified bit while there are still users of the cluster
718 	 * modifying the data).
719 	 *
720 	 * Do this before potentially blocking so any attempt to modify the
721 	 * ondisk while we are blocked blocks waiting for us.
722 	 */
723 	hammer_ref(&io->lock);
724 	hammer_io_clear_modify(io, 0);
725 	hammer_rel(&io->lock);
726 
727 	if (hammer_debug_io & 0x0002)
728 		kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset);
729 
730 	/*
731 	 * Transfer ownership to the kernel and initiate I/O.
732 	 *
733 	 * NOTE: We do not hold io_token so an atomic op is required to
734 	 *	 update io_running_space.
735 	 */
736 	io->running = 1;
737 	atomic_add_long(&hmp->io_running_space, io->bytes);
738 	atomic_add_long(&hammer_count_io_running_write, io->bytes);
739 	lwkt_gettoken(&hmp->io_token);
740 	TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
741 	lwkt_reltoken(&hmp->io_token);
742 	cluster_awrite(bp);
743 	hammer_io_flush_mark(io->volume);
744 }
745 
746 /************************************************************************
747  *				BUFFER DIRTYING				*
748  ************************************************************************
749  *
750  * These routines deal with dependencies created when IO buffers get
751  * modified.  The caller must call hammer_modify_*() on a referenced
752  * HAMMER structure prior to modifying its on-disk data.
753  *
754  * Any intent to modify an IO buffer acquires the related bp and imposes
755  * various write ordering dependencies.
756  */
757 
758 /*
759  * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
760  * are locked until the flusher can deal with them, pure data buffers
761  * can be written out.
762  *
763  * The referenced io prevents races.
764  */
765 static
766 void
767 hammer_io_modify(hammer_io_t io, int count)
768 {
769 	/*
770 	 * io->modify_refs must be >= 0
771 	 */
772 	while (io->modify_refs < 0) {
773 		io->waitmod = 1;
774 		tsleep(io, 0, "hmrmod", 0);
775 	}
776 
777 	/*
778 	 * Shortcut if nothing to do.
779 	 */
780 	KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
781 	io->modify_refs += count;
782 	if (io->modified && io->released == 0)
783 		return;
784 
785 	/*
786 	 * NOTE: It is important not to set the modified bit
787 	 *	 until after we have acquired the bp or we risk
788 	 *	 racing against checkwrite.
789 	 */
790 	hammer_lock_ex(&io->lock);
791 	if (io->released) {
792 		regetblk(io->bp);
793 		BUF_KERNPROC(io->bp);
794 		io->released = 0;
795 	}
796 	if (io->modified == 0) {
797 		hammer_io_set_modlist(io);
798 		io->modified = 1;
799 	}
800 	hammer_unlock(&io->lock);
801 }
802 
803 static __inline
804 void
805 hammer_io_modify_done(hammer_io_t io)
806 {
807 	KKASSERT(io->modify_refs > 0);
808 	--io->modify_refs;
809 	if (io->modify_refs == 0 && io->waitmod) {
810 		io->waitmod = 0;
811 		wakeup(io);
812 	}
813 }
814 
815 /*
816  * The write interlock blocks other threads trying to modify a buffer
817  * (they block in hammer_io_modify()) after us, or blocks us while other
818  * threads are in the middle of modifying a buffer.
819  *
820  * The caller also has a ref on the io, however if we are not careful
821  * we will race bioops callbacks (checkwrite).  To deal with this
822  * we must at least acquire and release the io_token, and it is probably
823  * better to hold it through the setting of modify_refs.
824  */
825 void
826 hammer_io_write_interlock(hammer_io_t io)
827 {
828 	hammer_mount_t hmp = io->hmp;
829 
830 	lwkt_gettoken(&hmp->io_token);
831 	while (io->modify_refs != 0) {
832 		io->waitmod = 1;
833 		tsleep(io, 0, "hmrmod", 0);
834 	}
835 	io->modify_refs = -1;
836 	lwkt_reltoken(&hmp->io_token);
837 }
838 
839 void
840 hammer_io_done_interlock(hammer_io_t io)
841 {
842 	KKASSERT(io->modify_refs == -1);
843 	io->modify_refs = 0;
844 	if (io->waitmod) {
845 		io->waitmod = 0;
846 		wakeup(io);
847 	}
848 }
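
/*
 * Sketch of the exclusive write interlock pairing (illustrative):
 */
#if 0
	hammer_io_write_interlock(io);	/* modify_refs: 0 -> -1 */
	/* ... install or overwrite the buffer's data ... */
	hammer_io_done_interlock(io);	/* modify_refs: -1 -> 0, wakeup */
#endif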
849 
850 /*
851  * Caller intends to modify a volume's ondisk structure.
852  *
853  * This is only allowed if we are the flusher or we have a ref on the
854  * sync_lock.
855  */
856 void
857 hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
858 		     void *base, int len)
859 {
860 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
861 
862 	hammer_io_modify(&volume->io, 1);
863 	if (len) {
864 		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
865 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
866 		hammer_generate_undo(trans,
867 			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
868 			 base, len);
869 	}
870 }
871 
872 /*
873  * Caller intends to modify a buffer's ondisk structure.
874  *
875  * This is only allowed if we are the flusher or we have a ref on the
876  * sync_lock.
877  */
878 void
879 hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
880 		     void *base, int len)
881 {
882 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
883 
884 	hammer_io_modify(&buffer->io, 1);
885 	if (len) {
886 		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
887 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
888 		hammer_generate_undo(trans,
889 				     buffer->zone2_offset + rel_offset,
890 				     base, len);
891 	}
892 }
893 
894 void
895 hammer_modify_volume_done(hammer_volume_t volume)
896 {
897 	hammer_io_modify_done(&volume->io);
898 }
899 
900 void
901 hammer_modify_buffer_done(hammer_buffer_t buffer)
902 {
903 	hammer_io_modify_done(&buffer->io);
904 }
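
/*
 * The canonical modify sequence (a sketch; the field names are
 * hypothetical).  The modify call generates UNDO covering the range
 * before the caller mutates it:
 */
#if 0
	hammer_modify_buffer(trans, buffer, &ondisk->field,
			     sizeof(ondisk->field));
	ondisk->field = new_value;
	hammer_modify_buffer_done(buffer);
#endif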
905 
906 /*
907  * Mark an entity as not being dirty any more and finalize any
908  * delayed adjustments to the buffer.
909  *
910  * Delayed adjustments are an important performance enhancement, allowing
911  * us to avoid recalculating B-Tree node CRCs over and over again when
912  * making bulk-modifications to the B-Tree.
913  *
914  * If inval is non-zero delayed adjustments are ignored.
915  *
916  * This routine may dereference related btree nodes and cause the
917  * buffer to be dereferenced.  The caller must own a reference on io.
918  */
919 void
920 hammer_io_clear_modify(struct hammer_io *io, int inval)
921 {
922 	hammer_mount_t hmp;
923 
924 	/*
925 	 * io_token is needed to avoid races on mod_root
926 	 */
927 	if (io->modified == 0)
928 		return;
929 	hmp = io->hmp;
930 	lwkt_gettoken(&hmp->io_token);
931 	if (io->modified == 0) {
932 		lwkt_reltoken(&hmp->io_token);
933 		return;
934 	}
935 
936 	/*
937 	 * Take us off the mod-list and clear the modified bit.
938 	 */
939 	KKASSERT(io->mod_root != NULL);
940 	if (io->mod_root == &io->hmp->volu_root ||
941 	    io->mod_root == &io->hmp->meta_root) {
942 		io->hmp->locked_dirty_space -= io->bytes;
943 		atomic_add_long(&hammer_count_dirtybufspace, -io->bytes);
944 	}
945 	RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
946 	io->mod_root = NULL;
947 	io->modified = 0;
948 
949 	lwkt_reltoken(&hmp->io_token);
950 
951 	/*
952 	 * If this bit is not set there are no delayed adjustments.
953 	 */
954 	if (io->gencrc == 0)
955 		return;
956 	io->gencrc = 0;
957 
958 	/*
959 	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
960 	 * on the node (& underlying buffer).  Release the node after clearing
961 	 * the flag.
962 	 */
963 	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
964 		hammer_buffer_t buffer = (void *)io;
965 		hammer_node_t node;
966 
967 restart:
968 		TAILQ_FOREACH(node, &buffer->clist, entry) {
969 			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
970 				continue;
971 			node->flags &= ~HAMMER_NODE_NEEDSCRC;
972 			KKASSERT(node->ondisk);
973 			if (inval == 0)
974 				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
975 			hammer_rel_node(node);
976 			goto restart;
977 		}
978 	}
979 	/* caller must still have ref on io */
980 	KKASSERT(hammer_isactive(&io->lock));
981 }
982 
983 /*
984  * Clear the IO's modify list.  Even though the IO is no longer modified
985  * it may still be on the lose_root.  This routine is called just before
986  * the governing hammer_buffer is destroyed.
987  *
988  * mod_root requires io_token protection.
989  */
990 void
991 hammer_io_clear_modlist(struct hammer_io *io)
992 {
993 	hammer_mount_t hmp = io->hmp;
994 
995 	KKASSERT(io->modified == 0);
996 	if (io->mod_root) {
997 		lwkt_gettoken(&hmp->io_token);
998 		if (io->mod_root) {
999 			KKASSERT(io->mod_root == &io->hmp->lose_root);
1000 			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
1001 			io->mod_root = NULL;
1002 		}
1003 		lwkt_reltoken(&hmp->io_token);
1004 	}
1005 }
1006 
1007 static void
1008 hammer_io_set_modlist(struct hammer_io *io)
1009 {
1010 	struct hammer_mount *hmp = io->hmp;
1011 
1012 	lwkt_gettoken(&hmp->io_token);
1013 	KKASSERT(io->mod_root == NULL);
1014 
1015 	switch(io->type) {
1016 	case HAMMER_STRUCTURE_VOLUME:
1017 		io->mod_root = &hmp->volu_root;
1018 		hmp->locked_dirty_space += io->bytes;
1019 		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
1020 		break;
1021 	case HAMMER_STRUCTURE_META_BUFFER:
1022 		io->mod_root = &hmp->meta_root;
1023 		hmp->locked_dirty_space += io->bytes;
1024 		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
1025 		break;
1026 	case HAMMER_STRUCTURE_UNDO_BUFFER:
1027 		io->mod_root = &hmp->undo_root;
1028 		break;
1029 	case HAMMER_STRUCTURE_DATA_BUFFER:
1030 		io->mod_root = &hmp->data_root;
1031 		break;
1032 	case HAMMER_STRUCTURE_DUMMY:
1033 		panic("hammer_io_set_modlist: bad io type");
1034 		break; /* NOT REACHED */
1035 	}
1036 	if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
1037 		panic("hammer_io_set_modlist: duplicate entry");
1038 		/* NOT REACHED */
1039 	}
1040 	lwkt_reltoken(&hmp->io_token);
1041 }
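
/*
 * Note on the accounting above: only volume and meta-data buffers are
 * charged to locked_dirty_space / hammer_count_dirtybufspace because
 * only those types are held back from the kernel until the flusher
 * runs; undo and data buffers may be written out by the kernel at any
 * time.
 */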
1042 
1043 /************************************************************************
1044  *				HAMMER_BIOOPS				*
1045  ************************************************************************
1046  *
1047  */
1048 
1049 /*
1050  * Pre-IO initiation kernel callback - cluster build only
1051  *
1052  * bioops callback - hold io_token
1053  */
1054 static void
1055 hammer_io_start(struct buf *bp)
1056 {
1057 	/* nothing to do, so io_token not needed */
1058 }
1059 
1060 /*
1061  * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
1062  *
1063  * NOTE: HAMMER may modify a data buffer after we have initiated write
1064  *	 I/O.
1065  *
1066  * NOTE: MPSAFE callback
1067  *
1068  * bioops callback - hold io_token
1069  */
1070 static void
1071 hammer_io_complete(struct buf *bp)
1072 {
1073 	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
1074 	struct hammer_mount *hmp = iou->io.hmp;
1075 	struct hammer_io *ionext;
1076 
1077 	lwkt_gettoken(&hmp->io_token);
1078 
1079 	KKASSERT(iou->io.released == 1);
1080 
1081 	/*
1082 	 * Deal with people waiting for I/O to drain
1083 	 */
1084 	if (iou->io.running) {
1085 		/*
1086 		 * Deal with critical write errors.  Once a critical error
1087 		 * has been flagged in hmp the UNDO FIFO will not be updated.
1088  * That way crash recovery will give us a consistent
1089 		 * filesystem.
1090 		 *
1091 		 * Because of this we can throw away failed UNDO buffers.  If
1092 		 * we throw away META or DATA buffers we risk corrupting
1093 		 * the now read-only version of the filesystem visible to
1094 		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
1095 		 * by the kernel and ref the io so it doesn't get thrown
1096 		 * away.
1097 		 */
1098 		if (bp->b_flags & B_ERROR) {
1099 			lwkt_gettoken(&hmp->fs_token);
1100 			hammer_critical_error(hmp, NULL, bp->b_error,
1101 					      "while flushing meta-data");
1102 			lwkt_reltoken(&hmp->fs_token);
1103 
1104 			switch(iou->io.type) {
1105 			case HAMMER_STRUCTURE_UNDO_BUFFER:
1106 				break;
1107 			default:
1108 				if (iou->io.ioerror == 0) {
1109 					iou->io.ioerror = 1;
1110 					hammer_ref(&iou->io.lock);
1111 				}
1112 				break;
1113 			}
1114 			bp->b_flags &= ~B_ERROR;
1115 			bundirty(bp);
1116 #if 0
1117 			hammer_io_set_modlist(&iou->io);
1118 			iou->io.modified = 1;
1119 #endif
1120 		}
1121 		hammer_stats_disk_write += iou->io.bytes;
1122 		atomic_add_long(&hammer_count_io_running_write, -iou->io.bytes);
1123 		atomic_add_long(&hmp->io_running_space, -iou->io.bytes);
1124 		KKASSERT(hmp->io_running_space >= 0);
1125 		iou->io.running = 0;
1126 
1127 		/*
1128 		 * Remove from iorun list and wakeup any multi-io waiter(s).
1129 		 */
1130 		if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) {
1131 			ionext = TAILQ_NEXT(&iou->io, iorun_entry);
1132 			if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
1133 				wakeup(ionext);
1134 		}
1135 		TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry);
1136 	} else {
1137 		hammer_stats_disk_read += iou->io.bytes;
1138 	}
1139 
1140 	if (iou->io.waiting) {
1141 		iou->io.waiting = 0;
1142 		wakeup(iou);
1143 	}
1144 
1145 	/*
1146 	 * If B_LOCKED is set someone wanted to deallocate the bp at some
1147  * point; try to do it now.  The operation will fail if there are
1148 	 * refs or if hammer_io_deallocate() is unable to gain the
1149 	 * interlock.
1150 	 */
1151 	if (bp->b_flags & B_LOCKED) {
1152 		atomic_add_int(&hammer_count_io_locked, -1);
1153 		bp->b_flags &= ~B_LOCKED;
1154 		hammer_io_deallocate(bp);
1155 		/* structure may be dead now */
1156 	}
1157 	lwkt_reltoken(&hmp->io_token);
1158 }
1159 
1160 /*
1161  * Callback from kernel when it wishes to deallocate a passively
1162  * associated structure.  This mostly occurs with clean buffers
1163  * but it may be possible for a holding structure to be marked dirty
1164  * while its buffer is passively associated.  The caller owns the bp.
1165  *
1166  * If we cannot disassociate we set B_LOCKED to prevent the buffer
1167  * from getting reused.
1168  *
1169  * WARNING: Because this can be called directly by getnewbuf we cannot
1170  * recurse into the tree.  If a bp cannot be immediately disassociated
1171  * our only recourse is to set B_LOCKED.
1172  *
1173  * WARNING: This may be called from an interrupt via hammer_io_complete()
1174  *
1175  * bioops callback - hold io_token
1176  */
1177 static void
1178 hammer_io_deallocate(struct buf *bp)
1179 {
1180 	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
1181 	hammer_mount_t hmp;
1182 
1183 	hmp = iou->io.hmp;
1184 
1185 	lwkt_gettoken(&hmp->io_token);
1186 
1187 	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
1188 	if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
1189 		/*
1190 		 * We cannot safely disassociate a bp from a referenced
1191 		 * or interlocked HAMMER structure.
1192 		 */
1193 		bp->b_flags |= B_LOCKED;
1194 		atomic_add_int(&hammer_count_io_locked, 1);
1195 	} else if (iou->io.modified) {
1196 		/*
1197 		 * It is not legal to disassociate a modified buffer.  This
1198 		 * case really shouldn't ever occur.
1199 		 */
1200 		bp->b_flags |= B_LOCKED;
1201 		atomic_add_int(&hammer_count_io_locked, 1);
1202 		hammer_put_interlock(&iou->io.lock, 0);
1203 	} else {
1204 		/*
1205 		 * Disassociate the BP.  If the io has no refs left we
1206 		 * have to add it to the loose list.  The kernel has
1207 		 * locked the buffer and therefore our io must be
1208 		 * in a released state.
1209 		 */
1210 		hammer_io_disassociate(iou);
1211 		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
1212 			KKASSERT(iou->io.bp == NULL);
1213 			KKASSERT(iou->io.mod_root == NULL);
1214 			iou->io.mod_root = &hmp->lose_root;
1215 			if (RB_INSERT(hammer_mod_rb_tree, iou->io.mod_root,
1216 				      &iou->io)) {
1217 				panic("hammer_io_deallocate: duplicate entry");
1218 			}
1219 		}
1220 		hammer_put_interlock(&iou->io.lock, 1);
1221 	}
1222 	lwkt_reltoken(&hmp->io_token);
1223 }
1224 
1225 /*
1226  * bioops callback - hold io_token
1227  */
1228 static int
1229 hammer_io_fsync(struct vnode *vp)
1230 {
1231 	/* nothing to do, so io_token not needed */
1232 	return(0);
1233 }
1234 
1235 /*
1236  * NOTE: will not be called unless we tell the kernel about the
1237  * bioops.  Unused... we use the mount's VFS_SYNC instead.
1238  *
1239  * bioops callback - hold io_token
1240  */
1241 static int
1242 hammer_io_sync(struct mount *mp)
1243 {
1244 	/* nothing to do, so io_token not needed */
1245 	return(0);
1246 }
1247 
1248 /*
1249  * bioops callback - hold io_token
1250  */
1251 static void
1252 hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
1253 {
1254 	/* nothing to do, so io_token not needed */
1255 }
1256 
1257 /*
1258  * I/O pre-check for reading and writing.  HAMMER only uses this for
1259  * B_CACHE buffers so checkread just shouldn't happen, but if it does
1260  * allow it.
1261  *
1262  * Writing is a different case.  We don't want the kernel to try to write
1263  * out a buffer that HAMMER may be modifying passively or which has a
1264  * dependancy.  In addition, kernel-demanded writes can only proceed for
1265  * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
1266  * buffer types can only be explicitly written by the flusher.
1267  *
1268  * checkwrite will only be called for bdwrite()n buffers.  If we return
1269  * success the kernel is guaranteed to initiate the buffer write.
1270  *
1271  * bioops callback - hold io_token
1272  */
1273 static int
1274 hammer_io_checkread(struct buf *bp)
1275 {
1276 	/* nothing to do, so io_token not needed */
1277 	return(0);
1278 }
1279 
1280 /*
1281  * The kernel is asking us whether it can write out a dirty buffer or not.
1282  *
1283  * bioops callback - hold io_token
1284  */
1285 static int
1286 hammer_io_checkwrite(struct buf *bp)
1287 {
1288 	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
1289 	hammer_mount_t hmp = io->hmp;
1290 
1291 	/*
1292 	 * This shouldn't happen under normal operation.
1293 	 */
1294 	lwkt_gettoken(&hmp->io_token);
1295 	if (io->type == HAMMER_STRUCTURE_VOLUME ||
1296 	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
1297 		if (!panicstr)
1298 			panic("hammer_io_checkwrite: illegal buffer");
1299 		if ((bp->b_flags & B_LOCKED) == 0) {
1300 			bp->b_flags |= B_LOCKED;
1301 			atomic_add_int(&hammer_count_io_locked, 1);
1302 		}
1303 		lwkt_reltoken(&hmp->io_token);
1304 		return(1);
1305 	}
1306 
1307 	/*
1308 	 * We have to be able to interlock the IO to safely modify any
1309 	 * of its fields without holding the fs_token.  If we can't lock
1310 	 * it then we are racing someone.
1311 	 *
1312 	 * Our ownership of the bp lock prevents the io from being ripped
1313 	 * out from under us.
1314 	 */
1315 	if (hammer_try_interlock_norefs(&io->lock) == 0) {
1316 		bp->b_flags |= B_LOCKED;
1317 		atomic_add_int(&hammer_count_io_locked, 1);
1318 		lwkt_reltoken(&hmp->io_token);
1319 		return(1);
1320 	}
1321 
1322 	/*
1323 	 * The modified bit must be cleared prior to the initiation of
1324 	 * any IO (returning 0 initiates the IO).  Because this is a
1325 	 * normal data buffer hammer_io_clear_modify() runs through a
1326 	 * simple degenerate case.
1327 	 *
1328 	 * Return 0 will cause the kernel to initiate the IO, and we
1329 	 * must normally clear the modified bit before we begin.  If
1330 	 * the io has modify_refs we do not clear the modified bit,
1331 	 * otherwise we may miss changes.
1332 	 *
1333 	 * Only data and undo buffers can reach here.  These buffers do
1334 	 * not have terminal crc functions but we temporarily reference
1335 	 * the IO anyway, just in case.
1336 	 */
1337 	if (io->modify_refs == 0 && io->modified) {
1338 		hammer_ref(&io->lock);
1339 		hammer_io_clear_modify(io, 0);
1340 		hammer_rel(&io->lock);
1341 	} else if (io->modified) {
1342 		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
1343 	}
1344 
1345 	/*
1346 	 * The kernel is going to start the IO, set io->running.
1347 	 */
1348 	KKASSERT(io->running == 0);
1349 	io->running = 1;
1350 	atomic_add_long(&io->hmp->io_running_space, io->bytes);
1351 	atomic_add_long(&hammer_count_io_running_write, io->bytes);
1352 	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
1353 
1354 	hammer_put_interlock(&io->lock, 1);
1355 	lwkt_reltoken(&hmp->io_token);
1356 
1357 	return(0);
1358 }
1359 
1360 /*
1361  * Return non-zero if we wish to delay the kernel's attempt to flush
1362  * this buffer to disk.
1363  *
1364  * bioops callback - hold io_token
1365  */
1366 static int
1367 hammer_io_countdeps(struct buf *bp, int n)
1368 {
1369 	/* nothing to do, so io_token not needed */
1370 	return(0);
1371 }
1372 
1373 struct bio_ops hammer_bioops = {
1374 	.io_start	= hammer_io_start,
1375 	.io_complete	= hammer_io_complete,
1376 	.io_deallocate	= hammer_io_deallocate,
1377 	.io_fsync	= hammer_io_fsync,
1378 	.io_sync	= hammer_io_sync,
1379 	.io_movedeps	= hammer_io_movedeps,
1380 	.io_countdeps	= hammer_io_countdeps,
1381 	.io_checkread	= hammer_io_checkread,
1382 	.io_checkwrite	= hammer_io_checkwrite,
1383 };
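
/*
 * The kernel reaches this table through bp->b_ops, which is installed
 * by hammer_io_read()/hammer_io_new().  A condensed sketch of the
 * kernel-side checkwrite dispatch (assumed shape, see the buf inlines):
 */
#if 0
	if (bp->b_ops && bp->b_ops->io_checkwrite &&
	    bp->b_ops->io_checkwrite(bp)) {
		/* write refused; HAMMER set B_LOCKED on the bp */
	}
#endif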
1384 
1385 /************************************************************************
1386  *				DIRECT IO OPS 				*
1387  ************************************************************************
1388  *
1389  * These functions operate directly on the buffer cache buffer associated
1390  * with a front-end vnode rather than a back-end device vnode.
1391  */
1392 
1393 /*
1394  * Read a buffer associated with a front-end vnode directly from the
1395  * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
1396  * we validate the CRC.
1397  *
1398  * We must check for the presence of a HAMMER buffer to handle the case
1399  * where the reblocker has rewritten the data (which it does via the HAMMER
1400  * buffer system, not via the high-level vnode buffer cache), but not yet
1401  * committed the buffer to the media.
1402  */
1403 int
1404 hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
1405 		      hammer_btree_leaf_elm_t leaf)
1406 {
1407 	hammer_off_t buf_offset;
1408 	hammer_off_t zone2_offset;
1409 	hammer_volume_t volume;
1410 	struct buf *bp;
1411 	struct bio *nbio;
1412 	int vol_no;
1413 	int error;
1414 
1415 	buf_offset = bio->bio_offset;
1416 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
1417 		 HAMMER_ZONE_LARGE_DATA);
1418 
1419 	/*
1420 	 * The buffer cache may have an aliased buffer (the reblocker can
1421 	 * write them).  If it does we have to sync any dirty data before
1422 	 * we can build our direct-read.  This is a non-critical code path.
1423 	 */
1424 	bp = bio->bio_buf;
1425 	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
1426 
1427 	/*
1428 	 * Resolve to a zone-2 offset.  The conversion just requires
1429 	 * munging the top 4 bits but we want to abstract it anyway
1430 	 * so the blockmap code can verify the zone assignment.
1431 	 */
1432 	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1433 	if (error)
1434 		goto done;
1435 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
1436 		 HAMMER_ZONE_RAW_BUFFER);
1437 
1438 	/*
1439 	 * Resolve volume and raw-offset for 3rd level bio.  The
1440 	 * offset will be specific to the volume.
1441 	 */
1442 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
1443 	volume = hammer_get_volume(hmp, vol_no, &error);
1444 	if (error == 0 && zone2_offset >= volume->maxbuf_off)
1445 		error = EIO;
1446 
1447 	if (error == 0) {
1448 		/*
1449 		 * 3rd level bio
1450 		 */
1451 		nbio = push_bio(bio);
1452 		nbio->bio_offset = volume->ondisk->vol_buf_beg +
1453 				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
1454 		hammer_stats_disk_read += bp->b_bufsize;
1455 		vn_strategy(volume->devvp, nbio);
1456 	}
1457 	hammer_rel_volume(volume, 0);
1458 done:
1459 	if (error) {
1460 		kprintf("hammer_direct_read: failed @ %016llx\n",
1461 			(long long)zone2_offset);
1462 		bp->b_error = error;
1463 		bp->b_flags |= B_ERROR;
1464 		biodone(bio);
1465 	}
1466 	return(error);
1467 }
1468 
1469 /*
1470  * This works similarly to hammer_io_direct_read() except instead of
1471  * directly reading from the device into the bio we instead indirectly
1472  * read through the device's buffer cache and then copy the data into
1473  * the bio.
1474  *
1475  * If leaf is non-NULL and validation is enabled, the CRC will be checked.
1476  *
1477  * This routine also executes asynchronously.  It allows hammer strategy
1478  * calls to operate asynchronously when in double_buffer mode (in addition
1479  * to operating asynchronously when in normal mode).
1480  */
1481 int
1482 hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
1483 			hammer_btree_leaf_elm_t leaf)
1484 {
1485 	hammer_off_t buf_offset;
1486 	hammer_off_t zone2_offset;
1487 	hammer_volume_t volume;
1488 	struct buf *bp;
1489 	int vol_no;
1490 	int error;
1491 
1492 	buf_offset = bio->bio_offset;
1493 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
1494 		 HAMMER_ZONE_LARGE_DATA);
1495 
1496 	/*
1497 	 * The buffer cache may have an aliased buffer (the reblocker can
1498 	 * write them).  If it does we have to sync any dirty data before
1499 	 * we can build our direct-read.  This is a non-critical code path.
1500 	 */
1501 	bp = bio->bio_buf;
1502 	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
1503 
1504 	/*
1505 	 * Resolve to a zone-2 offset.  The conversion just requires
1506 	 * munging the top 4 bits but we want to abstract it anyway
1507 	 * so the blockmap code can verify the zone assignment.
1508 	 */
1509 	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1510 	if (error)
1511 		goto done;
1512 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
1513 		 HAMMER_ZONE_RAW_BUFFER);
1514 
1515 	/*
1516 	 * Resolve volume and raw-offset for 3rd level bio.  The
1517 	 * offset will be specific to the volume.
1518 	 */
1519 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
1520 	volume = hammer_get_volume(hmp, vol_no, &error);
1521 	if (error == 0 && zone2_offset >= volume->maxbuf_off)
1522 		error = EIO;
1523 
1524 	if (error == 0) {
1525 		/*
1526 		 * Convert to the raw volume->devvp offset and acquire
1527 		 * the buf, issuing async I/O if necessary.
1528 		 */
1529 		buf_offset = volume->ondisk->vol_buf_beg +
1530 			     (zone2_offset & HAMMER_OFF_SHORT_MASK);
1531 
1532 		if (leaf && hammer_verify_data) {
1533 			bio->bio_caller_info1.uvalue32 = leaf->data_crc;
1534 			bio->bio_caller_info2.index = 1;
1535 		} else {
1536 			bio->bio_caller_info2.index = 0;
1537 		}
1538 		breadcb(volume->devvp, buf_offset, bp->b_bufsize,
1539 			hammer_indirect_callback, bio);
1540 	}
1541 	hammer_rel_volume(volume, 0);
1542 done:
1543 	if (error) {
1544 		kprintf("hammer_direct_read: failed @ %016llx\n",
1545 			(long long)zone2_offset);
1546 		bp->b_error = error;
1547 		bp->b_flags |= B_ERROR;
1548 		biodone(bio);
1549 	}
1550 	return(error);
1551 }
1552 
1553 /*
1554  * Indirect callback on completion.  bio/bp specify the device-backed
1555  * buffer.  bio->bio_caller_info1.ptr holds obio.
1556  *
1557  * obio/obp is the original regular file buffer.  obio->bio_caller_info*
1558  * contains the crc specification.
1559  *
1560  * We are responsible for calling bpdone() and bqrelse() on bio/bp, and
1561  * for calling biodone() on obio.
1562  */
1563 static void
1564 hammer_indirect_callback(struct bio *bio)
1565 {
1566 	struct buf *bp = bio->bio_buf;
1567 	struct buf *obp;
1568 	struct bio *obio;
1569 
1570 	/*
1571 	 * If BIO_DONE is already set the device buffer was already
1572 	 * fully valid (B_CACHE).  If it is not set then I/O was issued
1573 	 * and we have to run I/O completion as the last bio.
1574 	 *
1575 	 * Nobody is waiting for our device I/O to complete, we are
1576 	 * responsible for bqrelse()ing it which means we also have to do
1577 	 * the equivalent of biowait() and clear BIO_DONE (which breadcb()
1578 	 * may have set).
1579 	 *
1580 	 * Any preexisting device buffer should match the requested size,
1581 	 * but due to big-block recycling and other factors there is some
1582 	 * fragility there, so we assert that the device buffer covers
1583 	 * the request.
1584 	 */
1585 	if ((bio->bio_flags & BIO_DONE) == 0)
1586 		bpdone(bp, 0);
1587 	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);
1588 
1589 	obio = bio->bio_caller_info1.ptr;
1590 	obp = obio->bio_buf;
1591 
1592 	if (bp->b_flags & B_ERROR) {
1593 		obp->b_flags |= B_ERROR;
1594 		obp->b_error = bp->b_error;
1595 	} else if (obio->bio_caller_info2.index &&
1596 		   obio->bio_caller_info1.uvalue32 !=
1597 		    crc32(bp->b_data, bp->b_bufsize)) {
1598 		obp->b_flags |= B_ERROR;
1599 		obp->b_error = EIO;
1600 	} else {
1601 		KKASSERT(bp->b_bufsize >= obp->b_bufsize);
1602 		bcopy(bp->b_data, obp->b_data, obp->b_bufsize);
1603 		obp->b_resid = 0;
1604 		obp->b_flags |= B_AGE;
1605 	}
1606 	biodone(obio);
1607 	bqrelse(bp);
1608 }
1609 
1610 /*
1611  * Write a buffer associated with a front-end vnode directly to the
1612  * disk media.  The bio may be issued asynchronously.
1613  *
1614  * The BIO is associated with the specified record and RECG_DIRECT_IO
1615  * is set.  The record is added to its object.
1616  */
1617 int
1618 hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
1619 		       hammer_record_t record)
1620 {
1621 	hammer_btree_leaf_elm_t leaf = &record->leaf;
1622 	hammer_off_t buf_offset;
1623 	hammer_off_t zone2_offset;
1624 	hammer_volume_t volume;
1625 	hammer_buffer_t buffer;
1626 	struct buf *bp;
1627 	struct bio *nbio;
1628 	char *ptr;
1629 	int vol_no;
1630 	int error;
1631 
1632 	buf_offset = leaf->data_offset;
1633 
1634 	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
1635 	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
1636 
1637 	/*
1638 	 * Issue or execute the I/O.  The new memory record must replace
1639  * the old one before the I/O completes, otherwise a reacquisition of
1640 	 * the buffer will load the old media data instead of the new.
1641 	 */
1642 	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
1643 	    leaf->data_len >= HAMMER_BUFSIZE) {
1644 		/*
1645 		 * We are using the vnode's bio to write directly to the
1646 		 * media, any hammer_buffer at the same zone-X offset will
1647 		 * now have stale data.
1648 		 */
1649 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1650 		vol_no = HAMMER_VOL_DECODE(zone2_offset);
1651 		volume = hammer_get_volume(hmp, vol_no, &error);
1652 
1653 		if (error == 0 && zone2_offset >= volume->maxbuf_off)
1654 			error = EIO;
1655 		if (error == 0) {
1656 			bp = bio->bio_buf;
1657 			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
1658 			/*
1659 			hammer_del_buffers(hmp, buf_offset,
1660 					   zone2_offset, bp->b_bufsize);
1661 			*/
1662 
1663 			/*
1664 			 * Second level bio - cached zone2 offset.
1665 			 *
1666 			 * (We can put our bio_done function in either the
1667 			 *  2nd or 3rd level).
1668 			 */
1669 			nbio = push_bio(bio);
1670 			nbio->bio_offset = zone2_offset;
1671 			nbio->bio_done = hammer_io_direct_write_complete;
1672 			nbio->bio_caller_info1.ptr = record;
1673 			record->zone2_offset = zone2_offset;
1674 			record->gflags |= HAMMER_RECG_DIRECT_IO |
1675 					 HAMMER_RECG_DIRECT_INVAL;
1676 
1677 			/*
1678 			 * Third level bio - raw offset specific to the
1679 			 * correct volume.
1680 			 */
1681 			zone2_offset &= HAMMER_OFF_SHORT_MASK;
1682 			nbio = push_bio(nbio);
1683 			nbio->bio_offset = volume->ondisk->vol_buf_beg +
1684 					   zone2_offset;
1685 			hammer_stats_disk_write += bp->b_bufsize;
1686 			hammer_ip_replace_bulk(hmp, record);
1687 			vn_strategy(volume->devvp, nbio);
1688 			hammer_io_flush_mark(volume);
1689 		}
1690 		hammer_rel_volume(volume, 0);
1691 	} else {
1692 		/*
1693 		 * Must fit in a standard HAMMER buffer.  In this case all
1694 		 * consumers use the HAMMER buffer system and RECG_DIRECT_IO
1695 		 * does not need to be set-up.
1696 		 */
1697 		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
1698 		buffer = NULL;
1699 		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
1700 		if (error == 0) {
1701 			bp = bio->bio_buf;
1702 			bp->b_flags |= B_AGE;
1703 			hammer_io_modify(&buffer->io, 1);
1704 			bcopy(bp->b_data, ptr, leaf->data_len);
1705 			hammer_io_modify_done(&buffer->io);
1706 			hammer_rel_buffer(buffer, 0);
1707 			bp->b_resid = 0;
1708 			hammer_ip_replace_bulk(hmp, record);
1709 			biodone(bio);
1710 		}
1711 	}
1712 	if (error) {
1713 		/*
1714  * Major suckage occurred.  Also note:  The record was
1715 		 * never added to the tree so we do not have to worry
1716 		 * about the backend.
1717 		 */
1718 		kprintf("hammer_direct_write: failed @ %016llx\n",
1719 			(long long)leaf->data_offset);
1720 		bp = bio->bio_buf;
1721 		bp->b_resid = 0;
1722 		bp->b_error = EIO;
1723 		bp->b_flags |= B_ERROR;
1724 		biodone(bio);
1725 		record->flags |= HAMMER_RECF_DELETED_FE;
1726 		hammer_rel_mem_record(record);
1727 	}
1728 	return(error);
1729 }
1730 
1731 /*
1732  * On completion of the BIO this callback must disconnect
1733  * it from the hammer_record and chain to the previous bio.
1734  *
1735  * An I/O error forces the mount to read-only.  Data buffers
1736  * are not B_LOCKED like meta-data buffers are, so we have to
1737  * throw the buffer away to prevent the kernel from retrying.
1738  *
1739  * NOTE: MPSAFE callback, only modify fields we have explicit
1740  *	 access to (the bp and the record->gflags).
1741  */
1742 static
1743 void
1744 hammer_io_direct_write_complete(struct bio *nbio)
1745 {
1746 	struct bio *obio;
1747 	struct buf *bp;
1748 	hammer_record_t record;
1749 	hammer_mount_t hmp;
1750 
1751 	record = nbio->bio_caller_info1.ptr;
1752 	KKASSERT(record != NULL);
1753 	hmp = record->ip->hmp;
1754 
1755 	lwkt_gettoken(&hmp->io_token);
1756 
1757 	bp = nbio->bio_buf;
1758 	obio = pop_bio(nbio);
1759 	if (bp->b_flags & B_ERROR) {
1760 		lwkt_gettoken(&hmp->fs_token);
1761 		hammer_critical_error(hmp, record->ip, bp->b_error,
1762 				      "while writing bulk data");
1763 		lwkt_reltoken(&hmp->fs_token);
1764 		bp->b_flags |= B_INVAL;
1765 	}
1766 	biodone(obio);
1767 
1768 	KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO);
1769 	if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
1770 		record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
1771 				    HAMMER_RECG_DIRECT_WAIT);
1772 		/* record can disappear once DIRECT_IO flag is cleared */
1773 		wakeup(&record->flags);
1774 	} else {
1775 		record->gflags &= ~HAMMER_RECG_DIRECT_IO;
1776 		/* record can disappear once DIRECT_IO flag is cleared */
1777 	}
1778 	lwkt_reltoken(&hmp->io_token);
1779 }
1780 
1781 
1782 /*
1783  * This is called before a record is either committed to the B-Tree
1784  * or destroyed, to resolve any associated direct-IO.
1785  *
1786  * (1) We must wait for any direct-IO related to the record to complete.
1787  *
1788  * (2) We must remove any buffer cache aliases for data accessed via
1789  *     leaf->data_offset or zone2_offset so non-direct-IO consumers
1790  *     (the mirroring and reblocking code) do not see stale data.
1791  */
1792 void
1793 hammer_io_direct_wait(hammer_record_t record)
1794 {
1795 	hammer_mount_t hmp = record->ip->hmp;
1796 
1797 	/*
1798 	 * Wait for I/O to complete
1799 	 */
1800 	if (record->gflags & HAMMER_RECG_DIRECT_IO) {
1801 		lwkt_gettoken(&hmp->io_token);
1802 		while (record->gflags & HAMMER_RECG_DIRECT_IO) {
1803 			record->gflags |= HAMMER_RECG_DIRECT_WAIT;
1804 			tsleep(&record->flags, 0, "hmdiow", 0);
1805 		}
1806 		lwkt_reltoken(&hmp->io_token);
1807 	}
1808 
1809 	/*
1810 	 * Invalidate any related buffer cache aliases associated with the
1811 	 * backing device.  This is needed because the buffer cache buffer
1812 	 * for file data is associated with the file vnode, not the backing
1813 	 * device vnode.
1814 	 *
1815 	 * XXX I do not think this case can occur any more now that
1816 	 * reservations ensure that all such buffers are removed before
1817 	 * an area can be reused.
1818 	 */
1819 	if (record->gflags & HAMMER_RECG_DIRECT_INVAL) {
1820 		KKASSERT(record->leaf.data_offset);
1821 		hammer_del_buffers(hmp, record->leaf.data_offset,
1822 				   record->zone2_offset, record->leaf.data_len,
1823 				   1);
1824 		record->gflags &= ~HAMMER_RECG_DIRECT_INVAL;
1825 	}
1826 }
1827 
1828 /*
1829  * This is called to remove the second-level cached zone-2 offset from
1830  * frontend buffer cache buffers, now stale due to a data relocation.
1831  * These offsets are generated by cluster_read() via VOP_BMAP, or directly
1832  * by hammer_vop_strategy_read().
1833  *
1834  * This is rather nasty because here we have something like the reblocker
1835  * scanning the raw B-Tree with no held references on anything, really,
1836  * other than a shared lock on the B-Tree node, and we have to access the
1837  * frontend's buffer cache to check for and clean out the association.
1838  * Specifically, if the reblocker is moving data on the disk, these cached
1839  * offsets will become invalid.
1840  *
1841  * Only data record types associated with the large-data zone are subject
1842  * to direct-io and need to be checked.
1843  *
1844  */
1845 void
1846 hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
1847 {
1848 	struct hammer_inode_info iinfo;
1849 	int zone;
1850 
1851 	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
1852 		return;
1853 	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
1854 	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
1855 		return;
1856 	iinfo.obj_id = leaf->base.obj_id;
1857 	iinfo.obj_asof = 0;	/* unused */
1858 	iinfo.obj_localization = leaf->base.localization &
1859 				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
1860 	iinfo.u.leaf = leaf;
1861 	hammer_scan_inode_snapshots(hmp, &iinfo,
1862 				    hammer_io_direct_uncache_callback,
1863 				    leaf);
1864 }
1865 
1866 static int
1867 hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
1868 {
1869 	hammer_inode_info_t iinfo = data;
1870 	hammer_off_t file_offset;
1871 	struct vnode *vp;
1872 	struct buf *bp;
1873 	int blksize;
1874 
1875 	if (ip->vp == NULL)
1876 		return(0);
1877 	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
1878 	blksize = iinfo->u.leaf->data_len;
1879 	KKASSERT((blksize & HAMMER_BUFMASK) == 0);
1880 
1881 	/*
1882  * Warning: FINDBLK_TEST returns stable storage but not stable
1883 	 *	    contents.  It happens to be ok in this case.
1884 	 */
1885 	hammer_ref(&ip->lock);
1886 	if (hammer_get_vnode(ip, &vp) == 0) {
1887 		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
1888 		    bp->b_bio2.bio_offset != NOOFFSET) {
1889 			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
1890 			bp->b_bio2.bio_offset = NOOFFSET;
1891 			brelse(bp);
1892 		}
1893 		vput(vp);
1894 	}
1895 	hammer_rel_inode(ip, 0);
1896 	return(0);
1897 }
1898 
1899 
1900 /*
1901  * This function is called when writes may have occurred on the volume,
1902  * indicating that the device may be holding cached writes.
1903  */
1904 static void
1905 hammer_io_flush_mark(hammer_volume_t volume)
1906 {
1907 	atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH);
1908 }
1909 
1910 /*
1911  * This function ensures that the device has flushed any cached writes out.
1912  */
1913 void
1914 hammer_io_flush_sync(hammer_mount_t hmp)
1915 {
1916 	hammer_volume_t volume;
1917 	struct buf *bp_base = NULL;
1918 	struct buf *bp;
1919 
1920 	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1921 		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
1922 			atomic_clear_int(&volume->vol_flags,
1923 					 HAMMER_VOLF_NEEDFLUSH);
1924 			bp = getpbuf(NULL);
1925 			bp->b_bio1.bio_offset = 0;
1926 			bp->b_bufsize = 0;
1927 			bp->b_bcount = 0;
1928 			bp->b_cmd = BUF_CMD_FLUSH;
1929 			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
1930 			bp->b_bio1.bio_done = biodone_sync;
1931 			bp->b_bio1.bio_flags |= BIO_SYNC;
1932 			bp_base = bp;
1933 			vn_strategy(volume->devvp, &bp->b_bio1);
1934 		}
1935 	}
1936 	while ((bp = bp_base) != NULL) {
1937 		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
1938 		biowait(&bp->b_bio1, "hmrFLS");
1939 		relpbuf(bp, NULL);
1940 	}
1941 }
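
/*
 * A single-volume reduction of the loop above (illustrative): issue one
 * BUF_CMD_FLUSH synchronously to push the drive's write cache.
 */
#if 0
	bp = getpbuf(NULL);
	bp->b_bio1.bio_offset = 0;
	bp->b_bufsize = 0;
	bp->b_bcount = 0;
	bp->b_cmd = BUF_CMD_FLUSH;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	vn_strategy(volume->devvp, &bp->b_bio1);
	biowait(&bp->b_bio1, "hmrFLS");
	relpbuf(bp, NULL);
#endif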
1942 
1943 /*
1944  * Limit the amount of backlog which we allow to build up
1945  */
1946 void
1947 hammer_io_limit_backlog(hammer_mount_t hmp)
1948 {
1949 	waitrunningbufspace();
1950 }
1951