/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * IO Primitives and buffer cache management
 *
 * All major data-tracking structures in HAMMER contain a struct hammer_io
 * which is used to manage their backing store.  We use filesystem buffers
 * for backing store and we leave them passively associated with their
 * HAMMER structures.
 *
 * If the kernel tries to destroy a passively associated buf which we cannot
 * yet let go we set B_LOCKED in the buffer and then actively release it
 * later when we can.
 *
 * The io_token is required for anything which might race bioops and bio_done
 * callbacks, with one exception: a successful hammer_try_interlock_norefs().
 * The fs_token will be held in all other cases.
 */
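
/*
 * Illustrative sketch of the token rule above (hypothetical caller, not
 * part of this file): code which can race a bioops or bio_done callback
 * brackets the racy section with the io_token.
 *
 *	lwkt_gettoken(&hmp->io_token);
 *	... touch fields also modified by hammer_io_complete() ...
 *	lwkt_reltoken(&hmp->io_token);
 */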

#include "hammer.h"
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/buf.h>

#include <sys/buf2.h>

static void hammer_io_modify(hammer_io_t io, int count);
static void hammer_io_deallocate(struct buf *bp);
static void hammer_indirect_callback(struct bio *bio);
#if 0
static void hammer_io_direct_read_complete(struct bio *nbio);
#endif
static void hammer_io_direct_write_complete(struct bio *nbio);
static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
static void hammer_io_set_modlist(struct hammer_io *io);
static void hammer_io_flush_mark(hammer_volume_t volume);

static int
hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2)
{
	hammer_off_t io1_offset;
	hammer_off_t io2_offset;

	io1_offset = ((io1->offset & HAMMER_OFF_SHORT_MASK) << 8) |
		     HAMMER_VOL_DECODE(io1->offset);
	io2_offset = ((io2->offset & HAMMER_OFF_SHORT_MASK) << 8) |
		     HAMMER_VOL_DECODE(io2->offset);

	if (io1_offset < io2_offset)
		return(-1);
	if (io1_offset > io2_offset)
		return(1);
	return(0);
}

RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare);

/*
 * Initialize a new, already-zero'd hammer_io structure, or reinitialize
 * an existing hammer_io structure which may have switched to another type.
 */
void
hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
{
	io->volume = volume;
	io->hmp = volume->io.hmp;
	io->type = type;
}
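
/*
 * Example (sketch): hammer_ondisk.c initializes the io embedded in a
 * newly allocated hammer_buffer roughly like this before loading it:
 *
 *	hammer_io_init(&buffer->io, volume, iotype);
 *	buffer->io.offset = volume->ondisk->vol_buf_beg +
 *			    (zone2_offset & HAMMER_OFF_SHORT_MASK);
 */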

/*
 * Helper routine to disassociate a buffer cache buffer from an I/O
 * structure.  The io must be interlocked and marked appropriately for
 * reclamation.
 *
 * The io must be in a released state with the io->bp owned and
 * locked by the caller of this function.  When not called from an
 * io_deallocate() this cannot race an io_deallocate() since the
 * kernel would be unable to get the buffer lock in that case.
 * (The released state in this case means we own the bp, not the
 * hammer_io structure).
 *
 * The io may have 0 or 1 references depending on who called us.  The
 * caller is responsible for dealing with the refs.
 *
 * This call can only be made when no action is required on the buffer.
 *
 * This function is guaranteed not to race against anything because we
 * own both the io lock and the bp lock and are interlocked with no
 * references.
 */
static void
hammer_io_disassociate(hammer_io_structure_t iou)
{
	struct buf *bp = iou->io.bp;

	KKASSERT(iou->io.released);
	KKASSERT(iou->io.modified == 0);
	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
	buf_dep_init(bp);
	iou->io.bp = NULL;

	/*
	 * If the buffer was locked someone wanted to get rid of it.
	 */
	if (bp->b_flags & B_LOCKED) {
		atomic_add_int(&hammer_count_io_locked, -1);
		bp->b_flags &= ~B_LOCKED;
	}
	if (iou->io.reclaim) {
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		iou->io.reclaim = 0;
	}

	switch(iou->io.type) {
	case HAMMER_STRUCTURE_VOLUME:
		iou->volume.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
	case HAMMER_STRUCTURE_META_BUFFER:
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		iou->buffer.ondisk = NULL;
		break;
	case HAMMER_STRUCTURE_DUMMY:
		panic("hammer_io_disassociate: bad io type");
		break;
	}
}

/*
 * Wait for any physical IO to complete
 *
 * XXX we aren't interlocked against a spinlock or anything so there
 *     is a small window in the interlock / io->running == 0 test.
 */
void
hammer_io_wait(hammer_io_t io)
{
	if (io->running) {
		hammer_mount_t hmp = io->hmp;

		lwkt_gettoken(&hmp->io_token);
		while (io->running) {
			io->waiting = 1;
			tsleep_interlock(io, 0);
			if (io->running)
				tsleep(io, PINTERLOCKED, "hmrflw", hz);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}

/*
 * Wait for all currently queued HAMMER-initiated I/Os to complete.
 *
 * This is not supposed to count direct I/O's but some can leak
 * through (for non-full-sized direct I/Os).
 */
void
hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
{
	struct hammer_io iodummy;
	hammer_io_t io;

	/*
	 * Degenerate case, no I/O is running
	 */
	lwkt_gettoken(&hmp->io_token);
	if (TAILQ_EMPTY(&hmp->iorun_list)) {
		lwkt_reltoken(&hmp->io_token);
		if (doflush)
			hammer_io_flush_sync(hmp);
		return;
	}
	bzero(&iodummy, sizeof(iodummy));
	iodummy.type = HAMMER_STRUCTURE_DUMMY;

	/*
	 * Add placemarker and then wait until it becomes the head of
	 * the list.
	 */
	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
		tsleep(&iodummy, 0, ident, 0);
	}

	/*
	 * Chain in case several placemarkers are present.
	 */
	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
	io = TAILQ_FIRST(&hmp->iorun_list);
	if (io && io->type == HAMMER_STRUCTURE_DUMMY)
		wakeup(io);
	lwkt_reltoken(&hmp->io_token);

	if (doflush)
		hammer_io_flush_sync(hmp);
}
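
/*
 * Usage sketch (the wmesg ident shown here is arbitrary): wait for all
 * queued I/O to drain and then force a media flush:
 *
 *	hammer_io_wait_all(hmp, "hmrflx", 1);
 */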

/*
 * Clear a flagged error condition on an I/O buffer.  The caller must hold
 * its own ref on the buffer.
 */
void
hammer_io_clear_error(struct hammer_io *io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	if (io->ioerror) {
		io->ioerror = 0;
		hammer_rel(&io->lock);
		KKASSERT(hammer_isactive(&io->lock));
	}
	lwkt_reltoken(&hmp->io_token);
}

void
hammer_io_clear_error_noassert(struct hammer_io *io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	if (io->ioerror) {
		io->ioerror = 0;
		hammer_rel(&io->lock);
	}
	lwkt_reltoken(&hmp->io_token);
}

/*
 * This is an advisory function only which tells the buffer cache
 * the bp is not a meta-data buffer, even though it is backed by
 * a block device.
 *
 * This is used by HAMMER's reblocking code to avoid trying to
 * swapcache the filesystem's data when it is read or written
 * by the reblocking code.
 *
 * The caller has a ref on the buffer preventing the bp from
 * being disassociated from it.
 */
void
hammer_io_notmeta(hammer_buffer_t buffer)
{
	if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
		hammer_mount_t hmp = buffer->io.hmp;

		lwkt_gettoken(&hmp->io_token);
		buffer->io.bp->b_flags |= B_NOTMETA;
		lwkt_reltoken(&hmp->io_token);
	}
}

/*
 * Load bp for a HAMMER structure.  The io must be exclusively locked by
 * the caller.
 *
 * This routine is mostly used on meta-data and small-data blocks.  Generally
 * speaking HAMMER assumes some locality of reference and will cluster.
 *
 * Note that the caller (hammer_ondisk.c) may place further restrictions
 * on clusterability via the limit (in bytes).  Typically large-data
 * zones cannot be clustered due to their mixed buffer sizes.  This is
 * not an issue since such clustering occurs in hammer_vnops at the
 * regular file layer, whereas this is the buffered block device layer.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
int
hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
{
	struct buf *bp;
	int   error;

	if ((bp = io->bp) == NULL) {
		atomic_add_long(&hammer_count_io_running_read, io->bytes);
		if (hammer_cluster_enable && limit > io->bytes) {
			error = cluster_read(devvp, io->offset + limit,
					     io->offset, io->bytes,
					     HAMMER_CLUSTER_SIZE,
					     HAMMER_CLUSTER_SIZE,
					     &io->bp);
		} else {
			error = bread(devvp, io->offset, io->bytes, &io->bp);
		}
		hammer_stats_disk_read += io->bytes;
		atomic_add_long(&hammer_count_io_running_read, -io->bytes);

		/*
		 * The code generally assumes b_ops/b_dep has been set-up,
		 * even if we error out here.
		 */
		bp = io->bp;
		if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
			const char *metatype;

			switch(io->type) {
			case HAMMER_STRUCTURE_VOLUME:
				metatype = "volume";
				break;
			case HAMMER_STRUCTURE_META_BUFFER:
				switch(((struct hammer_buffer *)io)->
					zoneX_offset & HAMMER_OFF_ZONE_MASK) {
				case HAMMER_ZONE_BTREE:
					metatype = "btree";
					break;
				case HAMMER_ZONE_META:
					metatype = "meta";
					break;
				case HAMMER_ZONE_FREEMAP:
					metatype = "freemap";
					break;
				default:
					metatype = "meta?";
					break;
				}
				break;
			case HAMMER_STRUCTURE_DATA_BUFFER:
				metatype = "data";
				break;
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				metatype = "undo";
				break;
			default:
				metatype = "unknown";
				break;
			}
			kprintf("doff %016jx %s\n",
				(intmax_t)bp->b_bio2.bio_offset,
				metatype);
		}
		bp->b_flags &= ~B_IODEBUG;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);

		/* io->worklist is locked by the io lock */
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		BUF_KERNPROC(bp);
		KKASSERT(io->modified == 0);
		KKASSERT(io->running == 0);
		KKASSERT(io->waiting == 0);
		io->released = 0;	/* we hold an active lock on bp */
	} else {
		error = 0;
	}
	return(error);
}
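
/*
 * Call sketch (resembling the buffer load path in hammer_ondisk.c,
 * where limit caps clustering in bytes):
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io, limit);
 */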

/*
 * Similar to hammer_io_read() but returns a zero'd out buffer instead.
 * Must be called with the IO exclusively locked.
 *
 * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
 * I/O by forcing the buffer to not be in a released state before calling
 * it.
 *
 * This function will also mark the IO as modified but it will not
 * increment the modify_refs count.
 *
 * No I/O callbacks can occur while we hold the buffer locked.
 */
int
hammer_io_new(struct vnode *devvp, struct hammer_io *io)
{
	struct buf *bp;

	if ((bp = io->bp) == NULL) {
		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
		bp = io->bp;
		bp->b_ops = &hammer_bioops;
		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);

		/* io->worklist is locked by the io lock */
		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
		io->released = 0;
		KKASSERT(io->running == 0);
		io->waiting = 0;
		BUF_KERNPROC(bp);
	} else {
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
			io->released = 0;
		}
	}
	hammer_io_modify(io, 0);
	vfs_bio_clrbuf(bp);
	return(0);
}

/*
 * Advance the activity count on the underlying buffer because
 * HAMMER does not getblk/brelse on every access.
 *
 * The io->bp cannot go away while the buffer is referenced.
 */
void
hammer_io_advance(struct hammer_io *io)
{
	if (io->bp)
		buf_act_advance(io->bp);
}

/*
 * Remove potential device level aliases against buffers managed by high level
 * vnodes.  Aliases can also be created due to mixed buffer sizes or via
 * direct access to the backing store device.
 *
 * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
 * does not exist its backing VM pages might, and we have to invalidate
 * those as well or a getblk() will reinstate them.
 *
 * Buffer cache buffers associated with hammer_buffers cannot be
 * invalidated.
 */
int
hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
{
	hammer_io_structure_t iou;
	hammer_mount_t hmp;
	hammer_off_t phys_offset;
	struct buf *bp;
	int error;

	hmp = volume->io.hmp;
	lwkt_gettoken(&hmp->io_token);

	/*
	 * If a device buffer already exists for the specified physical
	 * offset use that, otherwise instantiate a buffer to cover any
	 * related VM pages, set B_NOCACHE, and brelse().
	 */
	phys_offset = volume->ondisk->vol_buf_beg +
		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
	if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL)
		bremfree(bp);
	else
		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);

	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
#if 0
		hammer_ref(&iou->io.lock);
		hammer_io_clear_modify(&iou->io, 1);
		bundirty(bp);
		iou->io.released = 0;
		BUF_KERNPROC(bp);
		iou->io.reclaim = 1;
		iou->io.waitdep = 1;	/* XXX this is a fs_token field */
		KKASSERT(hammer_isactive(&iou->io.lock) == 1);
		hammer_rel_buffer(&iou->buffer, 0);
		/*hammer_io_deallocate(bp);*/
#endif
		bqrelse(bp);
		error = EAGAIN;
	} else {
		KKASSERT((bp->b_flags & B_LOCKED) == 0);
		bundirty(bp);
		bp->b_flags |= B_NOCACHE|B_RELBUF;
		brelse(bp);
		error = 0;
	}
	lwkt_reltoken(&hmp->io_token);
	return(error);
}

/*
 * This routine is called on the last reference to a hammer structure.
 * The io must be interlocked with a refcount of zero.  The hammer structure
 * will remain interlocked on return.
 *
 * This routine may return a non-NULL bp to the caller for disposal.
 * The caller typically brelse()'s the bp.
 *
 * The bp may or may not still be passively associated with the IO.  It
 * will remain passively associated if it is unreleasable (e.g. a modified
 * meta-data buffer).
 *
 * The only requirement here is that modified meta-data and volume-header
 * buffers may NOT be disassociated from the IO structure, and consequently
 * we also leave such buffers actively associated with the IO if they already
 * are (since the kernel can't do anything with them anyway).  Only the
 * flusher is allowed to write such buffers out.  Modified pure-data and
 * undo buffers are returned to the kernel but left passively associated
 * so we can track when the kernel writes the bp out.
 */
struct buf *
hammer_io_release(struct hammer_io *io, int flush)
{
	union hammer_io_structure *iou = (void *)io;
	struct buf *bp;

	if ((bp = io->bp) == NULL)
		return(NULL);

	/*
	 * Try to flush a dirty IO to disk if asked to by the
	 * caller or if the kernel tried to flush the buffer in the past.
	 *
	 * Kernel-initiated flushes are only allowed for pure-data buffers.
	 * meta-data and volume buffers can only be flushed explicitly
	 * by HAMMER.
	 */
	if (io->modified) {
		if (flush) {
			hammer_io_flush(io, 0);
		} else if (bp->b_flags & B_LOCKED) {
			switch(io->type) {
			case HAMMER_STRUCTURE_DATA_BUFFER:
				hammer_io_flush(io, 0);
				break;
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				hammer_io_flush(io, hammer_undo_reclaim(io));
				break;
			default:
				break;
			}
		} /* else no explicit request to flush the buffer */
	}

	/*
	 * Wait for the IO to complete if asked to.  This occurs when
	 * the buffer must be disposed of definitively during an umount
	 * or buffer invalidation.
	 */
	if (io->waitdep && io->running) {
		hammer_io_wait(io);
	}

	/*
	 * Return control of the buffer to the kernel (with the proviso
	 * that our bioops can override kernel decisions with regard to
	 * the buffer).
	 */
	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
		/*
		 * Always disassociate the bp if an explicit flush
		 * was requested and the IO completed with no error
		 * (so unmount can really clean up the structure).
		 */
		if (io->released) {
			regetblk(bp);
			BUF_KERNPROC(bp);
		} else {
			io->released = 1;
		}
		hammer_io_disassociate((hammer_io_structure_t)io);
		/* return the bp */
	} else if (io->modified) {
		/*
		 * Only certain IO types can be released to the kernel if
		 * the buffer has been modified.
		 *
		 * volume and meta-data IO types may only be explicitly
		 * flushed by HAMMER.
		 */
		switch(io->type) {
		case HAMMER_STRUCTURE_DATA_BUFFER:
		case HAMMER_STRUCTURE_UNDO_BUFFER:
			if (io->released == 0) {
				io->released = 1;
				bdwrite(bp);
			}
			break;
		default:
			break;
		}
		bp = NULL;	/* bp left associated */
	} else if (io->released == 0) {
		/*
		 * Clean buffers can be generally released to the kernel.
		 * We leave the bp passively associated with the HAMMER
		 * structure and use bioops to disconnect it later on
		 * if the kernel wants to discard the buffer.
		 *
		 * We can steal the structure's ownership of the bp.
		 */
		io->released = 1;
		if (bp->b_flags & B_LOCKED) {
			hammer_io_disassociate(iou);
			/* return the bp */
		} else {
			if (io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		}
	} else {
		/*
		 * A released buffer is passively associated with our
		 * hammer_io structure.  The kernel cannot destroy it
		 * without making a bioops call.  If the kernel (B_LOCKED)
		 * or we (reclaim) requested that the buffer be destroyed
		 * we destroy it, otherwise we do a quick get/release to
		 * reset its position in the kernel's LRU list.
		 *
		 * Leaving the buffer passively associated allows us to
		 * use the kernel's LRU buffer flushing mechanisms rather
		 * than rolling our own.
		 *
		 * XXX there are two ways of doing this.  We can re-acquire
		 * and passively release to reset the LRU, or not.
		 */
		if (io->running == 0) {
			regetblk(bp);
			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
				hammer_io_disassociate(iou);
				/* return the bp */
			} else {
				/* return the bp (bp passively associated) */
			}
		} else {
			/*
			 * bp is left passively associated but we do not
			 * try to reacquire it.  Interactions with the io
			 * structure will occur on completion of the bp's
			 * I/O.
			 */
			bp = NULL;
		}
	}
	return(bp);
}
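
/*
 * Informal summary of the cases above:
 *
 *	flush/reclaim, clean and idle	-> disassociate, return bp
 *	modified data/undo, unreleased	-> bdwrite(), bp stays associated
 *	modified meta/volume		-> left for the flusher, bp = NULL
 *	clean, never released		-> release passively (or destroy
 *					   on B_LOCKED/reclaim)
 *	clean, released, not running	-> reget to reset LRU or destroy
 */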

/*
 * This routine is called with a locked IO when a flush is desired and
 * no other references to the structure exist other than ours.  This
 * routine is ONLY called when HAMMER believes it is safe to flush a
 * potentially modified buffer out.
 *
 * The locked io or io reference prevents a flush from being initiated
 * by the kernel.
 */
void
hammer_io_flush(struct hammer_io *io, int reclaim)
{
	struct buf *bp;
	hammer_mount_t hmp;

	/*
	 * Degenerate case - nothing to flush if nothing is dirty.
	 */
	if (io->modified == 0)
		return;

	KKASSERT(io->bp);
	KKASSERT(io->modify_refs <= 0);

	/*
	 * Acquire ownership of the bp, particularly before we clear our
	 * modified flag.
	 *
	 * We are going to bawrite() this bp.  Don't leave a window where
	 * io->released is set, we actually own the bp rather than our
	 * buffer.
	 *
	 * The io_token should not be required here as only
	 */
	hmp = io->hmp;
	bp = io->bp;
	if (io->released) {
		regetblk(bp);
		/* BUF_KERNPROC(io->bp); */
		/* io->released = 0; */
		KKASSERT(io->released);
		KKASSERT(io->bp == bp);
	} else {
		io->released = 1;
	}

	if (reclaim) {
		io->reclaim = 1;
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			atomic_add_int(&hammer_count_io_locked, 1);
		}
	}

	/*
	 * Acquire exclusive access to the bp and then clear the modified
	 * state of the buffer prior to issuing I/O to interlock any
	 * modifications made while the I/O is in progress.  This shouldn't
	 * happen anyway but losing data would be worse.  The modified bit
	 * will be rechecked after the IO completes.
	 *
	 * NOTE: This call also finalizes the buffer's content (inval == 0).
	 *
	 * This is only legal when lock.refs == 1 (otherwise we might clear
	 * the modified bit while there are still users of the cluster
	 * modifying the data).
	 *
	 * Do this before potentially blocking so any attempt to modify the
	 * ondisk while we are blocked blocks waiting for us.
	 */
	hammer_ref(&io->lock);
	hammer_io_clear_modify(io, 0);
	hammer_rel(&io->lock);

	if (hammer_debug_io & 0x0002)
		kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset);

	/*
	 * Transfer ownership to the kernel and initiate I/O.
	 *
	 * NOTE: We do not hold io_token so an atomic op is required to
	 *	 update io_running_space.
	 */
	io->running = 1;
	atomic_add_long(&hmp->io_running_space, io->bytes);
	atomic_add_long(&hammer_count_io_running_write, io->bytes);
	lwkt_gettoken(&hmp->io_token);
	TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
	lwkt_reltoken(&hmp->io_token);
	bawrite(bp);
	hammer_io_flush_mark(io->volume);
}
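
/*
 * Sketch of how a flusher pass might drain a mod tree using this
 * routine (illustrative only; the real loop in hammer_flusher.c also
 * deals with I/O errors and the write interlock):
 *
 *	while ((io = RB_ROOT(&hmp->meta_root)) != NULL) {
 *		hammer_ref(&io->lock);
 *		hammer_io_flush(io, 0);
 *		hammer_rel(&io->lock);
 *	}
 */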

/************************************************************************
 *				BUFFER DIRTYING				*
 ************************************************************************
 *
 * These routines deal with dependencies created when IO buffers get
 * modified.  The caller must call hammer_modify_*() on a referenced
 * HAMMER structure prior to modifying its on-disk data.
 *
 * Any intent to modify an IO buffer acquires the related bp and imposes
 * various write ordering dependencies.
 */

/*
 * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
 * are locked until the flusher can deal with them, pure data buffers
 * can be written out.
 *
 * The referenced io prevents races.
 */
static
void
hammer_io_modify(hammer_io_t io, int count)
{
	/*
	 * io->modify_refs must be >= 0
	 */
	while (io->modify_refs < 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}

	/*
	 * Shortcut if nothing to do.
	 */
	KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
	io->modify_refs += count;
	if (io->modified && io->released == 0)
		return;

	/*
	 * NOTE: It is important not to set the modified bit
	 *	 until after we have acquired the bp or we risk
	 *	 racing against checkwrite.
	 */
	hammer_lock_ex(&io->lock);
	if (io->released) {
		regetblk(io->bp);
		BUF_KERNPROC(io->bp);
		io->released = 0;
	}
	if (io->modified == 0) {
		hammer_io_set_modlist(io);
		io->modified = 1;
	}
	hammer_unlock(&io->lock);
}

static __inline
void
hammer_io_modify_done(hammer_io_t io)
{
	KKASSERT(io->modify_refs > 0);
	--io->modify_refs;
	if (io->modify_refs == 0 && io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}

/*
 * The write interlock blocks other threads trying to modify a buffer
 * (they block in hammer_io_modify()) after us, or blocks us while other
 * threads are in the middle of modifying a buffer.
 *
 * The caller also has a ref on the io, however if we are not careful
 * we will race bioops callbacks (checkwrite).  To deal with this
 * we must at least acquire and release the io_token, and it is probably
 * better to hold it through the setting of modify_refs.
 */
void
hammer_io_write_interlock(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	while (io->modify_refs != 0) {
		io->waitmod = 1;
		tsleep(io, 0, "hmrmod", 0);
	}
	io->modify_refs = -1;
	lwkt_reltoken(&hmp->io_token);
}

void
hammer_io_done_interlock(hammer_io_t io)
{
	KKASSERT(io->modify_refs == -1);
	io->modify_refs = 0;
	if (io->waitmod) {
		io->waitmod = 0;
		wakeup(io);
	}
}
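
/*
 * The interlock is used in pairs bracketing an explicit flush, e.g.
 * (sketch of the convention used by the flusher):
 *
 *	hammer_io_write_interlock(io);
 *	hammer_io_flush(io, 0);
 *	hammer_io_done_interlock(io);
 */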

/*
 * Caller intends to modify a volume's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&volume->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
			 base, len);
	}
}

/*
 * Caller intends to modify a buffer's ondisk structure.
 *
 * This is only allowed if we are the flusher or we have a ref on the
 * sync_lock.
 */
void
hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
		     void *base, int len)
{
	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);

	hammer_io_modify(&buffer->io, 1);
	if (len) {
		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
		hammer_generate_undo(trans,
				     buffer->zone2_offset + rel_offset,
				     base, len);
	}
}

void
hammer_modify_volume_done(hammer_volume_t volume)
{
	hammer_io_modify_done(&volume->io);
}

void
hammer_modify_buffer_done(hammer_buffer_t buffer)
{
	hammer_io_modify_done(&buffer->io);
}

/*
 * Mark an entity as not being dirty any more and finalize any
 * delayed adjustments to the buffer.
 *
 * Delayed adjustments are an important performance enhancement, allowing
 * us to avoid recalculating B-Tree node CRCs over and over again when
 * making bulk-modifications to the B-Tree.
 *
 * If inval is non-zero delayed adjustments are ignored.
 *
 * This routine may dereference related btree nodes and cause the
 * buffer to be dereferenced.  The caller must own a reference on io.
 */
void
hammer_io_clear_modify(struct hammer_io *io, int inval)
{
	hammer_mount_t hmp;

	/*
	 * io_token is needed to avoid races on mod_root
	 */
	if (io->modified == 0)
		return;
	hmp = io->hmp;
	lwkt_gettoken(&hmp->io_token);
	if (io->modified == 0) {
		lwkt_reltoken(&hmp->io_token);
		return;
	}

	/*
	 * Take us off the mod-list and clear the modified bit.
	 */
	KKASSERT(io->mod_root != NULL);
	if (io->mod_root == &io->hmp->volu_root ||
	    io->mod_root == &io->hmp->meta_root) {
		io->hmp->locked_dirty_space -= io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, -io->bytes);
	}
	RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
	io->mod_root = NULL;
	io->modified = 0;

	lwkt_reltoken(&hmp->io_token);

	/*
	 * If this bit is not set there are no delayed adjustments.
	 */
	if (io->gencrc == 0)
		return;
	io->gencrc = 0;

	/*
	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
	 * on the node (& underlying buffer).  Release the node after clearing
	 * the flag.
	 */
	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
		hammer_buffer_t buffer = (void *)io;
		hammer_node_t node;

restart:
		TAILQ_FOREACH(node, &buffer->clist, entry) {
			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
				continue;
			node->flags &= ~HAMMER_NODE_NEEDSCRC;
			KKASSERT(node->ondisk);
			if (inval == 0)
				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
			hammer_rel_node(node);
			goto restart;
		}
	}
	/* caller must still have ref on io */
	KKASSERT(hammer_isactive(&io->lock));
}

/*
 * Clear the IO's modify list.  Even though the IO is no longer modified
 * it may still be on the lose_root.  This routine is called just before
 * the governing hammer_buffer is destroyed.
 *
 * mod_root requires io_token protection.
 */
void
hammer_io_clear_modlist(struct hammer_io *io)
{
	hammer_mount_t hmp = io->hmp;

	KKASSERT(io->modified == 0);
	if (io->mod_root) {
		lwkt_gettoken(&hmp->io_token);
		if (io->mod_root) {
			KKASSERT(io->mod_root == &io->hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
		}
		lwkt_reltoken(&hmp->io_token);
	}
}

static void
hammer_io_set_modlist(struct hammer_io *io)
{
	struct hammer_mount *hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);
	KKASSERT(io->mod_root == NULL);

	switch(io->type) {
	case HAMMER_STRUCTURE_VOLUME:
		io->mod_root = &hmp->volu_root;
		hmp->locked_dirty_space += io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
		break;
	case HAMMER_STRUCTURE_META_BUFFER:
		io->mod_root = &hmp->meta_root;
		hmp->locked_dirty_space += io->bytes;
		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
		break;
	case HAMMER_STRUCTURE_UNDO_BUFFER:
		io->mod_root = &hmp->undo_root;
		break;
	case HAMMER_STRUCTURE_DATA_BUFFER:
		io->mod_root = &hmp->data_root;
		break;
	case HAMMER_STRUCTURE_DUMMY:
		panic("hammer_io_set_modlist: bad io type");
		break; /* NOT REACHED */
	}
	if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
		panic("hammer_io_set_modlist: duplicate entry");
		/* NOT REACHED */
	}
	lwkt_reltoken(&hmp->io_token);
}

/************************************************************************
 *				HAMMER_BIOOPS				*
 ************************************************************************
 *
 */

/*
 * Pre-IO initiation kernel callback - cluster build only
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_start(struct buf *bp)
{
	/* nothing to do, so io_token not needed */
}

/*
 * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
 *
 * NOTE: HAMMER may modify a data buffer after we have initiated write
 *	 I/O.
 *
 * NOTE: MPSAFE callback
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_complete(struct buf *bp)
{
	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
	struct hammer_mount *hmp = iou->io.hmp;
	struct hammer_io *ionext;

	lwkt_gettoken(&hmp->io_token);

	KKASSERT(iou->io.released == 1);

	/*
	 * Deal with people waiting for I/O to drain
	 */
	if (iou->io.running) {
		/*
		 * Deal with critical write errors.  Once a critical error
		 * has been flagged in hmp the UNDO FIFO will not be updated.
		 * That way crash recovery will give us a consistent
		 * filesystem.
		 *
		 * Because of this we can throw away failed UNDO buffers.  If
		 * we throw away META or DATA buffers we risk corrupting
		 * the now read-only version of the filesystem visible to
		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
		 * by the kernel and ref the io so it doesn't get thrown
		 * away.
		 */
		if (bp->b_flags & B_ERROR) {
			lwkt_gettoken(&hmp->fs_token);
			hammer_critical_error(hmp, NULL, bp->b_error,
					      "while flushing meta-data");
			lwkt_reltoken(&hmp->fs_token);

			switch(iou->io.type) {
			case HAMMER_STRUCTURE_UNDO_BUFFER:
				break;
			default:
				if (iou->io.ioerror == 0) {
					iou->io.ioerror = 1;
					hammer_ref(&iou->io.lock);
				}
				break;
			}
			bp->b_flags &= ~B_ERROR;
			bundirty(bp);
#if 0
			hammer_io_set_modlist(&iou->io);
			iou->io.modified = 1;
#endif
		}
		hammer_stats_disk_write += iou->io.bytes;
		atomic_add_long(&hammer_count_io_running_write, -iou->io.bytes);
		atomic_add_long(&hmp->io_running_space, -iou->io.bytes);
		KKASSERT(hmp->io_running_space >= 0);
		iou->io.running = 0;

		/*
		 * Remove from iorun list and wakeup any multi-io waiter(s).
		 */
		if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) {
			ionext = TAILQ_NEXT(&iou->io, iorun_entry);
			if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
				wakeup(ionext);
		}
		TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry);
	} else {
		hammer_stats_disk_read += iou->io.bytes;
	}

	if (iou->io.waiting) {
		iou->io.waiting = 0;
		wakeup(iou);
	}

	/*
	 * If B_LOCKED is set someone wanted to deallocate the bp at some
	 * point, try to do it now.  The operation will fail if there are
	 * refs or if hammer_io_deallocate() is unable to gain the
	 * interlock.
	 */
	if (bp->b_flags & B_LOCKED) {
		atomic_add_int(&hammer_count_io_locked, -1);
		bp->b_flags &= ~B_LOCKED;
		hammer_io_deallocate(bp);
		/* structure may be dead now */
	}
	lwkt_reltoken(&hmp->io_token);
}

/*
 * Callback from kernel when it wishes to deallocate a passively
 * associated structure.  This mostly occurs with clean buffers
 * but it may be possible for a holding structure to be marked dirty
 * while its buffer is passively associated.  The caller owns the bp.
 *
 * If we cannot disassociate we set B_LOCKED to prevent the buffer
 * from getting reused.
 *
 * WARNING: Because this can be called directly by getnewbuf we cannot
 * recurse into the tree.  If a bp cannot be immediately disassociated
 * our only recourse is to set B_LOCKED.
 *
 * WARNING: This may be called from an interrupt via hammer_io_complete()
 *
 * bioops callback - hold io_token
 */
static void
hammer_io_deallocate(struct buf *bp)
{
	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
	hammer_mount_t hmp;

	hmp = iou->io.hmp;

	lwkt_gettoken(&hmp->io_token);

	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
	if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
		/*
		 * We cannot safely disassociate a bp from a referenced
		 * or interlocked HAMMER structure.
		 */
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
	} else if (iou->io.modified) {
		/*
		 * It is not legal to disassociate a modified buffer.  This
		 * case really shouldn't ever occur.
		 */
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
		hammer_put_interlock(&iou->io.lock, 0);
	} else {
		/*
		 * Disassociate the BP.  If the io has no refs left we
		 * have to add it to the loose list.  The kernel has
		 * locked the buffer and therefore our io must be
		 * in a released state.
		 */
		hammer_io_disassociate(iou);
		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
			KKASSERT(iou->io.bp == NULL);
			KKASSERT(iou->io.mod_root == NULL);
			iou->io.mod_root = &hmp->lose_root;
			if (RB_INSERT(hammer_mod_rb_tree, iou->io.mod_root,
				      &iou->io)) {
				panic("hammer_io_deallocate: duplicate entry");
			}
		}
		hammer_put_interlock(&iou->io.lock, 1);
	}
	lwkt_reltoken(&hmp->io_token);
}

/*
 * bioops callback - hold io_token
 */
static int
hammer_io_fsync(struct vnode *vp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}

/*
 * NOTE: will not be called unless we tell the kernel about the
 * bioops.  Unused... we use the mount's VFS_SYNC instead.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_sync(struct mount *mp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}

/*
 * bioops callback - hold io_token
 */
static void
hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
{
	/* nothing to do, so io_token not needed */
}

/*
 * I/O pre-check for reading and writing.  HAMMER only uses this for
 * B_CACHE buffers so checkread just shouldn't happen, but if it does
 * allow it.
 *
 * Writing is a different case.  We don't want the kernel to try to write
 * out a buffer that HAMMER may be modifying passively or which has a
 * dependency.  In addition, kernel-demanded writes can only proceed for
 * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
 * buffer types can only be explicitly written by the flusher.
 *
 * checkwrite will only be called for bdwrite()n buffers.  If we return
 * success the kernel is guaranteed to initiate the buffer write.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_checkread(struct buf *bp)
{
	/* nothing to do, so io_token not needed */
	return(0);
}

/*
 * The kernel is asking us whether it can write out a dirty buffer or not.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_checkwrite(struct buf *bp)
{
	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
	hammer_mount_t hmp = io->hmp;

	/*
	 * This shouldn't happen under normal operation.
	 */
	lwkt_gettoken(&hmp->io_token);
	if (io->type == HAMMER_STRUCTURE_VOLUME ||
	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
		if (!panicstr)
			panic("hammer_io_checkwrite: illegal buffer");
		if ((bp->b_flags & B_LOCKED) == 0) {
			bp->b_flags |= B_LOCKED;
			atomic_add_int(&hammer_count_io_locked, 1);
		}
		lwkt_reltoken(&hmp->io_token);
		return(1);
	}

	/*
	 * We have to be able to interlock the IO to safely modify any
	 * of its fields without holding the fs_token.  If we can't lock
	 * it then we are racing someone.
	 *
	 * Our ownership of the bp lock prevents the io from being ripped
	 * out from under us.
	 */
	if (hammer_try_interlock_norefs(&io->lock) == 0) {
		bp->b_flags |= B_LOCKED;
		atomic_add_int(&hammer_count_io_locked, 1);
		lwkt_reltoken(&hmp->io_token);
		return(1);
	}

	/*
	 * The modified bit must be cleared prior to the initiation of
	 * any IO (returning 0 initiates the IO).  Because this is a
	 * normal data buffer hammer_io_clear_modify() runs through a
	 * simple degenerate case.
	 *
	 * Return 0 will cause the kernel to initiate the IO, and we
	 * must normally clear the modified bit before we begin.  If
	 * the io has modify_refs we do not clear the modified bit,
	 * otherwise we may miss changes.
	 *
	 * Only data and undo buffers can reach here.  These buffers do
	 * not have terminal crc functions but we temporarily reference
	 * the IO anyway, just in case.
	 */
	if (io->modify_refs == 0 && io->modified) {
		hammer_ref(&io->lock);
		hammer_io_clear_modify(io, 0);
		hammer_rel(&io->lock);
	} else if (io->modified) {
		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
	}

	/*
	 * The kernel is going to start the IO, set io->running.
	 */
	KKASSERT(io->running == 0);
	io->running = 1;
	atomic_add_long(&io->hmp->io_running_space, io->bytes);
	atomic_add_long(&hammer_count_io_running_write, io->bytes);
	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);

	hammer_put_interlock(&io->lock, 1);
	lwkt_reltoken(&hmp->io_token);

	return(0);
}

/*
 * Return non-zero if we wish to delay the kernel's attempt to flush
 * this buffer to disk.
 *
 * bioops callback - hold io_token
 */
static int
hammer_io_countdeps(struct buf *bp, int n)
{
	/* nothing to do, so io_token not needed */
	return(0);
}

struct bio_ops hammer_bioops = {
	.io_start	= hammer_io_start,
	.io_complete	= hammer_io_complete,
	.io_deallocate	= hammer_io_deallocate,
	.io_fsync	= hammer_io_fsync,
	.io_sync	= hammer_io_sync,
	.io_movedeps	= hammer_io_movedeps,
	.io_countdeps	= hammer_io_countdeps,
	.io_checkread	= hammer_io_checkread,
	.io_checkwrite	= hammer_io_checkwrite,
};
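
/*
 * The bioops are wired to a buffer when it becomes associated with a
 * hammer_io, as done in hammer_io_read()/hammer_io_new() above:
 *
 *	bp->b_ops = &hammer_bioops;
 *	LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
 */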

/************************************************************************
 *				DIRECT IO OPS 				*
 ************************************************************************
 *
 * These functions operate directly on the buffer cache buffer associated
 * with a front-end vnode rather than a back-end device vnode.
 */

/*
 * Read a buffer associated with a front-end vnode directly from the
 * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
 * we validate the CRC.
 *
 * We must check for the presence of a HAMMER buffer to handle the case
 * where the reblocker has rewritten the data (which it does via the HAMMER
 * buffer system, not via the high-level vnode buffer cache), but not yet
 * committed the buffer to the media.
 */
int
hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	struct bio *nbio;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * 3rd level bio
		 */
		nbio = push_bio(bio);
		nbio->bio_offset = volume->ondisk->vol_buf_beg +
				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
#if 0
		/*
		 * XXX disabled - our CRC check doesn't work if the OS
		 * does bogus_page replacement on the direct-read.
		 */
		if (leaf && hammer_verify_data) {
			nbio->bio_done = hammer_io_direct_read_complete;
			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
		}
#endif
		hammer_stats_disk_read += bp->b_bufsize;
		vn_strategy(volume->devvp, nbio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			(long long)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}

/*
 * This works similarly to hammer_io_direct_read() except instead of
 * directly reading from the device into the bio we instead indirectly
 * read through the device's buffer cache and then copy the data into
 * the bio.
 *
 * If leaf is non-NULL and validation is enabled, the CRC will be checked.
 *
 * This routine also executes asynchronously.  It allows hammer strategy
 * calls to operate asynchronously when in double_buffer mode (in addition
 * to operating asynchronously when in normal mode).
 */
int
hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
			hammer_btree_leaf_elm_t leaf)
{
	hammer_off_t buf_offset;
	hammer_off_t zone2_offset;
	hammer_volume_t volume;
	struct buf *bp;
	int vol_no;
	int error;

	buf_offset = bio->bio_offset;
	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_LARGE_DATA);

	/*
	 * The buffer cache may have an aliased buffer (the reblocker can
	 * write them).  If it does we have to sync any dirty data before
	 * we can build our direct-read.  This is a non-critical code path.
	 */
	bp = bio->bio_buf;
	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);

	/*
	 * Resolve to a zone-2 offset.  The conversion just requires
	 * munging the top 4 bits but we want to abstract it anyway
	 * so the blockmap code can verify the zone assignment.
	 */
	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
	if (error)
		goto done;
	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
		 HAMMER_ZONE_RAW_BUFFER);

	/*
	 * Resolve volume and raw-offset for 3rd level bio.  The
	 * offset will be specific to the volume.
	 */
	vol_no = HAMMER_VOL_DECODE(zone2_offset);
	volume = hammer_get_volume(hmp, vol_no, &error);
	if (error == 0 && zone2_offset >= volume->maxbuf_off)
		error = EIO;

	if (error == 0) {
		/*
		 * Convert to the raw volume->devvp offset and acquire
		 * the buf, issuing async I/O if necessary.
		 */
		buf_offset = volume->ondisk->vol_buf_beg +
			     (zone2_offset & HAMMER_OFF_SHORT_MASK);

		if (leaf && hammer_verify_data) {
			bio->bio_caller_info1.uvalue32 = leaf->data_crc;
			bio->bio_caller_info2.index = 1;
		} else {
			bio->bio_caller_info2.index = 0;
		}
		breadcb(volume->devvp, buf_offset, bp->b_bufsize,
			hammer_indirect_callback, bio);
	}
	hammer_rel_volume(volume, 0);
done:
	if (error) {
		kprintf("hammer_direct_read: failed @ %016llx\n",
			(long long)zone2_offset);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		biodone(bio);
	}
	return(error);
}

/*
 * Indirect callback on completion.  bio/bp specify the device-backed
 * buffer.  bio->bio_caller_info1.ptr holds obio.
 *
 * obio/obp is the original regular file buffer.  obio->bio_caller_info*
 * contains the crc specification.
 *
 * We are responsible for calling bpdone() and bqrelse() on bio/bp, and
 * for calling biodone() on obio.
 */
static void
hammer_indirect_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *obp;
	struct bio *obio;

	/*
	 * If BIO_DONE is already set the device buffer was already
	 * fully valid (B_CACHE).  If it is not set then I/O was issued
	 * and we have to run I/O completion as the last bio.
	 *
	 * Nobody is waiting for our device I/O to complete, we are
	 * responsible for bqrelse()ing it which means we also have to do
	 * the equivalent of biowait() and clear BIO_DONE (which breadcb()
	 * may have set).
	 *
	 * Any preexisting device buffer should match the requested size,
	 * but due to bigblock recycling and other factors there is some
	 * fragility there, so we assert that the device buffer covers
	 * the request.
	 */
	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(bp, 0);
	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);

	obio = bio->bio_caller_info1.ptr;
	obp = obio->bio_buf;

	if (bp->b_flags & B_ERROR) {
		obp->b_flags |= B_ERROR;
		obp->b_error = bp->b_error;
	} else if (obio->bio_caller_info2.index &&
		   obio->bio_caller_info1.uvalue32 !=
		    crc32(bp->b_data, bp->b_bufsize)) {
		obp->b_flags |= B_ERROR;
		obp->b_error = EIO;
	} else {
		KKASSERT(bp->b_bufsize >= obp->b_bufsize);
		bcopy(bp->b_data, obp->b_data, obp->b_bufsize);
		obp->b_resid = 0;
		obp->b_flags |= B_AGE;
	}
	biodone(obio);
	bqrelse(bp);
}

#if 0
/*
 * On completion of the BIO this callback must check the data CRC
 * and chain to the previous bio.
 *
 * MPSAFE - since we do not modify any hammer_records we do not need
 *	    io_token.
 *
 * NOTE: MPSAFE callback
 */
static
void
hammer_io_direct_read_complete(struct bio *nbio)
{
	struct bio *obio;
	struct buf *bp;
	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;

	bp = nbio->bio_buf;
	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
		kprintf("HAMMER: data_crc error @%016llx/%d\n",
			nbio->bio_offset, bp->b_bufsize);
		if (hammer_debug_critical)
			Debugger("data_crc on read");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
	}
	obio = pop_bio(nbio);
	biodone(obio);
}
#endif
1655 
1656 /*
1657  * Write a buffer associated with a front-end vnode directly to the
1658  * disk media.  The bio may be issued asynchronously.
1659  *
1660  * The BIO is associated with the specified record and RECG_DIRECT_IO
1661  * is set.  The recorded is added to its object.
1662  */
1663 int
1664 hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
1665 		       hammer_record_t record)
1666 {
1667 	hammer_btree_leaf_elm_t leaf = &record->leaf;
1668 	hammer_off_t buf_offset;
1669 	hammer_off_t zone2_offset;
1670 	hammer_volume_t volume;
1671 	hammer_buffer_t buffer;
1672 	struct buf *bp;
1673 	struct bio *nbio;
1674 	char *ptr;
1675 	int vol_no;
1676 	int error;
1677 
1678 	buf_offset = leaf->data_offset;
1679 
1680 	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
1681 	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
1682 
1683 	/*
1684 	 * Issue or execute the I/O.  The new memory record must replace
1685 	 * the old one before the I/O completes, otherwise a reaquisition of
1686 	 * the buffer will load the old media data instead of the new.
1687 	 */
1688 	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
1689 	    leaf->data_len >= HAMMER_BUFSIZE) {
1690 		/*
1691 		 * We are using the vnode's bio to write directly to the
1692 		 * media, any hammer_buffer at the same zone-X offset will
1693 		 * now have stale data.
1694 		 */
1695 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1696 		vol_no = HAMMER_VOL_DECODE(zone2_offset);
1697 		volume = hammer_get_volume(hmp, vol_no, &error);
1698 
1699 		if (error == 0 && zone2_offset >= volume->maxbuf_off)
1700 			error = EIO;
1701 		if (error == 0) {
1702 			bp = bio->bio_buf;
1703 			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
1704 			/*
1705 			hammer_del_buffers(hmp, buf_offset,
1706 					   zone2_offset, bp->b_bufsize);
1707 			*/
1708 
1709 			/*
1710 			 * Second level bio - cached zone2 offset.
1711 			 *
1712 			 * (We can put our bio_done function in either the
1713 			 *  2nd or 3rd level).
1714 			 */
1715 			nbio = push_bio(bio);
1716 			nbio->bio_offset = zone2_offset;
1717 			nbio->bio_done = hammer_io_direct_write_complete;
1718 			nbio->bio_caller_info1.ptr = record;
1719 			record->zone2_offset = zone2_offset;
1720 			record->gflags |= HAMMER_RECG_DIRECT_IO |
1721 					 HAMMER_RECG_DIRECT_INVAL;
1722 
1723 			/*
1724 			 * Third level bio - raw offset specific to the
1725 			 * correct volume.
1726 			 */
1727 			zone2_offset &= HAMMER_OFF_SHORT_MASK;
1728 			nbio = push_bio(nbio);
1729 			nbio->bio_offset = volume->ondisk->vol_buf_beg +
1730 					   zone2_offset;
1731 			hammer_stats_disk_write += bp->b_bufsize;
1732 			hammer_ip_replace_bulk(hmp, record);
1733 			vn_strategy(volume->devvp, nbio);
1734 			hammer_io_flush_mark(volume);
1735 		}
1736 		hammer_rel_volume(volume, 0);
1737 	} else {
1738 		/*
1739 		 * Must fit in a standard HAMMER buffer.  In this case all
1740 		 * consumers use the HAMMER buffer system and RECG_DIRECT_IO
1741 		 * does not need to be set-up.
1742 		 */
1743 		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
1744 		buffer = NULL;
1745 		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
1746 		if (error == 0) {
1747 			bp = bio->bio_buf;
1748 			bp->b_flags |= B_AGE;
1749 			hammer_io_modify(&buffer->io, 1);
1750 			bcopy(bp->b_data, ptr, leaf->data_len);
1751 			hammer_io_modify_done(&buffer->io);
1752 			hammer_rel_buffer(buffer, 0);
1753 			bp->b_resid = 0;
1754 			hammer_ip_replace_bulk(hmp, record);
1755 			biodone(bio);
1756 		}
1757 	}
1758 	if (error) {
1759 		/*
1760 		 * Major suckage occurred.  Also note:  The record was
1761 		 * never added to the tree so we do not have to worry
1762 		 * about the backend.
1763 		 */
1764 		kprintf("hammer_io_direct_write: failed @ %016llx\n",
1765 			(long long)leaf->data_offset);
1766 		bp = bio->bio_buf;
1767 		bp->b_resid = 0;
1768 		bp->b_error = EIO;
1769 		bp->b_flags |= B_ERROR;
1770 		biodone(bio);
1771 		record->flags |= HAMMER_RECF_DELETED_FE;
1772 		hammer_rel_mem_record(record);
1773 	}
1774 	return(error);
1775 }
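
/*
 * Illustration only (not compiled): the direct-write fast path above is
 * taken when the zone-X data offset is HAMMER_BUFSIZE-aligned and the
 * record covers at least one full buffer; everything else funnels
 * through the HAMMER buffer system.  hammer_direct_write_ok() is a
 * hypothetical helper sketching the same test.
 */
#if 0
static __inline int
hammer_direct_write_ok(hammer_btree_leaf_elm_t leaf)
{
	return ((leaf->data_offset & HAMMER_BUFMASK) == 0 &&
		leaf->data_len >= HAMMER_BUFSIZE);
}
#endif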
1776 
1777 /*
1778  * On completion of the BIO this callback must disconnect
1779  * it from the hammer_record and chain to the previous bio.
1780  *
1781  * An I/O error forces the mount to read-only.  Data buffers
1782  * are not B_LOCKED like meta-data buffers are, so we have to
1783  * throw the buffer away to prevent the kernel from retrying.
1784  *
1785  * NOTE: MPSAFE callback, only modify fields we have explicit
1786  *	 access to (the bp and the record->gflags).
1787  */
1788 static
1789 void
1790 hammer_io_direct_write_complete(struct bio *nbio)
1791 {
1792 	struct bio *obio;
1793 	struct buf *bp;
1794 	hammer_record_t record;
1795 	hammer_mount_t hmp;
1796 
1797 	record = nbio->bio_caller_info1.ptr;
1798 	KKASSERT(record != NULL);
1799 	hmp = record->ip->hmp;
1800 
1801 	lwkt_gettoken(&hmp->io_token);
1802 
1803 	bp = nbio->bio_buf;
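	/*
	 * Popping the bio returns the frontend's original bio;
	 * biodone() on it completes the vnode-level write.
	 */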
1804 	obio = pop_bio(nbio);
1805 	if (bp->b_flags & B_ERROR) {
1806 		lwkt_gettoken(&hmp->fs_token);
1807 		hammer_critical_error(hmp, record->ip,
1808 				      bp->b_error,
1809 				      "while writing bulk data");
1810 		lwkt_reltoken(&hmp->fs_token);
1811 		bp->b_flags |= B_INVAL;
1812 	}
1813 	biodone(obio);
1814 
1815 	KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO);
1816 	if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
1817 		record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
1818 				    HAMMER_RECG_DIRECT_WAIT);
1819 		/* record can disappear once DIRECT_IO flag is cleared */
1820 		wakeup(&record->flags);
1821 	} else {
1822 		record->gflags &= ~HAMMER_RECG_DIRECT_IO;
1823 		/* record can disappear once DIRECT_IO flag is cleared */
1824 	}
1825 	lwkt_reltoken(&hmp->io_token);
1826 }
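
/*
 * Illustration only (not compiled): the wait/wakeup handshake between
 * hammer_io_direct_wait() and the callback above, shown side by side.
 * Both fragments run under the io_token, and note that both sides
 * sleep and wake on &record->flags even though the bits themselves
 * live in record->gflags.
 */
#if 0
	/* waiter side (hammer_io_direct_wait) */
	while (record->gflags & HAMMER_RECG_DIRECT_IO) {
		record->gflags |= HAMMER_RECG_DIRECT_WAIT;
		tsleep(&record->flags, 0, "hmdiow", 0);
	}

	/* waker side (hammer_io_direct_write_complete) */
	record->gflags &= ~(HAMMER_RECG_DIRECT_IO | HAMMER_RECG_DIRECT_WAIT);
	wakeup(&record->flags);
#endif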
1827 
1828 
1829 /*
1830  * This is called before a record is either committed to the B-Tree
1831  * or destroyed, to resolve any associated direct-IO.
1832  *
1833  * (1) We must wait for any direct-IO related to the record to complete.
1834  *
1835  * (2) We must remove any buffer cache aliases for data accessed via
1836  *     leaf->data_offset or zone2_offset so non-direct-IO consumers
1837  *     (the mirroring and reblocking code) do not see stale data.
1838  */
1839 void
1840 hammer_io_direct_wait(hammer_record_t record)
1841 {
1842 	hammer_mount_t hmp = record->ip->hmp;
1843 
1844 	/*
1845 	 * Wait for I/O to complete
1846 	 */
1847 	if (record->gflags & HAMMER_RECG_DIRECT_IO) {
1848 		lwkt_gettoken(&hmp->io_token);
1849 		while (record->gflags & HAMMER_RECG_DIRECT_IO) {
1850 			record->gflags |= HAMMER_RECG_DIRECT_WAIT;
1851 			tsleep(&record->flags, 0, "hmdiow", 0);
1852 		}
1853 		lwkt_reltoken(&hmp->io_token);
1854 	}
1855 
1856 	/*
1857 	 * Invalidate any related buffer cache aliases associated with the
1858 	 * backing device.  This is needed because the buffer cache buffer
1859 	 * for file data is associated with the file vnode, not the backing
1860 	 * device vnode.
1861 	 *
1862 	 * XXX I do not think this case can occur any more now that
1863 	 * reservations ensure that all such buffers are removed before
1864 	 * an area can be reused.
1865 	 */
1866 	if (record->gflags & HAMMER_RECG_DIRECT_INVAL) {
1867 		KKASSERT(record->leaf.data_offset);
1868 		hammer_del_buffers(hmp, record->leaf.data_offset,
1869 				   record->zone2_offset, record->leaf.data_len,
1870 				   1);
1871 		record->gflags &= ~HAMMER_RECG_DIRECT_INVAL;
1872 	}
1873 }
1874 
1875 /*
1876  * This is called to remove the second-level cached zone-2 offset from
1877  * frontend buffer cache buffers, now stale due to a data relocation.
1878  * These offsets are generated by cluster_read() via VOP_BMAP, or directly
1879  * by hammer_vop_strategy_read().
1880  *
1881  * This is rather nasty because here we have something like the reblocker
1882  * scanning the raw B-Tree with no held references on anything, really,
1883  * other than a shared lock on the B-Tree node, and we have to access the
1884  * frontend's buffer cache to check for and clean out the association.
1885  * Specifically, if the reblocker is moving data on the disk, these cached
1886  * offsets will become invalid.
1887  *
1888  * Only data record types associated with the large-data zone are subject
1889  * to direct-io and need to be checked.
1890  *
1892 void
1893 hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
1894 {
1895 	struct hammer_inode_info iinfo;
1896 	int zone;
1897 
1898 	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
1899 		return;
1900 	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
1901 	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
1902 		return;
1903 	iinfo.obj_id = leaf->base.obj_id;
1904 	iinfo.obj_asof = 0;	/* unused */
1905 	iinfo.obj_localization = leaf->base.localization &
1906 				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
1907 	iinfo.u.leaf = leaf;
1908 	hammer_scan_inode_snapshots(hmp, &iinfo,
1909 				    hammer_io_direct_uncache_callback,
1910 				    leaf);
1911 }
1912 
1913 static int
1914 hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
1915 {
1916 	hammer_inode_info_t iinfo = data;
1917 	hammer_off_t data_offset;
1918 	hammer_off_t file_offset;
1919 	struct vnode *vp;
1920 	struct buf *bp;
1921 	int blksize;
1922 
1923 	if (ip->vp == NULL)
1924 		return(0);
1925 	data_offset = iinfo->u.leaf->data_offset;
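	/*
	 * A HAMMER data record's base.key addresses the byte just past
	 * the end of its extent, so the buffer's file offset is the key
	 * minus data_len.
	 */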
1926 	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
1927 	blksize = iinfo->u.leaf->data_len;
1928 	KKASSERT((blksize & HAMMER_BUFMASK) == 0);
1929 
1930 	/*
1931 	 * Warning: FINDBLK_TEST returns stable storage but not stable
1932 	 *	    contents.  It happens to be ok in this case.
1933 	 */
1934 	hammer_ref(&ip->lock);
1935 	if (hammer_get_vnode(ip, &vp) == 0) {
1936 		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
1937 		    bp->b_bio2.bio_offset != NOOFFSET) {
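			/*
			 * Reacquire the buffer locked via getblk() and
			 * clear the cached zone-2 translation; the next
			 * strategy call must look it up again.
			 */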
1938 			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
1939 			bp->b_bio2.bio_offset = NOOFFSET;
1940 			brelse(bp);
1941 		}
1942 		vput(vp);
1943 	}
1944 	hammer_rel_inode(ip, 0);
1945 	return(0);
1946 }
1947 
1948 
1949 /*
1950  * This function is called when writes may have occurred on the volume,
1951  * indicating that the device may be holding cached writes.
1952  */
1953 static void
1954 hammer_io_flush_mark(hammer_volume_t volume)
1955 {
1956 	atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH);
1957 }
1958 
1959 /*
1960  * This function ensures that the devices have flushed out any cached writes.
1961  */
1962 void
1963 hammer_io_flush_sync(hammer_mount_t hmp)
1964 {
1965 	hammer_volume_t volume;
1966 	struct buf *bp_base = NULL;
1967 	struct buf *bp;
1968 
1969 	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1970 		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
1971 			atomic_clear_int(&volume->vol_flags,
1972 					 HAMMER_VOLF_NEEDFLUSH);
1973 			bp = getpbuf(NULL);
1974 			bp->b_bio1.bio_offset = 0;
1975 			bp->b_bufsize = 0;
1976 			bp->b_bcount = 0;
1977 			bp->b_cmd = BUF_CMD_FLUSH;
1978 			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
1979 			bp->b_bio1.bio_done = biodone_sync;
1980 			bp->b_bio1.bio_flags |= BIO_SYNC;
1981 			bp_base = bp;
1982 			vn_strategy(volume->devvp, &bp->b_bio1);
1983 		}
1984 	}
1985 	while ((bp = bp_base) != NULL) {
1986 		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
1987 		biowait(&bp->b_bio1, "hmrFLS");
1988 		relpbuf(bp, NULL);
1989 	}
1990 }
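
/*
 * Illustration only (not compiled): one synchronous BUF_CMD_FLUSH
 * against a single device vnode.  The loop above issues the same flush
 * to every NEEDFLUSH volume before waiting on any of them, so the
 * devices flush in parallel rather than serially.  The function name
 * is hypothetical.
 */
#if 0
static void
hammer_example_flush_one(struct vnode *devvp)
{
	struct buf *bp;

	bp = getpbuf(NULL);
	bp->b_bio1.bio_offset = 0;
	bp->b_bufsize = 0;
	bp->b_bcount = 0;
	bp->b_cmd = BUF_CMD_FLUSH;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	vn_strategy(devvp, &bp->b_bio1);
	biowait(&bp->b_bio1, "hmrFLS");
	relpbuf(bp, NULL);
}
#endif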
1991 
1992 /*
1993  * Limit the amount of write I/O backlog which we allow to build up.
1994  */
1995 void
1996 hammer_io_limit_backlog(hammer_mount_t hmp)
1997 {
1998 	waitrunningbufspace();
1999 }
2000