xref: /dragonfly/sys/vfs/hammer/hammer_io.c (revision 2983445f)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * IO Primitives and buffer cache management
36  *
37  * All major data-tracking structures in HAMMER contain a struct hammer_io
38  * which is used to manage their backing store.  We use filesystem buffers
39  * for backing store and we leave them passively associated with their
40  * HAMMER structures.
41  *
42  * If the kernel tries to destroy a passively associated buf which we cannot
43  * yet let go, we set B_LOCKED in the buffer and then actively release it
44  * later when we can.
45  *
46  * The io_token is required for anything which might race bioops and bio_done
47  * callbacks, with one exception: A successful hammer_try_interlock_norefs().
48  * The fs_token will be held in all other cases.
49  */
50 
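/*
 * Illustrative sketch only (not part of the original source): code paths
 * that touch fields which can race bioops/bio_done callbacks typically
 * bracket the access with the per-mount io_token:
 *
 *	lwkt_gettoken(&hmp->io_token);
 *	... touch io->running, io->waiting, the mod/iorun lists ...
 *	lwkt_reltoken(&hmp->io_token);
 *
 * Everything else in this file runs under the fs_token as noted above.
 */
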
51 #include "hammer.h"
52 #include <sys/fcntl.h>
53 #include <sys/nlookup.h>
54 #include <sys/buf.h>
55 #include <sys/buf2.h>
56 
57 static void hammer_io_modify(hammer_io_t io, int count);
58 static void hammer_io_deallocate(struct buf *bp);
59 #if 0
60 static void hammer_io_direct_read_complete(struct bio *nbio);
61 #endif
62 static void hammer_io_direct_write_complete(struct bio *nbio);
63 static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
64 static void hammer_io_set_modlist(struct hammer_io *io);
65 static void hammer_io_flush_mark(hammer_volume_t volume);
66 
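/*
 * RB-tree comparator for the per-mount modified-io trees.  The sort key
 * is synthesized from the encoded offset: the in-volume offset (masked
 * with HAMMER_OFF_SHORT_MASK) forms the high bits and the decoded volume
 * number the low 8 bits, so entries sort primarily by buffer offset and
 * secondarily by volume number.
 */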
67 static int
68 hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2)
69 {
70 	hammer_off_t io1_offset;
71 	hammer_off_t io2_offset;
72 
73 	io1_offset = ((io1->offset & HAMMER_OFF_SHORT_MASK) << 8) |
74 		     HAMMER_VOL_DECODE(io1->offset);
75 	io2_offset = ((io2->offset & HAMMER_OFF_SHORT_MASK) << 8) |
76 		     HAMMER_VOL_DECODE(io2->offset);
77 
78 	if (io1_offset < io2_offset)
79 		return(-1);
80 	if (io1_offset > io2_offset)
81 		return(1);
82 	return(0);
83 }
84 
85 RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare);
86 
87 /*
88  * Initialize a new, already-zero'd hammer_io structure, or reinitialize
89  * an existing hammer_io structure which may have switched to another type.
90  */
91 void
92 hammer_io_init(hammer_io_t io, hammer_volume_t volume, enum hammer_io_type type)
93 {
94 	io->volume = volume;
95 	io->hmp = volume->io.hmp;
96 	io->type = type;
97 }
98 
99 /*
100  * Helper routine to disassociate a buffer cache buffer from an I/O
101  * structure.  The io must be interlocked and marked appropriately for
102  * reclamation.
103  *
104  * The io must be in a released state with the io->bp owned and
105  * locked by the caller of this function.  When not called from an
106  * io_deallocate() this cannot race an io_deallocate() since the
107  * kernel would be unable to get the buffer lock in that case.
108  * (The released state in this case means we own the bp, not the
109  * hammer_io structure).
110  *
111  * The io may have 0 or 1 references depending on who called us.  The
112  * caller is responsible for dealing with the refs.
113  *
114  * This call can only be made when no action is required on the buffer.
115  *
116  * This function is guaranteed not to race against anything because we
117  * own both the io lock and the bp lock and are interlocked with no
118  * references.
119  */
120 static void
121 hammer_io_disassociate(hammer_io_structure_t iou)
122 {
123 	struct buf *bp = iou->io.bp;
124 
125 	KKASSERT(iou->io.released);
126 	KKASSERT(iou->io.modified == 0);
127 	KKASSERT(LIST_FIRST(&bp->b_dep) == (void *)iou);
128 	buf_dep_init(bp);
129 	iou->io.bp = NULL;
130 
131 	/*
132 	 * If the buffer was locked someone wanted to get rid of it.
133 	 */
134 	if (bp->b_flags & B_LOCKED) {
135 		atomic_add_int(&hammer_count_io_locked, -1);
136 		bp->b_flags &= ~B_LOCKED;
137 	}
138 	if (iou->io.reclaim) {
139 		bp->b_flags |= B_NOCACHE|B_RELBUF;
140 		iou->io.reclaim = 0;
141 	}
142 
143 	switch(iou->io.type) {
144 	case HAMMER_STRUCTURE_VOLUME:
145 		iou->volume.ondisk = NULL;
146 		break;
147 	case HAMMER_STRUCTURE_DATA_BUFFER:
148 	case HAMMER_STRUCTURE_META_BUFFER:
149 	case HAMMER_STRUCTURE_UNDO_BUFFER:
150 		iou->buffer.ondisk = NULL;
151 		break;
152 	case HAMMER_STRUCTURE_DUMMY:
153 		panic("hammer_io_disassociate: bad io type");
154 		break;
155 	}
156 }
157 
158 /*
159  * Wait for any physical IO to complete
160  *
161  * XXX we aren't interlocked against a spinlock or anything so there
162  *     is a small window in the interlock / io->running == 0 test.
163  */
164 void
165 hammer_io_wait(hammer_io_t io)
166 {
167 	if (io->running) {
168 		hammer_mount_t hmp = io->hmp;
169 
170 		lwkt_gettoken(&hmp->io_token);
171 		while (io->running) {
172 			io->waiting = 1;
173 			tsleep_interlock(io, 0);
174 			if (io->running)
175 				tsleep(io, PINTERLOCKED, "hmrflw", hz);
176 		}
177 		lwkt_reltoken(&hmp->io_token);
178 	}
179 }
180 
181 /*
182  * Wait for all currently queued HAMMER-initiated I/Os to complete.
183  *
184  * This is not supposed to count direct I/Os but some can leak
185  * through (for non-full-sized direct I/Os).
186  */
187 void
188 hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
189 {
190 	struct hammer_io iodummy;
191 	hammer_io_t io;
192 
193 	/*
194 	 * Degenerate case, no I/O is running
195 	 */
196 	lwkt_gettoken(&hmp->io_token);
197 	if (TAILQ_EMPTY(&hmp->iorun_list)) {
198 		lwkt_reltoken(&hmp->io_token);
199 		if (doflush)
200 			hammer_io_flush_sync(hmp);
201 		return;
202 	}
203 	bzero(&iodummy, sizeof(iodummy));
204 	iodummy.type = HAMMER_STRUCTURE_DUMMY;
205 
206 	/*
207 	 * Add placemarker and then wait until it becomes the head of
208 	 * the list.
209 	 */
210 	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
211 	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
212 		tsleep(&iodummy, 0, ident, 0);
213 	}
214 
215 	/*
216 	 * Chain in case several placemarkers are present.
217 	 */
218 	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
219 	io = TAILQ_FIRST(&hmp->iorun_list);
220 	if (io && io->type == HAMMER_STRUCTURE_DUMMY)
221 		wakeup(io);
222 	lwkt_reltoken(&hmp->io_token);
223 
224 	if (doflush)
225 		hammer_io_flush_sync(hmp);
226 }
227 
228 /*
229  * Clear a flagged error condition on an I/O buffer.  The caller must hold
230  * its own ref on the buffer.
231  */
232 void
233 hammer_io_clear_error(struct hammer_io *io)
234 {
235 	hammer_mount_t hmp = io->hmp;
236 
237 	lwkt_gettoken(&hmp->io_token);
238 	if (io->ioerror) {
239 		io->ioerror = 0;
240 		hammer_rel(&io->lock);
241 		KKASSERT(hammer_isactive(&io->lock));
242 	}
243 	lwkt_reltoken(&hmp->io_token);
244 }
245 
246 void
247 hammer_io_clear_error_noassert(struct hammer_io *io)
248 {
249 	hammer_mount_t hmp = io->hmp;
250 
251 	lwkt_gettoken(&hmp->io_token);
252 	if (io->ioerror) {
253 		io->ioerror = 0;
254 		hammer_rel(&io->lock);
255 	}
256 	lwkt_reltoken(&hmp->io_token);
257 }
258 
259 /*
260  * This is an advisory function only which tells the buffer cache
261  * the bp is not a meta-data buffer, even though it is backed by
262  * a block device.
263  *
264  * This is used by HAMMER's reblocking code to avoid trying to
265  * swapcache the filesystem's data when it is read or written
266  * by the reblocking code.
267  *
268  * The caller has a ref on the buffer preventing the bp from
269  * being disassociated from it.
270  */
271 void
272 hammer_io_notmeta(hammer_buffer_t buffer)
273 {
274 	if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
275 		hammer_mount_t hmp = buffer->io.hmp;
276 
277 		lwkt_gettoken(&hmp->io_token);
278 		buffer->io.bp->b_flags |= B_NOTMETA;
279 		lwkt_reltoken(&hmp->io_token);
280 	}
281 }
282 
283 /*
284  * Load bp for a HAMMER structure.  The io must be exclusively locked by
285  * the caller.
286  *
287  * This routine is mostly used on meta-data and small-data blocks.  Generally
288  * speaking HAMMER assumes some locality of reference and will cluster.
289  *
290  * Note that the caller (hammer_ondisk.c) may place further restrictions
291  * on clusterability via the limit (in bytes).  Typically large-data
292  * zones cannot be clustered due to their mixed buffer sizes.  This is
293  * not an issue since such clustering occurs in hammer_vnops at the
294  * regular file layer, whereas this is the buffered block device layer.
295  *
296  * No I/O callbacks can occur while we hold the buffer locked.
297  */
298 int
299 hammer_io_read(struct vnode *devvp, struct hammer_io *io, int limit)
300 {
301 	struct buf *bp;
302 	int   error;
303 
304 	if ((bp = io->bp) == NULL) {
305 		atomic_add_int(&hammer_count_io_running_read, io->bytes);
306 		if (hammer_cluster_enable && limit > io->bytes) {
307 			error = cluster_read(devvp, io->offset + limit,
308 					     io->offset, io->bytes,
309 					     HAMMER_CLUSTER_SIZE,
310 					     HAMMER_CLUSTER_SIZE,
311 					     &io->bp);
312 		} else {
313 			error = bread(devvp, io->offset, io->bytes, &io->bp);
314 		}
315 		hammer_stats_disk_read += io->bytes;
316 		atomic_add_int(&hammer_count_io_running_read, -io->bytes);
317 
318 		/*
319 		 * The code generally assumes b_ops/b_dep has been set-up,
320 		 * even if we error out here.
321 		 */
322 		bp = io->bp;
323 		if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
324 			const char *metatype;
325 
326 			switch(io->type) {
327 			case HAMMER_STRUCTURE_VOLUME:
328 				metatype = "volume";
329 				break;
330 			case HAMMER_STRUCTURE_META_BUFFER:
331 				switch(((struct hammer_buffer *)io)->
332 					zoneX_offset & HAMMER_OFF_ZONE_MASK) {
333 				case HAMMER_ZONE_BTREE:
334 					metatype = "btree";
335 					break;
336 				case HAMMER_ZONE_META:
337 					metatype = "meta";
338 					break;
339 				case HAMMER_ZONE_FREEMAP:
340 					metatype = "freemap";
341 					break;
342 				default:
343 					metatype = "meta?";
344 					break;
345 				}
346 				break;
347 			case HAMMER_STRUCTURE_DATA_BUFFER:
348 				metatype = "data";
349 				break;
350 			case HAMMER_STRUCTURE_UNDO_BUFFER:
351 				metatype = "undo";
352 				break;
353 			default:
354 				metatype = "unknown";
355 				break;
356 			}
357 			kprintf("doff %016jx %s\n",
358 				(intmax_t)bp->b_bio2.bio_offset,
359 				metatype);
360 		}
361 		bp->b_flags &= ~B_IODEBUG;
362 		bp->b_ops = &hammer_bioops;
363 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
364 
365 		/* io->worklist is locked by the io lock */
366 		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
367 		BUF_KERNPROC(bp);
368 		KKASSERT(io->modified == 0);
369 		KKASSERT(io->running == 0);
370 		KKASSERT(io->waiting == 0);
371 		io->released = 0;	/* we hold an active lock on bp */
372 	} else {
373 		error = 0;
374 	}
375 	return(error);
376 }
377 
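/*
 * Hypothetical caller sketch (illustrative only, not lifted from a real
 * call site): the ondisk layer typically pulls in backing store with
 *
 *	error = hammer_io_read(volume->devvp, &buffer->io, limit);
 *
 * where limit bounds how far cluster_read() is allowed to cluster.
 */
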
378 /*
379  * Similar to hammer_io_read() but returns a zero'd out buffer instead.
380  * Must be called with the IO exclusively locked.
381  *
382  * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
383  * I/O by forcing the buffer to not be in a released state before calling
384  * it.
385  *
386  * This function will also mark the IO as modified but it will not
387  * increment the modify_refs count.
388  *
389  * No I/O callbacks can occur while we hold the buffer locked.
390  */
391 int
392 hammer_io_new(struct vnode *devvp, struct hammer_io *io)
393 {
394 	struct buf *bp;
395 
396 	if ((bp = io->bp) == NULL) {
397 		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
398 		bp = io->bp;
399 		bp->b_ops = &hammer_bioops;
400 		KKASSERT(LIST_FIRST(&bp->b_dep) == NULL);
401 
402 		/* io->worklist is locked by the io lock */
403 		LIST_INSERT_HEAD(&bp->b_dep, &io->worklist, node);
404 		io->released = 0;
405 		KKASSERT(io->running == 0);
406 		io->waiting = 0;
407 		BUF_KERNPROC(bp);
408 	} else {
409 		if (io->released) {
410 			regetblk(bp);
411 			BUF_KERNPROC(bp);
412 			io->released = 0;
413 		}
414 	}
415 	hammer_io_modify(io, 0);
416 	vfs_bio_clrbuf(bp);
417 	return(0);
418 }
419 
420 /*
421  * Advance the activity count on the underlying buffer because
422  * HAMMER does not getblk/brelse on every access.
423  *
424  * The io->bp cannot go away while the buffer is referenced.
425  */
426 void
427 hammer_io_advance(struct hammer_io *io)
428 {
429 	if (io->bp)
430 		buf_act_advance(io->bp);
431 }
432 
433 /*
434  * Remove potential device level aliases against buffers managed by high level
435  * vnodes.  Aliases can also be created due to mixed buffer sizes or via
436  * direct access to the backing store device.
437  *
438  * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
439  * does not exist its backing VM pages might, and we have to invalidate
440  * those as well or a getblk() will reinstate them.
441  *
442  * Buffer cache buffers associated with hammer_buffers cannot be
443  * invalidated.
444  */
445 int
446 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
447 {
448 	hammer_io_structure_t iou;
449 	hammer_mount_t hmp;
450 	hammer_off_t phys_offset;
451 	struct buf *bp;
452 	int error;
453 
454 	hmp = volume->io.hmp;
455 	lwkt_gettoken(&hmp->io_token);
456 
457 	/*
458 	 * If a device buffer already exists for the specified physical
459 	 * offset use that, otherwise instantiate a buffer to cover any
460 	 * related VM pages, set B_NOCACHE, and brelse().
461 	 */
462 	phys_offset = volume->ondisk->vol_buf_beg +
463 		      (zone2_offset & HAMMER_OFF_SHORT_MASK);
464 	if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL)
465 		bremfree(bp);
466 	else
467 		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
468 
469 	if ((iou = (void *)LIST_FIRST(&bp->b_dep)) != NULL) {
470 #if 0
471 		hammer_ref(&iou->io.lock);
472 		hammer_io_clear_modify(&iou->io, 1);
473 		bundirty(bp);
474 		iou->io.released = 0;
475 		BUF_KERNPROC(bp);
476 		iou->io.reclaim = 1;
477 		iou->io.waitdep = 1;	/* XXX this is a fs_token field */
478 		KKASSERT(hammer_isactive(&iou->io.lock) == 1);
479 		hammer_rel_buffer(&iou->buffer, 0);
480 		/*hammer_io_deallocate(bp);*/
481 #endif
482 		bqrelse(bp);
483 		error = EAGAIN;
484 	} else {
485 		KKASSERT((bp->b_flags & B_LOCKED) == 0);
486 		bundirty(bp);
487 		bp->b_flags |= B_NOCACHE|B_RELBUF;
488 		brelse(bp);
489 		error = 0;
490 	}
491 	lwkt_reltoken(&hmp->io_token);
492 	return(error);
493 }
494 
495 /*
496  * This routine is called on the last reference to a hammer structure.
497  * The io must be interlocked with a refcount of zero.  The hammer structure
498  * will remain interlocked on return.
499  *
500  * This routine may return a non-NULL bp to the caller for disposal.
501  * The caller typically brelse()'s the bp.
502  *
503  * The bp may or may not still be passively associated with the IO.  It
504  * will remain passively associated if it is unreleasable (e.g. a modified
505  * meta-data buffer).
506  *
507  * The only requirement here is that modified meta-data and volume-header
508  * buffers may NOT be disassociated from the IO structure, and consequently
509  * we also leave such buffers actively associated with the IO if they already
510  * are (since the kernel can't do anything with them anyway).  Only the
511  * flusher is allowed to write such buffers out.  Modified pure-data and
512  * undo buffers are returned to the kernel but left passively associated
513  * so we can track when the kernel writes the bp out.
514  */
515 struct buf *
516 hammer_io_release(struct hammer_io *io, int flush)
517 {
518 	union hammer_io_structure *iou = (void *)io;
519 	struct buf *bp;
520 
521 	if ((bp = io->bp) == NULL)
522 		return(NULL);
523 
524 	/*
525 	 * Try to flush a dirty IO to disk if asked to by the
526 	 * caller or if the kernel tried to flush the buffer in the past.
527 	 *
528 	 * Kernel-initiated flushes are only allowed for pure-data buffers.
529 	 * meta-data and volume buffers can only be flushed explicitly
530 	 * by HAMMER.
531 	 */
532 	if (io->modified) {
533 		if (flush) {
534 			hammer_io_flush(io, 0);
535 		} else if (bp->b_flags & B_LOCKED) {
536 			switch(io->type) {
537 			case HAMMER_STRUCTURE_DATA_BUFFER:
538 				hammer_io_flush(io, 0);
539 				break;
540 			case HAMMER_STRUCTURE_UNDO_BUFFER:
541 				hammer_io_flush(io, hammer_undo_reclaim(io));
542 				break;
543 			default:
544 				break;
545 			}
546 		} /* else no explicit request to flush the buffer */
547 	}
548 
549 	/*
550 	 * Wait for the IO to complete if asked to.  This occurs when
551 	 * the buffer must be disposed of definitively during an umount
552 	 * or buffer invalidation.
553 	 */
554 	if (io->waitdep && io->running) {
555 		hammer_io_wait(io);
556 	}
557 
558 	/*
559 	 * Return control of the buffer to the kernel (with the proviso
560 	 * that our bioops can override kernel decisions with regards to
561 	 * the buffer).
562 	 */
563 	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
564 		/*
565 		 * Always disassociate the bp if an explicit flush
566 		 * was requested and the IO completed with no error
567 		 * (so unmount can really clean up the structure).
568 		 */
569 		if (io->released) {
570 			regetblk(bp);
571 			BUF_KERNPROC(bp);
572 		} else {
573 			io->released = 1;
574 		}
575 		hammer_io_disassociate((hammer_io_structure_t)io);
576 		/* return the bp */
577 	} else if (io->modified) {
578 		/*
579 		 * Only certain IO types can be released to the kernel if
580 		 * the buffer has been modified.
581 		 *
582 		 * volume and meta-data IO types may only be explicitly
583 		 * flushed by HAMMER.
584 		 */
585 		switch(io->type) {
586 		case HAMMER_STRUCTURE_DATA_BUFFER:
587 		case HAMMER_STRUCTURE_UNDO_BUFFER:
588 			if (io->released == 0) {
589 				io->released = 1;
590 				bdwrite(bp);
591 			}
592 			break;
593 		default:
594 			break;
595 		}
596 		bp = NULL;	/* bp left associated */
597 	} else if (io->released == 0) {
598 		/*
599 		 * Clean buffers can be generally released to the kernel.
600 		 * We leave the bp passively associated with the HAMMER
601 		 * structure and use bioops to disconnect it later on
602 		 * if the kernel wants to discard the buffer.
603 		 *
604 		 * We can steal the structure's ownership of the bp.
605 		 */
606 		io->released = 1;
607 		if (bp->b_flags & B_LOCKED) {
608 			hammer_io_disassociate(iou);
609 			/* return the bp */
610 		} else {
611 			if (io->reclaim) {
612 				hammer_io_disassociate(iou);
613 				/* return the bp */
614 			} else {
615 				/* return the bp (bp passively associated) */
616 			}
617 		}
618 	} else {
619 		/*
620 		 * A released buffer is passively associated with our
621 		 * hammer_io structure.  The kernel cannot destroy it
622 		 * without making a bioops call.  If the kernel (B_LOCKED)
623 		 * or we (reclaim) requested that the buffer be destroyed
624 		 * we destroy it, otherwise we do a quick get/release to
625 		 * reset its position in the kernel's LRU list.
626 		 *
627 		 * Leaving the buffer passively associated allows us to
628 		 * use the kernel's LRU buffer flushing mechanisms rather
629 		 * then rolling our own.
630 		 * than rolling our own.
631 		 * XXX there are two ways of doing this.  We can re-acquire
632 		 * and passively release to reset the LRU, or not.
633 		 */
634 		if (io->running == 0) {
635 			regetblk(bp);
636 			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
637 				hammer_io_disassociate(iou);
638 				/* return the bp */
639 			} else {
640 				/* return the bp (bp passively associated) */
641 			}
642 		} else {
643 			/*
644 			 * bp is left passively associated but we do not
645 			 * try to reacquire it.  Interactions with the io
646 			 * structure will occur on completion of the bp's
647 			 * I/O.
648 			 */
649 			bp = NULL;
650 		}
651 	}
652 	return(bp);
653 }
654 
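/*
 * Typical caller pattern for hammer_io_release() (a sketch, not taken
 * verbatim from any one call site):
 *
 *	bp = hammer_io_release(&buffer->io, flush);
 *	if (bp)
 *		brelse(bp);
 *
 * i.e. the caller simply disposes of whatever bp is handed back.
 */
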
655 /*
656  * This routine is called with a locked IO when a flush is desired and
657  * no other references to the structure exist other than ours.  This
658  * routine is ONLY called when HAMMER believes it is safe to flush a
659  * potentially modified buffer out.
660  *
661  * The locked io or io reference prevents a flush from being initiated
662  * by the kernel.
663  */
664 void
665 hammer_io_flush(struct hammer_io *io, int reclaim)
666 {
667 	struct buf *bp;
668 	hammer_mount_t hmp;
669 
670 	/*
671 	 * Degenerate case - nothing to flush if nothing is dirty.
672 	 */
673 	if (io->modified == 0)
674 		return;
675 
676 	KKASSERT(io->bp);
677 	KKASSERT(io->modify_refs <= 0);
678 
679 	/*
680 	 * Acquire ownership of the bp, particularly before we clear our
681 	 * modified flag.
682 	 *
683 	 * We are going to bawrite() this bp.  Don't leave a window where
684 	 * io->released is set, we actually own the bp rather than our
685 	 * buffer.
686 	 *
687 	 * The io_token should not be required here as only
688 	 */
689 	hmp = io->hmp;
690 	bp = io->bp;
691 	if (io->released) {
692 		regetblk(bp);
693 		/* BUF_KERNPROC(io->bp); */
694 		/* io->released = 0; */
695 		KKASSERT(io->released);
696 		KKASSERT(io->bp == bp);
697 	} else {
698 		io->released = 1;
699 	}
700 
701 	if (reclaim) {
702 		io->reclaim = 1;
703 		if ((bp->b_flags & B_LOCKED) == 0) {
704 			bp->b_flags |= B_LOCKED;
705 			atomic_add_int(&hammer_count_io_locked, 1);
706 		}
707 	}
708 
709 	/*
710 	 * Acquire exclusive access to the bp and then clear the modified
711 	 * state of the buffer prior to issuing I/O to interlock any
712 	 * modifications made while the I/O is in progress.  This shouldn't
713 	 * happen anyway but losing data would be worse.  The modified bit
714 	 * will be rechecked after the IO completes.
715 	 *
716 	 * NOTE: This call also finalizes the buffer's content (inval == 0).
717 	 *
718 	 * This is only legal when lock.refs == 1 (otherwise we might clear
719 	 * the modified bit while there are still users of the cluster
720 	 * modifying the data).
721 	 *
722 	 * Do this before potentially blocking so any attempt to modify the
723 	 * ondisk while we are blocked blocks waiting for us.
724 	 */
725 	hammer_ref(&io->lock);
726 	hammer_io_clear_modify(io, 0);
727 	hammer_rel(&io->lock);
728 
729 	if (hammer_debug_io & 0x0002)
730 		kprintf("hammer io_write %016jx\n", bp->b_bio1.bio_offset);
731 
732 	/*
733 	 * Transfer ownership to the kernel and initiate I/O.
734 	 *
735 	 * NOTE: We do not hold io_token so an atomic op is required to
736 	 *	 update io_running_space.
737 	 */
738 	io->running = 1;
739 	atomic_add_int(&hmp->io_running_space, io->bytes);
740 	atomic_add_int(&hammer_count_io_running_write, io->bytes);
741 	lwkt_gettoken(&hmp->io_token);
742 	TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
743 	lwkt_reltoken(&hmp->io_token);
744 	bawrite(bp);
745 	hammer_io_flush_mark(io->volume);
746 }
747 
748 /************************************************************************
749  *				BUFFER DIRTYING				*
750  ************************************************************************
751  *
752  * These routines deal with dependencies created when IO buffers get
753  * modified.  The caller must call hammer_modify_*() on a referenced
754  * HAMMER structure prior to modifying its on-disk data.
755  *
756  * Any intent to modify an IO buffer acquires the related bp and imposes
757  * various write ordering dependencies.
758  */
759 
760 /*
761  * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
762  * are locked until the flusher can deal with them, pure data buffers
763  * can be written out.
764  *
765  * The referenced io prevents races.
766  */
767 static
768 void
769 hammer_io_modify(hammer_io_t io, int count)
770 {
771 	/*
772 	 * io->modify_refs must be >= 0
773 	 */
774 	while (io->modify_refs < 0) {
775 		io->waitmod = 1;
776 		tsleep(io, 0, "hmrmod", 0);
777 	}
778 
779 	/*
780 	 * Shortcut if nothing to do.
781 	 */
782 	KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
783 	io->modify_refs += count;
784 	if (io->modified && io->released == 0)
785 		return;
786 
787 	/*
788 	 * NOTE: It is important not to set the modified bit
789 	 *	 until after we have acquired the bp or we risk
790 	 *	 racing against checkwrite.
791 	 */
792 	hammer_lock_ex(&io->lock);
793 	if (io->released) {
794 		regetblk(io->bp);
795 		BUF_KERNPROC(io->bp);
796 		io->released = 0;
797 	}
798 	if (io->modified == 0) {
799 		hammer_io_set_modlist(io);
800 		io->modified = 1;
801 	}
802 	hammer_unlock(&io->lock);
803 }
804 
805 static __inline
806 void
807 hammer_io_modify_done(hammer_io_t io)
808 {
809 	KKASSERT(io->modify_refs > 0);
810 	--io->modify_refs;
811 	if (io->modify_refs == 0 && io->waitmod) {
812 		io->waitmod = 0;
813 		wakeup(io);
814 	}
815 }
816 
817 /*
818  * The write interlock blocks other threads trying to modify a buffer
819  * (they block in hammer_io_modify()) after us, or blocks us while other
820  * threads are in the middle of modifying a buffer.
821  *
822  * The caller also has a ref on the io, however if we are not careful
823  * we will race bioops callbacks (checkwrite).  To deal with this
824  * we must at least acquire and release the io_token, and it is probably
825  * better to hold it through the setting of modify_refs.
826  */
827 void
828 hammer_io_write_interlock(hammer_io_t io)
829 {
830 	hammer_mount_t hmp = io->hmp;
831 
832 	lwkt_gettoken(&hmp->io_token);
833 	while (io->modify_refs != 0) {
834 		io->waitmod = 1;
835 		tsleep(io, 0, "hmrmod", 0);
836 	}
837 	io->modify_refs = -1;
838 	lwkt_reltoken(&hmp->io_token);
839 }
840 
841 void
842 hammer_io_done_interlock(hammer_io_t io)
843 {
844 	KKASSERT(io->modify_refs == -1);
845 	io->modify_refs = 0;
846 	if (io->waitmod) {
847 		io->waitmod = 0;
848 		wakeup(io);
849 	}
850 }
851 
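/*
 * Sketch of the interlock pairing (illustrative): a thread wanting
 * exclusive access to a buffer's ondisk data does
 *
 *	hammer_io_write_interlock(io);
 *	... modify or copy the ondisk data ...
 *	hammer_io_done_interlock(io);
 *
 * which holds modify_refs at -1 and thereby blocks other
 * hammer_io_modify() callers for the duration.
 */
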
852 /*
853  * Caller intends to modify a volume's ondisk structure.
854  *
855  * This is only allowed if we are the flusher or we have a ref on the
856  * sync_lock.
857  */
858 void
859 hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
860 		     void *base, int len)
861 {
862 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
863 
864 	hammer_io_modify(&volume->io, 1);
865 	if (len) {
866 		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
867 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
868 		hammer_generate_undo(trans,
869 			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
870 			 base, len);
871 	}
872 }
873 
874 /*
875  * Caller intends to modify a buffer's ondisk structure.
876  *
877  * This is only allowed if we are the flusher or we have a ref on the
878  * sync_lock.
879  */
880 void
881 hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
882 		     void *base, int len)
883 {
884 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
885 
886 	hammer_io_modify(&buffer->io, 1);
887 	if (len) {
888 		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
889 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
890 		hammer_generate_undo(trans,
891 				     buffer->zone2_offset + rel_offset,
892 				     base, len);
893 	}
894 }
895 
896 void
897 hammer_modify_volume_done(hammer_volume_t volume)
898 {
899 	hammer_io_modify_done(&volume->io);
900 }
901 
902 void
903 hammer_modify_buffer_done(hammer_buffer_t buffer)
904 {
905 	hammer_io_modify_done(&buffer->io);
906 }
907 
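/*
 * Typical usage of the modify API above (a minimal sketch; the field
 * name is illustrative only):
 *
 *	hammer_modify_buffer(trans, buffer, &ondisk->field,
 *			     sizeof(ondisk->field));
 *	ondisk->field = new_value;
 *	hammer_modify_buffer_done(buffer);
 *
 * The modify call acquires the bp, generates UNDO covering the range,
 * and marks the io modified; the done call drops the modify ref.
 */
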
908 /*
909  * Mark an entity as not being dirty any more and finalize any
910  * delayed adjustments to the buffer.
911  *
912  * Delayed adjustments are an important performance enhancement, allowing
913  * us to avoid recalculating B-Tree node CRCs over and over again when
914  * making bulk-modifications to the B-Tree.
915  *
916  * If inval is non-zero delayed adjustments are ignored.
917  *
918  * This routine may dereference related btree nodes and cause the
919  * buffer to be dereferenced.  The caller must own a reference on io.
920  */
921 void
922 hammer_io_clear_modify(struct hammer_io *io, int inval)
923 {
924 	hammer_mount_t hmp;
925 
926 	/*
927 	 * io_token is needed to avoid races on mod_root
928 	 */
929 	if (io->modified == 0)
930 		return;
931 	hmp = io->hmp;
932 	lwkt_gettoken(&hmp->io_token);
933 	if (io->modified == 0) {
934 		lwkt_reltoken(&hmp->io_token);
935 		return;
936 	}
937 
938 	/*
939 	 * Take us off the mod-list and clear the modified bit.
940 	 */
941 	KKASSERT(io->mod_root != NULL);
942 	if (io->mod_root == &io->hmp->volu_root ||
943 	    io->mod_root == &io->hmp->meta_root) {
944 		io->hmp->locked_dirty_space -= io->bytes;
945 		atomic_add_int(&hammer_count_dirtybufspace, -io->bytes);
946 	}
947 	RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
948 	io->mod_root = NULL;
949 	io->modified = 0;
950 
951 	lwkt_reltoken(&hmp->io_token);
952 
953 	/*
954 	 * If this bit is not set there are no delayed adjustments.
955 	 */
956 	if (io->gencrc == 0)
957 		return;
958 	io->gencrc = 0;
959 
960 	/*
961 	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
962 	 * on the node (& underlying buffer).  Release the node after clearing
963 	 * the flag.
964 	 */
965 	if (io->type == HAMMER_STRUCTURE_META_BUFFER) {
966 		hammer_buffer_t buffer = (void *)io;
967 		hammer_node_t node;
968 
969 restart:
970 		TAILQ_FOREACH(node, &buffer->clist, entry) {
971 			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
972 				continue;
973 			node->flags &= ~HAMMER_NODE_NEEDSCRC;
974 			KKASSERT(node->ondisk);
975 			if (inval == 0)
976 				node->ondisk->crc = crc32(&node->ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
977 			hammer_rel_node(node);
978 			goto restart;
979 		}
980 	}
981 	/* caller must still have ref on io */
982 	KKASSERT(hammer_isactive(&io->lock));
983 }
984 
985 /*
986  * Clear the IO's modify list.  Even though the IO is no longer modified
987  * it may still be on the lose_root.  This routine is called just before
988  * the governing hammer_buffer is destroyed.
989  *
990  * mod_root requires io_token protection.
991  */
992 void
993 hammer_io_clear_modlist(struct hammer_io *io)
994 {
995 	hammer_mount_t hmp = io->hmp;
996 
997 	KKASSERT(io->modified == 0);
998 	if (io->mod_root) {
999 		lwkt_gettoken(&hmp->io_token);
1000 		if (io->mod_root) {
1001 			KKASSERT(io->mod_root == &io->hmp->lose_root);
1002 			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
1003 			io->mod_root = NULL;
1004 		}
1005 		lwkt_reltoken(&hmp->io_token);
1006 	}
1007 }
1008 
1009 static void
1010 hammer_io_set_modlist(struct hammer_io *io)
1011 {
1012 	struct hammer_mount *hmp = io->hmp;
1013 
1014 	lwkt_gettoken(&hmp->io_token);
1015 	KKASSERT(io->mod_root == NULL);
1016 
1017 	switch(io->type) {
1018 	case HAMMER_STRUCTURE_VOLUME:
1019 		io->mod_root = &hmp->volu_root;
1020 		hmp->locked_dirty_space += io->bytes;
1021 		atomic_add_int(&hammer_count_dirtybufspace, io->bytes);
1022 		break;
1023 	case HAMMER_STRUCTURE_META_BUFFER:
1024 		io->mod_root = &hmp->meta_root;
1025 		hmp->locked_dirty_space += io->bytes;
1026 		atomic_add_int(&hammer_count_dirtybufspace, io->bytes);
1027 		break;
1028 	case HAMMER_STRUCTURE_UNDO_BUFFER:
1029 		io->mod_root = &hmp->undo_root;
1030 		break;
1031 	case HAMMER_STRUCTURE_DATA_BUFFER:
1032 		io->mod_root = &hmp->data_root;
1033 		break;
1034 	case HAMMER_STRUCTURE_DUMMY:
1035 		panic("hammer_io_set_modlist: bad io type");
1036 		break; /* NOT REACHED */
1037 	}
1038 	if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
1039 		panic("hammer_io_set_modlist: duplicate entry");
1040 		/* NOT REACHED */
1041 	}
1042 	lwkt_reltoken(&hmp->io_token);
1043 }
1044 
1045 /************************************************************************
1046  *				HAMMER_BIOOPS				*
1047  ************************************************************************
1048  *
1049  */
1050 
1051 /*
1052  * Pre-IO initiation kernel callback - cluster build only
1053  *
1054  * bioops callback - hold io_token
1055  */
1056 static void
1057 hammer_io_start(struct buf *bp)
1058 {
1059 	/* nothing to do, so io_token not needed */
1060 }
1061 
1062 /*
1063  * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
1064  *
1065  * NOTE: HAMMER may modify a data buffer after we have initiated write
1066  *	 I/O.
1067  *
1068  * NOTE: MPSAFE callback
1069  *
1070  * bioops callback - hold io_token
1071  */
1072 static void
1073 hammer_io_complete(struct buf *bp)
1074 {
1075 	union hammer_io_structure *iou = (void *)LIST_FIRST(&bp->b_dep);
1076 	struct hammer_mount *hmp = iou->io.hmp;
1077 	struct hammer_io *ionext;
1078 
1079 	lwkt_gettoken(&hmp->io_token);
1080 
1081 	KKASSERT(iou->io.released == 1);
1082 
1083 	/*
1084 	 * Deal with people waiting for I/O to drain
1085 	 */
1086 	if (iou->io.running) {
1087 		/*
1088 		 * Deal with critical write errors.  Once a critical error
1089 		 * has been flagged in hmp the UNDO FIFO will not be updated.
1090 		 * That way crash recovery will give us a consistent
1091 		 * filesystem.
1092 		 *
1093 		 * Because of this we can throw away failed UNDO buffers.  If
1094 		 * we throw away META or DATA buffers we risk corrupting
1095 		 * the now read-only version of the filesystem visible to
1096 		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
1097 		 * by the kernel and ref the io so it doesn't get thrown
1098 		 * away.
1099 		 */
1100 		if (bp->b_flags & B_ERROR) {
1101 			lwkt_gettoken(&hmp->fs_token);
1102 			hammer_critical_error(hmp, NULL, bp->b_error,
1103 					      "while flushing meta-data");
1104 			lwkt_reltoken(&hmp->fs_token);
1105 
1106 			switch(iou->io.type) {
1107 			case HAMMER_STRUCTURE_UNDO_BUFFER:
1108 				break;
1109 			default:
1110 				if (iou->io.ioerror == 0) {
1111 					iou->io.ioerror = 1;
1112 					hammer_ref(&iou->io.lock);
1113 				}
1114 				break;
1115 			}
1116 			bp->b_flags &= ~B_ERROR;
1117 			bundirty(bp);
1118 #if 0
1119 			hammer_io_set_modlist(&iou->io);
1120 			iou->io.modified = 1;
1121 #endif
1122 		}
1123 		hammer_stats_disk_write += iou->io.bytes;
1124 		atomic_add_int(&hammer_count_io_running_write, -iou->io.bytes);
1125 		atomic_add_int(&hmp->io_running_space, -iou->io.bytes);
1126 		if (hmp->io_running_wakeup &&
1127 		    hmp->io_running_space < hammer_limit_running_io / 2) {
1128 		    hmp->io_running_wakeup = 0;
1129 		    wakeup(&hmp->io_running_wakeup);
1130 		}
1131 		KKASSERT(hmp->io_running_space >= 0);
1132 		iou->io.running = 0;
1133 
1134 		/*
1135 		 * Remove from iorun list and wakeup any multi-io waiter(s).
1136 		 */
1137 		if (TAILQ_FIRST(&hmp->iorun_list) == &iou->io) {
1138 			ionext = TAILQ_NEXT(&iou->io, iorun_entry);
1139 			if (ionext && ionext->type == HAMMER_STRUCTURE_DUMMY)
1140 				wakeup(ionext);
1141 		}
1142 		TAILQ_REMOVE(&hmp->iorun_list, &iou->io, iorun_entry);
1143 	} else {
1144 		hammer_stats_disk_read += iou->io.bytes;
1145 	}
1146 
1147 	if (iou->io.waiting) {
1148 		iou->io.waiting = 0;
1149 		wakeup(iou);
1150 	}
1151 
1152 	/*
1153 	 * If B_LOCKED is set someone wanted to deallocate the bp at some
1154 	 * point, try to do it now.  The operation will fail if there are
1155 	 * refs or if hammer_io_deallocate() is unable to gain the
1156 	 * interlock.
1157 	 */
1158 	if (bp->b_flags & B_LOCKED) {
1159 		atomic_add_int(&hammer_count_io_locked, -1);
1160 		bp->b_flags &= ~B_LOCKED;
1161 		hammer_io_deallocate(bp);
1162 		/* structure may be dead now */
1163 	}
1164 	lwkt_reltoken(&hmp->io_token);
1165 }
1166 
1167 /*
1168  * Callback from kernel when it wishes to deallocate a passively
1169  * associated structure.  This mostly occurs with clean buffers
1170  * but it may be possible for a holding structure to be marked dirty
1171  * while its buffer is passively associated.  The caller owns the bp.
1172  *
1173  * If we cannot disassociate we set B_LOCKED to prevent the buffer
1174  * from getting reused.
1175  *
1176  * WARNING: Because this can be called directly by getnewbuf we cannot
1177  * recurse into the tree.  If a bp cannot be immediately disassociated
1178  * our only recourse is to set B_LOCKED.
1179  *
1180  * WARNING: This may be called from an interrupt via hammer_io_complete()
1181  *
1182  * bioops callback - hold io_token
1183  */
1184 static void
1185 hammer_io_deallocate(struct buf *bp)
1186 {
1187 	hammer_io_structure_t iou = (void *)LIST_FIRST(&bp->b_dep);
1188 	hammer_mount_t hmp;
1189 
1190 	hmp = iou->io.hmp;
1191 
1192 	lwkt_gettoken(&hmp->io_token);
1193 
1194 	KKASSERT((bp->b_flags & B_LOCKED) == 0 && iou->io.running == 0);
1195 	if (hammer_try_interlock_norefs(&iou->io.lock) == 0) {
1196 		/*
1197 		 * We cannot safely disassociate a bp from a referenced
1198 		 * or interlocked HAMMER structure.
1199 		 */
1200 		bp->b_flags |= B_LOCKED;
1201 		atomic_add_int(&hammer_count_io_locked, 1);
1202 	} else if (iou->io.modified) {
1203 		/*
1204 		 * It is not legal to disassociate a modified buffer.  This
1205 		 * case really shouldn't ever occur.
1206 		 */
1207 		bp->b_flags |= B_LOCKED;
1208 		atomic_add_int(&hammer_count_io_locked, 1);
1209 		hammer_put_interlock(&iou->io.lock, 0);
1210 	} else {
1211 		/*
1212 		 * Disassociate the BP.  If the io has no refs left we
1213 		 * have to add it to the loose list.  The kernel has
1214 		 * locked the buffer and therefore our io must be
1215 		 * in a released state.
1216 		 */
1217 		hammer_io_disassociate(iou);
1218 		if (iou->io.type != HAMMER_STRUCTURE_VOLUME) {
1219 			KKASSERT(iou->io.bp == NULL);
1220 			KKASSERT(iou->io.mod_root == NULL);
1221 			iou->io.mod_root = &hmp->lose_root;
1222 			if (RB_INSERT(hammer_mod_rb_tree, iou->io.mod_root,
1223 				      &iou->io)) {
1224 				panic("hammer_io_deallocate: duplicate entry");
1225 			}
1226 		}
1227 		hammer_put_interlock(&iou->io.lock, 1);
1228 	}
1229 	lwkt_reltoken(&hmp->io_token);
1230 }
1231 
1232 /*
1233  * bioops callback - hold io_token
1234  */
1235 static int
1236 hammer_io_fsync(struct vnode *vp)
1237 {
1238 	/* nothing to do, so io_token not needed */
1239 	return(0);
1240 }
1241 
1242 /*
1243  * NOTE: will not be called unless we tell the kernel about the
1244  * bioops.  Unused... we use the mount's VFS_SYNC instead.
1245  *
1246  * bioops callback - hold io_token
1247  */
1248 static int
1249 hammer_io_sync(struct mount *mp)
1250 {
1251 	/* nothing to do, so io_token not needed */
1252 	return(0);
1253 }
1254 
1255 /*
1256  * bioops callback - hold io_token
1257  */
1258 static void
1259 hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
1260 {
1261 	/* nothing to do, so io_token not needed */
1262 }
1263 
1264 /*
1265  * I/O pre-check for reading and writing.  HAMMER only uses this for
1266  * B_CACHE buffers so checkread just shouldn't happen, but if it does
1267  * allow it.
1268  *
1269  * Writing is a different case.  We don't want the kernel to try to write
1270  * out a buffer that HAMMER may be modifying passively or which has a
1271  * dependancy.  In addition, kernel-demanded writes can only proceed for
1272  * dependency.  In addition, kernel-demanded writes can only proceed for
1273  * buffer types can only be explicitly written by the flusher.
1274  *
1275  * checkwrite will only be called for bdwrite()n buffers.  If we return
1276  * success the kernel is guaranteed to initiate the buffer write.
1277  *
1278  * bioops callback - hold io_token
1279  */
1280 static int
1281 hammer_io_checkread(struct buf *bp)
1282 {
1283 	/* nothing to do, so io_token not needed */
1284 	return(0);
1285 }
1286 
1287 /*
1288  * The kernel is asking us whether it can write out a dirty buffer or not.
1289  *
1290  * bioops callback - hold io_token
1291  */
1292 static int
1293 hammer_io_checkwrite(struct buf *bp)
1294 {
1295 	hammer_io_t io = (void *)LIST_FIRST(&bp->b_dep);
1296 	hammer_mount_t hmp = io->hmp;
1297 
1298 	/*
1299 	 * This shouldn't happen under normal operation.
1300 	 */
1301 	lwkt_gettoken(&hmp->io_token);
1302 	if (io->type == HAMMER_STRUCTURE_VOLUME ||
1303 	    io->type == HAMMER_STRUCTURE_META_BUFFER) {
1304 		if (!panicstr)
1305 			panic("hammer_io_checkwrite: illegal buffer");
1306 		if ((bp->b_flags & B_LOCKED) == 0) {
1307 			bp->b_flags |= B_LOCKED;
1308 			atomic_add_int(&hammer_count_io_locked, 1);
1309 		}
1310 		lwkt_reltoken(&hmp->io_token);
1311 		return(1);
1312 	}
1313 
1314 	/*
1315 	 * We have to be able to interlock the IO to safely modify any
1316 	 * of its fields without holding the fs_token.  If we can't lock
1317 	 * it then we are racing someone.
1318 	 *
1319 	 * Our ownership of the bp lock prevents the io from being ripped
1320 	 * out from under us.
1321 	 */
1322 	if (hammer_try_interlock_norefs(&io->lock) == 0) {
1323 		bp->b_flags |= B_LOCKED;
1324 		atomic_add_int(&hammer_count_io_locked, 1);
1325 		lwkt_reltoken(&hmp->io_token);
1326 		return(1);
1327 	}
1328 
1329 	/*
1330 	 * The modified bit must be cleared prior to the initiation of
1331 	 * any IO (returning 0 initiates the IO).  Because this is a
1332 	 * normal data buffer hammer_io_clear_modify() runs through a
1333 	 * simple degenerate case.
1334 	 *
1335 	 * Return 0 will cause the kernel to initiate the IO, and we
1336 	 * must normally clear the modified bit before we begin.  If
1337 	 * the io has modify_refs we do not clear the modified bit,
1338 	 * otherwise we may miss changes.
1339 	 *
1340 	 * Only data and undo buffers can reach here.  These buffers do
1341 	 * not have terminal crc functions but we temporarily reference
1342 	 * the IO anyway, just in case.
1343 	 */
1344 	if (io->modify_refs == 0 && io->modified) {
1345 		hammer_ref(&io->lock);
1346 		hammer_io_clear_modify(io, 0);
1347 		hammer_rel(&io->lock);
1348 	} else if (io->modified) {
1349 		KKASSERT(io->type == HAMMER_STRUCTURE_DATA_BUFFER);
1350 	}
1351 
1352 	/*
1353 	 * The kernel is going to start the IO, set io->running.
1354 	 */
1355 	KKASSERT(io->running == 0);
1356 	io->running = 1;
1357 	atomic_add_int(&io->hmp->io_running_space, io->bytes);
1358 	atomic_add_int(&hammer_count_io_running_write, io->bytes);
1359 	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
1360 
1361 	hammer_put_interlock(&io->lock, 1);
1362 	lwkt_reltoken(&hmp->io_token);
1363 
1364 	return(0);
1365 }
1366 
1367 /*
1368  * Return non-zero if we wish to delay the kernel's attempt to flush
1369  * this buffer to disk.
1370  *
1371  * bioops callback - hold io_token
1372  */
1373 static int
1374 hammer_io_countdeps(struct buf *bp, int n)
1375 {
1376 	/* nothing to do, so io_token not needed */
1377 	return(0);
1378 }
1379 
1380 struct bio_ops hammer_bioops = {
1381 	.io_start	= hammer_io_start,
1382 	.io_complete	= hammer_io_complete,
1383 	.io_deallocate	= hammer_io_deallocate,
1384 	.io_fsync	= hammer_io_fsync,
1385 	.io_sync	= hammer_io_sync,
1386 	.io_movedeps	= hammer_io_movedeps,
1387 	.io_countdeps	= hammer_io_countdeps,
1388 	.io_checkread	= hammer_io_checkread,
1389 	.io_checkwrite	= hammer_io_checkwrite,
1390 };
1391 
1392 /************************************************************************
1393  *				DIRECT IO OPS 				*
1394  ************************************************************************
1395  *
1396  * These functions operate directly on the buffer cache buffer associated
1397  * with a front-end vnode rather than a back-end device vnode.
1398  */
1399 
1400 /*
1401  * Read a buffer associated with a front-end vnode directly from the
1402  * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
1403  * we validate the CRC.
1404  *
1405  * We must check for the presence of a HAMMER buffer to handle the case
1406  * where the reblocker has rewritten the data (which it does via the HAMMER
1407  * buffer system, not via the high-level vnode buffer cache), but not yet
1408  * committed the buffer to the media.
1409  */
1410 int
1411 hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
1412 		      hammer_btree_leaf_elm_t leaf)
1413 {
1414 	hammer_off_t buf_offset;
1415 	hammer_off_t zone2_offset;
1416 	hammer_volume_t volume;
1417 	struct buf *bp;
1418 	struct bio *nbio;
1419 	int vol_no;
1420 	int error;
1421 
1422 	buf_offset = bio->bio_offset;
1423 	KKASSERT((buf_offset & HAMMER_OFF_ZONE_MASK) ==
1424 		 HAMMER_ZONE_LARGE_DATA);
1425 
1426 	/*
1427 	 * The buffer cache may have an aliased buffer (the reblocker can
1428 	 * write them).  If it does we have to sync any dirty data before
1429 	 * we can build our direct-read.  This is a non-critical code path.
1430 	 */
1431 	bp = bio->bio_buf;
1432 	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
1433 
1434 	/*
1435 	 * Resolve to a zone-2 offset.  The conversion just requires
1436 	 * munging the top 4 bits but we want to abstract it anyway
1437 	 * so the blockmap code can verify the zone assignment.
1438 	 */
1439 	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1440 	if (error)
1441 		goto done;
1442 	KKASSERT((zone2_offset & HAMMER_OFF_ZONE_MASK) ==
1443 		 HAMMER_ZONE_RAW_BUFFER);
1444 
1445 	/*
1446 	 * Resolve volume and raw-offset for 3rd level bio.  The
1447 	 * offset will be specific to the volume.
1448 	 */
1449 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
1450 	volume = hammer_get_volume(hmp, vol_no, &error);
1451 	if (error == 0 && zone2_offset >= volume->maxbuf_off)
1452 		error = EIO;
1453 
1454 	if (error == 0) {
1455 		/*
1456 		 * 3rd level bio
1457 		 */
1458 		nbio = push_bio(bio);
1459 		nbio->bio_offset = volume->ondisk->vol_buf_beg +
1460 				   (zone2_offset & HAMMER_OFF_SHORT_MASK);
1461 #if 0
1462 		/*
1463 		 * XXX disabled - our CRC check doesn't work if the OS
1464 		 * does bogus_page replacement on the direct-read.
1465 		 */
1466 		if (leaf && hammer_verify_data) {
1467 			nbio->bio_done = hammer_io_direct_read_complete;
1468 			nbio->bio_caller_info1.uvalue32 = leaf->data_crc;
1469 		}
1470 #endif
1471 		hammer_stats_disk_read += bp->b_bufsize;
1472 		vn_strategy(volume->devvp, nbio);
1473 	}
1474 	hammer_rel_volume(volume, 0);
1475 done:
1476 	if (error) {
1477 		kprintf("hammer_direct_read: failed @ %016llx\n",
1478 			(long long)zone2_offset);
1479 		bp->b_error = error;
1480 		bp->b_flags |= B_ERROR;
1481 		biodone(bio);
1482 	}
1483 	return(error);
1484 }
1485 
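/*
 * Note on the offset translation used by the direct-I/O paths above and
 * below: the frontend bio carries a zone-X (large-data) offset,
 * hammer_blockmap_lookup() converts that to a zone-2 raw-buffer offset,
 * and the bio finally handed to the device uses the volume-relative
 * offset (vol_buf_beg plus the zone-2 offset masked with
 * HAMMER_OFF_SHORT_MASK).
 */
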
1486 #if 0
1487 /*
1488  * On completion of the BIO this callback must check the data CRC
1489  * and chain to the previous bio.
1490  *
1491  * MPSAFE - since we do not modify and hammer_records we do not need
1492  *	    io_token.
1493  *
1494  * NOTE: MPSAFE callback
1495  */
1496 static
1497 void
1498 hammer_io_direct_read_complete(struct bio *nbio)
1499 {
1500 	struct bio *obio;
1501 	struct buf *bp;
1502 	u_int32_t rec_crc = nbio->bio_caller_info1.uvalue32;
1503 
1504 	bp = nbio->bio_buf;
1505 	if (crc32(bp->b_data, bp->b_bufsize) != rec_crc) {
1506 		kprintf("HAMMER: data_crc error @%016llx/%d\n",
1507 			nbio->bio_offset, bp->b_bufsize);
1508 		if (hammer_debug_critical)
1509 			Debugger("data_crc on read");
1510 		bp->b_flags |= B_ERROR;
1511 		bp->b_error = EIO;
1512 	}
1513 	obio = pop_bio(nbio);
1514 	biodone(obio);
1515 }
1516 #endif
1517 
1518 /*
1519  * Write a buffer associated with a front-end vnode directly to the
1520  * disk media.  The bio may be issued asynchronously.
1521  *
1522  * The BIO is associated with the specified record and RECG_DIRECT_IO
1523  * is set.  The record is added to its object.
1524  */
1525 int
1526 hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
1527 		       hammer_record_t record)
1528 {
1529 	hammer_btree_leaf_elm_t leaf = &record->leaf;
1530 	hammer_off_t buf_offset;
1531 	hammer_off_t zone2_offset;
1532 	hammer_volume_t volume;
1533 	hammer_buffer_t buffer;
1534 	struct buf *bp;
1535 	struct bio *nbio;
1536 	char *ptr;
1537 	int vol_no;
1538 	int error;
1539 
1540 	buf_offset = leaf->data_offset;
1541 
1542 	KKASSERT(buf_offset > HAMMER_ZONE_BTREE);
1543 	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
1544 
1545 	/*
1546 	 * Issue or execute the I/O.  The new memory record must replace
1547 	 * the old one before the I/O completes, otherwise a reacquisition of
1548 	 * the buffer will load the old media data instead of the new.
1549 	 */
1550 	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
1551 	    leaf->data_len >= HAMMER_BUFSIZE) {
1552 		/*
1553 		 * We are using the vnode's bio to write directly to the
1554 		 * media, any hammer_buffer at the same zone-X offset will
1555 		 * now have stale data.
1556 		 */
1557 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1558 		vol_no = HAMMER_VOL_DECODE(zone2_offset);
1559 		volume = hammer_get_volume(hmp, vol_no, &error);
1560 
1561 		if (error == 0 && zone2_offset >= volume->maxbuf_off)
1562 			error = EIO;
1563 		if (error == 0) {
1564 			bp = bio->bio_buf;
1565 			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
1566 			/*
1567 			hammer_del_buffers(hmp, buf_offset,
1568 					   zone2_offset, bp->b_bufsize);
1569 			*/
1570 
1571 			/*
1572 			 * Second level bio - cached zone2 offset.
1573 			 *
1574 			 * (We can put our bio_done function in either the
1575 			 *  2nd or 3rd level).
1576 			 */
1577 			nbio = push_bio(bio);
1578 			nbio->bio_offset = zone2_offset;
1579 			nbio->bio_done = hammer_io_direct_write_complete;
1580 			nbio->bio_caller_info1.ptr = record;
1581 			record->zone2_offset = zone2_offset;
1582 			record->gflags |= HAMMER_RECG_DIRECT_IO |
1583 					 HAMMER_RECG_DIRECT_INVAL;
1584 
1585 			/*
1586 			 * Third level bio - raw offset specific to the
1587 			 * correct volume.
1588 			 */
1589 			zone2_offset &= HAMMER_OFF_SHORT_MASK;
1590 			nbio = push_bio(nbio);
1591 			nbio->bio_offset = volume->ondisk->vol_buf_beg +
1592 					   zone2_offset;
1593 			hammer_stats_disk_write += bp->b_bufsize;
1594 			hammer_ip_replace_bulk(hmp, record);
1595 			vn_strategy(volume->devvp, nbio);
1596 			hammer_io_flush_mark(volume);
1597 		}
1598 		hammer_rel_volume(volume, 0);
1599 	} else {
1600 		/*
1601 		 * Must fit in a standard HAMMER buffer.  In this case all
1602 		 * consumers use the HAMMER buffer system and RECG_DIRECT_IO
1603 		 * does not need to be set up.
1604 		 */
1605 		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
1606 		buffer = NULL;
1607 		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
1608 		if (error == 0) {
1609 			bp = bio->bio_buf;
1610 			bp->b_flags |= B_AGE;
1611 			hammer_io_modify(&buffer->io, 1);
1612 			bcopy(bp->b_data, ptr, leaf->data_len);
1613 			hammer_io_modify_done(&buffer->io);
1614 			hammer_rel_buffer(buffer, 0);
1615 			bp->b_resid = 0;
1616 			hammer_ip_replace_bulk(hmp, record);
1617 			biodone(bio);
1618 		}
1619 	}
1620 	if (error) {
1621 		/*
1622 		 * Major suckage occurred.  Also note:  The record was
1623 		 * never added to the tree so we do not have to worry
1624 		 * about the backend.
1625 		 */
1626 		kprintf("hammer_direct_write: failed @ %016llx\n",
1627 			(long long)leaf->data_offset);
1628 		bp = bio->bio_buf;
1629 		bp->b_resid = 0;
1630 		bp->b_error = EIO;
1631 		bp->b_flags |= B_ERROR;
1632 		biodone(bio);
1633 		record->flags |= HAMMER_RECF_DELETED_FE;
1634 		hammer_rel_mem_record(record);
1635 	}
1636 	return(error);
1637 }
1638 
1639 /*
1640  * On completion of the BIO this callback must disconnect
1641  * it from the hammer_record and chain to the previous bio.
1642  *
1643  * An I/O error forces the mount to read-only.  Data buffers
1644  * are not B_LOCKED like meta-data buffers are, so we have to
1645  * throw the buffer away to prevent the kernel from retrying.
1646  *
1647  * NOTE: MPSAFE callback, only modify fields we have explicit
1648  *	 access to (the bp and the record->gflags).
1649  */
1650 static
1651 void
1652 hammer_io_direct_write_complete(struct bio *nbio)
1653 {
1654 	struct bio *obio;
1655 	struct buf *bp;
1656 	hammer_record_t record;
1657 	hammer_mount_t hmp;
1658 
1659 	record = nbio->bio_caller_info1.ptr;
1660 	KKASSERT(record != NULL);
1661 	hmp = record->ip->hmp;
1662 
1663 	lwkt_gettoken(&hmp->io_token);
1664 
1665 	bp = nbio->bio_buf;
1666 	obio = pop_bio(nbio);
1667 	if (bp->b_flags & B_ERROR) {
1668 		lwkt_gettoken(&hmp->fs_token);
1669 		hammer_critical_error(hmp, record->ip,
1670 				      bp->b_error,
1671 				      "while writing bulk data");
1672 		lwkt_reltoken(&hmp->fs_token);
1673 		bp->b_flags |= B_INVAL;
1674 	}
1675 	biodone(obio);
1676 
1677 	KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO);
1678 	if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
1679 		record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
1680 				    HAMMER_RECG_DIRECT_WAIT);
1681 		/* record can disappear once DIRECT_IO flag is cleared */
1682 		wakeup(&record->flags);
1683 	} else {
1684 		record->gflags &= ~HAMMER_RECG_DIRECT_IO;
1685 		/* record can disappear once DIRECT_IO flag is cleared */
1686 	}
1687 	lwkt_reltoken(&hmp->io_token);
1688 }
1689 
1690 
1691 /*
1692  * This is called before a record is either committed to the B-Tree
1693  * or destroyed, to resolve any associated direct-IO.
1694  *
1695  * (1) We must wait for any direct-IO related to the record to complete.
1696  *
1697  * (2) We must remove any buffer cache aliases for data accessed via
1698  *     leaf->data_offset or zone2_offset so non-direct-IO consumers
1699  *     (the mirroring and reblocking code) do not see stale data.
1700  */
1701 void
1702 hammer_io_direct_wait(hammer_record_t record)
1703 {
1704 	hammer_mount_t hmp = record->ip->hmp;
1705 
1706 	/*
1707 	 * Wait for I/O to complete
1708 	 */
1709 	if (record->gflags & HAMMER_RECG_DIRECT_IO) {
1710 		lwkt_gettoken(&hmp->io_token);
1711 		while (record->gflags & HAMMER_RECG_DIRECT_IO) {
1712 			record->gflags |= HAMMER_RECG_DIRECT_WAIT;
1713 			tsleep(&record->flags, 0, "hmdiow", 0);
1714 		}
1715 		lwkt_reltoken(&hmp->io_token);
1716 	}
1717 
1718 	/*
1719 	 * Invalidate any related buffer cache aliases associated with the
1720 	 * backing device.  This is needed because the buffer cache buffer
1721 	 * for file data is associated with the file vnode, not the backing
1722 	 * device vnode.
1723 	 *
1724 	 * XXX I do not think this case can occur any more now that
1725 	 * reservations ensure that all such buffers are removed before
1726 	 * an area can be reused.
1727 	 */
1728 	if (record->gflags & HAMMER_RECG_DIRECT_INVAL) {
1729 		KKASSERT(record->leaf.data_offset);
1730 		hammer_del_buffers(hmp, record->leaf.data_offset,
1731 				   record->zone2_offset, record->leaf.data_len,
1732 				   1);
1733 		record->gflags &= ~HAMMER_RECG_DIRECT_INVAL;
1734 	}
1735 }
1736 
1737 /*
1738  * This is called to remove the second-level cached zone-2 offset from
1739  * frontend buffer cache buffers, now stale due to a data relocation.
1740  * These offsets are generated by cluster_read() via VOP_BMAP, or directly
1741  * by hammer_vop_strategy_read().
1742  *
1743  * This is rather nasty because here we have something like the reblocker
1744  * scanning the raw B-Tree with no held references on anything, really,
1745  * other then a shared lock on the B-Tree node, and we have to access the
1746  * other than a shared lock on the B-Tree node, and we have to access the
1747  * Specifically, if the reblocker is moving data on the disk, these cached
1748  * offsets will become invalid.
1749  *
1750  * Only data record types associated with the large-data zone are subject
1751  * to direct-io and need to be checked.
1752  *
1753  */
1754 void
1755 hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
1756 {
1757 	struct hammer_inode_info iinfo;
1758 	int zone;
1759 
1760 	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
1761 		return;
1762 	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
1763 	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
1764 		return;
1765 	iinfo.obj_id = leaf->base.obj_id;
1766 	iinfo.obj_asof = 0;	/* unused */
1767 	iinfo.obj_localization = leaf->base.localization &
1768 				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
1769 	iinfo.u.leaf = leaf;
1770 	hammer_scan_inode_snapshots(hmp, &iinfo,
1771 				    hammer_io_direct_uncache_callback,
1772 				    leaf);
1773 }
1774 
1775 static int
1776 hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
1777 {
1778 	hammer_inode_info_t iinfo = data;
1779 	hammer_off_t data_offset;
1780 	hammer_off_t file_offset;
1781 	struct vnode *vp;
1782 	struct buf *bp;
1783 	int blksize;
1784 
1785 	if (ip->vp == NULL)
1786 		return(0);
1787 	data_offset = iinfo->u.leaf->data_offset;
1788 	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
1789 	blksize = iinfo->u.leaf->data_len;
1790 	KKASSERT((blksize & HAMMER_BUFMASK) == 0);
1791 
1792 	/*
1793 	 * Warning: FINDBLK_TEST returns stable storage but not stable
1794 	 *	    contents.  It happens to be ok in this case.
1795 	 */
1796 	hammer_ref(&ip->lock);
1797 	if (hammer_get_vnode(ip, &vp) == 0) {
1798 		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
1799 		    bp->b_bio2.bio_offset != NOOFFSET) {
1800 			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
1801 			bp->b_bio2.bio_offset = NOOFFSET;
1802 			brelse(bp);
1803 		}
1804 		vput(vp);
1805 	}
1806 	hammer_rel_inode(ip, 0);
1807 	return(0);
1808 }
1809 
1810 
1811 /*
1812  * This function is called when writes may have occurred on the volume,
1813  * indicating that the device may be holding cached writes.
1814  */
1815 static void
1816 hammer_io_flush_mark(hammer_volume_t volume)
1817 {
1818 	atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH);
1819 }
1820 
1821 /*
1822  * This function ensures that the device has flushed any cached writes out.
1823  */
1824 void
1825 hammer_io_flush_sync(hammer_mount_t hmp)
1826 {
1827 	hammer_volume_t volume;
1828 	struct buf *bp_base = NULL;
1829 	struct buf *bp;
1830 
1831 	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1832 		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
1833 			atomic_clear_int(&volume->vol_flags,
1834 					 HAMMER_VOLF_NEEDFLUSH);
1835 			bp = getpbuf(NULL);
1836 			bp->b_bio1.bio_offset = 0;
1837 			bp->b_bufsize = 0;
1838 			bp->b_bcount = 0;
1839 			bp->b_cmd = BUF_CMD_FLUSH;
1840 			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
1841 			bp->b_bio1.bio_done = biodone_sync;
1842 			bp->b_bio1.bio_flags |= BIO_SYNC;
1843 			bp_base = bp;
1844 			vn_strategy(volume->devvp, &bp->b_bio1);
1845 		}
1846 	}
1847 	while ((bp = bp_base) != NULL) {
1848 		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
1849 		biowait(&bp->b_bio1, "hmrFLS");
1850 		relpbuf(bp, NULL);
1851 	}
1852 }
1853 
1854 /*
1855  * Limit the amount of backlog which we allow to build up
1856  */
1857 void
1858 hammer_io_limit_backlog(hammer_mount_t hmp)
1859 {
1860         while (hmp->io_running_space > hammer_limit_running_io) {
1861                 hmp->io_running_wakeup = 1;
1862                 tsleep(&hmp->io_running_wakeup, 0, "hmiolm", hz / 10);
1863         }
1864 }
1865