xref: /dragonfly/sys/vfs/hammer/hammer_io.c (revision 374a548a)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * IO Primitives and buffer cache management
36  *
37  * All major data-tracking structures in HAMMER contain a struct hammer_io
38  * which is used to manage their backing store.  We use filesystem buffers
39  * for backing store and we leave them passively associated with their
40  * HAMMER structures.
41  *
42  * If the kernel tries to destroy a passively associated buf which we cannot
43  * yet let go, we set B_LOCKED in the buffer and then actively release it
44  * later when we can.
45  *
46  * The io_token is required for anything which might race bioops and bio_done
47  * callbacks, with one exception: a successful hammer_try_interlock_norefs().
48  * The fs_token will be held in all other cases.
49  */
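/*
 * Editorial sketch (not part of the original source): the token discipline
 * described above usually shows up as the pattern below.  Any field that a
 * bioops or bio_done callback can also touch is only modified while holding
 * hmp->io_token; everything else relies on the caller-held fs_token.  The
 * function name is hypothetical and the types come from hammer.h below.
 */
#if 0	/* illustrative only */
static void
hammer_io_example_update(hammer_io_t io)
{
	hammer_mount_t hmp = io->hmp;

	lwkt_gettoken(&hmp->io_token);	/* interlock against bio_done */
	/* ... adjust fields shared with the bioops callbacks ... */
	lwkt_reltoken(&hmp->io_token);
}
#endif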
50 
51 #include <sys/buf2.h>
52 
53 #include "hammer.h"
54 
55 static void hammer_io_modify(hammer_io_t io, int count);
56 static void hammer_io_deallocate(struct buf *bp);
57 static void hammer_indirect_callback(struct bio *bio);
58 static void hammer_io_direct_write_complete(struct bio *nbio);
59 static int hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data);
60 static void hammer_io_set_modlist(hammer_io_t io);
61 static __inline void hammer_io_flush_mark(hammer_volume_t volume);
62 static struct bio_ops hammer_bioops;
63 
64 static int
65 hammer_mod_rb_compare(hammer_io_t io1, hammer_io_t io2)
66 {
67 	hammer_off_t io1_offset;
68 	hammer_off_t io2_offset;
69 
70 	/*
71 	 * Encoded offsets are neither valid block device offsets
72 	 * nor valid zone-X offsets.
73 	 */
74 	io1_offset = HAMMER_ENCODE(0, io1->volume->vol_no, io1->offset);
75 	io2_offset = HAMMER_ENCODE(0, io2->volume->vol_no, io2->offset);
76 
77 	if (io1_offset < io2_offset)
78 		return(-1);
79 	if (io1_offset > io2_offset)
80 		return(1);
81 	return(0);
82 }
83 
84 RB_GENERATE(hammer_mod_rb_tree, hammer_io, rb_node, hammer_mod_rb_compare);
85 
86 /*
87  * Initialize a new, already-zero'd hammer_io structure, or reinitialize
88  * an existing hammer_io structure which may have switched to another type.
89  */
90 void
91 hammer_io_init(hammer_io_t io, hammer_volume_t volume, hammer_io_type_t type)
92 {
93 	io->volume = volume;
94 	io->hmp = volume->io.hmp;
95 	io->type = type;
96 }
97 
98 hammer_io_type_t
99 hammer_zone_to_iotype(int zone)
100 {
101 	hammer_io_type_t iotype;
102 
103 	switch(zone) {
104 	case HAMMER_ZONE_RAW_VOLUME_INDEX:
105 		iotype = HAMMER_IOTYPE_VOLUME;
106 		break;
107 	case HAMMER_ZONE_RAW_BUFFER_INDEX:
108 	case HAMMER_ZONE_FREEMAP_INDEX:
109 	case HAMMER_ZONE_BTREE_INDEX:
110 	case HAMMER_ZONE_META_INDEX:
111 		iotype = HAMMER_IOTYPE_META_BUFFER;
112 		break;
113 	case HAMMER_ZONE_UNDO_INDEX:
114 		iotype = HAMMER_IOTYPE_UNDO_BUFFER;
115 		break;
116 	case HAMMER_ZONE_LARGE_DATA_INDEX:
117 	case HAMMER_ZONE_SMALL_DATA_INDEX:
118 		iotype = HAMMER_IOTYPE_DATA_BUFFER;
119 		break;
120 	default:
121 		iotype = HAMMER_IOTYPE_DUMMY;
122 		break;
123 	}
124 
125 	return(iotype);
126 }
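/*
 * Hedged usage sketch (editorial addition): a caller holding a zone-X
 * encoded offset would typically derive the io type like this.  The
 * HAMMER_ZONE_DECODE() macro is assumed to come from hammer_disk.h.
 */
#if 0
	hammer_io_type_t iotype;

	iotype = hammer_zone_to_iotype(HAMMER_ZONE_DECODE(zoneX_offset));
#endif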
127 
128 static const char*
129 hammer_io_to_iostring(hammer_io_t io)
130 {
131 	const char *iostr = NULL;
132 
133 	switch(io->type) {
134 	case HAMMER_IOTYPE_VOLUME:
135 		iostr = "volume";
136 		break;
137 	case HAMMER_IOTYPE_META_BUFFER:
138 		switch(HAMMER_ZONE(HAMMER_ITOB(io)->zoneX_offset)) {
139 		case HAMMER_ZONE_RAW_BUFFER:
140 			iostr = "meta/raw_buffer";
141 			break;
142 		case HAMMER_ZONE_FREEMAP:
143 			iostr = "meta/freemap";
144 			break;
145 		case HAMMER_ZONE_BTREE:
146 			iostr = "meta/btree";
147 			break;
148 		case HAMMER_ZONE_META:
149 			iostr = "meta/meta";
150 			break;
151 		}
152 		break;
153 	case HAMMER_IOTYPE_UNDO_BUFFER:
154 		iostr = "undo";
155 		break;
156 	case HAMMER_IOTYPE_DATA_BUFFER:
157 		switch(HAMMER_ZONE(HAMMER_ITOB(io)->zoneX_offset)) {
158 		case HAMMER_ZONE_LARGE_DATA:
159 			iostr = "data/large_data";
160 			break;
161 		case HAMMER_ZONE_SMALL_DATA:
162 			iostr = "data/small_data";
163 			break;
164 		}
165 		break;
166 	case HAMMER_IOTYPE_DUMMY:
167 		iostr = "dummy";
168 		break;
169 	default:
170 		hpanic("bad io type");
171 		break;
172 	}
173 
174 	return(iostr);
175 }
176 
177 /*
178  * Helper routine to disassociate a buffer cache buffer from an I/O
179  * structure.  The io must be interlocked and marked appropriately for
180  * reclamation.
181  *
182  * The io must be in a released state with the io->bp owned and
183  * locked by the caller of this function.  When not called from an
184  * io_deallocate() this cannot race an io_deallocate() since the
185  * kernel would be unable to get the buffer lock in that case.
186  * (The released state in this case means we own the bp, not the
187  * hammer_io structure).
188  *
189  * The io may have 0 or 1 references depending on who called us.  The
190  * caller is responsible for dealing with the refs.
191  *
192  * This call can only be made when no action is required on the buffer.
193  *
194  * This function is guaranteed not to race against anything because we
195  * own both the io lock and the bp lock and are interlocked with no
196  * references.
197  */
198 static void
199 hammer_io_disassociate(hammer_io_t io)
200 {
201 	struct buf *bp = io->bp;
202 
203 	KKASSERT(io->released);
204 	KKASSERT(io->modified == 0);
205 	KKASSERT(hammer_buf_peek_io(bp) == io);
206 	buf_dep_init(bp);
207 	io->bp = NULL;
208 
209 	/*
210 	 * If the buffer was locked someone wanted to get rid of it.
211 	 */
212 	if (bp->b_flags & B_LOCKED) {
213 		atomic_add_int(&hammer_count_io_locked, -1);
214 		bp->b_flags &= ~B_LOCKED;
215 	}
216 	if (io->reclaim) {
217 		bp->b_flags |= B_NOCACHE|B_RELBUF;
218 		io->reclaim = 0;
219 	}
220 
221 	switch(io->type) {
222 	case HAMMER_IOTYPE_VOLUME:
223 		HAMMER_ITOV(io)->ondisk = NULL;
224 		break;
225 	case HAMMER_IOTYPE_DATA_BUFFER:
226 	case HAMMER_IOTYPE_META_BUFFER:
227 	case HAMMER_IOTYPE_UNDO_BUFFER:
228 		HAMMER_ITOB(io)->ondisk = NULL;
229 		break;
230 	case HAMMER_IOTYPE_DUMMY:
231 		hpanic("bad io type");
232 		break;
233 	}
234 }
235 
236 /*
237  * Wait for any physical IO to complete
238  *
239  * XXX we aren't interlocked against a spinlock or anything so there
240  *     is a small window in the interlock / io->running == 0 test.
241  */
242 void
243 hammer_io_wait(hammer_io_t io)
244 {
245 	if (io->running) {
246 		hammer_mount_t hmp = io->hmp;
247 
248 		lwkt_gettoken(&hmp->io_token);
249 		while (io->running) {
250 			io->waiting = 1;
251 			tsleep_interlock(io, 0);
252 			if (io->running)
253 				tsleep(io, PINTERLOCKED, "hmrflw", hz);
254 		}
255 		lwkt_reltoken(&hmp->io_token);
256 	}
257 }
258 
259 /*
260  * Wait for all currently queued HAMMER-initiated I/Os to complete.
261  *
262  * This is not supposed to count direct I/Os but some can leak
263  * through (for non-full-sized direct I/Os).
264  */
265 void
266 hammer_io_wait_all(hammer_mount_t hmp, const char *ident, int doflush)
267 {
268 	struct hammer_io iodummy;
269 	hammer_io_t io;
270 
271 	/*
272 	 * Degenerate case, no I/O is running
273 	 */
274 	lwkt_gettoken(&hmp->io_token);
275 	if (TAILQ_EMPTY(&hmp->iorun_list)) {
276 		lwkt_reltoken(&hmp->io_token);
277 		if (doflush)
278 			hammer_io_flush_sync(hmp);
279 		return;
280 	}
281 	bzero(&iodummy, sizeof(iodummy));
282 	iodummy.type = HAMMER_IOTYPE_DUMMY;
283 
284 	/*
285 	 * Add placemarker and then wait until it becomes the head of
286 	 * the list.
287 	 */
288 	TAILQ_INSERT_TAIL(&hmp->iorun_list, &iodummy, iorun_entry);
289 	while (TAILQ_FIRST(&hmp->iorun_list) != &iodummy) {
290 		tsleep(&iodummy, 0, ident, 0);
291 	}
292 
293 	/*
294 	 * Chain in case several placemarkers are present.
295 	 */
296 	TAILQ_REMOVE(&hmp->iorun_list, &iodummy, iorun_entry);
297 	io = TAILQ_FIRST(&hmp->iorun_list);
298 	if (io && io->type == HAMMER_IOTYPE_DUMMY)
299 		wakeup(io);
300 	lwkt_reltoken(&hmp->io_token);
301 
302 	if (doflush)
303 		hammer_io_flush_sync(hmp);
304 }
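/*
 * Editorial sketch: a typical caller (e.g. the flusher) drains queued I/O
 * and then forces a media flush.  The wait-channel ident shown here is a
 * hypothetical wmesg string, not necessarily one used by the real callers.
 */
#if 0
	hammer_io_wait_all(hmp, "hmrfls", 1);	/* doflush != 0 */
#endif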
305 
306 /*
307  * Clear a flagged error condition on an I/O buffer.  The caller must hold
308  * its own ref on the buffer.
309  */
310 void
311 hammer_io_clear_error(hammer_io_t io)
312 {
313 	hammer_mount_t hmp = io->hmp;
314 
315 	lwkt_gettoken(&hmp->io_token);
316 	if (io->ioerror) {
317 		io->ioerror = 0;
318 		hammer_rel(&io->lock);
319 		KKASSERT(hammer_isactive(&io->lock));
320 	}
321 	lwkt_reltoken(&hmp->io_token);
322 }
323 
324 void
325 hammer_io_clear_error_noassert(hammer_io_t io)
326 {
327 	hammer_mount_t hmp = io->hmp;
328 
329 	lwkt_gettoken(&hmp->io_token);
330 	if (io->ioerror) {
331 		io->ioerror = 0;
332 		hammer_rel(&io->lock);
333 	}
334 	lwkt_reltoken(&hmp->io_token);
335 }
336 
337 /*
338  * This is an advisory function only which tells the buffer cache
339  * the bp is not a meta-data buffer, even though it is backed by
340  * a block device.
341  *
342  * This is used by HAMMER's reblocking code to avoid trying to
343  * swapcache the filesystem's data when it is read or written
344  * by the reblocking code.
345  *
346  * The caller has a ref on the buffer preventing the bp from
347  * being disassociated from it.
348  */
349 void
350 hammer_io_notmeta(hammer_buffer_t buffer)
351 {
352 	if ((buffer->io.bp->b_flags & B_NOTMETA) == 0) {
353 		hammer_mount_t hmp = buffer->io.hmp;
354 
355 		lwkt_gettoken(&hmp->io_token);
356 		buffer->io.bp->b_flags |= B_NOTMETA;
357 		lwkt_reltoken(&hmp->io_token);
358 	}
359 }
360 
361 /*
362  * Load bp for a HAMMER structure.  The io must be exclusively locked by
363  * the caller.
364  *
365  * This routine is mostly used on meta-data and small-data blocks.  Generally
366  * speaking HAMMER assumes some locality of reference and will cluster.
367  *
368  * Note that the caller (hammer_ondisk.c) may place further restrictions
369  * on clusterability via the limit (in bytes).  Typically large-data
370  * zones cannot be clustered due to their mixed buffer sizes.  This is
371  * not an issue since such clustering occurs in hammer_vnops at the
372  * regular file layer, whereas this is the buffered block device layer.
373  *
374  * No I/O callbacks can occur while we hold the buffer locked.
375  */
376 int
377 hammer_io_read(struct vnode *devvp, hammer_io_t io, int limit)
378 {
379 	struct buf *bp;
380 	int   error;
381 
382 	if ((bp = io->bp) == NULL) {
383 		int hce = hammer_cluster_enable;
384 
385 		atomic_add_long(&hammer_count_io_running_read, io->bytes);
386 		if (hce && limit > io->bytes) {
387 			error = cluster_read(devvp, io->offset + limit,
388 					     io->offset, io->bytes,
389 					     HAMMER_CLUSTER_SIZE,
390 					     HAMMER_CLUSTER_SIZE * hce,
391 					     &io->bp);
392 		} else {
393 			error = bread(devvp, io->offset, io->bytes, &io->bp);
394 		}
395 		hammer_stats_disk_read += io->bytes;
396 		atomic_add_long(&hammer_count_io_running_read, -io->bytes);
397 
398 		/*
399 		 * The code generally assumes b_ops/b_dep has been set-up,
400 		 * even if we error out here.
401 		 */
402 		bp = io->bp;
403 		if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IOISSUED)) {
404 			hdkprintf("zone2_offset %016jx %s\n",
405 				(intmax_t)bp->b_bio2.bio_offset,
406 				hammer_io_to_iostring(io));
407 		}
408 		bp->b_flags &= ~B_IOISSUED;
409 		bp->b_ops = &hammer_bioops;
410 
411 		hammer_buf_attach_io(bp, io); /* locked by the io lock */
412 		BUF_KERNPROC(bp);
413 		KKASSERT(io->modified == 0);
414 		KKASSERT(io->running == 0);
415 		KKASSERT(io->waiting == 0);
416 		io->released = 0;	/* we hold an active lock on bp */
417 	} else {
418 		error = 0;
419 	}
420 	return(error);
421 }
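/*
 * Editorial sketch: how a caller in hammer_ondisk.c is expected to drive
 * this routine.  The limit argument (in bytes) caps read-ahead clustering;
 * the variable names here are illustrative.
 */
#if 0
	error = hammer_io_read(volume->devvp, &buffer->io, limit);
	if (error == 0)
		buffer->ondisk = (void *)buffer->io.bp->b_data;
#endif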
422 
423 /*
424  * Similar to hammer_io_read() but returns a zero'd out buffer instead.
425  * Must be called with the IO exclusively locked.
426  *
427  * vfs_bio_clrbuf() is kinda nasty, enforce serialization against background
428  * I/O by forcing the buffer to not be in a released state before calling
429  * it.
430  *
431  * This function will also mark the IO as modified but it will not
432  * increment the modify_refs count.
433  *
434  * No I/O callbacks can occur while we hold the buffer locked.
435  */
436 int
437 hammer_io_new(struct vnode *devvp, hammer_io_t io)
438 {
439 	struct buf *bp;
440 
441 	if ((bp = io->bp) == NULL) {
442 		io->bp = getblk(devvp, io->offset, io->bytes, 0, 0);
443 		bp = io->bp;
444 		bp->b_ops = &hammer_bioops;
445 
446 		hammer_buf_attach_io(bp, io); /* locked by the io lock */
447 		io->released = 0;
448 		KKASSERT(io->running == 0);
449 		io->waiting = 0;
450 		BUF_KERNPROC(bp);
451 	} else {
452 		if (io->released) {
453 			regetblk(bp);
454 			BUF_KERNPROC(bp);
455 			io->released = 0;
456 		}
457 	}
458 	hammer_io_modify(io, 0);
459 	vfs_bio_clrbuf(bp);
460 	return(0);
461 }
462 
463 /*
464  * Advance the activity count on the underlying buffer because
465  * HAMMER does not getblk/brelse on every access.
466  *
467  * The io->bp cannot go away while the buffer is referenced.
468  */
469 void
470 hammer_io_advance(hammer_io_t io)
471 {
472 	if (io->bp)
473 		buf_act_advance(io->bp);
474 }
475 
476 /*
477  * Remove potential device level aliases against buffers managed by high level
478  * vnodes.  Aliases can also be created due to mixed buffer sizes or via
479  * direct access to the backing store device.
480  *
481  * This is nasty because the buffers are also VMIO-backed.  Even if a buffer
482  * does not exist its backing VM pages might, and we have to invalidate
483  * those as well or a getblk() will reinstate them.
484  *
485  * Buffer cache buffers associated with hammer_buffers cannot be
486  * invalidated.
487  */
488 int
489 hammer_io_inval(hammer_volume_t volume, hammer_off_t zone2_offset)
490 {
491 	hammer_io_t io;
492 	hammer_mount_t hmp;
493 	hammer_off_t phys_offset;
494 	struct buf *bp;
495 	int error;
496 
497 	hmp = volume->io.hmp;
498 	lwkt_gettoken(&hmp->io_token);
499 
500 	/*
501 	 * If a device buffer already exists for the specified physical
502 	 * offset use that, otherwise instantiate a buffer to cover any
503  * related VM pages, set B_NOCACHE, and brelse().
504 	 */
505 	phys_offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
506 	if ((bp = findblk(volume->devvp, phys_offset, 0)) != NULL)
507 		bremfree(bp);
508 	else
509 		bp = getblk(volume->devvp, phys_offset, HAMMER_BUFSIZE, 0, 0);
510 
511 	if ((io = hammer_buf_peek_io(bp)) != NULL) {
512 #if 0
513 		hammer_ref(&io->lock);
514 		hammer_io_clear_modify(io, 1);
515 		bundirty(bp);
516 		io->released = 0;
517 		BUF_KERNPROC(bp);
518 		io->reclaim = 1;
519 		io->waitdep = 1;	/* XXX this is a fs_token field */
520 		KKASSERT(hammer_isactive(&io->lock) == 1);
521 		hammer_rel_buffer(HAMMER_ITOB(io), 0);
522 		/*hammer_io_deallocate(bp);*/
523 #endif
524 		bqrelse(bp);
525 		error = EAGAIN;
526 	} else {
527 		KKASSERT((bp->b_flags & B_LOCKED) == 0);
528 		bundirty(bp);
529 		bp->b_flags |= B_NOCACHE|B_RELBUF;
530 		brelse(bp);
531 		error = 0;
532 	}
533 	lwkt_reltoken(&hmp->io_token);
534 	return(error);
535 }
536 
537 /*
538  * This routine is called on the last reference to a hammer structure.
539  * The io must be interlocked with a refcount of zero.  The hammer structure
540  * will remain interlocked on return.
541  *
542  * This routine may return a non-NULL bp to the caller for disposal.
543  * The caller typically brelse()'s the bp.
544  *
545  * The bp may or may not still be passively associated with the IO.  It
546  * will remain passively associated if it is unreleasable (e.g. a modified
547  * meta-data buffer).
548  *
549  * The only requirement here is that modified meta-data and volume-header
550  * buffers may NOT be disassociated from the IO structure, and consequently
551  * we also leave such buffers actively associated with the IO if they already
552  * are (since the kernel can't do anything with them anyway).  Only the
553  * flusher is allowed to write such buffers out.  Modified pure-data and
554  * undo buffers are returned to the kernel but left passively associated
555  * so we can track when the kernel writes the bp out.
556  */
557 struct buf *
558 hammer_io_release(hammer_io_t io, int flush)
559 {
560 	struct buf *bp;
561 
562 	if ((bp = io->bp) == NULL)
563 		return(NULL);
564 
565 	/*
566 	 * Try to flush a dirty IO to disk if asked to by the
567 	 * caller or if the kernel tried to flush the buffer in the past.
568 	 *
569 	 * Kernel-initiated flushes are only allowed for pure-data buffers.
570 	 * meta-data and volume buffers can only be flushed explicitly
571 	 * by HAMMER.
572 	 */
573 	if (io->modified) {
574 		if (flush) {
575 			hammer_io_flush(io, 0);
576 		} else if (bp->b_flags & B_LOCKED) {
577 			switch(io->type) {
578 			case HAMMER_IOTYPE_DATA_BUFFER:
579 				hammer_io_flush(io, 0);
580 				break;
581 			case HAMMER_IOTYPE_UNDO_BUFFER:
582 				hammer_io_flush(io, hammer_undo_reclaim(io));
583 				break;
584 			default:
585 				break;
586 			}
587 		} /* else no explicit request to flush the buffer */
588 	}
589 
590 	/*
591 	 * Wait for the IO to complete if asked to.  This occurs when
592 	 * the buffer must be disposed of definitively during an umount
593 	 * or buffer invalidation.
594 	 */
595 	if (io->waitdep && io->running) {
596 		hammer_io_wait(io);
597 	}
598 
599 	/*
600  * Return control of the buffer to the kernel (with the proviso
601 	 * that our bioops can override kernel decisions with regards to
602 	 * the buffer).
603 	 */
604 	if ((flush || io->reclaim) && io->modified == 0 && io->running == 0) {
605 		/*
606 		 * Always disassociate the bp if an explicit flush
607 		 * was requested and the IO completed with no error
608 		 * (so unmount can really clean up the structure).
609 		 */
610 		if (io->released) {
611 			regetblk(bp);
612 			BUF_KERNPROC(bp);
613 		} else {
614 			io->released = 1;
615 		}
616 		hammer_io_disassociate(io);
617 		/* return the bp */
618 	} else if (io->modified) {
619 		/*
620 		 * Only certain IO types can be released to the kernel if
621 		 * the buffer has been modified.
622 		 *
623 		 * volume and meta-data IO types may only be explicitly
624 		 * flushed by HAMMER.
625 		 */
626 		switch(io->type) {
627 		case HAMMER_IOTYPE_DATA_BUFFER:
628 		case HAMMER_IOTYPE_UNDO_BUFFER:
629 			if (io->released == 0) {
630 				io->released = 1;
631 				bp->b_flags |= B_CLUSTEROK;
632 				bdwrite(bp);
633 			}
634 			break;
635 		default:
636 			break;
637 		}
638 		bp = NULL;	/* bp left associated */
639 	} else if (io->released == 0) {
640 		/*
641 		 * Clean buffers can be generally released to the kernel.
642 		 * We leave the bp passively associated with the HAMMER
643 		 * structure and use bioops to disconnect it later on
644 		 * if the kernel wants to discard the buffer.
645 		 *
646 		 * We can steal the structure's ownership of the bp.
647 		 */
648 		io->released = 1;
649 		if (bp->b_flags & B_LOCKED) {
650 			hammer_io_disassociate(io);
651 			/* return the bp */
652 		} else {
653 			if (io->reclaim) {
654 				hammer_io_disassociate(io);
655 				/* return the bp */
656 			} else {
657 				/* return the bp (bp passively associated) */
658 			}
659 		}
660 	} else {
661 		/*
662  * A released buffer is passively associated with our
663 		 * hammer_io structure.  The kernel cannot destroy it
664 		 * without making a bioops call.  If the kernel (B_LOCKED)
665 		 * or we (reclaim) requested that the buffer be destroyed
666 		 * we destroy it, otherwise we do a quick get/release to
667 		 * reset its position in the kernel's LRU list.
668 		 *
669 		 * Leaving the buffer passively associated allows us to
670 		 * use the kernel's LRU buffer flushing mechanisms rather
671  * than rolling our own.
672 		 *
673 		 * XXX there are two ways of doing this.  We can re-acquire
674 		 * and passively release to reset the LRU, or not.
675 		 */
676 		if (io->running == 0) {
677 			regetblk(bp);
678 			if ((bp->b_flags & B_LOCKED) || io->reclaim) {
679 				hammer_io_disassociate(io);
680 				/* return the bp */
681 			} else {
682 				/* return the bp (bp passively associated) */
683 			}
684 		} else {
685 			/*
686 			 * bp is left passively associated but we do not
687 			 * try to reacquire it.  Interactions with the io
688 			 * structure will occur on completion of the bp's
689 			 * I/O.
690 			 */
691 			bp = NULL;
692 		}
693 	}
694 	return(bp);
695 }
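/*
 * Editorial sketch: callers dispose of any bp handed back by this routine,
 * typically with brelse(), as described in the comment above.  Variable
 * names are illustrative.
 */
#if 0
	bp = hammer_io_release(&buffer->io, flush);
	if (bp)
		brelse(bp);
#endif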
696 
697 /*
698  * This routine is called with a locked IO when a flush is desired and
699  * no other references to the structure exist other than ours.  This
700  * routine is ONLY called when HAMMER believes it is safe to flush a
701  * potentially modified buffer out.
702  *
703  * The locked io or io reference prevents a flush from being initiated
704  * by the kernel.
705  */
706 void
707 hammer_io_flush(hammer_io_t io, int reclaim)
708 {
709 	struct buf *bp;
710 	hammer_mount_t hmp;
711 
712 	/*
713 	 * Degenerate case - nothing to flush if nothing is dirty.
714 	 */
715 	if (io->modified == 0)
716 		return;
717 
718 	KKASSERT(io->bp);
719 	KKASSERT(io->modify_refs <= 0);
720 
721 	/*
722 	 * Acquire ownership of the bp, particularly before we clear our
723 	 * modified flag.
724 	 *
725 	 * We are going to bawrite() this bp.  Don't leave a window where
726  * io->released is set, we actually own the bp rather than our
727 	 * buffer.
728 	 *
729 	 * The io_token should not be required here as only
730 	 */
731 	hmp = io->hmp;
732 	bp = io->bp;
733 	if (io->released) {
734 		regetblk(bp);
735 		/* BUF_KERNPROC(io->bp); */
736 		/* io->released = 0; */
737 		KKASSERT(io->released);
738 		KKASSERT(io->bp == bp);
739 	} else {
740 		io->released = 1;
741 	}
742 
743 	if (reclaim) {
744 		io->reclaim = 1;
745 		if ((bp->b_flags & B_LOCKED) == 0) {
746 			bp->b_flags |= B_LOCKED;
747 			atomic_add_int(&hammer_count_io_locked, 1);
748 		}
749 	}
750 
751 	/*
752 	 * Acquire exclusive access to the bp and then clear the modified
753 	 * state of the buffer prior to issuing I/O to interlock any
754 	 * modifications made while the I/O is in progress.  This shouldn't
755 	 * happen anyway but losing data would be worse.  The modified bit
756 	 * will be rechecked after the IO completes.
757 	 *
758 	 * NOTE: This call also finalizes the buffer's content (inval == 0).
759 	 *
760 	 * This is only legal when lock.refs == 1 (otherwise we might clear
761 	 * the modified bit while there are still users of the cluster
762 	 * modifying the data).
763 	 *
764 	 * Do this before potentially blocking so any attempt to modify the
765 	 * ondisk while we are blocked blocks waiting for us.
766 	 */
767 	hammer_ref(&io->lock);
768 	hammer_io_clear_modify(io, 0);
769 	hammer_rel(&io->lock);
770 
771 	if (hammer_debug_io & 0x0002)
772 		hdkprintf("%016jx\n", bp->b_bio1.bio_offset);
773 
774 	/*
775 	 * Transfer ownership to the kernel and initiate I/O.
776 	 *
777 	 * NOTE: We do not hold io_token so an atomic op is required to
778 	 *	 update io_running_space.
779 	 */
780 	io->running = 1;
781 	atomic_add_long(&hmp->io_running_space, io->bytes);
782 	atomic_add_long(&hammer_count_io_running_write, io->bytes);
783 	lwkt_gettoken(&hmp->io_token);
784 	TAILQ_INSERT_TAIL(&hmp->iorun_list, io, iorun_entry);
785 	lwkt_reltoken(&hmp->io_token);
786 	cluster_awrite(bp);
787 	hammer_io_flush_mark(io->volume);
788 }
789 
790 /************************************************************************
791  *				BUFFER DIRTYING				*
792  ************************************************************************
793  *
794  * These routines deal with dependencies created when IO buffers get
795  * modified.  The caller must call hammer_modify_*() on a referenced
796  * HAMMER structure prior to modifying its on-disk data.
797  *
798  * Any intent to modify an IO buffer acquires the related bp and imposes
799  * various write ordering dependencies.
800  */
801 
802 /*
803  * Mark a HAMMER structure as undergoing modification.  Meta-data buffers
804  * are locked until the flusher can deal with them, pure data buffers
805  * can be written out.
806  *
807  * The referenced io prevents races.
808  */
809 static
810 void
811 hammer_io_modify(hammer_io_t io, int count)
812 {
813 	/*
814 	 * io->modify_refs must be >= 0
815 	 */
816 	while (io->modify_refs < 0) {
817 		io->waitmod = 1;
818 		tsleep(io, 0, "hmrmod", 0);
819 	}
820 
821 	/*
822 	 * Shortcut if nothing to do.
823 	 */
824 	KKASSERT(hammer_isactive(&io->lock) && io->bp != NULL);
825 	io->modify_refs += count;
826 	if (io->modified && io->released == 0)
827 		return;
828 
829 	/*
830 	 * NOTE: It is important not to set the modified bit
831 	 *	 until after we have acquired the bp or we risk
832 	 *	 racing against checkwrite.
833 	 */
834 	hammer_lock_ex(&io->lock);
835 	if (io->released) {
836 		regetblk(io->bp);
837 		BUF_KERNPROC(io->bp);
838 		io->released = 0;
839 	}
840 	if (io->modified == 0) {
841 		hammer_io_set_modlist(io);
842 		io->modified = 1;
843 	}
844 	hammer_unlock(&io->lock);
845 }
846 
847 static __inline
848 void
849 hammer_io_modify_done(hammer_io_t io)
850 {
851 	KKASSERT(io->modify_refs > 0);
852 	--io->modify_refs;
853 	if (io->modify_refs == 0 && io->waitmod) {
854 		io->waitmod = 0;
855 		wakeup(io);
856 	}
857 }
858 
859 /*
860  * The write interlock blocks other threads trying to modify a buffer
861  * (they block in hammer_io_modify()) after us, or blocks us while other
862  * threads are in the middle of modifying a buffer.
863  *
864  * The caller also has a ref on the io, however if we are not careful
865  * we will race bioops callbacks (checkwrite).  To deal with this
866  * we must at least acquire and release the io_token, and it is probably
867  * better to hold it through the setting of modify_refs.
868  */
869 void
870 hammer_io_write_interlock(hammer_io_t io)
871 {
872 	hammer_mount_t hmp = io->hmp;
873 
874 	lwkt_gettoken(&hmp->io_token);
875 	while (io->modify_refs != 0) {
876 		io->waitmod = 1;
877 		tsleep(io, 0, "hmrmod", 0);
878 	}
879 	io->modify_refs = -1;
880 	lwkt_reltoken(&hmp->io_token);
881 }
882 
883 void
884 hammer_io_done_interlock(hammer_io_t io)
885 {
886 	KKASSERT(io->modify_refs == -1);
887 	io->modify_refs = 0;
888 	if (io->waitmod) {
889 		io->waitmod = 0;
890 		wakeup(io);
891 	}
892 }
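/*
 * Editorial sketch: the interlock pair brackets an operation which must not
 * overlap hammer_io_modify() callers, e.g. a direct write of the underlying
 * media buffer.
 */
#if 0
	hammer_io_write_interlock(io);
	/* ... perform the write-side work on the buffer ... */
	hammer_io_done_interlock(io);
#endif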
893 
894 /*
895  * Caller intends to modify a volume's ondisk structure.
896  *
897  * This is only allowed if we are the flusher or we have a ref on the
898  * sync_lock.
899  */
900 void
901 hammer_modify_volume(hammer_transaction_t trans, hammer_volume_t volume,
902 		     void *base, int len)
903 {
904 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
905 
906 	hammer_io_modify(&volume->io, 1);
907 	if (len) {
908 		intptr_t rel_offset = (intptr_t)base - (intptr_t)volume->ondisk;
909 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
910 		hammer_generate_undo(trans,
911 			 HAMMER_ENCODE_RAW_VOLUME(volume->vol_no, rel_offset),
912 			 base, len);
913 	}
914 }
915 
916 /*
917  * Caller intends to modify a buffer's ondisk structure.
918  *
919  * This is only allowed if we are the flusher or we have a ref on the
920  * sync_lock.
921  */
922 void
923 hammer_modify_buffer(hammer_transaction_t trans, hammer_buffer_t buffer,
924 		     void *base, int len)
925 {
926 	KKASSERT (trans == NULL || trans->sync_lock_refs > 0);
927 
928 	hammer_io_modify(&buffer->io, 1);
929 	if (len) {
930 		intptr_t rel_offset = (intptr_t)base - (intptr_t)buffer->ondisk;
931 		KKASSERT((rel_offset & ~(intptr_t)HAMMER_BUFMASK) == 0);
932 		hammer_generate_undo(trans,
933 				     buffer->zone2_offset + rel_offset,
934 				     base, len);
935 	}
936 }
937 
938 void
939 hammer_modify_volume_done(hammer_volume_t volume)
940 {
941 	hammer_io_modify_done(&volume->io);
942 }
943 
944 void
945 hammer_modify_buffer_done(hammer_buffer_t buffer)
946 {
947 	hammer_io_modify_done(&buffer->io);
948 }
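/*
 * Editorial sketch of the modify/undo bracketing pattern (the element name
 * is hypothetical): declare the region so undo is generated, mutate it in
 * place, then drop the modify ref so the flusher can proceed.
 */
#if 0
	hammer_modify_buffer(trans, buffer, &ondisk->elm, sizeof(ondisk->elm));
	/* ... update ondisk->elm ... */
	hammer_modify_buffer_done(buffer);
#endif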
949 
950 /*
951  * Mark an entity as not being dirty any more and finalize any
952  * delayed adjustments to the buffer.
953  *
954  * Delayed adjustments are an important performance enhancement, allowing
955  * us to avoid recalculating B-Tree node CRCs over and over again when
956  * making bulk-modifications to the B-Tree.
957  *
958  * If inval is non-zero delayed adjustments are ignored.
959  *
960  * This routine may dereference related btree nodes and cause the
961  * buffer to be dereferenced.  The caller must own a reference on io.
962  */
963 void
964 hammer_io_clear_modify(hammer_io_t io, int inval)
965 {
966 	hammer_mount_t hmp;
967 
968 	/*
969 	 * io_token is needed to avoid races on mod_root
970 	 */
971 	if (io->modified == 0)
972 		return;
973 	hmp = io->hmp;
974 	lwkt_gettoken(&hmp->io_token);
975 	if (io->modified == 0) {
976 		lwkt_reltoken(&hmp->io_token);
977 		return;
978 	}
979 
980 	/*
981 	 * Take us off the mod-list and clear the modified bit.
982 	 */
983 	KKASSERT(io->mod_root != NULL);
984 	if (io->mod_root == &io->hmp->volu_root ||
985 	    io->mod_root == &io->hmp->meta_root) {
986 		io->hmp->locked_dirty_space -= io->bytes;
987 		atomic_add_long(&hammer_count_dirtybufspace, -io->bytes);
988 	}
989 	RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
990 	io->mod_root = NULL;
991 	io->modified = 0;
992 
993 	lwkt_reltoken(&hmp->io_token);
994 
995 	/*
996 	 * If this bit is not set there are no delayed adjustments.
997 	 */
998 	if (io->gencrc == 0)
999 		return;
1000 	io->gencrc = 0;
1001 
1002 	/*
1003 	 * Finalize requested CRCs.  The NEEDSCRC flag also holds a reference
1004 	 * on the node (& underlying buffer).  Release the node after clearing
1005 	 * the flag.
1006 	 */
1007 	if (io->type == HAMMER_IOTYPE_META_BUFFER) {
1008 		hammer_buffer_t buffer = HAMMER_ITOB(io);
1009 		hammer_node_t node;
1010 
1011 restart:
1012 		TAILQ_FOREACH(node, &buffer->node_list, entry) {
1013 			if ((node->flags & HAMMER_NODE_NEEDSCRC) == 0)
1014 				continue;
1015 			node->flags &= ~HAMMER_NODE_NEEDSCRC;
1016 			KKASSERT(node->ondisk);
1017 			if (inval == 0)
1018 				hammer_crc_set_btree(hmp->version, node->ondisk);
1019 			hammer_rel_node(node);
1020 			goto restart;
1021 		}
1022 	}
1023 	/* caller must still have ref on io */
1024 	KKASSERT(hammer_isactive(&io->lock));
1025 }
1026 
1027 /*
1028  * Clear the IO's modify list.  Even though the IO is no longer modified
1029  * it may still be on the lose_root.  This routine is called just before
1030  * the governing hammer_buffer is destroyed.
1031  *
1032  * mod_root requires io_token protection.
1033  */
1034 void
1035 hammer_io_clear_modlist(hammer_io_t io)
1036 {
1037 	hammer_mount_t hmp = io->hmp;
1038 
1039 	KKASSERT(io->modified == 0);
1040 	if (io->mod_root) {
1041 		lwkt_gettoken(&hmp->io_token);
1042 		if (io->mod_root) {
1043 			KKASSERT(io->mod_root == &io->hmp->lose_root);
1044 			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
1045 			io->mod_root = NULL;
1046 		}
1047 		lwkt_reltoken(&hmp->io_token);
1048 	}
1049 }
1050 
1051 static void
1052 hammer_io_set_modlist(hammer_io_t io)
1053 {
1054 	hammer_mount_t hmp = io->hmp;
1055 
1056 	lwkt_gettoken(&hmp->io_token);
1057 	KKASSERT(io->mod_root == NULL);
1058 
1059 	switch(io->type) {
1060 	case HAMMER_IOTYPE_VOLUME:
1061 		io->mod_root = &hmp->volu_root;
1062 		hmp->locked_dirty_space += io->bytes;
1063 		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
1064 		break;
1065 	case HAMMER_IOTYPE_META_BUFFER:
1066 		io->mod_root = &hmp->meta_root;
1067 		hmp->locked_dirty_space += io->bytes;
1068 		atomic_add_long(&hammer_count_dirtybufspace, io->bytes);
1069 		break;
1070 	case HAMMER_IOTYPE_UNDO_BUFFER:
1071 		io->mod_root = &hmp->undo_root;
1072 		break;
1073 	case HAMMER_IOTYPE_DATA_BUFFER:
1074 		io->mod_root = &hmp->data_root;
1075 		break;
1076 	case HAMMER_IOTYPE_DUMMY:
1077 		hpanic("bad io type");
1078 		break; /* NOT REACHED */
1079 	}
1080 	if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
1081 		hpanic("duplicate entry @ %d:%015jx",
1082 			io->volume->vol_no, io->offset);
1083 		/* NOT REACHED */
1084 	}
1085 	lwkt_reltoken(&hmp->io_token);
1086 }
1087 
1088 /************************************************************************
1089  *				HAMMER_BIOOPS				*
1090  ************************************************************************
1091  *
1092  */
1093 
1094 /*
1095  * Pre-IO initiation kernel callback - cluster build only
1096  *
1097  * bioops callback - hold io_token
1098  */
1099 static void
1100 hammer_io_start(struct buf *bp)
1101 {
1102 	/* nothing to do, so io_token not needed */
1103 }
1104 
1105 /*
1106  * Post-IO completion kernel callback - MAY BE CALLED FROM INTERRUPT!
1107  *
1108  * NOTE: HAMMER may modify a data buffer after we have initiated write
1109  *	 I/O.
1110  *
1111  * NOTE: MPSAFE callback
1112  *
1113  * bioops callback - hold io_token
1114  */
1115 static void
1116 hammer_io_complete(struct buf *bp)
1117 {
1118 	hammer_io_t io = hammer_buf_peek_io(bp);
1119 	hammer_mount_t hmp = io->hmp;
1120 	hammer_io_t ionext;
1121 
1122 	lwkt_gettoken(&hmp->io_token);
1123 
1124 	KKASSERT(io->released == 1);
1125 
1126 	/*
1127 	 * Deal with people waiting for I/O to drain
1128 	 */
1129 	if (io->running) {
1130 		/*
1131 		 * Deal with critical write errors.  Once a critical error
1132 		 * has been flagged in hmp the UNDO FIFO will not be updated.
1133  * That way crash recovery will give us a consistent
1134 		 * filesystem.
1135 		 *
1136 		 * Because of this we can throw away failed UNDO buffers.  If
1137 		 * we throw away META or DATA buffers we risk corrupting
1138 		 * the now read-only version of the filesystem visible to
1139 		 * the user.  Clear B_ERROR so the buffer is not re-dirtied
1140 		 * by the kernel and ref the io so it doesn't get thrown
1141 		 * away.
1142 		 */
1143 		if (bp->b_flags & B_ERROR) {
1144 			lwkt_gettoken(&hmp->fs_token);
1145 			hammer_critical_error(hmp, NULL, bp->b_error,
1146 					      "while flushing meta-data");
1147 			lwkt_reltoken(&hmp->fs_token);
1148 
1149 			switch(io->type) {
1150 			case HAMMER_IOTYPE_UNDO_BUFFER:
1151 				break;
1152 			default:
1153 				if (io->ioerror == 0) {
1154 					io->ioerror = 1;
1155 					hammer_ref(&io->lock);
1156 				}
1157 				break;
1158 			}
1159 			bp->b_flags &= ~B_ERROR;
1160 			bundirty(bp);
1161 #if 0
1162 			hammer_io_set_modlist(io);
1163 			io->modified = 1;
1164 #endif
1165 		}
1166 		hammer_stats_disk_write += io->bytes;
1167 		atomic_add_long(&hammer_count_io_running_write, -io->bytes);
1168 		atomic_add_long(&hmp->io_running_space, -io->bytes);
1169 		KKASSERT(hmp->io_running_space >= 0);
1170 		io->running = 0;
1171 
1172 		/*
1173 		 * Remove from iorun list and wakeup any multi-io waiter(s).
1174 		 */
1175 		if (TAILQ_FIRST(&hmp->iorun_list) == io) {
1176 			ionext = TAILQ_NEXT(io, iorun_entry);
1177 			if (ionext && ionext->type == HAMMER_IOTYPE_DUMMY)
1178 				wakeup(ionext);
1179 		}
1180 		TAILQ_REMOVE(&hmp->iorun_list, io, iorun_entry);
1181 	} else {
1182 		hammer_stats_disk_read += io->bytes;
1183 	}
1184 
1185 	if (io->waiting) {
1186 		io->waiting = 0;
1187 		wakeup(io);
1188 	}
1189 
1190 	/*
1191 	 * If B_LOCKED is set someone wanted to deallocate the bp at some
1192 	 * point, try to do it now.  The operation will fail if there are
1193 	 * refs or if hammer_io_deallocate() is unable to gain the
1194 	 * interlock.
1195 	 */
1196 	if (bp->b_flags & B_LOCKED) {
1197 		atomic_add_int(&hammer_count_io_locked, -1);
1198 		bp->b_flags &= ~B_LOCKED;
1199 		hammer_io_deallocate(bp);
1200 		/* structure may be dead now */
1201 	}
1202 	lwkt_reltoken(&hmp->io_token);
1203 }
1204 
1205 /*
1206  * Callback from kernel when it wishes to deallocate a passively
1207  * associated structure.  This mostly occurs with clean buffers
1208  * but it may be possible for a holding structure to be marked dirty
1209  * while its buffer is passively associated.  The caller owns the bp.
1210  *
1211  * If we cannot disassociate we set B_LOCKED to prevent the buffer
1212  * from getting reused.
1213  *
1214  * WARNING: Because this can be called directly by getnewbuf we cannot
1215  * recurse into the tree.  If a bp cannot be immediately disassociated
1216  * our only recourse is to set B_LOCKED.
1217  *
1218  * WARNING: This may be called from an interrupt via hammer_io_complete()
1219  *
1220  * bioops callback - hold io_token
1221  */
1222 static void
1223 hammer_io_deallocate(struct buf *bp)
1224 {
1225 	hammer_io_t io = hammer_buf_peek_io(bp);
1226 	hammer_mount_t hmp;
1227 
1228 	hmp = io->hmp;
1229 
1230 	lwkt_gettoken(&hmp->io_token);
1231 
1232 	KKASSERT((bp->b_flags & B_LOCKED) == 0 && io->running == 0);
1233 	if (hammer_try_interlock_norefs(&io->lock) == 0) {
1234 		/*
1235 		 * We cannot safely disassociate a bp from a referenced
1236 		 * or interlocked HAMMER structure.
1237 		 */
1238 		bp->b_flags |= B_LOCKED;
1239 		atomic_add_int(&hammer_count_io_locked, 1);
1240 	} else if (io->modified) {
1241 		/*
1242 		 * It is not legal to disassociate a modified buffer.  This
1243 		 * case really shouldn't ever occur.
1244 		 */
1245 		bp->b_flags |= B_LOCKED;
1246 		atomic_add_int(&hammer_count_io_locked, 1);
1247 		hammer_put_interlock(&io->lock, 0);
1248 	} else {
1249 		/*
1250 		 * Disassociate the BP.  If the io has no refs left we
1251 		 * have to add it to the loose list.  The kernel has
1252 		 * locked the buffer and therefore our io must be
1253 		 * in a released state.
1254 		 */
1255 		hammer_io_disassociate(io);
1256 		if (io->type != HAMMER_IOTYPE_VOLUME) {
1257 			KKASSERT(io->bp == NULL);
1258 			KKASSERT(io->mod_root == NULL);
1259 			io->mod_root = &hmp->lose_root;
1260 			if (RB_INSERT(hammer_mod_rb_tree, io->mod_root, io)) {
1261 				hpanic("duplicate entry @ %d:%015jx",
1262 					io->volume->vol_no, io->offset);
1263 				/* NOT REACHED */
1264 			}
1265 		}
1266 		hammer_put_interlock(&io->lock, 1);
1267 	}
1268 	lwkt_reltoken(&hmp->io_token);
1269 }
1270 
1271 /*
1272  * bioops callback - hold io_token
1273  */
1274 static int
1275 hammer_io_fsync(struct vnode *vp)
1276 {
1277 	/* nothing to do, so io_token not needed */
1278 	return(0);
1279 }
1280 
1281 /*
1282  * NOTE: will not be called unless we tell the kernel about the
1283  * bioops.  Unused... we use the mount's VFS_SYNC instead.
1284  *
1285  * bioops callback - hold io_token
1286  */
1287 static int
1288 hammer_io_sync(struct mount *mp)
1289 {
1290 	/* nothing to do, so io_token not needed */
1291 	return(0);
1292 }
1293 
1294 /*
1295  * bioops callback - hold io_token
1296  */
1297 static void
1298 hammer_io_movedeps(struct buf *bp1, struct buf *bp2)
1299 {
1300 	/* nothing to do, so io_token not needed */
1301 }
1302 
1303 /*
1304  * I/O pre-check for reading and writing.  HAMMER only uses this for
1305  * B_CACHE buffers so checkread just shouldn't happen, but if it does
1306  * allow it.
1307  *
1308  * Writing is a different case.  We don't want the kernel to try to write
1309  * out a buffer that HAMMER may be modifying passively or which has a
1310  * dependency.  In addition, kernel-demanded writes can only proceed for
1311  * certain types of buffers (i.e. UNDO and DATA types).  Other dirty
1312  * buffer types can only be explicitly written by the flusher.
1313  *
1314  * checkwrite will only be called for bdwrite()n buffers.  If we return
1315  * success the kernel is guaranteed to initiate the buffer write.
1316  *
1317  * bioops callback - hold io_token
1318  */
1319 static int
1320 hammer_io_checkread(struct buf *bp)
1321 {
1322 	/* nothing to do, so io_token not needed */
1323 	return(0);
1324 }
1325 
1326 /*
1327  * The kernel is asking us whether it can write out a dirty buffer or not.
1328  *
1329  * bioops callback - hold io_token
1330  */
1331 static int
1332 hammer_io_checkwrite(struct buf *bp)
1333 {
1334 	hammer_io_t io = hammer_buf_peek_io(bp);
1335 	hammer_mount_t hmp = io->hmp;
1336 
1337 	/*
1338 	 * This shouldn't happen under normal operation.
1339 	 */
1340 	lwkt_gettoken(&hmp->io_token);
1341 	if (io->type == HAMMER_IOTYPE_VOLUME ||
1342 	    io->type == HAMMER_IOTYPE_META_BUFFER) {
1343 		if (!panicstr)
1344 			hpanic("illegal buffer");
1345 		if ((bp->b_flags & B_LOCKED) == 0) {
1346 			bp->b_flags |= B_LOCKED;
1347 			atomic_add_int(&hammer_count_io_locked, 1);
1348 		}
1349 		lwkt_reltoken(&hmp->io_token);
1350 		return(1);
1351 	}
1352 
1353 	/*
1354 	 * We have to be able to interlock the IO to safely modify any
1355 	 * of its fields without holding the fs_token.  If we can't lock
1356 	 * it then we are racing someone.
1357 	 *
1358 	 * Our ownership of the bp lock prevents the io from being ripped
1359 	 * out from under us.
1360 	 */
1361 	if (hammer_try_interlock_norefs(&io->lock) == 0) {
1362 		bp->b_flags |= B_LOCKED;
1363 		atomic_add_int(&hammer_count_io_locked, 1);
1364 		lwkt_reltoken(&hmp->io_token);
1365 		return(1);
1366 	}
1367 
1368 	/*
1369 	 * The modified bit must be cleared prior to the initiation of
1370 	 * any IO (returning 0 initiates the IO).  Because this is a
1371 	 * normal data buffer hammer_io_clear_modify() runs through a
1372 	 * simple degenerate case.
1373 	 *
1374 	 * Return 0 will cause the kernel to initiate the IO, and we
1375 	 * must normally clear the modified bit before we begin.  If
1376 	 * the io has modify_refs we do not clear the modified bit,
1377 	 * otherwise we may miss changes.
1378 	 *
1379 	 * Only data and undo buffers can reach here.  These buffers do
1380 	 * not have terminal crc functions but we temporarily reference
1381 	 * the IO anyway, just in case.
1382 	 */
1383 	if (io->modify_refs == 0 && io->modified) {
1384 		hammer_ref(&io->lock);
1385 		hammer_io_clear_modify(io, 0);
1386 		hammer_rel(&io->lock);
1387 	} else if (io->modified) {
1388 		KKASSERT(io->type == HAMMER_IOTYPE_DATA_BUFFER);
1389 	}
1390 
1391 	/*
1392 	 * The kernel is going to start the IO, set io->running.
1393 	 */
1394 	KKASSERT(io->running == 0);
1395 	io->running = 1;
1396 	atomic_add_long(&io->hmp->io_running_space, io->bytes);
1397 	atomic_add_long(&hammer_count_io_running_write, io->bytes);
1398 	TAILQ_INSERT_TAIL(&io->hmp->iorun_list, io, iorun_entry);
1399 
1400 	hammer_put_interlock(&io->lock, 1);
1401 	lwkt_reltoken(&hmp->io_token);
1402 
1403 	return(0);
1404 }
1405 
1406 /*
1407  * Return non-zero if we wish to delay the kernel's attempt to flush
1408  * this buffer to disk.
1409  *
1410  * bioops callback - hold io_token
1411  */
1412 static int
1413 hammer_io_countdeps(struct buf *bp, int n)
1414 {
1415 	/* nothing to do, so io_token not needed */
1416 	return(0);
1417 }
1418 
1419 static struct bio_ops hammer_bioops = {
1420 	.io_start	= hammer_io_start,
1421 	.io_complete	= hammer_io_complete,
1422 	.io_deallocate	= hammer_io_deallocate,
1423 	.io_fsync	= hammer_io_fsync,
1424 	.io_sync	= hammer_io_sync,
1425 	.io_movedeps	= hammer_io_movedeps,
1426 	.io_countdeps	= hammer_io_countdeps,
1427 	.io_checkread	= hammer_io_checkread,
1428 	.io_checkwrite	= hammer_io_checkwrite,
1429 };
1430 
1431 /************************************************************************
1432  *				DIRECT IO OPS 				*
1433  ************************************************************************
1434  *
1435  * These functions operate directly on the buffer cache buffer associated
1436  * with a front-end vnode rather than a back-end device vnode.
1437  */
1438 
1439 /*
1440  * Read a buffer associated with a front-end vnode directly from the
1441  * disk media.  The bio may be issued asynchronously.  If leaf is non-NULL
1442  * we validate the CRC.
1443  *
1444  * We must check for the presence of a HAMMER buffer to handle the case
1445  * where the reblocker has rewritten the data (which it does via the HAMMER
1446  * buffer system, not via the high-level vnode buffer cache), but not yet
1447  * committed the buffer to the media.
1448  */
1449 int
1450 hammer_io_direct_read(hammer_mount_t hmp, struct bio *bio,
1451 		      hammer_btree_leaf_elm_t leaf)
1452 {
1453 	hammer_off_t buf_offset;
1454 	hammer_off_t zone2_offset;
1455 	hammer_volume_t volume;
1456 	struct buf *bp;
1457 	struct bio *nbio;
1458 	int vol_no;
1459 	int error;
1460 
1461 	buf_offset = bio->bio_offset;
1462 	KKASSERT(hammer_is_zone_large_data(buf_offset));
1463 
1464 	/*
1465 	 * The buffer cache may have an aliased buffer (the reblocker can
1466 	 * write them).  If it does we have to sync any dirty data before
1467 	 * we can build our direct-read.  This is a non-critical code path.
1468 	 */
1469 	bp = bio->bio_buf;
1470 	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
1471 
1472 	/*
1473 	 * Resolve to a zone-2 offset.  The conversion just requires
1474 	 * munging the top 4 bits but we want to abstract it anyway
1475 	 * so the blockmap code can verify the zone assignment.
1476 	 */
1477 	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1478 	if (error)
1479 		goto done;
1480 	KKASSERT(hammer_is_zone_raw_buffer(zone2_offset));
1481 
1482 	/*
1483 	 * Resolve volume and raw-offset for 3rd level bio.  The
1484 	 * offset will be specific to the volume.
1485 	 */
1486 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
1487 	volume = hammer_get_volume(hmp, vol_no, &error);
1488 	if (error == 0 && zone2_offset >= volume->maxbuf_off)
1489 		error = EIO;
1490 
1491 	if (error == 0) {
1492 		/*
1493 		 * 3rd level bio (the caller has already pushed once)
1494 		 */
1495 		nbio = push_bio(bio);
1496 		nbio->bio_offset = hammer_xlate_to_phys(volume->ondisk,
1497 							zone2_offset);
1498 		hammer_stats_disk_read += bp->b_bufsize;
1499 		vn_strategy(volume->devvp, nbio);
1500 	}
1501 	hammer_rel_volume(volume, 0);
1502 done:
1503 	if (error) {
1504 		hdkprintf("failed @ %016jx\n", (intmax_t)zone2_offset);
1505 		bp->b_error = error;
1506 		bp->b_flags |= B_ERROR;
1507 		biodone(bio);
1508 	}
1509 	return(error);
1510 }
1511 
1512 /*
1513  * This works similarly to hammer_io_direct_read() except instead of
1514  * directly reading from the device into the bio we instead indirectly
1515  * read through the device's buffer cache and then copy the data into
1516  * the bio.
1517  *
1518  * If leaf is non-NULL and validation is enabled, the CRC will be checked.
1519  *
1520  * This routine also executes asynchronously.  It allows hammer strategy
1521  * calls to operate asynchronously when in double_buffer mode (in addition
1522  * to operating asynchronously when in normal mode).
1523  */
1524 int
1525 hammer_io_indirect_read(hammer_mount_t hmp, struct bio *bio,
1526 			hammer_btree_leaf_elm_t leaf)
1527 {
1528 	hammer_off_t buf_offset;
1529 	hammer_off_t zone2_offset;
1530 	hammer_volume_t volume;
1531 	struct buf *bp;
1532 	int vol_no;
1533 	int error;
1534 
1535 	buf_offset = bio->bio_offset;
1536 	KKASSERT(hammer_is_zone_large_data(buf_offset));
1537 
1538 	/*
1539 	 * The buffer cache may have an aliased buffer (the reblocker can
1540 	 * write them).  If it does we have to sync any dirty data before
1541 	 * we can build our direct-read.  This is a non-critical code path.
1542 	 */
1543 	bp = bio->bio_buf;
1544 	hammer_sync_buffers(hmp, buf_offset, bp->b_bufsize);
1545 
1546 	/*
1547 	 * Resolve to a zone-2 offset.  The conversion just requires
1548 	 * munging the top 4 bits but we want to abstract it anyway
1549 	 * so the blockmap code can verify the zone assignment.
1550 	 */
1551 	zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1552 	if (error)
1553 		goto done;
1554 	KKASSERT(hammer_is_zone_raw_buffer(zone2_offset));
1555 
1556 	/*
1557 	 * Resolve volume and raw-offset for 3rd level bio.  The
1558 	 * offset will be specific to the volume.
1559 	 */
1560 	vol_no = HAMMER_VOL_DECODE(zone2_offset);
1561 	volume = hammer_get_volume(hmp, vol_no, &error);
1562 	if (error == 0 && zone2_offset >= volume->maxbuf_off)
1563 		error = EIO;
1564 
1565 	if (error == 0) {
1566 		/*
1567 		 * Convert to the raw volume->devvp offset and acquire
1568 		 * the buf, issuing async I/O if necessary.
1569 		 */
1570 		hammer_off_t limit;
1571 		int hce;
1572 
1573 		buf_offset = hammer_xlate_to_phys(volume->ondisk, zone2_offset);
1574 
1575 		if (leaf && hammer_verify_data) {
1576 			bio->bio_caller_info1.uvalue32 = leaf->data_crc;
1577 			bio->bio_caller_info2.index = 1;
1578 		} else {
1579 			bio->bio_caller_info2.index = 0;
1580 		}
1581 		bio->bio_caller_info3.ptr = hmp;
1582 
1583 		hce = hammer_cluster_enable;
1584 		if (hce > 0) {
1585 			limit = HAMMER_BIGBLOCK_DOALIGN(zone2_offset);
1586 			limit -= zone2_offset;
1587 			cluster_readcb(volume->devvp, limit, buf_offset,
1588 				       bp->b_bufsize,
1589 				       B_NOTMETA,
1590 				       HAMMER_CLUSTER_SIZE,
1591 				       HAMMER_CLUSTER_SIZE * hce,
1592 				       hammer_indirect_callback,
1593 				       bio);
1594 		} else {
1595 			breadcb(volume->devvp, buf_offset, bp->b_bufsize,
1596 				B_NOTMETA,
1597 				hammer_indirect_callback, bio);
1598 		}
1599 	}
1600 	hammer_rel_volume(volume, 0);
1601 done:
1602 	if (error) {
1603 		hdkprintf("failed @ %016jx\n", (intmax_t)zone2_offset);
1604 		bp->b_error = error;
1605 		bp->b_flags |= B_ERROR;
1606 		biodone(bio);
1607 	}
1608 	return(error);
1609 }
1610 
1611 /*
1612  * Indirect callback on completion.  bio/bp specify the device-backed
1613  * buffer.  bio->bio_caller_info1.ptr holds obio.
1614  *
1615  * obio/obp is the original regular file buffer.  obio->bio_caller_info*
1616  * contains the crc specification.
1617  *
1618  * We are responsible for calling bpdone() and bqrelse() on bio/bp, and
1619  * for calling biodone() on obio.
1620  */
1621 static void
1622 hammer_indirect_callback(struct bio *bio)
1623 {
1624 	struct buf *bp = bio->bio_buf;
1625 	struct buf *obp;
1626 	struct bio *obio;
1627 	hammer_mount_t hmp;
1628 
1629 	/*
1630 	 * If BIO_DONE is already set the device buffer was already
1631 	 * fully valid (B_CACHE).  If it is not set then I/O was issued
1632 	 * and we have to run I/O completion as the last bio.
1633 	 *
1634 	 * Nobody is waiting for our device I/O to complete, we are
1635 	 * responsible for bqrelse()ing it which means we also have to do
1636 	 * the equivalent of biowait() and clear BIO_DONE (which breadcb()
1637 	 * may have set).
1638 	 *
1639 	 * Any preexisting device buffer should match the requested size,
1640 	 * but due to big-block recycling and other factors there is some
1641 	 * fragility there, so we assert that the device buffer covers
1642 	 * the request.
1643 	 */
1644 	if ((bio->bio_flags & BIO_DONE) == 0)
1645 		bpdone(bp, 0);
1646 	bio->bio_flags &= ~(BIO_DONE | BIO_SYNC);
1647 
1648 	obio = bio->bio_caller_info1.ptr;
1649 	obp = obio->bio_buf;
1650 	hmp = obio->bio_caller_info3.ptr;
1651 
1652 	if (bp->b_flags & B_ERROR) {
1653 		/*
1654 		 * Error from block device
1655 		 */
1656 		obp->b_flags |= B_ERROR;
1657 		obp->b_error = bp->b_error;
1658 	} else if (obio->bio_caller_info2.index &&
1659 		   obio->bio_caller_info1.uvalue32 !=
1660 		    hammer_datacrc(hmp->version,
1661 				   bp->b_data, obp->b_bufsize) &&
1662 		    obio->bio_caller_info1.uvalue32 !=
1663 		    hammer_datacrc(HAMMER_VOL_VERSION_SIX,
1664 				   bp->b_data, obp->b_bufsize)) {
1665 		/*
1666 		 * CRC error.  First check against current hammer version,
1667 		 * then back-off and check against version 6 (the original
1668 		 * crc).
1669 		 */
1670 		obp->b_flags |= B_ERROR;
1671 		obp->b_error = EIO;
1672 	} else {
1673 		/*
1674 		 * Everything is ok
1675 		 */
1676 		KKASSERT(bp->b_bufsize >= obp->b_bufsize);
1677 		bcopy(bp->b_data, obp->b_data, obp->b_bufsize);
1678 		obp->b_resid = 0;
1679 		obp->b_flags |= B_AGE;
1680 	}
1681 	biodone(obio);
1682 	bqrelse(bp);
1683 }
1684 
1685 /*
1686  * Write a buffer associated with a front-end vnode directly to the
1687  * disk media.  The bio may be issued asynchronously.
1688  *
1689  * The BIO is associated with the specified record and RECG_DIRECT_IO
1690  * is set.  The recorded is added to its object.
1691  */
1692 int
1693 hammer_io_direct_write(hammer_mount_t hmp, struct bio *bio,
1694 		       hammer_record_t record)
1695 {
1696 	hammer_btree_leaf_elm_t leaf = &record->leaf;
1697 	hammer_off_t buf_offset;
1698 	hammer_off_t zone2_offset;
1699 	hammer_volume_t volume;
1700 	hammer_buffer_t buffer;
1701 	struct buf *bp;
1702 	struct bio *nbio;
1703 	char *ptr;
1704 	int vol_no;
1705 	int error;
1706 
1707 	buf_offset = leaf->data_offset;
1708 
1709 	KKASSERT(hammer_is_zone_record(buf_offset));
1710 	KKASSERT(bio->bio_buf->b_cmd == BUF_CMD_WRITE);
1711 
1712 	/*
1713 	 * Issue or execute the I/O.  The new memory record must replace
1714  * the old one before the I/O completes, otherwise a reacquisition of
1715 	 * the buffer will load the old media data instead of the new.
1716 	 */
1717 	if ((buf_offset & HAMMER_BUFMASK) == 0 &&
1718 	    leaf->data_len >= HAMMER_BUFSIZE) {
1719 		/*
1720 		 * We are using the vnode's bio to write directly to the
1721 		 * media, any hammer_buffer at the same zone-X offset will
1722 		 * now have stale data.
1723 		 */
1724 		zone2_offset = hammer_blockmap_lookup(hmp, buf_offset, &error);
1725 		vol_no = HAMMER_VOL_DECODE(zone2_offset);
1726 		volume = hammer_get_volume(hmp, vol_no, &error);
1727 
1728 		if (error == 0 && zone2_offset >= volume->maxbuf_off)
1729 			error = EIO;
1730 		if (error == 0) {
1731 			bp = bio->bio_buf;
1732 			KKASSERT((bp->b_bufsize & HAMMER_BUFMASK) == 0);
1733 
1734 			/*
1735 			 * Second level bio - cached zone2 offset.
1736 			 *
1737 			 * (We can put our bio_done function in either the
1738 			 *  2nd or 3rd level).
1739 			 */
1740 			nbio = push_bio(bio);
1741 			nbio->bio_offset = zone2_offset;
1742 			nbio->bio_done = hammer_io_direct_write_complete;
1743 			nbio->bio_caller_info1.ptr = record;
1744 			record->zone2_offset = zone2_offset;
1745 			record->gflags |= HAMMER_RECG_DIRECT_IO |
1746 					 HAMMER_RECG_DIRECT_INVAL;
1747 
1748 			/*
1749 			 * Third level bio - raw offset specific to the
1750 			 * correct volume.
1751 			 */
1752 			nbio = push_bio(nbio);
1753 			nbio->bio_offset = hammer_xlate_to_phys(volume->ondisk,
1754 								zone2_offset);
1755 			hammer_stats_disk_write += bp->b_bufsize;
1756 			hammer_ip_replace_bulk(hmp, record);
1757 			vn_strategy(volume->devvp, nbio);
1758 			hammer_io_flush_mark(volume);
1759 		}
1760 		hammer_rel_volume(volume, 0);
1761 	} else {
1762 		/*
1763 		 * Must fit in a standard HAMMER buffer.  In this case all
1764 		 * consumers use the HAMMER buffer system and RECG_DIRECT_IO
1765 		 * does not need to be set up.
1766 		 */
1767 		KKASSERT(((buf_offset ^ (buf_offset + leaf->data_len - 1)) & ~HAMMER_BUFMASK64) == 0);
1768 		buffer = NULL;
1769 		ptr = hammer_bread(hmp, buf_offset, &error, &buffer);
1770 		if (error == 0) {
1771 			bp = bio->bio_buf;
1772 			bp->b_flags |= B_AGE;
1773 			hammer_io_modify(&buffer->io, 1);
1774 			bcopy(bp->b_data, ptr, leaf->data_len);
1775 			hammer_io_modify_done(&buffer->io);
1776 			hammer_rel_buffer(buffer, 0);
1777 			bp->b_resid = 0;
1778 			hammer_ip_replace_bulk(hmp, record);
1779 			biodone(bio);
1780 		}
1781 	}
1782 	if (error) {
1783 		/*
1784 		 * Major suckage occurred.  Also note:  The record was
1785 		 * never added to the tree so we do not have to worry
1786 		 * about the backend.
1787 		 */
1788 		hdkprintf("failed @ %016jx\n", (intmax_t)leaf->data_offset);
1789 		bp = bio->bio_buf;
1790 		bp->b_resid = 0;
1791 		bp->b_error = EIO;
1792 		bp->b_flags |= B_ERROR;
1793 		biodone(bio);
1794 		record->flags |= HAMMER_RECF_DELETED_FE;
1795 		hammer_rel_mem_record(record);
1796 	}
1797 	return(error);
1798 }
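/*
 * Summary of the two write paths above (a sketch derived from the code,
 * not additional behavior):
 *
 *	if ((leaf->data_offset & HAMMER_BUFMASK) == 0 &&
 *	    leaf->data_len >= HAMMER_BUFSIZE) {
 *		// Full, buffer-aligned extent: translate the zone-X
 *		// offset to zone-2 and then to a raw volume offset,
 *		// push two bio levels and write directly to
 *		// volume->devvp with RECG_DIRECT_IO set.
 *	} else {
 *		// Partial extent: copy the data into a regular
 *		// hammer_buffer via hammer_bread()/bcopy() and
 *		// complete the frontend bio immediately.
 *	}
 *
 * In both success paths hammer_ip_replace_bulk() installs the new memory
 * record before the write can complete, so a reacquisition of the buffer
 * cannot observe stale media data.
 */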
1799 
1800 /*
1801  * On completion of the BIO this callback must disconnect
1802  * it from the hammer_record and chain to the previous bio.
1803  *
1804  * An I/O error forces the mount to read-only.  Data buffers
1805  * are not B_LOCKED like meta-data buffers are, so we have to
1806  * throw the buffer away to prevent the kernel from retrying.
1807  *
1808  * NOTE: MPSAFE callback, only modify fields we have explicit
1809  *	 access to (the bp and the record->gflags).
1810  */
1811 static
1812 void
1813 hammer_io_direct_write_complete(struct bio *nbio)
1814 {
1815 	struct bio *obio;
1816 	struct buf *bp;
1817 	hammer_record_t record;
1818 	hammer_mount_t hmp;
1819 
1820 	record = nbio->bio_caller_info1.ptr;
1821 	KKASSERT(record != NULL);
1822 	hmp = record->ip->hmp;
1823 
1824 	lwkt_gettoken(&hmp->io_token);
1825 
1826 	bp = nbio->bio_buf;
1827 	obio = pop_bio(nbio);
1828 	if (bp->b_flags & B_ERROR) {
1829 		lwkt_gettoken(&hmp->fs_token);
1830 		hammer_critical_error(hmp, record->ip, bp->b_error,
1831 				      "while writing bulk data");
1832 		lwkt_reltoken(&hmp->fs_token);
1833 		bp->b_flags |= B_INVAL;
1834 	}
1835 
1836 	KKASSERT(record->gflags & HAMMER_RECG_DIRECT_IO);
1837 	if (record->gflags & HAMMER_RECG_DIRECT_WAIT) {
1838 		record->gflags &= ~(HAMMER_RECG_DIRECT_IO |
1839 				    HAMMER_RECG_DIRECT_WAIT);
1840 		/* record can disappear once DIRECT_IO flag is cleared */
1841 		wakeup(&record->flags);
1842 	} else {
1843 		record->gflags &= ~HAMMER_RECG_DIRECT_IO;
1844 		/* record can disappear once DIRECT_IO flag is cleared */
1845 	}
1846 
1847 	lwkt_reltoken(&hmp->io_token);
1848 
1849 	biodone(obio);
1850 }
1851 
1852 
1853 /*
1854  * This is called before a record is either committed to the B-Tree
1855  * or destroyed, to resolve any associated direct-IO.
1856  *
1857  * (1) We must wait for any direct-IO related to the record to complete.
1858  *
1859  * (2) We must remove any buffer cache aliases for data accessed via
1860  *     leaf->data_offset or zone2_offset so non-direct-IO consumers
1861  *     (the mirroring and reblocking code) do not see stale data.
1862  */
1863 void
1864 hammer_io_direct_wait(hammer_record_t record)
1865 {
1866 	hammer_mount_t hmp = record->ip->hmp;
1867 
1868 	/*
1869 	 * Wait for I/O to complete
1870 	 */
1871 	if (record->gflags & HAMMER_RECG_DIRECT_IO) {
1872 		lwkt_gettoken(&hmp->io_token);
1873 		while (record->gflags & HAMMER_RECG_DIRECT_IO) {
1874 			record->gflags |= HAMMER_RECG_DIRECT_WAIT;
1875 			tsleep(&record->flags, 0, "hmdiow", 0);
1876 		}
1877 		lwkt_reltoken(&hmp->io_token);
1878 	}
1879 
1880 	/*
1881 	 * Invalidate any related buffer cache aliases associated with the
1882 	 * backing device.  This is needed because the buffer cache buffer
1883 	 * for file data is associated with the file vnode, not the backing
1884 	 * device vnode.
1885 	 *
1886 	 * XXX I do not think this case can occur any more now that
1887 	 * reservations ensure that all such buffers are removed before
1888 	 * an area can be reused.
1889 	 */
1890 	if (record->gflags & HAMMER_RECG_DIRECT_INVAL) {
1891 		KKASSERT(record->leaf.data_offset);
1892 		hammer_del_buffers(hmp, record->leaf.data_offset,
1893 				   record->zone2_offset, record->leaf.data_len,
1894 				   1);
1895 		record->gflags &= ~HAMMER_RECG_DIRECT_INVAL;
1896 	}
1897 }
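/*
 * The handshake with hammer_io_direct_write_complete() looks like this
 * (sketch only; both sides run under hmp->io_token):
 *
 *	waiter:		while (gflags & HAMMER_RECG_DIRECT_IO) {
 *				gflags |= HAMMER_RECG_DIRECT_WAIT;
 *				tsleep(&record->flags, 0, "hmdiow", 0);
 *			}
 *	completion:	gflags &= ~(HAMMER_RECG_DIRECT_IO |
 *				    HAMMER_RECG_DIRECT_WAIT);
 *			wakeup(&record->flags);
 *
 * Because the completion clears HAMMER_RECG_DIRECT_IO before issuing the
 * wakeup, the record may be freed as soon as anyone observes the flag
 * clear, which is why the completion never touches the record afterwards.
 */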
1898 
1899 /*
1900  * This is called to remove the second-level cached zone-2 offset from
1901  * frontend buffer cache buffers, now stale due to a data relocation.
1902  * These offsets are generated by cluster_read() via VOP_BMAP, or directly
1903  * by hammer_vop_strategy_read().
1904  *
1905  * This is rather nasty because here we have something like the reblocker
1906  * scanning the raw B-Tree with no held references on anything, really,
1907  * other than a shared lock on the B-Tree node, and we have to access the
1908  * frontend's buffer cache to check for and clean out the association.
1909  * Specifically, if the reblocker is moving data on the disk, these cached
1910  * offsets will become invalid.
1911  *
1912  * Only data record types associated with the large-data zone are subject
1913  * to direct-io and need to be checked.
1914  *
1915  */
1916 void
1917 hammer_io_direct_uncache(hammer_mount_t hmp, hammer_btree_leaf_elm_t leaf)
1918 {
1919 	struct hammer_inode_info iinfo;
1920 	int zone;
1921 
1922 	if (leaf->base.rec_type != HAMMER_RECTYPE_DATA)
1923 		return;
1924 	zone = HAMMER_ZONE_DECODE(leaf->data_offset);
1925 	if (zone != HAMMER_ZONE_LARGE_DATA_INDEX)
1926 		return;
1927 	iinfo.obj_id = leaf->base.obj_id;
1928 	iinfo.obj_asof = 0;	/* unused */
1929 	iinfo.obj_localization = leaf->base.localization &
1930 				 HAMMER_LOCALIZE_PSEUDOFS_MASK;
1931 	iinfo.u.leaf = leaf;
1932 	hammer_scan_inode_snapshots(hmp, &iinfo,
1933 				    hammer_io_direct_uncache_callback,
1934 				    leaf);
1935 }
1936 
1937 static int
1938 hammer_io_direct_uncache_callback(hammer_inode_t ip, void *data)
1939 {
1940 	hammer_inode_info_t iinfo = data;
1941 	hammer_off_t file_offset;
1942 	struct vnode *vp;
1943 	struct buf *bp;
1944 	int blksize;
1945 
1946 	if (ip->vp == NULL)
1947 		return(0);
1948 	file_offset = iinfo->u.leaf->base.key - iinfo->u.leaf->data_len;
1949 	blksize = iinfo->u.leaf->data_len;
1950 	KKASSERT((blksize & HAMMER_BUFMASK) == 0);
1951 
1952 	/*
1953 	 * Warning: FINDBLK_TEST returns stable storage but not stable
1954 	 *	    contents.  It happens to be ok in this case.
1955 	 */
1956 	hammer_ref(&ip->lock);
1957 	if (hammer_get_vnode(ip, &vp) == 0) {
1958 		if ((bp = findblk(ip->vp, file_offset, FINDBLK_TEST)) != NULL &&
1959 		    bp->b_bio2.bio_offset != NOOFFSET) {
1960 			bp = getblk(ip->vp, file_offset, blksize, 0, 0);
1961 			bp->b_bio2.bio_offset = NOOFFSET;
1962 			brelse(bp);
1963 		}
1964 		vput(vp);
1965 	}
1966 	hammer_rel_inode(ip, 0);
1967 	return(0);
1968 }
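/*
 * The findblk/getblk sequence above is the usual lockless-probe pattern
 * (sketch of the intent, mirroring the callback):
 *
 *	bp = findblk(vp, file_offset, FINDBLK_TEST);	// probe, no lock
 *	if (bp && bp->b_bio2.bio_offset != NOOFFSET) {
 *		bp = getblk(vp, file_offset, blksize, 0, 0);	// locked
 *		bp->b_bio2.bio_offset = NOOFFSET;	// drop cached zone-2
 *		brelse(bp);
 *	}
 *
 * The unlocked probe only guarantees stable storage, not stable contents;
 * the locked getblk() re-lookup is what makes the actual invalidation
 * safe even if the buffer was recycled in the meantime.
 */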
1969 
1970 
1971 /*
1972  * This function is called when writes may have occurred on the volume,
1973  * indicating that the device may be holding cached writes.
1974  */
1975 static __inline void
1976 hammer_io_flush_mark(hammer_volume_t volume)
1977 {
1978 	atomic_set_int(&volume->vol_flags, HAMMER_VOLF_NEEDFLUSH);
1979 }
1980 
1981 /*
1982  * This function ensures that the device has flushed any cached writes out.
1983  */
1984 void
1985 hammer_io_flush_sync(hammer_mount_t hmp)
1986 {
1987 	hammer_volume_t volume;
1988 	struct buf *bp_base = NULL;
1989 	struct buf *bp;
1990 
1991 	RB_FOREACH(volume, hammer_vol_rb_tree, &hmp->rb_vols_root) {
1992 		if (volume->vol_flags & HAMMER_VOLF_NEEDFLUSH) {
1993 			atomic_clear_int(&volume->vol_flags,
1994 					 HAMMER_VOLF_NEEDFLUSH);
1995 			bp = getpbuf(NULL);
1996 			bp->b_bio1.bio_offset = 0;
1997 			bp->b_bufsize = 0;
1998 			bp->b_bcount = 0;
1999 			bp->b_cmd = BUF_CMD_FLUSH;
2000 			bp->b_bio1.bio_caller_info1.cluster_head = bp_base;
2001 			bp->b_bio1.bio_done = biodone_sync;
2002 			bp->b_bio1.bio_flags |= BIO_SYNC;
2003 			bp_base = bp;
2004 			vn_strategy(volume->devvp, &bp->b_bio1);
2005 		}
2006 	}
2007 	while ((bp = bp_base) != NULL) {
2008 		bp_base = bp->b_bio1.bio_caller_info1.cluster_head;
2009 		biowait(&bp->b_bio1, "hmrFLS");
2010 		relpbuf(bp, NULL);
2011 	}
2012 }
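/*
 * The flush fan-out above follows a simple pattern: one BUF_CMD_FLUSH
 * pbuf per volume that was marked HAMMER_VOLF_NEEDFLUSH, chained through
 * bio_caller_info1.cluster_head, then a single reap loop (sketch):
 *
 *	bp->b_cmd = BUF_CMD_FLUSH;		// cache flush, no data
 *	bp->b_bio1.bio_done = biodone_sync;	// wakes biowait()
 *	bp->b_bio1.bio_flags |= BIO_SYNC;
 *	vn_strategy(volume->devvp, &bp->b_bio1);
 *	...
 *	biowait(&bp->b_bio1, "hmrFLS");		// reap each in turn
 *
 * Issuing every flush before waiting on any of them lets the underlying
 * devices drain their write caches in parallel rather than one volume
 * at a time.
 */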
2013 
2014 /*
2015  * Limit the amount of backlog which we allow to build up
2016  */
2017 void
2018 hammer_io_limit_backlog(hammer_mount_t hmp)
2019 {
2020 	waitrunningbufspace();
2021 }
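/*
 * waitrunningbufspace() is the kernel's global throttle on async write
 * I/O in flight; hammer_io_limit_backlog() simply blocks until the
 * running buffer space falls back under the kernel's limit so HAMMER
 * does not queue an unbounded amount of dirty data ahead of the device.
 */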
2022