xref: /dragonfly/sys/vfs/hammer/hammer_vnops.c (revision aeaecd48)
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/mountctl.h>
36 #include <sys/namecache.h>
37 #include <sys/buf2.h>
38 #include <vfs/fifofs/fifo.h>
39 
40 #include "hammer.h"
41 
42 /*
43  * USERFS VNOPS
44  */
45 static int hammer_vop_fsync(struct vop_fsync_args *);
46 static int hammer_vop_read(struct vop_read_args *);
47 static int hammer_vop_write(struct vop_write_args *);
48 static int hammer_vop_access(struct vop_access_args *);
49 static int hammer_vop_advlock(struct vop_advlock_args *);
50 static int hammer_vop_close(struct vop_close_args *);
51 static int hammer_vop_ncreate(struct vop_ncreate_args *);
52 static int hammer_vop_getattr(struct vop_getattr_args *);
53 static int hammer_vop_nresolve(struct vop_nresolve_args *);
54 static int hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
55 static int hammer_vop_nlink(struct vop_nlink_args *);
56 static int hammer_vop_nmkdir(struct vop_nmkdir_args *);
57 static int hammer_vop_nmknod(struct vop_nmknod_args *);
58 static int hammer_vop_open(struct vop_open_args *);
59 static int hammer_vop_print(struct vop_print_args *);
60 static int hammer_vop_readdir(struct vop_readdir_args *);
61 static int hammer_vop_readlink(struct vop_readlink_args *);
62 static int hammer_vop_nremove(struct vop_nremove_args *);
63 static int hammer_vop_nrename(struct vop_nrename_args *);
64 static int hammer_vop_nrmdir(struct vop_nrmdir_args *);
65 static int hammer_vop_markatime(struct vop_markatime_args *);
66 static int hammer_vop_setattr(struct vop_setattr_args *);
67 static int hammer_vop_strategy(struct vop_strategy_args *);
68 static int hammer_vop_bmap(struct vop_bmap_args *ap);
69 static int hammer_vop_nsymlink(struct vop_nsymlink_args *);
70 static int hammer_vop_nwhiteout(struct vop_nwhiteout_args *);
71 static int hammer_vop_ioctl(struct vop_ioctl_args *);
72 static int hammer_vop_mountctl(struct vop_mountctl_args *);
73 static int hammer_vop_kqfilter (struct vop_kqfilter_args *);
74 
75 static int hammer_vop_fifoclose (struct vop_close_args *);
76 static int hammer_vop_fiforead (struct vop_read_args *);
77 static int hammer_vop_fifowrite (struct vop_write_args *);
78 static int hammer_vop_fifokqfilter (struct vop_kqfilter_args *);
79 
80 struct vop_ops hammer_vnode_vops = {
81 	.vop_default =		vop_defaultop,
82 	.vop_fsync =		hammer_vop_fsync,
83 	.vop_getpages =		vop_stdgetpages,
84 	.vop_putpages =		vop_stdputpages,
85 	.vop_read =		hammer_vop_read,
86 	.vop_write =		hammer_vop_write,
87 	.vop_access =		hammer_vop_access,
88 	.vop_advlock =		hammer_vop_advlock,
89 	.vop_close =		hammer_vop_close,
90 	.vop_ncreate =		hammer_vop_ncreate,
91 	.vop_getattr =		hammer_vop_getattr,
92 	.vop_inactive =		hammer_vop_inactive,
93 	.vop_reclaim =		hammer_vop_reclaim,
94 	.vop_nresolve =		hammer_vop_nresolve,
95 	.vop_nlookupdotdot =	hammer_vop_nlookupdotdot,
96 	.vop_nlink =		hammer_vop_nlink,
97 	.vop_nmkdir =		hammer_vop_nmkdir,
98 	.vop_nmknod =		hammer_vop_nmknod,
99 	.vop_open =		hammer_vop_open,
100 	.vop_pathconf =		vop_stdpathconf,
101 	.vop_print =		hammer_vop_print,
102 	.vop_readdir =		hammer_vop_readdir,
103 	.vop_readlink =		hammer_vop_readlink,
104 	.vop_nremove =		hammer_vop_nremove,
105 	.vop_nrename =		hammer_vop_nrename,
106 	.vop_nrmdir =		hammer_vop_nrmdir,
107 	.vop_markatime =	hammer_vop_markatime,
108 	.vop_setattr =		hammer_vop_setattr,
109 	.vop_bmap =		hammer_vop_bmap,
110 	.vop_strategy =		hammer_vop_strategy,
111 	.vop_nsymlink =		hammer_vop_nsymlink,
112 	.vop_nwhiteout =	hammer_vop_nwhiteout,
113 	.vop_ioctl =		hammer_vop_ioctl,
114 	.vop_mountctl =		hammer_vop_mountctl,
115 	.vop_kqfilter =		hammer_vop_kqfilter
116 };
117 
118 struct vop_ops hammer_spec_vops = {
119 	.vop_default =		vop_defaultop,
120 	.vop_fsync =		hammer_vop_fsync,
121 	.vop_read =		vop_stdnoread,
122 	.vop_write =		vop_stdnowrite,
123 	.vop_access =		hammer_vop_access,
124 	.vop_close =		hammer_vop_close,
125 	.vop_markatime =	hammer_vop_markatime,
126 	.vop_getattr =		hammer_vop_getattr,
127 	.vop_inactive =		hammer_vop_inactive,
128 	.vop_reclaim =		hammer_vop_reclaim,
129 	.vop_setattr =		hammer_vop_setattr
130 };
131 
132 struct vop_ops hammer_fifo_vops = {
133 	.vop_default =		fifo_vnoperate,
134 	.vop_fsync =		hammer_vop_fsync,
135 	.vop_read =		hammer_vop_fiforead,
136 	.vop_write =		hammer_vop_fifowrite,
137 	.vop_access =		hammer_vop_access,
138 	.vop_close =		hammer_vop_fifoclose,
139 	.vop_markatime =	hammer_vop_markatime,
140 	.vop_getattr =		hammer_vop_getattr,
141 	.vop_inactive =		hammer_vop_inactive,
142 	.vop_reclaim =		hammer_vop_reclaim,
143 	.vop_setattr =		hammer_vop_setattr,
144 	.vop_kqfilter =		hammer_vop_fifokqfilter
145 };
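
/*
 * Note (added summary, not in the original source): a VOP call such as
 * VOP_READ() dispatches through the vop_ops table attached to the vnode,
 * so a read on a regular HAMMER vnode lands in hammer_vop_read() while a
 * read on a HAMMER-backed fifo lands in hammer_vop_fiforead(), with
 * fifo_vnoperate() handling any fifo operation not overridden above.
 */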
146 
147 static __inline
148 void
149 hammer_knote(struct vnode *vp, int flags)
150 {
151 	if (flags)
152 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
153 }
154 
155 static int hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
156 			   struct vnode *dvp, struct ucred *cred,
157 			   int flags, int isdir);
158 static int hammer_vop_strategy_read(struct vop_strategy_args *ap);
159 static int hammer_vop_strategy_write(struct vop_strategy_args *ap);
160 
161 /*
162  * hammer_vop_fsync { vp, waitfor }
163  *
164  * fsync() an inode to disk and wait for it to be completely committed
165  * such that the information would not be undone if a crash occurred after
166  * return.
167  *
168  * NOTE: HAMMER's fsync()s are going to remain expensive until we implement
169  *	 a REDO log.  A sysctl is provided to relax HAMMER's fsync()
170  *	 operation.
171  *
172  *	 Ultimately the combination of a REDO log and use of fast storage
173  *	 to front-end cluster caches will make fsync fast, but it isn't
174  *	 here yet.  And, in any case, we need real transactional
175  *	 all-or-nothing features which are not restricted to a single file.
176  */
177 static
178 int
179 hammer_vop_fsync(struct vop_fsync_args *ap)
180 {
181 	hammer_inode_t ip = VTOI(ap->a_vp);
182 	hammer_mount_t hmp = ip->hmp;
183 	int waitfor = ap->a_waitfor;
184 	int mode;
185 
186 	lwkt_gettoken(&hmp->fs_token);
187 
188 	/*
189 	 * Fsync rule relaxation (default is either full synchronous flush
190 	 * or REDO semantics with synchronous flush).
191 	 */
192 	if (ap->a_flags & VOP_FSYNC_SYSCALL) {
193 		switch(hammer_fsync_mode) {
194 		case 0:
195 mode0:
196 			/* no REDO, full synchronous flush */
197 			goto skip;
198 		case 1:
199 mode1:
200 			/* no REDO, full asynchronous flush */
201 			if (waitfor == MNT_WAIT)
202 				waitfor = MNT_NOWAIT;
203 			goto skip;
204 		case 2:
205 			/* REDO semantics, synchronous flush */
206 			if (hmp->version < HAMMER_VOL_VERSION_FOUR)
207 				goto mode0;
208 			mode = HAMMER_FLUSH_UNDOS_AUTO;
209 			break;
210 		case 3:
211 			/* REDO semantics, relaxed asynchronous flush */
212 			if (hmp->version < HAMMER_VOL_VERSION_FOUR)
213 				goto mode1;
214 			mode = HAMMER_FLUSH_UNDOS_RELAXED;
215 			if (waitfor == MNT_WAIT)
216 				waitfor = MNT_NOWAIT;
217 			break;
218 		case 4:
219 			/* ignore the fsync() system call */
220 			lwkt_reltoken(&hmp->fs_token);
221 			return(0);
222 		default:
223 			/* we have to do something */
224 			mode = HAMMER_FLUSH_UNDOS_RELAXED;
225 			if (waitfor == MNT_WAIT)
226 				waitfor = MNT_NOWAIT;
227 			break;
228 		}
229 
230 		/*
231 		 * Fast fsync only needs to flush the UNDO/REDO fifo if
232 		 * HAMMER_INODE_REDO is non-zero and the only modifications
233 		 * made to the file are write or write-extends.
234 		 */
235 		if ((ip->flags & HAMMER_INODE_REDO) &&
236 		    (ip->flags & HAMMER_INODE_MODMASK_NOREDO) == 0) {
237 			++hammer_count_fsyncs;
238 			hammer_flusher_flush_undos(hmp, mode);
239 			ip->redo_count = 0;
240 			if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
241 				vclrisdirty(ip->vp);
242 			lwkt_reltoken(&hmp->fs_token);
243 			return(0);
244 		}
245 
246 		/*
247 		 * REDO is enabled by fsync(), the idea being we really only
248 		 * want to lay down REDO records when programs are using
249 		 * fsync() heavily.  The first fsync() on the file starts
250 		 * the gravy train going and later fsync()s keep it hot by
251 		 * resetting the redo_count.
252 		 *
253 		 * We weren't running REDOs before now so we have to fall
254 		 * through and do a full fsync of what we have.
255 		 */
256 		if (hmp->version >= HAMMER_VOL_VERSION_FOUR &&
257 		    (hmp->flags & HAMMER_MOUNT_REDO_RECOVERY_RUN) == 0) {
258 			ip->flags |= HAMMER_INODE_REDO;
259 			ip->redo_count = 0;
260 		}
261 	}
262 skip:
263 
264 	/*
265 	 * Do a full flush sequence.
266 	 *
267 	 * Attempt to release the vnode while waiting for the inode to
268 	 * finish flushing.  This can really mess up inactive->reclaim
269 	 * sequences so only do it if the vnode is active.
270 	 *
271 	 * WARNING! The VX lock functions must be used.  vn_lock() will
272 	 *	    fail when this is part of a VOP_RECLAIM sequence.
273 	 */
274 	++hammer_count_fsyncs;
275 	vfsync(ap->a_vp, waitfor, 1, NULL, NULL);
276 	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
277 	if (waitfor == MNT_WAIT) {
278 		int dorelock;
279 
280 		if ((ap->a_vp->v_flag & VRECLAIMED) == 0) {
281 			vx_unlock(ap->a_vp);
282 			dorelock = 1;
283 		} else {
284 			dorelock = 0;
285 		}
286 		hammer_wait_inode(ip);
287 		if (dorelock)
288 			vx_lock(ap->a_vp);
289 	}
290 	if (ip->vp && (ip->flags & HAMMER_INODE_MODMASK) == 0)
291 		vclrisdirty(ip->vp);
292 	lwkt_reltoken(&hmp->fs_token);
293 	return (ip->error);
294 }
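
/*
 * Summary sketch of the fsync relaxation modes handled above, keyed by
 * hammer_fsync_mode (exported as a sysctl; vfs.hammer.fsync_mode is the
 * assumed name):
 *
 *	0 - no REDO, full synchronous flush
 *	1 - no REDO, full asynchronous flush
 *	2 - REDO semantics, synchronous flush (volume version >= 4)
 *	3 - REDO semantics, relaxed asynchronous flush (version >= 4)
 *	4 - fsync() system calls are ignored entirely
 *
 * Other values fall back to relaxed asynchronous behavior, and modes 2/3
 * degrade to 0/1 on pre-version-4 volumes.
 */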
295 
296 /*
297  * hammer_vop_read { vp, uio, ioflag, cred }
298  *
299  * MPSAFE (reads served from the buffer cache do not require fs_token)
300  */
301 static
302 int
303 hammer_vop_read(struct vop_read_args *ap)
304 {
305 	struct hammer_transaction trans;
306 	hammer_inode_t ip;
307 	hammer_mount_t hmp;
308 	off_t offset;
309 	struct buf *bp;
310 	struct uio *uio;
311 	int error;
312 	int n;
313 	int seqcount;
314 	int ioseqcount;
315 	int blksize;
316 	int bigread;
317 	int got_trans;
318 	size_t resid;
319 
320 	if (ap->a_vp->v_type != VREG)
321 		return (EINVAL);
322 	ip = VTOI(ap->a_vp);
323 	hmp = ip->hmp;
324 	error = 0;
325 	got_trans = 0;
326 	uio = ap->a_uio;
327 
328 	/*
329 	 * Attempt to shortcut directly to the VM object using lwbufs.
330 	 * This is much faster than instantiating buffer cache buffers.
331 	 */
332 	resid = uio->uio_resid;
333 	error = vop_helper_read_shortcut(ap);
334 	hammer_stats_file_read += resid - uio->uio_resid;
335 	if (error)
336 		return (error);
337 	if (uio->uio_resid == 0)
338 		goto finished;
339 
340 	/*
341 	 * Allow the UIO's size to override the sequential heuristic.
342 	 */
343 	blksize = hammer_blocksize(uio->uio_offset);
344 	seqcount = (uio->uio_resid + (BKVASIZE - 1)) / BKVASIZE;
345 	ioseqcount = (ap->a_ioflag >> 16);
346 	if (seqcount < ioseqcount)
347 		seqcount = ioseqcount;
348 
349 	/*
350 	 * If reading or writing a huge amount of data we have to break
351  * atomicity and allow the operation to be interrupted by a signal
352 	 * or it can DOS the machine.
353 	 */
354 	bigread = (uio->uio_resid > 100 * 1024 * 1024);
355 
356 	/*
357 	 * Access the data typically in HAMMER_BUFSIZE blocks via the
358 	 * buffer cache, but HAMMER may use a variable block size based
359 	 * on the offset.
360 	 *
361 	 * XXX Temporary hack, delay the start transaction while we remain
362 	 *     MPSAFE.  NOTE: ino_data.size cannot change while vnode is
363 	 *     locked-shared.
364 	 */
365 	while (uio->uio_resid > 0 && uio->uio_offset < ip->ino_data.size) {
366 		int64_t base_offset;
367 		int64_t file_limit;
368 
369 		blksize = hammer_blocksize(uio->uio_offset);
370 		offset = (int)uio->uio_offset & (blksize - 1);
371 		base_offset = uio->uio_offset - offset;
372 
373 		if (bigread && (error = hammer_signal_check(ip->hmp)) != 0)
374 			break;
375 
376 		/*
377 		 * MPSAFE
378 		 */
379 		bp = getblk(ap->a_vp, base_offset, blksize, 0, 0);
380 		if ((bp->b_flags & (B_INVAL | B_CACHE | B_RAM)) == B_CACHE) {
381 			bp->b_flags &= ~B_AGE;
382 			error = 0;
383 			goto skip;
384 		}
385 		if (ap->a_ioflag & IO_NRDELAY) {
386 			bqrelse(bp);
387 			return (EWOULDBLOCK);
388 		}
389 
390 		/*
391 		 * MPUNSAFE
392 		 */
393 		if (got_trans == 0) {
394 			hammer_start_transaction(&trans, ip->hmp);
395 			got_trans = 1;
396 		}
397 
398 		/*
399 		 * NOTE: A valid bp has already been acquired, but was not
400 		 *	 B_CACHE.
401 		 */
402 		if (hammer_cluster_enable) {
403 			/*
404 			 * Use file_limit to prevent cluster_read() from
405 			 * creating buffers of the wrong block size past
406 			 * the demarc.
407 			 */
408 			file_limit = ip->ino_data.size;
409 			if (base_offset < HAMMER_XDEMARC &&
410 			    file_limit > HAMMER_XDEMARC) {
411 				file_limit = HAMMER_XDEMARC;
412 			}
413 			error = cluster_readx(ap->a_vp,
414 					     file_limit, base_offset,
415 					     blksize, uio->uio_resid,
416 					     seqcount * BKVASIZE, &bp);
417 		} else {
418 			error = breadnx(ap->a_vp, base_offset, blksize,
419 					NULL, NULL, 0, &bp);
420 		}
421 		if (error) {
422 			brelse(bp);
423 			break;
424 		}
425 skip:
426 		if ((hammer_debug_io & 0x0001) && (bp->b_flags & B_IODEBUG)) {
427 			hdkprintf("zone2_offset %016jx read file %016jx@%016jx\n",
428 				(intmax_t)bp->b_bio2.bio_offset,
429 				(intmax_t)ip->obj_id,
430 				(intmax_t)bp->b_loffset);
431 		}
432 		bp->b_flags &= ~B_IODEBUG;
433 		if (blksize == HAMMER_XBUFSIZE)
434 			bp->b_flags |= B_CLUSTEROK;
435 
436 		n = blksize - offset;
437 		if (n > uio->uio_resid)
438 			n = uio->uio_resid;
439 		if (n > ip->ino_data.size - uio->uio_offset)
440 			n = (int)(ip->ino_data.size - uio->uio_offset);
441 
442 		/*
443 		 * Set B_AGE, data has a lower priority than meta-data.
444 		 *
445 		 * Use a hold/unlock/drop sequence to run the uiomove
446 		 * with the buffer unlocked, avoiding deadlocks against
447 		 * read()s on mmap()'d spaces.
448 		 */
449 		bp->b_flags |= B_AGE;
450 		error = uiomovebp(bp, (char *)bp->b_data + offset, n, uio);
451 		bqrelse(bp);
452 
453 		if (error)
454 			break;
455 		hammer_stats_file_read += n;
456 	}
457 
458 finished:
459 
460 	/*
461 	 * Try to update the atime with just the inode lock for maximum
462 	 * concurrency.  If we can't shortcut it we have to get the full
463 	 * blown transaction.
464 	 */
465 	if (got_trans == 0 && hammer_update_atime_quick(ip) < 0) {
466 		hammer_start_transaction(&trans, ip->hmp);
467 		got_trans = 1;
468 	}
469 
470 	if (got_trans) {
471 		if ((ip->flags & HAMMER_INODE_RO) == 0 &&
472 		    (ip->hmp->mp->mnt_flag & MNT_NOATIME) == 0) {
473 			lwkt_gettoken(&hmp->fs_token);
474 			ip->ino_data.atime = trans.time;
475 			hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
476 			hammer_done_transaction(&trans);
477 			lwkt_reltoken(&hmp->fs_token);
478 		} else {
479 			hammer_done_transaction(&trans);
480 		}
481 	}
482 	return (error);
483 }
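
#if 0
/*
 * Illustrative sketch (not compiled, not part of the original source):
 * how the read loop above splits a file offset into a buffer-aligned
 * base and an intra-buffer offset.  Assumes hammer_blocksize() returns
 * a power-of-2 buffer size for the given offset.
 */
static void
hammer_read_offset_example(off_t uoff)
{
	int blksize = hammer_blocksize(uoff);	/* power of 2 */
	int offset = (int)uoff & (blksize - 1);	/* offset within buffer */
	off_t base_offset = uoff - offset;	/* buffer-aligned base */

	/* getblk()/cluster_readx() then operate on (base_offset, blksize) */
}
#endif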
484 
485 /*
486  * hammer_vop_write { vp, uio, ioflag, cred }
487  */
488 static
489 int
490 hammer_vop_write(struct vop_write_args *ap)
491 {
492 	struct hammer_transaction trans;
493 	struct hammer_inode *ip;
494 	hammer_mount_t hmp;
495 	thread_t td;
496 	struct uio *uio;
497 	int offset;
498 	off_t base_offset;
499 	int64_t cluster_eof;
500 	struct buf *bp;
501 	int kflags;
502 	int error;
503 	int n;
504 	int flags;
505 	int seqcount;
506 	int bigwrite;
507 
508 	if (ap->a_vp->v_type != VREG)
509 		return (EINVAL);
510 	ip = VTOI(ap->a_vp);
511 	hmp = ip->hmp;
512 	error = 0;
513 	kflags = 0;
514 	seqcount = ap->a_ioflag >> 16;
515 
516 	if (ip->flags & HAMMER_INODE_RO)
517 		return (EROFS);
518 
519 	/*
520 	 * Create a transaction to cover the operations we perform.
521 	 */
522 	hammer_start_transaction(&trans, hmp);
523 	uio = ap->a_uio;
524 
525 	/*
526 	 * Check append mode
527 	 */
528 	if (ap->a_ioflag & IO_APPEND)
529 		uio->uio_offset = ip->ino_data.size;
530 
531 	/*
532 	 * Check for illegal write offsets.  Valid range is 0...2^63-1.
533 	 *
534  * NOTE: the base_offset assignment is required to work around what
535 	 * I consider to be a GCC-4 optimization bug.
536 	 */
537 	if (uio->uio_offset < 0) {
538 		hammer_done_transaction(&trans);
539 		return (EFBIG);
540 	}
541 	base_offset = uio->uio_offset + uio->uio_resid;	/* work around gcc-4 */
542 	if (uio->uio_resid > 0 && base_offset <= uio->uio_offset) {
543 		hammer_done_transaction(&trans);
544 		return (EFBIG);
545 	}
546 
547 	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
548 	    base_offset > td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
549 		hammer_done_transaction(&trans);
550 		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
551 		return (EFBIG);
552 	}
553 
554 	/*
555 	 * If reading or writing a huge amount of data we have to break
556  * atomicity and allow the operation to be interrupted by a signal
557 	 * or it can DOS the machine.
558 	 *
559 	 * Preset redo_count so we stop generating REDOs earlier if the
560 	 * limit is exceeded.
561 	 *
562  * redo_count is heuristic; SMP races are ok
563 	 */
564 	bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
565 	if ((ip->flags & HAMMER_INODE_REDO) &&
566 	    ip->redo_count < hammer_limit_redo) {
567 		ip->redo_count += uio->uio_resid;
568 	}
569 
570 	/*
571 	 * Access the data typically in HAMMER_BUFSIZE blocks via the
572 	 * buffer cache, but HAMMER may use a variable block size based
573 	 * on the offset.
574 	 */
575 	while (uio->uio_resid > 0) {
576 		int fixsize = 0;
577 		int blksize;
578 		int blkmask;
579 		int trivial;
580 		int endofblk;
581 		off_t nsize;
582 
583 		if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE)) != 0)
584 			break;
585 		if (bigwrite && (error = hammer_signal_check(hmp)) != 0)
586 			break;
587 
588 		blksize = hammer_blocksize(uio->uio_offset);
589 
590 		/*
591 		 * Control the number of pending records associated with
592 		 * this inode.  If too many have accumulated start a
593 		 * flush.  Try to maintain a pipeline with the flusher.
594 		 *
595 		 * NOTE: It is possible for other sources to grow the
596 		 *	 records but not necessarily issue another flush,
597 		 *	 so use a timeout and ensure that a re-flush occurs.
598 		 */
599 		if (ip->rsv_recs >= hammer_limit_inode_recs) {
600 			lwkt_gettoken(&hmp->fs_token);
601 			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
602 			while (ip->rsv_recs >= hammer_limit_inode_recs * 2) {
603 				ip->flags |= HAMMER_INODE_RECSW;
604 				tsleep(&ip->rsv_recs, 0, "hmrwww", hz);
605 				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
606 			}
607 			lwkt_reltoken(&hmp->fs_token);
608 		}
609 
610 		/*
611 		 * Do not allow HAMMER to blow out the buffer cache.  Very
612 		 * large UIOs can lockout other processes due to bwillwrite()
613 		 * mechanics.
614 		 *
615 		 * The hammer inode is not locked during these operations.
616 		 * The vnode is locked which can interfere with the pageout
617 		 * daemon for non-UIO_NOCOPY writes but should not interfere
618 		 * with the buffer cache.  Even so, we cannot afford to
619 		 * allow the pageout daemon to build up too many dirty buffer
620 		 * cache buffers.
621 		 *
622 		 * Only call this if we aren't being recursively called from
623 		 * a virtual disk device (vn), else we may deadlock.
624 		 */
625 		if ((ap->a_ioflag & IO_RECURSE) == 0)
626 			bwillwrite(blksize);
627 
628 		/*
629 		 * Calculate the blocksize at the current offset and figure
630 		 * out how much we can actually write.
631 		 */
632 		blkmask = blksize - 1;
633 		offset = (int)uio->uio_offset & blkmask;
634 		base_offset = uio->uio_offset & ~(int64_t)blkmask;
635 		n = blksize - offset;
636 		if (n > uio->uio_resid) {
637 			n = uio->uio_resid;
638 			endofblk = 0;
639 		} else {
640 			endofblk = 1;
641 		}
642 		nsize = uio->uio_offset + n;
643 		if (nsize > ip->ino_data.size) {
644 			if (uio->uio_offset > ip->ino_data.size)
645 				trivial = 0;
646 			else
647 				trivial = 1;
648 			nvextendbuf(ap->a_vp,
649 				    ip->ino_data.size,
650 				    nsize,
651 				    hammer_blocksize(ip->ino_data.size),
652 				    hammer_blocksize(nsize),
653 				    hammer_blockoff(ip->ino_data.size),
654 				    hammer_blockoff(nsize),
655 				    trivial);
656 			fixsize = 1;
657 			kflags |= NOTE_EXTEND;
658 		}
659 
660 		if (uio->uio_segflg == UIO_NOCOPY) {
661 			/*
662 			 * Issuing a write with the same data backing the
663 			 * buffer.  Instantiate the buffer to collect the
664 			 * backing vm pages, then read-in any missing bits.
665 			 *
666 			 * This case is used by vop_stdputpages().
667 			 */
668 			bp = getblk(ap->a_vp, base_offset,
669 				    blksize, GETBLK_BHEAVY, 0);
670 			if ((bp->b_flags & B_CACHE) == 0) {
671 				bqrelse(bp);
672 				error = bread(ap->a_vp, base_offset,
673 					      blksize, &bp);
674 			}
675 		} else if (offset == 0 && uio->uio_resid >= blksize) {
676 			/*
677 			 * Even though we are entirely overwriting the buffer
678 			 * we may still have to zero it out to avoid a
679 			 * mmap/write visibility issue.
680 			 */
681 			bp = getblk(ap->a_vp, base_offset, blksize, GETBLK_BHEAVY, 0);
682 			if ((bp->b_flags & B_CACHE) == 0)
683 				vfs_bio_clrbuf(bp);
684 		} else if (base_offset >= ip->ino_data.size) {
685 			/*
686 			 * If the base offset of the buffer is beyond the
687 			 * file EOF, we don't have to issue a read.
688 			 */
689 			bp = getblk(ap->a_vp, base_offset,
690 				    blksize, GETBLK_BHEAVY, 0);
691 			vfs_bio_clrbuf(bp);
692 		} else {
693 			/*
694 			 * Partial overwrite, read in any missing bits then
695 			 * replace the portion being written.
696 			 */
697 			error = bread(ap->a_vp, base_offset, blksize, &bp);
698 			if (error == 0)
699 				bheavy(bp);
700 		}
701 		if (error == 0)
702 			error = uiomovebp(bp, bp->b_data + offset, n, uio);
703 
704 		lwkt_gettoken(&hmp->fs_token);
705 
706 		/*
707 		 * Generate REDO records if enabled and redo_count will not
708  * exceed the limit.
709 		 *
710 		 * If redo_count exceeds the limit we stop generating records
711 		 * and clear HAMMER_INODE_REDO.  This will cause the next
712 		 * fsync() to do a full meta-data sync instead of just an
713 		 * UNDO/REDO fifo update.
714 		 *
715 		 * When clearing HAMMER_INODE_REDO any pre-existing REDOs
716 		 * will still be tracked.  The tracks will be terminated
717 		 * when the related meta-data (including possible data
718 		 * modifications which are not tracked via REDO) is
719 		 * flushed.
720 		 */
721 		if ((ip->flags & HAMMER_INODE_REDO) && error == 0) {
722 			if (ip->redo_count < hammer_limit_redo) {
723 				bp->b_flags |= B_VFSFLAG1;
724 				error = hammer_generate_redo(&trans, ip,
725 						     base_offset + offset,
726 						     HAMMER_REDO_WRITE,
727 						     bp->b_data + offset,
728 						     (size_t)n);
729 			} else {
730 				ip->flags &= ~HAMMER_INODE_REDO;
731 			}
732 		}
733 
734 		/*
735 		 * If we screwed up we have to undo any VM size changes we
736 		 * made.
737 		 */
738 		if (error) {
739 			brelse(bp);
740 			if (fixsize) {
741 				nvtruncbuf(ap->a_vp, ip->ino_data.size,
742 					  hammer_blocksize(ip->ino_data.size),
743 					  hammer_blockoff(ip->ino_data.size),
744 					  0);
745 			}
746 			lwkt_reltoken(&hmp->fs_token);
747 			break;
748 		}
749 		kflags |= NOTE_WRITE;
750 		hammer_stats_file_write += n;
751 		if (blksize == HAMMER_XBUFSIZE)
752 			bp->b_flags |= B_CLUSTEROK;
753 		if (ip->ino_data.size < uio->uio_offset) {
754 			ip->ino_data.size = uio->uio_offset;
755 			flags = HAMMER_INODE_SDIRTY;
756 		} else {
757 			flags = 0;
758 		}
759 		ip->ino_data.mtime = trans.time;
760 		flags |= HAMMER_INODE_MTIME | HAMMER_INODE_BUFS;
761 		hammer_modify_inode(&trans, ip, flags);
762 
763 		/*
764 		 * Once we dirty the buffer any cached zone-X offset
765 		 * becomes invalid.  HAMMER NOTE: no-history mode cannot
766 		 * allow overwriting over the same data sector unless
767 		 * we provide UNDOs for the old data, which we don't.
768 		 */
769 		bp->b_bio2.bio_offset = NOOFFSET;
770 
771 		lwkt_reltoken(&hmp->fs_token);
772 
773 		/*
774 		 * Final buffer disposition.
775 		 *
776 		 * Because meta-data updates are deferred, HAMMER is
777 		 * especially sensitive to excessive bdwrite()s because
778 		 * the I/O stream is not broken up by disk reads.  So the
779 		 * buffer cache simply cannot keep up.
780 		 *
781 		 * WARNING!  blksize is variable.  cluster_write() is
782 		 *	     expected to not blow up if it encounters
783 		 *	     buffers that do not match the passed blksize.
784 		 *
785 		 * NOTE!  Hammer shouldn't need to bawrite()/cluster_write().
786 		 *	  The ip->rsv_recs check should burst-flush the data.
787 		 *	  If we queue it immediately the buf could be left
788 		 *	  locked on the device queue for a very long time.
789 		 *
790 		 *	  However, failing to flush a dirty buffer out when
791  *	  issued from the pageout daemon can result in a low
792  *	  memory deadlock against bio_page_alloc(), so we
793 		 *	  have to bawrite() on IO_ASYNC as well.
794 		 *
795 		 * NOTE!  To avoid degenerate stalls due to mismatched block
796 		 *	  sizes we only honor IO_DIRECT on the write which
797 		 *	  abuts the end of the buffer.  However, we must
798 		 *	  honor IO_SYNC in case someone is silly enough to
799 		 *	  configure a HAMMER file as swap, or when HAMMER
800 		 *	  is serving NFS (for commits).  Ick ick.
801 		 */
802 		bp->b_flags |= B_AGE;
803 		if (blksize == HAMMER_XBUFSIZE)
804 			bp->b_flags |= B_CLUSTEROK;
805 
806 		if (ap->a_ioflag & IO_SYNC) {
807 			bwrite(bp);
808 		} else if ((ap->a_ioflag & IO_DIRECT) && endofblk) {
809 			bawrite(bp);
810 		} else if (ap->a_ioflag & IO_ASYNC) {
811 			bawrite(bp);
812 		} else if (hammer_cluster_enable &&
813 			   !(ap->a_vp->v_mount->mnt_flag & MNT_NOCLUSTERW)) {
814 			if (base_offset < HAMMER_XDEMARC)
815 				cluster_eof = hammer_blockdemarc(base_offset,
816 							 ip->ino_data.size);
817 			else
818 				cluster_eof = ip->ino_data.size;
819 			cluster_write(bp, cluster_eof, blksize, seqcount);
820 		} else {
821 			bdwrite(bp);
822 		}
823 	}
824 	hammer_done_transaction(&trans);
825 	hammer_knote(ap->a_vp, kflags);
826 
827 	return (error);
828 }
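
/*
 * Condensed summary of the buffer disposition ladder at the bottom of
 * hammer_vop_write() above (order matters; first match wins):
 *
 *	IO_SYNC				-> bwrite()	synchronous
 *	IO_DIRECT && endofblk		-> bawrite()	async, queued now
 *	IO_ASYNC			-> bawrite()	pageout deadlock
 *							avoidance
 *	clustering on, !MNT_NOCLUSTERW	-> cluster_write()
 *	otherwise			-> bdwrite()	delayed write
 */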
829 
830 /*
831  * hammer_vop_access { vp, mode, cred }
832  *
833  * MPSAFE - does not require fs_token
834  */
835 static
836 int
837 hammer_vop_access(struct vop_access_args *ap)
838 {
839 	struct hammer_inode *ip = VTOI(ap->a_vp);
840 	uid_t uid;
841 	gid_t gid;
842 	int error;
843 
844 	++hammer_stats_file_iopsr;
845 	uid = hammer_to_unix_xid(&ip->ino_data.uid);
846 	gid = hammer_to_unix_xid(&ip->ino_data.gid);
847 
848 	error = vop_helper_access(ap, uid, gid, ip->ino_data.mode,
849 				  ip->ino_data.uflags);
850 	return (error);
851 }
852 
853 /*
854  * hammer_vop_advlock { vp, id, op, fl, flags }
855  *
856  * MPSAFE - does not require fs_token
857  */
858 static
859 int
860 hammer_vop_advlock(struct vop_advlock_args *ap)
861 {
862 	hammer_inode_t ip = VTOI(ap->a_vp);
863 
864 	return (lf_advlock(ap, &ip->advlock, ip->ino_data.size));
865 }
866 
867 /*
868  * hammer_vop_close { vp, fflag }
869  *
870  * We can only sync-on-close for normal closes.  XXX disabled for now.
871  */
872 static
873 int
874 hammer_vop_close(struct vop_close_args *ap)
875 {
876 #if 0
877 	struct vnode *vp = ap->a_vp;
878 	hammer_inode_t ip = VTOI(vp);
879 	int waitfor;
880 	if (ip->flags & (HAMMER_INODE_CLOSESYNC|HAMMER_INODE_CLOSEASYNC)) {
881 		if (vn_islocked(vp) == LK_EXCLUSIVE &&
882 		    (vp->v_flag & (VINACTIVE|VRECLAIMED)) == 0) {
883 			if (ip->flags & HAMMER_INODE_CLOSESYNC)
884 				waitfor = MNT_WAIT;
885 			else
886 				waitfor = MNT_NOWAIT;
887 			ip->flags &= ~(HAMMER_INODE_CLOSESYNC |
888 				       HAMMER_INODE_CLOSEASYNC);
889 			VOP_FSYNC(vp, MNT_NOWAIT, waitfor);
890 		}
891 	}
892 #endif
893 	return (vop_stdclose(ap));
894 }
895 
896 /*
897  * hammer_vop_ncreate { nch, dvp, vpp, cred, vap }
898  *
899  * The operating system has already ensured that the directory entry
900  * does not exist and done all appropriate namespace locking.
901  */
902 static
903 int
904 hammer_vop_ncreate(struct vop_ncreate_args *ap)
905 {
906 	struct hammer_transaction trans;
907 	struct hammer_inode *dip;
908 	struct hammer_inode *nip;
909 	struct nchandle *nch;
910 	hammer_mount_t hmp;
911 	int error;
912 
913 	nch = ap->a_nch;
914 	dip = VTOI(ap->a_dvp);
915 	hmp = dip->hmp;
916 
917 	if (dip->flags & HAMMER_INODE_RO)
918 		return (EROFS);
919 	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
920 		return (error);
921 
922 	/*
923 	 * Create a transaction to cover the operations we perform.
924 	 */
925 	lwkt_gettoken(&hmp->fs_token);
926 	hammer_start_transaction(&trans, hmp);
927 	++hammer_stats_file_iopsw;
928 
929 	/*
930 	 * Create a new filesystem object of the requested type.  The
931 	 * returned inode will be referenced and shared-locked to prevent
932 	 * it from being moved to the flusher.
933 	 */
934 	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
935 				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
936 				    NULL, &nip);
937 	if (error) {
938 		hkprintf("hammer_create_inode error %d\n", error);
939 		hammer_done_transaction(&trans);
940 		*ap->a_vpp = NULL;
941 		lwkt_reltoken(&hmp->fs_token);
942 		return (error);
943 	}
944 
945 	/*
946 	 * Add the new filesystem object to the directory.  This will also
947 	 * bump the inode's link count.
948 	 */
949 	error = hammer_ip_add_directory(&trans, dip,
950 					nch->ncp->nc_name, nch->ncp->nc_nlen,
951 					nip);
952 	if (error)
953 		hkprintf("hammer_ip_add_directory error %d\n", error);
954 
955 	/*
956 	 * Finish up.
957 	 */
958 	if (error) {
959 		hammer_rel_inode(nip, 0);
960 		hammer_done_transaction(&trans);
961 		*ap->a_vpp = NULL;
962 	} else {
963 		error = hammer_get_vnode(nip, ap->a_vpp);
964 		hammer_done_transaction(&trans);
965 		hammer_rel_inode(nip, 0);
966 		if (error == 0) {
967 			cache_setunresolved(ap->a_nch);
968 			cache_setvp(ap->a_nch, *ap->a_vpp);
969 		}
970 		hammer_knote(ap->a_dvp, NOTE_WRITE);
971 	}
972 	lwkt_reltoken(&hmp->fs_token);
973 	return (error);
974 }
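
/*
 * Create-path summary: hammer_vop_ncreate() above, and nmkdir/nmknod
 * below, all follow the same pattern: allocate the inode with
 * hammer_create_inode(), enter it into the directory with
 * hammer_ip_add_directory() (which bumps the link count), then resolve
 * a vnode and wire up the namecache entry on success.
 */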
975 
976 /*
977  * hammer_vop_getattr { vp, vap }
978  *
979  * Retrieve an inode's attribute information.  When accessing inodes
980  * historically we fake the atime field to ensure consistent results.
981  * The atime field is stored in the B-Tree element and allowed to be
982  * updated without cycling the element.
983  *
984  * MPSAFE - does not require fs_token
985  */
986 static
987 int
988 hammer_vop_getattr(struct vop_getattr_args *ap)
989 {
990 	struct hammer_inode *ip = VTOI(ap->a_vp);
991 	struct vattr *vap = ap->a_vap;
992 
993 	/*
994 	 * We want the fsid to be different when accessing a filesystem
995 	 * with different as-of's so programs like diff don't think
996 	 * the files are the same.
997 	 *
998 	 * We also want the fsid to be the same when comparing snapshots,
999 	 * or when comparing mirrors (which might be backed by different
1000 	 * physical devices).  HAMMER fsids are based on the PFS's
1001 	 * shared_uuid field.
1002 	 *
1003 	 * XXX there is a chance of collision here.  The va_fsid reported
1004 	 * by stat is different from the more involved fsid used in the
1005 	 * mount structure.
1006 	 */
1007 	++hammer_stats_file_iopsr;
1008 	hammer_lock_sh(&ip->lock);
1009 	vap->va_fsid = ip->pfsm->fsid_udev ^ (uint32_t)ip->obj_asof ^
1010 		       (uint32_t)(ip->obj_asof >> 32);
1011 
1012 	vap->va_fileid = ip->ino_leaf.base.obj_id;
1013 	vap->va_mode = ip->ino_data.mode;
1014 	vap->va_nlink = ip->ino_data.nlinks;
1015 	vap->va_uid = hammer_to_unix_xid(&ip->ino_data.uid);
1016 	vap->va_gid = hammer_to_unix_xid(&ip->ino_data.gid);
1017 	vap->va_rmajor = 0;
1018 	vap->va_rminor = 0;
1019 	vap->va_size = ip->ino_data.size;
1020 
1021 	/*
1022 	 * Special case for @@PFS softlinks.  The actual size of the
1023 	 * expanded softlink is "@@0x%016llx:%05d" == 26 bytes (2+2+16+1+5),
1024 	 * or for MAX_TID it is "@@-1:%05d" == 10 bytes (5+5).
1025 	 *
1026 	 * Note that the userspace hammer command does not allow users to
1027 	 * create a @@PFS softlink under another existing PFS (id != 0),
1028 	 * so the ip localization here for a @@PFS softlink is always 0.
1029 	 */
1030 	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_SOFTLINK &&
1031 	    ip->ino_data.size == 10 &&
1032 	    ip->obj_asof == HAMMER_MAX_TID &&
1033 	    ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
1034 	    strncmp(ip->ino_data.ext.symlink, "@@PFS", 5) == 0) {
1035 		if (ip->pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE)
1036 			vap->va_size = 26;
1037 		else
1038 			vap->va_size = 10;
1039 	}
1040 
1041 	/*
1042 	 * We must provide a consistent atime and mtime for snapshots
1043 	 * so people can do a 'tar cf - ... | md5' on them and get
1044 	 * consistent results.
1045 	 */
1046 	if (ip->flags & HAMMER_INODE_RO) {
1047 		hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_atime);
1048 		hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_mtime);
1049 	} else {
1050 		hammer_time_to_timespec(ip->ino_data.atime, &vap->va_atime);
1051 		hammer_time_to_timespec(ip->ino_data.mtime, &vap->va_mtime);
1052 	}
1053 	hammer_time_to_timespec(ip->ino_data.ctime, &vap->va_ctime);
1054 	vap->va_flags = ip->ino_data.uflags;
1055 	vap->va_gen = 1;	/* hammer inums are unique for all time */
1056 	vap->va_blocksize = HAMMER_BUFSIZE;
1057 	if (ip->ino_data.size >= HAMMER_XDEMARC) {
1058 		vap->va_bytes = (ip->ino_data.size + HAMMER_XBUFMASK64) &
1059 				~HAMMER_XBUFMASK64;
1060 	} else if (ip->ino_data.size > HAMMER_HBUFSIZE) {
1061 		vap->va_bytes = (ip->ino_data.size + HAMMER_BUFMASK64) &
1062 				~HAMMER_BUFMASK64;
1063 	} else {
1064 		vap->va_bytes = (ip->ino_data.size + 15) & ~15;
1065 	}
1066 
1067 	vap->va_type = hammer_get_vnode_type(ip->ino_data.obj_type);
1068 	vap->va_filerev = 0;	/* XXX */
1069 	vap->va_uid_uuid = ip->ino_data.uid;
1070 	vap->va_gid_uuid = ip->ino_data.gid;
1071 	vap->va_fsid_uuid = ip->hmp->fsid;
1072 	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
1073 			  VA_FSID_UUID_VALID;
1074 
1075 	switch (ip->ino_data.obj_type) {
1076 	case HAMMER_OBJTYPE_CDEV:
1077 	case HAMMER_OBJTYPE_BDEV:
1078 		vap->va_rmajor = ip->ino_data.rmajor;
1079 		vap->va_rminor = ip->ino_data.rminor;
1080 		break;
1081 	default:
1082 		break;
1083 	}
1084 	hammer_unlock(&ip->lock);
1085 	return(0);
1086 }
1087 
1088 /*
1089  * hammer_vop_nresolve { nch, dvp, cred }
1090  *
1091  * Locate the requested directory entry.
1092  */
1093 static
1094 int
1095 hammer_vop_nresolve(struct vop_nresolve_args *ap)
1096 {
1097 	struct hammer_transaction trans;
1098 	struct namecache *ncp;
1099 	hammer_mount_t hmp;
1100 	hammer_inode_t dip;
1101 	hammer_inode_t ip;
1102 	hammer_tid_t asof;
1103 	struct hammer_cursor cursor;
1104 	struct vnode *vp;
1105 	int64_t namekey;
1106 	int error;
1107 	int i;
1108 	int nlen;
1109 	int flags;
1110 	int ispfs;
1111 	int64_t obj_id;
1112 	uint32_t localization;
1113 	uint32_t max_iterations;
1114 
1115 	/*
1116 	 * Misc initialization, plus handle as-of name extensions.  Look for
1117 	 * the '@@' extension.  Note that as-of files and directories cannot
1118 	 * be modified.
1119 	 */
1120 	dip = VTOI(ap->a_dvp);
1121 	ncp = ap->a_nch->ncp;
1122 	asof = dip->obj_asof;
1123 	localization = dip->obj_localization;	/* for code consistency */
1124 	nlen = ncp->nc_nlen;
1125 	flags = dip->flags & HAMMER_INODE_RO;
1126 	ispfs = 0;
1127 	hmp = dip->hmp;
1128 
1129 	lwkt_gettoken(&hmp->fs_token);
1130 	hammer_simple_transaction(&trans, hmp);
1131 	++hammer_stats_file_iopsr;
1132 
1133 	for (i = 0; i < nlen; ++i) {
1134 		if (ncp->nc_name[i] == '@' && ncp->nc_name[i+1] == '@') {
1135 			error = hammer_str_to_tid(ncp->nc_name + i + 2,
1136 						  &ispfs, &asof, &localization);
1137 			if (error != 0) {
1138 				i = nlen;
1139 				break;
1140 			}
1141 			if (asof != HAMMER_MAX_TID)
1142 				flags |= HAMMER_INODE_RO;
1143 			break;
1144 		}
1145 	}
1146 	nlen = i;
1147 
1148 	/*
1149 	 * If this is a PFS softlink we dive into the PFS
1150 	 */
1151 	if (ispfs && nlen == 0) {
1152 		ip = hammer_get_inode(&trans, dip, HAMMER_OBJID_ROOT,
1153 				      asof, localization,
1154 				      flags, &error);
1155 		if (error == 0) {
1156 			error = hammer_get_vnode(ip, &vp);
1157 			hammer_rel_inode(ip, 0);
1158 		} else {
1159 			vp = NULL;
1160 		}
1161 		if (error == 0) {
1162 			vn_unlock(vp);
1163 			cache_setvp(ap->a_nch, vp);
1164 			vrele(vp);
1165 		}
1166 		goto done;
1167 	}
1168 
1169 	/*
1170 	 * If there is no path component the time extension is relative to dip.
1171 	 * e.g. "fubar/@@<snapshot>"
1172 	 *
1173 	 * "." is handled by the kernel, but ".@@<snapshot>" is not.
1174 	 * e.g. "fubar/.@@<snapshot>"
1175 	 *
1176 	 * ".." is handled by the kernel.  We do not currently handle
1177  * "..@@<snapshot>".
1178 	 */
1179 	if (nlen == 0 || (nlen == 1 && ncp->nc_name[0] == '.')) {
1180 		ip = hammer_get_inode(&trans, dip, dip->obj_id,
1181 				      asof, dip->obj_localization,
1182 				      flags, &error);
1183 		if (error == 0) {
1184 			error = hammer_get_vnode(ip, &vp);
1185 			hammer_rel_inode(ip, 0);
1186 		} else {
1187 			vp = NULL;
1188 		}
1189 		if (error == 0) {
1190 			vn_unlock(vp);
1191 			cache_setvp(ap->a_nch, vp);
1192 			vrele(vp);
1193 		}
1194 		goto done;
1195 	}
1196 
1197 	/*
1198 	 * Calculate the namekey and setup the key range for the scan.  This
1199 	 * works kinda like a chained hash table where the lower 32 bits
1200 	 * of the namekey synthesize the chain.
1201 	 *
1202 	 * The key range is inclusive of both key_beg and key_end.
1203 	 */
1204 	namekey = hammer_directory_namekey(dip, ncp->nc_name, nlen,
1205 					   &max_iterations);
1206 
1207 	error = hammer_init_cursor(&trans, &cursor, &dip->cache[1], dip);
1208 	cursor.key_beg.localization = dip->obj_localization |
1209 				      hammer_dir_localization(dip);
1210 	cursor.key_beg.obj_id = dip->obj_id;
1211 	cursor.key_beg.key = namekey;
1212 	cursor.key_beg.create_tid = 0;
1213 	cursor.key_beg.delete_tid = 0;
1214 	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1215 	cursor.key_beg.obj_type = 0;
1216 
1217 	cursor.key_end = cursor.key_beg;
1218 	cursor.key_end.key += max_iterations;
1219 	cursor.asof = asof;
1220 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1221 
1222 	/*
1223 	 * Scan all matching records (the chain), locate the one matching
1224 	 * the requested path component.
1225 	 *
1226 	 * The hammer_ip_*() functions merge in-memory records with on-disk
1227 	 * records for the purposes of the search.
1228 	 */
1229 	obj_id = 0;
1230 	localization = HAMMER_DEF_LOCALIZATION;
1231 
1232 	if (error == 0) {
1233 		error = hammer_ip_first(&cursor);
1234 		while (error == 0) {
1235 			error = hammer_ip_resolve_data(&cursor);
1236 			if (error)
1237 				break;
1238 			if (nlen == cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF &&
1239 			    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
1240 				obj_id = cursor.data->entry.obj_id;
1241 				localization = cursor.data->entry.localization;
1242 				break;
1243 			}
1244 			error = hammer_ip_next(&cursor);
1245 		}
1246 	}
1247 	hammer_done_cursor(&cursor);
1248 
1249 	/*
1250 	 * Lookup the obj_id.  This should always succeed.  If it does not
1251 	 * the filesystem may be damaged and we return a dummy inode.
1252 	 */
1253 	if (error == 0) {
1254 		ip = hammer_get_inode(&trans, dip, obj_id,
1255 				      asof, localization,
1256 				      flags, &error);
1257 		if (error == ENOENT) {
1258 			hkprintf("WARNING: Missing inode for dirent \"%s\"\n"
1259 				"\tobj_id = %016llx, asof=%016llx, lo=%08x\n",
1260 				ncp->nc_name,
1261 				(long long)obj_id, (long long)asof,
1262 				localization);
1263 			error = 0;
1264 			ip = hammer_get_dummy_inode(&trans, dip, obj_id,
1265 						    asof, localization,
1266 						    flags, &error);
1267 		}
1268 		if (error == 0) {
1269 			error = hammer_get_vnode(ip, &vp);
1270 			hammer_rel_inode(ip, 0);
1271 		} else {
1272 			vp = NULL;
1273 		}
1274 		if (error == 0) {
1275 			vn_unlock(vp);
1276 			cache_setvp(ap->a_nch, vp);
1277 			vrele(vp);
1278 		}
1279 	} else if (error == ENOENT) {
1280 		cache_setvp(ap->a_nch, NULL);
1281 	}
1282 done:
1283 	hammer_done_transaction(&trans);
1284 	lwkt_reltoken(&hmp->fs_token);
1285 	return (error);
1286 }
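
/*
 * Illustrative examples of the '@@' as-of name extension handled above
 * (the TIDs shown are made up):
 *
 *	fubar@@0x00000001061a8c10	- fubar as of the given TID (RO)
 *	fubar/@@0x00000001061a8c10	- snapshot access to fubar itself
 *	@@-1:00001			- dive into PFS #1 (the ispfs case)
 */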
1287 
1288 /*
1289  * hammer_vop_nlookupdotdot { dvp, vpp, cred }
1290  *
1291  * Locate the parent directory of a directory vnode.
1292  *
1293  * dvp is referenced but not locked.  *vpp must be returned referenced and
1294  * locked.  A parent_obj_id of 0 does not necessarily indicate that we are
1295  * at the root, instead it could indicate that the directory we were in was
1296  * removed.
1297  *
1298  * NOTE: as-of sequences are not linked into the directory structure.  If
1299  * we are at the root with a different asof then the mount point, reload
1300  * the same directory with the mount point's asof.   I'm not sure what this
1301  * will do to NFS.  We encode ASOF stamps in NFS file handles so it might not
1302  * get confused, but it hasn't been tested.
1303  */
1304 static
1305 int
1306 hammer_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1307 {
1308 	struct hammer_transaction trans;
1309 	struct hammer_inode *dip;
1310 	struct hammer_inode *ip;
1311 	hammer_mount_t hmp;
1312 	int64_t parent_obj_id;
1313 	uint32_t parent_obj_localization;
1314 	hammer_tid_t asof;
1315 	int error;
1316 
1317 	dip = VTOI(ap->a_dvp);
1318 	asof = dip->obj_asof;
1319 	hmp = dip->hmp;
1320 
1321 	/*
1322 	 * Who is our parent?  This could be the root of a pseudo-filesystem
1323 	 * whose parent is in another localization domain.
1324 	 */
1325 	lwkt_gettoken(&hmp->fs_token);
1326 	parent_obj_id = dip->ino_data.parent_obj_id;
1327 	if (dip->obj_id == HAMMER_OBJID_ROOT)
1328 		parent_obj_localization = HAMMER_DEF_LOCALIZATION;
1329 	else
1330 		parent_obj_localization = dip->obj_localization;
1331 
1332 	/*
1333 	 * It's probably a PFS root when dip->ino_data.parent_obj_id is 0.
1334 	 */
1335 	if (parent_obj_id == 0) {
1336 		if (dip->obj_id == HAMMER_OBJID_ROOT &&
1337 		   asof != hmp->asof) {
1338 			parent_obj_id = dip->obj_id;
1339 			asof = hmp->asof;
1340 			*ap->a_fakename = kmalloc(19, M_TEMP, M_WAITOK);
1341 			ksnprintf(*ap->a_fakename, 19, "0x%016llx",
1342 				  (long long)dip->obj_asof);
1343 		} else {
1344 			*ap->a_vpp = NULL;
1345 			lwkt_reltoken(&hmp->fs_token);
1346 			return ENOENT;
1347 		}
1348 	}
1349 
1350 	hammer_simple_transaction(&trans, hmp);
1351 	++hammer_stats_file_iopsr;
1352 
1353 	ip = hammer_get_inode(&trans, dip, parent_obj_id,
1354 			      asof, parent_obj_localization,
1355 			      dip->flags, &error);
1356 	if (ip) {
1357 		error = hammer_get_vnode(ip, ap->a_vpp);
1358 		hammer_rel_inode(ip, 0);
1359 	} else {
1360 		*ap->a_vpp = NULL;
1361 	}
1362 	hammer_done_transaction(&trans);
1363 	lwkt_reltoken(&hmp->fs_token);
1364 	return (error);
1365 }
1366 
1367 /*
1368  * hammer_vop_nlink { nch, dvp, vp, cred }
1369  */
1370 static
1371 int
1372 hammer_vop_nlink(struct vop_nlink_args *ap)
1373 {
1374 	struct hammer_transaction trans;
1375 	struct hammer_inode *dip;
1376 	struct hammer_inode *ip;
1377 	struct nchandle *nch;
1378 	hammer_mount_t hmp;
1379 	int error;
1380 
1381 	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1382 		return(EXDEV);
1383 
1384 	nch = ap->a_nch;
1385 	dip = VTOI(ap->a_dvp);
1386 	ip = VTOI(ap->a_vp);
1387 	hmp = dip->hmp;
1388 
1389 	if (dip->obj_localization != ip->obj_localization)
1390 		return(EXDEV);
1391 
1392 	if (dip->flags & HAMMER_INODE_RO)
1393 		return (EROFS);
1394 	if (ip->flags & HAMMER_INODE_RO)
1395 		return (EROFS);
1396 	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1397 		return (error);
1398 
1399 	/*
1400 	 * Create a transaction to cover the operations we perform.
1401 	 */
1402 	lwkt_gettoken(&hmp->fs_token);
1403 	hammer_start_transaction(&trans, hmp);
1404 	++hammer_stats_file_iopsw;
1405 
1406 	/*
1407 	 * Add the filesystem object to the directory.  Note that neither
1408 	 * dip nor ip are referenced or locked, but their vnodes are
1409 	 * referenced.  This function will bump the inode's link count.
1410 	 */
1411 	error = hammer_ip_add_directory(&trans, dip,
1412 					nch->ncp->nc_name, nch->ncp->nc_nlen,
1413 					ip);
1414 
1415 	/*
1416 	 * Finish up.
1417 	 */
1418 	if (error == 0) {
1419 		cache_setunresolved(nch);
1420 		cache_setvp(nch, ap->a_vp);
1421 	}
1422 	hammer_done_transaction(&trans);
1423 	hammer_knote(ap->a_vp, NOTE_LINK);
1424 	hammer_knote(ap->a_dvp, NOTE_WRITE);
1425 	lwkt_reltoken(&hmp->fs_token);
1426 	return (error);
1427 }
1428 
1429 /*
1430  * hammer_vop_nmkdir { nch, dvp, vpp, cred, vap }
1431  *
1432  * The operating system has already ensured that the directory entry
1433  * does not exist and done all appropriate namespace locking.
1434  */
1435 static
1436 int
1437 hammer_vop_nmkdir(struct vop_nmkdir_args *ap)
1438 {
1439 	struct hammer_transaction trans;
1440 	struct hammer_inode *dip;
1441 	struct hammer_inode *nip;
1442 	struct nchandle *nch;
1443 	hammer_mount_t hmp;
1444 	int error;
1445 
1446 	nch = ap->a_nch;
1447 	dip = VTOI(ap->a_dvp);
1448 	hmp = dip->hmp;
1449 
1450 	if (dip->flags & HAMMER_INODE_RO)
1451 		return (EROFS);
1452 	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1453 		return (error);
1454 
1455 	/*
1456 	 * Create a transaction to cover the operations we perform.
1457 	 */
1458 	lwkt_gettoken(&hmp->fs_token);
1459 	hammer_start_transaction(&trans, hmp);
1460 	++hammer_stats_file_iopsw;
1461 
1462 	/*
1463 	 * Create a new filesystem object of the requested type.  The
1464 	 * returned inode will be referenced but not locked.
1465 	 */
1466 	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1467 				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1468 				    NULL, &nip);
1469 	if (error) {
1470 		hammer_done_transaction(&trans);
1471 		*ap->a_vpp = NULL;
1472 		lwkt_reltoken(&hmp->fs_token);
1473 		return (error);
1474 	}
1475 	/*
1476 	 * Add the new filesystem object to the directory.  This will also
1477 	 * bump the inode's link count.
1478 	 */
1479 	error = hammer_ip_add_directory(&trans, dip,
1480 					nch->ncp->nc_name, nch->ncp->nc_nlen,
1481 					nip);
1482 	if (error)
1483 		hkprintf("hammer_mkdir (add) error %d\n", error);
1484 
1485 	/*
1486 	 * Finish up.
1487 	 */
1488 	if (error) {
1489 		hammer_rel_inode(nip, 0);
1490 		*ap->a_vpp = NULL;
1491 	} else {
1492 		error = hammer_get_vnode(nip, ap->a_vpp);
1493 		hammer_rel_inode(nip, 0);
1494 		if (error == 0) {
1495 			cache_setunresolved(ap->a_nch);
1496 			cache_setvp(ap->a_nch, *ap->a_vpp);
1497 		}
1498 	}
1499 	hammer_done_transaction(&trans);
1500 	if (error == 0)
1501 		hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1502 	lwkt_reltoken(&hmp->fs_token);
1503 	return (error);
1504 }
1505 
1506 /*
1507  * hammer_vop_nmknod { nch, dvp, vpp, cred, vap }
1508  *
1509  * The operating system has already ensured that the directory entry
1510  * does not exist and done all appropriate namespace locking.
1511  */
1512 static
1513 int
1514 hammer_vop_nmknod(struct vop_nmknod_args *ap)
1515 {
1516 	struct hammer_transaction trans;
1517 	struct hammer_inode *dip;
1518 	struct hammer_inode *nip;
1519 	struct nchandle *nch;
1520 	hammer_mount_t hmp;
1521 	int error;
1522 
1523 	nch = ap->a_nch;
1524 	dip = VTOI(ap->a_dvp);
1525 	hmp = dip->hmp;
1526 
1527 	if (dip->flags & HAMMER_INODE_RO)
1528 		return (EROFS);
1529 	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1530 		return (error);
1531 
1532 	/*
1533 	 * Create a transaction to cover the operations we perform.
1534 	 */
1535 	lwkt_gettoken(&hmp->fs_token);
1536 	hammer_start_transaction(&trans, hmp);
1537 	++hammer_stats_file_iopsw;
1538 
1539 	/*
1540 	 * Create a new filesystem object of the requested type.  The
1541 	 * returned inode will be referenced but not locked.
1542 	 *
1543 	 * If mknod specifies a directory a pseudo-fs is created.
1544 	 */
1545 	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
1546 				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
1547 				    NULL, &nip);
1548 	if (error) {
1549 		hammer_done_transaction(&trans);
1550 		*ap->a_vpp = NULL;
1551 		lwkt_reltoken(&hmp->fs_token);
1552 		return (error);
1553 	}
1554 
1555 	/*
1556 	 * Add the new filesystem object to the directory.  This will also
1557 	 * bump the inode's link count.
1558 	 */
1559 	error = hammer_ip_add_directory(&trans, dip,
1560 					nch->ncp->nc_name, nch->ncp->nc_nlen,
1561 					nip);
1562 
1563 	/*
1564 	 * Finish up.
1565 	 */
1566 	if (error) {
1567 		hammer_rel_inode(nip, 0);
1568 		*ap->a_vpp = NULL;
1569 	} else {
1570 		error = hammer_get_vnode(nip, ap->a_vpp);
1571 		hammer_rel_inode(nip, 0);
1572 		if (error == 0) {
1573 			cache_setunresolved(ap->a_nch);
1574 			cache_setvp(ap->a_nch, *ap->a_vpp);
1575 		}
1576 	}
1577 	hammer_done_transaction(&trans);
1578 	if (error == 0)
1579 		hammer_knote(ap->a_dvp, NOTE_WRITE);
1580 	lwkt_reltoken(&hmp->fs_token);
1581 	return (error);
1582 }
1583 
1584 /*
1585  * hammer_vop_open { vp, mode, cred, fp }
1586  *
1587  * MPSAFE (does not require fs_token)
1588  */
1589 static
1590 int
1591 hammer_vop_open(struct vop_open_args *ap)
1592 {
1593 	hammer_inode_t ip;
1594 
1595 	++hammer_stats_file_iopsr;
1596 	ip = VTOI(ap->a_vp);
1597 
1598 	if ((ap->a_mode & FWRITE) && (ip->flags & HAMMER_INODE_RO))
1599 		return (EROFS);
1600 	return(vop_stdopen(ap));
1601 }
1602 
1603 /*
1604  * hammer_vop_print { vp }
1605  */
1606 static
1607 int
1608 hammer_vop_print(struct vop_print_args *ap)
1609 {
1610 	return EOPNOTSUPP;
1611 }
1612 
1613 /*
1614  * hammer_vop_readdir { vp, uio, cred, *eofflag, *ncookies, off_t **cookies }
1615  */
1616 static
1617 int
1618 hammer_vop_readdir(struct vop_readdir_args *ap)
1619 {
1620 	struct hammer_transaction trans;
1621 	struct hammer_cursor cursor;
1622 	struct hammer_inode *ip;
1623 	hammer_mount_t hmp;
1624 	struct uio *uio;
1625 	hammer_base_elm_t base;
1626 	int error;
1627 	int cookie_index;
1628 	int ncookies;
1629 	off_t *cookies;
1630 	off_t saveoff;
1631 	int r;
1632 	int dtype;
1633 
1634 	++hammer_stats_file_iopsr;
1635 	ip = VTOI(ap->a_vp);
1636 	uio = ap->a_uio;
1637 	saveoff = uio->uio_offset;
1638 	hmp = ip->hmp;
1639 
1640 	if (ap->a_ncookies) {
1641 		ncookies = uio->uio_resid / 16 + 1;
1642 		if (ncookies > 1024)
1643 			ncookies = 1024;
1644 		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
1645 		cookie_index = 0;
1646 	} else {
1647 		ncookies = -1;
1648 		cookies = NULL;
1649 		cookie_index = 0;
1650 	}
1651 
1652 	lwkt_gettoken(&hmp->fs_token);
1653 	hammer_simple_transaction(&trans, hmp);
1654 
1655 	/*
1656 	 * Handle artificial entries
1657 	 *
1658 	 * It should be noted that the minimum value for a directory
1659 	 * hash key on-media is 0x0000000100000000, so we can use anything
1660  * less than that to represent our 'special' key space.
1661 	 */
1662 	error = 0;
1663 	if (saveoff == 0) {
1664 		r = vop_write_dirent(&error, uio, ip->obj_id, DT_DIR, 1, ".");
1665 		if (r)
1666 			goto done;
1667 		if (cookies)
1668 			cookies[cookie_index] = saveoff;
1669 		++saveoff;
1670 		++cookie_index;
1671 		if (cookie_index == ncookies)
1672 			goto done;
1673 	}
1674 	if (saveoff == 1) {
1675 		if (ip->ino_data.parent_obj_id) {
1676 			r = vop_write_dirent(&error, uio,
1677 					     ip->ino_data.parent_obj_id,
1678 					     DT_DIR, 2, "..");
1679 		} else {
1680 			r = vop_write_dirent(&error, uio,
1681 					     ip->obj_id, DT_DIR, 2, "..");
1682 		}
1683 		if (r)
1684 			goto done;
1685 		if (cookies)
1686 			cookies[cookie_index] = saveoff;
1687 		++saveoff;
1688 		++cookie_index;
1689 		if (cookie_index == ncookies)
1690 			goto done;
1691 	}
1692 
1693 	/*
1694 	 * Key range (begin and end inclusive) to scan.  Directory keys
1695 	 * directly translate to a 64 bit 'seek' position.
1696 	 */
1697 	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1698 	cursor.key_beg.localization = ip->obj_localization |
1699 				      hammer_dir_localization(ip);
1700 	cursor.key_beg.obj_id = ip->obj_id;
1701 	cursor.key_beg.create_tid = 0;
1702 	cursor.key_beg.delete_tid = 0;
1703 	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
1704 	cursor.key_beg.obj_type = 0;
1705 	cursor.key_beg.key = saveoff;
1706 
1707 	cursor.key_end = cursor.key_beg;
1708 	cursor.key_end.key = HAMMER_MAX_KEY;
1709 	cursor.asof = ip->obj_asof;
1710 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1711 
1712 	error = hammer_ip_first(&cursor);
1713 
1714 	while (error == 0) {
1715 		error = hammer_ip_resolve_data(&cursor);
1716 		if (error)
1717 			break;
1718 		base = &cursor.leaf->base;
1719 		saveoff = base->key;
1720 		KKASSERT(cursor.leaf->data_len > HAMMER_ENTRY_NAME_OFF);
1721 
1722 		if (base->obj_id != ip->obj_id)
1723 			hpanic("bad record at %p", cursor.node);
1724 
1725 		/*
1726 		 * Convert pseudo-filesystems into softlinks
1727 		 */
1728 		dtype = hammer_get_dtype(cursor.leaf->base.obj_type);
1729 		r = vop_write_dirent(
1730 			     &error, uio, cursor.data->entry.obj_id,
1731 			     dtype,
1732 			     cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF,
1733 			     (void *)cursor.data->entry.name);
1734 		if (r)
1735 			break;
1736 		++saveoff;
1737 		if (cookies)
1738 			cookies[cookie_index] = base->key;
1739 		++cookie_index;
1740 		if (cookie_index == ncookies)
1741 			break;
1742 		error = hammer_ip_next(&cursor);
1743 	}
1744 	hammer_done_cursor(&cursor);
1745 
1746 done:
1747 	hammer_done_transaction(&trans);
1748 
1749 	if (ap->a_eofflag)
1750 		*ap->a_eofflag = (error == ENOENT);
1751 	uio->uio_offset = saveoff;
1752 	if (error && cookie_index == 0) {
1753 		if (error == ENOENT)
1754 			error = 0;
1755 		if (cookies) {
1756 			kfree(cookies, M_TEMP);
1757 			*ap->a_ncookies = 0;
1758 			*ap->a_cookies = NULL;
1759 		}
1760 	} else {
1761 		if (error == ENOENT)
1762 			error = 0;
1763 		if (cookies) {
1764 			*ap->a_ncookies = cookie_index;
1765 			*ap->a_cookies = cookies;
1766 		}
1767 	}
1768 	lwkt_reltoken(&hmp->fs_token);
1769 	return(error);
1770 }
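
/*
 * Cookie layout sketch for the readdir above: offsets 0 and 1 are the
 * synthetic "." and ".." entries; real entries reuse their 64 bit
 * directory hash keys, which are >= 0x0000000100000000 on-media, so the
 * two key spaces cannot collide.
 */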
1771 
1772 /*
1773  * hammer_vop_readlink { vp, uio, cred }
1774  */
1775 static
1776 int
1777 hammer_vop_readlink(struct vop_readlink_args *ap)
1778 {
1779 	struct hammer_transaction trans;
1780 	struct hammer_cursor cursor;
1781 	struct hammer_inode *ip;
1782 	hammer_mount_t hmp;
1783 	char buf[32];
1784 	uint32_t localization;
1785 	hammer_pseudofs_inmem_t pfsm;
1786 	int error;
1787 
1788 	ip = VTOI(ap->a_vp);
1789 	hmp = ip->hmp;
1790 
1791 	lwkt_gettoken(&hmp->fs_token);
1792 
1793 	/*
1794 	 * Shortcut if the symlink data was stuffed into ino_data.
1795 	 *
1796 	 * Also expand special "@@PFS%05d" softlinks (expansion only
1797 	 * occurs for non-historical (current) accesses made from the
1798 	 * primary filesystem).
1799 	 *
1800 	 * Note that the userspace hammer command does not allow users to
1801 	 * create a @@PFS softlink under another existing PFS (id != 0),
1802 	 * so the ip localization here for a @@PFS softlink is always 0.
1803 	 */
1804 	if (ip->ino_data.size <= HAMMER_INODE_BASESYMLEN) {
1805 		char *ptr;
1806 		int bytes;
1807 
1808 		ptr = ip->ino_data.ext.symlink;
1809 		bytes = (int)ip->ino_data.size;
1810 		if (bytes == 10 &&
1811 		    ip->obj_asof == HAMMER_MAX_TID &&
1812 		    ip->obj_localization == HAMMER_DEF_LOCALIZATION &&
1813 		    strncmp(ptr, "@@PFS", 5) == 0) {
1814 			hammer_simple_transaction(&trans, hmp);
1815 			bcopy(ptr + 5, buf, 5);
1816 			buf[5] = 0;
1817 			localization = pfs_to_lo(strtoul(buf, NULL, 10));
1818 			pfsm = hammer_load_pseudofs(&trans, localization,
1819 						    &error);
1820 			if (error == 0) {
1821 				if (pfsm->pfsd.mirror_flags &
1822 				    HAMMER_PFSD_SLAVE) {
1823 					/* vap->va_size == 26 */
1824 					ksnprintf(buf, sizeof(buf),
1825 						  "@@0x%016llx:%05d",
1826 						  (long long)pfsm->pfsd.sync_end_tid,
1827 						  lo_to_pfs(localization));
1828 				} else {
1829 					/* vap->va_size == 10 */
1830 					ksnprintf(buf, sizeof(buf),
1831 						  "@@-1:%05d",
1832 						  lo_to_pfs(localization));
1833 				}
1834 				ptr = buf;
1835 				bytes = strlen(buf);
1836 			}
1837 			if (pfsm)
1838 				hammer_rel_pseudofs(hmp, pfsm);
1839 			hammer_done_transaction(&trans);
1840 		}
1841 		error = uiomove(ptr, bytes, ap->a_uio);
1842 		lwkt_reltoken(&hmp->fs_token);
1843 		return(error);
1844 	}
1845 
1846 	/*
1847 	 * Long version
1848 	 */
1849 	hammer_simple_transaction(&trans, hmp);
1850 	++hammer_stats_file_iopsr;
1851 	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
1852 
1853 	/*
1854 	 * Key to look up.  The symlink target is stored in a single
1855 	 * HAMMER_RECTYPE_FIX record keyed at HAMMER_FIXKEY_SYMLINK.
1856 	 */
1857 	cursor.key_beg.localization = ip->obj_localization |
1858 				      HAMMER_LOCALIZE_MISC;
1859 	cursor.key_beg.obj_id = ip->obj_id;
1860 	cursor.key_beg.create_tid = 0;
1861 	cursor.key_beg.delete_tid = 0;
1862 	cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
1863 	cursor.key_beg.obj_type = 0;
1864 	cursor.key_beg.key = HAMMER_FIXKEY_SYMLINK;
1865 	cursor.asof = ip->obj_asof;
1866 	cursor.flags |= HAMMER_CURSOR_ASOF;
1867 
1868 	error = hammer_ip_lookup(&cursor);
1869 	if (error == 0) {
1870 		error = hammer_ip_resolve_data(&cursor);
1871 		if (error == 0) {
1872 			KKASSERT(cursor.leaf->data_len >=
1873 				 HAMMER_SYMLINK_NAME_OFF);
1874 			error = uiomove(cursor.data->symlink.name,
1875 					cursor.leaf->data_len -
1876 						HAMMER_SYMLINK_NAME_OFF,
1877 					ap->a_uio);
1878 		}
1879 	}
1880 	hammer_done_cursor(&cursor);
1881 	hammer_done_transaction(&trans);
1882 	lwkt_reltoken(&hmp->fs_token);
1883 	return(error);
1884 }
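
/*
 * Illustrative sketch, not part of the build: the "@@PFS%05d" expansion
 * performed above.  It assumes pfs_to_lo()/lo_to_pfs() simply move the
 * PFS id in and out of the upper 16 bits of the localization field;
 * that shift and the sketch_ names are assumptions here, verify against
 * hammer.h before relying on them.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t sketch_pfs_to_lo(uint32_t pfs_id) { return (pfs_id << 16); }
static uint32_t sketch_lo_to_pfs(uint32_t lo)     { return (lo >> 16); }

static void
sketch_expand_pfs_link(uint32_t pfs_id, int is_slave, uint64_t sync_end_tid)
{
	char buf[32];
	uint32_t lo = sketch_pfs_to_lo(pfs_id);

	if (is_slave) {
		/* slaves pin the view to the last synchronized TID */
		snprintf(buf, sizeof(buf), "@@0x%016llx:%05d",
			 (unsigned long long)sync_end_tid,
			 (int)sketch_lo_to_pfs(lo));
	} else {
		/* masters use -1, meaning the current version */
		snprintf(buf, sizeof(buf), "@@-1:%05d",
			 (int)sketch_lo_to_pfs(lo));
	}
	printf("%s\n", buf);
}
#endif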
1885 
1886 /*
1887  * hammer_vop_nremove { nch, dvp, cred }
1888  */
1889 static
1890 int
1891 hammer_vop_nremove(struct vop_nremove_args *ap)
1892 {
1893 	struct hammer_transaction trans;
1894 	struct hammer_inode *dip;
1895 	hammer_mount_t hmp;
1896 	int error;
1897 
1898 	dip = VTOI(ap->a_dvp);
1899 	hmp = dip->hmp;
1900 
1901 	if (hammer_nohistory(dip) == 0 &&
1902 	    (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
1903 		return (error);
1904 	}
1905 
1906 	lwkt_gettoken(&hmp->fs_token);
1907 	hammer_start_transaction(&trans, hmp);
1908 	++hammer_stats_file_iopsw;
1909 	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 0);
1910 	hammer_done_transaction(&trans);
1911 	if (error == 0)
1912 		hammer_knote(ap->a_dvp, NOTE_WRITE);
1913 	lwkt_reltoken(&hmp->fs_token);
1914 	return (error);
1915 }
1916 
1917 /*
1918  * hammer_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1919  */
1920 static
1921 int
1922 hammer_vop_nrename(struct vop_nrename_args *ap)
1923 {
1924 	struct hammer_transaction trans;
1925 	struct namecache *fncp;
1926 	struct namecache *tncp;
1927 	struct hammer_inode *fdip;
1928 	struct hammer_inode *tdip;
1929 	struct hammer_inode *ip;
1930 	hammer_mount_t hmp;
1931 	struct hammer_cursor cursor;
1932 	int64_t namekey;
1933 	uint32_t max_iterations;
1934 	int nlen, error;
1935 
1936 	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1937 		return(EXDEV);
1938 	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1939 		return(EXDEV);
1940 
1941 	fdip = VTOI(ap->a_fdvp);
1942 	tdip = VTOI(ap->a_tdvp);
1943 	fncp = ap->a_fnch->ncp;
1944 	tncp = ap->a_tnch->ncp;
1945 	ip = VTOI(fncp->nc_vp);
1946 	KKASSERT(ip != NULL);
1947 
1948 	hmp = ip->hmp;
1949 
1950 	if (fdip->obj_localization != tdip->obj_localization)
1951 		return(EXDEV);
1952 	if (fdip->obj_localization != ip->obj_localization)
1953 		return(EXDEV);
1954 
1955 	if (fdip->flags & HAMMER_INODE_RO)
1956 		return (EROFS);
1957 	if (tdip->flags & HAMMER_INODE_RO)
1958 		return (EROFS);
1959 	if (ip->flags & HAMMER_INODE_RO)
1960 		return (EROFS);
1961 	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
1962 		return (error);
1963 
1964 	lwkt_gettoken(&hmp->fs_token);
1965 	hammer_start_transaction(&trans, hmp);
1966 	++hammer_stats_file_iopsw;
1967 
1968 	/*
1969 	 * Remove tncp from the target directory and then link ip as
1970 	 * tncp. XXX pass trans to dounlink
1971 	 *
1972 	 * Force the inode sync-time to match the transaction so it is
1973 	 * in-sync with the creation of the target directory entry.
1974 	 */
1975 	error = hammer_dounlink(&trans, ap->a_tnch, ap->a_tdvp,
1976 				ap->a_cred, 0, -1);
1977 	if (error == 0 || error == ENOENT) {
1978 		error = hammer_ip_add_directory(&trans, tdip,
1979 						tncp->nc_name, tncp->nc_nlen,
1980 						ip);
1981 		if (error == 0) {
1982 			ip->ino_data.parent_obj_id = tdip->obj_id;
1983 			ip->ino_data.ctime = trans.time;
1984 			hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
1985 		}
1986 	}
1987 	if (error)
1988 		goto failed; /* XXX */
1989 
1990 	/*
1991 	 * Locate the record in the originating directory and remove it.
1992 	 *
1993 	 * Calculate the namekey and setup the key range for the scan.  This
1994 	 * works kinda like a chained hash table where the lower 32 bits
1995 	 * of the namekey synthesize the chain.
1996 	 *
1997 	 * The key range is inclusive of both key_beg and key_end.
1998 	 */
1999 	namekey = hammer_directory_namekey(fdip, fncp->nc_name, fncp->nc_nlen,
2000 					   &max_iterations);
2001 retry:
2002 	hammer_init_cursor(&trans, &cursor, &fdip->cache[1], fdip);
2003 	cursor.key_beg.localization = fdip->obj_localization |
2004 				      hammer_dir_localization(fdip);
2005 	cursor.key_beg.obj_id = fdip->obj_id;
2006 	cursor.key_beg.key = namekey;
2007 	cursor.key_beg.create_tid = 0;
2008 	cursor.key_beg.delete_tid = 0;
2009 	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
2010 	cursor.key_beg.obj_type = 0;
2011 
2012 	cursor.key_end = cursor.key_beg;
2013 	cursor.key_end.key += max_iterations;
2014 	cursor.asof = fdip->obj_asof;
2015 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2016 
2017 	/*
2018 	 * Scan all matching records (the chain), locate the one matching
2019 	 * the requested path component.
2020 	 *
2021 	 * The hammer_ip_*() functions merge in-memory records with on-disk
2022 	 * records for the purposes of the search.
2023 	 */
2024 	error = hammer_ip_first(&cursor);
2025 	while (error == 0) {
2026 		if (hammer_ip_resolve_data(&cursor) != 0)
2027 			break;
2028 		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
2029 		KKASSERT(nlen > 0);
2030 		if (fncp->nc_nlen == nlen &&
2031 		    bcmp(fncp->nc_name, cursor.data->entry.name, nlen) == 0) {
2032 			break;
2033 		}
2034 		error = hammer_ip_next(&cursor);
2035 	}
2036 
2037 	/*
2038 	 * If all is ok we have to get the inode so we can adjust nlinks.
2039 	 *
2040 	 * WARNING: hammer_ip_del_directory() may have to terminate the
2041 	 * cursor to avoid a recursion.  It's ok to call hammer_done_cursor()
2042 	 * twice.
2043 	 */
2044 	if (error == 0)
2045 		error = hammer_ip_del_directory(&trans, &cursor, fdip, ip);
2046 
2047 	/*
2048 	 * XXX A deadlock here will break rename's atomicity for the purposes
2049 	 * of crash recovery.
2050 	 */
2051 	if (error == EDEADLK) {
2052 		hammer_done_cursor(&cursor);
2053 		goto retry;
2054 	}
2055 
2056 	/*
2057 	 * Cleanup and tell the kernel that the rename succeeded.
2058 	 *
2059 	 * NOTE: ip->vp, if non-NULL, cannot be directly referenced
2060 	 *	 without formally acquiring the vp since the vp might
2061 	 *	 have zero refs on it, or be in the middle of a reclaim,
2062 	 *	 etc.
2063 	 */
2064 	hammer_done_cursor(&cursor);
2065 	if (error == 0) {
2066 		cache_rename(ap->a_fnch, ap->a_tnch);
2067 		hammer_knote(ap->a_fdvp, NOTE_WRITE);
2068 		hammer_knote(ap->a_tdvp, NOTE_WRITE);
2069 		while (ip->vp) {
2070 			struct vnode *vp;
2071 
2072 			error = hammer_get_vnode(ip, &vp);
2073 			if (error == 0 && vp) {
2074 				vn_unlock(vp);
2075 				hammer_knote(ip->vp, NOTE_RENAME);
2076 				vrele(vp);
2077 				break;
2078 			}
2079 			hdkprintf("ip/vp race2 avoided\n");
2080 		}
2081 	}
2082 
2083 failed:
2084 	hammer_done_transaction(&trans);
2085 	lwkt_reltoken(&hmp->fs_token);
2086 	return (error);
2087 }
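
/*
 * Illustrative sketch, not part of the build: the directory-entry scans
 * above and in hammer_dounlink() behave like a chained hash table.  The
 * name hashes to a 64 bit key and colliding names occupy the following
 * max_iterations keys; walking [namekey, namekey + max_iterations] and
 * comparing names resolves the collision.  The structures below are
 * stand-ins, not HAMMER's real layout or hash.
 */
#if 0
#include <stdint.h>
#include <string.h>

struct sketch_dirent {
	int64_t	key;
	char	name[256];
};

static struct sketch_dirent *
sketch_chain_lookup(struct sketch_dirent *ents, int nents,
		    int64_t namekey, uint32_t max_iterations,
		    const char *name)
{
	int i;

	for (i = 0; i < nents; ++i) {
		if (ents[i].key < namekey ||
		    ents[i].key > namekey + (int64_t)max_iterations)
			continue;		/* outside the chain */
		if (strcmp(ents[i].name, name) == 0)
			return (&ents[i]);	/* collision resolved */
	}
	return (NULL);
}
#endif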
2088 
2089 /*
2090  * hammer_vop_nrmdir { nch, dvp, cred }
2091  */
2092 static
2093 int
2094 hammer_vop_nrmdir(struct vop_nrmdir_args *ap)
2095 {
2096 	struct hammer_transaction trans;
2097 	struct hammer_inode *dip;
2098 	hammer_mount_t hmp;
2099 	int error;
2100 
2101 	dip = VTOI(ap->a_dvp);
2102 	hmp = dip->hmp;
2103 
2104 	if (hammer_nohistory(dip) == 0 &&
2105 	    (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2106 		return (error);
2107 	}
2108 
2109 	lwkt_gettoken(&hmp->fs_token);
2110 	hammer_start_transaction(&trans, hmp);
2111 	++hammer_stats_file_iopsw;
2112 	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp, ap->a_cred, 0, 1);
2113 	hammer_done_transaction(&trans);
2114 	if (error == 0)
2115 		hammer_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2116 	lwkt_reltoken(&hmp->fs_token);
2117 	return (error);
2118 }
2119 
2120 /*
2121  * hammer_vop_markatime { vp, cred }
2122  */
2123 static
2124 int
2125 hammer_vop_markatime(struct vop_markatime_args *ap)
2126 {
2127 	struct hammer_transaction trans;
2128 	struct hammer_inode *ip;
2129 	hammer_mount_t hmp;
2130 
2131 	ip = VTOI(ap->a_vp);
2132 	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2133 		return (EROFS);
2134 	if (ip->flags & HAMMER_INODE_RO)
2135 		return (EROFS);
2136 	hmp = ip->hmp;
2137 	if (hmp->mp->mnt_flag & MNT_NOATIME)
2138 		return (0);
2139 	lwkt_gettoken(&hmp->fs_token);
2140 	hammer_start_transaction(&trans, hmp);
2141 	++hammer_stats_file_iopsw;
2142 
2143 	ip->ino_data.atime = trans.time;
2144 	hammer_modify_inode(&trans, ip, HAMMER_INODE_ATIME);
2145 	hammer_done_transaction(&trans);
2146 	hammer_knote(ap->a_vp, NOTE_ATTRIB);
2147 	lwkt_reltoken(&hmp->fs_token);
2148 	return (0);
2149 }
2150 
2151 /*
2152  * hammer_vop_setattr { vp, vap, cred }
2153  */
2154 static
2155 int
2156 hammer_vop_setattr(struct vop_setattr_args *ap)
2157 {
2158 	struct hammer_transaction trans;
2159 	struct hammer_inode *ip;
2160 	struct vattr *vap;
2161 	hammer_mount_t hmp;
2162 	int modflags;
2163 	int error;
2164 	int truncating;
2165 	int blksize;
2166 	int kflags;
2167 #if 0
2168 	int64_t aligned_size;
2169 #endif
2170 	uint32_t flags;
2171 
2172 	vap = ap->a_vap;
2173 	ip = ap->a_vp->v_data;
2174 	modflags = 0;
2175 	kflags = 0;
2176 	hmp = ip->hmp;
2177 
2178 	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
2179 		return(EROFS);
2180 	if (ip->flags & HAMMER_INODE_RO)
2181 		return (EROFS);
2182 	if (hammer_nohistory(ip) == 0 &&
2183 	    (error = hammer_checkspace(hmp, HAMMER_CHKSPC_REMOVE)) != 0) {
2184 		return (error);
2185 	}
2186 
2187 	lwkt_gettoken(&hmp->fs_token);
2188 	hammer_start_transaction(&trans, hmp);
2189 	++hammer_stats_file_iopsw;
2190 	error = 0;
2191 
2192 	if (vap->va_flags != VNOVAL) {
2193 		flags = ip->ino_data.uflags;
2194 		error = vop_helper_setattr_flags(&flags, vap->va_flags,
2195 					 hammer_to_unix_xid(&ip->ino_data.uid),
2196 					 ap->a_cred);
2197 		if (error == 0) {
2198 			if (ip->ino_data.uflags != flags) {
2199 				ip->ino_data.uflags = flags;
2200 				ip->ino_data.ctime = trans.time;
2201 				modflags |= HAMMER_INODE_DDIRTY;
2202 				kflags |= NOTE_ATTRIB;
2203 			}
2204 			if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2205 				error = 0;
2206 				goto done;
2207 			}
2208 		}
2209 		goto done;
2210 	}
2211 	if (ip->ino_data.uflags & (IMMUTABLE | APPEND)) {
2212 		error = EPERM;
2213 		goto done;
2214 	}
2215 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
2216 		mode_t cur_mode = ip->ino_data.mode;
2217 		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2218 		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2219 		uuid_t uuid_uid;
2220 		uuid_t uuid_gid;
2221 
2222 		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
2223 					 ap->a_cred,
2224 					 &cur_uid, &cur_gid, &cur_mode);
2225 		if (error == 0) {
2226 			hammer_guid_to_uuid(&uuid_uid, cur_uid);
2227 			hammer_guid_to_uuid(&uuid_gid, cur_gid);
2228 			if (bcmp(&uuid_uid, &ip->ino_data.uid,
2229 				 sizeof(uuid_uid)) ||
2230 			    bcmp(&uuid_gid, &ip->ino_data.gid,
2231 				 sizeof(uuid_gid)) ||
2232 			    ip->ino_data.mode != cur_mode) {
2233 				ip->ino_data.uid = uuid_uid;
2234 				ip->ino_data.gid = uuid_gid;
2235 				ip->ino_data.mode = cur_mode;
2236 				ip->ino_data.ctime = trans.time;
2237 				modflags |= HAMMER_INODE_DDIRTY;
2238 			}
2239 			kflags |= NOTE_ATTRIB;
2240 		}
2241 	}
2242 	while (vap->va_size != VNOVAL && ip->ino_data.size != vap->va_size) {
2243 		switch(ap->a_vp->v_type) {
2244 		case VREG:
2245 			if (vap->va_size == ip->ino_data.size)
2246 				break;
2247 
2248 			/*
2249 			 * Log the operation if in fast-fsync mode or if
2250 			 * there are unterminated redo write records present.
2251 			 *
2252 			 * The second check is needed so the recovery code
2253 			 * properly truncates write redos even if the nominal
2254 			 * REDO operation is turned off due to excessive
2255 			 * writes, because the related records might be
2256 			 * destroyed and never lay down a TERM_WRITE.
2257 			 */
2258 			if ((ip->flags & HAMMER_INODE_REDO) ||
2259 			    (ip->flags & HAMMER_INODE_RDIRTY)) {
2260 				error = hammer_generate_redo(&trans, ip,
2261 							     vap->va_size,
2262 							     HAMMER_REDO_TRUNC,
2263 							     NULL, 0);
2264 			}
2265 			blksize = hammer_blocksize(vap->va_size);
2266 
2267 			/*
2268 			 * XXX break atomicity, we can deadlock the backend
2269 			 * if we do not release the lock.  Probably not a
2270 			 * big deal here.
2271 			 */
2272 			if (vap->va_size < ip->ino_data.size) {
2273 				nvtruncbuf(ap->a_vp, vap->va_size,
2274 					   blksize,
2275 					   hammer_blockoff(vap->va_size),
2276 					   0);
2277 				truncating = 1;
2278 				kflags |= NOTE_WRITE;
2279 			} else {
2280 				nvextendbuf(ap->a_vp,
2281 					    ip->ino_data.size,
2282 					    vap->va_size,
2283 					    hammer_blocksize(ip->ino_data.size),
2284 					    hammer_blocksize(vap->va_size),
2285 					    hammer_blockoff(ip->ino_data.size),
2286 					    hammer_blockoff(vap->va_size),
2287 					    0);
2288 				truncating = 0;
2289 				kflags |= NOTE_WRITE | NOTE_EXTEND;
2290 			}
2291 			ip->ino_data.size = vap->va_size;
2292 			ip->ino_data.mtime = trans.time;
2293 			/* XXX safe to use SDIRTY instead of DDIRTY here? */
2294 			modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2295 
2296 			/*
2297 			 * On-media truncation is cached in the inode until
2298 			 * the inode is synchronized.  We must immediately
2299 			 * handle any frontend records.
2300 			 */
2301 			if (truncating) {
2302 				hammer_ip_frontend_trunc(ip, vap->va_size);
2303 				if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2304 					ip->flags |= HAMMER_INODE_TRUNCATED;
2305 					ip->trunc_off = vap->va_size;
2306 					hammer_inode_dirty(ip);
2307 				} else if (ip->trunc_off > vap->va_size) {
2308 					ip->trunc_off = vap->va_size;
2309 				}
2310 			}
2311 
2312 #if 0
2313 			/*
2314 			 * When truncating, nvtruncbuf() may have cleaned out
2315 			 * a portion of the last block on-disk in the buffer
2316 			 * cache.  We must clean out any frontend records
2317 			 * for blocks beyond the new last block.
2318 			 */
2319 			aligned_size = (vap->va_size + (blksize - 1)) &
2320 				       ~(int64_t)(blksize - 1);
2321 			if (truncating && vap->va_size < aligned_size) {
2322 				aligned_size -= blksize;
2323 				hammer_ip_frontend_trunc(ip, aligned_size);
2324 			}
2325 #endif
2326 			break;
2327 		case VDATABASE:
2328 			if ((ip->flags & HAMMER_INODE_TRUNCATED) == 0) {
2329 				ip->flags |= HAMMER_INODE_TRUNCATED;
2330 				ip->trunc_off = vap->va_size;
2331 				hammer_inode_dirty(ip);
2332 			} else if (ip->trunc_off > vap->va_size) {
2333 				ip->trunc_off = vap->va_size;
2334 			}
2335 			hammer_ip_frontend_trunc(ip, vap->va_size);
2336 			ip->ino_data.size = vap->va_size;
2337 			ip->ino_data.mtime = trans.time;
2338 			modflags |= HAMMER_INODE_MTIME | HAMMER_INODE_DDIRTY;
2339 			kflags |= NOTE_ATTRIB;
2340 			break;
2341 		default:
2342 			error = EINVAL;
2343 			goto done;
2344 		}
2345 		break;
2346 	}
2347 	if (vap->va_atime.tv_sec != VNOVAL) {
2348 		ip->ino_data.atime = hammer_timespec_to_time(&vap->va_atime);
2349 		modflags |= HAMMER_INODE_ATIME;
2350 		kflags |= NOTE_ATTRIB;
2351 	}
2352 	if (vap->va_mtime.tv_sec != VNOVAL) {
2353 		ip->ino_data.mtime = hammer_timespec_to_time(&vap->va_mtime);
2354 		modflags |= HAMMER_INODE_MTIME;
2355 		kflags |= NOTE_ATTRIB;
2356 	}
2357 	if (vap->va_mode != (mode_t)VNOVAL) {
2358 		mode_t   cur_mode = ip->ino_data.mode;
2359 		uid_t cur_uid = hammer_to_unix_xid(&ip->ino_data.uid);
2360 		gid_t cur_gid = hammer_to_unix_xid(&ip->ino_data.gid);
2361 
2362 		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
2363 					 cur_uid, cur_gid, &cur_mode);
2364 		if (error == 0 && ip->ino_data.mode != cur_mode) {
2365 			ip->ino_data.mode = cur_mode;
2366 			ip->ino_data.ctime = trans.time;
2367 			modflags |= HAMMER_INODE_DDIRTY;
2368 			kflags |= NOTE_ATTRIB;
2369 		}
2370 	}
2371 done:
2372 	if (error == 0)
2373 		hammer_modify_inode(&trans, ip, modflags);
2374 	hammer_done_transaction(&trans);
2375 	hammer_knote(ap->a_vp, kflags);
2376 	lwkt_reltoken(&hmp->fs_token);
2377 	return (error);
2378 }
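
/*
 * Illustrative sketch, not part of the build: the truncation path above
 * sizes buffers with hammer_blocksize()/hammer_blockoff().  HAMMER uses
 * small buffers near the front of a file and large buffers past a fixed
 * demarcation; the 16KB/64KB/1MB constants below match the usual layout
 * but are restated here as assumptions, not definitions.
 */
#if 0
#include <stdint.h>

#define SKETCH_BUFSIZE		16384		/* small buffer (assumed) */
#define SKETCH_XBUFSIZE		65536		/* large buffer (assumed) */
#define SKETCH_XDEMARC		(1024 * 1024)	/* switch-over (assumed) */

static int
sketch_blocksize(int64_t file_offset)
{
	return (file_offset < SKETCH_XDEMARC ?
		SKETCH_BUFSIZE : SKETCH_XBUFSIZE);
}

static int
sketch_blockoff(int64_t file_offset)
{
	/* byte offset within the governing buffer */
	return ((int)(file_offset & (sketch_blocksize(file_offset) - 1)));
}
#endif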
2379 
2380 /*
2381  * hammer_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
2382  */
2383 static
2384 int
2385 hammer_vop_nsymlink(struct vop_nsymlink_args *ap)
2386 {
2387 	struct hammer_transaction trans;
2388 	struct hammer_inode *dip;
2389 	struct hammer_inode *nip;
2390 	hammer_record_t record;
2391 	struct nchandle *nch;
2392 	hammer_mount_t hmp;
2393 	int error;
2394 	int bytes;
2395 
2396 	ap->a_vap->va_type = VLNK;
2397 
2398 	nch = ap->a_nch;
2399 	dip = VTOI(ap->a_dvp);
2400 	hmp = dip->hmp;
2401 
2402 	if (dip->flags & HAMMER_INODE_RO)
2403 		return (EROFS);
2404 	if ((error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0)
2405 		return (error);
2406 
2407 	/*
2408 	 * Create a transaction to cover the operations we perform.
2409 	 */
2410 	lwkt_gettoken(&hmp->fs_token);
2411 	hammer_start_transaction(&trans, hmp);
2412 	++hammer_stats_file_iopsw;
2413 
2414 	/*
2415 	 * Create a new filesystem object of the requested type.  The
2416 	 * returned inode will be referenced but not locked.
2417 	 */
2418 
2419 	error = hammer_create_inode(&trans, ap->a_vap, ap->a_cred,
2420 				    dip, nch->ncp->nc_name, nch->ncp->nc_nlen,
2421 				    NULL, &nip);
2422 	if (error) {
2423 		hammer_done_transaction(&trans);
2424 		*ap->a_vpp = NULL;
2425 		lwkt_reltoken(&hmp->fs_token);
2426 		return (error);
2427 	}
2428 
2429 	/*
2430 	 * Add a record representing the symlink.  The symlink stores the
2431 	 * link as pure data, not a string, and is not \0 terminated.
2432 	 */
2433 	if (error == 0) {
2434 		bytes = strlen(ap->a_target);
2435 
2436 		if (bytes <= HAMMER_INODE_BASESYMLEN) {
2437 			bcopy(ap->a_target, nip->ino_data.ext.symlink, bytes);
2438 		} else {
2439 			record = hammer_alloc_mem_record(nip, bytes);
2440 			record->type = HAMMER_MEM_RECORD_GENERAL;
2441 
2442 			record->leaf.base.localization = nip->obj_localization |
2443 							 HAMMER_LOCALIZE_MISC;
2444 			record->leaf.base.key = HAMMER_FIXKEY_SYMLINK;
2445 			record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
2446 			record->leaf.data_len = bytes;
2447 			KKASSERT(HAMMER_SYMLINK_NAME_OFF == 0);
2448 			bcopy(ap->a_target, record->data->symlink.name, bytes);
2449 			error = hammer_ip_add_record(&trans, record);
2450 		}
2451 
2452 		/*
2453 		 * Set the file size to the length of the link.
2454 		 */
2455 		if (error == 0) {
2456 			nip->ino_data.size = bytes;
2457 			hammer_modify_inode(&trans, nip, HAMMER_INODE_DDIRTY);
2458 		}
2459 	}
2460 	if (error == 0)
2461 		error = hammer_ip_add_directory(&trans, dip, nch->ncp->nc_name,
2462 						nch->ncp->nc_nlen, nip);
2463 
2464 	/*
2465 	 * Finish up.
2466 	 */
2467 	if (error) {
2468 		hammer_rel_inode(nip, 0);
2469 		*ap->a_vpp = NULL;
2470 	} else {
2471 		error = hammer_get_vnode(nip, ap->a_vpp);
2472 		hammer_rel_inode(nip, 0);
2473 		if (error == 0) {
2474 			cache_setunresolved(ap->a_nch);
2475 			cache_setvp(ap->a_nch, *ap->a_vpp);
2476 			hammer_knote(ap->a_dvp, NOTE_WRITE);
2477 		}
2478 	}
2479 	hammer_done_transaction(&trans);
2480 	lwkt_reltoken(&hmp->fs_token);
2481 	return (error);
2482 }
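
/*
 * Illustrative sketch, not part of the build: the inline-vs-record
 * decision made above.  Targets that fit in the inode's embedded
 * extension area are copied inline; longer ones require a separate
 * HAMMER_RECTYPE_FIX record.  SKETCH_BASESYMLEN is a stand-in for
 * HAMMER_INODE_BASESYMLEN, whatever its actual value.
 */
#if 0
#include <string.h>

#define SKETCH_BASESYMLEN	24	/* stand-in cutoff */

struct sketch_inode {
	char	symlink[SKETCH_BASESYMLEN];	/* inline target, no \0 */
	long	size;
};

/* Returns 0 if stored inline, 1 if a separate record is required. */
static int
sketch_store_symlink(struct sketch_inode *ip, const char *target)
{
	size_t bytes = strlen(target);

	ip->size = (long)bytes;
	if (bytes <= SKETCH_BASESYMLEN) {
		memcpy(ip->symlink, target, bytes);	/* unterminated */
		return (0);
	}
	return (1);	/* caller lays down a FIX record instead */
}
#endif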
2483 
2484 /*
2485  * hammer_vop_nwhiteout { nch, dvp, cred, flags }
2486  */
2487 static
2488 int
2489 hammer_vop_nwhiteout(struct vop_nwhiteout_args *ap)
2490 {
2491 	struct hammer_transaction trans;
2492 	struct hammer_inode *dip;
2493 	hammer_mount_t hmp;
2494 	int error;
2495 
2496 	dip = VTOI(ap->a_dvp);
2497 	hmp = dip->hmp;
2498 
2499 	if (hammer_nohistory(dip) == 0 &&
2500 	    (error = hammer_checkspace(hmp, HAMMER_CHKSPC_CREATE)) != 0) {
2501 		return (error);
2502 	}
2503 
2504 	lwkt_gettoken(&hmp->fs_token);
2505 	hammer_start_transaction(&trans, hmp);
2506 	++hammer_stats_file_iopsw;
2507 	error = hammer_dounlink(&trans, ap->a_nch, ap->a_dvp,
2508 				ap->a_cred, ap->a_flags, -1);
2509 	hammer_done_transaction(&trans);
2510 	lwkt_reltoken(&hmp->fs_token);
2511 
2512 	return (error);
2513 }
2514 
2515 /*
2516  * hammer_vop_ioctl { vp, command, data, fflag, cred }
2517  */
2518 static
2519 int
2520 hammer_vop_ioctl(struct vop_ioctl_args *ap)
2521 {
2522 	struct hammer_inode *ip = ap->a_vp->v_data;
2523 	hammer_mount_t hmp = ip->hmp;
2524 	int error;
2525 
2526 	++hammer_stats_file_iopsr;
2527 	lwkt_gettoken(&hmp->fs_token);
2528 	error = hammer_ioctl(ip, ap->a_command, ap->a_data,
2529 			     ap->a_fflag, ap->a_cred);
2530 	lwkt_reltoken(&hmp->fs_token);
2531 	return (error);
2532 }
2533 
2534 static
2535 int
2536 hammer_vop_mountctl(struct vop_mountctl_args *ap)
2537 {
2538 	static const struct mountctl_opt extraopt[] = {
2539 		{ HMNT_NOHISTORY,	"nohistory" },
2540 		{ HMNT_MASTERID,	"master" },
2541 		{ HMNT_NOMIRROR,	"nomirror" },
2542 		{ 0, NULL }
2543 	};
2544 
2545 	struct hammer_mount *hmp;
2546 	struct mount *mp;
2547 	int usedbytes;
2548 	int error;
2549 
2550 	error = 0;
2551 	usedbytes = 0;
2552 	mp = ap->a_head.a_ops->head.vv_mount;
2553 	KKASSERT(mp->mnt_data != NULL);
2554 	hmp = (struct hammer_mount *)mp->mnt_data;
2555 
2556 	lwkt_gettoken(&hmp->fs_token);
2557 
2558 	switch(ap->a_op) {
2559 	case MOUNTCTL_SET_EXPORT:
2560 		if (ap->a_ctllen != sizeof(struct export_args))
2561 			error = EINVAL;
2562 		else
2563 			error = hammer_vfs_export(mp, ap->a_op,
2564 				      (const struct export_args *)ap->a_ctl);
2565 		break;
2566 	case MOUNTCTL_MOUNTFLAGS:
2567 		/*
2568 		 * Call standard mountctl VOP function
2569 		 * so we get user mount flags.
2570 		 */
2571 		error = vop_stdmountctl(ap);
2572 		if (error)
2573 			break;
2574 
2575 		usedbytes = *ap->a_res;
2576 
2577 		if (usedbytes > 0 && usedbytes < ap->a_buflen) {
2578 			usedbytes += vfs_flagstostr(hmp->hflags, extraopt,
2579 						    ap->a_buf,
2580 						    ap->a_buflen - usedbytes,
2581 						    &error);
2582 		}
2583 
2584 		*ap->a_res += usedbytes;
2585 		break;
2586 	default:
2587 		error = vop_stdmountctl(ap);
2588 		break;
2589 	}
2590 	lwkt_reltoken(&hmp->fs_token);
2591 	return(error);
2592 }
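
/*
 * Illustrative sketch, not part of the build: how the MOUNTFLAGS case
 * above extends the standard answer.  vop_stdmountctl() renders the
 * generic mount options first, then the private HMNT_* flags are
 * appended from a table like 'extraopt'.  The helper below mimics the
 * flags-to-string step with stand-in types.
 */
#if 0
#include <stdio.h>

struct sketch_opt {
	unsigned	flag;
	const char	*name;
};

static int
sketch_flagstostr(unsigned flags, const struct sketch_opt *opts,
		  char *buf, int buflen)
{
	int used = 0;

	for (; opts->name != NULL && used < buflen; ++opts) {
		if ((flags & opts->flag) == 0)
			continue;
		used += snprintf(buf + used, buflen - used, "%s%s",
				 (used ? "," : ""), opts->name);
	}
	return (used);
}
#endif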
2593 
2594 /*
2595  * hammer_vop_strategy { vp, bio }
2596  *
2597  * Strategy call, used for regular file read & write only.  Note that the
2598  * bp may represent a cluster.
2599  *
2600  * To simplify operation and allow better optimizations in the future,
2601  * this code does not make any assumptions with regard to buffer alignment
2602  * or size.
2603  */
2604 static
2605 int
2606 hammer_vop_strategy(struct vop_strategy_args *ap)
2607 {
2608 	struct buf *bp;
2609 	int error;
2610 
2611 	bp = ap->a_bio->bio_buf;
2612 
2613 	switch(bp->b_cmd) {
2614 	case BUF_CMD_READ:
2615 		error = hammer_vop_strategy_read(ap);
2616 		break;
2617 	case BUF_CMD_WRITE:
2618 		error = hammer_vop_strategy_write(ap);
2619 		break;
2620 	default:
2621 		bp->b_error = error = EINVAL;
2622 		bp->b_flags |= B_ERROR;
2623 		biodone(ap->a_bio);
2624 		break;
2625 	}
2626 
2627 	/* hammer_dump_dedup_cache(((hammer_inode_t)ap->a_vp->v_data)->hmp); */
2628 
2629 	return (error);
2630 }
2631 
2632 /*
2633  * Read from a regular file.  Iterate the related records and fill in the
2634  * BIO/BUF.  Gaps are zero-filled.
2635  *
2636  * The support code in hammer_object.c should be used to deal with mixed
2637  * in-memory and on-disk records.
2638  *
2639  * NOTE: Can be called from the cluster code with an oversized buf.
2640  *
2641  * XXX atime update
2642  */
2643 static
2644 int
2645 hammer_vop_strategy_read(struct vop_strategy_args *ap)
2646 {
2647 	struct hammer_transaction trans;
2648 	struct hammer_inode *ip;
2649 	struct hammer_inode *dip;
2650 	hammer_mount_t hmp;
2651 	struct hammer_cursor cursor;
2652 	hammer_base_elm_t base;
2653 	hammer_off_t disk_offset;
2654 	struct bio *bio;
2655 	struct bio *nbio;
2656 	struct buf *bp;
2657 	int64_t rec_offset;
2658 	int64_t ran_end;
2659 	int64_t tmp64;
2660 	int error;
2661 	int boff;
2662 	int roff;
2663 	int n;
2664 	int isdedupable;
2665 
2666 	bio = ap->a_bio;
2667 	bp = bio->bio_buf;
2668 	ip = ap->a_vp->v_data;
2669 	hmp = ip->hmp;
2670 
2671 	/*
2672 	 * The zone-2 disk offset may have been set by the cluster code via
2673 	 * a BMAP operation, or else should be NOOFFSET.
2674 	 *
2675 	 * Checking the high bits for a match against zone-2 should suffice.
2676 	 *
2677 	 * In cases where a lot of data duplication is present it may be
2678 	 * more beneficial to drop through and double-buffer through the
2679 	 * device.
2680 	 */
2681 	nbio = push_bio(bio);
2682 	if ((nbio->bio_offset & HAMMER_OFF_ZONE_MASK) ==
2683 	    HAMMER_ZONE_LARGE_DATA) {
2684 		if (hammer_double_buffer == 0) {
2685 			lwkt_gettoken(&hmp->fs_token);
2686 			error = hammer_io_direct_read(hmp, nbio, NULL);
2687 			lwkt_reltoken(&hmp->fs_token);
2688 			return (error);
2689 		}
2690 
2691 		/*
2692 		 * Try to shortcut requests for double_buffer mode too.
2693 		 * Since this mode runs through the device buffer cache,
2694 		 * only compatible buffer sizes (meaning those generated
2695 		 * by normal filesystem buffers) are legal.
2696 		 */
2697 		if (hammer_live_dedup == 0 && (bp->b_flags & B_PAGING) == 0) {
2698 			lwkt_gettoken(&hmp->fs_token);
2699 			error = hammer_io_indirect_read(hmp, nbio, NULL);
2700 			lwkt_reltoken(&hmp->fs_token);
2701 			return (error);
2702 		}
2703 	}
2704 
2705 	/*
2706 	 * Well, that sucked.  Do it the hard way.  If all the stars are
2707 	 * aligned we may still be able to issue a direct-read.
2708 	 */
2709 	lwkt_gettoken(&hmp->fs_token);
2710 	hammer_simple_transaction(&trans, hmp);
2711 	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2712 
2713 	/*
2714 	 * Key range (begin and end inclusive) to scan.  Note that the keys
2715 	 * stored in the actual records represent BASE+LEN, not BASE.  The
2716 	 * first record containing bio_offset will have a key > bio_offset.
2717 	 */
2718 	cursor.key_beg.localization = ip->obj_localization |
2719 				      HAMMER_LOCALIZE_MISC;
2720 	cursor.key_beg.obj_id = ip->obj_id;
2721 	cursor.key_beg.create_tid = 0;
2722 	cursor.key_beg.delete_tid = 0;
2723 	cursor.key_beg.obj_type = 0;
2724 	cursor.key_beg.key = bio->bio_offset + 1;
2725 	cursor.asof = ip->obj_asof;
2726 	cursor.flags |= HAMMER_CURSOR_ASOF;
2727 
2728 	cursor.key_end = cursor.key_beg;
2729 	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
2730 #if 0
2731 	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
2732 		cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
2733 		cursor.key_end.rec_type = HAMMER_RECTYPE_DB;
2734 		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2735 	} else
2736 #endif
2737 	{
2738 		ran_end = bio->bio_offset + bp->b_bufsize;
2739 		cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
2740 		cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
2741 		tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
2742 		if (tmp64 < ran_end)
2743 			cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
2744 		else
2745 			cursor.key_end.key = ran_end + MAXPHYS + 1;
2746 	}
2747 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
2748 
2749 	/*
2750 	 * Set NOSWAPCACHE for cursor data extraction if double buffering
2751 	 * is disabled, or if the file is not marked cacheable via chflags
2752 	 * and vm.swapcache_use_chflags is enabled.
2753 	 */
2754 	if (hammer_double_buffer == 0 ||
2755 	    ((ap->a_vp->v_flag & VSWAPCACHE) == 0 &&
2756 	     vm_swapcache_use_chflags)) {
2757 		cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;
2758 	}
2759 
2760 	error = hammer_ip_first(&cursor);
2761 	boff = 0;
2762 
2763 	while (error == 0) {
2764 		/*
2765 		 * Get the base file offset of the record.  The key for
2766 		 * data records is (base + bytes) rather than (base).
2767 		 */
2768 		base = &cursor.leaf->base;
2769 		rec_offset = base->key - cursor.leaf->data_len;
2770 
2771 		/*
2772 		 * Calculate the gap, if any, and zero-fill it.
2773 		 *
2774 		 * n is the offset of the start of the record versus our
2775 		 * current seek offset in the bio.
2776 		 */
2777 		n = (int)(rec_offset - (bio->bio_offset + boff));
2778 		if (n > 0) {
2779 			if (n > bp->b_bufsize - boff)
2780 				n = bp->b_bufsize - boff;
2781 			bzero((char *)bp->b_data + boff, n);
2782 			boff += n;
2783 			n = 0;
2784 		}
2785 
2786 		/*
2787 		 * Calculate the data offset in the record and the number
2788 		 * of bytes we can copy.
2789 		 *
2790 		 * There are two degenerate cases.  First, boff may already
2791 		 * be at bp->b_bufsize.  Secondly, the data offset within
2792 		 * the record may exceed the record's size.
2793 		 */
2794 		roff = -n;
2795 		rec_offset += roff;
2796 		n = cursor.leaf->data_len - roff;
2797 		if (n <= 0) {
2798 			hdkprintf("bad n=%d roff=%d\n", n, roff);
2799 			n = 0;
2800 		} else if (n > bp->b_bufsize - boff) {
2801 			n = bp->b_bufsize - boff;
2802 		}
2803 
2804 		/*
2805 		 * Deal with cached truncations.  This cool bit of code
2806 		 * allows truncate()/ftruncate() to avoid having to sync
2807 		 * the file.
2808 		 *
2809 		 * If the frontend is truncated then all backend records are
2810 		 * subject to the frontend's truncation.
2811 		 *
2812 		 * If the backend is truncated then backend records on-disk
2813 		 * (but not in-memory) are subject to the backend's
2814 		 * truncation.  In-memory records owned by the backend
2815 		 * represent data written after the truncation point on the
2816 		 * backend and must not be truncated.
2817 		 *
2818 		 * Truncate operations deal with frontend buffer cache
2819 		 * buffers and frontend-owned in-memory records synchronously.
2820 		 */
2821 		if (ip->flags & HAMMER_INODE_TRUNCATED) {
2822 			if (hammer_cursor_ondisk(&cursor)/* ||
2823 			    cursor.iprec->flush_state == HAMMER_FST_FLUSH*/) {
2824 				if (ip->trunc_off <= rec_offset)
2825 					n = 0;
2826 				else if (ip->trunc_off < rec_offset + n)
2827 					n = (int)(ip->trunc_off - rec_offset);
2828 			}
2829 		}
2830 		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2831 			if (hammer_cursor_ondisk(&cursor)) {
2832 				if (ip->sync_trunc_off <= rec_offset)
2833 					n = 0;
2834 				else if (ip->sync_trunc_off < rec_offset + n)
2835 					n = (int)(ip->sync_trunc_off - rec_offset);
2836 			}
2837 		}
2838 
2839 		/*
2840 		 * Try to issue a direct read into our bio if possible,
2841 		 * otherwise resolve the element data into a hammer_buffer
2842 		 * and copy.
2843 		 *
2844 		 * The buffer on-disk should be zeroed past any real
2845 		 * truncation point, but may not be for any synthesized
2846 		 * truncation point from above.
2847 		 *
2848 		 * NOTE: disk_offset is only valid if the cursor data is
2849 		 *	 on-disk.
2850 		 */
2851 		disk_offset = cursor.leaf->data_offset + roff;
2852 		isdedupable = (boff == 0 && n == bp->b_bufsize &&
2853 			       hammer_cursor_ondisk(&cursor) &&
2854 			       ((int)disk_offset & HAMMER_BUFMASK) == 0);
2855 
2856 		if (isdedupable && hammer_double_buffer == 0) {
2857 			/*
2858 			 * Direct read case
2859 			 */
2860 			KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2861 				 HAMMER_ZONE_LARGE_DATA);
2862 			nbio->bio_offset = disk_offset;
2863 			error = hammer_io_direct_read(hmp, nbio, cursor.leaf);
2864 			if (hammer_live_dedup && error == 0)
2865 				hammer_dedup_cache_add(ip, cursor.leaf);
2866 			goto done;
2867 		} else if (isdedupable) {
2868 			/*
2869 			 * Async I/O case for reading from backing store
2870 			 * and copying the data to the filesystem buffer.
2871 			 * live-dedup has to verify the data anyway if it
2872 			 * gets a hit later, so we can just add the entry
2873 			 * now.
2874 			 */
2875 			KKASSERT((disk_offset & HAMMER_OFF_ZONE_MASK) ==
2876 				 HAMMER_ZONE_LARGE_DATA);
2877 			nbio->bio_offset = disk_offset;
2878 			if (hammer_live_dedup)
2879 				hammer_dedup_cache_add(ip, cursor.leaf);
2880 			error = hammer_io_indirect_read(hmp, nbio, cursor.leaf);
2881 			goto done;
2882 		} else if (n) {
2883 			error = hammer_ip_resolve_data(&cursor);
2884 			if (error == 0) {
2885 				if (hammer_live_dedup && isdedupable)
2886 					hammer_dedup_cache_add(ip, cursor.leaf);
2887 				bcopy((char *)cursor.data + roff,
2888 				      (char *)bp->b_data + boff, n);
2889 			}
2890 		}
2891 		if (error)
2892 			break;
2893 
2894 		/*
2895 		 * We have to be sure that the only elements added to the
2896 		 * dedup cache are those which are already on-media.
2897 		 */
2898 		if (hammer_live_dedup && hammer_cursor_ondisk(&cursor))
2899 			hammer_dedup_cache_add(ip, cursor.leaf);
2900 
2901 		/*
2902 		 * Iterate until we have filled the request.
2903 		 */
2904 		boff += n;
2905 		if (boff == bp->b_bufsize)
2906 			break;
2907 		error = hammer_ip_next(&cursor);
2908 	}
2909 
2910 	/*
2911 	 * There may have been a gap after the last record.
2912 	 */
2913 	if (error == ENOENT)
2914 		error = 0;
2915 	if (error == 0 && boff != bp->b_bufsize) {
2916 		KKASSERT(boff < bp->b_bufsize);
2917 		bzero((char *)bp->b_data + boff, bp->b_bufsize - boff);
2918 		/* boff = bp->b_bufsize; */
2919 	}
2920 
2921 	/*
2922 	 * Disallow swapcache operation on the vnode buffer if double
2923 	 * buffering is enabled; the swapcache will get the data via
2924 	 * the block device buffer.
2925 	 */
2926 	if (hammer_double_buffer)
2927 		bp->b_flags |= B_NOTMETA;
2928 
2929 	/*
2930 	 * Cleanup
2931 	 */
2932 	bp->b_resid = 0;
2933 	bp->b_error = error;
2934 	if (error)
2935 		bp->b_flags |= B_ERROR;
2936 	biodone(ap->a_bio);
2937 
2938 done:
2939 	/*
2940 	 * Cache the b-tree node for the last data read in cache[1].
2941 	 *
2942 	 * If we hit the file EOF then also cache the node in the
2943 	 * governing directory's cache[3], it will be used to initialize
2944 	 * the new inode's cache[1] for any inodes looked up via the directory.
2945 	 *
2946 	 * This doesn't reduce disk accesses since the B-Tree chain is
2947 	 * likely cached, but it does reduce cpu overhead when looking
2948 	 * up file offsets for cpdup/tar/cpio style iterations.
2949 	 */
2950 	if (cursor.node)
2951 		hammer_cache_node(&ip->cache[1], cursor.node);
2952 	if (ran_end >= ip->ino_data.size) {
2953 		dip = hammer_find_inode(&trans, ip->ino_data.parent_obj_id,
2954 					ip->obj_asof, ip->obj_localization);
2955 		if (dip) {
2956 			hammer_cache_node(&dip->cache[3], cursor.node);
2957 			hammer_rel_inode(dip, 0);
2958 		}
2959 	}
2960 	hammer_done_cursor(&cursor);
2961 	hammer_done_transaction(&trans);
2962 	lwkt_reltoken(&hmp->fs_token);
2963 	return(error);
2964 }
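
/*
 * Illustrative sketch, not part of the build: the gap/copy arithmetic
 * from the read loop above, pulled out on its own.  n starts as the
 * distance from the current buffer offset to the next record; a
 * positive n is a gap and gets zero-filled, a negative n becomes roff,
 * the offset into the record at which copying resumes.
 */
#if 0
#include <string.h>

static void
sketch_fill_segment(char *buf, int bufsize, int *boff,
		    long long bio_offset, long long rec_offset,
		    const char *rec_data, int rec_len)
{
	int n = (int)(rec_offset - (bio_offset + *boff));
	int roff;

	if (n > 0) {				/* gap before the record */
		if (n > bufsize - *boff)
			n = bufsize - *boff;
		memset(buf + *boff, 0, n);
		*boff += n;
		n = 0;
	}
	roff = -n;				/* skip into the record */
	n = rec_len - roff;
	if (n > bufsize - *boff)
		n = bufsize - *boff;
	if (n > 0) {
		memcpy(buf + *boff, rec_data + roff, n);
		*boff += n;
	}
}
#endif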
2965 
2966 /*
2967  * BMAP operation - used to support cluster_read() only.
2968  *
2969  * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
2970  *
2971  * This routine may return EOPNOTSUPP if the operation is not supported for
2972  * the specified offset.  The contents of the pointer arguments do not
2973  * need to be initialized in that case.
2974  *
2975  * If a disk address is available and properly aligned return 0 with
2976  * *doffsetp set to the zone-2 address, and *runp / *runb set appropriately
2977  * to the run-length relative to that offset.  Callers may assume that
2978  * *doffsetp is valid if 0 is returned even when *runp is small, so we
2979  * return EOPNOTSUPP when the run is not sufficiently large.
2980  */
2981 static
2982 int
2983 hammer_vop_bmap(struct vop_bmap_args *ap)
2984 {
2985 	struct hammer_transaction trans;
2986 	struct hammer_inode *ip;
2987 	hammer_mount_t hmp;
2988 	struct hammer_cursor cursor;
2989 	hammer_base_elm_t base;
2990 	int64_t rec_offset;
2991 	int64_t ran_end;
2992 	int64_t tmp64;
2993 	int64_t base_offset;
2994 	int64_t base_disk_offset;
2995 	int64_t last_offset;
2996 	hammer_off_t last_disk_offset;
2997 	hammer_off_t disk_offset;
2998 	int	rec_len;
2999 	int	error;
3000 	int	blksize;
3001 
3002 	++hammer_stats_file_iopsr;
3003 	ip = ap->a_vp->v_data;
3004 	hmp = ip->hmp;
3005 
3006 	/*
3007 	 * We can only BMAP regular files.  We can't BMAP database files,
3008 	 * directories, etc.
3009 	 */
3010 	if (ip->ino_data.obj_type != HAMMER_OBJTYPE_REGFILE)
3011 		return(EOPNOTSUPP);
3012 
3013 	/*
3014 	 * bmap is typically called with runp/runb both NULL when used
3015 	 * for writing.  We do not support BMAP for writing atm.
3016 	 */
3017 	if (ap->a_cmd != BUF_CMD_READ)
3018 		return(EOPNOTSUPP);
3019 
3020 	/*
3021 	 * Scan the B-Tree to acquire blockmap addresses, then translate
3022 	 * to raw addresses.
3023 	 */
3024 	lwkt_gettoken(&hmp->fs_token);
3025 	hammer_simple_transaction(&trans, hmp);
3026 
3027 	hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
3028 
3029 	/*
3030 	 * Key range (begin and end inclusive) to scan.  Note that the keys
3031 	 * stored in the actual records represent BASE+LEN, not BASE.  The
3032 	 * first record containing bio_offset will have a key > bio_offset.
3033 	 */
3034 	cursor.key_beg.localization = ip->obj_localization |
3035 				      HAMMER_LOCALIZE_MISC;
3036 	cursor.key_beg.obj_id = ip->obj_id;
3037 	cursor.key_beg.create_tid = 0;
3038 	cursor.key_beg.delete_tid = 0;
3039 	cursor.key_beg.obj_type = 0;
3040 	if (ap->a_runb)
3041 		cursor.key_beg.key = ap->a_loffset - MAXPHYS + 1;
3042 	else
3043 		cursor.key_beg.key = ap->a_loffset + 1;
3044 	if (cursor.key_beg.key < 0)
3045 		cursor.key_beg.key = 0;
3046 	cursor.asof = ip->obj_asof;
3047 	cursor.flags |= HAMMER_CURSOR_ASOF;
3048 
3049 	cursor.key_end = cursor.key_beg;
3050 	KKASSERT(ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE);
3051 
3052 	ran_end = ap->a_loffset + MAXPHYS;
3053 	cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
3054 	cursor.key_end.rec_type = HAMMER_RECTYPE_DATA;
3055 	tmp64 = ran_end + MAXPHYS + 1;	/* work-around GCC-4 bug */
3056 	if (tmp64 < ran_end)
3057 		cursor.key_end.key = 0x7FFFFFFFFFFFFFFFLL;
3058 	else
3059 		cursor.key_end.key = ran_end + MAXPHYS + 1;
3060 
3061 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
3062 
3063 	error = hammer_ip_first(&cursor);
3064 	base_offset = last_offset = 0;
3065 	base_disk_offset = last_disk_offset = 0;
3066 
3067 	while (error == 0) {
3068 		/*
3069 		 * Get the base file offset of the record.  The key for
3070 		 * data records is (base + bytes) rather than (base).
3071 		 *
3072 		 * NOTE: rec_offset + rec_len may exceed the end-of-file.
3073 		 * The extra bytes should be zero on-disk and the BMAP op
3074 		 * should still be ok.
3075 		 */
3076 		base = &cursor.leaf->base;
3077 		rec_offset = base->key - cursor.leaf->data_len;
3078 		rec_len    = cursor.leaf->data_len;
3079 
3080 		/*
3081 		 * Incorporate any cached truncation.
3082 		 *
3083 		 * NOTE: Modifications to rec_len based on synthesized
3084 		 * truncation points remove the guarantee that any extended
3085 		 * data on disk is zero (since the truncations may not have
3086 		 * taken place on-media yet).
3087 		 */
3088 		if (ip->flags & HAMMER_INODE_TRUNCATED) {
3089 			if (hammer_cursor_ondisk(&cursor) ||
3090 			    cursor.iprec->flush_state == HAMMER_FST_FLUSH) {
3091 				if (ip->trunc_off <= rec_offset)
3092 					rec_len = 0;
3093 				else if (ip->trunc_off < rec_offset + rec_len)
3094 					rec_len = (int)(ip->trunc_off - rec_offset);
3095 			}
3096 		}
3097 		if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
3098 			if (hammer_cursor_ondisk(&cursor)) {
3099 				if (ip->sync_trunc_off <= rec_offset)
3100 					rec_len = 0;
3101 				else if (ip->sync_trunc_off < rec_offset + rec_len)
3102 					rec_len = (int)(ip->sync_trunc_off - rec_offset);
3103 			}
3104 		}
3105 
3106 		/*
3107 		 * Accumulate information.  If we have hit a discontiguous
3108 		 * block, reset base_offset unless we are already beyond the
3109 		 * requested offset.  If we are, that's it, we stop.
3110 		 */
3111 		if (error)
3112 			break;
3113 		if (hammer_cursor_ondisk(&cursor)) {
3114 			disk_offset = cursor.leaf->data_offset;
3115 			if (rec_offset != last_offset ||
3116 			    disk_offset != last_disk_offset) {
3117 				if (rec_offset > ap->a_loffset)
3118 					break;
3119 				base_offset = rec_offset;
3120 				base_disk_offset = disk_offset;
3121 			}
3122 			last_offset = rec_offset + rec_len;
3123 			last_disk_offset = disk_offset + rec_len;
3124 
3125 			if (hammer_live_dedup)
3126 				hammer_dedup_cache_add(ip, cursor.leaf);
3127 		}
3128 
3129 		error = hammer_ip_next(&cursor);
3130 	}
3131 
3132 	if (cursor.node)
3133 		hammer_cache_node(&ip->cache[1], cursor.node);
3134 
3135 	hammer_done_cursor(&cursor);
3136 	hammer_done_transaction(&trans);
3137 	lwkt_reltoken(&hmp->fs_token);
3138 
3139 	/*
3140 	 * If we couldn't find any records or the records we did find were
3141 	 * all behind the requested offset, return failure.  A forward
3142 	 * truncation can leave a hole w/ no on-disk records.
3143 	 */
3144 	if (last_offset == 0 || last_offset < ap->a_loffset)
3145 		return (EOPNOTSUPP);
3146 
3147 	/*
3148 	 * Figure out the block size at the requested offset and adjust
3149 	 * our limits so the cluster_read() does not create inappropriately
3150 	 * sized buffer cache buffers.
3151 	 */
3152 	blksize = hammer_blocksize(ap->a_loffset);
3153 	if (hammer_blocksize(base_offset) != blksize) {
3154 		base_offset = hammer_blockdemarc(base_offset, ap->a_loffset);
3155 	}
3156 	if (last_offset != ap->a_loffset &&
3157 	    hammer_blocksize(last_offset - 1) != blksize) {
3158 		last_offset = hammer_blockdemarc(ap->a_loffset,
3159 						 last_offset - 1);
3160 	}
3161 
3162 	/*
3163 	 * Returning EOPNOTSUPP simply prevents the direct-IO optimization
3164 	 * from occurring.
3165 	 */
3166 	disk_offset = base_disk_offset + (ap->a_loffset - base_offset);
3167 
3168 	if ((disk_offset & HAMMER_OFF_ZONE_MASK) != HAMMER_ZONE_LARGE_DATA) {
3169 		/*
3170 		 * Only large-data zones can be direct-IOd
3171 		 */
3172 		error = EOPNOTSUPP;
3173 	} else if ((disk_offset & HAMMER_BUFMASK) ||
3174 		   (last_offset - ap->a_loffset) < blksize) {
3175 		/*
3176 		 * doffsetp is not aligned or the forward run size does
3177 		 * not cover a whole buffer, disallow the direct I/O.
3178 		 */
3179 		error = EOPNOTSUPP;
3180 	} else {
3181 		/*
3182 		 * We're good.
3183 		 */
3184 		*ap->a_doffsetp = disk_offset;
3185 		if (ap->a_runb) {
3186 			*ap->a_runb = ap->a_loffset - base_offset;
3187 			KKASSERT(*ap->a_runb >= 0);
3188 		}
3189 		if (ap->a_runp) {
3190 			*ap->a_runp = last_offset - ap->a_loffset;
3191 			KKASSERT(*ap->a_runp >= 0);
3192 		}
3193 		error = 0;
3194 	}
3195 	return(error);
3196 }
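
/*
 * Illustrative sketch, not part of the build: the run accumulation
 * performed by the BMAP scan above.  A record extends the current run
 * only when both its file offset and its disk offset continue exactly
 * where the previous record ended; any discontinuity resets the run
 * base, and the scan stops once a reset would land past the requested
 * offset.  The record layout is a stand-in.
 */
#if 0
#include <stdint.h>

struct sketch_rec {
	int64_t	foff;		/* file offset of record base */
	int64_t	doff;		/* disk offset of record base */
	int	len;
};

static void
sketch_accumulate_run(const struct sketch_rec *recs, int nrecs,
		      int64_t loffset,
		      int64_t *base_foff, int64_t *base_doff,
		      int64_t *last_foff)
{
	int64_t last_doff = 0;
	int i;

	*base_foff = *base_doff = *last_foff = 0;
	for (i = 0; i < nrecs; ++i) {
		if (recs[i].foff != *last_foff ||
		    recs[i].doff != last_doff) {
			if (recs[i].foff > loffset)
				break;	/* run already spans loffset */
			*base_foff = recs[i].foff;
			*base_doff = recs[i].doff;
		}
		*last_foff = recs[i].foff + recs[i].len;
		last_doff = recs[i].doff + recs[i].len;
	}
}
#endif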
3197 
3198 /*
3199  * Write to a regular file.  Because this is a strategy call, the OS is
3200  * trying to actually get data onto the media.
3201  */
3202 static
3203 int
3204 hammer_vop_strategy_write(struct vop_strategy_args *ap)
3205 {
3206 	hammer_record_t record;
3207 	hammer_mount_t hmp;
3208 	hammer_inode_t ip;
3209 	struct bio *bio;
3210 	struct buf *bp;
3211 	int blksize __debugvar;
3212 	int bytes;
3213 	int error;
3214 
3215 	bio = ap->a_bio;
3216 	bp = bio->bio_buf;
3217 	ip = ap->a_vp->v_data;
3218 	hmp = ip->hmp;
3219 
3220 	blksize = hammer_blocksize(bio->bio_offset);
3221 	KKASSERT(bp->b_bufsize == blksize);
3222 
3223 	if (ip->flags & HAMMER_INODE_RO) {
3224 		bp->b_error = EROFS;
3225 		bp->b_flags |= B_ERROR;
3226 		biodone(ap->a_bio);
3227 		return(EROFS);
3228 	}
3229 
3230 	lwkt_gettoken(&hmp->fs_token);
3231 
3232 	/*
3233 	 * Disallow swapcache operation on the vnode buffer if double
3234 	 * buffering is enabled; the swapcache will get the data via
3235 	 * the block device buffer.
3236 	 */
3237 	if (hammer_double_buffer)
3238 		bp->b_flags |= B_NOTMETA;
3239 
3240 	/*
3241 	 * Interlock with inode destruction (no in-kernel or directory
3242 	 * topology visibility).  If we queue new IO while trying to
3243 	 * destroy the inode we can deadlock the vtrunc call in
3244 	 * hammer_inode_unloadable_check().
3245 	 *
3246 	 * Besides, there's no point flushing a bp associated with an
3247 	 * inode that is being destroyed on-media and has no kernel
3248 	 * references.
3249 	 */
3250 	if ((ip->flags | ip->sync_flags) &
3251 	    (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) {
3252 		bp->b_resid = 0;
3253 		biodone(ap->a_bio);
3254 		lwkt_reltoken(&hmp->fs_token);
3255 		return(0);
3256 	}
3257 
3258 	/*
3259 	 * Reserve space and issue a direct-write from the front-end.
3260 	 * NOTE: The direct_io code will hammer_bread/bcopy smaller
3261 	 * allocations.
3262 	 *
3263 	 * An in-memory record will be installed to reference the storage
3264 	 * until the flusher can get to it.
3265 	 *
3266 	 * Since we own the high level bio the front-end will not try to
3267 	 * do a direct-read until the write completes.
3268 	 *
3269 	 * NOTE: The only time we do not reserve a full-sized buffer's
3270 	 * worth of data is if the file is small.  We do not try to
3271 	 * allocate a fragment (from the small-data zone) at the end of
3272 	 * an otherwise large file as this can lead to wildly separated
3273 	 * data.
3274 	 */
3275 	KKASSERT((bio->bio_offset & HAMMER_BUFMASK) == 0);
3276 	KKASSERT(bio->bio_offset < ip->ino_data.size);
3277 	if (bio->bio_offset || ip->ino_data.size > HAMMER_HBUFSIZE)
3278 		bytes = bp->b_bufsize;
3279 	else
3280 		bytes = ((int)ip->ino_data.size + 15) & ~15;
3281 
3282 	record = hammer_ip_add_bulk(ip, bio->bio_offset, bp->b_data,
3283 				    bytes, &error);
3284 
3285 	/*
3286 	 * B_VFSFLAG1 indicates that a REDO_WRITE entry was generated
3287 	 * in hammer_vop_write().  We must flag the record so the proper
3288 	 * REDO_TERM_WRITE entry is generated during the flush.
3289 	 */
3290 	if (record) {
3291 		if (bp->b_flags & B_VFSFLAG1) {
3292 			record->flags |= HAMMER_RECF_REDO;
3293 			bp->b_flags &= ~B_VFSFLAG1;
3294 		}
3295 		if (record->flags & HAMMER_RECF_DEDUPED) {
3296 			bp->b_resid = 0;
3297 			hammer_ip_replace_bulk(hmp, record);
3298 			biodone(ap->a_bio);
3299 		} else {
3300 			hammer_io_direct_write(hmp, bio, record);
3301 		}
3302 		if (ip->rsv_recs > 1 && hmp->rsv_recs > hammer_limit_recs)
3303 			hammer_flush_inode(ip, 0);
3304 	} else {
3305 		bp->b_bio2.bio_offset = NOOFFSET;
3306 		bp->b_error = error;
3307 		bp->b_flags |= B_ERROR;
3308 		biodone(ap->a_bio);
3309 	}
3310 	lwkt_reltoken(&hmp->fs_token);
3311 	return(error);
3312 }
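
/*
 * Illustrative sketch, not part of the build: the reservation sizing
 * used above.  A full buffer is reserved except for the single buffer
 * of a small file, which is rounded up to a 16 byte boundary so it can
 * be placed as a fragment; 'small_limit' stands in for the actual
 * HAMMER_HBUFSIZE cutoff.
 */
#if 0
static int
sketch_reserve_bytes(long long bio_offset, long long file_size,
		     int bufsize, int small_limit)
{
	if (bio_offset != 0 || file_size > small_limit)
		return (bufsize);		/* full-sized reservation */
	return (((int)file_size + 15) & ~15);	/* 16-byte aligned fragment */
}
#endif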
3313 
3314 /*
3315  * dounlink - disconnect a directory entry
3316  *
3317  * XXX whiteout support not really in yet
3318  */
3319 static int
3320 hammer_dounlink(hammer_transaction_t trans, struct nchandle *nch,
3321 		struct vnode *dvp, struct ucred *cred,
3322 		int flags, int isdir)
3323 {
3324 	struct namecache *ncp;
3325 	hammer_inode_t dip;
3326 	hammer_inode_t ip;
3327 	hammer_mount_t hmp;
3328 	struct hammer_cursor cursor;
3329 	int64_t namekey;
3330 	uint32_t max_iterations;
3331 	int nlen, error;
3332 
3333 	/*
3334 	 * Calculate the namekey and setup the key range for the scan.  This
3335 	 * works kinda like a chained hash table where the lower 32 bits
3336 	 * of the namekey synthesize the chain.
3337 	 *
3338 	 * The key range is inclusive of both key_beg and key_end.
3339 	 */
3340 	dip = VTOI(dvp);
3341 	ncp = nch->ncp;
3342 	hmp = dip->hmp;
3343 
3344 	if (dip->flags & HAMMER_INODE_RO)
3345 		return (EROFS);
3346 
3347 	namekey = hammer_directory_namekey(dip, ncp->nc_name, ncp->nc_nlen,
3348 					   &max_iterations);
3349 retry:
3350 	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
3351 	cursor.key_beg.localization = dip->obj_localization |
3352 				      hammer_dir_localization(dip);
3353 	cursor.key_beg.obj_id = dip->obj_id;
3354 	cursor.key_beg.key = namekey;
3355 	cursor.key_beg.create_tid = 0;
3356 	cursor.key_beg.delete_tid = 0;
3357 	cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
3358 	cursor.key_beg.obj_type = 0;
3359 
3360 	cursor.key_end = cursor.key_beg;
3361 	cursor.key_end.key += max_iterations;
3362 	cursor.asof = dip->obj_asof;
3363 	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
3364 
3365 	/*
3366 	 * Scan all matching records (the chain), locate the one matching
3367 	 * the requested path component.  The error code at search
3368 	 * termination could be 0 (a match), ENOENT (chain exhausted),
3369 	 * or something else.
3370 	 *
3371 	 * The hammer_ip_*() functions merge in-memory records with on-disk
3372 	 * records for the purposes of the search.
3373 	 */
3374 	error = hammer_ip_first(&cursor);
3375 
3376 	while (error == 0) {
3377 		error = hammer_ip_resolve_data(&cursor);
3378 		if (error)
3379 			break;
3380 		nlen = cursor.leaf->data_len - HAMMER_ENTRY_NAME_OFF;
3381 		KKASSERT(nlen > 0);
3382 		if (ncp->nc_nlen == nlen &&
3383 		    bcmp(ncp->nc_name, cursor.data->entry.name, nlen) == 0) {
3384 			break;
3385 		}
3386 		error = hammer_ip_next(&cursor);
3387 	}
3388 
3389 	/*
3390 	 * If all is ok we have to get the inode so we can adjust nlinks.
3391 	 * To avoid a deadlock with the flusher we must release the inode
3392 	 * lock on the directory when acquiring the inode for the entry.
3393 	 *
3394 	 * If the target is a directory, it must be empty.
3395 	 */
3396 	if (error == 0) {
3397 		hammer_unlock(&cursor.ip->lock);
3398 		ip = hammer_get_inode(trans, dip, cursor.data->entry.obj_id,
3399 				      hmp->asof,
3400 				      cursor.data->entry.localization,
3401 				      0, &error);
3402 		hammer_lock_sh(&cursor.ip->lock);
3403 		if (error == ENOENT) {
3404 			hkprintf("WARNING: Removing dirent w/missing inode "
3405 				"\"%s\"\n"
3406 				"\tobj_id = %016llx\n",
3407 				ncp->nc_name,
3408 				(long long)cursor.data->entry.obj_id);
3409 			error = 0;
3410 		}
3411 
3412 		/*
3413 		 * If isdir >= 0 we validate that the entry is or is not a
3414 		 * directory.  If isdir < 0 we don't care.
3415 		 */
3416 		if (error == 0 && isdir >= 0 && ip) {
3417 			if (isdir &&
3418 			    ip->ino_data.obj_type != HAMMER_OBJTYPE_DIRECTORY) {
3419 				error = ENOTDIR;
3420 			} else if (isdir == 0 &&
3421 			    ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
3422 				error = EISDIR;
3423 			}
3424 		}
3425 
3426 		/*
3427 		 * If we are trying to remove a directory the directory must
3428 		 * be empty.
3429 		 *
3430 		 * The check directory code can loop and deadlock/retry.  Our
3431 		 * own cursor's node locks must be released to avoid a 3-way
3432 		 * deadlock with the flusher if the check directory code
3433 		 * blocks.
3434 		 *
3435 		 * If any changes whatsoever have been made to the cursor,
3436 		 * set EDEADLK and retry.
3437 		 *
3438 		 * WARNING: See warnings in hammer_unlock_cursor()
3439 		 *	    function.
3440 		 */
3441 		if (error == 0 && ip && ip->ino_data.obj_type ==
3442 				        HAMMER_OBJTYPE_DIRECTORY) {
3443 			hammer_unlock_cursor(&cursor);
3444 			error = hammer_ip_check_directory_empty(trans, ip);
3445 			hammer_lock_cursor(&cursor);
3446 			if (cursor.flags & HAMMER_CURSOR_RETEST) {
3447 				hkprintf("Warning: avoided deadlock "
3448 					"on rmdir '%s'\n",
3449 					ncp->nc_name);
3450 				error = EDEADLK;
3451 			}
3452 		}
3453 
3454 		/*
3455 		 * Delete the directory entry.
3456 		 *
3457 		 * WARNING: hammer_ip_del_directory() may have to terminate
3458 		 * the cursor to avoid a deadlock.  It is ok to call
3459 		 * hammer_done_cursor() twice.
3460 		 */
3461 		if (error == 0) {
3462 			error = hammer_ip_del_directory(trans, &cursor,
3463 							dip, ip);
3464 		}
3465 		hammer_done_cursor(&cursor);
3466 		if (error == 0) {
3467 			/*
3468 			 * Tell the namecache that we are now unlinked.
3469 			 */
3470 			cache_unlink(nch);
3471 
3472 			/*
3473 			 * NOTE: ip->vp, if non-NULL, cannot be directly
3474 			 *	 referenced without formally acquiring the
3475 			 *	 vp since the vp might have zero refs on it,
3476 			 *	 or in the middle of a reclaim, etc.
3477 			 *
3478 			 * NOTE: The cache_setunresolved() can rip the vp
3479 			 *	 out from under us since the vp may not have
3480 			 *	 any refs, in which case ip->vp will be NULL
3481 			 *	 from the outset.
3482 			 */
3483 			while (ip && ip->vp) {
3484 				struct vnode *vp;
3485 
3486 				error = hammer_get_vnode(ip, &vp);
3487 				if (error == 0 && vp) {
3488 					vn_unlock(vp);
3489 					hammer_knote(ip->vp, NOTE_DELETE);
3490 #if 0
3491 					/*
3492 					 * Don't do this, it can deadlock
3493 					 * on concurrent rm's of hardlinks.
3494 					 * Shouldn't be needed any more.
3495 					 */
3496 					cache_inval_vp(ip->vp, CINV_DESTROY);
3497 #endif
3498 					vrele(vp);
3499 					break;
3500 				}
3501 				hdkprintf("ip/vp race1 avoided\n");
3502 			}
3503 		}
3504 		if (ip)
3505 			hammer_rel_inode(ip, 0);
3506 	} else {
3507 		hammer_done_cursor(&cursor);
3508 	}
3509 	if (error == EDEADLK)
3510 		goto retry;
3511 
3512 	return (error);
3513 }
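
/*
 * Illustrative sketch, not part of the build: the isdir contract used
 * by hammer_dounlink() above.  nrmdir passes 1 (the target must be a
 * directory), nremove passes 0 (it must not be), and nrename/nwhiteout
 * pass -1 (don't care).
 */
#if 0
#include <errno.h>

static int
sketch_check_isdir(int isdir, int target_is_dir)
{
	if (isdir > 0 && !target_is_dir)
		return (ENOTDIR);
	if (isdir == 0 && target_is_dir)
		return (EISDIR);
	return (0);		/* isdir < 0: no constraint */
}
#endif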
3514 
3515 /************************************************************************
3516  *			    FIFO AND SPECFS OPS				*
3517  ************************************************************************
3518  *
3519  */
3520 static int
3521 hammer_vop_fifoclose(struct vop_close_args *ap)
3522 {
3523 	/* XXX update itimes */
3524 	return (VOCALL(&fifo_vnode_vops, &ap->a_head));
3525 }
3526 
3527 static int
3528 hammer_vop_fiforead(struct vop_read_args *ap)
3529 {
3530 	int error;
3531 
3532 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3533 	/* XXX update access time */
3534 	return (error);
3535 }
3536 
3537 static int
3538 hammer_vop_fifowrite(struct vop_write_args *ap)
3539 {
3540 	int error;
3541 
3542 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3543 	/* XXX update access time */
3544 	return (error);
3545 }
3546 
3547 static
3548 int
3549 hammer_vop_fifokqfilter(struct vop_kqfilter_args *ap)
3550 {
3551 	int error;
3552 
3553 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
3554 	if (error)
3555 		error = hammer_vop_kqfilter(ap);
3556 	return(error);
3557 }
3558 
3559 /************************************************************************
3560  *			    KQFILTER OPS				*
3561  ************************************************************************
3562  *
3563  */
3564 static void filt_hammerdetach(struct knote *kn);
3565 static int filt_hammerread(struct knote *kn, long hint);
3566 static int filt_hammerwrite(struct knote *kn, long hint);
3567 static int filt_hammervnode(struct knote *kn, long hint);
3568 
3569 static struct filterops hammerread_filtops =
3570 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
3571 	  NULL, filt_hammerdetach, filt_hammerread };
3572 static struct filterops hammerwrite_filtops =
3573 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
3574 	  NULL, filt_hammerdetach, filt_hammerwrite };
3575 static struct filterops hammervnode_filtops =
3576 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
3577 	  NULL, filt_hammerdetach, filt_hammervnode };
3578 
3579 static
3580 int
3581 hammer_vop_kqfilter(struct vop_kqfilter_args *ap)
3582 {
3583 	struct vnode *vp = ap->a_vp;
3584 	struct knote *kn = ap->a_kn;
3585 
3586 	switch (kn->kn_filter) {
3587 	case EVFILT_READ:
3588 		kn->kn_fop = &hammerread_filtops;
3589 		break;
3590 	case EVFILT_WRITE:
3591 		kn->kn_fop = &hammerwrite_filtops;
3592 		break;
3593 	case EVFILT_VNODE:
3594 		kn->kn_fop = &hammervnode_filtops;
3595 		break;
3596 	default:
3597 		return (EOPNOTSUPP);
3598 	}
3599 
3600 	kn->kn_hook = (caddr_t)vp;
3601 
3602 	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3603 
3604 	return(0);
3605 }
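
/*
 * Illustrative userland sketch, not part of the build: what the filter
 * registration above services.  A process attaches an EVFILT_VNODE
 * knote to a file on a HAMMER mount and blocks until one of the
 * hammer_knote() calls in this file posts a matching hint.  Error
 * handling is omitted for brevity.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>

static void
sketch_watch(const char *path)
{
	struct kevent kev;
	int kq = kqueue();
	int fd = open(path, O_RDONLY);

	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	       NOTE_WRITE | NOTE_DELETE | NOTE_RENAME, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);	/* register the knote */
	kevent(kq, NULL, 0, &kev, 1, NULL);	/* block for one event */
	printf("fflags=%#x\n", (unsigned)kev.fflags);
}
#endif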
3606 
3607 static void
3608 filt_hammerdetach(struct knote *kn)
3609 {
3610 	struct vnode *vp = (void *)kn->kn_hook;
3611 
3612 	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
3613 }
3614 
3615 static int
3616 filt_hammerread(struct knote *kn, long hint)
3617 {
3618 	struct vnode *vp = (void *)kn->kn_hook;
3619 	hammer_inode_t ip = VTOI(vp);
3620 	hammer_mount_t hmp = ip->hmp;
3621 	off_t off;
3622 
3623 	if (hint == NOTE_REVOKE) {
3624 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3625 		return(1);
3626 	}
3627 	lwkt_gettoken(&hmp->fs_token);	/* XXX use per-ip-token */
3628 	off = ip->ino_data.size - kn->kn_fp->f_offset;
3629 	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
3630 	lwkt_reltoken(&hmp->fs_token);
3631 	if (kn->kn_sfflags & NOTE_OLDAPI)
3632 		return(1);
3633 	return (kn->kn_data != 0);
3634 }
3635 
3636 static int
3637 filt_hammerwrite(struct knote *kn, long hint)
3638 {
3639 	if (hint == NOTE_REVOKE)
3640 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
3641 	kn->kn_data = 0;
3642 	return (1);
3643 }
3644 
3645 static int
3646 filt_hammervnode(struct knote *kn, long hint)
3647 {
3648 	if (kn->kn_sfflags & hint)
3649 		kn->kn_fflags |= hint;
3650 	if (hint == NOTE_REVOKE) {
3651 		kn->kn_flags |= (EV_EOF | EV_NODATA);
3652 		return (1);
3653 	}
3654 	return (kn->kn_fflags != 0);
3655 }
3656 
3657