xref: /dragonfly/sys/vfs/hammer2/hammer2_vnops.c (revision 938e74dc)
1 /*
2  * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * Kernel Filesystem interface
38  *
39  * NOTE! local ipdata pointers must be reloaded on any modifying operation
40  *	 to the inode as its underlying chain may have changed.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
59 
60 #include "hammer2.h"
61 #include "hammer2_lz4.h"
62 
63 #include "zlib/hammer2_zlib.h"
64 
65 #define ZFOFFSET	(-2LL)
66 
67 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
68 				int seqcount);
69 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
70 				int ioflag, int seqcount);
71 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
72 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
73 static void hammer2_decompress_LZ4_callback(hammer2_io_t *dio,
74 				hammer2_chain_t *arg_c,
75 				void *arg_p, off_t arg_o);
76 static void hammer2_decompress_ZLIB_callback(hammer2_io_t *dio,
77 				hammer2_chain_t *arg_c,
78 				void *arg_p, off_t arg_o);
79 
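/*
 * Object caches supplying temporary staging buffers for the compression
 * read and write paths.
 */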
80 struct objcache *cache_buffer_read;
81 struct objcache *cache_buffer_write;
82 
83 /*
84  * Callback used in the read path when a block is compressed with LZ4.
85  */
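/*
 * A minimal sketch of the on-media layout the LZ4 read callback below
 * assumes (the struct name is illustrative, not a real declaration):
 *
 *	struct lz4_block {
 *		int	compressed_size;	// length of the payload
 *		char	payload[];		// LZ4-compressed stream
 *	};
 *
 * The callback reads the int header and decompresses payload[] into a
 * staging buffer of at most HAMMER2_PBUFSIZE bytes.
 */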
86 static
87 void
88 hammer2_decompress_LZ4_callback(hammer2_io_t *dio, hammer2_chain_t *arg_c,
89 				void *arg_p, off_t arg_o)
90 {
91 	struct buf *obp;
92 	struct bio *obio = arg_p;
93 	char *bdata;
94 	int bytes = 1 << (int)(arg_o & HAMMER2_OFF_MASK_RADIX);
95 
96 	/*
97 	 * If BIO_DONE is already set the device buffer was already
98 	 * fully valid (B_CACHE).  If it is not set then I/O was issued
99 	 * and we have to run I/O completion as the last bio.
100 	 *
101 	 * Nobody is waiting for our device I/O to complete; we are
102 	 * responsible for bqrelse()ing it, which means we also have to do
103 	 * the equivalent of biowait() and clear BIO_DONE (which breadcb()
104 	 * may have set).
105 	 *
106 	 * Any preexisting device buffer should match the requested size,
107 	 * but due to bigblock recycling and other factors there is some
108 	 * fragility there, so we assert that the device buffer covers
109 	 * the request.
110 	 */
111 	obp = obio->bio_buf;
112 
113 	if (dio->bp->b_flags & B_ERROR) {
114 		obp->b_flags |= B_ERROR;
115 		obp->b_error = dio->bp->b_error;
116 #if 0
117 	} else if (obio->bio_caller_info2.index &&
118 		   obio->bio_caller_info1.uvalue32 !=
119 		    crc32(dio->bp->b_data, dio->bp->b_bufsize)) {
120 		obp->b_flags |= B_ERROR;
121 		obp->b_error = EIO;
122 #endif
123 	} else {
124 		char *compressed_buffer;
125 		int *compressed_size;
126 		int result;
127 
128 		KKASSERT(obp->b_bufsize <= HAMMER2_PBUFSIZE);
129 		bdata = hammer2_io_data(dio, arg_o);
130 		compressed_size = (int *)bdata;
131 		compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
132 		KKASSERT((unsigned int)*compressed_size <= HAMMER2_PBUFSIZE);
133 		result = LZ4_decompress_safe(&bdata[sizeof(int)],
134 					     compressed_buffer,
135 					     *compressed_size,
136 					     obp->b_bufsize);
137 		if (result < 0) {
138 			kprintf("READ PATH: Error during decompression. "
139 				"bio %016jx/%d log %016jx/%d\n",
140 				(intmax_t)dio->pbase, dio->psize,
141 				(intmax_t)arg_o, bytes);
142 			/* make sure it isn't random garbage */
143 			bzero(compressed_buffer, obp->b_bufsize);
144 		}
145 		KKASSERT(result <= obp->b_bufsize);
146 		bcopy(compressed_buffer, obp->b_data, obp->b_bufsize);
147 		if (result < obp->b_bufsize)
148 			bzero(obp->b_data + result, obp->b_bufsize - result);
149 		objcache_put(cache_buffer_read, compressed_buffer);
150 		obp->b_resid = 0;
151 		obp->b_flags |= B_AGE;
152 	}
153 	biodone(obio);
154 }
155 
156 /*
157  * Callback used in the read path when a block is compressed with ZLIB.
158  * It is almost identical to the LZ4 callback, so in theory the two could
159  * be unified, but we did not want to change the bio structure for that.
160  */
161 static
162 void
163 hammer2_decompress_ZLIB_callback(hammer2_io_t *dio, hammer2_chain_t *arg_c,
164 				 void *arg_p, off_t arg_o)
165 {
166 	struct buf *obp;
167 	struct bio *obio = arg_p;
168 	char *bdata;
169 	int bytes = 1 << (int)(arg_o & HAMMER2_OFF_MASK_RADIX);
170 
171 	/*
172 	 * If BIO_DONE is already set the device buffer was already
173 	 * fully valid (B_CACHE).  If it is not set then I/O was issued
174 	 * and we have to run I/O completion as the last bio.
175 	 *
176 	 * Nobody is waiting for our device I/O to complete; we are
177 	 * responsible for bqrelse()ing it, which means we also have to do
178 	 * the equivalent of biowait() and clear BIO_DONE (which breadcb()
179 	 * may have set).
180 	 *
181 	 * Any preexisting device buffer should match the requested size,
182 	 * but due to bigblock recycling and other factors there is some
183 	 * fragility there, so we assert that the device buffer covers
184 	 * the request.
185 	 */
186 	obp = obio->bio_buf;
187 
188 	if (dio->bp->b_flags & B_ERROR) {
189 		obp->b_flags |= B_ERROR;
190 		obp->b_error = dio->bp->b_error;
191 #if 0
192 	} else if (obio->bio_caller_info2.index &&
193 		   obio->bio_caller_info1.uvalue32 !=
194 		    crc32(dio->bp->b_data, dio->bp->b_bufsize)) {
195 		obp->b_flags |= B_ERROR;
196 		obp->b_error = EIO;
197 #endif
198 	} else {
199 		char *compressed_buffer;
200 		z_stream strm_decompress;
201 		int result;
202 		int ret;
203 
204 		KKASSERT(obp->b_bufsize <= HAMMER2_PBUFSIZE);
205 		strm_decompress.avail_in = 0;
206 		strm_decompress.next_in = Z_NULL;
207 
208 		ret = inflateInit(&strm_decompress);
209 
210 		if (ret != Z_OK)
211 			kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");
212 
213 		bdata = hammer2_io_data(dio, arg_o);
214 		compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
215 		strm_decompress.next_in = bdata;
216 
217 		/* XXX supply proper size, subset of device bp */
218 		strm_decompress.avail_in = bytes;
219 		strm_decompress.next_out = compressed_buffer;
220 		strm_decompress.avail_out = obp->b_bufsize;
221 
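		/*
		 * Single-shot inflate: Z_FINISH tells zlib the entire input
		 * is present, so anything other than Z_STREAM_END indicates
		 * a decompression failure for this block.
		 */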
222 		ret = inflate(&strm_decompress, Z_FINISH);
223 		if (ret != Z_STREAM_END) {
224 			kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
225 			bzero(compressed_buffer, obp->b_bufsize);
226 		}
227 		bcopy(compressed_buffer, obp->b_data, obp->b_bufsize);
228 		result = obp->b_bufsize - strm_decompress.avail_out;
229 		if (result < obp->b_bufsize)
230 			bzero(obp->b_data + result, strm_decompress.avail_out);
231 		objcache_put(cache_buffer_read, compressed_buffer);
232 		obp->b_resid = 0;
233 		obp->b_flags |= B_AGE;
234 		ret = inflateEnd(&strm_decompress);
235 	}
236 	biodone(obio);
237 }
238 
239 static __inline
240 void
241 hammer2_knote(struct vnode *vp, int flags)
242 {
243 	if (flags)
244 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
245 }
246 
247 /*
248  * Last reference to a vnode is going away but it is still cached.
249  */
250 static
251 int
252 hammer2_vop_inactive(struct vop_inactive_args *ap)
253 {
254 	hammer2_inode_t *ip;
255 	hammer2_chain_t *parent;
256 	struct vnode *vp;
257 
258 	vp = ap->a_vp;
259 	ip = VTOI(vp);
260 
261 	/*
262 	 * Degenerate case
263 	 */
264 	if (ip == NULL) {
265 		vrecycle(vp);
266 		return (0);
267 	}
268 
269 	/*
270 	 * Detect updates to the embedded data which may be synchronized by
271 	 * the strategy code.  Simply mark the inode modified so it gets
272 	 * picked up by our normal flush.
273 	 */
274 	parent = hammer2_inode_lock_ex(ip);
275 	KKASSERT(parent);
276 
277 	/*
278 	 * Check for deleted inodes and recycle immediately.
279 	 */
280 	if (parent->flags & HAMMER2_CHAIN_UNLINKED) {
281 		hammer2_inode_unlock_ex(ip, parent);
282 		vrecycle(vp);
283 	} else {
284 		hammer2_inode_unlock_ex(ip, parent);
285 	}
286 	return (0);
287 }
288 
289 /*
290  * Reclaim a vnode so that it can be reused; after the inode is
291  * disassociated, the filesystem must manage it alone.
292  */
293 static
294 int
295 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
296 {
297 	hammer2_chain_t *chain;
298 	hammer2_inode_t *ip;
299 	hammer2_trans_t trans;
300 	struct vnode *vp;
301 
302 	vp = ap->a_vp;
303 	ip = VTOI(vp);
304 	if (ip == NULL)
305 		return(0);
306 
307 	/*
308 	 * Set update_hi so we can detect and propagate the DELETED
309 	 * bit in the flush code.
310 	 *
311 	 * ip->chain might be stale, correct it before checking as older
312 	 * versions of the chain are likely marked deleted even if the
313 	 * file hasn't been.  XXX ip->chain should never be stale on
314 	 * reclaim.
315 	 */
316 	chain = hammer2_inode_lock_ex(ip);
317 #if 0
318 	if (chain->next_parent)
319 		kprintf("RECLAIM DUPLINKED IP: %p ip->ch=%p ch=%p np=%p\n",
320 			ip, ip->chain, chain, chain->next_parent);
321 #endif
322 
323 	/*
324 	 * The final close of a deleted file or directory marks it for
325 	 * destruction.  The DELETED flag allows the flusher to shortcut
326 	 * any modified blocks still unflushed (that is, just ignore them).
327 	 *
328 	 * HAMMER2 usually does not try to optimize the freemap by returning
329 	 * deleted blocks to it as it does not usually know how many snapshots
330 	 * might be referencing portions of the file/dir.  XXX TODO.
331 	 *
332 	 * XXX TODO - However, any modified file as-of when a snapshot is made
333 	 *	      cannot use this optimization as some of the modifications
334 	 *	      may wind up being part of the snapshot.
335 	 */
336 	vp->v_data = NULL;
337 	ip->vp = NULL;
338 	if (chain->flags & HAMMER2_CHAIN_UNLINKED) {
339 		kprintf("unlink on reclaim: %s\n",
340 			chain->data->ipdata.filename);
341 		hammer2_trans_init(&trans, ip->pmp, NULL,
342 				   HAMMER2_TRANS_BUFCACHE);
343 		hammer2_chain_delete(&trans, chain, 0);
344 		hammer2_chain_setsubmod(&trans, chain);
345 		spin_lock(&chain->core->cst.spin);
346 		if (chain->core->update_hi < trans.sync_tid)
347 			chain->core->update_hi = trans.sync_tid; /* needed? */
348 		spin_unlock(&chain->core->cst.spin);
349 		hammer2_trans_done(&trans);
350 	}
351 
352 	/*
353 	 * NOTE! We do not attempt to flush chains here; flushing is
354 	 *	 really fragile and could also deadlock.
355 	 */
356 	vclrisdirty(vp);
357 	hammer2_inode_unlock_ex(ip, chain);		/* unlock */
358 	hammer2_inode_drop(ip);				/* vp ref */
359 	/* chain no longer referenced */
360 	/* chain = NULL; not needed */
361 
362 	/*
363 	 * XXX handle background sync when ip is dirty; the kernel will no longer
364 	 * notify us regarding this inode because there is no longer a
365 	 * vnode attached to it.
366 	 */
367 
368 	return (0);
369 }
370 
371 static
372 int
373 hammer2_vop_fsync(struct vop_fsync_args *ap)
374 {
375 	hammer2_inode_t *ip;
376 	hammer2_trans_t trans;
377 	hammer2_chain_t *chain;
378 	struct vnode *vp;
379 
380 	vp = ap->a_vp;
381 	ip = VTOI(vp);
382 
383 #if 0
384 	/* XXX can't do this yet */
385 	hammer2_trans_init(&trans, ip->pmp, NULL, HAMMER2_TRANS_ISFLUSH);
386 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
387 	hammer2_trans_clear_invfsync(&trans);
388 #endif
389 	hammer2_trans_init(&trans, ip->pmp, NULL, 0);
390 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
391 
392 	/*
393 	 * Calling chain_flush here creates a lot of duplicative
394 	 * COW operations due to non-optimal vnode ordering.
395 	 *
396 	 * Only do it for an actual fsync() syscall.  The other forms
397 	 * which call this function will eventually call chain_flush
398 	 * on the volume root as a catch-all, which is far more optimal.
399 	 */
400 	chain = hammer2_inode_lock_ex(ip);
401 	atomic_clear_int(&ip->flags, HAMMER2_INODE_MODIFIED);
402 	vclrisdirty(vp);
403 	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MTIME))
404 		hammer2_inode_fsync(&trans, ip, &chain);
405 
406 #if 0
407 	/*
408 	 * XXX creates discontinuity w/modify_tid
409 	 */
410 	if (ap->a_flags & VOP_FSYNC_SYSCALL) {
411 		hammer2_chain_flush(&trans, &chain);
412 	}
413 #endif
414 	hammer2_inode_unlock_ex(ip, chain);
415 	hammer2_trans_done(&trans);
416 
417 	return (0);
418 }
419 
420 static
421 int
422 hammer2_vop_access(struct vop_access_args *ap)
423 {
424 	hammer2_inode_t *ip = VTOI(ap->a_vp);
425 	hammer2_inode_data_t *ipdata;
426 	hammer2_chain_t *chain;
427 	uid_t uid;
428 	gid_t gid;
429 	int error;
430 
431 	chain = hammer2_inode_lock_sh(ip);
432 	ipdata = &chain->data->ipdata;
433 	uid = hammer2_to_unix_xid(&ipdata->uid);
434 	gid = hammer2_to_unix_xid(&ipdata->gid);
435 	error = vop_helper_access(ap, uid, gid, ipdata->mode, ipdata->uflags);
436 	hammer2_inode_unlock_sh(ip, chain);
437 
438 	return (error);
439 }
440 
441 static
442 int
443 hammer2_vop_getattr(struct vop_getattr_args *ap)
444 {
445 	hammer2_inode_data_t *ipdata;
446 	hammer2_chain_t *chain;
447 	hammer2_pfsmount_t *pmp;
448 	hammer2_inode_t *ip;
449 	struct vnode *vp;
450 	struct vattr *vap;
451 
452 	vp = ap->a_vp;
453 	vap = ap->a_vap;
454 
455 	ip = VTOI(vp);
456 	pmp = ip->pmp;
457 
458 	chain = hammer2_inode_lock_sh(ip);
459 	ipdata = &chain->data->ipdata;
460 
461 	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
462 	vap->va_fileid = ipdata->inum;
463 	vap->va_mode = ipdata->mode;
464 	vap->va_nlink = ipdata->nlinks;
465 	vap->va_uid = hammer2_to_unix_xid(&ipdata->uid);
466 	vap->va_gid = hammer2_to_unix_xid(&ipdata->gid);
467 	vap->va_rmajor = 0;
468 	vap->va_rminor = 0;
469 	vap->va_size = ip->size;	/* protected by shared lock */
470 	vap->va_blocksize = HAMMER2_PBUFSIZE;
471 	vap->va_flags = ipdata->uflags;
472 	hammer2_time_to_timespec(ipdata->ctime, &vap->va_ctime);
473 	hammer2_time_to_timespec(ipdata->mtime, &vap->va_mtime);
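	/* atime is not maintained; report mtime in its place */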
474 	hammer2_time_to_timespec(ipdata->mtime, &vap->va_atime);
475 	vap->va_gen = 1;
476 	vap->va_bytes = vap->va_size;	/* XXX */
477 	vap->va_type = hammer2_get_vtype(chain);
478 	vap->va_filerev = 0;
479 	vap->va_uid_uuid = ipdata->uid;
480 	vap->va_gid_uuid = ipdata->gid;
481 	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
482 			  VA_FSID_UUID_VALID;
483 
484 	hammer2_inode_unlock_sh(ip, chain);
485 
486 	return (0);
487 }
488 
489 static
490 int
491 hammer2_vop_setattr(struct vop_setattr_args *ap)
492 {
493 	hammer2_inode_data_t *ipdata;
494 	hammer2_inode_t *ip;
495 	hammer2_chain_t *chain;
496 	hammer2_trans_t trans;
497 	struct vnode *vp;
498 	struct vattr *vap;
499 	int error;
500 	int kflags = 0;
501 	int domtime = 0;
502 	uint64_t ctime;
503 
504 	vp = ap->a_vp;
505 	vap = ap->a_vap;
506 	hammer2_update_time(&ctime);
507 
508 	ip = VTOI(vp);
509 
510 	if (ip->pmp->ronly)
511 		return(EROFS);
512 
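	/* flow control: wait for excessive dirty-chain buildup to subside */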
513 	hammer2_chain_memory_wait(ip->pmp);
514 	hammer2_trans_init(&trans, ip->pmp, NULL, 0);
515 	chain = hammer2_inode_lock_ex(ip);
516 	ipdata = &chain->data->ipdata;
517 	error = 0;
518 
519 	if (vap->va_flags != VNOVAL) {
520 		u_int32_t flags;
521 
522 		flags = ipdata->uflags;
523 		error = vop_helper_setattr_flags(&flags, vap->va_flags,
524 					 hammer2_to_unix_xid(&ipdata->uid),
525 					 ap->a_cred);
526 		if (error == 0) {
527 			if (ipdata->uflags != flags) {
528 				ipdata = hammer2_chain_modify_ip(&trans, ip,
529 								 &chain, 0);
530 				ipdata->uflags = flags;
531 				ipdata->ctime = ctime;
532 				kflags |= NOTE_ATTRIB;
533 			}
534 			if (ipdata->uflags & (IMMUTABLE | APPEND)) {
535 				error = 0;
536 				goto done;
537 			}
538 		}
539 		goto done;
540 	}
541 	if (ipdata->uflags & (IMMUTABLE | APPEND)) {
542 		error = EPERM;
543 		goto done;
544 	}
545 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
546 		mode_t cur_mode = ipdata->mode;
547 		uid_t cur_uid = hammer2_to_unix_xid(&ipdata->uid);
548 		gid_t cur_gid = hammer2_to_unix_xid(&ipdata->gid);
549 		uuid_t uuid_uid;
550 		uuid_t uuid_gid;
551 
552 		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
553 					 ap->a_cred,
554 					 &cur_uid, &cur_gid, &cur_mode);
555 		if (error == 0) {
556 			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
557 			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
558 			if (bcmp(&uuid_uid, &ipdata->uid, sizeof(uuid_uid)) ||
559 			    bcmp(&uuid_gid, &ipdata->gid, sizeof(uuid_gid)) ||
560 			    ipdata->mode != cur_mode
561 			) {
562 				ipdata = hammer2_chain_modify_ip(&trans, ip,
563 								 &chain, 0);
564 				ipdata->uid = uuid_uid;
565 				ipdata->gid = uuid_gid;
566 				ipdata->mode = cur_mode;
567 				ipdata->ctime = ctime;
568 			}
569 			kflags |= NOTE_ATTRIB;
570 		}
571 	}
572 
573 	/*
574 	 * Resize the file
575 	 */
576 	if (vap->va_size != VNOVAL && ip->size != vap->va_size) {
577 		switch(vp->v_type) {
578 		case VREG:
579 			if (vap->va_size == ip->size)
580 				break;
581 			hammer2_inode_unlock_ex(ip, chain);
582 			if (vap->va_size < ip->size) {
583 				hammer2_truncate_file(ip, vap->va_size);
584 			} else {
585 				hammer2_extend_file(ip, vap->va_size);
586 			}
587 			chain = hammer2_inode_lock_ex(ip);
588 			ipdata = &chain->data->ipdata; /* RELOAD */
589 			domtime = 1;
590 			break;
591 		default:
592 			error = EINVAL;
593 			goto done;
594 		}
595 	}
596 #if 0
597 	/* atime not supported */
598 	if (vap->va_atime.tv_sec != VNOVAL) {
599 		ipdata = hammer2_chain_modify_ip(&trans, ip, &chain, 0);
600 		ipdata->atime = hammer2_timespec_to_time(&vap->va_atime);
601 		kflags |= NOTE_ATTRIB;
602 	}
603 #endif
604 	if (vap->va_mtime.tv_sec != VNOVAL) {
605 		ipdata = hammer2_chain_modify_ip(&trans, ip, &chain, 0);
606 		ipdata->mtime = hammer2_timespec_to_time(&vap->va_mtime);
607 		kflags |= NOTE_ATTRIB;
608 		domtime = 0;
609 	}
610 	if (vap->va_mode != (mode_t)VNOVAL) {
611 		mode_t cur_mode = ipdata->mode;
612 		uid_t cur_uid = hammer2_to_unix_xid(&ipdata->uid);
613 		gid_t cur_gid = hammer2_to_unix_xid(&ipdata->gid);
614 
615 		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
616 					 cur_uid, cur_gid, &cur_mode);
617 		if (error == 0 && ipdata->mode != cur_mode) {
618 			ipdata = hammer2_chain_modify_ip(&trans, ip, &chain, 0);
619 			ipdata->mode = cur_mode;
620 			ipdata->ctime = ctime;
621 			kflags |= NOTE_ATTRIB;
622 		}
623 	}
624 
625 	/*
626 	 * If a truncation occurred we must call inode_fsync() now in order
627 	 * to trim the related data chains, otherwise a later expansion can
628 	 * cause havoc.
629 	 */
630 	hammer2_inode_fsync(&trans, ip, &chain);
631 
632 	/*
633 	 * Cleanup.  If domtime is set an additional inode modification
634 	 * must be flagged.  All other modifications will have already
635 	 * set INODE_MODIFIED and called vsetisdirty().
636 	 */
637 done:
638 	if (domtime) {
639 		atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
640 					   HAMMER2_INODE_MTIME);
641 		vsetisdirty(ip->vp);
642 	}
643 	hammer2_inode_unlock_ex(ip, chain);
644 	hammer2_trans_done(&trans);
645 	hammer2_knote(ip->vp, kflags);
646 
647 	return (error);
648 }
649 
650 static
651 int
652 hammer2_vop_readdir(struct vop_readdir_args *ap)
653 {
654 	hammer2_inode_data_t *ipdata;
655 	hammer2_inode_t *ip;
656 	hammer2_inode_t *xip;
657 	hammer2_chain_t *parent;
658 	hammer2_chain_t *chain;
659 	hammer2_chain_t *xchain;
660 	hammer2_tid_t inum;
661 	hammer2_key_t key_next;
662 	hammer2_key_t lkey;
663 	struct uio *uio;
664 	off_t *cookies;
665 	off_t saveoff;
666 	int cookie_index;
667 	int cache_index = -1;
668 	int ncookies;
669 	int error;
670 	int dtype;
671 	int r;
672 
673 	ip = VTOI(ap->a_vp);
674 	uio = ap->a_uio;
675 	saveoff = uio->uio_offset;
676 
677 	/*
678 	 * Set up directory entry cookies if requested
679 	 */
680 	if (ap->a_ncookies) {
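		/*
		 * Rough upper bound: assume each directory entry consumes
		 * at least 16 bytes of the uio space.
		 */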
681 		ncookies = uio->uio_resid / 16 + 1;
682 		if (ncookies > 1024)
683 			ncookies = 1024;
684 		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
685 	} else {
686 		ncookies = -1;
687 		cookies = NULL;
688 	}
689 	cookie_index = 0;
690 
691 	parent = hammer2_inode_lock_sh(ip);
692 	ipdata = &parent->data->ipdata;
693 
694 	/*
695 	 * Handle artificial entries.  To ensure that only positive 64 bit
696 	 * quantities are returned to userland we always strip off bit 63.
697 	 * The hash code is designed such that codes 0x0000-0x7FFF are not
698 	 * used, allowing us to use these codes for artificial entries.
699 	 *
700 	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
701 	 * allow '..' to cross the mount point into (e.g.) the super-root.
702 	 */
703 	error = 0;
704 	chain = (void *)(intptr_t)-1;	/* non-NULL for early goto done case */
705 
706 	if (saveoff == 0) {
707 		inum = ipdata->inum & HAMMER2_DIRHASH_USERMSK;
708 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
709 		if (r)
710 			goto done;
711 		if (cookies)
712 			cookies[cookie_index] = saveoff;
713 		++saveoff;
714 		++cookie_index;
715 		if (cookie_index == ncookies)
716 			goto done;
717 	}
718 
719 	if (saveoff == 1) {
720 		/*
721 		 * Be careful with lockorder when accessing ".."
722 		 *
723 		 * (ip is the current dir. xip is the parent dir).
724 		 */
725 		inum = ipdata->inum & HAMMER2_DIRHASH_USERMSK;
726 		while (ip->pip != NULL && ip != ip->pmp->iroot) {
727 			xip = ip->pip;
728 			hammer2_inode_ref(xip);
729 			hammer2_inode_unlock_sh(ip, parent);
730 			xchain = hammer2_inode_lock_sh(xip);
731 			parent = hammer2_inode_lock_sh(ip);
732 			hammer2_inode_drop(xip);
733 			if (xip == ip->pip) {
734 				inum = xchain->data->ipdata.inum &
735 				       HAMMER2_DIRHASH_USERMSK;
736 				hammer2_inode_unlock_sh(xip, xchain);
737 				break;
738 			}
739 			hammer2_inode_unlock_sh(xip, xchain);
740 		}
741 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
742 		if (r)
743 			goto done;
744 		if (cookies)
745 			cookies[cookie_index] = saveoff;
746 		++saveoff;
747 		++cookie_index;
748 		if (cookie_index == ncookies)
749 			goto done;
750 	}
751 
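	/*
	 * Resume the scan at the saved hash offset.  Oring in
	 * DIRHASH_VISIBLE restores bit 63, which is stripped from offsets
	 * returned to userland (see the artificial entry comment above).
	 */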
752 	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
753 	if (hammer2_debug & 0x0020)
754 		kprintf("readdir: lkey %016jx\n", lkey);
755 
756 	/*
757 	 * parent is the inode chain, already locked for us.  Don't
758 	 * double lock shared locks as this will screw up upgrades.
759 	 */
760 	if (error) {
761 		goto done;
762 	}
763 	chain = hammer2_chain_lookup(&parent, &key_next, lkey, lkey,
764 				     &cache_index, HAMMER2_LOOKUP_SHARED);
765 	if (chain == NULL) {
766 		chain = hammer2_chain_lookup(&parent, &key_next,
767 					     lkey, (hammer2_key_t)-1,
768 					     &cache_index,
769 					     HAMMER2_LOOKUP_SHARED);
770 	}
771 	while (chain) {
772 		if (hammer2_debug & 0x0020)
773 			kprintf("readdir: p=%p chain=%p %016jx (next %016jx)\n",
774 				parent, chain, chain->bref.key, key_next);
775 		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
776 			dtype = hammer2_get_dtype(chain);
777 			saveoff = chain->bref.key & HAMMER2_DIRHASH_USERMSK;
778 			r = vop_write_dirent(&error, uio,
779 					     chain->data->ipdata.inum &
780 					      HAMMER2_DIRHASH_USERMSK,
781 					     dtype,
782 					     chain->data->ipdata.name_len,
783 					     chain->data->ipdata.filename);
784 			if (r)
785 				break;
786 			if (cookies)
787 				cookies[cookie_index] = saveoff;
788 			++cookie_index;
789 		} else {
790 			/* XXX chain error */
791 			kprintf("bad chain type readdir %d\n",
792 				chain->bref.type);
793 		}
794 
795 		/*
796 		 * Keys may not be returned in order so once we have a
797 		 * placemarker (chain) the scan must allow the full range
798 		 * or some entries will be missed.
799 		 */
800 		chain = hammer2_chain_next(&parent, chain, &key_next,
801 					   key_next, (hammer2_key_t)-1,
802 					   &cache_index, HAMMER2_LOOKUP_SHARED);
803 		if (chain) {
804 			saveoff = (chain->bref.key &
805 				   HAMMER2_DIRHASH_USERMSK) + 1;
806 		} else {
807 			saveoff = (hammer2_key_t)-1;
808 		}
809 		if (cookie_index == ncookies)
810 			break;
811 	}
812 	if (chain)
813 		hammer2_chain_unlock(chain);
814 done:
815 	hammer2_inode_unlock_sh(ip, parent);
816 	if (ap->a_eofflag)
817 		*ap->a_eofflag = (chain == NULL);
818 	if (hammer2_debug & 0x0020)
819 		kprintf("readdir: done at %016jx\n", saveoff);
820 	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
821 	if (error && cookie_index == 0) {
822 		if (cookies) {
823 			kfree(cookies, M_TEMP);
824 			*ap->a_ncookies = 0;
825 			*ap->a_cookies = NULL;
826 		}
827 	} else {
828 		if (cookies) {
829 			*ap->a_ncookies = cookie_index;
830 			*ap->a_cookies = cookies;
831 		}
832 	}
833 	return (error);
834 }
835 
836 /*
837  * hammer2_vop_readlink { vp, uio, cred }
838  */
839 static
840 int
841 hammer2_vop_readlink(struct vop_readlink_args *ap)
842 {
843 	struct vnode *vp;
844 	hammer2_inode_t *ip;
845 	int error;
846 
847 	vp = ap->a_vp;
848 	if (vp->v_type != VLNK)
849 		return (EINVAL);
850 	ip = VTOI(vp);
851 
852 	error = hammer2_read_file(ip, ap->a_uio, 0);
853 	return (error);
854 }
855 
856 static
857 int
858 hammer2_vop_read(struct vop_read_args *ap)
859 {
860 	struct vnode *vp;
861 	hammer2_inode_t *ip;
862 	struct uio *uio;
863 	int error;
864 	int seqcount;
865 	int bigread;
866 
867 	/*
868 	 * Read operations supported on this vnode?
869 	 */
870 	vp = ap->a_vp;
871 	if (vp->v_type != VREG)
872 		return (EINVAL);
873 
874 	/*
875 	 * Misc
876 	 */
877 	ip = VTOI(vp);
878 	uio = ap->a_uio;
879 	error = 0;
880 
881 	seqcount = ap->a_ioflag >> 16;
882 	bigread = (uio->uio_resid > 100 * 1024 * 1024);
883 
884 	error = hammer2_read_file(ip, uio, seqcount);
885 	return (error);
886 }
887 
888 static
889 int
890 hammer2_vop_write(struct vop_write_args *ap)
891 {
892 	hammer2_inode_t *ip;
893 	hammer2_trans_t trans;
894 	thread_t td;
895 	struct vnode *vp;
896 	struct uio *uio;
897 	int error;
898 	int seqcount;
899 	int bigwrite;
900 
901 	/*
902 	 * Write operations supported on this vnode?
903 	 */
904 	vp = ap->a_vp;
905 	if (vp->v_type != VREG)
906 		return (EINVAL);
907 
908 	/*
909 	 * Misc
910 	 */
911 	ip = VTOI(vp);
912 	uio = ap->a_uio;
913 	error = 0;
914 	if (ip->pmp->ronly)
915 		return (EROFS);
916 
917 	seqcount = ap->a_ioflag >> 16;
918 	bigwrite = (uio->uio_resid > 100 * 1024 * 1024);
919 
920 	/*
921 	 * Check resource limit
922 	 */
923 	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
924 	    uio->uio_offset + uio->uio_resid >
925 	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
926 		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
927 		return (EFBIG);
928 	}
929 
932 	/*
933 	 * The transaction interlocks against flush initiations
934 	 * (note: but will run concurrently with the actual flush).
935 	 */
936 	hammer2_trans_init(&trans, ip->pmp, NULL, 0);
937 	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
938 	hammer2_trans_done(&trans);
939 
940 	return (error);
941 }
942 
943 /*
944  * Perform read operations on a file or symlink given an UNLOCKED
945  * inode and uio.
946  *
947  * The inode must not be locked.
948  */
949 static
950 int
951 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
952 {
953 	hammer2_off_t size;
954 	struct buf *bp;
955 	int error;
956 
957 	error = 0;
958 
959 	/*
960 	 * UIO read loop.
961 	 */
962 	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
963 	size = ip->size;
964 	ccms_thread_unlock(&ip->topo_cst);
965 
966 	while (uio->uio_resid > 0 && uio->uio_offset < size) {
967 		hammer2_key_t lbase;
968 		hammer2_key_t leof;
969 		int lblksize;
970 		int loff;
971 		int n;
972 
973 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
974 						&lbase, &leof);
975 
976 		error = cluster_read(ip->vp, leof, lbase, lblksize,
977 				     uio->uio_resid, seqcount * BKVASIZE,
978 				     &bp);
979 
980 		if (error)
981 			break;
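		/*
		 * Clamp the copy length to the end of the logical block,
		 * the remaining uio request, and the file EOF.
		 */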
982 		loff = (int)(uio->uio_offset - lbase);
983 		n = lblksize - loff;
984 		if (n > uio->uio_resid)
985 			n = uio->uio_resid;
986 		if (n > size - uio->uio_offset)
987 			n = (int)(size - uio->uio_offset);
988 		bp->b_flags |= B_AGE;
989 		uiomove((char *)bp->b_data + loff, n, uio);
990 		bqrelse(bp);
991 	}
992 	return (error);
993 }
994 
995 /*
996  * Write to the file represented by the inode via the logical buffer cache.
997  * The inode may represent a regular file or a symlink.
998  *
999  * The inode must not be locked.
1000  */
1001 static
1002 int
1003 hammer2_write_file(hammer2_inode_t *ip,
1004 		   struct uio *uio, int ioflag, int seqcount)
1005 {
1006 	hammer2_key_t old_eof;
1007 	hammer2_key_t new_eof;
1008 	struct buf *bp;
1009 	int kflags;
1010 	int error;
1011 	int modified;
1012 
1013 	/*
1014 	 * Setup if append
1015 	 */
1016 	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
1017 	if (ioflag & IO_APPEND)
1018 		uio->uio_offset = ip->size;
1019 	old_eof = ip->size;
1020 	ccms_thread_unlock(&ip->topo_cst);
1021 
1022 	/*
1023 	 * Extend the file if necessary.  If the write fails at some point
1024 	 * we will truncate it back down to cover as much as we were able
1025 	 * to write.
1026 	 *
1027 	 * Doing this now makes it easier to calculate buffer sizes in
1028 	 * the loop.
1029 	 */
1030 	kflags = 0;
1031 	error = 0;
1032 	modified = 0;
1033 
1034 	if (uio->uio_offset + uio->uio_resid > old_eof) {
1035 		new_eof = uio->uio_offset + uio->uio_resid;
1036 		modified = 1;
1037 		hammer2_extend_file(ip, new_eof);
1038 		kflags |= NOTE_EXTEND;
1039 	} else {
1040 		new_eof = old_eof;
1041 	}
1042 
1043 	/*
1044 	 * UIO write loop
1045 	 */
1046 	while (uio->uio_resid > 0) {
1047 		hammer2_key_t lbase;
1048 		int trivial;
1049 		int endofblk;
1050 		int lblksize;
1051 		int loff;
1052 		int n;
1053 
1054 		/*
1055 		 * Don't allow the buffer build to blow out the buffer
1056 		 * cache.
1057 		 */
1058 		if ((ioflag & IO_RECURSE) == 0)
1059 			bwillwrite(HAMMER2_PBUFSIZE);
1060 
1061 		/*
1062 		 * This nominally tells us how much we can cluster and
1063 		 * what the logical buffer size needs to be.  Currently
1064 		 * we don't try to cluster the write and just handle one
1065 		 * block at a time.
1066 		 */
1067 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
1068 						&lbase, NULL);
1069 		loff = (int)(uio->uio_offset - lbase);
1070 
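		/* logical block sizes are capped at 64KB */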
1071 		KKASSERT(lblksize <= 65536);
1072 
1073 		/*
1074 		 * Calculate bytes to copy for this transfer and whether the
1075 		 * copy completely covers the buffer or not.
1076 		 */
1077 		trivial = 0;
1078 		n = lblksize - loff;
1079 		if (n > uio->uio_resid) {
1080 			n = uio->uio_resid;
1081 			if (loff == 0 && uio->uio_offset + n == new_eof)
1082 				trivial = 1;
1083 			endofblk = 0;
1084 		} else {
1085 			if (loff == 0)
1086 				trivial = 1;
1087 			endofblk = 1;
1088 		}
1089 
1090 		/*
1091 		 * Get the buffer
1092 		 */
1093 		if (uio->uio_segflg == UIO_NOCOPY) {
1094 			/*
1095 			 * Issuing a write with the same data backing the
1096 			 * buffer.  Instantiate the buffer to collect the
1097 			 * backing vm pages, then read-in any missing bits.
1098 			 *
1099 			 * This case is used by vop_stdputpages().
1100 			 */
1101 			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
1102 			if ((bp->b_flags & B_CACHE) == 0) {
1103 				bqrelse(bp);
1104 				error = bread(ip->vp, lbase, lblksize, &bp);
1105 			}
1106 		} else if (trivial) {
1107 			/*
1108 			 * Even though we are entirely overwriting the buffer
1109 			 * we may still have to zero it out to avoid a
1110 			 * mmap/write visibility issue.
1111 			 */
1112 			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
1113 			if ((bp->b_flags & B_CACHE) == 0)
1114 				vfs_bio_clrbuf(bp);
1115 		} else {
1116 			/*
1117 			 * Partial overwrite, read in any missing bits then
1118 			 * replace the portion being written.
1119 			 *
1120 			 * (The strategy code will detect zero-fill physical
1121 			 * blocks for this case).
1122 			 */
1123 			error = bread(ip->vp, lbase, lblksize, &bp);
1124 			if (error == 0)
1125 				bheavy(bp);
1126 		}
1127 
1128 		if (error) {
1129 			brelse(bp);
1130 			break;
1131 		}
1132 
1133 		/*
1134 		 * Ok, copy the data in
1135 		 */
1136 		error = uiomove(bp->b_data + loff, n, uio);
1137 		kflags |= NOTE_WRITE;
1138 		modified = 1;
1139 		if (error) {
1140 			brelse(bp);
1141 			break;
1142 		}
1143 
1144 		/*
1145 		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1146 		 *	    with IO_SYNC or IO_ASYNC set.  These writes
1147 		 *	    must be handled as the pageout daemon expects.
1148 		 */
1149 		if (ioflag & IO_SYNC) {
1150 			bwrite(bp);
1151 		} else if ((ioflag & IO_DIRECT) && endofblk) {
1152 			bawrite(bp);
1153 		} else if (ioflag & IO_ASYNC) {
1154 			bawrite(bp);
1155 		} else {
1156 			bdwrite(bp);
1157 		}
1158 	}
1159 
1160 	/*
1161 	 * Cleanup.  If we extended the file EOF but failed to write it
1162 	 * through, the entire write is a failure and we have to back up.
1163 	 */
1164 	if (error && new_eof != old_eof) {
1165 		hammer2_truncate_file(ip, old_eof);
1166 	} else if (modified) {
1167 		ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
1168 		hammer2_update_time(&ip->mtime);
1169 		atomic_set_int(&ip->flags, HAMMER2_INODE_MTIME);
1170 		ccms_thread_unlock(&ip->topo_cst);
1171 	}
1172 	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
1173 	hammer2_knote(ip->vp, kflags);
1174 	vsetisdirty(ip->vp);
1175 
1176 	return error;
1177 }
1178 
1179 /*
1180  * Truncate the size of a file.  The inode must not be locked.
1181  *
1182  * NOTE: Caller handles setting HAMMER2_INODE_MODIFIED
1183  */
1184 static
1185 void
1186 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1187 {
1188 	hammer2_key_t lbase;
1189 	int nblksize;
1190 
1191 	if (ip->vp) {
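		/*
		 * Pass the buffer cache the new logical block size and the
		 * byte offset of nsize within its block so the tail of the
		 * straddling buffer can be zeroed.
		 */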
1192 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1193 		nvtruncbuf(ip->vp, nsize,
1194 			   nblksize, (int)nsize & (nblksize - 1),
1195 			   0);
1196 	}
1197 	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
1198 	ip->size = nsize;
1199 	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1200 	ccms_thread_unlock(&ip->topo_cst);
1201 }
1202 
1203 /*
1204  * Extend the size of a file.  The inode must not be locked.
1205  *
1206  * NOTE: Caller handles setting HAMMER2_INODE_MODIFIED
1207  */
1208 static
1209 void
1210 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1211 {
1212 	hammer2_key_t lbase;
1213 	hammer2_key_t osize;
1214 	int oblksize;
1215 	int nblksize;
1216 
1217 	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);
1218 	osize = ip->size;
1219 	ip->size = nsize;
1220 	ccms_thread_unlock(&ip->topo_cst);
1221 
1222 	if (ip->vp) {
1223 		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1224 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1225 		nvextendbuf(ip->vp,
1226 			    osize, nsize,
1227 			    oblksize, nblksize,
1228 			    -1, -1, 0);
1229 	}
1230 	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1231 }
1232 
1233 static
1234 int
1235 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1236 {
1237 	hammer2_inode_t *ip;
1238 	hammer2_inode_t *dip;
1239 	hammer2_chain_t *parent;
1240 	hammer2_chain_t *chain;
1241 	hammer2_chain_t *ochain;
1242 	hammer2_trans_t trans;
1243 	hammer2_key_t key_next;
1244 	hammer2_key_t lhc;
1245 	struct namecache *ncp;
1246 	const uint8_t *name;
1247 	size_t name_len;
1248 	int error = 0;
1249 	int cache_index = -1;
1250 	struct vnode *vp;
1251 
1252 	dip = VTOI(ap->a_dvp);
1253 	ncp = ap->a_nch->ncp;
1254 	name = ncp->nc_name;
1255 	name_len = ncp->nc_nlen;
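	/*
	 * Different names can hash to the same value, so the lookup below
	 * scans the entire lhc..lhc+HAMMER2_DIRHASH_LOMASK collision range
	 * and compares filenames to disambiguate.
	 */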
1256 	lhc = hammer2_dirhash(name, name_len);
1257 
1258 	/*
1259 	 * Note: In DragonFly the kernel handles '.' and '..'.
1260 	 */
1261 	parent = hammer2_inode_lock_sh(dip);
1262 	chain = hammer2_chain_lookup(&parent, &key_next,
1263 				     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
1264 				     &cache_index, HAMMER2_LOOKUP_SHARED);
1265 	while (chain) {
1266 		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE &&
1267 		    name_len == chain->data->ipdata.name_len &&
1268 		    bcmp(name, chain->data->ipdata.filename, name_len) == 0) {
1269 			break;
1270 		}
1271 		chain = hammer2_chain_next(&parent, chain, &key_next,
1272 					   key_next,
1273 					   lhc + HAMMER2_DIRHASH_LOMASK,
1274 					   &cache_index, HAMMER2_LOOKUP_SHARED);
1275 	}
1276 	hammer2_inode_unlock_sh(dip, parent);
1277 
1278 	/*
1279 	 * If the inode represents a forwarding entry for a hardlink we have
1280 	 * to locate the actual inode.  The original ip is saved for possible
1281 	 * deconsolidation.  (ip) will only be set to non-NULL when we have
1282 	 * to locate the real file via a hardlink.  ip will be referenced but
1283 	 * not locked in that situation.  chain is passed in locked and
1284 	 * returned locked.
1285 	 *
1286 	 * XXX what kind of chain lock?
1287 	 */
1288 	ochain = NULL;
1289 	if (chain && chain->data->ipdata.type == HAMMER2_OBJTYPE_HARDLINK) {
1290 		error = hammer2_hardlink_find(dip, &chain, &ochain);
1291 		if (error) {
1292 			kprintf("hammer2: unable to find hardlink\n");
1293 			if (chain) {
1294 				hammer2_chain_unlock(chain);
1295 				chain = NULL;
1296 			}
1297 			goto failed;
1298 		}
1299 	}
1300 
1301 	/*
1302 	 * Deconsolidate any hardlink whose nlinks == 1.  Ignore errors.
1303 	 * If an error occurs, chain and ip are left alone.
1304 	 *
1305 	 * XXX upgrade shared lock?
1306 	 */
1307 	if (ochain && chain &&
1308 	    chain->data->ipdata.nlinks == 1 && !dip->pmp->ronly) {
1309 		kprintf("hammer2: need to unconsolidate hardlink for %s\n",
1310 			chain->data->ipdata.filename);
1311 		/* XXX retain shared lock on dip? (currently not held) */
1312 		hammer2_trans_init(&trans, dip->pmp, NULL, 0);
1313 		hammer2_hardlink_deconsolidate(&trans, dip, &chain, &ochain);
1314 		hammer2_trans_done(&trans);
1315 	}
1316 
1317 	/*
1318 	 * Acquire the related vnode
1319 	 *
1320 	 * NOTE: For error processing, only ENOENT resolves the namecache
1321 	 *	 entry to NULL, otherwise we just return the error and
1322 	 *	 leave the namecache unresolved.
1323 	 *
1324 	 * NOTE: multiple hammer2_inode structures can be aliased to the
1325 	 *	 same chain element, for example for hardlinks.  This
1326 	 *	 use case does not 'reattach' inode associations that
1327 	 *	 might already exist, but always allocates a new one.
1328 	 *
1329 	 * WARNING: inode structure is locked exclusively via inode_get
1330 	 *	    but chain was locked shared.  inode_unlock_ex()
1331 	 *	    will handle it properly.
1332 	 */
1333 	if (chain) {
1334 		ip = hammer2_inode_get(dip->pmp, dip, chain);
1335 		vp = hammer2_igetv(ip, &error);
1336 		if (error == 0) {
1337 			vn_unlock(vp);
1338 			cache_setvp(ap->a_nch, vp);
1339 		} else if (error == ENOENT) {
1340 			cache_setvp(ap->a_nch, NULL);
1341 		}
1342 		hammer2_inode_unlock_ex(ip, chain);
1343 
1344 		/*
1345 		 * The vp should not be released until after we've disposed
1346 		 * of our locks, because it might cause vop_inactive() to
1347 		 * be called.
1348 		 */
1349 		if (vp)
1350 			vrele(vp);
1351 	} else {
1352 		error = ENOENT;
1353 		cache_setvp(ap->a_nch, NULL);
1354 	}
1355 failed:
1356 	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1357 		("resolve error %d/%p chain %p ap %p\n",
1358 		 error, ap->a_nch->ncp->nc_vp, chain, ap));
1359 	if (ochain)
1360 		hammer2_chain_drop(ochain);
1361 	return error;
1362 }
1363 
1364 static
1365 int
1366 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1367 {
1368 	hammer2_inode_t *dip;
1369 	hammer2_inode_t *ip;
1370 	hammer2_chain_t *parent;
1371 	int error;
1372 
1373 	dip = VTOI(ap->a_dvp);
1374 
1375 	if ((ip = dip->pip) == NULL) {
1376 		*ap->a_vpp = NULL;
1377 		return ENOENT;
1378 	}
1379 	parent = hammer2_inode_lock_ex(ip);
1380 	*ap->a_vpp = hammer2_igetv(ip, &error);
1381 	hammer2_inode_unlock_ex(ip, parent);
1382 
1383 	return error;
1384 }
1385 
1386 static
1387 int
1388 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1389 {
1390 	hammer2_inode_t *dip;
1391 	hammer2_inode_t *nip;
1392 	hammer2_trans_t trans;
1393 	hammer2_chain_t *chain;
1394 	struct namecache *ncp;
1395 	const uint8_t *name;
1396 	size_t name_len;
1397 	int error;
1398 
1399 	dip = VTOI(ap->a_dvp);
1400 	if (dip->pmp->ronly)
1401 		return (EROFS);
1402 
1403 	ncp = ap->a_nch->ncp;
1404 	name = ncp->nc_name;
1405 	name_len = ncp->nc_nlen;
1406 
1407 	hammer2_chain_memory_wait(dip->pmp);
1408 	hammer2_trans_init(&trans, dip->pmp, NULL, HAMMER2_TRANS_NEWINODE);
1409 	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1410 				   name, name_len, &chain, &error);
1411 	if (chain) chain->inode_reason = 1;	/* may be NULL on error */
1412 	if (error) {
1413 		KKASSERT(nip == NULL);
1414 		*ap->a_vpp = NULL;
1415 	} else {
1416 		*ap->a_vpp = hammer2_igetv(nip, &error);
1417 		hammer2_inode_unlock_ex(nip, chain);
1418 	}
1419 	hammer2_trans_done(&trans);
1420 
1421 	if (error == 0) {
1422 		cache_setunresolved(ap->a_nch);
1423 		cache_setvp(ap->a_nch, *ap->a_vpp);
1424 	}
1425 	return error;
1426 }
1427 
1428 /*
1429  * Return the largest contiguous physical disk range for the logical
1430  * request, in bytes.
1431  *
1432  * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
1433  *
1434  * Basically disabled, the logical buffer write thread has to deal with
1435  * buffers one-at-a-time.
1436  */
1437 static
1438 int
1439 hammer2_vop_bmap(struct vop_bmap_args *ap)
1440 {
1441 	*ap->a_doffsetp = NOOFFSET;
1442 	if (ap->a_runp)
1443 		*ap->a_runp = 0;
1444 	if (ap->a_runb)
1445 		*ap->a_runb = 0;
1446 	return (EOPNOTSUPP);
1447 }
1448 
1449 static
1450 int
1451 hammer2_vop_open(struct vop_open_args *ap)
1452 {
1453 	return vop_stdopen(ap);
1454 }
1455 
1456 /*
1457  * hammer2_vop_advlock { vp, id, op, fl, flags }
1458  */
1459 static
1460 int
1461 hammer2_vop_advlock(struct vop_advlock_args *ap)
1462 {
1463 	hammer2_inode_t *ip = VTOI(ap->a_vp);
1464 	hammer2_chain_t *parent;
1465 	hammer2_off_t size;
1466 
1467 	parent = hammer2_inode_lock_sh(ip);
1468 	size = parent->data->ipdata.size;
1469 	hammer2_inode_unlock_sh(ip, parent);
1470 	return (lf_advlock(ap, &ip->advlock, size));
1471 }
1472 
1474 static
1475 int
1476 hammer2_vop_close(struct vop_close_args *ap)
1477 {
1478 	return vop_stdclose(ap);
1479 }
1480 
1481 /*
1482  * hammer2_vop_nlink { nch, dvp, vp, cred }
1483  *
1484  * Create a hardlink from (vp) to {dvp, nch}.
1485  */
1486 static
1487 int
1488 hammer2_vop_nlink(struct vop_nlink_args *ap)
1489 {
1490 	hammer2_inode_t *fdip;	/* parent directory of source inode (ip->pip) */
1491 	hammer2_inode_t *tdip;	/* target directory to create link in */
1492 	hammer2_inode_t *cdip;	/* common parent directory */
1493 	hammer2_inode_t *ip;	/* inode we are hardlinking to */
1494 	hammer2_chain_t *chain;
1495 	hammer2_chain_t *fdchain;
1496 	hammer2_chain_t *tdchain;
1497 	hammer2_chain_t *cdchain;
1498 	hammer2_trans_t trans;
1499 	struct namecache *ncp;
1500 	const uint8_t *name;
1501 	size_t name_len;
1502 	int error;
1503 
1504 	tdip = VTOI(ap->a_dvp);
1505 	if (tdip->pmp->ronly)
1506 		return (EROFS);
1507 
1508 	ncp = ap->a_nch->ncp;
1509 	name = ncp->nc_name;
1510 	name_len = ncp->nc_nlen;
1511 
1512 	/*
1513 	 * ip represents the file being hardlinked.  The file could be a
1514 	 * normal file or a hardlink target if it has already been hardlinked.
1515 	 * If ip is a hardlinked target then ip->pip represents the location
1516 	 * of the hardlinked target, NOT the location of the hardlink pointer.
1517 	 *
1518 	 * Bump nlinks and potentially also create or move the hardlink
1519 	 * target in the parent directory common to (ip) and (tdip).  The
1520 	 * consolidation code can modify ip->chain and ip->pip.  The
1521 	 * returned chain is locked.
1522 	 */
1523 	ip = VTOI(ap->a_vp);
1524 	hammer2_chain_memory_wait(ip->pmp);
1525 	hammer2_trans_init(&trans, ip->pmp, NULL, HAMMER2_TRANS_NEWINODE);
1526 
1527 	/*
1528 	 * The common parent directory must be locked first to avoid deadlocks.
1529 	 * Also note that fdip and/or tdip might match cdip.
1530 	 */
1531 	fdip = ip->pip;
1532 	cdip = hammer2_inode_common_parent(fdip, tdip);
1533 	cdchain = hammer2_inode_lock_ex(cdip);
1534 	fdchain = hammer2_inode_lock_ex(fdip);
1535 	tdchain = hammer2_inode_lock_ex(tdip);
1536 	chain = hammer2_inode_lock_ex(ip);
1537 	error = hammer2_hardlink_consolidate(&trans, ip, &chain,
1538 					     cdip, &cdchain, 1);
1539 	if (error)
1540 		goto done;
1541 
1542 	/*
1543 	 * Create a directory entry connected to the specified chain.
1544 	 * The hardlink consolidation code has already adjusted ip->pip
1545 	 * to the common parent directory containing the actual hardlink
1547 	 * (which may be different from dip where we created our hardlink
1548 	 * entry. ip->chain always represents the actual hardlink and not
1549 	 * any of the pointers to the actual hardlink).
1550 	 *
1551 	 * WARNING! chain can get moved by the connect (indirectly due to
1552 	 *	    potential indirect block creation).
1553 	 */
1554 	error = hammer2_inode_connect(&trans, &chain, 1,
1555 				      tdip, &tdchain,
1556 				      name, name_len, 0);
1557 	if (error == 0) {
1558 		cache_setunresolved(ap->a_nch);
1559 		cache_setvp(ap->a_nch, ap->a_vp);
1560 	}
1561 done:
1562 	hammer2_inode_unlock_ex(ip, chain);
1563 	hammer2_inode_unlock_ex(tdip, tdchain);
1564 	hammer2_inode_unlock_ex(fdip, fdchain);
1565 	hammer2_inode_unlock_ex(cdip, cdchain);
1566 	hammer2_trans_done(&trans);
1567 
1568 	return error;
1569 }
1570 
1571 /*
1572  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1573  *
1574  * The operating system has already ensured that the directory entry
1575  * does not exist and done all appropriate namespace locking.
1576  */
1577 static
1578 int
1579 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1580 {
1581 	hammer2_inode_t *dip;
1582 	hammer2_inode_t *nip;
1583 	hammer2_trans_t trans;
1584 	hammer2_chain_t *nchain;
1585 	struct namecache *ncp;
1586 	const uint8_t *name;
1587 	size_t name_len;
1588 	int error;
1589 
1590 	dip = VTOI(ap->a_dvp);
1591 	if (dip->pmp->ronly)
1592 		return (EROFS);
1593 
1594 	ncp = ap->a_nch->ncp;
1595 	name = ncp->nc_name;
1596 	name_len = ncp->nc_nlen;
1597 	hammer2_chain_memory_wait(dip->pmp);
1598 	hammer2_trans_init(&trans, dip->pmp, NULL, HAMMER2_TRANS_NEWINODE);
1599 
1600 	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1601 				   name, name_len, &nchain, &error);
1602 	if (nchain) nchain->inode_reason = 2;	/* may be NULL on error */
1603 	if (error) {
1604 		KKASSERT(nip == NULL);
1605 		*ap->a_vpp = NULL;
1606 	} else {
1607 		*ap->a_vpp = hammer2_igetv(nip, &error);
1608 		hammer2_inode_unlock_ex(nip, nchain);
1609 	}
1610 	hammer2_trans_done(&trans);
1611 
1612 	if (error == 0) {
1613 		cache_setunresolved(ap->a_nch);
1614 		cache_setvp(ap->a_nch, *ap->a_vpp);
1615 	}
1616 	return error;
1617 }
1618 
1619 /*
1620  * hammer2_vop_nmknod { nch, dvp, vpp, cred, vap }
1621  */
1622 static
1623 int
1624 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1625 {
1626 	hammer2_inode_t *dip;
1627 	hammer2_inode_t *nip;
1628 	hammer2_trans_t trans;
1629 	hammer2_chain_t *nchain;
1630 	struct namecache *ncp;
1631 	const uint8_t *name;
1632 	size_t name_len;
1633 	int error;
1634 
1635 	dip = VTOI(ap->a_dvp);
1636 	if (dip->pmp->ronly)
1637 		return (EROFS);
1638 
1639 	ncp = ap->a_nch->ncp;
1640 	name = ncp->nc_name;
1641 	name_len = ncp->nc_nlen;
1642 	hammer2_chain_memory_wait(dip->pmp);
1643 	hammer2_trans_init(&trans, dip->pmp, NULL, HAMMER2_TRANS_NEWINODE);
1644 
1645 	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1646 				   name, name_len, &nchain, &error);
1647 	if (nchain) nchain->inode_reason = 3;	/* may be NULL on error */
1648 	if (error) {
1649 		KKASSERT(nip == NULL);
1650 		*ap->a_vpp = NULL;
1651 	} else {
1652 		*ap->a_vpp = hammer2_igetv(nip, &error);
1653 		hammer2_inode_unlock_ex(nip, nchain);
1654 	}
1655 	hammer2_trans_done(&trans);
1656 
1657 	if (error == 0) {
1658 		cache_setunresolved(ap->a_nch);
1659 		cache_setvp(ap->a_nch, *ap->a_vpp);
1660 	}
1661 	return error;
1662 }
1663 
1664 /*
1665  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1666  */
1667 static
1668 int
1669 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1670 {
1671 	hammer2_inode_t *dip;
1672 	hammer2_inode_t *nip;
1673 	hammer2_chain_t *nparent;
1674 	hammer2_trans_t trans;
1675 	struct namecache *ncp;
1676 	const uint8_t *name;
1677 	size_t name_len;
1678 	int error;
1679 
1680 	dip = VTOI(ap->a_dvp);
1681 	if (dip->pmp->ronly)
1682 		return (EROFS);
1683 
1684 	ncp = ap->a_nch->ncp;
1685 	name = ncp->nc_name;
1686 	name_len = ncp->nc_nlen;
1687 	hammer2_chain_memory_wait(dip->pmp);
1688 	hammer2_trans_init(&trans, dip->pmp, NULL, HAMMER2_TRANS_NEWINODE);
1689 
1690 	ap->a_vap->va_type = VLNK;	/* enforce type */
1691 
1692 	nip = hammer2_inode_create(&trans, dip, ap->a_vap, ap->a_cred,
1693 				   name, name_len, &nparent, &error);
1694 	if (nparent) nparent->inode_reason = 4;	/* may be NULL on error */
1695 	if (error) {
1696 		KKASSERT(nip == NULL);
1697 		*ap->a_vpp = NULL;
1698 		hammer2_trans_done(&trans);
1699 		return error;
1700 	}
1701 	*ap->a_vpp = hammer2_igetv(nip, &error);
1702 
1703 	/*
1704 	 * Build the softlink (~like file data) and finalize the namecache.
1705 	 */
1706 	if (error == 0) {
1707 		size_t bytes;
1708 		struct uio auio;
1709 		struct iovec aiov;
1710 		hammer2_inode_data_t *nipdata;
1711 
1712 		nipdata = &nip->chain->data->ipdata;
1713 		bytes = strlen(ap->a_target);
1714 
1715 		if (bytes <= HAMMER2_EMBEDDED_BYTES) {
1716 			KKASSERT(nipdata->op_flags &
1717 				 HAMMER2_OPFLAG_DIRECTDATA);
1718 			bcopy(ap->a_target, nipdata->u.data, bytes);
1719 			nipdata->size = bytes;
1720 			nip->size = bytes;
1721 			hammer2_inode_unlock_ex(nip, nparent);
1722 		} else {
1723 			hammer2_inode_unlock_ex(nip, nparent);
1724 			bzero(&auio, sizeof(auio));
1725 			bzero(&aiov, sizeof(aiov));
1726 			auio.uio_iov = &aiov;
1727 			auio.uio_segflg = UIO_SYSSPACE;
1728 			auio.uio_rw = UIO_WRITE;
1729 			auio.uio_resid = bytes;
1730 			auio.uio_iovcnt = 1;
1731 			auio.uio_td = curthread;
1732 			aiov.iov_base = ap->a_target;
1733 			aiov.iov_len = bytes;
1734 			error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1735 			nipdata = &nip->chain->data->ipdata; /* RELOAD */
1736 			/* XXX handle error */
1737 			error = 0;
1738 		}
1739 	} else {
1740 		hammer2_inode_unlock_ex(nip, nparent);
1741 	}
1742 	hammer2_trans_done(&trans);
1743 
1744 	/*
1745 	 * Finalize namecache
1746 	 */
1747 	if (error == 0) {
1748 		cache_setunresolved(ap->a_nch);
1749 		cache_setvp(ap->a_nch, *ap->a_vpp);
1750 		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
1751 	}
1752 	return error;
1753 }
1754 
1755 /*
1756  * hammer2_vop_nremove { nch, dvp, cred }
1757  */
1758 static
1759 int
1760 hammer2_vop_nremove(struct vop_nremove_args *ap)
1761 {
1762 	hammer2_inode_t *dip;
1763 	hammer2_trans_t trans;
1764 	struct namecache *ncp;
1765 	const uint8_t *name;
1766 	size_t name_len;
1767 	int error;
1768 
1769 	dip = VTOI(ap->a_dvp);
1770 	if (dip->pmp->ronly)
1771 		return(EROFS);
1772 
1773 	ncp = ap->a_nch->ncp;
1774 	name = ncp->nc_name;
1775 	name_len = ncp->nc_nlen;
1776 
1777 	hammer2_chain_memory_wait(dip->pmp);
1778 	hammer2_trans_init(&trans, dip->pmp, NULL, 0);
1779 	error = hammer2_unlink_file(&trans, dip, name, name_len,
1780 				    0, NULL, ap->a_nch);
1781 	hammer2_trans_done(&trans);
1782 	if (error == 0)
1783 		cache_unlink(ap->a_nch);
1784 	return (error);
1785 }
1786 
1787 /*
1788  * hammer2_vop_nrmdir { nch, dvp, cred }
1789  */
1790 static
1791 int
1792 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1793 {
1794 	hammer2_inode_t *dip;
1795 	hammer2_trans_t trans;
1796 	struct namecache *ncp;
1797 	const uint8_t *name;
1798 	size_t name_len;
1799 	int error;
1800 
1801 	dip = VTOI(ap->a_dvp);
1802 	if (dip->pmp->ronly)
1803 		return(EROFS);
1804 
1805 	ncp = ap->a_nch->ncp;
1806 	name = ncp->nc_name;
1807 	name_len = ncp->nc_nlen;
1808 
1809 	hammer2_chain_memory_wait(dip->pmp);
1810 	hammer2_trans_init(&trans, dip->pmp, NULL, 0);
1811 	error = hammer2_unlink_file(&trans, dip, name, name_len,
1812 				    1, NULL, ap->a_nch);
1813 	hammer2_trans_done(&trans);
1814 	if (error == 0)
1815 		cache_unlink(ap->a_nch);
1816 	return (error);
1817 }
1818 
1819 /*
1820  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1821  */
1822 static
1823 int
1824 hammer2_vop_nrename(struct vop_nrename_args *ap)
1825 {
1826 	struct namecache *fncp;
1827 	struct namecache *tncp;
1828 	hammer2_inode_t *cdip;
1829 	hammer2_inode_t *fdip;
1830 	hammer2_inode_t *tdip;
1831 	hammer2_inode_t *ip;
1832 	hammer2_chain_t *chain;
1833 	hammer2_chain_t *fdchain;
1834 	hammer2_chain_t *tdchain;
1835 	hammer2_chain_t *cdchain;
1836 	hammer2_trans_t trans;
1837 	const uint8_t *fname;
1838 	size_t fname_len;
1839 	const uint8_t *tname;
1840 	size_t tname_len;
1841 	int error;
1842 	int hlink;
1843 
1844 	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1845 		return(EXDEV);
1846 	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1847 		return(EXDEV);
1848 
1849 	fdip = VTOI(ap->a_fdvp);	/* source directory */
1850 	tdip = VTOI(ap->a_tdvp);	/* target directory */
1851 
1852 	if (fdip->pmp->ronly)
1853 		return(EROFS);
1854 
1855 	fncp = ap->a_fnch->ncp;		/* entry name in source */
1856 	fname = fncp->nc_name;
1857 	fname_len = fncp->nc_nlen;
1858 
1859 	tncp = ap->a_tnch->ncp;		/* entry name in target */
1860 	tname = tncp->nc_name;
1861 	tname_len = tncp->nc_nlen;
1862 
1863 	hammer2_chain_memory_wait(tdip->pmp);
1864 	hammer2_trans_init(&trans, tdip->pmp, NULL, 0);
1865 
1866 	/*
1867 	 * ip is the inode being renamed.  If this is a hardlink then
1868 	 * ip represents the actual file and not the hardlink marker.
1869 	 */
1870 	ip = VTOI(fncp->nc_vp);
1871 	chain = NULL;
1872 
1874 	/*
1875 	 * The common parent directory must be locked first to avoid deadlocks.
1876 	 * Also note that fdip and/or tdip might match cdip.
1877 	 *
1878 	 * WARNING! fdip may not match ip->pip.  That is, if the source file
1879 	 *	    is already a hardlink then what we are renaming is the
1880 	 *	    hardlink pointer, not the hardlink itself.  The hardlink
1881 	 *	    directory (ip->pip) will already be at a common parent
1882 	 *	    of fdip.
1883 	 *
1884 	 *	    Be sure to use ip->pip when finding the common parent
1885 	 *	    against tdip or we might accidentally move the hardlink
1886 	 *	    target into a subdirectory that makes it inaccessible to
1887 	 *	    other pointers.
1888 	 */
1889 	cdip = hammer2_inode_common_parent(ip->pip, tdip);
1890 	cdchain = hammer2_inode_lock_ex(cdip);
1891 	fdchain = hammer2_inode_lock_ex(fdip);
1892 	tdchain = hammer2_inode_lock_ex(tdip);
1893 
1894 	/*
1895 	 * Keep a tight grip on the inode so the temporary unlinking from
1896 	 * the source location prior to linking to the target location
1897 	 * does not cause the chain to be destroyed.
1898 	 *
1899 	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
1900 	 *	 unlinking elements from their directories.  Locking
1901 	 *	 the nlinks field does not lock the whole inode.
1902 	 */
1903 	hammer2_inode_ref(ip);
1904 
1905 	/*
1906 	 * Remove target if it exists
1907 	 */
1908 	error = hammer2_unlink_file(&trans, tdip, tname, tname_len,
1909 				    -1, NULL, ap->a_tnch);
1910 	if (error && error != ENOENT)
1911 		goto done;
1912 	cache_setunresolved(ap->a_tnch);
1913 
1914 	/*
1915 	 * When renaming a hardlinked file we may have to re-consolidate
1916 	 * the location of the hardlink target.  Also adjust nlinks by +1
1917 	 * to counteract the unlink below.
1918 	 *
1919 	 * If ip represents a regular file the consolidation code essentially
1920 	 * does nothing other than return the same locked chain that was
1921 	 * passed in.
1922 	 *
1923 	 * The returned chain will be locked.
1924 	 *
1925 	 * WARNING!  We do not currently have a local copy of ipdata.  If
1926 	 *	     we use one later, remember that it must be reloaded on
1927 	 *	     any modification to the inode, including connects.
1928 	 */
1929 	chain = hammer2_inode_lock_ex(ip);
1930 	error = hammer2_hardlink_consolidate(&trans, ip, &chain,
1931 					     cdip, &cdchain, 1);
1932 	if (error)
1933 		goto done;
1934 
1935 	/*
1936 	 * Disconnect (fdip, fname) from the source directory.  This will
1937 	 * disconnect (ip) if it represents a direct file.  If (ip) represents
1938 	 * a hardlink the HARDLINK pointer object will be removed but the
1939 	 * hardlink will stay intact.
1940 	 *
1941 	 * Always pass nch as NULL because we intend to reconnect the inode,
1942 	 * so we don't want hammer2_unlink_file() to rename it to the hidden
1943 	 * open-but-unlinked directory.
1944 	 *
1945 	 * The target chain may be marked DELETED but will not be destroyed
1946 	 * since we retain our hold on ip and chain.
1947 	 */
1948 	error = hammer2_unlink_file(&trans, fdip, fname, fname_len,
1949 				    -1, &hlink, NULL);
1950 	KKASSERT(error != EAGAIN);
1951 	if (error)
1952 		goto done;
1953 
1954 	/*
1955 	 * Reconnect ip to target directory using chain.  Chains cannot
1956 	 * actually be moved, so this will duplicate the chain in the new
1957 	 * spot and assign it to the ip, replacing the old chain.
1958 	 *
1959 	 * WARNING: Because recursive locks are allowed and we unlinked the
1960 	 *	    file that we have a chain-in-hand for just above, the
1961 	 *	    chain might have been delete-duplicated.  We must refactor
1962 	 *	    the chain.
1963 	 *
1964 	 * WARNING: Chain locks can lock buffer cache buffers, to avoid
1965 	 *	    deadlocks we want to unlock before issuing a cache_*()
1966 	 *	    op (that might have to lock a vnode).
1967 	 */
1968 	hammer2_chain_refactor(&chain);
1969 	error = hammer2_inode_connect(&trans, &chain, hlink,
1970 				      tdip, &tdchain,
1971 				      tname, tname_len, 0);
1972 	if (error == 0) {
1973 		KKASSERT(chain != NULL);
1974 		chain->inode_reason = 5;	/* debug: record connect site */
1975 		hammer2_inode_repoint(ip, (hlink ? ip->pip : tdip), chain);
1976 	}
1977 done:
1978 	hammer2_inode_unlock_ex(ip, chain);
1979 	hammer2_inode_unlock_ex(tdip, tdchain);
1980 	hammer2_inode_unlock_ex(fdip, fdchain);
1981 	hammer2_inode_unlock_ex(cdip, cdchain);
1982 	hammer2_inode_drop(ip);
1983 	hammer2_trans_done(&trans);
1984 
1985 	/*
1986 	 * Issue the namecache update after unlocking all the internal
1987 	 * hammer structures, otherwise we might deadlock.
1988 	 */
1989 	if (error == 0)
1990 		cache_rename(ap->a_fnch, ap->a_tnch);
1991 
1992 	return (error);
1993 }
1994 
1995 /*
1996  * Strategy code
1997  *
1998  * WARNING: The strategy code cannot safely use hammer2 transactions
1999  *	    as this can deadlock against vfs_sync's vfsync() call
2000  *	    if multiple flushes are queued.
2001  */
2002 static int hammer2_strategy_read(struct vop_strategy_args *ap);
2003 static int hammer2_strategy_write(struct vop_strategy_args *ap);
2004 static void hammer2_strategy_read_callback(hammer2_io_t *dio,
2005 				hammer2_chain_t *chain,
2006 				void *arg_p, off_t arg_o);
2007 
2008 static
2009 int
2010 hammer2_vop_strategy(struct vop_strategy_args *ap)
2011 {
2012 	struct bio *biop;
2013 	struct buf *bp;
2014 	int error;
2015 
2016 	biop = ap->a_bio;
2017 	bp = biop->bio_buf;
2018 
2019 	switch(bp->b_cmd) {
2020 	case BUF_CMD_READ:
2021 		error = hammer2_strategy_read(ap);
2022 		++hammer2_iod_file_read;
2023 		break;
2024 	case BUF_CMD_WRITE:
2025 		error = hammer2_strategy_write(ap);
2026 		++hammer2_iod_file_write;
2027 		break;
2028 	default:
2029 		bp->b_error = error = EINVAL;
2030 		bp->b_flags |= B_ERROR;
2031 		biodone(biop);
2032 		break;
2033 	}
2034 
2035 	return (error);
2036 }
2037 
2038 static
2039 int
2040 hammer2_strategy_read(struct vop_strategy_args *ap)
2041 {
2042 	struct buf *bp;
2043 	struct bio *bio;
2044 	struct bio *nbio;
2045 	hammer2_inode_t *ip;
2046 	hammer2_chain_t *parent;
2047 	hammer2_chain_t *chain;
2048 	hammer2_key_t key_dummy;
2049 	hammer2_key_t lbase;
2050 	int cache_index = -1;
2051 
2052 	bio = ap->a_bio;
2053 	bp = bio->bio_buf;
2054 	ip = VTOI(ap->a_vp);
2055 	nbio = push_bio(bio);
2056 
2057 	lbase = bio->bio_offset;
2058 	chain = NULL;
2059 	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
2060 
2061 	parent = hammer2_inode_lock_sh(ip);
2062 	chain = hammer2_chain_lookup(&parent, &key_dummy,
2063 				     lbase, lbase,
2064 				     &cache_index,
2065 				     HAMMER2_LOOKUP_NODATA |
2066 				     HAMMER2_LOOKUP_SHARED);
2067 
2068 	if (chain == NULL) {
2069 		/*
2070 		 * Data is zero-fill
2071 		 */
2072 		bp->b_resid = 0;
2073 		bp->b_error = 0;
2074 		bzero(bp->b_data, bp->b_bcount);
2075 		biodone(nbio);
2076 	} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2077 		/*
2078 		 * Data is embedded in the inode (copy from inode).
2079 		 */
2080 		hammer2_chain_load_async(chain,
2081 					 hammer2_strategy_read_callback,
2082 					 nbio, 0);
2083 	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2084 		/*
2085 		 * Data is on-media, issue device I/O and copy.
2086 		 *
2087 		 * XXX direct-IO shortcut could go here XXX.
2088 		 */
2089 		if (HAMMER2_DEC_COMP(chain->bref.methods) == HAMMER2_COMP_LZ4) {
2090 			/*
2091 			 * Block compression is determined by bref.methods
2092 			 */
2093 			hammer2_blockref_t *bref;
2094 
2095 			bref = &chain->bref;
2096 			hammer2_io_breadcb(chain->hmp, bref->data_off,
2097 					   chain->bytes,
2098 					   hammer2_decompress_LZ4_callback,
2099 					   NULL, nbio, bref->data_off);
2100 			/* XXX async read dev blk not protected by chain lk */
2101 			hammer2_chain_unlock(chain);
2102 		} else if (HAMMER2_DEC_COMP(chain->bref.methods) ==
2103 			   HAMMER2_COMP_ZLIB) {
2104 			hammer2_blockref_t *bref;
2105 
2106 			bref = &chain->bref;
2107 			hammer2_io_breadcb(chain->hmp, bref->data_off,
2108 					   chain->bytes,
2109 					   hammer2_decompress_ZLIB_callback,
2110 					   NULL, nbio, bref->data_off);
2111 			/* XXX async read dev blk not protected by chain lk */
2112 			hammer2_chain_unlock(chain);
2113 		} else {
2114 			hammer2_chain_load_async(chain,
2115 						 hammer2_strategy_read_callback,
2116 						 nbio, 0);
2117 		}
2118 	} else {
2119 		panic("READ PATH: hammer2_strategy_read: unknown bref type");
2120 		chain = NULL;
2121 	}
2122 	hammer2_inode_unlock_sh(ip, parent);
2123 	return (0);
2124 }
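
/*
 * Illustrative note: the read path above picks its decompression callback
 * from the compression method encoded in bref.methods.  A minimal sketch
 * of decoding that byte, assuming the HAMMER2_DEC_CHECK() counterpart to
 * the HAMMER2_DEC_COMP() macro used above (both from hammer2_disk.h):
 */
#if 0
static void
hammer2_show_methods_sketch(hammer2_chain_t *chain)
{
	int comp_algo = HAMMER2_DEC_COMP(chain->bref.methods);
	int check_algo = HAMMER2_DEC_CHECK(chain->bref.methods);

	kprintf("bref methods: comp=%d check=%d\n", comp_algo, check_algo);
}
#endif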
2125 
2126 /*
2127  * Read callback for block that is not compressed.
2128  */
2129 static
2130 void
2131 hammer2_strategy_read_callback(hammer2_io_t *dio, hammer2_chain_t *chain,
2132 			       void *arg_p, off_t arg_o __unused)
2133 {
2134 	struct bio *nbio = arg_p;
2135 	struct buf *bp = nbio->bio_buf;
2136 	char *data;
2137 
2138 	if (dio)
2139 		data = hammer2_io_data(dio, chain->bref.data_off);
2140 	else
2141 		data = (void *)chain->data;
2142 
2143 	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
2144 		/*
2145 		 * Data is embedded in the inode (copy from inode).
2146 		 */
2147 		bcopy(((hammer2_inode_data_t *)data)->u.data,
2148 		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
2149 		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
2150 		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
2151 		bp->b_resid = 0;
2152 		bp->b_error = 0;
2153 		hammer2_chain_unlock(chain);
2154 		biodone(nbio);
2155 	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
2156 		/*
2157 		 * Data is on-media, issue device I/O and copy.
2158 		 *
2159 		 * XXX direct-IO shortcut could go here XXX.
2160 		 */
2161 		KKASSERT(chain->bytes <= bp->b_bcount);
2162 		bcopy(data, bp->b_data, chain->bytes);
2163 		if (chain->bytes < bp->b_bcount) {
2164 			bzero(bp->b_data + chain->bytes,
2165 			      bp->b_bcount - chain->bytes);
2166 		}
2167 		bp->b_flags |= B_NOTMETA;
2168 		bp->b_resid = 0;
2169 		bp->b_error = 0;
2170 		hammer2_chain_unlock(chain);
2171 		biodone(nbio);
2172 	} else {
2173 		/* bqrelse the dio to help stabilize the call to panic() */
2174 		if (dio)
2175 			hammer2_io_bqrelse(&dio);
2176 		panic("hammer2_strategy_read: unknown bref type");
2177 		/*hammer2_chain_unlock(chain);*/
2178 		/*chain = NULL;*/
2179 	}
2180 }
2181 
2182 static
2183 int
2184 hammer2_strategy_write(struct vop_strategy_args *ap)
2185 {
2186 	hammer2_pfsmount_t *pmp;
2187 	struct bio *bio;
2188 	struct buf *bp;
2189 	hammer2_inode_t *ip;
2190 
2191 	bio = ap->a_bio;
2192 	bp = bio->bio_buf;
2193 	ip = VTOI(ap->a_vp);
2194 	pmp = ip->pmp;
2195 
2196 	hammer2_lwinprog_ref(pmp);
2197 	mtx_lock(&pmp->wthread_mtx);
2198 	if (TAILQ_EMPTY(&pmp->wthread_bioq.queue)) {
2199 		bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
2200 		mtx_unlock(&pmp->wthread_mtx);
2201 		wakeup(&pmp->wthread_bioq);
2202 	} else {
2203 		bioq_insert_tail(&pmp->wthread_bioq, ap->a_bio);
2204 		mtx_unlock(&pmp->wthread_mtx);
2205 	}
2206 	hammer2_lwinprog_wait(pmp);
2207 
2208 	return(0);
2209 }
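
/*
 * The function above hands the logical write off to a per-PFS worker
 * thread rather than running it in the strategy path (see the WARNING
 * above regarding transactions).  Note that wakeup() is only issued when
 * the queue transitions from empty to non-empty, so the worker must
 * drain the whole queue before sleeping again.  Below is a minimal
 * sketch of the consumer side of this handoff; the real worker lives in
 * the VFS code, and the name and details here are illustrative
 * assumptions only:
 */
#if 0
static void
hammer2_write_thread_sketch(void *arg)
{
	hammer2_pfsmount_t *pmp = arg;
	struct bio *bio;

	mtx_lock(&pmp->wthread_mtx);
	for (;;) {
		/* drain everything queued before sleeping again */
		while ((bio = bioq_takefirst(&pmp->wthread_bioq)) != NULL) {
			mtx_unlock(&pmp->wthread_mtx);
			/* ... perform the logical file write for bio ... */
			biodone(bio);
			hammer2_lwinprog_drop(pmp); /* pairs w/ ref above */
			mtx_lock(&pmp->wthread_mtx);
		}
		/* queue empty: block until the producer's wakeup() */
		mtxsleep(&pmp->wthread_bioq, &pmp->wthread_mtx,
			 0, "h2bioq", 0);
	}
}
#endif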
2210 
2211 /*
2212  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2213  */
2214 static
2215 int
2216 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2217 {
2218 	hammer2_inode_t *ip;
2219 	int error;
2220 
2221 	ip = VTOI(ap->a_vp);
2222 
2223 	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2224 			      ap->a_fflag, ap->a_cred);
2225 	return (error);
2226 }
2227 
2228 static
2229 int
2230 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2231 {
2232 	struct mount *mp;
2233 	hammer2_pfsmount_t *pmp;
2234 	int rc;
2235 
2236 	switch (ap->a_op) {
2237 	case (MOUNTCTL_SET_EXPORT):
2238 		mp = ap->a_head.a_ops->head.vv_mount;
2239 		pmp = MPTOPMP(mp);
2240 
2241 		if (ap->a_ctllen != sizeof(struct export_args))
2242 			rc = (EINVAL);
2243 		else
2244 			rc = vfs_export(mp, &pmp->export,
2245 					(const struct export_args *)ap->a_ctl);
2246 		break;
2247 	default:
2248 		rc = vop_stdmountctl(ap);
2249 		break;
2250 	}
2251 	return (rc);
2252 }
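
/*
 * For reference, MOUNTCTL_SET_EXPORT is normally reached from userland
 * via the mountctl(2) system call.  A hedged userland sketch, assuming
 * the standard mountctl(2) interface (error handling omitted; the
 * read-only export flags are an example choice, not a requirement):
 */
#if 0
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <string.h>

static int
export_hammer2_readonly(const char *mtpt)
{
	struct export_args ea;

	memset(&ea, 0, sizeof(ea));
	ea.ex_flags = MNT_EXRDONLY;	/* export read-only to the world */
	return (mountctl(mtpt, MOUNTCTL_SET_EXPORT, -1,
			 &ea, sizeof(ea), NULL, 0));
}
#endif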
2253 
2254 /*
2255  * KQFILTER
2256  */
2257 static void filt_hammer2detach(struct knote *kn);
2258 static int filt_hammer2read(struct knote *kn, long hint);
2259 static int filt_hammer2write(struct knote *kn, long hint);
2260 static int filt_hammer2vnode(struct knote *kn, long hint);
2261 
2262 static struct filterops hammer2read_filtops =
2263 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2264 	  NULL, filt_hammer2detach, filt_hammer2read };
2265 static struct filterops hammer2write_filtops =
2266 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2267 	  NULL, filt_hammer2detach, filt_hammer2write };
2268 static struct filterops hammer2vnode_filtops =
2269 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2270 	  NULL, filt_hammer2detach, filt_hammer2vnode };
2271 
2272 static
2273 int
2274 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2275 {
2276 	struct vnode *vp = ap->a_vp;
2277 	struct knote *kn = ap->a_kn;
2278 
2279 	switch (kn->kn_filter) {
2280 	case EVFILT_READ:
2281 		kn->kn_fop = &hammer2read_filtops;
2282 		break;
2283 	case EVFILT_WRITE:
2284 		kn->kn_fop = &hammer2write_filtops;
2285 		break;
2286 	case EVFILT_VNODE:
2287 		kn->kn_fop = &hammer2vnode_filtops;
2288 		break;
2289 	default:
2290 		return (EOPNOTSUPP);
2291 	}
2292 
2293 	kn->kn_hook = (caddr_t)vp;
2294 
2295 	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2296 
2297 	return(0);
2298 }
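
/*
 * Userland reaches these filterops through kqueue(2)/kevent(2).  A
 * minimal hedged sketch registering for vnode events on a file (error
 * checks omitted); the requested NOTE_* flags arrive via the kn_sfflags
 * test in filt_hammer2vnode() below:
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>

static u_int
wait_for_vnode_event(const char *path)
{
	struct kevent kev;
	int kq, fd;

	kq = kqueue();
	fd = open(path, O_RDONLY);
	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	       NOTE_DELETE | NOTE_RENAME | NOTE_WRITE, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);	/* register the filter */
	kevent(kq, NULL, 0, &kev, 1, NULL);	/* block for one event */
	return (kev.fflags);			/* which NOTE_* fired */
}
#endif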
2299 
2300 static void
2301 filt_hammer2detach(struct knote *kn)
2302 {
2303 	struct vnode *vp = (void *)kn->kn_hook;
2304 
2305 	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2306 }
2307 
2308 static int
2309 filt_hammer2read(struct knote *kn, long hint)
2310 {
2311 	struct vnode *vp = (void *)kn->kn_hook;
2312 	hammer2_inode_t *ip = VTOI(vp);
2313 	off_t off;
2314 
2315 	if (hint == NOTE_REVOKE) {
2316 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2317 		return(1);
2318 	}
2319 	off = ip->size - kn->kn_fp->f_offset;
2320 	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2321 	if (kn->kn_sfflags & NOTE_OLDAPI)
2322 		return(1);
2323 	return (kn->kn_data != 0);
2324 }
2325 
2327 static int
2328 filt_hammer2write(struct knote *kn, long hint)
2329 {
2330 	if (hint == NOTE_REVOKE)
2331 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2332 	kn->kn_data = 0;
2333 	return (1);
2334 }
2335 
2336 static int
2337 filt_hammer2vnode(struct knote *kn, long hint)
2338 {
2339 	if (kn->kn_sfflags & hint)
2340 		kn->kn_fflags |= hint;
2341 	if (hint == NOTE_REVOKE) {
2342 		kn->kn_flags |= (EV_EOF | EV_NODATA);
2343 		return (1);
2344 	}
2345 	return (kn->kn_fflags != 0);
2346 }
2347 
2348 /*
2349  * FIFO VOPS
2350  */
2351 static
2352 int
2353 hammer2_vop_markatime(struct vop_markatime_args *ap)
2354 {
2355 	hammer2_inode_t *ip;
2356 	struct vnode *vp;
2357 
2358 	vp = ap->a_vp;
2359 	ip = VTOI(vp);
2360 
2361 	if (ip->pmp->ronly)
2362 		return(EROFS);
2363 	return(0);
2364 }
2365 
2366 static
2367 int
2368 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2369 {
2370 	int error;
2371 
2372 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2373 	if (error)
2374 		error = hammer2_vop_kqfilter(ap);
2375 	return(error);
2376 }
2377 
2378 /*
2379  * VOPS vector
2380  */
2381 struct vop_ops hammer2_vnode_vops = {
2382 	.vop_default	= vop_defaultop,
2383 	.vop_fsync	= hammer2_vop_fsync,
2384 	.vop_getpages	= vop_stdgetpages,
2385 	.vop_putpages	= vop_stdputpages,
2386 	.vop_access	= hammer2_vop_access,
2387 	.vop_advlock	= hammer2_vop_advlock,
2388 	.vop_close	= hammer2_vop_close,
2389 	.vop_nlink	= hammer2_vop_nlink,
2390 	.vop_ncreate	= hammer2_vop_ncreate,
2391 	.vop_nsymlink	= hammer2_vop_nsymlink,
2392 	.vop_nremove	= hammer2_vop_nremove,
2393 	.vop_nrmdir	= hammer2_vop_nrmdir,
2394 	.vop_nrename	= hammer2_vop_nrename,
2395 	.vop_getattr	= hammer2_vop_getattr,
2396 	.vop_setattr	= hammer2_vop_setattr,
2397 	.vop_readdir	= hammer2_vop_readdir,
2398 	.vop_readlink	= hammer2_vop_readlink,
2401 	.vop_read	= hammer2_vop_read,
2402 	.vop_write	= hammer2_vop_write,
2403 	.vop_open	= hammer2_vop_open,
2404 	.vop_inactive	= hammer2_vop_inactive,
2405 	.vop_reclaim 	= hammer2_vop_reclaim,
2406 	.vop_nresolve	= hammer2_vop_nresolve,
2407 	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2408 	.vop_nmkdir 	= hammer2_vop_nmkdir,
2409 	.vop_nmknod 	= hammer2_vop_nmknod,
2410 	.vop_ioctl	= hammer2_vop_ioctl,
2411 	.vop_mountctl	= hammer2_vop_mountctl,
2412 	.vop_bmap	= hammer2_vop_bmap,
2413 	.vop_strategy	= hammer2_vop_strategy,
2414 	.vop_kqfilter	= hammer2_vop_kqfilter
2415 };
2416 
2417 struct vop_ops hammer2_spec_vops = {
2418 	.vop_default	= vop_defaultop,
2419 	.vop_fsync	= hammer2_vop_fsync,
2420 	.vop_read	= vop_stdnoread,
2421 	.vop_write	= vop_stdnowrite,
2422 	.vop_access	= hammer2_vop_access,
2423 	.vop_close	= hammer2_vop_close,
2424 	.vop_markatime	= hammer2_vop_markatime,
2425 	.vop_getattr	= hammer2_vop_getattr,
2426 	.vop_inactive	= hammer2_vop_inactive,
2427 	.vop_reclaim	= hammer2_vop_reclaim,
2428 	.vop_setattr	= hammer2_vop_setattr
2429 };
2430 
2431 struct vop_ops hammer2_fifo_vops = {
2432 	.vop_default	= fifo_vnoperate,
2433 	.vop_fsync	= hammer2_vop_fsync,
2434 #if 0
2435 	.vop_read	= hammer2_vop_fiforead,
2436 	.vop_write	= hammer2_vop_fifowrite,
2437 #endif
2438 	.vop_access	= hammer2_vop_access,
2439 #if 0
2440 	.vop_close	= hammer2_vop_fifoclose,
2441 #endif
2442 	.vop_markatime	= hammer2_vop_markatime,
2443 	.vop_getattr	= hammer2_vop_getattr,
2444 	.vop_inactive	= hammer2_vop_inactive,
2445 	.vop_reclaim	= hammer2_vop_reclaim,
2446 	.vop_setattr	= hammer2_vop_setattr,
2447 	.vop_kqfilter	= hammer2_vop_fifokqfilter
2448 };
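
/*
 * These tables are registered against a mount point at mount time.  A
 * hedged sketch of the usual DragonFly wiring (the actual call sites are
 * in the hammer2 VFS mount code, not in this file):
 */
#if 0
	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
#endif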
2449 
2450