xref: /dragonfly/sys/vfs/hammer2/hammer2_vnops.c (revision 8835adf8)
1 /*
2  * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * Kernel Filesystem interface
38  *
39  * NOTE! local ipdata pointers must be reloaded on any modifying operation
40  *	 to the inode as its underlying chain may have changed.
41  */
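
#if 0
/*
 * Illustrative sketch only (not part of the original file): the reload
 * pattern the NOTE above requires.  A cached ipdata pointer may go
 * stale across any modifying call because the inode's backing chain
 * can be replaced (copied-on-write), so it must be re-derived
 * afterwards.  example_meta_update() is a hypothetical caller.
 */
static void
example_meta_update(hammer2_inode_t *ip, hammer2_xop_head_t *head)
{
	const hammer2_inode_data_t *ripdata;

	ripdata = &hammer2_cluster_rdata(&head->cluster)->ipdata;
	/* ... read-only use of ripdata ... */
	hammer2_inode_modify(ip);	/* may COW the underlying chain */
	/* MUST reload, the old pointer may reference a stale chain */
	ripdata = &hammer2_cluster_rdata(&head->cluster)->ipdata;
}
#endif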
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
59 
60 #include "hammer2.h"
61 
62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
63 				int seqcount);
64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
65 				int ioflag, int seqcount);
66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
68 
69 struct objcache *cache_xops;
70 
71 static __inline
72 void
73 hammer2_knote(struct vnode *vp, int flags)
74 {
75 	if (flags)
76 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
77 }
78 
79 /*
80  * Last reference to a vnode is going away but it is still cached.
81  */
82 static
83 int
84 hammer2_vop_inactive(struct vop_inactive_args *ap)
85 {
86 	hammer2_inode_t *ip;
87 	struct vnode *vp;
88 
89 	LOCKSTART;
90 	vp = ap->a_vp;
91 	ip = VTOI(vp);
92 
93 	/*
94 	 * Degenerate case
95 	 */
96 	if (ip == NULL) {
97 		vrecycle(vp);
98 		LOCKSTOP;
99 		return (0);
100 	}
101 
102 	/*
103 	 * Check for deleted inodes and recycle immediately on the last
104 	 * release.  Be sure to destroy any left-over buffer cache buffers
105 	 * so we do not waste time trying to flush them.
106 	 *
107 	 * Note that deleting the file block chains under the inode chain
108 	 * would just be a waste of energy, so don't do it.
109 	 *
110 	 * WARNING: nvtruncbuf() can only be safely called without the inode
111 	 *	    lock held due to the way our write thread works.
112 	 */
113 	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
114 		hammer2_key_t lbase;
115 		int nblksize;
116 
117 		/*
118 		 * Final disposal of an unlinked file: truncate the
119 		 * buffer cache to zero and recycle the vnode now rather
120 		 * than waiting for the kernel to reclaim it.
121 		 */
122 		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
123 		nvtruncbuf(vp, 0, nblksize, 0, 0);
124 		vrecycle(vp);
125 	}
126 	LOCKSTOP;
127 	return (0);
128 }
129 
130 /*
131  * Reclaim a vnode so that it can be reused; after the inode is
132  * disassociated, the filesystem must manage it alone.
133  */
134 static
135 int
136 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
137 {
138 	hammer2_inode_t *ip;
139 	hammer2_pfs_t *pmp;
140 	struct vnode *vp;
141 
142 	LOCKSTART;
143 	vp = ap->a_vp;
144 	ip = VTOI(vp);
145 	if (ip == NULL) {
146 		LOCKSTOP;
147 		return(0);
148 	}
149 	pmp = ip->pmp;
150 
151 	/*
152 	 * The final close of a deleted file or directory marks it for
153 	 * destruction.  The DELETED flag allows the flusher to shortcut
154 	 * any modified blocks still unflushed (that is, just ignore them).
155 	 *
156 	 * HAMMER2 usually does not try to optimize the freemap by returning
157 	 * deleted blocks to it as it does not usually know how many snapshots
158 	 * might be referencing portions of the file/dir.
159 	 */
160 	vp->v_data = NULL;
161 	ip->vp = NULL;
162 
163 	/*
164 	 * NOTE! We do not attempt to flush chains here, flushing is
165 	 *	 really fragile and could also deadlock.
166 	 */
167 	vclrisdirty(vp);
168 
169 	/*
170 	 * An unlinked inode may have been relinked to the ihidden directory.
171 	 * This occurs if the inode was unlinked while open.  Reclamation of
172 	 * these inodes requires processing we cannot safely do here so add
173 	 * the inode to the sideq in that situation.
174 	 *
175 	 * A modified inode may require chain synchronization which will no
176 	 * longer be driven by a sync or fsync without the vnode, also use
177 	 * the sideq for that.
178 	 *
179 	 * A reclaim can occur at any time so we cannot safely start a
180 	 * transaction to handle reclamation of unlinked files.  Instead,
181 	 * the ip is left with a reference and placed on a linked list and
182 	 * handled later on.
183 	 */
184 
185 	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
186 			  HAMMER2_INODE_MODIFIED |
187 			  HAMMER2_INODE_RESIZED)) &&
188 	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
189 		hammer2_inode_sideq_t *ipul;
190 
191 		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
192 		ipul->ip = ip;
193 
194 		hammer2_spin_ex(&pmp->list_spin);
195 		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
196 			/* ref -> sideq */
197 			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
198 			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
199 			hammer2_spin_unex(&pmp->list_spin);
200 		} else {
201 			hammer2_spin_unex(&pmp->list_spin);
202 			kfree(ipul, pmp->minode);
203 			hammer2_inode_drop(ip);		/* vp ref */
204 		}
205 		/* retain ref from vp for ipul */
206 	} else {
207 		hammer2_inode_drop(ip);			/* vp ref */
208 	}
209 
210 	/*
211 	 * XXX handle background sync when ip dirty, kernel will no longer
212 	 * notify us regarding this inode because there is no longer a
213 	 * vnode attached to it.
214 	 */
215 
216 	LOCKSTOP;
217 	return (0);
218 }
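
#if 0
/*
 * Illustrative sketch only: roughly what draining the sideq looks like
 * on the other end.  The real consumer is hammer2_inode_run_sideq()
 * (called from the unlink paths below); this sketch merely shows the
 * lock/ref discipline implied by the enqueue above and is an
 * assumption, not the actual implementation.
 */
static void
example_drain_sideq(hammer2_pfs_t *pmp)
{
	hammer2_inode_sideq_t *ipul;
	hammer2_inode_t *ip;

	hammer2_spin_ex(&pmp->list_spin);
	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
		ip = ipul->ip;
		atomic_clear_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
		hammer2_spin_unex(&pmp->list_spin);

		/* ... flush or finalize the unlinked/modified inode ... */

		hammer2_inode_drop(ip);		/* ref inherited from vp */
		kfree(ipul, pmp->minode);
		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
}
#endif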
219 
220 static
221 int
222 hammer2_vop_fsync(struct vop_fsync_args *ap)
223 {
224 	hammer2_inode_t *ip;
225 	struct vnode *vp;
226 
227 	LOCKSTART;
228 	vp = ap->a_vp;
229 	ip = VTOI(vp);
230 
231 #if 0
232 	/* XXX can't do this yet */
233 	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
234 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
235 #endif
236 	hammer2_trans_init(ip->pmp, 0);
237 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
238 
239 	/*
240 	 * Calling chain_flush here creates a lot of duplicative
241 	 * COW operations due to non-optimal vnode ordering.
242 	 *
243 	 * Only do it for an actual fsync() syscall.  The other forms
244 	 * which call this function will eventually call chain_flush
245 	 * on the volume root as a catch-all, which is far more optimal.
246 	 */
247 	hammer2_inode_lock(ip, 0);
248 	if (ip->flags & HAMMER2_INODE_MODIFIED)
249 		hammer2_inode_chain_sync(ip);
250 	hammer2_inode_unlock(ip);
251 	hammer2_trans_done(ip->pmp);
252 
253 	LOCKSTOP;
254 	return (0);
255 }
256 
257 static
258 int
259 hammer2_vop_access(struct vop_access_args *ap)
260 {
261 	hammer2_inode_t *ip = VTOI(ap->a_vp);
262 	uid_t uid;
263 	gid_t gid;
264 	int error;
265 
266 	LOCKSTART;
267 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
268 	uid = hammer2_to_unix_xid(&ip->meta.uid);
269 	gid = hammer2_to_unix_xid(&ip->meta.gid);
270 	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
271 	hammer2_inode_unlock(ip);
272 
273 	LOCKSTOP;
274 	return (error);
275 }
276 
277 static
278 int
279 hammer2_vop_getattr(struct vop_getattr_args *ap)
280 {
281 	hammer2_pfs_t *pmp;
282 	hammer2_inode_t *ip;
283 	struct vnode *vp;
284 	struct vattr *vap;
285 	hammer2_chain_t *chain;
286 	int i;
287 
288 	LOCKSTART;
289 	vp = ap->a_vp;
290 	vap = ap->a_vap;
291 
292 	ip = VTOI(vp);
293 	pmp = ip->pmp;
294 
295 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
296 
297 	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
298 	vap->va_fileid = ip->meta.inum;
299 	vap->va_mode = ip->meta.mode;
300 	vap->va_nlink = ip->meta.nlinks;
301 	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
302 	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
303 	vap->va_rmajor = 0;
304 	vap->va_rminor = 0;
305 	vap->va_size = ip->meta.size;	/* protected by shared lock */
306 	vap->va_blocksize = HAMMER2_PBUFSIZE;
307 	vap->va_flags = ip->meta.uflags;
308 	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
309 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
310 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); /* no atime */
311 	vap->va_gen = 1;
312 	vap->va_bytes = 0;
313 	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
314 		/*
315 		 * Can't really calculate directory use sans the files under
316 		 * it, just assume one block for now.
317 		 */
318 		vap->va_bytes += HAMMER2_INODE_BYTES;
319 	} else {
320 		for (i = 0; i < ip->cluster.nchains; ++i) {
321 			if ((chain = ip->cluster.array[i].chain) != NULL) {
322 				if (vap->va_bytes < chain->bref.data_count)
323 					vap->va_bytes = chain->bref.data_count;
324 			}
325 		}
326 	}
327 	vap->va_type = hammer2_get_vtype(ip->meta.type);
328 	vap->va_filerev = 0;
329 	vap->va_uid_uuid = ip->meta.uid;
330 	vap->va_gid_uuid = ip->meta.gid;
331 	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
332 			  VA_FSID_UUID_VALID;
333 
334 	hammer2_inode_unlock(ip);
335 
336 	LOCKSTOP;
337 	return (0);
338 }
339 
340 static
341 int
342 hammer2_vop_setattr(struct vop_setattr_args *ap)
343 {
344 	hammer2_inode_t *ip;
345 	struct vnode *vp;
346 	struct vattr *vap;
347 	int error;
348 	int kflags = 0;
349 	uint64_t ctime;
350 
351 	LOCKSTART;
352 	vp = ap->a_vp;
353 	vap = ap->a_vap;
354 	hammer2_update_time(&ctime);
355 
356 	ip = VTOI(vp);
357 
358 	if (ip->pmp->ronly) {
359 		LOCKSTOP;
360 		return(EROFS);
361 	}
362 
363 	hammer2_pfs_memory_wait(ip->pmp);
364 	hammer2_trans_init(ip->pmp, 0);
365 	hammer2_inode_lock(ip, 0);
366 	error = 0;
367 
368 	if (vap->va_flags != VNOVAL) {
369 		uint32_t flags;
370 
371 		flags = ip->meta.uflags;
372 		error = vop_helper_setattr_flags(&flags, vap->va_flags,
373 				     hammer2_to_unix_xid(&ip->meta.uid),
374 				     ap->a_cred);
375 		if (error == 0) {
376 			if (ip->meta.uflags != flags) {
377 				hammer2_inode_modify(ip);
378 				ip->meta.uflags = flags;
379 				ip->meta.ctime = ctime;
380 				kflags |= NOTE_ATTRIB;
381 			}
382 			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
383 				error = 0;
384 				goto done;
385 			}
386 		}
387 		goto done;
388 	}
389 	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
390 		error = EPERM;
391 		goto done;
392 	}
393 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
394 		mode_t cur_mode = ip->meta.mode;
395 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
396 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
397 		uuid_t uuid_uid;
398 		uuid_t uuid_gid;
399 
400 		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
401 					 ap->a_cred,
402 					 &cur_uid, &cur_gid, &cur_mode);
403 		if (error == 0) {
404 			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
405 			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
406 			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
407 			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
408 			    ip->meta.mode != cur_mode
409 			) {
410 				hammer2_inode_modify(ip);
411 				ip->meta.uid = uuid_uid;
412 				ip->meta.gid = uuid_gid;
413 				ip->meta.mode = cur_mode;
414 				ip->meta.ctime = ctime;
415 			}
416 			kflags |= NOTE_ATTRIB;
417 		}
418 	}
419 
420 	/*
421 	 * Resize the file
422 	 */
423 	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
424 		switch(vp->v_type) {
425 		case VREG:
426 			if (vap->va_size == ip->meta.size)
427 				break;
428 			if (vap->va_size < ip->meta.size) {
429 				hammer2_mtx_ex(&ip->truncate_lock);
430 				hammer2_truncate_file(ip, vap->va_size);
431 				hammer2_mtx_unlock(&ip->truncate_lock);
432 			} else {
433 				hammer2_extend_file(ip, vap->va_size);
434 			}
435 			hammer2_inode_modify(ip);
436 			ip->meta.mtime = ctime;
437 			break;
438 		default:
439 			error = EINVAL;
440 			goto done;
441 		}
442 	}
443 #if 0
444 	/* atime not supported */
445 	if (vap->va_atime.tv_sec != VNOVAL) {
446 		hammer2_inode_modify(ip);
447 		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
448 		kflags |= NOTE_ATTRIB;
449 	}
450 #endif
451 	if (vap->va_mode != (mode_t)VNOVAL) {
452 		mode_t cur_mode = ip->meta.mode;
453 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
454 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
455 
456 		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
457 					 cur_uid, cur_gid, &cur_mode);
458 		if (error == 0 && ip->meta.mode != cur_mode) {
459 			hammer2_inode_modify(ip);
460 			ip->meta.mode = cur_mode;
461 			ip->meta.ctime = ctime;
462 			kflags |= NOTE_ATTRIB;
463 		}
464 	}
465 
466 	if (vap->va_mtime.tv_sec != VNOVAL) {
467 		hammer2_inode_modify(ip);
468 		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
469 		kflags |= NOTE_ATTRIB;
470 	}
471 
472 done:
473 	/*
474 	 * If a truncation occurred we must call hammer2_inode_chain_sync()
475 	 * now in order to trim the related data chains, otherwise a later
476 	 * expansion can cause havoc.
477 	 *
478 	 * If an extend occurred that changed the DIRECTDATA state, we must
479 	 * call hammer2_inode_chain_sync() now in order to prepare the
480 	 * inode's indirect block table.
481 	 */
482 	if (ip->flags & HAMMER2_INODE_RESIZED)
483 		hammer2_inode_chain_sync(ip);
484 
485 	/*
486 	 * Cleanup.
487 	 */
488 	hammer2_inode_unlock(ip);
489 	hammer2_trans_done(ip->pmp);
490 	hammer2_knote(ip->vp, kflags);
491 
492 	LOCKSTOP;
493 	return (error);
494 }
495 
496 static
497 int
498 hammer2_vop_readdir(struct vop_readdir_args *ap)
499 {
500 	hammer2_xop_readdir_t *xop;
501 	hammer2_blockref_t bref;
502 	hammer2_inode_t *ip;
503 	hammer2_tid_t inum;
504 	hammer2_key_t lkey;
505 	struct uio *uio;
506 	off_t *cookies;
507 	off_t saveoff;
508 	int cookie_index;
509 	int ncookies;
510 	int error;
511 	int eofflag;
512 	int dtype;
513 	int r;
514 
515 	LOCKSTART;
516 	ip = VTOI(ap->a_vp);
517 	uio = ap->a_uio;
518 	saveoff = uio->uio_offset;
519 	eofflag = 0;
520 	error = 0;
521 
522 	/*
523 	 * Set up directory entry cookies if requested
524 	 */
525 	if (ap->a_ncookies) {
526 		ncookies = uio->uio_resid / 16 + 1;
527 		if (ncookies > 1024)
528 			ncookies = 1024;
529 		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
530 	} else {
531 		ncookies = -1;
532 		cookies = NULL;
533 	}
534 	cookie_index = 0;
535 
536 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
537 
538 	/*
539 	 * Handle artificial entries.  To ensure that only positive 64 bit
540 	 * quantities are returned to userland we always strip off bit 63.
541 	 * The hash code is designed such that codes 0x0000-0x7FFF are not
542 	 * used, allowing us to use these codes for artificial entries.
543 	 *
544 	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
545 	 * allow '..' to cross the mount point into (e.g.) the super-root.
546 	 */
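#if 0
	/*
	 * Illustrative sketch only: the offset encoding described above,
	 * mirroring the real statements later in this function.  The
	 * kernel-side scan key carries bit 63; the userland cookie never
	 * does.
	 */
	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;	/* scan key */
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE; /* cookie */
#endif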
547 	if (saveoff == 0) {
548 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
549 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
550 		if (r)
551 			goto done;
552 		if (cookies)
553 			cookies[cookie_index] = saveoff;
554 		++saveoff;
555 		++cookie_index;
556 		if (cookie_index == ncookies)
557 			goto done;
558 	}
559 
560 	if (saveoff == 1) {
561 		/*
562 		 * Be careful with lockorder when accessing ".."
563 		 *
564 		 * (ip is the current dir; ip->pip is the parent dir).
565 		 */
566 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
567 		if (ip->pip && ip != ip->pmp->iroot)
568 			inum = ip->pip->meta.inum & HAMMER2_DIRHASH_USERMSK;
569 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
570 		if (r)
571 			goto done;
572 		if (cookies)
573 			cookies[cookie_index] = saveoff;
574 		++saveoff;
575 		++cookie_index;
576 		if (cookie_index == ncookies)
577 			goto done;
578 	}
579 
580 	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
581 	if (hammer2_debug & 0x0020)
582 		kprintf("readdir: lkey %016jx\n", lkey);
583 	if (error)
584 		goto done;
585 
586 	/*
587 	 * Use XOP for cluster scan.
588 	 *
589 	 * parent is the inode cluster, already locked for us.  Don't
590 	 * double lock shared locks as this will screw up upgrades.
591 	 */
592 	xop = hammer2_xop_alloc(ip, 0);
593 	xop->lkey = lkey;
594 	hammer2_xop_start(&xop->head, hammer2_xop_readdir);
595 
596 	for (;;) {
597 		const hammer2_inode_data_t *ripdata;
598 
599 		error = hammer2_xop_collect(&xop->head, 0);
600 		if (error)
601 			break;
602 		if (cookie_index == ncookies)
603 			break;
604 		if (hammer2_debug & 0x0020)
605 			kprintf("cluster chain %p %p\n",
606 				xop->head.cluster.focus,
607 				(xop->head.cluster.focus ?
608 				 xop->head.cluster.focus->data : (void *)-1));
609 		ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
610 		hammer2_cluster_bref(&xop->head.cluster, &bref);
611 		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
612 			dtype = hammer2_get_dtype(ripdata);
613 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
614 			r = vop_write_dirent(&error, uio,
615 					     ripdata->meta.inum &
616 					      HAMMER2_DIRHASH_USERMSK,
617 					     dtype,
618 					     ripdata->meta.name_len,
619 					     ripdata->filename);
620 			if (r)
621 				break;
622 			if (cookies)
623 				cookies[cookie_index] = saveoff;
624 			++cookie_index;
625 		} else {
626 			/* XXX chain error */
627 			kprintf("bad chain type readdir %d\n", bref.type);
628 		}
629 	}
630 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
631 	if (error == ENOENT) {
632 		error = 0;
633 		eofflag = 1;
634 		saveoff = (hammer2_key_t)-1;
635 	} else {
636 		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
637 	}
638 done:
639 	hammer2_inode_unlock(ip);
640 	if (ap->a_eofflag)
641 		*ap->a_eofflag = eofflag;
642 	if (hammer2_debug & 0x0020)
643 		kprintf("readdir: done at %016jx\n", saveoff);
644 	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
645 	if (error && cookie_index == 0) {
646 		if (cookies) {
647 			kfree(cookies, M_TEMP);
648 			*ap->a_ncookies = 0;
649 			*ap->a_cookies = NULL;
650 		}
651 	} else {
652 		if (cookies) {
653 			*ap->a_ncookies = cookie_index;
654 			*ap->a_cookies = cookies;
655 		}
656 	}
657 	LOCKSTOP;
658 	return (error);
659 }
660 
661 /*
662  * hammer2_vop_readlink { vp, uio, cred }
663  */
664 static
665 int
666 hammer2_vop_readlink(struct vop_readlink_args *ap)
667 {
668 	struct vnode *vp;
669 	hammer2_inode_t *ip;
670 	int error;
671 
672 	vp = ap->a_vp;
673 	if (vp->v_type != VLNK)
674 		return (EINVAL);
675 	ip = VTOI(vp);
676 
677 	error = hammer2_read_file(ip, ap->a_uio, 0);
678 	return (error);
679 }
680 
681 static
682 int
683 hammer2_vop_read(struct vop_read_args *ap)
684 {
685 	struct vnode *vp;
686 	hammer2_inode_t *ip;
687 	struct uio *uio;
688 	int error;
689 	int seqcount;
690 	int bigread;
691 
692 	/*
693 	 * Read operations supported on this vnode?
694 	 */
695 	vp = ap->a_vp;
696 	if (vp->v_type != VREG)
697 		return (EINVAL);
698 
699 	/*
700 	 * Misc
701 	 */
702 	ip = VTOI(vp);
703 	uio = ap->a_uio;
704 	error = 0;
705 
706 	seqcount = ap->a_ioflag >> 16;
707 	bigread = (uio->uio_resid > 100 * 1024 * 1024);
708 
709 	error = hammer2_read_file(ip, uio, seqcount);
710 	return (error);
711 }
712 
713 static
714 int
715 hammer2_vop_write(struct vop_write_args *ap)
716 {
717 	hammer2_inode_t *ip;
718 	thread_t td;
719 	struct vnode *vp;
720 	struct uio *uio;
721 	int error;
722 	int seqcount;
723 
724 	/*
725 	 * Write operations supported on this vnode?
726 	 */
727 	vp = ap->a_vp;
728 	if (vp->v_type != VREG)
729 		return (EINVAL);
730 
731 	/*
732 	 * Misc
733 	 */
734 	ip = VTOI(vp);
735 	uio = ap->a_uio;
736 	error = 0;
737 	if (ip->pmp->ronly) {
738 		return (EROFS);
739 	}
740 
741 	seqcount = ap->a_ioflag >> 16;
742 
743 	/*
744 	 * Check resource limit
745 	 */
746 	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
747 	    uio->uio_offset + uio->uio_resid >
748 	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
749 		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
750 		return (EFBIG);
751 	}
752 
753 	/*
754 	 * The transaction interlocks against flush initiations
755 	 * (note: but will run concurrently with the actual flush).
756 	 */
757 	hammer2_trans_init(ip->pmp, 0);
758 	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
759 	hammer2_trans_done(ip->pmp);
760 
761 	return (error);
762 }
763 
764 /*
765  * Perform read operations on a file or symlink given an UNLOCKED
766  * inode and uio.
767  *
768  * The passed ip is not locked.
769  */
770 static
771 int
772 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
773 {
774 	hammer2_off_t size;
775 	struct buf *bp;
776 	int error;
777 
778 	error = 0;
779 
780 	/*
781 	 * UIO read loop.
782 	 *
783 	 * WARNING! Assumes that the kernel interlocks size changes at the
784 	 *	    vnode level.
785 	 */
786 	hammer2_mtx_sh(&ip->lock);
787 	hammer2_mtx_sh(&ip->truncate_lock);
788 	size = ip->meta.size;
789 	hammer2_mtx_unlock(&ip->lock);
790 
791 	while (uio->uio_resid > 0 && uio->uio_offset < size) {
792 		hammer2_key_t lbase;
793 		hammer2_key_t leof;
794 		int lblksize;
795 		int loff;
796 		int n;
797 
798 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
799 						&lbase, &leof);
800 
801 		error = cluster_read(ip->vp, leof, lbase, lblksize,
802 				     uio->uio_resid, seqcount * BKVASIZE,
803 				     &bp);
804 
805 		if (error)
806 			break;
807 		loff = (int)(uio->uio_offset - lbase);
808 		n = lblksize - loff;
809 		if (n > uio->uio_resid)
810 			n = uio->uio_resid;
811 		if (n > size - uio->uio_offset)
812 			n = (int)(size - uio->uio_offset);
813 		bp->b_flags |= B_AGE;
814 		uiomove((char *)bp->b_data + loff, n, uio);
815 		bqrelse(bp);
816 	}
817 	hammer2_mtx_unlock(&ip->truncate_lock);
818 
819 	return (error);
820 }
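
#if 0
/*
 * Illustrative worked example only (not part of the original file):
 * the clamping math in the read loop above, for a file with
 * size = 150000 at uio_offset = 140000 and a 65536-byte logical block:
 *
 *	lbase = 131072				(block containing offset)
 *	loff  = 140000 - 131072 = 8928		(offset within buffer)
 *	n     = 65536 - 8928 = 56608		(bytes left in buffer)
 *	n     clamped to size - uio_offset = 10000 bytes actually copied
 */
#endif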
821 
822 /*
823  * Write to the file represented by the inode via the logical buffer cache.
824  * The inode may represent a regular file or a symlink.
825  *
826  * The inode must not be locked.
827  */
828 static
829 int
830 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
831 		   int ioflag, int seqcount)
832 {
833 	hammer2_key_t old_eof;
834 	hammer2_key_t new_eof;
835 	struct buf *bp;
836 	int kflags;
837 	int error;
838 	int modified;
839 
840 	/*
841 	 * Set up if appending
842 	 *
843 	 * WARNING! Assumes that the kernel interlocks size changes at the
844 	 *	    vnode level.
845 	 */
846 	hammer2_mtx_ex(&ip->lock);
847 	hammer2_mtx_sh(&ip->truncate_lock);
848 	if (ioflag & IO_APPEND)
849 		uio->uio_offset = ip->meta.size;
850 	old_eof = ip->meta.size;
851 
852 	/*
853 	 * Extend the file if necessary.  If the write fails at some point
854 	 * we will truncate it back down to cover as much as we were able
855 	 * to write.
856 	 *
857 	 * Doing this now makes it easier to calculate buffer sizes in
858 	 * the loop.
859 	 */
860 	kflags = 0;
861 	error = 0;
862 	modified = 0;
863 
864 	if (uio->uio_offset + uio->uio_resid > old_eof) {
865 		new_eof = uio->uio_offset + uio->uio_resid;
866 		modified = 1;
867 		hammer2_extend_file(ip, new_eof);
868 		kflags |= NOTE_EXTEND;
869 	} else {
870 		new_eof = old_eof;
871 	}
872 	hammer2_mtx_unlock(&ip->lock);
873 
874 	/*
875 	 * UIO write loop
876 	 */
877 	while (uio->uio_resid > 0) {
878 		hammer2_key_t lbase;
879 		int trivial;
880 		int endofblk;
881 		int lblksize;
882 		int loff;
883 		int n;
884 
885 		/*
886 		 * Don't allow the buffer build to blow out the buffer
887 		 * cache.
888 		 */
889 		if ((ioflag & IO_RECURSE) == 0)
890 			bwillwrite(HAMMER2_PBUFSIZE);
891 
892 		/*
893 		 * This nominally tells us how much we can cluster and
894 		 * what the logical buffer size needs to be.  Currently
895 		 * we don't try to cluster the write and just handle one
896 		 * block at a time.
897 		 */
898 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
899 						&lbase, NULL);
900 		loff = (int)(uio->uio_offset - lbase);
901 
902 		KKASSERT(lblksize <= 65536);
903 
904 		/*
905 		 * Calculate bytes to copy this transfer and whether the
906 		 * copy completely covers the buffer or not.
907 		 */
908 		trivial = 0;
909 		n = lblksize - loff;
910 		if (n > uio->uio_resid) {
911 			n = uio->uio_resid;
912 			if (loff == lbase && uio->uio_offset + n == new_eof)
913 				trivial = 1;
914 			endofblk = 0;
915 		} else {
916 			if (loff == 0)
917 				trivial = 1;
918 			endofblk = 1;
919 		}
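#if 0
		/*
		 * Illustrative worked example only, for lblksize = 65536:
		 *
		 *   loff = 0,    resid = 65536 -> n = 65536,
		 *				   trivial = 1, endofblk = 1
		 *   loff = 4096, resid = 65536 -> n = 61440,
		 *				   trivial = 0, endofblk = 1
		 *   loff = 0 in the first block (lbase = 0), resid = 100,
		 *   and the write ends exactly at new_eof
		 *				-> n = 100,
		 *				   trivial = 1, endofblk = 0
		 */
#endif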
920 
921 		/*
922 		 * Get the buffer
923 		 */
924 		if (uio->uio_segflg == UIO_NOCOPY) {
925 			/*
926 			 * Issuing a write with the same data backing the
927 			 * buffer.  Instantiate the buffer to collect the
928 			 * backing vm pages, then read-in any missing bits.
929 			 *
930 			 * This case is used by vop_stdputpages().
931 			 */
932 			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
933 			if ((bp->b_flags & B_CACHE) == 0) {
934 				bqrelse(bp);
935 				error = bread(ip->vp, lbase, lblksize, &bp);
936 			}
937 		} else if (trivial) {
938 			/*
939 			 * Even though we are entirely overwriting the buffer
940 			 * we may still have to zero it out to avoid a
941 			 * mmap/write visibility issue.
942 			 */
943 			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
944 			if ((bp->b_flags & B_CACHE) == 0)
945 				vfs_bio_clrbuf(bp);
946 		} else {
947 			/*
948 			 * Partial overwrite, read in any missing bits then
949 			 * replace the portion being written.
950 			 *
951 			 * (The strategy code will detect zero-fill physical
952 			 * blocks for this case).
953 			 */
954 			error = bread(ip->vp, lbase, lblksize, &bp);
955 			if (error == 0)
956 				bheavy(bp);
957 		}
958 
959 		if (error) {
960 			brelse(bp);
961 			break;
962 		}
963 
964 		/*
965 		 * Ok, copy the data in
966 		 */
967 		error = uiomove(bp->b_data + loff, n, uio);
968 		kflags |= NOTE_WRITE;
969 		modified = 1;
970 		if (error) {
971 			brelse(bp);
972 			break;
973 		}
974 
975 		/*
976 		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
977 		 *	    with IO_SYNC or IO_ASYNC set.  These writes
978 		 *	    must be handled as the pageout daemon expects.
979 		 *
980 		 * NOTE!    H2 relies on cluster_write() here because it
981 		 *	    cannot preallocate disk blocks at the logical
982 		 *	    level due to not knowing what the compression
983 		 *	    size will be at this time.
984 		 *
985 		 *	    We must use cluster_write() here and we depend
986 		 *	    on the write-behind feature to flush buffers
987 		 *	    appropriately.  If we let the buffer daemons do
988 		 *	    it the block allocations will be all over the
989 		 *	    map.
990 		 */
991 		if (ioflag & IO_SYNC) {
992 			bwrite(bp);
993 		} else if ((ioflag & IO_DIRECT) && endofblk) {
994 			bawrite(bp);
995 		} else if (ioflag & IO_ASYNC) {
996 			bawrite(bp);
997 		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
998 			bdwrite(bp);
999 		} else {
1000 			bp->b_flags |= B_CLUSTEROK;
1001 			cluster_write(bp, new_eof, lblksize, seqcount);
1002 		}
1003 	}
1004 
1005 	/*
1006 	 * Cleanup.  If we extended the file EOF but failed to write it
1007 	 * through, the entire write is a failure and we have to back up.
1008 	 */
1009 	if (error && new_eof != old_eof) {
1010 		hammer2_mtx_unlock(&ip->truncate_lock);
1011 		hammer2_mtx_ex(&ip->lock);
1012 		hammer2_mtx_ex(&ip->truncate_lock);
1013 		hammer2_truncate_file(ip, old_eof);
1014 		if (ip->flags & HAMMER2_INODE_MODIFIED)
1015 			hammer2_inode_chain_sync(ip);
1016 		hammer2_mtx_unlock(&ip->lock);
1017 	} else if (modified) {
1018 		hammer2_mtx_ex(&ip->lock);
1019 		hammer2_inode_modify(ip);
1020 		hammer2_update_time(&ip->meta.mtime);
1021 		if (ip->flags & HAMMER2_INODE_MODIFIED)
1022 			hammer2_inode_chain_sync(ip);
1023 		hammer2_mtx_unlock(&ip->lock);
1024 		hammer2_knote(ip->vp, kflags);
1025 	}
1026 	hammer2_trans_assert_strategy(ip->pmp);
1027 	hammer2_mtx_unlock(&ip->truncate_lock);
1028 
1029 	return error;
1030 }
1031 
1032 /*
1033  * Truncate the size of a file.  The inode must not be locked.
1034  *
1035  * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1036  * ensure that any on-media data beyond the new file EOF has been destroyed.
1037  *
1038  * WARNING: nvtruncbuf() can only be safely called without the inode lock
1039  *	    held due to the way our write thread works.  If the truncation
1040  *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
1041  *	    for dirtying that buffer and zeroing out trailing bytes.
1042  *
1043  * WARNING! Assumes that the kernel interlocks size changes at the
1044  *	    vnode level.
1045  *
1046  * WARNING! Caller assumes responsibility for removing dead blocks
1047  *	    if INODE_RESIZED is set.
1048  */
1049 static
1050 void
1051 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1052 {
1053 	hammer2_key_t lbase;
1054 	int nblksize;
1055 
1056 	LOCKSTART;
1057 	hammer2_mtx_unlock(&ip->lock);
1058 	if (ip->vp) {
1059 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1060 		nvtruncbuf(ip->vp, nsize,
1061 			   nblksize, (int)nsize & (nblksize - 1),
1062 			   0);
1063 	}
1064 	hammer2_mtx_ex(&ip->lock);
1065 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1066 	ip->osize = ip->meta.size;
1067 	ip->meta.size = nsize;
1068 	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1069 	hammer2_inode_modify(ip);
1070 	LOCKSTOP;
1071 }
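
#if 0
/*
 * Illustrative worked example only: the nvtruncbuf() boundary math in
 * hammer2_truncate_file() above, for a truncation to nsize = 100000
 * with a 65536-byte logical block:
 *
 *	nblksize                    = 65536
 *	(int)nsize & (nblksize - 1) = 100000 & 65535 = 34464
 *
 * so the buffer spanning the new EOF is kept, dirtied, and zeroed
 * from byte 34464 through the end of the buffer.
 */
#endif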
1072 
1073 /*
1074  * Extend the size of a file.  The inode must not be locked.
1075  *
1076  * Even though the file size is changing, we do not have to set the
1077  * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1078  * boundary.  When this occurs a hammer2_inode_chain_sync() is required
1079  * to prepare the inode cluster's indirect block table, otherwise
1080  * async execution of the strategy code will implode on us.
1081  *
1082  * WARNING! Assumes that the kernel interlocks size changes at the
1083  *	    vnode level.
1084  *
1085  * WARNING! Caller assumes responsibility for transitioning out
1086  *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
1087  */
1088 static
1089 void
1090 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1091 {
1092 	hammer2_key_t lbase;
1093 	hammer2_key_t osize;
1094 	int oblksize;
1095 	int nblksize;
1096 
1097 	LOCKSTART;
1098 
1099 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1100 	hammer2_inode_modify(ip);
1101 	osize = ip->meta.size;
1102 	ip->osize = osize;
1103 	ip->meta.size = nsize;
1104 
1105 	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1106 		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1107 		hammer2_inode_chain_sync(ip);
1108 	}
1109 
1110 	hammer2_mtx_unlock(&ip->lock);
1111 	if (ip->vp) {
1112 		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1113 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1114 		nvextendbuf(ip->vp,
1115 			    osize, nsize,
1116 			    oblksize, nblksize,
1117 			    -1, -1, 0);
1118 	}
1119 	hammer2_mtx_ex(&ip->lock);
1120 
1121 	LOCKSTOP;
1122 }
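
#if 0
/*
 * Illustrative sketch only: the EMBEDDED_BYTES crossing described
 * above.  Assuming HAMMER2_EMBEDDED_BYTES is the size of the inode's
 * embedded data area (512 bytes in this era of the format), extending
 * a 400-byte file to 2048 bytes crosses the boundary:
 *
 *	osize = 400  <= HAMMER2_EMBEDDED_BYTES
 *	nsize = 2048 >  HAMMER2_EMBEDDED_BYTES
 *
 * so INODE_RESIZED is set and hammer2_inode_chain_sync() must run
 * before any strategy I/O, to move the data out of DIRECTDATA mode.
 */
#endif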
1123 
1124 static
1125 int
1126 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1127 {
1128 	hammer2_xop_nresolve_t *xop;
1129 	hammer2_inode_t *ip;
1130 	hammer2_inode_t *dip;
1131 	struct namecache *ncp;
1132 	struct vnode *vp;
1133 	int error;
1134 
1135 	LOCKSTART;
1136 	dip = VTOI(ap->a_dvp);
1137 	xop = hammer2_xop_alloc(dip, 0);
1138 
1139 	ncp = ap->a_nch->ncp;
1140 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1141 
1142 	/*
1143 	 * Note: In DragonFly the kernel handles '.' and '..'.
1144 	 */
1145 	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1146 	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
1147 
1148 	error = hammer2_xop_collect(&xop->head, 0);
1149 	if (error) {
1150 		ip = NULL;
1151 	} else {
1152 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1153 	}
1154 	hammer2_inode_unlock(dip);
1155 
1156 	/*
1157 	 * Acquire the related vnode
1158 	 *
1159 	 * NOTE: For error processing, only ENOENT resolves the namecache
1160 	 *	 entry to NULL, otherwise we just return the error and
1161 	 *	 leave the namecache unresolved.
1162 	 *
1163 	 * NOTE: multiple hammer2_inode structures can be aliased to the
1164 	 *	 same chain element, for example for hardlinks.  This
1165 	 *	 use case does not 'reattach' inode associations that
1166 	 *	 might already exist, but always allocates a new one.
1167 	 *
1168 	 * WARNING: inode structure is locked exclusively via inode_get
1169 	 *	    but chain was locked shared.  inode_unlock()
1170 	 *	    will handle it properly.
1171 	 */
1172 	if (ip) {
1173 		vp = hammer2_igetv(ip, &error);
1174 		if (error == 0) {
1175 			vn_unlock(vp);
1176 			cache_setvp(ap->a_nch, vp);
1177 		} else if (error == ENOENT) {
1178 			cache_setvp(ap->a_nch, NULL);
1179 		}
1180 		hammer2_inode_unlock(ip);
1181 
1182 		/*
1183 		 * The vp should not be released until after we've disposed
1184 		 * of our locks, because it might cause vop_inactive() to
1185 		 * be called.
1186 		 */
1187 		if (vp)
1188 			vrele(vp);
1189 	} else {
1190 		error = ENOENT;
1191 		cache_setvp(ap->a_nch, NULL);
1192 	}
1193 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1194 	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1195 		("resolve error %d/%p ap %p\n",
1196 		 error, ap->a_nch->ncp->nc_vp, ap));
1197 	LOCKSTOP;
1198 
1199 	return error;
1200 }
1201 
1202 static
1203 int
1204 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1205 {
1206 	hammer2_inode_t *dip;
1207 	hammer2_inode_t *ip;
1208 	int error;
1209 
1210 	LOCKSTART;
1211 	dip = VTOI(ap->a_dvp);
1212 
1213 	if ((ip = dip->pip) == NULL) {
1214 		*ap->a_vpp = NULL;
1215 		LOCKSTOP;
1216 		return ENOENT;
1217 	}
1218 	hammer2_inode_lock(ip, 0);
1219 	*ap->a_vpp = hammer2_igetv(ip, &error);
1220 	hammer2_inode_unlock(ip);
1221 
1222 	LOCKSTOP;
1223 	return error;
1224 }
1225 
1226 static
1227 int
1228 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1229 {
1230 	hammer2_inode_t *dip;
1231 	hammer2_inode_t *nip;
1232 	struct namecache *ncp;
1233 	const uint8_t *name;
1234 	size_t name_len;
1235 	int error;
1236 
1237 	LOCKSTART;
1238 	dip = VTOI(ap->a_dvp);
1239 	if (dip->pmp->ronly) {
1240 		LOCKSTOP;
1241 		return (EROFS);
1242 	}
1243 
1244 	ncp = ap->a_nch->ncp;
1245 	name = ncp->nc_name;
1246 	name_len = ncp->nc_nlen;
1247 
1248 	hammer2_pfs_memory_wait(dip->pmp);
1249 	hammer2_trans_init(dip->pmp, 0);
1250 	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1251 				   name, name_len, 0,
1252 				   hammer2_trans_newinum(dip->pmp), 0, 0,
1253 				   0, &error);
1254 	if (error) {
1255 		KKASSERT(nip == NULL);
1256 		*ap->a_vpp = NULL;
1257 	} else {
1258 		*ap->a_vpp = hammer2_igetv(nip, &error);
1259 		hammer2_inode_unlock(nip);
1260 	}
1261 	hammer2_trans_done(dip->pmp);
1262 
1263 	if (error == 0) {
1264 		cache_setunresolved(ap->a_nch);
1265 		cache_setvp(ap->a_nch, *ap->a_vpp);
1266 	}
1267 	LOCKSTOP;
1268 	return error;
1269 }
1270 
1271 static
1272 int
1273 hammer2_vop_open(struct vop_open_args *ap)
1274 {
1275 	return vop_stdopen(ap);
1276 }
1277 
1278 /*
1279  * hammer2_vop_advlock { vp, id, op, fl, flags }
1280  */
1281 static
1282 int
1283 hammer2_vop_advlock(struct vop_advlock_args *ap)
1284 {
1285 	hammer2_inode_t *ip = VTOI(ap->a_vp);
1286 	hammer2_off_t size;
1287 
1288 	size = ip->meta.size;
1289 	return (lf_advlock(ap, &ip->advlock, size));
1290 }
1291 
1292 static
1293 int
1294 hammer2_vop_close(struct vop_close_args *ap)
1295 {
1296 	return vop_stdclose(ap);
1297 }
1298 
1299 /*
1300  * hammer2_vop_nlink { nch, dvp, vp, cred }
1301  *
1302  * Create a hardlink from (vp) to {dvp, nch}.
1303  */
1304 static
1305 int
1306 hammer2_vop_nlink(struct vop_nlink_args *ap)
1307 {
1308 	hammer2_xop_nlink_t *xop1;
1309 	hammer2_inode_t *fdip;	/* directory where ip currently resides */
1310 	hammer2_inode_t *tdip;	/* target directory to create link in */
1311 	hammer2_inode_t *cdip;	/* common parent directory */
1312 	hammer2_inode_t *ip;	/* inode we are hardlinking to */
1313 	struct namecache *ncp;
1314 	const uint8_t *name;
1315 	size_t name_len;
1316 	int nlink_locked;
1317 	int error;
1318 
1319 	LOCKSTART;
1320 	tdip = VTOI(ap->a_dvp);
1321 	if (tdip->pmp->ronly) {
1322 		LOCKSTOP;
1323 		return (EROFS);
1324 	}
1325 
1326 	ncp = ap->a_nch->ncp;
1327 	name = ncp->nc_name;
1328 	name_len = ncp->nc_nlen;
1329 
1330 	/*
1331 	 * ip represents the file being hardlinked.  The file could be a
1332 	 * normal file or a hardlink target if it has already been hardlinked.
1333 	 * If ip is a hardlinked target then ip->pip represents the location
1334 	 * of the hardlinked target, NOT the location of the hardlink pointer.
1335 	 *
1336 	 * Bump nlinks and potentially also create or move the hardlink
1337 	 * target in the parent directory common to (ip) and (tdip).  The
1338 	 * consolidation code can modify ip->cluster and ip->pip.  The
1339 	 * returned cluster is locked.
1340 	 */
1341 	ip = VTOI(ap->a_vp);
1342 	hammer2_pfs_memory_wait(ip->pmp);
1343 	hammer2_trans_init(ip->pmp, 0);
1344 
1345 	/*
1346 	 * The common parent directory must be locked first to avoid deadlocks.
1347 	 * Also note that fdip and/or tdip might match cdip.
1348 	 *
1349 	 * WARNING!  The kernel's namecache locks are insufficient for
1350 	 *	     protecting us from hardlink shifts, since unrelated
1351 	 *	     rename() or link() calls on parent directories might
1352 	 *	     cause a shift.  A PFS-wide lock is required for this
1353 	 *	     situation.
1354 	 */
1355 	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY ||
1356 	    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
1357 		lockmgr(&ip->pmp->lock_nlink, LK_EXCLUSIVE);
1358 		nlink_locked = 1;
1359 	} else {
1360 		nlink_locked = 0;
1361 	}
1362 	fdip = ip->pip;
1363 	error = 0;
1364 
1365 	/*
1366 	 * Can return NULL and error == EXDEV if the common parent
1367 	 * crosses a directory with the xlink flag set.
1368 	 */
1369 	cdip = hammer2_inode_common_parent(fdip, tdip, &error, 1);
1370 	if (cdip)
1371 		hammer2_inode_lock(cdip, 0);
1372 	hammer2_inode_lock(fdip, 0);
1373 	hammer2_inode_lock(tdip, 0);
1374 	hammer2_inode_lock(ip, 0);
1375 
1376 	/*
1377 	 * Dispatch xop_nlink unconditionally since we have to update nlinks.
1378 	 *
1379 	 * Otherwise we'd be able to avoid the XOP if the ip does not have
1380 	 * to be converted or moved.
1381 	 * If ip is not a hardlink target we must convert it to a hardlink.
1382 	 * If fdip != cdip we must shift the inode to cdip.
1383 	 *
1384 	 * XXX this and other nlink update usage should be passed top-down
1385 	 *     and not updated with a delta bottom-up.
1386 	 */
1387 #if 0
1388 	if (fdip != cdip || (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE))
1389 #endif
1390 	if (error == 0) {
1391 		xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
1392 		hammer2_xop_setip2(&xop1->head, ip);
1393 		hammer2_xop_setip3(&xop1->head, cdip);
1394 		xop1->nlinks_delta = 1;
1395 
1396 		hammer2_xop_start(&xop1->head, hammer2_xop_nlink);
1397 		error = hammer2_xop_collect(&xop1->head, 0);
1398 		hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP);
1399 		if (error == ENOENT)
1400 			error = 0;
1401 	}
1402 
1403 	/*
1404 	 * Must synchronize original inode whose chains are now a hardlink
1405 	 * target.  We must match what the backend XOP did to the
1406 	 * chains.
1407 	 */
1408 	if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
1409 		hammer2_inode_modify(ip);
1410 		ip->meta.name_key = ip->meta.inum;
1411 		ip->meta.name_len = 18;	/* "0x%016jx" */
1412 	}
1413 
1414 	/*
1415 	 * Create the hardlink target and bump nlinks.
1416 	 */
1417 	if (error == 0) {
1418 		hammer2_inode_create(tdip, NULL, NULL,
1419 				     name, name_len, 0,
1420 				     ip->meta.inum,
1421 				     HAMMER2_OBJTYPE_HARDLINK, ip->meta.type,
1422 				     0, &error);
1423 		hammer2_inode_modify(ip);
1424 		++ip->meta.nlinks;
1425 	}
1426 	if (error == 0) {
1427 		cache_setunresolved(ap->a_nch);
1428 		cache_setvp(ap->a_nch, ap->a_vp);
1429 	}
1430 	hammer2_inode_unlock(ip);
1431 	hammer2_inode_unlock(tdip);
1432 	hammer2_inode_unlock(fdip);
1433 	if (cdip) {
1434 		hammer2_inode_unlock(cdip);
1435 		hammer2_inode_drop(cdip);
1436 	}
1437 
1438 	if (nlink_locked)
1439 		lockmgr(&ip->pmp->lock_nlink, LK_RELEASE);
1440 	hammer2_trans_done(ip->pmp);
1441 
1442 	LOCKSTOP;
1443 	return error;
1444 }
1445 
1446 /*
1447  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1448  *
1449  * The operating system has already ensured that the directory entry
1450  * does not exist and done all appropriate namespace locking.
1451  */
1452 static
1453 int
1454 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1455 {
1456 	hammer2_inode_t *dip;
1457 	hammer2_inode_t *nip;
1458 	struct namecache *ncp;
1459 	const uint8_t *name;
1460 	size_t name_len;
1461 	int error;
1462 
1463 	LOCKSTART;
1464 	dip = VTOI(ap->a_dvp);
1465 	if (dip->pmp->ronly) {
1466 		LOCKSTOP;
1467 		return (EROFS);
1468 	}
1469 
1470 	ncp = ap->a_nch->ncp;
1471 	name = ncp->nc_name;
1472 	name_len = ncp->nc_nlen;
1473 	hammer2_pfs_memory_wait(dip->pmp);
1474 	hammer2_trans_init(dip->pmp, 0);
1475 
1476 	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1477 				   name, name_len, 0,
1478 				   hammer2_trans_newinum(dip->pmp), 0, 0,
1479 				   0, &error);
1480 	if (error) {
1481 		KKASSERT(nip == NULL);
1482 		*ap->a_vpp = NULL;
1483 	} else {
1484 		*ap->a_vpp = hammer2_igetv(nip, &error);
1485 		hammer2_inode_unlock(nip);
1486 	}
1487 	hammer2_trans_done(dip->pmp);
1488 
1489 	if (error == 0) {
1490 		cache_setunresolved(ap->a_nch);
1491 		cache_setvp(ap->a_nch, *ap->a_vpp);
1492 	}
1493 	LOCKSTOP;
1494 	return error;
1495 }
1496 
1497 /*
1498  * Make a device node (typically a fifo)
1499  */
1500 static
1501 int
1502 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1503 {
1504 	hammer2_inode_t *dip;
1505 	hammer2_inode_t *nip;
1506 	struct namecache *ncp;
1507 	const uint8_t *name;
1508 	size_t name_len;
1509 	int error;
1510 
1511 	LOCKSTART;
1512 	dip = VTOI(ap->a_dvp);
1513 	if (dip->pmp->ronly) {
1514 		LOCKSTOP;
1515 		return (EROFS);
1516 	}
1517 
1518 	ncp = ap->a_nch->ncp;
1519 	name = ncp->nc_name;
1520 	name_len = ncp->nc_nlen;
1521 	hammer2_pfs_memory_wait(dip->pmp);
1522 	hammer2_trans_init(dip->pmp, 0);
1523 
1524 	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1525 				   name, name_len, 0,
1526 				   hammer2_trans_newinum(dip->pmp), 0, 0,
1527 				   0, &error);
1528 	if (error) {
1529 		KKASSERT(nip == NULL);
1530 		*ap->a_vpp = NULL;
1531 	} else {
1532 		*ap->a_vpp = hammer2_igetv(nip, &error);
1533 		hammer2_inode_unlock(nip);
1534 	}
1535 	hammer2_trans_done(dip->pmp);
1536 
1537 	if (error == 0) {
1538 		cache_setunresolved(ap->a_nch);
1539 		cache_setvp(ap->a_nch, *ap->a_vpp);
1540 	}
1541 	LOCKSTOP;
1542 	return error;
1543 }
1544 
1545 /*
1546  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1547  */
1548 static
1549 int
1550 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1551 {
1552 	hammer2_inode_t *dip;
1553 	hammer2_inode_t *nip;
1554 	struct namecache *ncp;
1555 	const uint8_t *name;
1556 	size_t name_len;
1557 	int error;
1558 
1559 	dip = VTOI(ap->a_dvp);
1560 	if (dip->pmp->ronly)
1561 		return (EROFS);
1562 
1563 	ncp = ap->a_nch->ncp;
1564 	name = ncp->nc_name;
1565 	name_len = ncp->nc_nlen;
1566 	hammer2_pfs_memory_wait(dip->pmp);
1567 	hammer2_trans_init(dip->pmp, 0);
1568 
1569 	ap->a_vap->va_type = VLNK;	/* enforce type */
1570 
1571 	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
1572 				   name, name_len, 0,
1573 				   hammer2_trans_newinum(dip->pmp), 0, 0,
1574 				   0, &error);
1575 	if (error) {
1576 		KKASSERT(nip == NULL);
1577 		*ap->a_vpp = NULL;
1578 		hammer2_trans_done(dip->pmp);
1579 		return error;
1580 	}
1581 	*ap->a_vpp = hammer2_igetv(nip, &error);
1582 
1583 	/*
1584 	 * Build the softlink (stored like file data) and finalize the namecache.
1585 	 */
1586 	if (error == 0) {
1587 		size_t bytes;
1588 		struct uio auio;
1589 		struct iovec aiov;
1590 
1591 		bytes = strlen(ap->a_target);
1592 
1593 		hammer2_inode_unlock(nip);
1594 		bzero(&auio, sizeof(auio));
1595 		bzero(&aiov, sizeof(aiov));
1596 		auio.uio_iov = &aiov;
1597 		auio.uio_segflg = UIO_SYSSPACE;
1598 		auio.uio_rw = UIO_WRITE;
1599 		auio.uio_resid = bytes;
1600 		auio.uio_iovcnt = 1;
1601 		auio.uio_td = curthread;
1602 		aiov.iov_base = ap->a_target;
1603 		aiov.iov_len = bytes;
1604 		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1605 		/* XXX handle error */
1606 		error = 0;
1607 	} else {
1608 		hammer2_inode_unlock(nip);
1609 	}
1610 	hammer2_trans_done(dip->pmp);
1611 
1612 	/*
1613 	 * Finalize namecache
1614 	 */
1615 	if (error == 0) {
1616 		cache_setunresolved(ap->a_nch);
1617 		cache_setvp(ap->a_nch, *ap->a_vpp);
1618 		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
1619 	}
1620 	return error;
1621 }
1622 
1623 /*
1624  * hammer2_vop_nremove { nch, dvp, cred }
1625  */
1626 static
1627 int
1628 hammer2_vop_nremove(struct vop_nremove_args *ap)
1629 {
1630 	hammer2_xop_unlink_t *xop;
1631 	hammer2_inode_t *dip;
1632 	hammer2_inode_t *ip;
1633 	struct namecache *ncp;
1634 	int error;
1635 	int isopen;
1636 
1637 	LOCKSTART;
1638 	dip = VTOI(ap->a_dvp);
1639 	if (dip->pmp->ronly) {
1640 		LOCKSTOP;
1641 		return(EROFS);
1642 	}
1643 
1644 	ncp = ap->a_nch->ncp;
1645 
1646 	hammer2_pfs_memory_wait(dip->pmp);
1647 	hammer2_trans_init(dip->pmp, 0);
1648 	hammer2_inode_lock(dip, 0);
1649 
1650 	/*
1651 	 * The unlink XOP unlinks the path from the directory and
1652 	 * locates and returns the cluster associated with the real inode.
1653 	 * We have to handle nlinks here on the frontend.
1654 	 */
1655 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1656 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1657 	isopen = cache_isopen(ap->a_nch);
1658 	xop->isdir = 0;
1659 	xop->dopermanent = isopen ?  0 : HAMMER2_DELETE_PERMANENT;
1660 	hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1661 
1662 	/*
1663 	 * Collect the real inode and adjust nlinks, destroy the real
1664 	 * inode if nlinks transitions to 0 and it was the real inode
1665 	 * (else it has already been removed).
1666 	 */
1667 	error = hammer2_xop_collect(&xop->head, 0);
1668 	hammer2_inode_unlock(dip);
1669 
1670 	if (error == 0) {
1671 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1672 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1673 		if (ip) {
1674 			hammer2_inode_unlink_finisher(ip, isopen);
1675 			hammer2_inode_unlock(ip);
1676 		}
1677 	} else {
1678 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1679 	}
1680 
1681 	hammer2_inode_run_sideq(dip->pmp);
1682 	hammer2_trans_done(dip->pmp);
1683 	if (error == 0)
1684 		cache_unlink(ap->a_nch);
1685 	LOCKSTOP;
1686 	return (error);
1687 }
1688 
1689 /*
1690  * hammer2_vop_nrmdir { nch, dvp, cred }
1691  */
1692 static
1693 int
1694 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1695 {
1696 	hammer2_xop_unlink_t *xop;
1697 	hammer2_inode_t *dip;
1698 	hammer2_inode_t *ip;
1699 	struct namecache *ncp;
1700 	int isopen;
1701 	int error;
1702 
1703 	LOCKSTART;
1704 	dip = VTOI(ap->a_dvp);
1705 	if (dip->pmp->ronly) {
1706 		LOCKSTOP;
1707 		return(EROFS);
1708 	}
1709 
1710 	hammer2_pfs_memory_wait(dip->pmp);
1711 	hammer2_trans_init(dip->pmp, 0);
1712 	hammer2_inode_lock(dip, 0);
1713 
1714 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1715 
1716 	ncp = ap->a_nch->ncp;
1717 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1718 	isopen = cache_isopen(ap->a_nch);
1719 	xop->isdir = 1;
1720 	xop->dopermanent = isopen ?  0 : HAMMER2_DELETE_PERMANENT;
1721 	hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1722 
1723 	/*
1724 	 * Collect the real inode and adjust nlinks, destroy the real
1725 	 * inode if nlinks transitions to 0 and it was the real inode
1726 	 * (else it has already been removed).
1727 	 */
1728 	error = hammer2_xop_collect(&xop->head, 0);
1729 	hammer2_inode_unlock(dip);
1730 
1731 	if (error == 0) {
1732 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1733 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1734 		if (ip) {
1735 			hammer2_inode_unlink_finisher(ip, isopen);
1736 			hammer2_inode_unlock(ip);
1737 		}
1738 	} else {
1739 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1740 	}
1741 	hammer2_inode_run_sideq(dip->pmp);
1742 	hammer2_trans_done(dip->pmp);
1743 	if (error == 0)
1744 		cache_unlink(ap->a_nch);
1745 	LOCKSTOP;
1746 	return (error);
1747 }
1748 
1749 /*
1750  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1751  */
1752 static
1753 int
1754 hammer2_vop_nrename(struct vop_nrename_args *ap)
1755 {
1756 	struct namecache *fncp;
1757 	struct namecache *tncp;
1758 	hammer2_inode_t *cdip;
1759 	hammer2_inode_t *fdip;
1760 	hammer2_inode_t *tdip;
1761 	hammer2_inode_t *ip;
1762 	const uint8_t *fname;
1763 	size_t fname_len;
1764 	const uint8_t *tname;
1765 	size_t tname_len;
1766 	int error;
1767 	int tnch_error;
1768 	int nlink_locked;
1769 	hammer2_key_t tlhc;
1770 
1771 	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1772 		return(EXDEV);
1773 	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1774 		return(EXDEV);
1775 
1776 	fdip = VTOI(ap->a_fdvp);	/* source directory */
1777 	tdip = VTOI(ap->a_tdvp);	/* target directory */
1778 
1779 	if (fdip->pmp->ronly)
1780 		return(EROFS);
1781 
1782 	LOCKSTART;
1783 	fncp = ap->a_fnch->ncp;		/* entry name in source */
1784 	fname = fncp->nc_name;
1785 	fname_len = fncp->nc_nlen;
1786 
1787 	tncp = ap->a_tnch->ncp;		/* entry name in target */
1788 	tname = tncp->nc_name;
1789 	tname_len = tncp->nc_nlen;
1790 
1791 	hammer2_pfs_memory_wait(tdip->pmp);
1792 	hammer2_trans_init(tdip->pmp, 0);
1793 
1794 	/*
1795 	 * ip is the inode being renamed.  If this is a hardlink then
1796 	 * ip represents the actual file and not the hardlink marker.
1797 	 */
1798 	ip = VTOI(fncp->nc_vp);
1799 
1800 	/*
1801 	 * The common parent directory must be locked first to avoid deadlocks.
1802 	 * Also note that fdip and/or tdip might match cdip.
1803 	 *
1804 	 * WARNING!  The kernel's namecache locks are insufficient for
1805 	 *	     protecting us from hardlink shifts, since unrelated
1806 	 *	     rename() or link() calls on parent directories might
1807 	 *	     cause a shift.  A PFS-wide lock is required for this
1808 	 *	     situation.
1809 	 */
1810 	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY ||
1811 	    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
1812 		lockmgr(&ip->pmp->lock_nlink, LK_EXCLUSIVE);
1813 		nlink_locked = 1;
1814 	} else {
1815 		nlink_locked = 0;
1816 	}
1817 
1818 	/*
1819 	 * Can return NULL and error == EXDEV if the common parent
1820 	 * crosses a directory with the xlink flag set.
1821 	 */
1822 	error = 0;
1823 	cdip = hammer2_inode_common_parent(ip->pip, tdip, &error, 0);
1824 	if (cdip == NULL) {
1825 		tnch_error = error;
1826 		goto done3;
1827 	}
1828 	hammer2_inode_lock(cdip, 0);
1829 	hammer2_inode_lock(fdip, 0);
1830 	hammer2_inode_lock(tdip, 0);
1831 	hammer2_inode_ref(ip);		/* extra ref */
1832 
1833 	/*
1834 	 * If ip is a hardlink target and fdip != cdip we must shift the
1835 	 * inode to cdip.
1836 	 */
1837 	hammer2_inode_lock(ip, 0);
1838 
1839 	if (fdip != cdip &&
1840 	    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
1841 		hammer2_xop_nlink_t *xop1;
1842 
1843 		xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
1844 		hammer2_xop_setip2(&xop1->head, ip);
1845 		hammer2_xop_setip3(&xop1->head, cdip);
1846 		xop1->nlinks_delta = 0;
1847 
1848 		hammer2_xop_start(&xop1->head, hammer2_xop_nlink);
1849 		error = hammer2_xop_collect(&xop1->head, 0);
1850 		hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP);
1851 	}
1852 	/* hammer2_inode_unlock(ip); */
1853 
1854 	/*
1855 	 * Delete the target namespace.
1856 	 */
1857 	{
1858 		hammer2_xop_unlink_t *xop2;
1859 		hammer2_inode_t *tip;
1860 		int isopen;
1861 
1862 		/*
1863 		 * The unlink XOP unlinks the path from the directory and
1864 		 * locates and returns the cluster associated with the real
1865 		 * inode.  We have to handle nlinks here on the frontend.
1866 		 */
1867 		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
1868 		hammer2_xop_setname(&xop2->head, tname, tname_len);
1869 		isopen = cache_isopen(ap->a_tnch);
1870 		xop2->isdir = -1;
1871 		xop2->dopermanent = isopen ?  0 : HAMMER2_DELETE_PERMANENT;
1872 		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);
1873 
1874 		/*
1875 		 * Collect the real inode and adjust nlinks, destroy the real
1876 		 * inode if nlinks transitions to 0 and it was the real inode
1877 		 * (else it has already been removed).
1878 		 */
1879 		tnch_error = hammer2_xop_collect(&xop2->head, 0);
1880 		/* hammer2_inode_unlock(tdip); */
1881 
1882 		if (tnch_error == 0) {
1883 			tip = hammer2_inode_get(tdip->pmp, NULL,
1884 						&xop2->head.cluster, -1);
1885 			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
1886 			if (tip) {
1887 				hammer2_inode_unlink_finisher(tip, isopen);
1888 				hammer2_inode_unlock(tip);
1889 			}
1890 		} else {
1891 			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
1892 		}
1893 		/* hammer2_inode_lock(tdip, 0); */
1894 
1895 		if (tnch_error && tnch_error != ENOENT) {
1896 			error = tnch_error;
1897 			goto done2;
1898 		}
1899 	}
1900 
1901 	/*
1902 	 * Resolve the collision space for (tdip, tname, tname_len)
1903 	 *
1904 	 * tdip must be held exclusively locked to prevent races.
1905 	 */
1906 	{
1907 		hammer2_xop_scanlhc_t *sxop;
1908 		hammer2_tid_t lhcbase;
1909 
1910 		tlhc = hammer2_dirhash(tname, tname_len);
1911 		lhcbase = tlhc;
1912 		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
1913 		sxop->lhc = tlhc;
1914 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
1915 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
1916 			if (tlhc != sxop->head.cluster.focus->bref.key)
1917 				break;
1918 			++tlhc;
1919 		}
1920 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
1921 
1922 		if (error) {
1923 			if (error != ENOENT)
1924 				goto done2;
1925 			++tlhc;
1926 			error = 0;
1927 		}
1928 		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
1929 			error = ENOSPC;
1930 			goto done2;
1931 		}
1932 	}
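#if 0
	/*
	 * Illustrative worked example only: the linear probe above.  If
	 * hammer2_dirhash(tname) yields a hypothetical key such as
	 * 0x8000000012340000 and that key is already in use, tlhc is
	 * bumped to ...0001, ...0002, and so on.  The probe may only
	 * wander within HAMMER2_DIRHASH_LOMASK of lhcbase; walking past
	 * that range means the collision space is exhausted and the
	 * rename fails with ENOSPC.
	 */
#endif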
1933 
1934 	/*
1935 	 * Everything is setup, do the rename.
1936 	 *
1937 	 * We have to synchronize ip->meta to the underlying operation.
1938 	 *
1939 	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
1940 	 *	 unlinking elements from their directories.  Locking
1941 	 *	 the nlinks field does not lock the whole inode.
1942 	 */
1943 	/* hammer2_inode_lock(ip, 0); */
1944 	if (error == 0) {
1945 		hammer2_xop_nrename_t *xop4;
1946 
1947 		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
1948 		xop4->lhc = tlhc;
1949 		xop4->ip_key = ip->meta.name_key;
1950 		hammer2_xop_setip2(&xop4->head, ip);
1951 		hammer2_xop_setip3(&xop4->head, tdip);
1952 		hammer2_xop_setname(&xop4->head, fname, fname_len);
1953 		hammer2_xop_setname2(&xop4->head, tname, tname_len);
1954 		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);
1955 
1956 		error = hammer2_xop_collect(&xop4->head, 0);
1957 		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
1958 
1959 		if (error == ENOENT)
1960 			error = 0;
1961 		if (error == 0 &&
1962 		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
1963 			hammer2_inode_modify(ip);
1964 			ip->meta.name_len = tname_len;
1965 			ip->meta.name_key = tlhc;
1967 		}
1968 	}
1969 
1970 	/*
1971 	 * Fix up ip->pip if we were renaming the actual file and not a
1972 	 * hardlink pointer.
1973 	 */
1974 	if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
1975 		hammer2_inode_t *opip;
1976 
1977 		if (ip->pip != tdip) {
1978 			hammer2_inode_ref(tdip);
1979 			opip = ip->pip;
1980 			ip->pip = tdip;
1981 			if (opip)
1982 				hammer2_inode_drop(opip);
1983 		}
1984 	}
1985 done2:
1986 	hammer2_inode_unlock(ip);
1987 	hammer2_inode_unlock(tdip);
1988 	hammer2_inode_unlock(fdip);
1989 	hammer2_inode_unlock(cdip);
1990 	hammer2_inode_drop(ip);
1991 	hammer2_inode_drop(cdip);
1992 done3:
1993 	hammer2_inode_run_sideq(fdip->pmp);
1994 
1995 	if (nlink_locked)
1996 		lockmgr(&ip->pmp->lock_nlink, LK_RELEASE);
1997 	hammer2_trans_done(tdip->pmp);
1998 
1999 	/*
2000 	 * Issue the namecache update after unlocking all the internal
2001 	 * hammer2 structures; otherwise we might deadlock.
2002 	 */
2003 	if (tnch_error == 0) {
2004 		cache_unlink(ap->a_tnch);
2005 		cache_setunresolved(ap->a_tnch);
2006 	}
2007 	if (error == 0)
2008 		cache_rename(ap->a_fnch, ap->a_tnch);
2009 
2010 	LOCKSTOP;
2011 	return (error);
2012 }
2013 
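/*
 * (Editor's note, illustrative only -- not part of the original source.)
 * The frontend XOP pattern used repeatedly in hammer2_vop_nrename()
 * above, distilled: allocate an operation, parameterize it, dispatch it
 * to the backend threads, collect the result, and always retire it.
 * All calls mirror those used above; the wrapper function itself is
 * hypothetical.
 */
#if 0
static int
hammer2_xop_pattern_example(hammer2_inode_t *dip,
			    const char *name, size_t name_len)
{
	hammer2_xop_unlink_t *xop;
	int error;

	/* Allocate an op bound to the (locked) directory inode. */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, name, name_len);

	/* Dispatch to the backend threads. */
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/* Block collecting the result. */
	error = hammer2_xop_collect(&xop->head, 0);

	/* Retire the XOP whether or not it succeeded. */
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	return (error);
}
#endif
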
2014 /*
2015  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2016  */
2017 static
2018 int
2019 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2020 {
2021 	hammer2_inode_t *ip;
2022 	int error;
2023 
2024 	LOCKSTART;
2025 	ip = VTOI(ap->a_vp);
2026 
2027 	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2028 			      ap->a_fflag, ap->a_cred);
2029 	LOCKSTOP;
2030 	return (error);
2031 }
2032 
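/*
 * (Editor's note, illustrative only -- not part of the original source.)
 * A minimal userland sketch: ioctl(2) on any hammer2 vnode dispatches to
 * hammer2_ioctl() above.  No real command name is shown; the caller
 * supplies one of the HAMMER2IOC_* commands from the hammer2 ioctl
 * header.  The wrapper below is hypothetical.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

int
h2_ioctl_example(const char *path, unsigned long cmd, void *data)
{
	int fd, rc;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return (-1);
	rc = ioctl(fd, cmd, data);	/* lands in hammer2_vop_ioctl() */
	close(fd);
	return (rc);
}
#endif
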
2033 static
2034 int
2035 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2036 {
2037 	struct mount *mp;
2038 	hammer2_pfs_t *pmp;
2039 	int rc;
2040 
2041 	LOCKSTART;
2042 	switch (ap->a_op) {
2043 	case MOUNTCTL_SET_EXPORT:
2044 		mp = ap->a_head.a_ops->head.vv_mount;
2045 		pmp = MPTOPMP(mp);
2046 
2047 		if (ap->a_ctllen != sizeof(struct export_args))
2048 			rc = EINVAL;
2049 		else
2050 			rc = vfs_export(mp, &pmp->export,
2051 					(const struct export_args *)ap->a_ctl);
2052 		break;
2053 	default:
2054 		rc = vop_stdmountctl(ap);
2055 		break;
2056 	}
2057 	LOCKSTOP;
2058 	return (rc);
2059 }
2060 
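/*
 * (Editor's note, illustrative only -- not part of the original source.)
 * A minimal userland sketch of the MOUNTCTL_SET_EXPORT path handled
 * above, assuming the DragonFly mountctl(2) prototype; the wrapper is
 * hypothetical.  The kernel side rejects any control length other than
 * sizeof(struct export_args) with EINVAL.
 */
#if 0
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/mountctl.h>
#include <string.h>

int
h2_export_example(const char *mtpt)
{
	struct export_args ea;

	memset(&ea, 0, sizeof(ea));
	ea.ex_flags = MNT_EXRDONLY;	/* export filesystem read-only */
	return (mountctl(mtpt, MOUNTCTL_SET_EXPORT, -1,
			 &ea, sizeof(ea), NULL, 0));
}
#endif
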
2061 /*
2062  * KQFILTER
2063  */
2064 static void filt_hammer2detach(struct knote *kn);
2065 static int filt_hammer2read(struct knote *kn, long hint);
2066 static int filt_hammer2write(struct knote *kn, long hint);
2067 static int filt_hammer2vnode(struct knote *kn, long hint);
2068 
2069 static struct filterops hammer2read_filtops =
2070 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2071 	  NULL, filt_hammer2detach, filt_hammer2read };
2072 static struct filterops hammer2write_filtops =
2073 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2074 	  NULL, filt_hammer2detach, filt_hammer2write };
2075 static struct filterops hammer2vnode_filtops =
2076 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2077 	  NULL, filt_hammer2detach, filt_hammer2vnode };
2078 
2079 static
2080 int
2081 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2082 {
2083 	struct vnode *vp = ap->a_vp;
2084 	struct knote *kn = ap->a_kn;
2085 
2086 	switch (kn->kn_filter) {
2087 	case EVFILT_READ:
2088 		kn->kn_fop = &hammer2read_filtops;
2089 		break;
2090 	case EVFILT_WRITE:
2091 		kn->kn_fop = &hammer2write_filtops;
2092 		break;
2093 	case EVFILT_VNODE:
2094 		kn->kn_fop = &hammer2vnode_filtops;
2095 		break;
2096 	default:
2097 		return (EOPNOTSUPP);
2098 	}
2099 
2100 	kn->kn_hook = (caddr_t)vp;
2101 
2102 	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2103 
2104 	return(0);
2105 }
2106 
2107 static void
2108 filt_hammer2detach(struct knote *kn)
2109 {
2110 	struct vnode *vp = (void *)kn->kn_hook;
2111 
2112 	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2113 }
2114 
2115 static int
2116 filt_hammer2read(struct knote *kn, long hint)
2117 {
2118 	struct vnode *vp = (void *)kn->kn_hook;
2119 	hammer2_inode_t *ip = VTOI(vp);
2120 	off_t off;
2121 
2122 	if (hint == NOTE_REVOKE) {
2123 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2124 		return(1);
2125 	}
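	/*
	 * (Editor's note) kn_data reports the bytes available between
	 * the descriptor's current offset and EOF, clamped to fit.
	 */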
2126 	off = ip->meta.size - kn->kn_fp->f_offset;
2127 	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2128 	if (kn->kn_sfflags & NOTE_OLDAPI)
2129 		return(1);
2130 	return (kn->kn_data != 0);
2131 }
2132 
2133 
2134 static int
2135 filt_hammer2write(struct knote *kn, long hint)
2136 {
2137 	if (hint == NOTE_REVOKE)
2138 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2139 	kn->kn_data = 0;
2140 	return (1);
2141 }
2142 
2143 static int
2144 filt_hammer2vnode(struct knote *kn, long hint)
2145 {
2146 	if (kn->kn_sfflags & hint)
2147 		kn->kn_fflags |= hint;
2148 	if (hint == NOTE_REVOKE) {
2149 		kn->kn_flags |= (EV_EOF | EV_NODATA);
2150 		return (1);
2151 	}
2152 	return (kn->kn_fflags != 0);
2153 }
2154 
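/*
 * (Editor's note, illustrative only -- not part of the original source.)
 * A minimal userland sketch of how the filters above are exercised:
 * registering EVFILT_READ on a hammer2 file lands in
 * hammer2_vop_kqfilter(), and each poll of the knote runs
 * filt_hammer2read(), which reports the bytes remaining before EOF in
 * kn_data.  The wrapper below is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>

int
h2_kqfilter_example(const char *path)
{
	struct kevent kev;
	int kq, fd;

	kq = kqueue();
	fd = open(path, O_RDONLY);
	if (kq < 0 || fd < 0)
		return (-1);
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);

	/* Register and wait; on return kev.data holds bytes to EOF. */
	return (kevent(kq, &kev, 1, &kev, 1, NULL));
}
#endif
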
2155 /*
2156  * FIFO VOPS
2157  */
2158 static
2159 int
2160 hammer2_vop_markatime(struct vop_markatime_args *ap)
2161 {
2162 	hammer2_inode_t *ip;
2163 	struct vnode *vp;
2164 
2165 	vp = ap->a_vp;
2166 	ip = VTOI(vp);
2167 
2168 	if (ip->pmp->ronly)
2169 		return(EROFS);
2170 	return(0);
2171 }
2172 
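/*
 * (Editor's note) Delegate to the fifofs filter first; if the fifo
 * layer rejects the requested filter, fall back to the regular
 * hammer2 kqfilter (e.g. for EVFILT_VNODE events).
 */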
2173 static
2174 int
2175 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2176 {
2177 	int error;
2178 
2179 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2180 	if (error)
2181 		error = hammer2_vop_kqfilter(ap);
2182 	return(error);
2183 }
2184 
2185 /*
2186  * VOPS vector
2187  */
2188 struct vop_ops hammer2_vnode_vops = {
2189 	.vop_default	= vop_defaultop,
2190 	.vop_fsync	= hammer2_vop_fsync,
2191 	.vop_getpages	= vop_stdgetpages,
2192 	.vop_putpages	= vop_stdputpages,
2193 	.vop_access	= hammer2_vop_access,
2194 	.vop_advlock	= hammer2_vop_advlock,
2195 	.vop_close	= hammer2_vop_close,
2196 	.vop_nlink	= hammer2_vop_nlink,
2197 	.vop_ncreate	= hammer2_vop_ncreate,
2198 	.vop_nsymlink	= hammer2_vop_nsymlink,
2199 	.vop_nremove	= hammer2_vop_nremove,
2200 	.vop_nrmdir	= hammer2_vop_nrmdir,
2201 	.vop_nrename	= hammer2_vop_nrename,
2202 	.vop_getattr	= hammer2_vop_getattr,
2203 	.vop_setattr	= hammer2_vop_setattr,
2204 	.vop_readdir	= hammer2_vop_readdir,
2205 	.vop_readlink	= hammer2_vop_readlink,
2208 	.vop_read	= hammer2_vop_read,
2209 	.vop_write	= hammer2_vop_write,
2210 	.vop_open	= hammer2_vop_open,
2211 	.vop_inactive	= hammer2_vop_inactive,
2212 	.vop_reclaim 	= hammer2_vop_reclaim,
2213 	.vop_nresolve	= hammer2_vop_nresolve,
2214 	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2215 	.vop_nmkdir 	= hammer2_vop_nmkdir,
2216 	.vop_nmknod 	= hammer2_vop_nmknod,
2217 	.vop_ioctl	= hammer2_vop_ioctl,
2218 	.vop_mountctl	= hammer2_vop_mountctl,
2219 	.vop_bmap	= hammer2_vop_bmap,
2220 	.vop_strategy	= hammer2_vop_strategy,
2221 	.vop_kqfilter	= hammer2_vop_kqfilter
2222 };
2223 
2224 struct vop_ops hammer2_spec_vops = {
2225 	.vop_default	= vop_defaultop,
2226 	.vop_fsync	= hammer2_vop_fsync,
2227 	.vop_read	= vop_stdnoread,
2228 	.vop_write	= vop_stdnowrite,
2229 	.vop_access	= hammer2_vop_access,
2230 	.vop_close	= hammer2_vop_close,
2231 	.vop_markatime	= hammer2_vop_markatime,
2232 	.vop_getattr	= hammer2_vop_getattr,
2233 	.vop_inactive	= hammer2_vop_inactive,
2234 	.vop_reclaim	= hammer2_vop_reclaim,
2235 	.vop_setattr	= hammer2_vop_setattr
2236 };
2237 
2238 struct vop_ops hammer2_fifo_vops = {
2239 	.vop_default	= fifo_vnoperate,
2240 	.vop_fsync	= hammer2_vop_fsync,
2241 #if 0
2242 	.vop_read	= hammer2_vop_fiforead,
2243 	.vop_write	= hammer2_vop_fifowrite,
2244 #endif
2245 	.vop_access	= hammer2_vop_access,
2246 #if 0
2247 	.vop_close	= hammer2_vop_fifoclose,
2248 #endif
2249 	.vop_markatime	= hammer2_vop_markatime,
2250 	.vop_getattr	= hammer2_vop_getattr,
2251 	.vop_inactive	= hammer2_vop_inactive,
2252 	.vop_reclaim	= hammer2_vop_reclaim,
2253 	.vop_setattr	= hammer2_vop_setattr,
2254 	.vop_kqfilter	= hammer2_vop_fifokqfilter
2255 };
2256 
2257