xref: /dragonfly/sys/vfs/hammer2/hammer2_vnops.c (revision eca362d0)
1 /*
2  * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * Kernel Filesystem interface
38  *
39  * NOTE! local ipdata pointers must be reloaded on any modifying operation
40  *	 to the inode as its underlying chain may have changed.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
59 
60 #include "hammer2.h"
61 
62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
63 				int seqcount);
64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
65 				int ioflag, int seqcount);
66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
68 
69 struct objcache *cache_xops;
70 
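/*
 * Post a kqueue event on the vnode when any note flags are set.
 */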
71 static __inline
72 void
73 hammer2_knote(struct vnode *vp, int flags)
74 {
75 	if (flags)
76 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
77 }
78 
79 /*
80  * Last reference to a vnode is going away but it is still cached.
81  */
82 static
83 int
84 hammer2_vop_inactive(struct vop_inactive_args *ap)
85 {
86 	hammer2_inode_t *ip;
87 	struct vnode *vp;
88 
89 	vp = ap->a_vp;
90 	ip = VTOI(vp);
91 
92 	/*
93 	 * Degenerate case
94 	 */
95 	if (ip == NULL) {
96 		vrecycle(vp);
97 		return (0);
98 	}
99 
100 	/*
101 	 * Check for deleted inodes and recycle immediately on the last
102 	 * release.  Be sure to destroy any left-over buffer cache buffers
103 	 * so we do not waste time trying to flush them.
104 	 *
105 	 * Note that deleting the file block chains under the inode chain
106 	 * would just be a waste of energy, so don't do it.
107 	 *
108 	 * WARNING: nvtruncbuf() can only be safely called without the inode
109 	 *	    lock held due to the way our write thread works.
110 	 */
111 	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
112 		hammer2_key_t lbase;
113 		int nblksize;
114 
115 		/*
116 		 * Destroy any left-over buffer cache buffers for the
117 		 * unlinked inode so no time is wasted flushing them, then
118 		 * recycle the vnode immediately.
119 		 */
120 		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
121 		nvtruncbuf(vp, 0, nblksize, 0, 0);
122 		vrecycle(vp);
123 	}
124 	return (0);
125 }
126 
127 /*
128  * Reclaim a vnode so that it can be reused; after the inode is
129  * disassociated, the filesystem must manage it alone.
130  */
131 static
132 int
133 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
134 {
135 	hammer2_inode_t *ip;
136 	hammer2_pfs_t *pmp;
137 	struct vnode *vp;
138 
139 	vp = ap->a_vp;
140 	ip = VTOI(vp);
141 	if (ip == NULL) {
142 		return(0);
143 	}
144 	pmp = ip->pmp;
145 
146 	/*
147 	 * The final close of a deleted file or directory marks it for
148 	 * destruction.  The DELETED flag allows the flusher to shortcut
149 	 * any modified blocks still unflushed (that is, just ignore them).
150 	 *
151 	 * HAMMER2 usually does not try to optimize the freemap by returning
152 	 * deleted blocks to it as it does not usually know how many snapshots
153 	 * might be referencing portions of the file/dir.
154 	 */
155 	vp->v_data = NULL;
156 	ip->vp = NULL;
157 
158 	/*
159 	 * NOTE! We do not attempt to flush chains here, flushing is
160 	 * NOTE! We do not attempt to flush chains here; flushing is
161 	 */
162 	vclrisdirty(vp);
163 
164 	/*
165 	 * A modified inode may require chain synchronization.  This
166 	 * synchronization is usually handled by VOP_SYNC / VOP_FSYNC
167 	 * when vfsync() is called.  However, that requires a vnode.
168 	 *
169 	 * When the vnode is disassociated we must keep track of any modified
170 	 * inode via the sideq so that it is properly flushed.  We cannot
171 	 * safely synchronize the inode from inside the reclaim due to
172 	 * potentially deep locks held at the time the reclaim occurs.
173 	 * Interactions and potential deadlocks abound.
174 	 */
175 	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
176 			  HAMMER2_INODE_MODIFIED |
177 			  HAMMER2_INODE_RESIZED)) &&
178 	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
179 		hammer2_inode_sideq_t *ipul;
180 
181 		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
182 		ipul->ip = ip;
183 
184 		hammer2_spin_ex(&pmp->list_spin);
185 		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
186 			/* ref -> sideq */
187 			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
188 			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
189 			++pmp->sideq_count;
190 			hammer2_spin_unex(&pmp->list_spin);
191 		} else {
192 			hammer2_spin_unex(&pmp->list_spin);
193 			kfree(ipul, pmp->minode);
194 			hammer2_inode_drop(ip);		/* vp ref */
195 		}
196 		/* retain ref from vp for ipul */
197 	} else {
198 		hammer2_inode_drop(ip);			/* vp ref */
199 	}
200 
201 	/*
202 	 * XXX handle background sync when ip dirty, kernel will no longer
203 	 * notify us regarding this inode because there is no longer a
204 	 * vnode attached to it.
205 	 */
206 
207 	return (0);
208 }
209 
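/*
 * Synchronize the vnode to media.  vfsync() flushes the dirty buffers,
 * then the inode meta-data is synchronized to its backing chain if the
 * inode was modified.
 */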
210 static
211 int
212 hammer2_vop_fsync(struct vop_fsync_args *ap)
213 {
214 	hammer2_inode_t *ip;
215 	struct vnode *vp;
216 
217 	vp = ap->a_vp;
218 	ip = VTOI(vp);
219 
220 #if 0
221 	/* XXX can't do this yet */
222 	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
223 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
224 #endif
225 	hammer2_trans_init(ip->pmp, 0);
226 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
227 
228 	/*
229 	 * Calling chain_flush here creates a lot of duplicative
230 	 * COW operations due to non-optimal vnode ordering.
231 	 *
232 	 * Only do it for an actual fsync() syscall.  The other forms
233 	 * which call this function will eventually call chain_flush
234 	 * on the volume root as a catch-all, which is far more optimal.
235 	 */
236 	hammer2_inode_lock(ip, 0);
237 	if (ip->flags & HAMMER2_INODE_MODIFIED)
238 		hammer2_inode_chain_sync(ip);
239 	hammer2_inode_unlock(ip);
240 	hammer2_trans_done(ip->pmp);
241 
242 	return (0);
243 }
244 
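/*
 * Check access permissions using the generic vop_helper_access() with
 * the uid/gid/mode/uflags taken from the inode meta-data.
 */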
245 static
246 int
247 hammer2_vop_access(struct vop_access_args *ap)
248 {
249 	hammer2_inode_t *ip = VTOI(ap->a_vp);
250 	uid_t uid;
251 	gid_t gid;
252 	int error;
253 
254 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
255 	uid = hammer2_to_unix_xid(&ip->meta.uid);
256 	gid = hammer2_to_unix_xid(&ip->meta.gid);
257 	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
258 	hammer2_inode_unlock(ip);
259 
260 	return (error);
261 }
262 
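/*
 * Retrieve file attributes from the inode meta-data under a shared
 * inode lock.  va_bytes is approximated from the largest data_count
 * among the cluster's chains.  HAMMER2 does not track atime, so mtime
 * is reported in its place.
 */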
263 static
264 int
265 hammer2_vop_getattr(struct vop_getattr_args *ap)
266 {
267 	hammer2_pfs_t *pmp;
268 	hammer2_inode_t *ip;
269 	struct vnode *vp;
270 	struct vattr *vap;
271 	hammer2_chain_t *chain;
272 	int i;
273 
274 	vp = ap->a_vp;
275 	vap = ap->a_vap;
276 
277 	ip = VTOI(vp);
278 	pmp = ip->pmp;
279 
280 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
281 
282 	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
283 	vap->va_fileid = ip->meta.inum;
284 	vap->va_mode = ip->meta.mode;
285 	vap->va_nlink = ip->meta.nlinks;
286 	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
287 	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
288 	vap->va_rmajor = 0;
289 	vap->va_rminor = 0;
290 	vap->va_size = ip->meta.size;	/* protected by shared lock */
291 	vap->va_blocksize = HAMMER2_PBUFSIZE;
292 	vap->va_flags = ip->meta.uflags;
293 	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
294 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
295 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
296 	vap->va_gen = 1;
297 	vap->va_bytes = 0;
298 	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
299 		/*
300 		 * Can't really calculate directory use sans the files under
301 		 * it; just assume one block for now.
302 		 */
303 		vap->va_bytes += HAMMER2_INODE_BYTES;
304 	} else {
305 		for (i = 0; i < ip->cluster.nchains; ++i) {
306 			if ((chain = ip->cluster.array[i].chain) != NULL) {
307 				if (vap->va_bytes <
308 				    chain->bref.embed.stats.data_count) {
309 					vap->va_bytes =
310 					    chain->bref.embed.stats.data_count;
311 				}
312 			}
313 		}
314 	}
315 	vap->va_type = hammer2_get_vtype(ip->meta.type);
316 	vap->va_filerev = 0;
317 	vap->va_uid_uuid = ip->meta.uid;
318 	vap->va_gid_uuid = ip->meta.gid;
319 	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
320 			  VA_FSID_UUID_VALID;
321 
322 	hammer2_inode_unlock(ip);
323 
324 	return (0);
325 }
326 
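/*
 * Change file attributes (chflags, chown, chmod, resize, mtime) inside
 * a normal transaction with the inode exclusively locked.
 */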
327 static
328 int
329 hammer2_vop_setattr(struct vop_setattr_args *ap)
330 {
331 	hammer2_inode_t *ip;
332 	struct vnode *vp;
333 	struct vattr *vap;
334 	int error;
335 	int kflags = 0;
336 	uint64_t ctime;
337 
338 	vp = ap->a_vp;
339 	vap = ap->a_vap;
340 	hammer2_update_time(&ctime);
341 
342 	ip = VTOI(vp);
343 
344 	if (ip->pmp->ronly)
345 		return (EROFS);
346 	if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
347 		return (ENOSPC);
348 
349 	hammer2_pfs_memory_wait(ip->pmp);
350 	hammer2_trans_init(ip->pmp, 0);
351 	hammer2_inode_lock(ip, 0);
352 	error = 0;
353 
354 	if (vap->va_flags != VNOVAL) {
355 		uint32_t flags;
356 
357 		flags = ip->meta.uflags;
358 		error = vop_helper_setattr_flags(&flags, vap->va_flags,
359 				     hammer2_to_unix_xid(&ip->meta.uid),
360 				     ap->a_cred);
361 		if (error == 0) {
362 			if (ip->meta.uflags != flags) {
363 				hammer2_inode_modify(ip);
364 				ip->meta.uflags = flags;
365 				ip->meta.ctime = ctime;
366 				kflags |= NOTE_ATTRIB;
367 			}
368 			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
369 				error = 0;
370 				goto done;
371 			}
372 		}
373 		goto done;
374 	}
375 	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
376 		error = EPERM;
377 		goto done;
378 	}
379 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
380 		mode_t cur_mode = ip->meta.mode;
381 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
382 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
383 		uuid_t uuid_uid;
384 		uuid_t uuid_gid;
385 
386 		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
387 					 ap->a_cred,
388 					 &cur_uid, &cur_gid, &cur_mode);
389 		if (error == 0) {
390 			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
391 			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
392 			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
393 			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
394 			    ip->meta.mode != cur_mode
395 			) {
396 				hammer2_inode_modify(ip);
397 				ip->meta.uid = uuid_uid;
398 				ip->meta.gid = uuid_gid;
399 				ip->meta.mode = cur_mode;
400 				ip->meta.ctime = ctime;
401 			}
402 			kflags |= NOTE_ATTRIB;
403 		}
404 	}
405 
406 	/*
407 	 * Resize the file
408 	 */
409 	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
410 		switch(vp->v_type) {
411 		case VREG:
412 			if (vap->va_size == ip->meta.size)
413 				break;
414 			if (vap->va_size < ip->meta.size) {
415 				hammer2_mtx_ex(&ip->truncate_lock);
416 				hammer2_truncate_file(ip, vap->va_size);
417 				hammer2_mtx_unlock(&ip->truncate_lock);
418 				kflags |= NOTE_WRITE;
419 			} else {
420 				hammer2_extend_file(ip, vap->va_size);
421 				kflags |= NOTE_WRITE | NOTE_EXTEND;
422 			}
423 			hammer2_inode_modify(ip);
424 			ip->meta.mtime = ctime;
425 			break;
426 		default:
427 			error = EINVAL;
428 			goto done;
429 		}
430 	}
431 #if 0
432 	/* atime not supported */
433 	if (vap->va_atime.tv_sec != VNOVAL) {
434 		hammer2_inode_modify(ip);
435 		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
436 		kflags |= NOTE_ATTRIB;
437 	}
438 #endif
439 	if (vap->va_mode != (mode_t)VNOVAL) {
440 		mode_t cur_mode = ip->meta.mode;
441 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
442 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
443 
444 		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
445 					 cur_uid, cur_gid, &cur_mode);
446 		if (error == 0 && ip->meta.mode != cur_mode) {
447 			hammer2_inode_modify(ip);
448 			ip->meta.mode = cur_mode;
449 			ip->meta.ctime = ctime;
450 			kflags |= NOTE_ATTRIB;
451 		}
452 	}
453 
454 	if (vap->va_mtime.tv_sec != VNOVAL) {
455 		hammer2_inode_modify(ip);
456 		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
457 		kflags |= NOTE_ATTRIB;
458 	}
459 
460 done:
461 	/*
462 	 * If a truncation occurred we must call hammer2_inode_chain_sync()
463 	 * now in order to trim the related data chains; otherwise a later
464 	 * expansion can cause havoc.
465 	 *
466 	 * If an extension occurred that changed the DIRECTDATA state, we
467 	 * must call hammer2_inode_chain_sync() now in order to prepare the
468 	 * inode's indirect block table.
469 	 */
470 	if (ip->flags & HAMMER2_INODE_RESIZED)
471 		hammer2_inode_chain_sync(ip);
472 
473 	/*
474 	 * Cleanup.
475 	 */
476 	hammer2_inode_unlock(ip);
477 	hammer2_trans_done(ip->pmp);
478 	hammer2_knote(ip->vp, kflags);
479 
480 	return (error);
481 }
482 
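/*
 * Read directory entries.  The '.' and '..' entries are synthesized
 * from the inode meta-data; the remaining entries are collected with a
 * readdir XOP scan keyed on the directory hash.
 */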
483 static
484 int
485 hammer2_vop_readdir(struct vop_readdir_args *ap)
486 {
487 	hammer2_xop_readdir_t *xop;
488 	hammer2_blockref_t bref;
489 	hammer2_inode_t *ip;
490 	hammer2_tid_t inum;
491 	hammer2_key_t lkey;
492 	struct uio *uio;
493 	off_t *cookies;
494 	off_t saveoff;
495 	int cookie_index;
496 	int ncookies;
497 	int error;
498 	int eofflag;
499 	int r;
500 
501 	ip = VTOI(ap->a_vp);
502 	uio = ap->a_uio;
503 	saveoff = uio->uio_offset;
504 	eofflag = 0;
505 	error = 0;
506 
507 	/*
508 	 * Set up directory entry cookies if requested
509 	 */
510 	if (ap->a_ncookies) {
511 		ncookies = uio->uio_resid / 16 + 1;
512 		if (ncookies > 1024)
513 			ncookies = 1024;
514 		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
515 	} else {
516 		ncookies = -1;
517 		cookies = NULL;
518 	}
519 	cookie_index = 0;
520 
521 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
522 
523 	/*
524 	 * Handle artificial entries.  To ensure that only positive 64 bit
525 	 * quantities are returned to userland we always strip off bit 63.
526 	 * The hash code is designed such that codes 0x0000-0x7FFF are not
527 	 * used, allowing us to use these codes for artificial entries.
528 	 *
529 	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
530 	 * allow '..' to cross the mount point into (e.g.) the super-root.
531 	 */
532 	if (saveoff == 0) {
533 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
534 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
535 		if (r)
536 			goto done;
537 		if (cookies)
538 			cookies[cookie_index] = saveoff;
539 		++saveoff;
540 		++cookie_index;
541 		if (cookie_index == ncookies)
542 			goto done;
543 	}
544 
545 	if (saveoff == 1) {
546 		/*
547 		 * Be careful with lock order when accessing ".."
548 		 *
549 		 * (ip is the current dir; ip->meta.iparent gives the parent dir).
550 		 */
551 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
552 		if (ip != ip->pmp->iroot)
553 			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
554 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
555 		if (r)
556 			goto done;
557 		if (cookies)
558 			cookies[cookie_index] = saveoff;
559 		++saveoff;
560 		++cookie_index;
561 		if (cookie_index == ncookies)
562 			goto done;
563 	}
564 
565 	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
566 	if (hammer2_debug & 0x0020)
567 		kprintf("readdir: lkey %016jx\n", lkey);
568 	if (error)
569 		goto done;
570 
571 	/*
572 	 * Use XOP for cluster scan.
573 	 *
574 	 * parent is the inode cluster, already locked for us.  Don't
575 	 * double lock shared locks as this will screw up upgrades.
576 	 */
577 	xop = hammer2_xop_alloc(ip, 0);
578 	xop->lkey = lkey;
579 	hammer2_xop_start(&xop->head, hammer2_xop_readdir);
580 
581 	for (;;) {
582 		const hammer2_inode_data_t *ripdata;
583 		const char *dname;
584 		int dtype;
585 
586 		error = hammer2_xop_collect(&xop->head, 0);
587 		error = hammer2_error_to_errno(error);
588 		if (error) {
589 			break;
590 		}
591 		if (cookie_index == ncookies)
592 			break;
593 		if (hammer2_debug & 0x0020)
594 			kprintf("cluster chain %p %p\n",
595 				xop->head.cluster.focus,
596 				(xop->head.cluster.focus ?
597 				 xop->head.cluster.focus->data : (void *)-1));
598 		hammer2_cluster_bref(&xop->head.cluster, &bref);
599 
600 		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
601 			ripdata =
602 			    &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
603 			dtype = hammer2_get_dtype(ripdata->meta.type);
604 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
605 			r = vop_write_dirent(&error, uio,
606 					     ripdata->meta.inum &
607 					      HAMMER2_DIRHASH_USERMSK,
608 					     dtype,
609 					     ripdata->meta.name_len,
610 					     ripdata->filename);
611 			if (r)
612 				break;
613 			if (cookies)
614 				cookies[cookie_index] = saveoff;
615 			++cookie_index;
616 		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
617 			dtype = hammer2_get_dtype(bref.embed.dirent.type);
618 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
619 			if (bref.embed.dirent.namlen <=
620 			    sizeof(bref.check.buf)) {
621 				dname = bref.check.buf;
622 			} else {
623 				dname =
624 				 hammer2_cluster_rdata(&xop->head.cluster)->buf;
625 			}
626 			r = vop_write_dirent(&error, uio,
627 					     bref.embed.dirent.inum,
628 					     dtype,
629 					     bref.embed.dirent.namlen,
630 					     dname);
631 			if (r)
632 				break;
633 			if (cookies)
634 				cookies[cookie_index] = saveoff;
635 			++cookie_index;
636 		} else {
637 			/* XXX chain error */
638 			kprintf("bad chain type readdir %d\n", bref.type);
639 		}
640 	}
641 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
642 	if (error == ENOENT) {
643 		error = 0;
644 		eofflag = 1;
645 		saveoff = (hammer2_key_t)-1;
646 	} else {
647 		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
648 	}
649 done:
650 	hammer2_inode_unlock(ip);
651 	if (ap->a_eofflag)
652 		*ap->a_eofflag = eofflag;
653 	if (hammer2_debug & 0x0020)
654 		kprintf("readdir: done at %016jx\n", saveoff);
655 	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
656 	if (error && cookie_index == 0) {
657 		if (cookies) {
658 			kfree(cookies, M_TEMP);
659 			*ap->a_ncookies = 0;
660 			*ap->a_cookies = NULL;
661 		}
662 	} else {
663 		if (cookies) {
664 			*ap->a_ncookies = cookie_index;
665 			*ap->a_cookies = cookies;
666 		}
667 	}
668 	return (error);
669 }
670 
671 /*
672  * hammer2_vop_readlink { vp, uio, cred }
673  */
674 static
675 int
676 hammer2_vop_readlink(struct vop_readlink_args *ap)
677 {
678 	struct vnode *vp;
679 	hammer2_inode_t *ip;
680 	int error;
681 
682 	vp = ap->a_vp;
683 	if (vp->v_type != VLNK)
684 		return (EINVAL);
685 	ip = VTOI(vp);
686 
687 	error = hammer2_read_file(ip, ap->a_uio, 0);
688 	return (error);
689 }
690 
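/*
 * Read from a regular file via the logical buffer cache.  The real
 * work is done by hammer2_read_file().
 */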
691 static
692 int
693 hammer2_vop_read(struct vop_read_args *ap)
694 {
695 	struct vnode *vp;
696 	hammer2_inode_t *ip;
697 	struct uio *uio;
698 	int error;
699 	int seqcount;
700 	int bigread;
701 
702 	/*
703 	 * Read operations supported on this vnode?
704 	 */
705 	vp = ap->a_vp;
706 	if (vp->v_type != VREG)
707 		return (EINVAL);
708 
709 	/*
710 	 * Misc
711 	 */
712 	ip = VTOI(vp);
713 	uio = ap->a_uio;
714 	error = 0;
715 
716 	seqcount = ap->a_ioflag >> 16;
717 	bigread = (uio->uio_resid > 100 * 1024 * 1024);
718 
719 	error = hammer2_read_file(ip, uio, seqcount);
720 	return (error);
721 }
722 
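/*
 * Write to a regular file.  A transaction is established (flagged
 * BUFCACHE for UIO_NOCOPY writes to avoid deadlocking against the VM
 * system) and hammer2_write_file() does the real work.
 */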
723 static
724 int
725 hammer2_vop_write(struct vop_write_args *ap)
726 {
727 	hammer2_inode_t *ip;
728 	thread_t td;
729 	struct vnode *vp;
730 	struct uio *uio;
731 	int error;
732 	int seqcount;
733 	int ioflag;
734 
735 	/*
736 	 * Write operations supported on this vnode?
737 	 */
738 	vp = ap->a_vp;
739 	if (vp->v_type != VREG)
740 		return (EINVAL);
741 
742 	/*
743 	 * Misc
744 	 */
745 	ip = VTOI(vp);
746 	ioflag = ap->a_ioflag;
747 	uio = ap->a_uio;
748 	error = 0;
749 	if (ip->pmp->ronly)
750 		return (EROFS);
751 	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
752 	case 2:
753 		return (ENOSPC);
754 	case 1:
755 		ioflag |= IO_DIRECT;	/* semi-synchronous */
756 		/* fall through */
757 	default:
758 		break;
759 	}
760 
761 	seqcount = ioflag >> 16;
762 
763 	/*
764 	 * Check resource limit
765 	 */
766 	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
767 	    uio->uio_offset + uio->uio_resid >
768 	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
769 		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
770 		return (EFBIG);
771 	}
772 
773 	/*
774 	 * The transaction interlocks against flush initiations
775 	 * (note: but will run concurrently with the actual flush).
776 	 *
777 	 * To avoid deadlocking against the VM system, we must flag any
778 	 * transaction related to the buffer cache or other direct
779 	 * VM page manipulation.
780 	 */
781 	if (uio->uio_segflg == UIO_NOCOPY)
782 		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
783 	else
784 		hammer2_trans_init(ip->pmp, 0);
785 	error = hammer2_write_file(ip, uio, ioflag, seqcount);
786 	hammer2_trans_done(ip->pmp);
787 
788 	return (error);
789 }
790 
791 /*
792  * Perform read operations on a file or symlink given an UNLOCKED
793  * inode and uio.
794  *
795  * The passed ip is not locked.
796  */
797 static
798 int
799 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
800 {
801 	hammer2_off_t size;
802 	struct buf *bp;
803 	int error;
804 
805 	error = 0;
806 
807 	/*
808 	 * UIO read loop.
809 	 *
810 	 * WARNING! Assumes that the kernel interlocks size changes at the
811 	 *	    vnode level.
812 	 */
813 	hammer2_mtx_sh(&ip->lock);
814 	hammer2_mtx_sh(&ip->truncate_lock);
815 	size = ip->meta.size;
816 	hammer2_mtx_unlock(&ip->lock);
817 
818 	while (uio->uio_resid > 0 && uio->uio_offset < size) {
819 		hammer2_key_t lbase;
820 		hammer2_key_t leof;
821 		int lblksize;
822 		int loff;
823 		int n;
824 
825 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
826 						&lbase, &leof);
827 
828 #if 1
829 		bp = NULL;
830 		error = cluster_readx(ip->vp, leof, lbase, lblksize,
831 				      B_NOTMETA | B_KVABIO,
832 				      uio->uio_resid,
833 				      seqcount * MAXBSIZE,
834 				      &bp);
835 #else
836 		if (uio->uio_segflg == UIO_NOCOPY) {
837 			bp = getblk(ip->vp, lbase, lblksize,
838 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
839 			if (bp->b_flags & B_CACHE) {
840 				int i;
841 				int j = 0;
842 				if (bp->b_xio.xio_npages != 16)
843 					kprintf("NPAGES BAD\n");
844 				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
845 					vm_page_t m;
846 					m = bp->b_xio.xio_pages[i];
847 					if (m == NULL || m->valid == 0) {
848 						kprintf("bp %016jx %016jx pg %d inv",
849 							lbase, leof, i);
850 						if (m)
851 							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
852 						kprintf("\n");
853 						j = 1;
854 					}
855 				}
856 				if (j)
857 					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
858 			}
859 			bqrelse(bp);
860 		}
861 		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
862 #endif
863 		if (error) {
864 			brelse(bp);
865 			break;
866 		}
867 		bkvasync(bp);
868 		loff = (int)(uio->uio_offset - lbase);
869 		n = lblksize - loff;
870 		if (n > uio->uio_resid)
871 			n = uio->uio_resid;
872 		if (n > size - uio->uio_offset)
873 			n = (int)(size - uio->uio_offset);
874 		bp->b_flags |= B_AGE;
875 		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
876 		bqrelse(bp);
877 	}
878 	hammer2_mtx_unlock(&ip->truncate_lock);
879 
880 	return (error);
881 }
882 
883 /*
884  * Write to the file represented by the inode via the logical buffer cache.
885  * The inode may represent a regular file or a symlink.
886  *
887  * The inode must not be locked.
888  */
889 static
890 int
891 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
892 		   int ioflag, int seqcount)
893 {
894 	hammer2_key_t old_eof;
895 	hammer2_key_t new_eof;
896 	struct buf *bp;
897 	int kflags;
898 	int error;
899 	int modified;
900 
901 	/*
902 	 * Setup if append
903 	 * Set up the uio offset if appending
904 	 * WARNING! Assumes that the kernel interlocks size changes at the
905 	 *	    vnode level.
906 	 */
907 	hammer2_mtx_ex(&ip->lock);
908 	hammer2_mtx_sh(&ip->truncate_lock);
909 	if (ioflag & IO_APPEND)
910 		uio->uio_offset = ip->meta.size;
911 	old_eof = ip->meta.size;
912 
913 	/*
914 	 * Extend the file if necessary.  If the write fails at some point
915 	 * we will truncate it back down to cover as much as we were able
916 	 * to write.
917 	 *
918 	 * Doing this now makes it easier to calculate buffer sizes in
919 	 * the loop.
920 	 */
921 	kflags = 0;
922 	error = 0;
923 	modified = 0;
924 
925 	if (uio->uio_offset + uio->uio_resid > old_eof) {
926 		new_eof = uio->uio_offset + uio->uio_resid;
927 		modified = 1;
928 		hammer2_extend_file(ip, new_eof);
929 		kflags |= NOTE_EXTEND;
930 	} else {
931 		new_eof = old_eof;
932 	}
933 	hammer2_mtx_unlock(&ip->lock);
934 
935 	/*
936 	 * UIO write loop
937 	 */
938 	while (uio->uio_resid > 0) {
939 		hammer2_key_t lbase;
940 		int trivial;
941 		int endofblk;
942 		int lblksize;
943 		int loff;
944 		int n;
945 
946 		/*
947 		 * Don't allow the buffer build to blow out the buffer
948 		 * cache.
949 		 */
950 		if ((ioflag & IO_RECURSE) == 0)
951 			bwillwrite(HAMMER2_PBUFSIZE);
952 
953 		/*
954 		 * This nominally tells us how much we can cluster and
955 		 * what the logical buffer size needs to be.  Currently
956 		 * we don't try to cluster the write and just handle one
957 		 * block at a time.
958 		 */
959 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
960 						&lbase, NULL);
961 		loff = (int)(uio->uio_offset - lbase);
962 
963 		KKASSERT(lblksize <= 65536);
964 
965 		/*
966 		 * Calculate bytes to copy this transfer and whether the
967 		 * copy completely covers the buffer or not.
968 		 */
969 		trivial = 0;
970 		n = lblksize - loff;
971 		if (n > uio->uio_resid) {
972 			n = uio->uio_resid;
973 			if (loff == lbase && uio->uio_offset + n == new_eof)
974 				trivial = 1;
975 			endofblk = 0;
976 		} else {
977 			if (loff == 0)
978 				trivial = 1;
979 			endofblk = 1;
980 		}
981 		if (lbase >= new_eof)
982 			trivial = 1;
983 
984 		/*
985 		 * Get the buffer
986 		 */
987 		if (uio->uio_segflg == UIO_NOCOPY) {
988 			/*
989 			 * Issuing a write with the same data backing the
990 			 * buffer.  Instantiate the buffer to collect the
991 			 * backing vm pages, then read-in any missing bits.
992 			 *
993 			 * This case is used by vop_stdputpages().
994 			 */
995 			bp = getblk(ip->vp, lbase, lblksize,
996 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
997 			if ((bp->b_flags & B_CACHE) == 0) {
998 				bqrelse(bp);
999 				error = bread_kvabio(ip->vp, lbase,
1000 						     lblksize, &bp);
1001 			}
1002 		} else if (trivial) {
1003 			/*
1004 			 * Even though we are entirely overwriting the buffer
1005 			 * we may still have to zero it out to avoid a
1006 			 * mmap/write visibility issue.
1007 			 */
1008 			bp = getblk(ip->vp, lbase, lblksize,
1009 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1010 			if ((bp->b_flags & B_CACHE) == 0)
1011 				vfs_bio_clrbuf(bp);
1012 		} else {
1013 			/*
1014 			 * Partial overwrite, read in any missing bits then
1015 			 * replace the portion being written.
1016 			 *
1017 			 * (The strategy code will detect zero-fill physical
1018 			 * blocks for this case).
1019 			 */
1020 			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
1021 			if (error == 0)
1022 				bheavy(bp);
1023 		}
1024 
1025 		if (error) {
1026 			brelse(bp);
1027 			break;
1028 		}
1029 
1030 		/*
1031 		 * Ok, copy the data in
1032 		 */
1033 		bkvasync(bp);
1034 		error = uiomovebp(bp, bp->b_data + loff, n, uio);
1035 		kflags |= NOTE_WRITE;
1036 		modified = 1;
1037 		if (error) {
1038 			brelse(bp);
1039 			break;
1040 		}
1041 
1042 		/*
1043 		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1044 		 *	    with IO_SYNC or IO_ASYNC set.  These writes
1045 		 *	    must be handled as the pageout daemon expects.
1046 		 *
1047 		 * NOTE!    H2 relies on cluster_write() here because it
1048 		 *	    cannot preallocate disk blocks at the logical
1049 		 *	    level due to not knowing what the compression
1050 		 *	    size will be at this time.
1051 		 *
1052 		 *	    We must use cluster_write() here and we depend
1053 		 *	    on the write-behind feature to flush buffers
1054 		 *	    appropriately.  If we let the buffer daemons do
1055 		 *	    it the block allocations will be all over the
1056 		 *	    map.
1057 		 */
1058 		if (ioflag & IO_SYNC) {
1059 			bwrite(bp);
1060 		} else if ((ioflag & IO_DIRECT) && endofblk) {
1061 			bawrite(bp);
1062 		} else if (ioflag & IO_ASYNC) {
1063 			bawrite(bp);
1064 		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1065 			bdwrite(bp);
1066 		} else {
1067 #if 1
1068 			bp->b_flags |= B_CLUSTEROK;
1069 			cluster_write(bp, new_eof, lblksize, seqcount);
1070 #else
1071 			bp->b_flags |= B_CLUSTEROK;
1072 			bdwrite(bp);
1073 #endif
1074 		}
1075 	}
1076 
1077 	/*
1078 	 * Cleanup.  If we extended the file EOF but failed to write through,
1079 	 * the entire write is a failure and we have to back up.
1080 	 */
1081 	if (error && new_eof != old_eof) {
1082 		hammer2_mtx_unlock(&ip->truncate_lock);
1083 		hammer2_mtx_ex(&ip->lock);
1084 		hammer2_mtx_ex(&ip->truncate_lock);
1085 		hammer2_truncate_file(ip, old_eof);
1086 		if (ip->flags & HAMMER2_INODE_MODIFIED)
1087 			hammer2_inode_chain_sync(ip);
1088 		hammer2_mtx_unlock(&ip->lock);
1089 	} else if (modified) {
1090 		hammer2_mtx_ex(&ip->lock);
1091 		hammer2_inode_modify(ip);
1092 		if (ip->vp && ip->vp->v_writecount == 0 &&
1093 		    ip->vp->v_type == VREG) {
1094 			ip->meta.mtime =
1095 				(unsigned long)ip->vp->v_lastwrite_ts.tv_sec *
1096 				 1000000 +
1097 				ip->vp->v_lastwrite_ts.tv_nsec / 1000;
1098 		} else {
1099 			hammer2_update_time(&ip->meta.mtime);
1100 		}
1101 		if (ip->flags & HAMMER2_INODE_MODIFIED)
1102 			hammer2_inode_chain_sync(ip);
1103 		hammer2_mtx_unlock(&ip->lock);
1104 		hammer2_knote(ip->vp, kflags);
1105 	}
1106 	hammer2_trans_assert_strategy(ip->pmp);
1107 	hammer2_mtx_unlock(&ip->truncate_lock);
1108 
1109 	return error;
1110 }
1111 
1112 /*
1113  * Truncate the size of a file.  The inode must not be locked.
1114  *
1115  * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1116  * ensure that any on-media data beyond the new file EOF has been destroyed.
1117  *
1118  * WARNING: nvtruncbuf() can only be safely called without the inode lock
1119  *	    held due to the way our write thread works.  If the truncation
1120  *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
1121  *	    for dirtying that buffer and zeroing out trailing bytes.
1122  *
1123  * WARNING! Assumes that the kernel interlocks size changes at the
1124  *	    vnode level.
1125  *
1126  * WARNING! Caller assumes responsibility for removing dead blocks
1127  *	    if INODE_RESIZED is set.
1128  */
1129 static
1130 void
1131 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1132 {
1133 	hammer2_key_t lbase;
1134 	int nblksize;
1135 
1136 	hammer2_mtx_unlock(&ip->lock);
1137 	if (ip->vp) {
1138 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1139 		nvtruncbuf(ip->vp, nsize,
1140 			   nblksize, (int)nsize & (nblksize - 1),
1141 			   0);
1142 	}
1143 	hammer2_mtx_ex(&ip->lock);
1144 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1145 	ip->osize = ip->meta.size;
1146 	ip->meta.size = nsize;
1147 	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1148 	hammer2_inode_modify(ip);
1149 }
1150 
1151 /*
1152  * Extend the size of a file.  The inode must not be locked.
1153  *
1154  * Even though the file size is changing, we do not have to set the
1155  * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1156  * boundary.  When this occurs a hammer2_inode_chain_sync() is required
1157  * to prepare the inode cluster's indirect block table, otherwise
1158  * async execution of the strategy code will implode on us.
1159  *
1160  * WARNING! Assumes that the kernel interlocks size changes at the
1161  *	    vnode level.
1162  *
1163  * WARNING! Caller assumes responsibility for transitioning out
1164  *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
1165  */
1166 static
1167 void
1168 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1169 {
1170 	hammer2_key_t lbase;
1171 	hammer2_key_t osize;
1172 	int oblksize;
1173 	int nblksize;
1174 
1175 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1176 	hammer2_inode_modify(ip);
1177 	osize = ip->meta.size;
1178 	ip->osize = osize;
1179 	ip->meta.size = nsize;
1180 
1181 	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1182 		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1183 		hammer2_inode_chain_sync(ip);
1184 	}
1185 
1186 	hammer2_mtx_unlock(&ip->lock);
1187 	if (ip->vp) {
1188 		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1189 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1190 		nvextendbuf(ip->vp,
1191 			    osize, nsize,
1192 			    oblksize, nblksize,
1193 			    -1, -1, 0);
1194 	}
1195 	hammer2_mtx_ex(&ip->lock);
1196 }
1197 
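/*
 * Resolve a filename in the directory to a vnode via the nresolve XOP,
 * then associate the resulting vnode with the namecache entry.
 */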
1198 static
1199 int
1200 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1201 {
1202 	hammer2_xop_nresolve_t *xop;
1203 	hammer2_inode_t *ip;
1204 	hammer2_inode_t *dip;
1205 	struct namecache *ncp;
1206 	struct vnode *vp;
1207 	int error;
1208 
1209 	dip = VTOI(ap->a_dvp);
1210 	xop = hammer2_xop_alloc(dip, 0);
1211 
1212 	ncp = ap->a_nch->ncp;
1213 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1214 
1215 	/*
1216 	 * Note: In DragonFly the kernel handles '.' and '..'.
1217 	 */
1218 	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1219 	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
1220 
1221 	error = hammer2_xop_collect(&xop->head, 0);
1222 	error = hammer2_error_to_errno(error);
1223 	if (error) {
1224 		ip = NULL;
1225 	} else {
1226 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1227 	}
1228 	hammer2_inode_unlock(dip);
1229 
1230 	/*
1231 	 * Acquire the related vnode
1232 	 *
1233 	 * NOTE: For error processing, only ENOENT resolves the namecache
1234 	 *	 entry to NULL, otherwise we just return the error and
1235 	 *	 leave the namecache unresolved.
1236 	 *
1237 	 * NOTE: multiple hammer2_inode structures can be aliased to the
1238 	 *	 same chain element, for example for hardlinks.  This
1239 	 *	 use case does not 'reattach' inode associations that
1240 	 *	 might already exist, but always allocates a new one.
1241 	 *
1242 	 * WARNING: inode structure is locked exclusively via inode_get
1243 	 *	    but chain was locked shared.  inode_unlock()
1244 	 *	    will handle it properly.
1245 	 */
1246 	if (ip) {
1247 		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
1248 		if (error == 0) {
1249 			vn_unlock(vp);
1250 			cache_setvp(ap->a_nch, vp);
1251 		} else if (error == ENOENT) {
1252 			cache_setvp(ap->a_nch, NULL);
1253 		}
1254 		hammer2_inode_unlock(ip);
1255 
1256 		/*
1257 		 * The vp should not be released until after we've disposed
1258 		 * of our locks, because it might cause vop_inactive() to
1259 		 * be called.
1260 		 */
1261 		if (vp)
1262 			vrele(vp);
1263 	} else {
1264 		error = ENOENT;
1265 		cache_setvp(ap->a_nch, NULL);
1266 	}
1267 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1268 	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1269 		("resolve error %d/%p ap %p\n",
1270 		 error, ap->a_nch->ncp->nc_vp, ap));
1271 
1272 	return error;
1273 }
1274 
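/*
 * Resolve '..' using the iparent inode number stored in the
 * directory's meta-data.
 */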
1275 static
1276 int
1277 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1278 {
1279 	hammer2_inode_t *dip;
1280 	hammer2_tid_t inum;
1281 	int error;
1282 
1283 	dip = VTOI(ap->a_dvp);
1284 	inum = dip->meta.iparent;
1285 	*ap->a_vpp = NULL;
1286 
1287 	if (inum) {
1288 		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
1289 					 inum, ap->a_vpp);
1290 	} else {
1291 		error = ENOENT;
1292 	}
1293 	return error;
1294 }
1295 
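/*
 * Create a directory.  The inode is created as a hidden inode in the
 * iroot and a directory entry pointing at it is then created in dip.
 */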
1296 static
1297 int
1298 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1299 {
1300 	hammer2_inode_t *dip;
1301 	hammer2_inode_t *nip;
1302 	struct namecache *ncp;
1303 	const uint8_t *name;
1304 	size_t name_len;
1305 	hammer2_tid_t inum;
1306 	int error;
1307 
1308 	dip = VTOI(ap->a_dvp);
1309 	if (dip->pmp->ronly)
1310 		return (EROFS);
1311 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1312 		return (ENOSPC);
1313 
1314 	ncp = ap->a_nch->ncp;
1315 	name = ncp->nc_name;
1316 	name_len = ncp->nc_nlen;
1317 
1318 	hammer2_pfs_memory_wait(dip->pmp);
1319 	hammer2_trans_init(dip->pmp, 0);
1320 
1321 	inum = hammer2_trans_newinum(dip->pmp);
1322 
1323 	/*
1324 	 * Create the actual inode as a hidden file in the iroot, then
1325 	 * create the directory entry.  The creation of the actual inode
1326 	 * sets its nlinks to 1 which is the value we desire.
1327 	 */
1328 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1329 				   NULL, 0, inum,
1330 				   inum, 0, 0,
1331 				   0, &error);
1332 	if (error) {
1333 		error = hammer2_error_to_errno(error);
1334 	} else {
1335 		error = hammer2_dirent_create(dip, name, name_len,
1336 					      nip->meta.inum, nip->meta.type);
1337 		/* returns UNIX error code */
1338 	}
1339 	if (error) {
1340 		if (nip) {
1341 			hammer2_inode_unlink_finisher(nip, 0);
1342 			hammer2_inode_unlock(nip);
1343 			nip = NULL;
1344 		}
1345 		*ap->a_vpp = NULL;
1346 	} else {
1347 		*ap->a_vpp = hammer2_igetv(nip, &error);
1348 		hammer2_inode_unlock(nip);
1349 	}
1350 
1351 	/*
1352 	 * Update dip's mtime
1353 	 *
1354 	 * We can use a shared inode lock and allow the meta.mtime update
1355 	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
1356 	 */
1357 	if (error == 0) {
1358 		uint64_t mtime;
1359 
1360 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1361 		hammer2_update_time(&mtime);
1362 		hammer2_inode_modify(dip);
1363 		dip->meta.mtime = mtime;
1364 		hammer2_inode_unlock(dip);
1365 	}
1366 
1367 	hammer2_trans_done(dip->pmp);
1368 
1369 	if (error == 0) {
1370 		cache_setunresolved(ap->a_nch);
1371 		cache_setvp(ap->a_nch, *ap->a_vpp);
1372 		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1373 	}
1374 	return error;
1375 }
1376 
1377 static
1378 int
1379 hammer2_vop_open(struct vop_open_args *ap)
1380 {
1381 	return vop_stdopen(ap);
1382 }
1383 
1384 /*
1385  * hammer2_vop_advlock { vp, id, op, fl, flags }
1386  */
1387 static
1388 int
1389 hammer2_vop_advlock(struct vop_advlock_args *ap)
1390 {
1391 	hammer2_inode_t *ip = VTOI(ap->a_vp);
1392 	hammer2_off_t size;
1393 
1394 	size = ip->meta.size;
1395 	return (lf_advlock(ap, &ip->advlock, size));
1396 }
1397 
1398 static
1399 int
1400 hammer2_vop_close(struct vop_close_args *ap)
1401 {
1402 	return vop_stdclose(ap);
1403 }
1404 
1405 /*
1406  * hammer2_vop_nlink { nch, dvp, vp, cred }
1407  *
1408  * Create a hardlink from (vp) to {dvp, nch}.
1409  */
1410 static
1411 int
1412 hammer2_vop_nlink(struct vop_nlink_args *ap)
1413 {
1414 	hammer2_inode_t *tdip;	/* target directory to create link in */
1415 	hammer2_inode_t *ip;	/* inode we are hardlinking to */
1416 	struct namecache *ncp;
1417 	const uint8_t *name;
1418 	size_t name_len;
1419 	int error;
1420 
1421 	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1422 		return(EXDEV);
1423 
1424 	tdip = VTOI(ap->a_dvp);
1425 	if (tdip->pmp->ronly)
1426 		return (EROFS);
1427 	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
1428 		return (ENOSPC);
1429 
1430 	ncp = ap->a_nch->ncp;
1431 	name = ncp->nc_name;
1432 	name_len = ncp->nc_nlen;
1433 
1434 	/*
1435 	 * ip represents the file being hardlinked.  The file could be a
1436 	 * normal file or a hardlink target if it has already been hardlinked.
1437 	 * (with the new semantics, it will almost always be a hardlink
1438 	 * target).
1439 	 *
1440 	 * Bump nlinks and potentially also create or move the hardlink
1441 	 * target in the parent directory common to (ip) and (tdip).  The
1442 	 * consolidation code can modify ip->cluster.  The returned cluster
1443 	 * is locked.
1444 	 */
1445 	ip = VTOI(ap->a_vp);
1446 	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1447 	hammer2_pfs_memory_wait(ip->pmp);
1448 	hammer2_trans_init(ip->pmp, 0);
1449 
1450 	/*
1451 	 * Target should be an indexed inode or there's no way we will ever
1452 	 * be able to find it!
1453 	 */
1454 	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1455 
1456 	error = 0;
1457 
1458 	/*
1459 	 * Can return NULL and error == EXDEV if the common parent
1460 	 * crosses a directory with the xlink flag set.
1461 	 */
1462 	hammer2_inode_lock(tdip, 0);
1463 	hammer2_inode_lock(ip, 0);
1464 
1465 	/*
1466 	 * Create the directory entry and bump nlinks.
1467 	 */
1468 	if (error == 0) {
1469 		error = hammer2_dirent_create(tdip, name, name_len,
1470 					      ip->meta.inum, ip->meta.type);
1471 		hammer2_inode_modify(ip);
1472 		++ip->meta.nlinks;
1473 	}
1474 	if (error == 0) {
1475 		/*
1476 		 * Update dip's mtime
1477 		 */
1478 		uint64_t mtime;
1479 
1480 		hammer2_update_time(&mtime);
1481 		hammer2_inode_modify(tdip);
1482 		tdip->meta.mtime = mtime;
1483 
1484 		cache_setunresolved(ap->a_nch);
1485 		cache_setvp(ap->a_nch, ap->a_vp);
1486 	}
1487 	hammer2_inode_unlock(ip);
1488 	hammer2_inode_unlock(tdip);
1489 
1490 	hammer2_trans_done(ip->pmp);
1491 	hammer2_knote(ap->a_vp, NOTE_LINK);
1492 	hammer2_knote(ap->a_dvp, NOTE_WRITE);
1493 
1494 	return error;
1495 }
1496 
1497 /*
1498  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1499  *
1500  * The operating system has already ensured that the directory entry
1501  * does not exist and done all appropriate namespace locking.
1502  */
1503 static
1504 int
1505 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1506 {
1507 	hammer2_inode_t *dip;
1508 	hammer2_inode_t *nip;
1509 	struct namecache *ncp;
1510 	const uint8_t *name;
1511 	size_t name_len;
1512 	hammer2_tid_t inum;
1513 	int error;
1514 
1515 	dip = VTOI(ap->a_dvp);
1516 	if (dip->pmp->ronly)
1517 		return (EROFS);
1518 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1519 		return (ENOSPC);
1520 
1521 	ncp = ap->a_nch->ncp;
1522 	name = ncp->nc_name;
1523 	name_len = ncp->nc_nlen;
1524 	hammer2_pfs_memory_wait(dip->pmp);
1525 	hammer2_trans_init(dip->pmp, 0);
1526 
1527 	inum = hammer2_trans_newinum(dip->pmp);
1528 
1529 	/*
1530 	 * Create the actual inode as a hidden file in the iroot, then
1531 	 * create the directory entry.  The creation of the actual inode
1532 	 * sets its nlinks to 1 which is the value we desire.
1533 	 */
1534 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1535 				   NULL, 0, inum,
1536 				   inum, 0, 0,
1537 				   0, &error);
1538 
1539 	if (error) {
1540 		error = hammer2_error_to_errno(error);
1541 	} else {
1542 		error = hammer2_dirent_create(dip, name, name_len,
1543 					      nip->meta.inum, nip->meta.type);
1544 	}
1545 	if (error) {
1546 		if (nip) {
1547 			hammer2_inode_unlink_finisher(nip, 0);
1548 			hammer2_inode_unlock(nip);
1549 			nip = NULL;
1550 		}
1551 		*ap->a_vpp = NULL;
1552 	} else {
1553 		*ap->a_vpp = hammer2_igetv(nip, &error);
1554 		hammer2_inode_unlock(nip);
1555 	}
1556 
1557 	/*
1558 	 * Update dip's mtime
1559 	 */
1560 	if (error == 0) {
1561 		uint64_t mtime;
1562 
1563 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1564 		hammer2_update_time(&mtime);
1565 		hammer2_inode_modify(dip);
1566 		dip->meta.mtime = mtime;
1567 		hammer2_inode_unlock(dip);
1568 	}
1569 
1570 	hammer2_trans_done(dip->pmp);
1571 
1572 	if (error == 0) {
1573 		cache_setunresolved(ap->a_nch);
1574 		cache_setvp(ap->a_nch, *ap->a_vpp);
1575 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1576 	}
1577 	return error;
1578 }
1579 
1580 /*
1581  * Make a device node (typically a fifo)
1582  */
1583 static
1584 int
1585 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1586 {
1587 	hammer2_inode_t *dip;
1588 	hammer2_inode_t *nip;
1589 	struct namecache *ncp;
1590 	const uint8_t *name;
1591 	size_t name_len;
1592 	hammer2_tid_t inum;
1593 	int error;
1594 
1595 	dip = VTOI(ap->a_dvp);
1596 	if (dip->pmp->ronly)
1597 		return (EROFS);
1598 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1599 		return (ENOSPC);
1600 
1601 	ncp = ap->a_nch->ncp;
1602 	name = ncp->nc_name;
1603 	name_len = ncp->nc_nlen;
1604 	hammer2_pfs_memory_wait(dip->pmp);
1605 	hammer2_trans_init(dip->pmp, 0);
1606 
1607 	/*
1608 	 * Create the device inode and then create the directory entry.
1609 	 */
1610 	inum = hammer2_trans_newinum(dip->pmp);
1611 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1612 				   NULL, 0, inum,
1613 				   inum, 0, 0,
1614 				   0, &error);
1615 	if (error == 0) {
1616 		error = hammer2_dirent_create(dip, name, name_len,
1617 					      nip->meta.inum, nip->meta.type);
1618 	}
1619 	if (error) {
1620 		if (nip) {
1621 			hammer2_inode_unlink_finisher(nip, 0);
1622 			hammer2_inode_unlock(nip);
1623 			nip = NULL;
1624 		}
1625 		*ap->a_vpp = NULL;
1626 	} else {
1627 		*ap->a_vpp = hammer2_igetv(nip, &error);
1628 		hammer2_inode_unlock(nip);
1629 	}
1630 
1631 	/*
1632 	 * Update dip's mtime
1633 	 */
1634 	if (error == 0) {
1635 		uint64_t mtime;
1636 
1637 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1638 		hammer2_update_time(&mtime);
1639 		hammer2_inode_modify(dip);
1640 		dip->meta.mtime = mtime;
1641 		hammer2_inode_unlock(dip);
1642 	}
1643 
1644 	hammer2_trans_done(dip->pmp);
1645 
1646 	if (error == 0) {
1647 		cache_setunresolved(ap->a_nch);
1648 		cache_setvp(ap->a_nch, *ap->a_vpp);
1649 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1650 	}
1651 	return error;
1652 }
1653 
1654 /*
1655  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1656  */
1657 static
1658 int
1659 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1660 {
1661 	hammer2_inode_t *dip;
1662 	hammer2_inode_t *nip;
1663 	struct namecache *ncp;
1664 	const uint8_t *name;
1665 	size_t name_len;
1666 	hammer2_tid_t inum;
1667 	int error;
1668 
1669 	dip = VTOI(ap->a_dvp);
1670 	if (dip->pmp->ronly)
1671 		return (EROFS);
1672 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1673 		return (ENOSPC);
1674 
1675 	ncp = ap->a_nch->ncp;
1676 	name = ncp->nc_name;
1677 	name_len = ncp->nc_nlen;
1678 	hammer2_pfs_memory_wait(dip->pmp);
1679 	hammer2_trans_init(dip->pmp, 0);
1680 
1681 	ap->a_vap->va_type = VLNK;	/* enforce type */
1682 
1683 	/*
1684 	 * Create the softlink as an inode and then create the directory
1685 	 * entry.
1686 	 */
1687 	inum = hammer2_trans_newinum(dip->pmp);
1688 
1689 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1690 				   NULL, 0, inum,
1691 				   inum, 0, 0,
1692 				   0, &error);
1693 	if (error == 0) {
1694 		error = hammer2_dirent_create(dip, name, name_len,
1695 					      nip->meta.inum, nip->meta.type);
1696 	}
1697 	if (error) {
1698 		if (nip) {
1699 			hammer2_inode_unlink_finisher(nip, 0);
1700 			hammer2_inode_unlock(nip);
1701 			nip = NULL;
1702 		}
1703 		*ap->a_vpp = NULL;
1704 		hammer2_trans_done(dip->pmp);
1705 		return error;
1706 	}
1707 	*ap->a_vpp = hammer2_igetv(nip, &error);
1708 
1709 	/*
1710 	 * Build the softlink (~like file data) and finalize the namecache.
1711 	 */
1712 	if (error == 0) {
1713 		size_t bytes;
1714 		struct uio auio;
1715 		struct iovec aiov;
1716 
1717 		bytes = strlen(ap->a_target);
1718 
1719 		hammer2_inode_unlock(nip);
1720 		bzero(&auio, sizeof(auio));
1721 		bzero(&aiov, sizeof(aiov));
1722 		auio.uio_iov = &aiov;
1723 		auio.uio_segflg = UIO_SYSSPACE;
1724 		auio.uio_rw = UIO_WRITE;
1725 		auio.uio_resid = bytes;
1726 		auio.uio_iovcnt = 1;
1727 		auio.uio_td = curthread;
1728 		aiov.iov_base = ap->a_target;
1729 		aiov.iov_len = bytes;
1730 		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1731 		/* XXX handle error */
1732 		error = 0;
1733 	} else {
1734 		hammer2_inode_unlock(nip);
1735 	}
1736 
1737 	/*
1738 	 * Update dip's mtime
1739 	 */
1740 	if (error == 0) {
1741 		uint64_t mtime;
1742 
1743 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1744 		hammer2_update_time(&mtime);
1745 		hammer2_inode_modify(dip);
1746 		dip->meta.mtime = mtime;
1747 		hammer2_inode_unlock(dip);
1748 	}
1749 
1750 	hammer2_trans_done(dip->pmp);
1751 
1752 	/*
1753 	 * Finalize namecache
1754 	 */
1755 	if (error == 0) {
1756 		cache_setunresolved(ap->a_nch);
1757 		cache_setvp(ap->a_nch, *ap->a_vpp);
1758 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1759 	}
1760 	return error;
1761 }
1762 
1763 /*
1764  * hammer2_vop_nremove { nch, dvp, cred }
1765  */
1766 static
1767 int
1768 hammer2_vop_nremove(struct vop_nremove_args *ap)
1769 {
1770 	hammer2_xop_unlink_t *xop;
1771 	hammer2_inode_t *dip;
1772 	hammer2_inode_t *ip;
1773 	struct namecache *ncp;
1774 	int error;
1775 	int isopen;
1776 
1777 	dip = VTOI(ap->a_dvp);
1778 	if (dip->pmp->ronly)
1779 		return (EROFS);
1780 #if 0
1781 	/* allow removals; expect the user to also run bulkfree */
1782 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1783 		return (ENOSPC);
1784 #endif
1785 
1786 	ncp = ap->a_nch->ncp;
1787 
1788 	hammer2_pfs_memory_wait(dip->pmp);
1789 	hammer2_trans_init(dip->pmp, 0);
1790 	hammer2_inode_lock(dip, 0);
1791 
1792 	/*
1793 	 * The unlink XOP unlinks the path from the directory and
1794 	 * locates and returns the cluster associated with the real inode.
1795 	 * We have to handle nlinks here on the frontend.
1796 	 */
1797 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1798 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1799 
1800 	/*
1801 	 * The namecache entry is locked so nobody can use this namespace.
1802 	 * Calculate isopen to determine if this namespace has an open vp
1803 	 * associated with it and resolve the vp only if it does.
1804 	 *
1805 	 * We try to avoid resolving the vnode if nobody has it open, but
1806 	 * note that the test is via this namespace only.
1807 	 */
1808 	isopen = cache_isopen(ap->a_nch);
1809 	xop->isdir = 0;
1810 	xop->dopermanent = 0;
1811 	hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1812 
1813 	/*
1814 	 * Collect the real inode and adjust nlinks, destroy the real
1815 	 * inode if nlinks transitions to 0 and it was the real inode
1816 	 * (else it has already been removed).
1817 	 */
1818 	error = hammer2_xop_collect(&xop->head, 0);
1819 	error = hammer2_error_to_errno(error);
1820 	hammer2_inode_unlock(dip);
1821 
1822 	if (error == 0) {
1823 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1824 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1825 		if (ip) {
1826 			hammer2_inode_unlink_finisher(ip, isopen);
1827 			hammer2_inode_unlock(ip);
1828 		}
1829 	} else {
1830 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1831 	}
1832 
1833 	/*
1834 	 * Update dip's mtime
1835 	 */
1836 	if (error == 0) {
1837 		uint64_t mtime;
1838 
1839 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1840 		hammer2_update_time(&mtime);
1841 		hammer2_inode_modify(dip);
1842 		dip->meta.mtime = mtime;
1843 		hammer2_inode_unlock(dip);
1844 	}
1845 
1846 	hammer2_inode_run_sideq(dip->pmp, 0);
1847 	hammer2_trans_done(dip->pmp);
1848 	if (error == 0) {
1849 		cache_unlink(ap->a_nch);
1850 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1851 	}
1852 	return (error);
1853 }
1854 
1855 /*
1856  * hammer2_vop_nrmdir { nch, dvp, cred }
1857  */
1858 static
1859 int
1860 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1861 {
1862 	hammer2_xop_unlink_t *xop;
1863 	hammer2_inode_t *dip;
1864 	hammer2_inode_t *ip;
1865 	struct namecache *ncp;
1866 	int isopen;
1867 	int error;
1868 
1869 	dip = VTOI(ap->a_dvp);
1870 	if (dip->pmp->ronly)
1871 		return (EROFS);
1872 #if 0
1873 	/* allow removals; expect the user to also run bulkfree */
1874 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1875 		return (ENOSPC);
1876 #endif
1877 
1878 	hammer2_pfs_memory_wait(dip->pmp);
1879 	hammer2_trans_init(dip->pmp, 0);
1880 	hammer2_inode_lock(dip, 0);
1881 
1882 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1883 
1884 	ncp = ap->a_nch->ncp;
1885 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1886 	isopen = cache_isopen(ap->a_nch);
1887 	xop->isdir = 1;
1888 	xop->dopermanent = 0;
1889 	hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1890 
1891 	/*
1892 	 * Collect the real inode and adjust nlinks, destroy the real
1893 	 * inode if nlinks transitions to 0 and it was the real inode
1894 	 * (else it has already been removed).
1895 	 */
1896 	error = hammer2_xop_collect(&xop->head, 0);
1897 	error = hammer2_error_to_errno(error);
1898 	hammer2_inode_unlock(dip);
1899 
1900 	if (error == 0) {
1901 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1902 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1903 		if (ip) {
1904 			hammer2_inode_unlink_finisher(ip, isopen);
1905 			hammer2_inode_unlock(ip);
1906 		}
1907 	} else {
1908 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1909 	}
1910 
1911 	/*
1912 	 * Update dip's mtime
1913 	 */
1914 	if (error == 0) {
1915 		uint64_t mtime;
1916 
1917 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1918 		hammer2_update_time(&mtime);
1919 		hammer2_inode_modify(dip);
1920 		dip->meta.mtime = mtime;
1921 		hammer2_inode_unlock(dip);
1922 	}
1923 
1924 	hammer2_inode_run_sideq(dip->pmp, 0);
1925 	hammer2_trans_done(dip->pmp);
1926 	if (error == 0) {
1927 		cache_unlink(ap->a_nch);
1928 		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1929 	}
1930 	return (error);
1931 }
1932 
1933 /*
1934  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1935  */
1936 static
1937 int
1938 hammer2_vop_nrename(struct vop_nrename_args *ap)
1939 {
1940 	struct namecache *fncp;
1941 	struct namecache *tncp;
1942 	hammer2_inode_t *fdip;	/* source directory */
1943 	hammer2_inode_t *tdip;	/* target directory */
1944 	hammer2_inode_t *ip;	/* file being renamed */
1945 	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
1946 	const uint8_t *fname;
1947 	size_t fname_len;
1948 	const uint8_t *tname;
1949 	size_t tname_len;
1950 	int error;
1951 	int update_tdip;
1952 	int update_fdip;
1953 	hammer2_key_t tlhc;
1954 
1955 	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1956 		return(EXDEV);
1957 	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1958 		return(EXDEV);
1959 
1960 	fdip = VTOI(ap->a_fdvp);	/* source directory */
1961 	tdip = VTOI(ap->a_tdvp);	/* target directory */
1962 
1963 	if (fdip->pmp->ronly)
1964 		return (EROFS);
1965 	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
1966 		return (ENOSPC);
1967 
1968 	fncp = ap->a_fnch->ncp;		/* entry name in source */
1969 	fname = fncp->nc_name;
1970 	fname_len = fncp->nc_nlen;
1971 
1972 	tncp = ap->a_tnch->ncp;		/* entry name in target */
1973 	tname = tncp->nc_name;
1974 	tname_len = tncp->nc_nlen;
1975 
1976 	hammer2_pfs_memory_wait(tdip->pmp);
1977 	hammer2_trans_init(tdip->pmp, 0);
1978 
1979 	update_tdip = 0;
1980 	update_fdip = 0;
1981 
1982 	ip = VTOI(fncp->nc_vp);
1983 	hammer2_inode_ref(ip);		/* extra ref */
1984 
1985 	/*
1986 	 * Lookup the target name to determine if a directory entry
1987 	 * is being overwritten.  We only hold related inode locks
1988 	 * temporarily; the operating system is expected to protect
1989 	 * against rename races.
1990 	 */
1991 	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
1992 	if (tip)
1993 		hammer2_inode_ref(tip);	/* extra ref */
1994 
1995 	/*
1996 	 * Can return NULL and error == EXDEV if the common parent
1997 	 * crosses a directory with the xlink flag set.
1998 	 *
1999 	 * For now try to avoid deadlocks with a simple pointer address
2000 	 * test.  (tip) can be NULL.
2001 	 */
2002 	error = 0;
2003 	if (fdip <= tdip) {
2004 		hammer2_inode_lock(fdip, 0);
2005 		hammer2_inode_lock(tdip, 0);
2006 	} else {
2007 		hammer2_inode_lock(tdip, 0);
2008 		hammer2_inode_lock(fdip, 0);
2009 	}
2010 	if (tip) {
2011 		if (ip <= tip) {
2012 			hammer2_inode_lock(ip, 0);
2013 			hammer2_inode_lock(tip, 0);
2014 		} else {
2015 			hammer2_inode_lock(tip, 0);
2016 			hammer2_inode_lock(ip, 0);
2017 		}
2018 	} else {
2019 		hammer2_inode_lock(ip, 0);
2020 	}
2021 
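	/*
	 * At this point fdip, tdip, ip, and tip (if it exists) are all
	 * locked for the duration of the rename.
	 */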
2022 #if 0
2023 	/*
2024 	 * Delete the target namespace.
2025 	 *
2026 	 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
2027 	 */
2028 	{
2029 		hammer2_xop_unlink_t *xop2;
2030 		hammer2_inode_t *tip;
2031 		int isopen;
2032 
2033 		/*
2034 		 * The unlink XOP unlinks the path from the directory and
2035 		 * locates and returns the cluster associated with the real
2036 		 * inode.  We have to handle nlinks here on the frontend.
2037 		 */
2038 		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2039 		hammer2_xop_setname(&xop2->head, tname, tname_len);
2040 		isopen = cache_isopen(ap->a_tnch);
2041 		xop2->isdir = -1;
2042 		xop2->dopermanent = 0;
2043 		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);
2044 
2045 		/*
2046 		 * Collect the real inode and adjust nlinks, destroy the real
2047 		 * inode if nlinks transitions to 0 and it was the real inode
2048 		 * (else it has already been removed).
2049 		 */
2050 		tnch_error = hammer2_xop_collect(&xop2->head, 0);
2051 		tnch_error = hammer2_error_to_errno(tnch_error);
2052 		/* hammer2_inode_unlock(tdip); */
2053 
2054 		if (tnch_error == 0) {
2055 			tip = hammer2_inode_get(tdip->pmp, NULL,
2056 						&xop2->head.cluster, -1);
2057 			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2058 			if (tip) {
2059 				hammer2_inode_unlink_finisher(tip, isopen);
2060 				hammer2_inode_unlock(tip);
2061 			}
2062 		} else {
2063 			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2064 		}
2065 		/* hammer2_inode_lock(tdip, 0); */
2066 
2067 		if (tnch_error && tnch_error != ENOENT) {
2068 			error = tnch_error;
2069 			goto done2;
2070 		}
2071 		update_tdip = 1;
2072 	}
2073 #endif
2074 
2075 	/*
2076 	 * Resolve the collision space for (tdip, tname, tname_len)
2077 	 *
2078 	 * tdip must be held exclusively locked to prevent races since
2079 	 * multiple filenames can end up in the same collision space.
2080 	 */
2081 	{
2082 		hammer2_xop_scanlhc_t *sxop;
2083 		hammer2_tid_t lhcbase;
2084 
2085 		tlhc = hammer2_dirhash(tname, tname_len);
2086 		lhcbase = tlhc;
2087 		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2088 		sxop->lhc = tlhc;
2089 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
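		/*
		 * Step past keys that are already in use.  The scan
		 * normally terminates with ENOENT once a free key is
		 * found.  If the candidate key walks out of the hash's
		 * collision space (bits outside HAMMER2_DIRHASH_LOMASK
		 * change), the space is exhausted and ENOSPC is
		 * returned below.
		 */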
2090 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2091 			if (tlhc != sxop->head.cluster.focus->bref.key)
2092 				break;
2093 			++tlhc;
2094 		}
2095 		error = hammer2_error_to_errno(error);
2096 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2097 
2098 		if (error) {
2099 			if (error != ENOENT)
2100 				goto done2;
2101 			++tlhc;
2102 			error = 0;
2103 		}
2104 		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2105 			error = ENOSPC;
2106 			goto done2;
2107 		}
2108 	}
2109 
2110 	/*
2111 	 * Ready to go, issue the rename to the backend.  Note that meta-data
2112 	 * updates to the related inodes occur separately from the rename
2113 	 * operation.
2114 	 *
2115 	 * NOTE: While it is not necessary to update ip->meta.name*, doing
2116 	 *	 so aids catastrophic recovery and debugging.
2117 	 */
2118 	if (error == 0) {
2119 		hammer2_xop_nrename_t *xop4;
2120 
2121 		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
2122 		xop4->lhc = tlhc;
2123 		xop4->ip_key = ip->meta.name_key;
2124 		hammer2_xop_setip2(&xop4->head, ip);
2125 		hammer2_xop_setip3(&xop4->head, tdip);
2126 		hammer2_xop_setname(&xop4->head, fname, fname_len);
2127 		hammer2_xop_setname2(&xop4->head, tname, tname_len);
2128 		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);
2129 
2130 		error = hammer2_xop_collect(&xop4->head, 0);
2131 		error = hammer2_error_to_errno(error);
2132 		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
2133 
2134 		if (error == ENOENT)
2135 			error = 0;
2136 
2137 		/*
2138 		 * Update inode meta-data.
2139 		 *
2140 		 * WARNING!  The in-memory inode (ip) structure does not
2141 		 *	     maintain a copy of the inode's filename buffer.
2142 		 */
2143 		if (error == 0 &&
2144 		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
2145 			hammer2_inode_modify(ip);
2146 			ip->meta.name_len = tname_len;
2147 			ip->meta.name_key = tlhc;
2148 		}
2149 		if (error == 0) {
2150 			hammer2_inode_modify(ip);
2151 			ip->meta.iparent = tdip->meta.inum;
2152 		}
2153 		update_fdip = 1;
2154 		update_tdip = 1;
2155 	}
2156 
2157 done2:
2158 	/*
2159 	 * If no error, the backend has replaced the target directory entry.
2160 	 * We must adjust nlinks on the replaced target if it exists.
2161 	 */
2162 	if (error == 0 && tip) {
2163 		int isopen;
2164 
2165 		isopen = cache_isopen(ap->a_tnch);
2166 		hammer2_inode_unlink_finisher(tip, isopen);
2167 	}
2168 
2169 	/*
2170 	 * Update directory mtimes to reflect that something changed.
2171 	 */
2172 	if (update_fdip || update_tdip) {
2173 		uint64_t mtime;
2174 
2175 		hammer2_update_time(&mtime);
2176 		if (update_fdip) {
2177 			hammer2_inode_modify(fdip);
2178 			fdip->meta.mtime = mtime;
2179 		}
2180 		if (update_tdip) {
2181 			hammer2_inode_modify(tdip);
2182 			tdip->meta.mtime = mtime;
2183 		}
2184 	}
2185 	if (tip) {
2186 		hammer2_inode_unlock(tip);
2187 		hammer2_inode_drop(tip);
2188 	}
2189 	hammer2_inode_unlock(ip);
2190 	hammer2_inode_unlock(tdip);
2191 	hammer2_inode_unlock(fdip);
2192 	hammer2_inode_drop(ip);
2193 	hammer2_inode_run_sideq(fdip->pmp, 0);
2194 
2195 	hammer2_trans_done(tdip->pmp);
2196 
2197 	/*
2198 	 * Issue the namecache update after unlocking all the internal
2199 	 * hammer2 structures, otherwise we might deadlock.
2200 	 *
2201 	 * WARNING! The target namespace must be updated atomically,
2202 	 *	    and we depend on cache_rename() to handle that for
2203 	 *	    us.  Do not do a separate cache_unlink() because
2204 	 *	    that leaves a small window of opportunity for other
2205 	 *	    threads to allocate the target namespace before we
2206 	 *	    manage to complete our rename.
2207 	 *
2208 	 * WARNING! cache_rename() (and cache_unlink()) will properly
2209 	 *	    set VREF_FINALIZE on any attached vnode.  Do not
2210 	 *	    call cache_setunresolved() manually beforehand as
2211 	 *	    this will prevent the flag from being set later via
2212 	 *	    cache_rename().  If VREF_FINALIZE is not properly set
2213 	 *	    and the inode is no longer in the topology, related
2214 	 *	    chains can remain dirty indefinitely.
2215 	 */
2216 	if (error == 0 && tip) {
2217 		/*cache_unlink(ap->a_tnch); see above */
2218 		/*cache_setunresolved(ap->a_tnch); see above */
2219 	}
2220 	if (error == 0) {
2221 		cache_rename(ap->a_fnch, ap->a_tnch);
2222 		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
2223 		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
2224 		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
2225 	}
2226 
2227 	return (error);
2228 }
2229 
2230 /*
2231  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2232  */
2233 static
2234 int
2235 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2236 {
2237 	hammer2_inode_t *ip;
2238 	int error;
2239 
2240 	ip = VTOI(ap->a_vp);
2241 
2242 	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2243 			      ap->a_fflag, ap->a_cred);
2244 	return (error);
2245 }
2246 
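/*
 * hammer2_vop_mountctl - Handle MOUNTCTL_SET_EXPORT by updating the
 * PFS's NFS export list via vfs_export(); all other operations fall
 * through to vop_stdmountctl().
 */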
2247 static
2248 int
2249 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2250 {
2251 	struct mount *mp;
2252 	hammer2_pfs_t *pmp;
2253 	int rc;
2254 
2255 	switch (ap->a_op) {
2256 	case (MOUNTCTL_SET_EXPORT):
2257 		mp = ap->a_head.a_ops->head.vv_mount;
2258 		pmp = MPTOPMP(mp);
2259 
2260 		if (ap->a_ctllen != sizeof(struct export_args))
2261 			rc = (EINVAL);
2262 		else
2263 			rc = vfs_export(mp, &pmp->export,
2264 					(const struct export_args *)ap->a_ctl);
2265 		break;
2266 	default:
2267 		rc = vop_stdmountctl(ap);
2268 		break;
2269 	}
2270 	return (rc);
2271 }
2272 
2273 /*
2274  * KQFILTER
2275  */
2276 static void filt_hammer2detach(struct knote *kn);
2277 static int filt_hammer2read(struct knote *kn, long hint);
2278 static int filt_hammer2write(struct knote *kn, long hint);
2279 static int filt_hammer2vnode(struct knote *kn, long hint);
2280 
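/*
 * The read, write, and vnode filters share a common detach routine
 * which removes the knote from the vnode's klist.
 */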
2281 static struct filterops hammer2read_filtops =
2282 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2283 	  NULL, filt_hammer2detach, filt_hammer2read };
2284 static struct filterops hammer2write_filtops =
2285 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2286 	  NULL, filt_hammer2detach, filt_hammer2write };
2287 static struct filterops hammer2vnode_filtops =
2288 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2289 	  NULL, filt_hammer2detach, filt_hammer2vnode };
2290 
2291 static
2292 int
2293 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2294 {
2295 	struct vnode *vp = ap->a_vp;
2296 	struct knote *kn = ap->a_kn;
2297 
2298 	switch (kn->kn_filter) {
2299 	case EVFILT_READ:
2300 		kn->kn_fop = &hammer2read_filtops;
2301 		break;
2302 	case EVFILT_WRITE:
2303 		kn->kn_fop = &hammer2write_filtops;
2304 		break;
2305 	case EVFILT_VNODE:
2306 		kn->kn_fop = &hammer2vnode_filtops;
2307 		break;
2308 	default:
2309 		return (EOPNOTSUPP);
2310 	}
2311 
2312 	kn->kn_hook = (caddr_t)vp;
2313 
2314 	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2315 
2316 	return(0);
2317 }
2318 
2319 static void
2320 filt_hammer2detach(struct knote *kn)
2321 {
2322 	struct vnode *vp = (void *)kn->kn_hook;
2323 
2324 	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2325 }
2326 
2327 static int
2328 filt_hammer2read(struct knote *kn, long hint)
2329 {
2330 	struct vnode *vp = (void *)kn->kn_hook;
2331 	hammer2_inode_t *ip = VTOI(vp);
2332 	off_t off;
2333 
2334 	if (hint == NOTE_REVOKE) {
2335 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2336 		return(1);
2337 	}
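	/*
	 * Report the number of bytes readable from the current file
	 * offset, clamped to INTPTR_MAX.
	 */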
2338 	off = ip->meta.size - kn->kn_fp->f_offset;
2339 	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2340 	if (kn->kn_sfflags & NOTE_OLDAPI)
2341 		return(1);
2342 	return (kn->kn_data != 0);
2343 }
2344 
2345 
2346 static int
2347 filt_hammer2write(struct knote *kn, long hint)
2348 {
2349 	if (hint == NOTE_REVOKE)
2350 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2351 	kn->kn_data = 0;
2352 	return (1);
2353 }
2354 
2355 static int
2356 filt_hammer2vnode(struct knote *kn, long hint)
2357 {
2358 	if (kn->kn_sfflags & hint)
2359 		kn->kn_fflags |= hint;
2360 	if (hint == NOTE_REVOKE) {
2361 		kn->kn_flags |= (EV_EOF | EV_NODATA);
2362 		return (1);
2363 	}
2364 	return (kn->kn_fflags != 0);
2365 }
2366 
2367 /*
2368  * FIFO VOPS
2369  */
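/*
 * Mark-atime is effectively a no-op here (hammer2 does not update
 * atime via this path); only the read-only mount check is applied.
 * This VOP is used by the spec and fifo vectors below.
 */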
2370 static
2371 int
2372 hammer2_vop_markatime(struct vop_markatime_args *ap)
2373 {
2374 	hammer2_inode_t *ip;
2375 	struct vnode *vp;
2376 
2377 	vp = ap->a_vp;
2378 	ip = VTOI(vp);
2379 
2380 	if (ip->pmp->ronly)
2381 		return (EROFS);
2382 	return(0);
2383 }
2384 
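/*
 * For fifos, give the fifo filesystem's kqfilter the first shot and
 * fall back to the regular hammer2 kqfilter if it returns an error.
 */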
2385 static
2386 int
2387 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2388 {
2389 	int error;
2390 
2391 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2392 	if (error)
2393 		error = hammer2_vop_kqfilter(ap);
2394 	return(error);
2395 }
2396 
2397 /*
2398  * VOPS vectors
2399  */
2400 struct vop_ops hammer2_vnode_vops = {
2401 	.vop_default	= vop_defaultop,
2402 	.vop_fsync	= hammer2_vop_fsync,
2403 	.vop_getpages	= vop_stdgetpages,
2404 	.vop_putpages	= vop_stdputpages,
2405 	.vop_access	= hammer2_vop_access,
2406 	.vop_advlock	= hammer2_vop_advlock,
2407 	.vop_close	= hammer2_vop_close,
2408 	.vop_nlink	= hammer2_vop_nlink,
2409 	.vop_ncreate	= hammer2_vop_ncreate,
2410 	.vop_nsymlink	= hammer2_vop_nsymlink,
2411 	.vop_nremove	= hammer2_vop_nremove,
2412 	.vop_nrmdir	= hammer2_vop_nrmdir,
2413 	.vop_nrename	= hammer2_vop_nrename,
2414 	.vop_getattr	= hammer2_vop_getattr,
2415 	.vop_setattr	= hammer2_vop_setattr,
2416 	.vop_readdir	= hammer2_vop_readdir,
2417 	.vop_readlink	= hammer2_vop_readlink,
2418 	.vop_read	= hammer2_vop_read,
2419 	.vop_write	= hammer2_vop_write,
2420 	.vop_open	= hammer2_vop_open,
2421 	.vop_inactive	= hammer2_vop_inactive,
2422 	.vop_reclaim 	= hammer2_vop_reclaim,
2423 	.vop_nresolve	= hammer2_vop_nresolve,
2424 	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2425 	.vop_nmkdir 	= hammer2_vop_nmkdir,
2426 	.vop_nmknod 	= hammer2_vop_nmknod,
2427 	.vop_ioctl	= hammer2_vop_ioctl,
2428 	.vop_mountctl	= hammer2_vop_mountctl,
2429 	.vop_bmap	= hammer2_vop_bmap,
2430 	.vop_strategy	= hammer2_vop_strategy,
2431 	.vop_kqfilter	= hammer2_vop_kqfilter
2432 };
2433 
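/*
 * VOPs for device special files.  Reads and writes are directed to
 * vop_stdnoread/vop_stdnowrite rather than handled by hammer2; access,
 * attribute, and inactive/reclaim handling go through hammer2.
 */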
2434 struct vop_ops hammer2_spec_vops = {
2435         .vop_default =          vop_defaultop,
2436         .vop_fsync =            hammer2_vop_fsync,
2437         .vop_read =             vop_stdnoread,
2438         .vop_write =            vop_stdnowrite,
2439         .vop_access =           hammer2_vop_access,
2440         .vop_close =            hammer2_vop_close,
2441         .vop_markatime =        hammer2_vop_markatime,
2442         .vop_getattr =          hammer2_vop_getattr,
2443         .vop_inactive =         hammer2_vop_inactive,
2444         .vop_reclaim =          hammer2_vop_reclaim,
2445         .vop_setattr =          hammer2_vop_setattr
2446 };
2447 
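/*
 * VOPs for fifos.  Operations not listed here default to
 * fifo_vnoperate; hammer2 supplies fsync, access, attribute,
 * inactive/reclaim, and kqfilter handling.
 */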
2448 struct vop_ops hammer2_fifo_vops = {
2449         .vop_default =          fifo_vnoperate,
2450         .vop_fsync =            hammer2_vop_fsync,
2451 #if 0
2452         .vop_read =             hammer2_vop_fiforead,
2453         .vop_write =            hammer2_vop_fifowrite,
2454 #endif
2455         .vop_access =           hammer2_vop_access,
2456 #if 0
2457         .vop_close =            hammer2_vop_fifoclose,
2458 #endif
2459         .vop_markatime =        hammer2_vop_markatime,
2460         .vop_getattr =          hammer2_vop_getattr,
2461         .vop_inactive =         hammer2_vop_inactive,
2462         .vop_reclaim =          hammer2_vop_reclaim,
2463         .vop_setattr =          hammer2_vop_setattr,
2464         .vop_kqfilter =         hammer2_vop_fifokqfilter
2465 };
2466 
2467