xref: /dragonfly/sys/vfs/hammer2/hammer2_vnops.c (revision 222be9ae)
1 /*
2  * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * Kernel Filesystem interface
38  *
39  * NOTE! local ipdata pointers must be reloaded on any modifying operation
40  *	 to the inode as its underlying chain may have changed.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
59 
60 #include "hammer2.h"
61 
62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
63 				int seqcount);
64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
65 				int ioflag, int seqcount);
66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
68 
69 struct objcache *cache_xops;
70 
71 static __inline
72 void
73 hammer2_knote(struct vnode *vp, int flags)
74 {
75 	if (flags)
76 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
77 }
78 
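/*
 * Editor's sketch (not part of the original source): the VOPs below
 * accumulate kqueue hints in a local 'kflags' and deliver them once on
 * the way out through this helper, which is a no-op when kflags == 0.
 * The 'did_*' flags here are hypothetical stand-ins for the per-VOP
 * conditions (see hammer2_vop_setattr for the real pattern).
 */
#if 0
	int kflags = 0;

	if (did_extend)				/* hypothetical */
		kflags |= NOTE_WRITE | NOTE_EXTEND;
	if (did_chattr)				/* hypothetical */
		kflags |= NOTE_ATTRIB;
	hammer2_knote(ip->vp, kflags);		/* single notification */
#endif
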
79 /*
80  * Last reference to a vnode is going away but it is still cached.
81  */
82 static
83 int
84 hammer2_vop_inactive(struct vop_inactive_args *ap)
85 {
86 	hammer2_inode_t *ip;
87 	struct vnode *vp;
88 
89 	vp = ap->a_vp;
90 	ip = VTOI(vp);
91 
92 	/*
93 	 * Degenerate case
94 	 */
95 	if (ip == NULL) {
96 		vrecycle(vp);
97 		return (0);
98 	}
99 
100 	/*
101 	 * Check for deleted inodes and recycle immediately on the last
102 	 * release.  Be sure to destroy any left-over buffer cache buffers
103 	 * so we do not waste time trying to flush them.
104 	 *
105 	 * Note that deleting the file block chains under the inode chain
106 	 * would just be a waste of energy, so don't do it.
107 	 *
108 	 * WARNING: nvtruncbuf() can only be safely called without the inode
109 	 *	    lock held due to the way our write thread works.
110 	 */
111 	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
112 		hammer2_key_t lbase;
113 		int nblksize;
114 
115 		/*
116 		 * Destroy any left-over buffer cache buffers for the
117 		 * unlinked file and recycle the vnode immediately so that
118 		 * the inode can be reclaimed.
119 		 */
120 		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
121 		nvtruncbuf(vp, 0, nblksize, 0, 0);
122 		vrecycle(vp);
123 	}
124 	return (0);
125 }
126 
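/*
 * Editor's sketch (not part of the original source): how
 * hammer2_calc_logical() is used throughout this file.  Given a byte
 * offset it returns the logical block size and fills in the logical
 * block base and, optionally, the logical EOF.  'ip' and 'uio' are
 * assumed from the surrounding context.
 */
#if 0
	hammer2_key_t lbase;	/* logical block base for the offset */
	hammer2_key_t leof;	/* logical EOF; callers may pass NULL */
	int lblksize;

	lblksize = hammer2_calc_logical(ip, uio->uio_offset, &lbase, &leof);
#endif
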
127 /*
128  * Reclaim a vnode so that it can be reused; after the inode is
129  * disassociated, the filesystem must manage it alone.
130  */
131 static
132 int
133 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
134 {
135 	hammer2_inode_t *ip;
136 	hammer2_pfs_t *pmp;
137 	struct vnode *vp;
138 
139 	vp = ap->a_vp;
140 	ip = VTOI(vp);
141 	if (ip == NULL) {
142 		return(0);
143 	}
144 	pmp = ip->pmp;
145 
146 	/*
147 	 * The final close of a deleted file or directory marks it for
148 	 * destruction.  The DELETED flag allows the flusher to shortcut
149 	 * any modified blocks still unflushed (that is, just ignore them).
150 	 *
151 	 * HAMMER2 usually does not try to optimize the freemap by returning
152 	 * deleted blocks to it as it does not usually know how many snapshots
153 	 * might be referencing portions of the file/dir.
154 	 */
155 	vp->v_data = NULL;
156 	ip->vp = NULL;
157 
158 	/*
159 	 * NOTE! We do not attempt to flush chains here, flushing is
160 	 *	 really fragile and could also deadlock.
161 	 */
162 	vclrisdirty(vp);
163 
164 	/*
165 	 * A modified inode may require chain synchronization.  This
166 	 * synchronization is usually handled by VOP_SYNC / VOP_FSYNC
167 	 * when vfsync() is called.  However, that requires a vnode.
168 	 *
169 	 * When the vnode is disassociated we must keep track of any modified
170 	 * inode via the sideq so that it is properly flushed.  We cannot
171 	 * safely synchronize the inode from inside the reclaim due to
172 	 * potentially deep locks held as-of when the reclaim occurs.
173 	 * Interactions and potential deadlocks abound.
174 	 */
175 	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
176 			  HAMMER2_INODE_MODIFIED |
177 			  HAMMER2_INODE_RESIZED)) &&
178 	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
179 		hammer2_inode_sideq_t *ipul;
180 
181 		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
182 		ipul->ip = ip;
183 
184 		hammer2_spin_ex(&pmp->list_spin);
185 		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
186 			/* ref -> sideq */
187 			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
188 			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
189 			++pmp->sideq_count;
190 			hammer2_spin_unex(&pmp->list_spin);
191 		} else {
192 			hammer2_spin_unex(&pmp->list_spin);
193 			kfree(ipul, pmp->minode);
194 			hammer2_inode_drop(ip);		/* vp ref */
195 		}
196 		/* retain ref from vp for ipul */
197 	} else {
198 		hammer2_inode_drop(ip);			/* vp ref */
199 	}
200 
201 	/*
202 	 * XXX handle background sync when ip dirty, kernel will no longer
203 	 * notify us regarding this inode because there is no longer a
204 	 * vnode attached to it.
205 	 */
206 
207 	return (0);
208 }
209 
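/*
 * Editor's sketch (assumptions flagged, not part of the original
 * source): the sideq entries queued above are presumed to be drained
 * later along these lines by hammer2_inode_run_sideq(), syncing each
 * inode and dropping the reference inherited from the vnode.  Spinlock
 * handling is elided.
 */
#if 0
	hammer2_inode_sideq_t *ipul;

	while ((ipul = TAILQ_FIRST(&pmp->sideq)) != NULL) {
		TAILQ_REMOVE(&pmp->sideq, ipul, entry);
		--pmp->sideq_count;
		atomic_clear_int(&ipul->ip->flags, HAMMER2_INODE_ONSIDEQ);
		/* lock ip, hammer2_inode_chain_sync(ip), unlock ip */
		hammer2_inode_drop(ipul->ip);	/* ref from reclaim */
		kfree(ipul, pmp->minode);
	}
#endif
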
210 static
211 int
212 hammer2_vop_fsync(struct vop_fsync_args *ap)
213 {
214 	hammer2_inode_t *ip;
215 	struct vnode *vp;
216 
217 	vp = ap->a_vp;
218 	ip = VTOI(vp);
219 
220 #if 0
221 	/* XXX can't do this yet */
222 	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
223 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
224 #endif
225 	hammer2_trans_init(ip->pmp, 0);
226 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
227 
228 	/*
229 	 * Calling chain_flush here creates a lot of duplicative
230 	 * COW operations due to non-optimal vnode ordering.
231 	 *
232 	 * Only do it for an actual fsync() syscall.  The other forms
233 	 * which call this function will eventually call chain_flush
234 	 * on the volume root as a catch-all, which is far more optimal.
235 	 */
236 	hammer2_inode_lock(ip, 0);
237 	if (ip->flags & HAMMER2_INODE_MODIFIED)
238 		hammer2_inode_chain_sync(ip);
239 	hammer2_inode_unlock(ip);
240 	hammer2_trans_done(ip->pmp);
241 
242 	return (0);
243 }
244 
245 static
246 int
247 hammer2_vop_access(struct vop_access_args *ap)
248 {
249 	hammer2_inode_t *ip = VTOI(ap->a_vp);
250 	uid_t uid;
251 	gid_t gid;
252 	int error;
253 
254 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
255 	uid = hammer2_to_unix_xid(&ip->meta.uid);
256 	gid = hammer2_to_unix_xid(&ip->meta.gid);
257 	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
258 	hammer2_inode_unlock(ip);
259 
260 	return (error);
261 }
262 
263 static
264 int
265 hammer2_vop_getattr(struct vop_getattr_args *ap)
266 {
267 	hammer2_pfs_t *pmp;
268 	hammer2_inode_t *ip;
269 	struct vnode *vp;
270 	struct vattr *vap;
271 	hammer2_chain_t *chain;
272 	int i;
273 
274 	vp = ap->a_vp;
275 	vap = ap->a_vap;
276 
277 	ip = VTOI(vp);
278 	pmp = ip->pmp;
279 
280 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
281 
282 	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
283 	vap->va_fileid = ip->meta.inum;
284 	vap->va_mode = ip->meta.mode;
285 	vap->va_nlink = ip->meta.nlinks;
286 	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
287 	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
288 	vap->va_rmajor = 0;
289 	vap->va_rminor = 0;
290 	vap->va_size = ip->meta.size;	/* protected by shared lock */
291 	vap->va_blocksize = HAMMER2_PBUFSIZE;
292 	vap->va_flags = ip->meta.uflags;
293 	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
294 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
295 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); /* atime not supported */
296 	vap->va_gen = 1;
297 	vap->va_bytes = 0;
298 	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
299 		/*
300 		 * Can't really calculate directory use sans the files under
301 		 * it, just assume one block for now.
302 		 */
303 		vap->va_bytes += HAMMER2_INODE_BYTES;
304 	} else {
305 		for (i = 0; i < ip->cluster.nchains; ++i) {
306 			if ((chain = ip->cluster.array[i].chain) != NULL) {
307 				if (vap->va_bytes <
308 				    chain->bref.embed.stats.data_count) {
309 					vap->va_bytes =
310 					    chain->bref.embed.stats.data_count;
311 				}
312 			}
313 		}
314 	}
315 	vap->va_type = hammer2_get_vtype(ip->meta.type);
316 	vap->va_filerev = 0;
317 	vap->va_uid_uuid = ip->meta.uid;
318 	vap->va_gid_uuid = ip->meta.gid;
319 	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
320 			  VA_FSID_UUID_VALID;
321 
322 	hammer2_inode_unlock(ip);
323 
324 	return (0);
325 }
326 
327 static
328 int
329 hammer2_vop_setattr(struct vop_setattr_args *ap)
330 {
331 	hammer2_inode_t *ip;
332 	struct vnode *vp;
333 	struct vattr *vap;
334 	int error;
335 	int kflags = 0;
336 	uint64_t ctime;
337 
338 	vp = ap->a_vp;
339 	vap = ap->a_vap;
340 	hammer2_update_time(&ctime);
341 
342 	ip = VTOI(vp);
343 
344 	if (ip->pmp->ronly)
345 		return (EROFS);
346 	if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
347 		return (ENOSPC);
348 
349 	hammer2_pfs_memory_wait(ip->pmp);
350 	hammer2_trans_init(ip->pmp, 0);
351 	hammer2_inode_lock(ip, 0);
352 	error = 0;
353 
354 	if (vap->va_flags != VNOVAL) {
355 		uint32_t flags;
356 
357 		flags = ip->meta.uflags;
358 		error = vop_helper_setattr_flags(&flags, vap->va_flags,
359 				     hammer2_to_unix_xid(&ip->meta.uid),
360 				     ap->a_cred);
361 		if (error == 0) {
362 			if (ip->meta.uflags != flags) {
363 				hammer2_inode_modify(ip);
364 				ip->meta.uflags = flags;
365 				ip->meta.ctime = ctime;
366 				kflags |= NOTE_ATTRIB;
367 			}
368 			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
369 				error = 0;
370 				goto done;
371 			}
372 		}
373 		goto done;
374 	}
375 	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
376 		error = EPERM;
377 		goto done;
378 	}
379 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
380 		mode_t cur_mode = ip->meta.mode;
381 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
382 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
383 		uuid_t uuid_uid;
384 		uuid_t uuid_gid;
385 
386 		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
387 					 ap->a_cred,
388 					 &cur_uid, &cur_gid, &cur_mode);
389 		if (error == 0) {
390 			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
391 			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
392 			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
393 			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
394 			    ip->meta.mode != cur_mode
395 			) {
396 				hammer2_inode_modify(ip);
397 				ip->meta.uid = uuid_uid;
398 				ip->meta.gid = uuid_gid;
399 				ip->meta.mode = cur_mode;
400 				ip->meta.ctime = ctime;
401 			}
402 			kflags |= NOTE_ATTRIB;
403 		}
404 	}
405 
406 	/*
407 	 * Resize the file
408 	 */
409 	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
410 		switch(vp->v_type) {
411 		case VREG:
412 			if (vap->va_size == ip->meta.size)
413 				break;
414 			if (vap->va_size < ip->meta.size) {
415 				hammer2_mtx_ex(&ip->truncate_lock);
416 				hammer2_truncate_file(ip, vap->va_size);
417 				hammer2_mtx_unlock(&ip->truncate_lock);
418 				kflags |= NOTE_WRITE;
419 			} else {
420 				hammer2_extend_file(ip, vap->va_size);
421 				kflags |= NOTE_WRITE | NOTE_EXTEND;
422 			}
423 			hammer2_inode_modify(ip);
424 			ip->meta.mtime = ctime;
425 			break;
426 		default:
427 			error = EINVAL;
428 			goto done;
429 		}
430 	}
431 #if 0
432 	/* atime not supported */
433 	if (vap->va_atime.tv_sec != VNOVAL) {
434 		hammer2_inode_modify(ip);
435 		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
436 		kflags |= NOTE_ATTRIB;
437 	}
438 #endif
439 	if (vap->va_mode != (mode_t)VNOVAL) {
440 		mode_t cur_mode = ip->meta.mode;
441 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
442 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
443 
444 		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
445 					 cur_uid, cur_gid, &cur_mode);
446 		if (error == 0 && ip->meta.mode != cur_mode) {
447 			hammer2_inode_modify(ip);
448 			ip->meta.mode = cur_mode;
449 			ip->meta.ctime = ctime;
450 			kflags |= NOTE_ATTRIB;
451 		}
452 	}
453 
454 	if (vap->va_mtime.tv_sec != VNOVAL) {
455 		hammer2_inode_modify(ip);
456 		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
457 		kflags |= NOTE_ATTRIB;
458 	}
459 
460 done:
461 	/*
462 	 * If a truncation occurred we must call hammer2_inode_chain_sync()
463 	 * now in order to trim the related data chains, otherwise a later
464 	 * expansion can cause havoc.
465 	 *
466 	 * If an extend occurred that changed the DIRECTDATA state, we must
467 	 * call hammer2_inode_chain_sync() now in order to prepare the
468 	 * inode's indirect block table.
469 	 */
470 	if (ip->flags & HAMMER2_INODE_RESIZED)
471 		hammer2_inode_chain_sync(ip);
472 
473 	/*
474 	 * Cleanup.
475 	 */
476 	hammer2_inode_unlock(ip);
477 	hammer2_trans_done(ip->pmp);
478 	hammer2_knote(ip->vp, kflags);
479 
480 	return (error);
481 }
482 
483 static
484 int
485 hammer2_vop_readdir(struct vop_readdir_args *ap)
486 {
487 	hammer2_xop_readdir_t *xop;
488 	hammer2_blockref_t bref;
489 	hammer2_inode_t *ip;
490 	hammer2_tid_t inum;
491 	hammer2_key_t lkey;
492 	struct uio *uio;
493 	off_t *cookies;
494 	off_t saveoff;
495 	int cookie_index;
496 	int ncookies;
497 	int error;
498 	int eofflag;
499 	int r;
500 
501 	ip = VTOI(ap->a_vp);
502 	uio = ap->a_uio;
503 	saveoff = uio->uio_offset;
504 	eofflag = 0;
505 	error = 0;
506 
507 	/*
508 	 * Setup cookies directory entry cookies if requested
509 	 * Set up directory entry cookies if requested
510 	if (ap->a_ncookies) {
511 		ncookies = uio->uio_resid / 16 + 1;
512 		if (ncookies > 1024)
513 			ncookies = 1024;
514 		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
515 	} else {
516 		ncookies = -1;
517 		cookies = NULL;
518 	}
519 	cookie_index = 0;
520 
521 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
522 
523 	/*
524 	 * Handle artificial entries.  To ensure that only positive 64 bit
525 	 * quantities are returned to userland we always strip off bit 63.
526 	 * The hash code is designed such that codes 0x0000-0x7FFF are not
527 	 * used, allowing us to use these codes for artificial entries.
528 	 *
529 	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
530 	 * allow '..' to cross the mount point into (e.g.) the super-root.
531 	 */
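	/*
	 * Editor's note (illustrative, not part of the original source):
	 * the cookie space as used below.  Real directory entries carry
	 * hash keys with HAMMER2_DIRHASH_VISIBLE (bit 63) set, which is
	 * stripped before the cookie reaches userland.
	 */
#if 0
	/* cookie 0  -> artificial "."  entry			     */
	/* cookie 1  -> artificial ".." entry			     */
	/* cookie >1 -> directory hash key & HAMMER2_DIRHASH_USERMSK */
	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE; /* resume hash scan */
#endif
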
532 	if (saveoff == 0) {
533 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
534 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
535 		if (r)
536 			goto done;
537 		if (cookies)
538 			cookies[cookie_index] = saveoff;
539 		++saveoff;
540 		++cookie_index;
541 		if (cookie_index == ncookies)
542 			goto done;
543 	}
544 
545 	if (saveoff == 1) {
546 		/*
547 		 * Be careful with lockorder when accessing ".."
548 		 *
549 		 * (ip is the current dir; ip->meta.iparent supplies the parent).
550 		 */
551 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
552 		if (ip != ip->pmp->iroot)
553 			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
554 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
555 		if (r)
556 			goto done;
557 		if (cookies)
558 			cookies[cookie_index] = saveoff;
559 		++saveoff;
560 		++cookie_index;
561 		if (cookie_index == ncookies)
562 			goto done;
563 	}
564 
565 	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
566 	if (hammer2_debug & 0x0020)
567 		kprintf("readdir: lkey %016jx\n", lkey);
568 	if (error)
569 		goto done;
570 
571 	/*
572 	 * Use XOP for cluster scan.
573 	 *
574 	 * parent is the inode cluster, already locked for us.  Don't
575 	 * double lock shared locks as this will screw up upgrades.
576 	 */
577 	xop = hammer2_xop_alloc(ip, 0);
578 	xop->lkey = lkey;
579 	hammer2_xop_start(&xop->head, hammer2_xop_readdir);
580 
581 	for (;;) {
582 		const hammer2_inode_data_t *ripdata;
583 		const char *dname;
584 		int dtype;
585 
586 		error = hammer2_xop_collect(&xop->head, 0);
587 		error = hammer2_error_to_errno(error);
588 		if (error) {
589 			break;
590 		}
591 		if (cookie_index == ncookies)
592 			break;
593 		if (hammer2_debug & 0x0020)
594 			kprintf("cluster chain %p %p\n",
595 				xop->head.cluster.focus,
596 				(xop->head.cluster.focus ?
597 				 xop->head.cluster.focus->data : (void *)-1));
598 		hammer2_cluster_bref(&xop->head.cluster, &bref);
599 
600 		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
601 			ripdata =
602 			    &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
603 			dtype = hammer2_get_dtype(ripdata->meta.type);
604 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
605 			r = vop_write_dirent(&error, uio,
606 					     ripdata->meta.inum &
607 					      HAMMER2_DIRHASH_USERMSK,
608 					     dtype,
609 					     ripdata->meta.name_len,
610 					     ripdata->filename);
611 			if (r)
612 				break;
613 			if (cookies)
614 				cookies[cookie_index] = saveoff;
615 			++cookie_index;
616 		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
617 			dtype = hammer2_get_dtype(bref.embed.dirent.type);
618 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
619 			if (bref.embed.dirent.namlen <=
620 			    sizeof(bref.check.buf)) {
621 				dname = bref.check.buf;
622 			} else {
623 				dname =
624 				 hammer2_cluster_rdata(&xop->head.cluster)->buf;
625 			}
626 			r = vop_write_dirent(&error, uio,
627 					     bref.embed.dirent.inum,
628 					     dtype,
629 					     bref.embed.dirent.namlen,
630 					     dname);
631 			if (r)
632 				break;
633 			if (cookies)
634 				cookies[cookie_index] = saveoff;
635 			++cookie_index;
636 		} else {
637 			/* XXX chain error */
638 			kprintf("bad chain type readdir %d\n", bref.type);
639 		}
640 	}
641 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
642 	if (error == ENOENT) {
643 		error = 0;
644 		eofflag = 1;
645 		saveoff = (hammer2_key_t)-1;
646 	} else {
647 		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
648 	}
649 done:
650 	hammer2_inode_unlock(ip);
651 	if (ap->a_eofflag)
652 		*ap->a_eofflag = eofflag;
653 	if (hammer2_debug & 0x0020)
654 		kprintf("readdir: done at %016jx\n", saveoff);
655 	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
656 	if (error && cookie_index == 0) {
657 		if (cookies) {
658 			kfree(cookies, M_TEMP);
659 			*ap->a_ncookies = 0;
660 			*ap->a_cookies = NULL;
661 		}
662 	} else {
663 		if (cookies) {
664 			*ap->a_ncookies = cookie_index;
665 			*ap->a_cookies = cookies;
666 		}
667 	}
668 	return (error);
669 }
670 
671 /*
672  * hammer2_vop_readlink { vp, uio, cred }
673  */
674 static
675 int
676 hammer2_vop_readlink(struct vop_readlink_args *ap)
677 {
678 	struct vnode *vp;
679 	hammer2_inode_t *ip;
680 	int error;
681 
682 	vp = ap->a_vp;
683 	if (vp->v_type != VLNK)
684 		return (EINVAL);
685 	ip = VTOI(vp);
686 
687 	error = hammer2_read_file(ip, ap->a_uio, 0);
688 	return (error);
689 }
690 
691 static
692 int
693 hammer2_vop_read(struct vop_read_args *ap)
694 {
695 	struct vnode *vp;
696 	hammer2_inode_t *ip;
697 	struct uio *uio;
698 	int error;
699 	int seqcount;
700 	int bigread;
701 
702 	/*
703 	 * Read operations supported on this vnode?
704 	 */
705 	vp = ap->a_vp;
706 	if (vp->v_type != VREG)
707 		return (EINVAL);
708 
709 	/*
710 	 * Misc
711 	 */
712 	ip = VTOI(vp);
713 	uio = ap->a_uio;
714 	error = 0;
715 
716 	seqcount = ap->a_ioflag >> 16;
717 	bigread = (uio->uio_resid > 100 * 1024 * 1024);
718 
719 	error = hammer2_read_file(ip, uio, seqcount);
720 	return (error);
721 }
722 
723 static
724 int
725 hammer2_vop_write(struct vop_write_args *ap)
726 {
727 	hammer2_inode_t *ip;
728 	thread_t td;
729 	struct vnode *vp;
730 	struct uio *uio;
731 	int error;
732 	int seqcount;
733 	int ioflag;
734 
735 	/*
736 	 * Write operations supported on this vnode?
737 	 */
738 	vp = ap->a_vp;
739 	if (vp->v_type != VREG)
740 		return (EINVAL);
741 
742 	/*
743 	 * Misc
744 	 */
745 	ip = VTOI(vp);
746 	ioflag = ap->a_ioflag;
747 	uio = ap->a_uio;
748 	error = 0;
749 	if (ip->pmp->ronly)
750 		return (EROFS);
751 	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
752 	case 2:
753 		return (ENOSPC);
754 	case 1:
755 		ioflag |= IO_DIRECT;	/* semi-synchronous */
756 		/* fall through */
757 	default:
758 		break;
759 	}
760 
761 	seqcount = ioflag >> 16;
762 
763 	/*
764 	 * Check resource limit
765 	 */
766 	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
767 	    uio->uio_offset + uio->uio_resid >
768 	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
769 		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
770 		return (EFBIG);
771 	}
772 
773 	/*
774 	 * The transaction interlocks against flush initiations
775 	 * (note: but will run concurrently with the actual flush).
776 	 *
777 	 * To avoid deadlocking against the VM system, we must flag any
778 	 * transaction related to the buffer cache or other direct
779 	 * VM page manipulation.
780 	 */
781 	if (uio->uio_segflg == UIO_NOCOPY)
782 		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
783 	else
784 		hammer2_trans_init(ip->pmp, 0);
785 	error = hammer2_write_file(ip, uio, ioflag, seqcount);
786 	hammer2_trans_done(ip->pmp);
787 
788 	return (error);
789 }
790 
791 /*
792  * Perform read operations on a file or symlink given an UNLOCKED
793  * inode and uio.
794  *
795  * The caller passes ip unlocked; shared locks are taken internally.
796  */
797 static
798 int
799 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
800 {
801 	hammer2_off_t size;
802 	struct buf *bp;
803 	int error;
804 
805 	error = 0;
806 
807 	/*
808 	 * UIO read loop.
809 	 *
810 	 * WARNING! Assumes that the kernel interlocks size changes at the
811 	 *	    vnode level.
812 	 */
813 	hammer2_mtx_sh(&ip->lock);
814 	hammer2_mtx_sh(&ip->truncate_lock);
815 	size = ip->meta.size;
816 	hammer2_mtx_unlock(&ip->lock);
817 
818 	while (uio->uio_resid > 0 && uio->uio_offset < size) {
819 		hammer2_key_t lbase;
820 		hammer2_key_t leof;
821 		int lblksize;
822 		int loff;
823 		int n;
824 
825 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
826 						&lbase, &leof);
827 
828 #if 1
829 		error = cluster_read(ip->vp, leof, lbase, lblksize,
830 				     uio->uio_resid, seqcount * MAXBSIZE,
831 				     &bp);
832 #else
833 		if (uio->uio_segflg == UIO_NOCOPY) {
834 			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
835 			if (bp->b_flags & B_CACHE) {
836 				int i;
837 				int j = 0;
838 				if (bp->b_xio.xio_npages != 16)
839 					kprintf("NPAGES BAD\n");
840 				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
841 					vm_page_t m;
842 					m = bp->b_xio.xio_pages[i];
843 					if (m == NULL || m->valid == 0) {
844 						kprintf("bp %016jx %016jx pg %d inv",
845 							lbase, leof, i);
846 						if (m)
847 							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
848 						kprintf("\n");
849 						j = 1;
850 					}
851 				}
852 				if (j)
853 					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
854 			}
855 			bqrelse(bp);
856 		}
857 		error = bread(ip->vp, lbase, lblksize, &bp);
858 #endif
859 		if (error) {
860 			brelse(bp);
861 			break;
862 		}
863 		loff = (int)(uio->uio_offset - lbase);
864 		n = lblksize - loff;
865 		if (n > uio->uio_resid)
866 			n = uio->uio_resid;
867 		if (n > size - uio->uio_offset)
868 			n = (int)(size - uio->uio_offset);
869 		bp->b_flags |= B_AGE;
870 		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
871 		bqrelse(bp);
872 	}
873 	hammer2_mtx_unlock(&ip->truncate_lock);
874 
875 	return (error);
876 }
877 
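/*
 * Editor's worked example (hypothetical helper, not part of the
 * original source): the per-iteration copy length computed by the read
 * loop above, clamped to the buffer end, the request, and the file EOF.
 * The caller guarantees offset < size.
 */
#if 0
static int
h2_read_span(hammer2_off_t offset, hammer2_off_t size,
	     hammer2_key_t lbase, int lblksize, size_t resid)
{
	int loff = (int)(offset - lbase);	/* offset within buffer */
	int n = lblksize - loff;		/* bytes to buffer end */

	if ((size_t)n > resid)			/* clamp to request */
		n = (int)resid;
	if ((hammer2_off_t)n > size - offset)	/* clamp to file EOF */
		n = (int)(size - offset);
	return (n);
}
#endif
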
878 /*
879  * Write to the file represented by the inode via the logical buffer cache.
880  * The inode may represent a regular file or a symlink.
881  *
882  * The inode must not be locked.
883  */
884 static
885 int
886 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
887 		   int ioflag, int seqcount)
888 {
889 	hammer2_key_t old_eof;
890 	hammer2_key_t new_eof;
891 	struct buf *bp;
892 	int kflags;
893 	int error;
894 	int modified;
895 
896 	/*
897 	 * Set up for append mode (IO_APPEND)
898 	 *
899 	 * WARNING! Assumes that the kernel interlocks size changes at the
900 	 *	    vnode level.
901 	 */
902 	hammer2_mtx_ex(&ip->lock);
903 	hammer2_mtx_sh(&ip->truncate_lock);
904 	if (ioflag & IO_APPEND)
905 		uio->uio_offset = ip->meta.size;
906 	old_eof = ip->meta.size;
907 
908 	/*
909 	 * Extend the file if necessary.  If the write fails at some point
910 	 * we will truncate it back down to cover as much as we were able
911 	 * to write.
912 	 *
913 	 * Doing this now makes it easier to calculate buffer sizes in
914 	 * the loop.
915 	 */
916 	kflags = 0;
917 	error = 0;
918 	modified = 0;
919 
920 	if (uio->uio_offset + uio->uio_resid > old_eof) {
921 		new_eof = uio->uio_offset + uio->uio_resid;
922 		modified = 1;
923 		hammer2_extend_file(ip, new_eof);
924 		kflags |= NOTE_EXTEND;
925 	} else {
926 		new_eof = old_eof;
927 	}
928 	hammer2_mtx_unlock(&ip->lock);
929 
930 	/*
931 	 * UIO write loop
932 	 */
933 	while (uio->uio_resid > 0) {
934 		hammer2_key_t lbase;
935 		int trivial;
936 		int endofblk;
937 		int lblksize;
938 		int loff;
939 		int n;
940 
941 		/*
942 		 * Don't allow the buffer build to blow out the buffer
943 		 * cache.
944 		 */
945 		if ((ioflag & IO_RECURSE) == 0)
946 			bwillwrite(HAMMER2_PBUFSIZE);
947 
948 		/*
949 		 * This nominally tells us how much we can cluster and
950 		 * what the logical buffer size needs to be.  Currently
951 		 * we don't try to cluster the write and just handle one
952 		 * block at a time.
953 		 */
954 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
955 						&lbase, NULL);
956 		loff = (int)(uio->uio_offset - lbase);
957 
958 		KKASSERT(lblksize <= 65536);
959 
960 		/*
961 		 * Calculate bytes to copy this transfer and whether the
962 		 * copy completely covers the buffer or not.
963 		 */
964 		trivial = 0;
965 		n = lblksize - loff;
966 		if (n > uio->uio_resid) {
967 			n = uio->uio_resid;
968 			if (loff == lbase && uio->uio_offset + n == new_eof)
969 				trivial = 1;
970 			endofblk = 0;
971 		} else {
972 			if (loff == 0)
973 				trivial = 1;
974 			endofblk = 1;
975 		}
976 		if (lbase >= new_eof)
977 			trivial = 1;
978 
979 		/*
980 		 * Get the buffer
981 		 */
982 		if (uio->uio_segflg == UIO_NOCOPY) {
983 			/*
984 			 * Issuing a write with the same data backing the
985 			 * buffer.  Instantiate the buffer to collect the
986 			 * backing vm pages, then read-in any missing bits.
987 			 *
988 			 * This case is used by vop_stdputpages().
989 			 */
990 			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
991 			if ((bp->b_flags & B_CACHE) == 0) {
992 				bqrelse(bp);
993 				error = bread(ip->vp, lbase, lblksize, &bp);
994 			}
995 		} else if (trivial) {
996 			/*
997 			 * Even though we are entirely overwriting the buffer
998 			 * we may still have to zero it out to avoid a
999 			 * mmap/write visibility issue.
1000 			 */
1001 			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
1002 			if ((bp->b_flags & B_CACHE) == 0)
1003 				vfs_bio_clrbuf(bp);
1004 		} else {
1005 			/*
1006 			 * Partial overwrite, read in any missing bits then
1007 			 * replace the portion being written.
1008 			 *
1009 			 * (The strategy code will detect zero-fill physical
1010 			 * blocks for this case).
1011 			 */
1012 			error = bread(ip->vp, lbase, lblksize, &bp);
1013 			if (error == 0)
1014 				bheavy(bp);
1015 		}
1016 
1017 		if (error) {
1018 			brelse(bp);
1019 			break;
1020 		}
1021 
1022 		/*
1023 		 * Ok, copy the data in
1024 		 */
1025 		error = uiomovebp(bp, bp->b_data + loff, n, uio);
1026 		kflags |= NOTE_WRITE;
1027 		modified = 1;
1028 		if (error) {
1029 			brelse(bp);
1030 			break;
1031 		}
1032 
1033 		/*
1034 		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1035 		 *	    with IO_SYNC or IO_ASYNC set.  These writes
1036 		 *	    must be handled as the pageout daemon expects.
1037 		 *
1038 		 * NOTE!    H2 relies on cluster_write() here because it
1039 		 *	    cannot preallocate disk blocks at the logical
1040 		 *	    level due to not knowing what the compression
1041 		 *	    size will be at this time.
1042 		 *
1043 		 *	    We must use cluster_write() here and we depend
1044 		 *	    on the write-behind feature to flush buffers
1045 		 *	    appropriately.  If we let the buffer daemons do
1046 		 *	    it the block allocations will be all over the
1047 		 *	    map.
1048 		 */
1049 		if (ioflag & IO_SYNC) {
1050 			bwrite(bp);
1051 		} else if ((ioflag & IO_DIRECT) && endofblk) {
1052 			bawrite(bp);
1053 		} else if (ioflag & IO_ASYNC) {
1054 			bawrite(bp);
1055 		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1056 			bdwrite(bp);
1057 		} else {
1058 #if 1
1059 			bp->b_flags |= B_CLUSTEROK;
1060 			cluster_write(bp, new_eof, lblksize, seqcount);
1061 #else
1062 			bp->b_flags |= B_CLUSTEROK;
1063 			bdwrite(bp);
1064 #endif
1065 		}
1066 	}
1067 
1068 	/*
1069 	 * Cleanup.  If we extended the file EOF but failed to write it all
1070 	 * through, the entire write is a failure and we have to back up.
1071 	 */
1072 	if (error && new_eof != old_eof) {
1073 		hammer2_mtx_unlock(&ip->truncate_lock);
1074 		hammer2_mtx_ex(&ip->lock);
1075 		hammer2_mtx_ex(&ip->truncate_lock);
1076 		hammer2_truncate_file(ip, old_eof);
1077 		if (ip->flags & HAMMER2_INODE_MODIFIED)
1078 			hammer2_inode_chain_sync(ip);
1079 		hammer2_mtx_unlock(&ip->lock);
1080 	} else if (modified) {
1081 		hammer2_mtx_ex(&ip->lock);
1082 		hammer2_inode_modify(ip);
1083 		hammer2_update_time(&ip->meta.mtime);
1084 		if (ip->flags & HAMMER2_INODE_MODIFIED)
1085 			hammer2_inode_chain_sync(ip);
1086 		hammer2_mtx_unlock(&ip->lock);
1087 		hammer2_knote(ip->vp, kflags);
1088 	}
1089 	hammer2_trans_assert_strategy(ip->pmp);
1090 	hammer2_mtx_unlock(&ip->truncate_lock);
1091 
1092 	return error;
1093 }
1094 
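/*
 * Editor's sketch (hypothetical helper, not part of the original
 * source): the trivial/endofblk classification from the write loop
 * above.  'trivial' means the copy fully covers the logical buffer so
 * no read-before-write is needed; 'endofblk' means the copy reaches the
 * end of the buffer.  Note the original tests 'loff == lbase', which
 * can only hold at offset 0; the sketch spells out the presumed intent
 * 'loff == 0'.
 */
#if 0
static void
h2_classify_write(hammer2_off_t offset, hammer2_key_t lbase, int lblksize,
		  size_t resid, hammer2_key_t new_eof,
		  int *trivialp, int *endofblkp)
{
	int loff = (int)(offset - lbase);
	int n = lblksize - loff;

	*trivialp = 0;
	if ((size_t)n > resid) {
		n = (int)resid;
		if (loff == 0 && offset + n == new_eof)
			*trivialp = 1;
		*endofblkp = 0;
	} else {
		if (loff == 0)
			*trivialp = 1;
		*endofblkp = 1;
	}
	if (lbase >= new_eof)
		*trivialp = 1;
}
#endif
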
1095 /*
1096  * Truncate the size of a file.  The inode must not be locked.
1097  *
1098  * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1099  * ensure that any on-media data beyond the new file EOF has been destroyed.
1100  *
1101  * WARNING: nvtruncbuf() can only be safely called without the inode lock
1102  *	    held due to the way our write thread works.  If the truncation
1103  *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
1104  *	    for dirtying that buffer and zeroing out trailing bytes.
1105  *
1106  * WARNING! Assumes that the kernel interlocks size changes at the
1107  *	    vnode level.
1108  *
1109  * WARNING! Caller assumes responsibility for removing dead blocks
1110  *	    if INODE_RESIZED is set.
1111  */
1112 static
1113 void
1114 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1115 {
1116 	hammer2_key_t lbase;
1117 	int nblksize;
1118 
1119 	hammer2_mtx_unlock(&ip->lock);
1120 	if (ip->vp) {
1121 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1122 		nvtruncbuf(ip->vp, nsize,
1123 			   nblksize, (int)nsize & (nblksize - 1),
1124 			   0);
1125 	}
1126 	hammer2_mtx_ex(&ip->lock);
1127 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1128 	ip->osize = ip->meta.size;
1129 	ip->meta.size = nsize;
1130 	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1131 	hammer2_inode_modify(ip);
1132 }
1133 
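/*
 * Editor's note (illustrative, not part of the original source): the
 * '(int)nsize & (nblksize - 1)' argument passed to nvtruncbuf() above
 * is the byte offset of the new EOF within its logical buffer;
 * nvtruncbuf() zeroes from there to the end of that buffer.
 */
#if 0
	/* e.g. nblksize = 16384, nsize = 20000 */
	int boff = (int)nsize & (nblksize - 1);	/* 20000 & 16383 == 3616 */

	nvtruncbuf(ip->vp, nsize, nblksize, boff, 0);
#endif
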
1134 /*
1135  * Extend the size of a file.  The inode must not be locked.
1136  *
1137  * Even though the file size is changing, we do not have to set the
1138  * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1139  * boundary.  When this occurs a hammer2_inode_chain_sync() is required
1140  * to prepare the inode cluster's indirect block table, otherwise
1141  * async execution of the strategy code will implode on us.
1142  *
1143  * WARNING! Assumes that the kernel interlocks size changes at the
1144  *	    vnode level.
1145  *
1146  * WARNING! Caller assumes responsibility for transitioning out
1147  *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
1148  */
1149 static
1150 void
1151 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1152 {
1153 	hammer2_key_t lbase;
1154 	hammer2_key_t osize;
1155 	int oblksize;
1156 	int nblksize;
1157 
1158 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1159 	hammer2_inode_modify(ip);
1160 	osize = ip->meta.size;
1161 	ip->osize = osize;
1162 	ip->meta.size = nsize;
1163 
1164 	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1165 		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1166 		hammer2_inode_chain_sync(ip);
1167 	}
1168 
1169 	hammer2_mtx_unlock(&ip->lock);
1170 	if (ip->vp) {
1171 		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1172 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1173 		nvextendbuf(ip->vp,
1174 			    osize, nsize,
1175 			    oblksize, nblksize,
1176 			    -1, -1, 0);
1177 	}
1178 	hammer2_mtx_ex(&ip->lock);
1179 }
1180 
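/*
 * Editor's note (illustrative, not part of the original source): the
 * early chain sync above fires only for an extension that crosses
 * HAMMER2_EMBEDDED_BYTES, i.e. when data stored directly in the inode
 * must move out into real blocks.  The strategy code runs
 * asynchronously and needs the indirect block table to already exist.
 */
#if 0
	if (osize <= HAMMER2_EMBEDDED_BYTES &&
	    nsize  > HAMMER2_EMBEDDED_BYTES) {
		/* leaving DIRECTDATA mode: build the block table now */
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
	}
#endif
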
1181 static
1182 int
1183 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1184 {
1185 	hammer2_xop_nresolve_t *xop;
1186 	hammer2_inode_t *ip;
1187 	hammer2_inode_t *dip;
1188 	struct namecache *ncp;
1189 	struct vnode *vp;
1190 	int error;
1191 
1192 	dip = VTOI(ap->a_dvp);
1193 	xop = hammer2_xop_alloc(dip, 0);
1194 
1195 	ncp = ap->a_nch->ncp;
1196 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1197 
1198 	/*
1199 	 * Note: In DragonFly the kernel handles '.' and '..'.
1200 	 */
1201 	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1202 	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
1203 
1204 	error = hammer2_xop_collect(&xop->head, 0);
1205 	error = hammer2_error_to_errno(error);
1206 	if (error) {
1207 		ip = NULL;
1208 	} else {
1209 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1210 	}
1211 	hammer2_inode_unlock(dip);
1212 
1213 	/*
1214 	 * Acquire the related vnode
1215 	 *
1216 	 * NOTE: For error processing, only ENOENT resolves the namecache
1217 	 *	 entry to NULL, otherwise we just return the error and
1218 	 *	 leave the namecache unresolved.
1219 	 *
1220 	 * NOTE: multiple hammer2_inode structures can be aliased to the
1221 	 *	 same chain element, for example for hardlinks.  This
1222 	 *	 use case does not 'reattach' inode associations that
1223 	 *	 might already exist, but always allocates a new one.
1224 	 *
1225 	 * WARNING: inode structure is locked exclusively via inode_get
1226 	 *	    but chain was locked shared.  inode_unlock()
1227 	 *	    will handle it properly.
1228 	 */
1229 	if (ip) {
1230 		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
1231 		if (error == 0) {
1232 			vn_unlock(vp);
1233 			cache_setvp(ap->a_nch, vp);
1234 		} else if (error == ENOENT) {
1235 			cache_setvp(ap->a_nch, NULL);
1236 		}
1237 		hammer2_inode_unlock(ip);
1238 
1239 		/*
1240 		 * The vp should not be released until after we've disposed
1241 		 * of our locks, because it might cause vop_inactive() to
1242 		 * be called.
1243 		 */
1244 		if (vp)
1245 			vrele(vp);
1246 	} else {
1247 		error = ENOENT;
1248 		cache_setvp(ap->a_nch, NULL);
1249 	}
1250 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1251 	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1252 		("resolve error %d/%p ap %p\n",
1253 		 error, ap->a_nch->ncp->nc_vp, ap));
1254 
1255 	return error;
1256 }
1257 
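/*
 * Editor's sketch (not part of the original source): the frontend XOP
 * life cycle used by nresolve above and by most directory operations in
 * this file.  All names below appear in the real code; the arrangement
 * is a simplified summary, with 'xop', 'dip', 'name', 'name_len' and
 * 'error' assumed from context.
 */
#if 0
	xop = hammer2_xop_alloc(dip, 0);		 /* allocate op */
	hammer2_xop_setname(&xop->head, name, name_len); /* stage input */
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve); /* backends */
	error = hammer2_xop_collect(&xop->head, 0);	 /* quorum result */
	error = hammer2_error_to_errno(error);
	/* ... consume xop->head.cluster ... */
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); /* release */
#endif
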
1258 static
1259 int
1260 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1261 {
1262 	hammer2_inode_t *dip;
1263 	hammer2_tid_t inum;
1264 	int error;
1265 
1266 	dip = VTOI(ap->a_dvp);
1267 	inum = dip->meta.iparent;
1268 	*ap->a_vpp = NULL;
1269 
1270 	if (inum) {
1271 		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
1272 					 inum, ap->a_vpp);
1273 	} else {
1274 		error = ENOENT;
1275 	}
1276 	return error;
1277 }
1278 
1279 static
1280 int
1281 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1282 {
1283 	hammer2_inode_t *dip;
1284 	hammer2_inode_t *nip;
1285 	struct namecache *ncp;
1286 	const uint8_t *name;
1287 	size_t name_len;
1288 	hammer2_tid_t inum;
1289 	int error;
1290 
1291 	dip = VTOI(ap->a_dvp);
1292 	if (dip->pmp->ronly)
1293 		return (EROFS);
1294 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1295 		return (ENOSPC);
1296 
1297 	ncp = ap->a_nch->ncp;
1298 	name = ncp->nc_name;
1299 	name_len = ncp->nc_nlen;
1300 
1301 	hammer2_pfs_memory_wait(dip->pmp);
1302 	hammer2_trans_init(dip->pmp, 0);
1303 
1304 	inum = hammer2_trans_newinum(dip->pmp);
1305 
1306 	/*
1307 	 * Create the actual inode as a hidden file in the iroot, then
1308 	 * create the directory entry.  The creation of the actual inode
1309 	 * sets its nlinks to 1 which is the value we desire.
1310 	 */
1311 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1312 				   NULL, 0, inum,
1313 				   inum, 0, 0,
1314 				   0, &error);
1315 	if (error) {
1316 		error = hammer2_error_to_errno(error);
1317 	} else {
1318 		error = hammer2_dirent_create(dip, name, name_len,
1319 					      nip->meta.inum, nip->meta.type);
1320 		/* returns UNIX error code */
1321 	}
1322 	if (error) {
1323 		if (nip) {
1324 			hammer2_inode_unlink_finisher(nip, 0);
1325 			hammer2_inode_unlock(nip);
1326 			nip = NULL;
1327 		}
1328 		*ap->a_vpp = NULL;
1329 	} else {
1330 		*ap->a_vpp = hammer2_igetv(nip, &error);
1331 		hammer2_inode_unlock(nip);
1332 	}
1333 
1334 	/*
1335 	 * Update dip's mtime
1336 	 *
1337 	 * We can use a shared inode lock and allow the meta.mtime update
1338 	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
1339 	 */
1340 	if (error == 0) {
1341 		uint64_t mtime;
1342 
1343 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1344 		hammer2_update_time(&mtime);
1345 		hammer2_inode_modify(dip);
1346 		dip->meta.mtime = mtime;
1347 		hammer2_inode_unlock(dip);
1348 	}
1349 
1350 	hammer2_trans_done(dip->pmp);
1351 
1352 	if (error == 0) {
1353 		cache_setunresolved(ap->a_nch);
1354 		cache_setvp(ap->a_nch, *ap->a_vpp);
1355 		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1356 	}
1357 	return error;
1358 }
1359 
1360 static
1361 int
1362 hammer2_vop_open(struct vop_open_args *ap)
1363 {
1364 	return vop_stdopen(ap);
1365 }
1366 
1367 /*
1368  * hammer2_vop_advlock { vp, id, op, fl, flags }
1369  */
1370 static
1371 int
1372 hammer2_vop_advlock(struct vop_advlock_args *ap)
1373 {
1374 	hammer2_inode_t *ip = VTOI(ap->a_vp);
1375 	hammer2_off_t size;
1376 
1377 	size = ip->meta.size;
1378 	return (lf_advlock(ap, &ip->advlock, size));
1379 }
1380 
1381 static
1382 int
1383 hammer2_vop_close(struct vop_close_args *ap)
1384 {
1385 	return vop_stdclose(ap);
1386 }
1387 
1388 /*
1389  * hammer2_vop_nlink { nch, dvp, vp, cred }
1390  *
1391  * Create a hardlink from (vp) to {dvp, nch}.
1392  */
1393 static
1394 int
1395 hammer2_vop_nlink(struct vop_nlink_args *ap)
1396 {
1397 	hammer2_inode_t *tdip;	/* target directory to create link in */
1398 	hammer2_inode_t *ip;	/* inode we are hardlinking to */
1399 	struct namecache *ncp;
1400 	const uint8_t *name;
1401 	size_t name_len;
1402 	int error;
1403 
1404 	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1405 		return(EXDEV);
1406 
1407 	tdip = VTOI(ap->a_dvp);
1408 	if (tdip->pmp->ronly)
1409 		return (EROFS);
1410 	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
1411 		return (ENOSPC);
1412 
1413 	ncp = ap->a_nch->ncp;
1414 	name = ncp->nc_name;
1415 	name_len = ncp->nc_nlen;
1416 
1417 	/*
1418 	 * ip represents the file being hardlinked.  The file could be a
1419 	 * normal file or a hardlink target if it has already been hardlinked.
1420 	 * (with the new semantics, it will almost always be a hardlink
1421 	 * target).
1422 	 *
1423 	 * Bump nlinks and potentially also create or move the hardlink
1424 	 * target in the parent directory common to (ip) and (tdip).  The
1425 	 * consolidation code can modify ip->cluster.  The returned cluster
1426 	 * is locked.
1427 	 */
1428 	ip = VTOI(ap->a_vp);
1429 	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1430 	hammer2_pfs_memory_wait(ip->pmp);
1431 	hammer2_trans_init(ip->pmp, 0);
1432 
1433 	/*
1434 	 * Target should be an indexed inode or there's no way we will ever
1435 	 * be able to find it!
1436 	 */
1437 	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1438 
1439 	error = 0;
1440 
1441 	/*
1442 	 * Can return NULL and error == EXDEV if the common parent
1443 	 * crosses a directory with the xlink flag set.
1444 	 */
1445 	hammer2_inode_lock(tdip, 0);
1446 	hammer2_inode_lock(ip, 0);
1447 
1448 	/*
1449 	 * Create the directory entry and bump nlinks.
1450 	 */
1451 	if (error == 0) {
1452 		error = hammer2_dirent_create(tdip, name, name_len,
1453 					      ip->meta.inum, ip->meta.type);
1454 		hammer2_inode_modify(ip);
1455 		++ip->meta.nlinks;
1456 	}
1457 	if (error == 0) {
1458 		/*
1459 		 * Update dip's mtime
1460 		 */
1461 		uint64_t mtime;
1462 
1463 		hammer2_update_time(&mtime);
1464 		hammer2_inode_modify(tdip);
1465 		tdip->meta.mtime = mtime;
1466 
1467 		cache_setunresolved(ap->a_nch);
1468 		cache_setvp(ap->a_nch, ap->a_vp);
1469 	}
1470 	hammer2_inode_unlock(ip);
1471 	hammer2_inode_unlock(tdip);
1472 
1473 	hammer2_trans_done(ip->pmp);
1474 	hammer2_knote(ap->a_vp, NOTE_LINK);
1475 	hammer2_knote(ap->a_dvp, NOTE_WRITE);
1476 
1477 	return error;
1478 }
1479 
1480 /*
1481  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1482  *
1483  * The operating system has already ensured that the directory entry
1484  * does not exist and done all appropriate namespace locking.
1485  */
1486 static
1487 int
1488 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1489 {
1490 	hammer2_inode_t *dip;
1491 	hammer2_inode_t *nip;
1492 	struct namecache *ncp;
1493 	const uint8_t *name;
1494 	size_t name_len;
1495 	hammer2_tid_t inum;
1496 	int error;
1497 
1498 	dip = VTOI(ap->a_dvp);
1499 	if (dip->pmp->ronly)
1500 		return (EROFS);
1501 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1502 		return (ENOSPC);
1503 
1504 	ncp = ap->a_nch->ncp;
1505 	name = ncp->nc_name;
1506 	name_len = ncp->nc_nlen;
1507 	hammer2_pfs_memory_wait(dip->pmp);
1508 	hammer2_trans_init(dip->pmp, 0);
1509 
1510 	inum = hammer2_trans_newinum(dip->pmp);
1511 
1512 	/*
1513 	 * Create the actual inode as a hidden file in the iroot, then
1514 	 * create the directory entry.  The creation of the actual inode
1515 	 * sets its nlinks to 1 which is the value we desire.
1516 	 */
1517 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1518 				   NULL, 0, inum,
1519 				   inum, 0, 0,
1520 				   0, &error);
1521 
1522 	if (error) {
1523 		error = hammer2_error_to_errno(error);
1524 	} else {
1525 		error = hammer2_dirent_create(dip, name, name_len,
1526 					      nip->meta.inum, nip->meta.type);
1527 	}
1528 	if (error) {
1529 		if (nip) {
1530 			hammer2_inode_unlink_finisher(nip, 0);
1531 			hammer2_inode_unlock(nip);
1532 			nip = NULL;
1533 		}
1534 		*ap->a_vpp = NULL;
1535 	} else {
1536 		*ap->a_vpp = hammer2_igetv(nip, &error);
1537 		hammer2_inode_unlock(nip);
1538 	}
1539 
1540 	/*
1541 	 * Update dip's mtime
1542 	 */
1543 	if (error == 0) {
1544 		uint64_t mtime;
1545 
1546 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1547 		hammer2_update_time(&mtime);
1548 		hammer2_inode_modify(dip);
1549 		dip->meta.mtime = mtime;
1550 		hammer2_inode_unlock(dip);
1551 	}
1552 
1553 	hammer2_trans_done(dip->pmp);
1554 
1555 	if (error == 0) {
1556 		cache_setunresolved(ap->a_nch);
1557 		cache_setvp(ap->a_nch, *ap->a_vpp);
1558 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1559 	}
1560 	return error;
1561 }
1562 
1563 /*
1564  * Make a device node (typically a fifo)
1565  */
1566 static
1567 int
1568 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1569 {
1570 	hammer2_inode_t *dip;
1571 	hammer2_inode_t *nip;
1572 	struct namecache *ncp;
1573 	const uint8_t *name;
1574 	size_t name_len;
1575 	hammer2_tid_t inum;
1576 	int error;
1577 
1578 	dip = VTOI(ap->a_dvp);
1579 	if (dip->pmp->ronly)
1580 		return (EROFS);
1581 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1582 		return (ENOSPC);
1583 
1584 	ncp = ap->a_nch->ncp;
1585 	name = ncp->nc_name;
1586 	name_len = ncp->nc_nlen;
1587 	hammer2_pfs_memory_wait(dip->pmp);
1588 	hammer2_trans_init(dip->pmp, 0);
1589 
1590 	/*
1591 	 * Create the device inode and then create the directory entry.
1592 	 */
1593 	inum = hammer2_trans_newinum(dip->pmp);
1594 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1595 				   NULL, 0, inum,
1596 				   inum, 0, 0,
1597 				   0, &error);
1598 	if (error == 0) {
1599 		error = hammer2_dirent_create(dip, name, name_len,
1600 					      nip->meta.inum, nip->meta.type);
1601 	}
1602 	if (error) {
1603 		if (nip) {
1604 			hammer2_inode_unlink_finisher(nip, 0);
1605 			hammer2_inode_unlock(nip);
1606 			nip = NULL;
1607 		}
1608 		*ap->a_vpp = NULL;
1609 	} else {
1610 		*ap->a_vpp = hammer2_igetv(nip, &error);
1611 		hammer2_inode_unlock(nip);
1612 	}
1613 
1614 	/*
1615 	 * Update dip's mtime
1616 	 */
1617 	if (error == 0) {
1618 		uint64_t mtime;
1619 
1620 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1621 		hammer2_update_time(&mtime);
1622 		hammer2_inode_modify(dip);
1623 		dip->meta.mtime = mtime;
1624 		hammer2_inode_unlock(dip);
1625 	}
1626 
1627 	hammer2_trans_done(dip->pmp);
1628 
1629 	if (error == 0) {
1630 		cache_setunresolved(ap->a_nch);
1631 		cache_setvp(ap->a_nch, *ap->a_vpp);
1632 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1633 	}
1634 	return error;
1635 }
1636 
1637 /*
1638  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1639  */
1640 static
1641 int
1642 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1643 {
1644 	hammer2_inode_t *dip;
1645 	hammer2_inode_t *nip;
1646 	struct namecache *ncp;
1647 	const uint8_t *name;
1648 	size_t name_len;
1649 	hammer2_tid_t inum;
1650 	int error;
1651 
1652 	dip = VTOI(ap->a_dvp);
1653 	if (dip->pmp->ronly)
1654 		return (EROFS);
1655 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1656 		return (ENOSPC);
1657 
1658 	ncp = ap->a_nch->ncp;
1659 	name = ncp->nc_name;
1660 	name_len = ncp->nc_nlen;
1661 	hammer2_pfs_memory_wait(dip->pmp);
1662 	hammer2_trans_init(dip->pmp, 0);
1663 
1664 	ap->a_vap->va_type = VLNK;	/* enforce type */
1665 
1666 	/*
1667 	 * Create the softlink as an inode and then create the directory
1668 	 * entry.
1669 	 */
1670 	inum = hammer2_trans_newinum(dip->pmp);
1671 
1672 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1673 				   NULL, 0, inum,
1674 				   inum, 0, 0,
1675 				   0, &error);
1676 	if (error == 0) {
1677 		error = hammer2_dirent_create(dip, name, name_len,
1678 					      nip->meta.inum, nip->meta.type);
1679 	}
1680 	if (error) {
1681 		if (nip) {
1682 			hammer2_inode_unlink_finisher(nip, 0);
1683 			hammer2_inode_unlock(nip);
1684 			nip = NULL;
1685 		}
1686 		*ap->a_vpp = NULL;
1687 		hammer2_trans_done(dip->pmp);
1688 		return error;
1689 	}
1690 	*ap->a_vpp = hammer2_igetv(nip, &error);
1691 
1692 	/*
1693 	 * Build the softlink (~like file data) and finalize the namecache.
1694 	 */
1695 	if (error == 0) {
1696 		size_t bytes;
1697 		struct uio auio;
1698 		struct iovec aiov;
1699 
1700 		bytes = strlen(ap->a_target);
1701 
1702 		hammer2_inode_unlock(nip);
1703 		bzero(&auio, sizeof(auio));
1704 		bzero(&aiov, sizeof(aiov));
1705 		auio.uio_iov = &aiov;
1706 		auio.uio_segflg = UIO_SYSSPACE;
1707 		auio.uio_rw = UIO_WRITE;
1708 		auio.uio_resid = bytes;
1709 		auio.uio_iovcnt = 1;
1710 		auio.uio_td = curthread;
1711 		aiov.iov_base = ap->a_target;
1712 		aiov.iov_len = bytes;
1713 		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1714 		/* XXX handle error */
1715 		error = 0;
1716 	} else {
1717 		hammer2_inode_unlock(nip);
1718 	}
1719 
1720 	/*
1721 	 * Update dip's mtime
1722 	 */
1723 	if (error == 0) {
1724 		uint64_t mtime;
1725 
1726 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1727 		hammer2_update_time(&mtime);
1728 		hammer2_inode_modify(dip);
1729 		dip->meta.mtime = mtime;
1730 		hammer2_inode_unlock(dip);
1731 	}
1732 
1733 	hammer2_trans_done(dip->pmp);
1734 
1735 	/*
1736 	 * Finalize namecache
1737 	 */
1738 	if (error == 0) {
1739 		cache_setunresolved(ap->a_nch);
1740 		cache_setvp(ap->a_nch, *ap->a_vpp);
1741 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1742 	}
1743 	return error;
1744 }
1745 
1746 /*
1747  * hammer2_vop_nremove { nch, dvp, cred }
1748  */
1749 static
1750 int
1751 hammer2_vop_nremove(struct vop_nremove_args *ap)
1752 {
1753 	hammer2_xop_unlink_t *xop;
1754 	hammer2_inode_t *dip;
1755 	hammer2_inode_t *ip;
1756 	struct namecache *ncp;
1757 	int error;
1758 	int isopen;
1759 
1760 	dip = VTOI(ap->a_dvp);
1761 	if (dip->pmp->ronly)
1762 		return (EROFS);
1763 #if 0
1764 	/* allow removals, but expect the user to also bulkfree */
1765 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1766 		return (ENOSPC);
1767 #endif
1768 
1769 	ncp = ap->a_nch->ncp;
1770 
1771 	hammer2_pfs_memory_wait(dip->pmp);
1772 	hammer2_trans_init(dip->pmp, 0);
1773 	hammer2_inode_lock(dip, 0);
1774 
1775 	/*
1776 	 * The unlink XOP unlinks the path from the directory and
1777 	 * locates and returns the cluster associated with the real inode.
1778 	 * We have to handle nlinks here on the frontend.
1779 	 */
1780 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1781 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1782 
1783 	/*
1784 	 * The namecache entry is locked so nobody can use this namespace.
1785 	 * Calculate isopen to determine if this namespace has an open vp
1786 	 * associated with it and resolve the vp only if it does.
1787 	 *
1788 	 * We try to avoid resolving the vnode if nobody has it open, but
1789 	 * note that the test is via this namespace only.
1790 	 */
1791 	isopen = cache_isopen(ap->a_nch);
1792 	xop->isdir = 0;
1793 	xop->dopermanent = 0;
1794 	hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1795 
1796 	/*
1797 	 * Collect the real inode and adjust nlinks, destroy the real
1798 	 * inode if nlinks transitions to 0 and it was the real inode
1799 	 * (else it has already been removed).
1800 	 */
1801 	error = hammer2_xop_collect(&xop->head, 0);
1802 	error = hammer2_error_to_errno(error);
1803 	hammer2_inode_unlock(dip);
1804 
1805 	if (error == 0) {
1806 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1807 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1808 		if (ip) {
1809 			hammer2_inode_unlink_finisher(ip, isopen);
1810 			hammer2_inode_unlock(ip);
1811 		}
1812 	} else {
1813 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1814 	}
1815 
1816 	/*
1817 	 * Update dip's mtime
1818 	 */
1819 	if (error == 0) {
1820 		uint64_t mtime;
1821 
1822 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1823 		hammer2_update_time(&mtime);
1824 		hammer2_inode_modify(dip);
1825 		dip->meta.mtime = mtime;
1826 		hammer2_inode_unlock(dip);
1827 	}
1828 
1829 	hammer2_inode_run_sideq(dip->pmp, 0);
1830 	hammer2_trans_done(dip->pmp);
1831 	if (error == 0) {
1832 		cache_unlink(ap->a_nch);
1833 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1834 	}
1835 	return (error);
1836 }
1837 
1838 /*
1839  * hammer2_vop_nrmdir { nch, dvp, cred }
1840  */
1841 static
1842 int
1843 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1844 {
1845 	hammer2_xop_unlink_t *xop;
1846 	hammer2_inode_t *dip;
1847 	hammer2_inode_t *ip;
1848 	struct namecache *ncp;
1849 	int isopen;
1850 	int error;
1851 
1852 	dip = VTOI(ap->a_dvp);
1853 	if (dip->pmp->ronly)
1854 		return (EROFS);
1855 #if 0
1856 	/* allow removals, but expect the user to also bulkfree */
1857 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1858 		return (ENOSPC);
1859 #endif
1860 
1861 	hammer2_pfs_memory_wait(dip->pmp);
1862 	hammer2_trans_init(dip->pmp, 0);
1863 	hammer2_inode_lock(dip, 0);
1864 
1865 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1866 
1867 	ncp = ap->a_nch->ncp;
1868 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1869 	isopen = cache_isopen(ap->a_nch);
1870 	xop->isdir = 1;
1871 	xop->dopermanent = 0;
1872 	hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1873 
1874 	/*
1875 	 * Collect the real inode and adjust nlinks, destroy the real
1876 	 * inode if nlinks transitions to 0 and it was the real inode
1877 	 * (else it has already been removed).
1878 	 */
1879 	error = hammer2_xop_collect(&xop->head, 0);
1880 	error = hammer2_error_to_errno(error);
1881 	hammer2_inode_unlock(dip);
1882 
1883 	if (error == 0) {
1884 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
1885 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1886 		if (ip) {
1887 			hammer2_inode_unlink_finisher(ip, isopen);
1888 			hammer2_inode_unlock(ip);
1889 		}
1890 	} else {
1891 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1892 	}
1893 
1894 	/*
1895 	 * Update dip's mtime
1896 	 */
1897 	if (error == 0) {
1898 		uint64_t mtime;
1899 
1900 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1901 		hammer2_update_time(&mtime);
1902 		hammer2_inode_modify(dip);
1903 		dip->meta.mtime = mtime;
1904 		hammer2_inode_unlock(dip);
1905 	}
1906 
1907 	hammer2_inode_run_sideq(dip->pmp, 0);
1908 	hammer2_trans_done(dip->pmp);
1909 	if (error == 0) {
1910 		cache_unlink(ap->a_nch);
1911 		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1912 	}
1913 	return (error);
1914 }
1915 
1916 /*
1917  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
1918  */
1919 static
1920 int
1921 hammer2_vop_nrename(struct vop_nrename_args *ap)
1922 {
1923 	struct namecache *fncp;
1924 	struct namecache *tncp;
1925 	hammer2_inode_t *fdip;	/* source directory */
1926 	hammer2_inode_t *tdip;	/* target directory */
1927 	hammer2_inode_t *ip;	/* file being renamed */
1928 	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
1929 	const uint8_t *fname;
1930 	size_t fname_len;
1931 	const uint8_t *tname;
1932 	size_t tname_len;
1933 	int error;
1934 	int update_tdip;
1935 	int update_fdip;
1936 	hammer2_key_t tlhc;
1937 
1938 	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
1939 		return(EXDEV);
1940 	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
1941 		return(EXDEV);
1942 
1943 	fdip = VTOI(ap->a_fdvp);	/* source directory */
1944 	tdip = VTOI(ap->a_tdvp);	/* target directory */
1945 
1946 	if (fdip->pmp->ronly)
1947 		return (EROFS);
1948 	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
1949 		return (ENOSPC);
1950 
1951 	fncp = ap->a_fnch->ncp;		/* entry name in source */
1952 	fname = fncp->nc_name;
1953 	fname_len = fncp->nc_nlen;
1954 
1955 	tncp = ap->a_tnch->ncp;		/* entry name in target */
1956 	tname = tncp->nc_name;
1957 	tname_len = tncp->nc_nlen;
1958 
1959 	hammer2_pfs_memory_wait(tdip->pmp);
1960 	hammer2_trans_init(tdip->pmp, 0);
1961 
1962 	update_tdip = 0;
1963 	update_fdip = 0;
1964 
1965 	ip = VTOI(fncp->nc_vp);
1966 	hammer2_inode_ref(ip);		/* extra ref */
1967 
1968 	/*
1969 	 * Lookup the target name to determine if a directory entry
1970 	 * is being overwritten.  We only hold related inode locks
1971 	 * temporarily, the operating system is expected to protect
1972 	 * against rename races.
1973 	 */
1974 	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
1975 	if (tip)
1976 		hammer2_inode_ref(tip);	/* extra ref */
1977 
1978 	/*
1979 	 * NOTE: Common-parent resolution can return NULL with error ==
1980 	 *	 EXDEV if it crosses a directory with the xlink flag set.
1981 	 *
1982 	 * For now try to avoid deadlocks with a simple pointer address
1983 	 * test.  (tip) can be NULL.
1984 	 */
1985 	error = 0;
1986 	if (fdip <= tdip) {
1987 		hammer2_inode_lock(fdip, 0);
1988 		hammer2_inode_lock(tdip, 0);
1989 	} else {
1990 		hammer2_inode_lock(tdip, 0);
1991 		hammer2_inode_lock(fdip, 0);
1992 	}
1993 	if (tip) {
1994 		if (ip <= tip) {
1995 			hammer2_inode_lock(ip, 0);
1996 			hammer2_inode_lock(tip, 0);
1997 		} else {
1998 			hammer2_inode_lock(tip, 0);
1999 			hammer2_inode_lock(ip, 0);
2000 		}
2001 	} else {
2002 		hammer2_inode_lock(ip, 0);
2003 	}
2004 
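/*
 * Editor's note: an illustrative generalization of the address-ordered
 * locking above (sketch only, not compiled; lock_inode_pair is a
 * hypothetical helper).  Acquiring locks in a canonical order prevents an
 * ABBA deadlock between two racing renames.  Like the code above, the
 * sketch assumes the inode lock tolerates ip1 == ip2 (same-directory
 * rename), in which case the lock is simply acquired twice.
 */
#if 0
static void
lock_inode_pair(hammer2_inode_t *ip1, hammer2_inode_t *ip2)
{
	/* lower pointer address always locks first */
	if (ip1 <= ip2) {
		hammer2_inode_lock(ip1, 0);
		hammer2_inode_lock(ip2, 0);
	} else {
		hammer2_inode_lock(ip2, 0);
		hammer2_inode_lock(ip1, 0);
	}
}
#endif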
2005 #if 0
2006 	/*
2007 	 * Delete the target namespace.
2008 	 *
2009 	 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
2010 	 */
2011 	{
2012 		hammer2_xop_unlink_t *xop2;
2013 		hammer2_inode_t *tip;
2014 		int isopen;
2015 
2016 		/*
2017 		 * The unlink XOP unlinks the path from the directory and
2018 		 * locates and returns the cluster associated with the real
2019 		 * inode.  We have to handle nlinks here on the frontend.
2020 		 */
2021 		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2022 		hammer2_xop_setname(&xop2->head, tname, tname_len);
2023 		isopen = cache_isopen(ap->a_tnch);
2024 		xop2->isdir = -1;
2025 		xop2->dopermanent = 0;
2026 		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);
2027 
2028 		/*
2029 		 * Collect the real inode and adjust nlinks, destroy the real
2030 		 * inode if nlinks transitions to 0 and it was the real inode
2031 		 * (else it has already been removed).
2032 		 */
2033 		tnch_error = hammer2_xop_collect(&xop2->head, 0);
2034 		tnch_error = hammer2_error_to_errno(tnch_error);
2035 		/* hammer2_inode_unlock(tdip); */
2036 
2037 		if (tnch_error == 0) {
2038 			tip = hammer2_inode_get(tdip->pmp, NULL,
2039 						&xop2->head.cluster, -1);
2040 			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2041 			if (tip) {
2042 				hammer2_inode_unlink_finisher(tip, isopen);
2043 				hammer2_inode_unlock(tip);
2044 			}
2045 		} else {
2046 			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2047 		}
2048 		/* hammer2_inode_lock(tdip, 0); */
2049 
2050 		if (tnch_error && tnch_error != ENOENT) {
2051 			error = tnch_error;
2052 			goto done2;
2053 		}
2054 		update_tdip = 1;
2055 	}
2056 #endif
2057 
2058 	/*
2059 	 * Resolve the collision space for (tdip, tname, tname_len)
2060 	 *
2061 	 * tdip must be held exclusively locked to prevent races since
2062 	 * multiple filenames can end up in the same collision space.
2063 	 */
2064 	{
2065 		hammer2_xop_scanlhc_t *sxop;
2066 		hammer2_tid_t lhcbase;
2067 
2068 		tlhc = hammer2_dirhash(tname, tname_len);
2069 		lhcbase = tlhc;
2070 		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2071 		sxop->lhc = tlhc;
2072 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
2073 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2074 			if (tlhc != sxop->head.cluster.focus->bref.key)
2075 				break;
2076 			++tlhc;
2077 		}
2078 		error = hammer2_error_to_errno(error);
2079 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2080 
2081 		if (error) {
2082 			if (error != ENOENT)
2083 				goto done2;
2084 			++tlhc;
2085 			error = 0;
2086 		}
2087 		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2088 			error = ENOSPC;
2089 			goto done2;
2090 		}
2091 	}
2092 
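/*
 * Editor's note: illustrative restatement of the iteration-space check
 * above (sketch only; same_collision_space is a hypothetical helper).
 * Keys that differ only within HAMMER2_DIRHASH_LOMASK share a collision
 * space; if ++tlhc walks out of that space the collision space for this
 * name is exhausted and the operation fails with ENOSPC.
 */
#if 0
static __inline int
same_collision_space(hammer2_key_t key1, hammer2_key_t key2)
{
	return (((key1 ^ key2) & ~HAMMER2_DIRHASH_LOMASK) == 0);
}
#endif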
2093 	/*
2094 	 * Ready to go, issue the rename to the backend.  Note that meta-data
2095 	 * updates to the related inodes occur separately from the rename
2096 	 * operation.
2097 	 *
2098 	 * NOTE: While it is not necessary to update ip->meta.name*, doing
2099 	 *	 so aids catastrophic recovery and debugging.
2100 	 */
2101 	if (error == 0) {
2102 		hammer2_xop_nrename_t *xop4;
2103 
2104 		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
2105 		xop4->lhc = tlhc;
2106 		xop4->ip_key = ip->meta.name_key;
2107 		hammer2_xop_setip2(&xop4->head, ip);
2108 		hammer2_xop_setip3(&xop4->head, tdip);
2109 		hammer2_xop_setname(&xop4->head, fname, fname_len);
2110 		hammer2_xop_setname2(&xop4->head, tname, tname_len);
2111 		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);
2112 
2113 		error = hammer2_xop_collect(&xop4->head, 0);
2114 		error = hammer2_error_to_errno(error);
2115 		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
2116 
2117 		if (error == ENOENT)
2118 			error = 0;
2119 
2120 		/*
2121 		 * Update inode meta-data.
2122 		 *
2123 		 * WARNING!  The in-memory inode (ip) structure does not
2124 		 *	     maintain a copy of the inode's filename buffer.
2125 		 */
2126 		if (error == 0 &&
2127 		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
2128 			hammer2_inode_modify(ip);
2129 			ip->meta.name_len = tname_len;
2130 			ip->meta.name_key = tlhc;
2131 		}
2132 		if (error == 0) {
2133 			hammer2_inode_modify(ip);
2134 			ip->meta.iparent = tdip->meta.inum;
2135 		}
2136 		update_fdip = 1;
2137 		update_tdip = 1;
2138 	}
2139 
2140 done2:
2141 	/*
2142 	 * If no error, the backend has replaced the target directory entry.
2143 	 * Adjust nlinks on the originally replaced target if it exists.
2144 	 */
2145 	if (error == 0 && tip) {
2146 		int isopen;
2147 
2148 		isopen = cache_isopen(ap->a_tnch);
2149 		hammer2_inode_unlink_finisher(tip, isopen);
2150 	}
2151 
2152 	/*
2153 	 * Update directory mtimes to reflect that something changed.
2154 	 */
2155 	if (update_fdip || update_tdip) {
2156 		uint64_t mtime;
2157 
2158 		hammer2_update_time(&mtime);
2159 		if (update_fdip) {
2160 			hammer2_inode_modify(fdip);
2161 			fdip->meta.mtime = mtime;
2162 		}
2163 		if (update_tdip) {
2164 			hammer2_inode_modify(tdip);
2165 			tdip->meta.mtime = mtime;
2166 		}
2167 	}
2168 	if (tip) {
2169 		hammer2_inode_unlock(tip);
2170 		hammer2_inode_drop(tip);
2171 	}
2172 	hammer2_inode_unlock(ip);
2173 	hammer2_inode_unlock(tdip);
2174 	hammer2_inode_unlock(fdip);
2175 	hammer2_inode_drop(ip);
2176 	hammer2_inode_run_sideq(fdip->pmp, 0);
2177 
2178 	hammer2_trans_done(tdip->pmp);
2179 
2180 	/*
2181 	 * Issue the namecache update after unlocking all the internal
2182 	 * hammer2 structures; otherwise we might deadlock.
2183 	 */
2184 	if (error == 0 && tip) {
2185 		cache_unlink(ap->a_tnch);
2186 		cache_setunresolved(ap->a_tnch);
2187 	}
2188 	if (error == 0) {
2189 		cache_rename(ap->a_fnch, ap->a_tnch);
2190 		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
2191 		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
2192 		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
2193 	}
2194 
2195 	return (error);
2196 }
2197 
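/*
 * Editor's note: both nrmdir and nrename above stamp directory mtimes via
 * hammer2_update_time().  A minimal sketch of such a helper is shown below,
 * assuming hammer2 inode times are encoded as a 64-bit microsecond count;
 * this is a sketch, not the authoritative implementation.
 */
#if 0
static void
example_update_time(uint64_t *timep)
{
	struct timeval tv;

	getmicrotime(&tv);	/* cheap low-resolution kernel clock */
	*timep = (uint64_t)tv.tv_sec * 1000000 + tv.tv_usec;
}
#endif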
2198 /*
2199  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2200  */
2201 static
2202 int
2203 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2204 {
2205 	hammer2_inode_t *ip;
2206 	int error;
2207 
2208 	ip = VTOI(ap->a_vp);
2209 
2210 	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2211 			      ap->a_fflag, ap->a_cred);
2212 	return (error);
2213 }
2214 
2215 static
2216 int
2217 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2218 {
2219 	struct mount *mp;
2220 	hammer2_pfs_t *pmp;
2221 	int rc;
2222 
2223 	switch (ap->a_op) {
2224 	case (MOUNTCTL_SET_EXPORT):
2225 		mp = ap->a_head.a_ops->head.vv_mount;
2226 		pmp = MPTOPMP(mp);
2227 
2228 		if (ap->a_ctllen != sizeof(struct export_args))
2229 			rc = (EINVAL);
2230 		else
2231 			rc = vfs_export(mp, &pmp->export,
2232 					(const struct export_args *)ap->a_ctl);
2233 		break;
2234 	default:
2235 		rc = vop_stdmountctl(ap);
2236 		break;
2237 	}
2238 	return (rc);
2239 }
2240 
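/*
 * Editor's note: MOUNTCTL_SET_EXPORT is normally driven from userland
 * (e.g. by mountd) via the mountctl(2) system call.  The fragment below
 * is a hedged userland usage sketch, assuming the standard DragonFly
 * mountctl(2) signature and <sys/mount.h>, <sys/mountctl.h>, <err.h>;
 * the mount path is hypothetical.
 */
#if 0
	struct export_args ea;

	bzero(&ea, sizeof(ea));
	ea.ex_flags = MNT_EXPORTED;
	/* ... fill in network address/mask fields as needed ... */
	if (mountctl("/mnt/pfs", MOUNTCTL_SET_EXPORT, -1,
		     &ea, sizeof(ea), NULL, 0) < 0)
		err(1, "mountctl");
#endif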
2241 /*
2242  * KQFILTER
2243  */
2244 static void filt_hammer2detach(struct knote *kn);
2245 static int filt_hammer2read(struct knote *kn, long hint);
2246 static int filt_hammer2write(struct knote *kn, long hint);
2247 static int filt_hammer2vnode(struct knote *kn, long hint);
2248 
2249 static struct filterops hammer2read_filtops =
2250 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2251 	  NULL, filt_hammer2detach, filt_hammer2read };
2252 static struct filterops hammer2write_filtops =
2253 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2254 	  NULL, filt_hammer2detach, filt_hammer2write };
2255 static struct filterops hammer2vnode_filtops =
2256 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2257 	  NULL, filt_hammer2detach, filt_hammer2vnode };
2258 
2259 static
2260 int
2261 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2262 {
2263 	struct vnode *vp = ap->a_vp;
2264 	struct knote *kn = ap->a_kn;
2265 
2266 	switch (kn->kn_filter) {
2267 	case EVFILT_READ:
2268 		kn->kn_fop = &hammer2read_filtops;
2269 		break;
2270 	case EVFILT_WRITE:
2271 		kn->kn_fop = &hammer2write_filtops;
2272 		break;
2273 	case EVFILT_VNODE:
2274 		kn->kn_fop = &hammer2vnode_filtops;
2275 		break;
2276 	default:
2277 		return (EOPNOTSUPP);
2278 	}
2279 
2280 	kn->kn_hook = (caddr_t)vp;
2281 
2282 	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2283 
2284 	return(0);
2285 }
2286 
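/*
 * Editor's note: the filters registered above are what a userland
 * kevent(2) consumer ends up attached to.  Hedged userland usage sketch
 * using the standard kqueue API (<sys/event.h>, <err.h>); fd and kq are
 * assumed to be an open file descriptor and a kqueue descriptor.
 */
#if 0
	struct kevent kev;

	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	       NOTE_WRITE | NOTE_LINK | NOTE_RENAME, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
		err(1, "kevent");
	/* later kevent() calls return events posted via hammer2_knote() */
#endif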
2287 static void
2288 filt_hammer2detach(struct knote *kn)
2289 {
2290 	struct vnode *vp = (void *)kn->kn_hook;
2291 
2292 	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2293 }
2294 
2295 static int
2296 filt_hammer2read(struct knote *kn, long hint)
2297 {
2298 	struct vnode *vp = (void *)kn->kn_hook;
2299 	hammer2_inode_t *ip = VTOI(vp);
2300 	off_t off;
2301 
2302 	if (hint == NOTE_REVOKE) {
2303 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2304 		return(1);
2305 	}
2306 	off = ip->meta.size - kn->kn_fp->f_offset;
2307 	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2308 	if (kn->kn_sfflags & NOTE_OLDAPI)
2309 		return(1);
2310 	return (kn->kn_data != 0);
2311 }
2312 
2314 static int
2315 filt_hammer2write(struct knote *kn, long hint)
2316 {
2317 	if (hint == NOTE_REVOKE)
2318 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2319 	kn->kn_data = 0;
2320 	return (1);
2321 }
2322 
2323 static int
2324 filt_hammer2vnode(struct knote *kn, long hint)
2325 {
2326 	if (kn->kn_sfflags & hint)
2327 		kn->kn_fflags |= hint;
2328 	if (hint == NOTE_REVOKE) {
2329 		kn->kn_flags |= (EV_EOF | EV_NODATA);
2330 		return (1);
2331 	}
2332 	return (kn->kn_fflags != 0);
2333 }
2334 
2335 /*
2336  * SPEC and FIFO support VOPS
2337  */
2338 static
2339 int
2340 hammer2_vop_markatime(struct vop_markatime_args *ap)
2341 {
2342 	hammer2_inode_t *ip;
2343 	struct vnode *vp;
2344 
2345 	vp = ap->a_vp;
2346 	ip = VTOI(vp);
2347 
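	/*
	 * hammer2 does not maintain access times, so aside from the
	 * read-only check below this VOP is a no-op.
	 */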
2348 	if (ip->pmp->ronly)
2349 		return (EROFS);
2350 	return(0);
2351 }
2352 
2353 static
2354 int
2355 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2356 {
2357 	int error;
2358 
2359 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2360 	if (error)
2361 		error = hammer2_vop_kqfilter(ap);
2362 	return(error);
2363 }
2364 
2365 /*
2366  * VOPS vector
2367  */
2368 struct vop_ops hammer2_vnode_vops = {
2369 	.vop_default	= vop_defaultop,
2370 	.vop_fsync	= hammer2_vop_fsync,
2371 	.vop_getpages	= vop_stdgetpages,
2372 	.vop_putpages	= vop_stdputpages,
2373 	.vop_access	= hammer2_vop_access,
2374 	.vop_advlock	= hammer2_vop_advlock,
2375 	.vop_close	= hammer2_vop_close,
2376 	.vop_nlink	= hammer2_vop_nlink,
2377 	.vop_ncreate	= hammer2_vop_ncreate,
2378 	.vop_nsymlink	= hammer2_vop_nsymlink,
2379 	.vop_nremove	= hammer2_vop_nremove,
2380 	.vop_nrmdir	= hammer2_vop_nrmdir,
2381 	.vop_nrename	= hammer2_vop_nrename,
2382 	.vop_getattr	= hammer2_vop_getattr,
2383 	.vop_setattr	= hammer2_vop_setattr,
2384 	.vop_readdir	= hammer2_vop_readdir,
2385 	.vop_readlink	= hammer2_vop_readlink,
2388 	.vop_read	= hammer2_vop_read,
2389 	.vop_write	= hammer2_vop_write,
2390 	.vop_open	= hammer2_vop_open,
2391 	.vop_inactive	= hammer2_vop_inactive,
2392 	.vop_reclaim	= hammer2_vop_reclaim,
2393 	.vop_nresolve	= hammer2_vop_nresolve,
2394 	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2395 	.vop_nmkdir	= hammer2_vop_nmkdir,
2396 	.vop_nmknod	= hammer2_vop_nmknod,
2397 	.vop_ioctl	= hammer2_vop_ioctl,
2398 	.vop_mountctl	= hammer2_vop_mountctl,
2399 	.vop_bmap	= hammer2_vop_bmap,
2400 	.vop_strategy	= hammer2_vop_strategy,
2401 	.vop_kqfilter	= hammer2_vop_kqfilter
2402 };
2403 
2404 struct vop_ops hammer2_spec_vops = {
2405 	.vop_default	= vop_defaultop,
2406 	.vop_fsync	= hammer2_vop_fsync,
2407 	.vop_read	= vop_stdnoread,
2408 	.vop_write	= vop_stdnowrite,
2409 	.vop_access	= hammer2_vop_access,
2410 	.vop_close	= hammer2_vop_close,
2411 	.vop_markatime	= hammer2_vop_markatime,
2412 	.vop_getattr	= hammer2_vop_getattr,
2413 	.vop_inactive	= hammer2_vop_inactive,
2414 	.vop_reclaim	= hammer2_vop_reclaim,
2415 	.vop_setattr	= hammer2_vop_setattr
2416 };
2417 
2418 struct vop_ops hammer2_fifo_vops = {
2419 	.vop_default	= fifo_vnoperate,
2420 	.vop_fsync	= hammer2_vop_fsync,
2421 #if 0
2422 	.vop_read	= hammer2_vop_fiforead,
2423 	.vop_write	= hammer2_vop_fifowrite,
2424 #endif
2425 	.vop_access	= hammer2_vop_access,
2426 #if 0
2427 	.vop_close	= hammer2_vop_fifoclose,
2428 #endif
2429 	.vop_markatime	= hammer2_vop_markatime,
2430 	.vop_getattr	= hammer2_vop_getattr,
2431 	.vop_inactive	= hammer2_vop_inactive,
2432 	.vop_reclaim	= hammer2_vop_reclaim,
2433 	.vop_setattr	= hammer2_vop_setattr,
2434 	.vop_kqfilter	= hammer2_vop_fifokqfilter
2435 };
2436 
2437