xref: /dragonfly/sys/vfs/hammer2/hammer2_vnops.c (revision a42bad2d)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * Kernel Filesystem interface
38  *
39  * NOTE! local ipdata pointers must be reloaded on any modifying operation
40  *	 to the inode as its underlying chain may have changed.
41  */
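/*
 * A sketch of the reload rule above (hypothetical locals; the code in
 * this file goes through cluster/XOP helpers rather than raw chains):
 *
 *	ripdata = &chain->data->ipdata;
 *	hammer2_chain_modify(...);		<- may COW chain->data
 *	ripdata = &chain->data->ipdata;		<- reload before any reuse
 */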
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/namei.h>
50 #include <sys/mount.h>
51 #include <sys/vnode.h>
52 #include <sys/mountctl.h>
53 #include <sys/dirent.h>
54 #include <sys/uio.h>
55 #include <sys/objcache.h>
56 #include <sys/event.h>
57 #include <sys/file.h>
58 #include <vfs/fifofs/fifo.h>
59 
60 #include "hammer2.h"
61 
62 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
63 				int seqcount);
64 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
65 				int ioflag, int seqcount);
66 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
68 
69 struct objcache *cache_xops;
70 
71 static __inline
72 void
73 hammer2_knote(struct vnode *vp, int flags)
74 {
75 	if (flags)
76 		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
77 }
78 
79 /*
80  * Last reference to a vnode is going away but it is still cached.
81  */
82 static
83 int
84 hammer2_vop_inactive(struct vop_inactive_args *ap)
85 {
86 	hammer2_inode_t *ip;
87 	struct vnode *vp;
88 
89 	vp = ap->a_vp;
90 	ip = VTOI(vp);
91 
92 	/*
93 	 * Degenerate case
94 	 */
95 	if (ip == NULL) {
96 		vrecycle(vp);
97 		return (0);
98 	}
99 
100 	/*
101 	 * Check for deleted inodes and recycle immediately on the last
102 	 * release.  Be sure to destroy any left-over buffer cache buffers
103 	 * so we do not waste time trying to flush them.
104 	 *
105 	 * Note that deleting the file block chains under the inode chain
106 	 * would just be a waste of energy, so don't do it.
107 	 *
108 	 * WARNING: nvtruncbuf() can only be safely called without the inode
109 	 *	    lock held due to the way our write thread works.
110 	 */
111 	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
112 		hammer2_key_t lbase;
113 		int nblksize;
114 
115 		/*
116 		 * Detect updates to the embedded data which may be
117 		 * synchronized by the strategy code.  Simply mark the
118 		 * inode modified so it gets picked up by our normal flush.
119 		 */
120 		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
121 		nvtruncbuf(vp, 0, nblksize, 0, 0);
122 		vrecycle(vp);
123 	}
124 	return (0);
125 }
126 
127 /*
128  * Reclaim a vnode so that it can be reused; after the inode is
129  * disassociated, the filesystem must manage it alone.
130  */
131 static
132 int
133 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
134 {
135 	hammer2_inode_t *ip;
136 	hammer2_pfs_t *pmp;
137 	struct vnode *vp;
138 
139 	vp = ap->a_vp;
140 	ip = VTOI(vp);
141 	if (ip == NULL) {
142 		return(0);
143 	}
144 	pmp = ip->pmp;
145 
146 	/*
147 	 * The final close of a deleted file or directory marks it for
148 	 * destruction.  The DELETED flag allows the flusher to shortcut
149 	 * any modified blocks still unflushed (that is, just ignore them).
150 	 *
151 	 * HAMMER2 usually does not try to optimize the freemap by returning
152 	 * deleted blocks to it, as it generally does not know how many
153 	 * snapshots might be referencing portions of the file/dir.
154 	 */
155 	vp->v_data = NULL;
156 	ip->vp = NULL;
157 
158 	/*
159 	 * NOTE! We do not attempt to flush chains here, flushing is
160 	 *	 really fragile and could also deadlock.
161 	 */
162 	vclrisdirty(vp);
163 
164 	/*
165 	 * A modified inode may require chain synchronization.  This
166 	 * synchronization is usually handled by VOP_SYNC / VOP_FSYNC
167 	 * when vfsync() is called.  However, that requires a vnode.
168 	 *
169 	 * When the vnode is disassociated we must keep track of any modified
170 	 * inode via the sideq so that it is properly flushed.  We cannot
171 	 * safely synchronize the inode from inside the reclaim due to
172 	 * potentially deep locks held as-of when the reclaim occurs.
173 	 * Interactions and potential deadlocks abound.
174 	 */
175 	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
176 			  HAMMER2_INODE_MODIFIED |
177 			  HAMMER2_INODE_RESIZED |
178 			  HAMMER2_INODE_DIRTYDATA)) &&
179 	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
180 		hammer2_inode_sideq_t *ipul;
181 
182 		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
183 		ipul->ip = ip;
184 
185 		hammer2_spin_ex(&pmp->list_spin);
186 		if ((ip->flags & HAMMER2_INODE_ONSIDEQ) == 0) {
187 			/* ref -> sideq */
188 			atomic_set_int(&ip->flags, HAMMER2_INODE_ONSIDEQ);
189 			TAILQ_INSERT_TAIL(&pmp->sideq, ipul, entry);
190 			++pmp->sideq_count;
191 			hammer2_spin_unex(&pmp->list_spin);
192 		} else {
193 			hammer2_spin_unex(&pmp->list_spin);
194 			kfree(ipul, pmp->minode);
195 			hammer2_inode_drop(ip);		/* vp ref */
196 		}
197 		/* retain ref from vp for ipul */
198 	} else {
199 		hammer2_inode_drop(ip);			/* vp ref */
200 	}
201 
202 	/*
203 	 * XXX handle background sync when ip is dirty; the kernel will no
204 	 * longer notify us regarding this inode because there is no longer
205 	 * a vnode attached to it.
206 	 */
207 
208 	return (0);
209 }
210 
211 /*
212  * Currently this function synchronizes the front-end inode state to the
213  * backend chain topology, then flushes the inode's chain and sub-topology
214  * to backend media.  This function does not flush the root topology down to
215  * the inode.
216  */
217 static
218 int
219 hammer2_vop_fsync(struct vop_fsync_args *ap)
220 {
221 	hammer2_inode_t *ip;
222 	struct vnode *vp;
223 	int error1;
224 	int error2;
225 
226 	vp = ap->a_vp;
227 	ip = VTOI(vp);
228 	error1 = 0;
229 
230 	hammer2_trans_init(ip->pmp, 0);
231 
232 	/*
233 	 * Flush dirty buffers in the file's logical buffer cache.
234 	 * It is best to wait for the strategy code to commit the
235 	 * buffers to the device's backing buffer cache before
236 	 * then trying to flush the inode.
237 	 *
238 	 * This should be quick, but certain inode modifications cached
239 	 * entirely in the hammer2_inode structure may not trigger a
240 	 * buffer read until the flush, so the fsync can wind up also
241 	 * doing scattered reads.
242 	 */
243 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
244 	bio_track_wait(&vp->v_track_write, 0, 0);
245 
246 	/*
247 	 * Flush any inode changes
248 	 */
249 	hammer2_inode_lock(ip, 0);
250 	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
251 		error1 = hammer2_inode_chain_sync(ip);
252 
253 	/*
254 	 * Flush dirty chains related to the inode.
255 	 *
256 	 * NOTE! XXX We do not currently flush to the volume root, ultimately
257 	 *	 we will want to have a shortcut for the flushed inode stored
258 	 *	 in the volume root for recovery purposes.
259 	 */
260 	error2 = hammer2_inode_chain_flush(ip);
261 	if (error2)
262 		error1 = error2;
263 
264 	/*
265 	 * We may be able to clear the vnode dirty flag.  The
266 	 * hammer2_pfs_moderate() code depends on this usually working.
267 	 */
268 	if ((ip->flags & (HAMMER2_INODE_MODIFIED |
269 			  HAMMER2_INODE_RESIZED |
270 			  HAMMER2_INODE_DIRTYDATA)) == 0 &&
271 	    RB_EMPTY(&vp->v_rbdirty_tree) &&
272 	    !bio_track_active(&vp->v_track_write)) {
273 		vclrisdirty(vp);
274 	}
275 	hammer2_inode_unlock(ip);
276 	hammer2_trans_done(ip->pmp, 0);
277 
278 	return (error1);
279 }
280 
281 static
282 int
283 hammer2_vop_access(struct vop_access_args *ap)
284 {
285 	hammer2_inode_t *ip = VTOI(ap->a_vp);
286 	uid_t uid;
287 	gid_t gid;
288 	int error;
289 
290 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
291 	uid = hammer2_to_unix_xid(&ip->meta.uid);
292 	gid = hammer2_to_unix_xid(&ip->meta.gid);
293 	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
294 	hammer2_inode_unlock(ip);
295 
296 	return (error);
297 }
298 
299 static
300 int
301 hammer2_vop_getattr(struct vop_getattr_args *ap)
302 {
303 	hammer2_pfs_t *pmp;
304 	hammer2_inode_t *ip;
305 	struct vnode *vp;
306 	struct vattr *vap;
307 	hammer2_chain_t *chain;
308 	int i;
309 
310 	vp = ap->a_vp;
311 	vap = ap->a_vap;
312 
313 	ip = VTOI(vp);
314 	pmp = ip->pmp;
315 
316 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
317 
318 	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
319 	vap->va_fileid = ip->meta.inum;
320 	vap->va_mode = ip->meta.mode;
321 	vap->va_nlink = ip->meta.nlinks;
322 	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
323 	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
324 	vap->va_rmajor = 0;
325 	vap->va_rminor = 0;
326 	vap->va_size = ip->meta.size;	/* protected by shared lock */
327 	vap->va_blocksize = HAMMER2_PBUFSIZE;
328 	vap->va_flags = ip->meta.uflags;
329 	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
330 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
331 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); /* atime unsupported */
332 	vap->va_gen = 1;
333 	vap->va_bytes = 0;
334 	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
335 		/*
336 		 * Can't really calculate directory use sans the files under
337 		 * it, just assume one block for now.
338 		 */
339 		vap->va_bytes += HAMMER2_INODE_BYTES;
340 	} else {
341 		for (i = 0; i < ip->cluster.nchains; ++i) {
342 			if ((chain = ip->cluster.array[i].chain) != NULL) {
343 				if (vap->va_bytes <
344 				    chain->bref.embed.stats.data_count) {
345 					vap->va_bytes =
346 					    chain->bref.embed.stats.data_count;
347 				}
348 			}
349 		}
350 	}
351 	vap->va_type = hammer2_get_vtype(ip->meta.type);
352 	vap->va_filerev = 0;
353 	vap->va_uid_uuid = ip->meta.uid;
354 	vap->va_gid_uuid = ip->meta.gid;
355 	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
356 			  VA_FSID_UUID_VALID;
357 
358 	hammer2_inode_unlock(ip);
359 
360 	return (0);
361 }
362 
363 static
364 int
365 hammer2_vop_setattr(struct vop_setattr_args *ap)
366 {
367 	hammer2_inode_t *ip;
368 	struct vnode *vp;
369 	struct vattr *vap;
370 	int error;
371 	int kflags = 0;
372 	uint64_t ctime;
373 
374 	vp = ap->a_vp;
375 	vap = ap->a_vap;
376 	hammer2_update_time(&ctime);
377 
378 	ip = VTOI(vp);
379 
380 	if (ip->pmp->ronly)
381 		return (EROFS);
382 	if (hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1)
383 		return (ENOSPC);
384 
385 	hammer2_pfs_memory_wait(ip, 0);
386 	hammer2_trans_init(ip->pmp, 0);
387 	hammer2_inode_lock(ip, 0);
388 	error = 0;
389 
390 	if (vap->va_flags != VNOVAL) {
391 		uint32_t flags;
392 
393 		flags = ip->meta.uflags;
394 		error = vop_helper_setattr_flags(&flags, vap->va_flags,
395 				     hammer2_to_unix_xid(&ip->meta.uid),
396 				     ap->a_cred);
397 		if (error == 0) {
398 			if (ip->meta.uflags != flags) {
399 				hammer2_inode_modify(ip);
400 				ip->meta.uflags = flags;
401 				ip->meta.ctime = ctime;
402 				kflags |= NOTE_ATTRIB;
403 			}
404 			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
405 				error = 0;
406 				goto done;
407 			}
408 		}
409 		goto done;
410 	}
411 	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
412 		error = EPERM;
413 		goto done;
414 	}
415 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
416 		mode_t cur_mode = ip->meta.mode;
417 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
418 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
419 		uuid_t uuid_uid;
420 		uuid_t uuid_gid;
421 
422 		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
423 					 ap->a_cred,
424 					 &cur_uid, &cur_gid, &cur_mode);
425 		if (error == 0) {
426 			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
427 			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
428 			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
429 			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
430 			    ip->meta.mode != cur_mode
431 			) {
432 				hammer2_inode_modify(ip);
433 				ip->meta.uid = uuid_uid;
434 				ip->meta.gid = uuid_gid;
435 				ip->meta.mode = cur_mode;
436 				ip->meta.ctime = ctime;
437 			}
438 			kflags |= NOTE_ATTRIB;
439 		}
440 	}
441 
442 	/*
443 	 * Resize the file
444 	 */
445 	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
446 		switch(vp->v_type) {
447 		case VREG:
448 			if (vap->va_size == ip->meta.size)
449 				break;
450 			if (vap->va_size < ip->meta.size) {
451 				hammer2_mtx_ex(&ip->truncate_lock);
452 				hammer2_truncate_file(ip, vap->va_size);
453 				hammer2_mtx_unlock(&ip->truncate_lock);
454 				kflags |= NOTE_WRITE;
455 			} else {
456 				hammer2_extend_file(ip, vap->va_size);
457 				kflags |= NOTE_WRITE | NOTE_EXTEND;
458 			}
459 			hammer2_inode_modify(ip);
460 			ip->meta.mtime = ctime;
461 			vclrflags(vp, VLASTWRITETS);
462 			break;
463 		default:
464 			error = EINVAL;
465 			goto done;
466 		}
467 	}
468 #if 0
469 	/* atime not supported */
470 	if (vap->va_atime.tv_sec != VNOVAL) {
471 		hammer2_inode_modify(ip);
472 		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
473 		kflags |= NOTE_ATTRIB;
474 	}
475 #endif
476 	if (vap->va_mode != (mode_t)VNOVAL) {
477 		mode_t cur_mode = ip->meta.mode;
478 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
479 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
480 
481 		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
482 					 cur_uid, cur_gid, &cur_mode);
483 		if (error == 0 && ip->meta.mode != cur_mode) {
484 			hammer2_inode_modify(ip);
485 			ip->meta.mode = cur_mode;
486 			ip->meta.ctime = ctime;
487 			kflags |= NOTE_ATTRIB;
488 		}
489 	}
490 
491 	if (vap->va_mtime.tv_sec != VNOVAL) {
492 		hammer2_inode_modify(ip);
493 		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
494 		kflags |= NOTE_ATTRIB;
495 		vclrflags(vp, VLASTWRITETS);
496 	}
497 
498 done:
499 	/*
500 	 * If a truncation occurred we must call chain_sync() now in order
501 	 * to trim the related data chains, otherwise a later expansion can
502 	 * cause havoc.
503 	 *
504 	 * If an extend occurred that changed the DIRECTDATA state, we must
505 	 * call chain_sync now in order to prepare the inode's indirect
506 	 * block table.
507 	 *
508 	 * WARNING! This means we are making an adjustment to the inode's
509 	 * chain outside of sync/fsync, and not just to inode->meta, which
510 	 * may result in some consistency issues if a crash were to occur
511 	 * at just the wrong time.
512 	 */
513 	if (ip->flags & HAMMER2_INODE_RESIZED)
514 		hammer2_inode_chain_sync(ip);
515 
516 	/*
517 	 * Cleanup.
518 	 */
519 	hammer2_inode_unlock(ip);
520 	hammer2_trans_done(ip->pmp, 1);
521 	hammer2_knote(ip->vp, kflags);
522 
523 	return (error);
524 }
525 
526 static
527 int
528 hammer2_vop_readdir(struct vop_readdir_args *ap)
529 {
530 	hammer2_xop_readdir_t *xop;
531 	hammer2_blockref_t bref;
532 	hammer2_inode_t *ip;
533 	hammer2_tid_t inum;
534 	hammer2_key_t lkey;
535 	struct uio *uio;
536 	off_t *cookies;
537 	off_t saveoff;
538 	int cookie_index;
539 	int ncookies;
540 	int error;
541 	int eofflag;
542 	int r;
543 
544 	ip = VTOI(ap->a_vp);
545 	uio = ap->a_uio;
546 	saveoff = uio->uio_offset;
547 	eofflag = 0;
548 	error = 0;
549 
550 	/*
551 	 * Set up directory entry cookies if requested
552 	 */
553 	if (ap->a_ncookies) {
554 		ncookies = uio->uio_resid / 16 + 1;
555 		if (ncookies > 1024)
556 			ncookies = 1024;
557 		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
558 	} else {
559 		ncookies = -1;
560 		cookies = NULL;
561 	}
562 	cookie_index = 0;
563 
564 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
565 
566 	/*
567 	 * Handle artificial entries.  To ensure that only positive 64-bit
568 	 * quantities are returned to userland, we always strip off bit 63.
569 	 * The hash code is designed such that codes 0x0000-0x7FFF are not
570 	 * used, allowing us to use these codes for artificial entries.
571 	 *
572 	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
573 	 * allow '..' to cross the mount point into (e.g.) the super-root.
574 	 */
575 	if (saveoff == 0) {
576 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
577 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
578 		if (r)
579 			goto done;
580 		if (cookies)
581 			cookies[cookie_index] = saveoff;
582 		++saveoff;
583 		++cookie_index;
584 		if (cookie_index == ncookies)
585 			goto done;
586 	}
587 
588 	if (saveoff == 1) {
589 		/*
590 		 * Be careful with lockorder when accessing ".."
591 		 *
592 		 * (ip is the current dir. xip is the parent dir).
593 		 */
594 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
595 		if (ip != ip->pmp->iroot)
596 			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
597 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
598 		if (r)
599 			goto done;
600 		if (cookies)
601 			cookies[cookie_index] = saveoff;
602 		++saveoff;
603 		++cookie_index;
604 		if (cookie_index == ncookies)
605 			goto done;
606 	}
607 
608 	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
609 	if (hammer2_debug & 0x0020)
610 		kprintf("readdir: lkey %016jx\n", lkey);
611 	if (error)
612 		goto done;
613 
614 	/*
615 	 * Use XOP for cluster scan.
616 	 *
617 	 * parent is the inode cluster, already locked for us.  Don't
618 	 * double lock shared locks as this will screw up upgrades.
619 	 */
620 	xop = hammer2_xop_alloc(ip, 0);
621 	xop->lkey = lkey;
622 	hammer2_xop_start(&xop->head, hammer2_xop_readdir);
623 
624 	for (;;) {
625 		const hammer2_inode_data_t *ripdata;
626 		const char *dname;
627 		int dtype;
628 
629 		error = hammer2_xop_collect(&xop->head, 0);
630 		error = hammer2_error_to_errno(error);
631 		if (error) {
632 			break;
633 		}
634 		if (cookie_index == ncookies)
635 			break;
636 		if (hammer2_debug & 0x0020)
637 			kprintf("cluster chain %p %p\n",
638 				xop->head.cluster.focus,
639 				(xop->head.cluster.focus ?
640 				 xop->head.cluster.focus->data : (void *)-1));
641 		hammer2_cluster_bref(&xop->head.cluster, &bref);
642 
643 		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
644 			ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
645 			dtype = hammer2_get_dtype(ripdata->meta.type);
646 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
647 			r = vop_write_dirent(&error, uio,
648 					     ripdata->meta.inum &
649 					      HAMMER2_DIRHASH_USERMSK,
650 					     dtype,
651 					     ripdata->meta.name_len,
652 					     ripdata->filename);
653 			hammer2_xop_pdata(&xop->head);
654 			if (r)
655 				break;
656 			if (cookies)
657 				cookies[cookie_index] = saveoff;
658 			++cookie_index;
659 		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
660 			uint16_t namlen;
661 
662 			dtype = hammer2_get_dtype(bref.embed.dirent.type);
663 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
664 			namlen = bref.embed.dirent.namlen;
665 			if (namlen <= sizeof(bref.check.buf)) {
666 				dname = bref.check.buf;
667 			} else {
668 				dname = hammer2_xop_gdata(&xop->head)->buf;
669 			}
670 			r = vop_write_dirent(&error, uio,
671 					     bref.embed.dirent.inum, dtype,
672 					     namlen, dname);
673 			if (namlen > sizeof(bref.check.buf))
674 				hammer2_xop_pdata(&xop->head);
675 			if (r)
676 				break;
677 			if (cookies)
678 				cookies[cookie_index] = saveoff;
679 			++cookie_index;
680 		} else {
681 			/* XXX chain error */
682 			kprintf("bad chain type readdir %d\n", bref.type);
683 		}
684 	}
685 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
686 	if (error == ENOENT) {
687 		error = 0;
688 		eofflag = 1;
689 		saveoff = (hammer2_key_t)-1;
690 	} else {
691 		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
692 	}
693 done:
694 	hammer2_inode_unlock(ip);
695 	if (ap->a_eofflag)
696 		*ap->a_eofflag = eofflag;
697 	if (hammer2_debug & 0x0020)
698 		kprintf("readdir: done at %016jx\n", saveoff);
699 	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
700 	if (error && cookie_index == 0) {
701 		if (cookies) {
702 			kfree(cookies, M_TEMP);
703 			*ap->a_ncookies = 0;
704 			*ap->a_cookies = NULL;
705 		}
706 	} else {
707 		if (cookies) {
708 			*ap->a_ncookies = cookie_index;
709 			*ap->a_cookies = cookies;
710 		}
711 	}
712 	return (error);
713 }
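/*
 * Worked example of the readdir offset encoding above, using a
 * hypothetical entry: an on-media directory key of 0x8000000012345678
 * has HAMMER2_DIRHASH_VISIBLE (bit 63) set.  The cookie returned to
 * userland strips that bit,
 *
 *	saveoff & ~HAMMER2_DIRHASH_VISIBLE	<- 0x0000000012345678
 *
 * and a later re-entry reconstructs the scan key,
 *
 *	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
 *
 * Cookies 0 and 1 can never collide with real entries because hash
 * codes 0x0000-0x7FFF are not generated, leaving them free to encode
 * "." and "..".
 */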
714 
715 /*
716  * hammer2_vop_readlink { vp, uio, cred }
717  */
718 static
719 int
720 hammer2_vop_readlink(struct vop_readlink_args *ap)
721 {
722 	struct vnode *vp;
723 	hammer2_inode_t *ip;
724 	int error;
725 
726 	vp = ap->a_vp;
727 	if (vp->v_type != VLNK)
728 		return (EINVAL);
729 	ip = VTOI(vp);
730 
731 	error = hammer2_read_file(ip, ap->a_uio, 0);
732 	return (error);
733 }
734 
735 static
736 int
737 hammer2_vop_read(struct vop_read_args *ap)
738 {
739 	struct vnode *vp;
740 	hammer2_inode_t *ip;
741 	struct uio *uio;
742 	int error;
743 	int seqcount;
744 	int bigread;
745 
746 	/*
747 	 * Read operations supported on this vnode?
748 	 */
749 	vp = ap->a_vp;
750 	if (vp->v_type != VREG)
751 		return (EINVAL);
752 
753 	/*
754 	 * Misc
755 	 */
756 	ip = VTOI(vp);
757 	uio = ap->a_uio;
758 	error = 0;
759 
760 	seqcount = ap->a_ioflag >> 16;
761 	bigread = (uio->uio_resid > 100 * 1024 * 1024);
762 
763 	error = hammer2_read_file(ip, uio, seqcount);
764 	return (error);
765 }
766 
767 static
768 int
769 hammer2_vop_write(struct vop_write_args *ap)
770 {
771 	hammer2_inode_t *ip;
772 	thread_t td;
773 	struct vnode *vp;
774 	struct uio *uio;
775 	int error;
776 	int seqcount;
777 	int ioflag;
778 
779 	/*
780 	 * Write operations supported on this vnode?
781 	 */
782 	vp = ap->a_vp;
783 	if (vp->v_type != VREG)
784 		return (EINVAL);
785 
786 	/*
787 	 * Misc
788 	 */
789 	ip = VTOI(vp);
790 	ioflag = ap->a_ioflag;
791 	uio = ap->a_uio;
792 	error = 0;
793 	if (ip->pmp->ronly)
794 		return (EROFS);
795 	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
796 	case 2:
797 		return (ENOSPC);
798 	case 1:
799 		ioflag |= IO_DIRECT;	/* semi-synchronous */
800 		/* fall through */
801 	default:
802 		break;
803 	}
804 
805 	seqcount = ioflag >> 16;
806 
807 	/*
808 	 * Check resource limit
809 	 */
810 	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
811 	    uio->uio_offset + uio->uio_resid >
812 	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
813 		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
814 		return (EFBIG);
815 	}
816 
817 	/*
818 	 * The transaction interlocks against flush initiations
819 	 * (note: it will run concurrently with the actual flush).
820 	 *
821 	 * To avoid deadlocking against the VM system, we must flag any
822 	 * transaction related to the buffer cache or other direct
823 	 * VM page manipulation.
824 	 */
825 	if (uio->uio_segflg == UIO_NOCOPY) {
826 		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
827 	} else {
828 		hammer2_pfs_memory_wait(ip, 0);
829 		hammer2_trans_init(ip->pmp, 0);
830 	}
831 	error = hammer2_write_file(ip, uio, ioflag, seqcount);
832 	hammer2_trans_done(ip->pmp, 1);
833 
834 	return (error);
835 }
836 
837 /*
838  * Perform read operations on a file or symlink given an UNLOCKED
839  * inode and uio.
840  *
841  * The passed ip is not locked.
842  */
843 static
844 int
845 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
846 {
847 	hammer2_off_t size;
848 	struct buf *bp;
849 	int error;
850 
851 	error = 0;
852 
853 	/*
854 	 * UIO read loop.
855 	 *
856 	 * WARNING! Assumes that the kernel interlocks size changes at the
857 	 *	    vnode level.
858 	 */
859 	hammer2_mtx_sh(&ip->lock);
860 	hammer2_mtx_sh(&ip->truncate_lock);
861 	size = ip->meta.size;
862 	hammer2_mtx_unlock(&ip->lock);
863 
864 	while (uio->uio_resid > 0 && uio->uio_offset < size) {
865 		hammer2_key_t lbase;
866 		hammer2_key_t leof;
867 		int lblksize;
868 		int loff;
869 		int n;
870 
871 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
872 						&lbase, &leof);
873 
874 #if 1
875 		bp = NULL;
876 		error = cluster_readx(ip->vp, leof, lbase, lblksize,
877 				      B_NOTMETA | B_KVABIO,
878 				      uio->uio_resid,
879 				      seqcount * MAXBSIZE,
880 				      &bp);
881 #else
882 		if (uio->uio_segflg == UIO_NOCOPY) {
883 			bp = getblk(ip->vp, lbase, lblksize,
884 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
885 			if (bp->b_flags & B_CACHE) {
886 				int i;
887 				int j = 0;
888 				if (bp->b_xio.xio_npages != 16)
889 					kprintf("NPAGES BAD\n");
890 				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
891 					vm_page_t m;
892 					m = bp->b_xio.xio_pages[i];
893 					if (m == NULL || m->valid == 0) {
894 						kprintf("bp %016jx %016jx pg %d inv",
895 							lbase, leof, i);
896 						if (m)
897 							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
898 						kprintf("\n");
899 						j = 1;
900 					}
901 				}
902 				if (j)
903 					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
904 			}
905 			bqrelse(bp);
906 		}
907 		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
908 #endif
909 		if (error) {
910 			brelse(bp);
911 			break;
912 		}
913 		bkvasync(bp);
914 		loff = (int)(uio->uio_offset - lbase);
915 		n = lblksize - loff;
916 		if (n > uio->uio_resid)
917 			n = uio->uio_resid;
918 		if (n > size - uio->uio_offset)
919 			n = (int)(size - uio->uio_offset);
920 		bp->b_flags |= B_AGE;
921 		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
922 		bqrelse(bp);
923 	}
924 	hammer2_mtx_unlock(&ip->truncate_lock);
925 
926 	return (error);
927 }
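/*
 * The clamping in the read loop above, worked with hypothetical
 * numbers: lblksize = 0x10000 at lbase = 0x10000, uio_offset = 0x12000,
 * uio_resid = 0x100000, and file size = 0x13000 gives
 *
 *	loff = 0x12000 - 0x10000 = 0x2000
 *	n    = 0x10000 - 0x2000  = 0xE000	(rest of the buffer)
 *	n    < uio_resid			(no clamp against resid)
 *	n    > size - uio_offset -> n = 0x1000	(clamp against EOF)
 *
 * so only 0x1000 bytes are copied out and the loop terminates with
 * uio_offset == size.
 */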
928 
929 /*
930  * Write to the file represented by the inode via the logical buffer cache.
931  * The inode may represent a regular file or a symlink.
932  *
933  * The inode must not be locked.
934  */
935 static
936 int
937 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
938 		   int ioflag, int seqcount)
939 {
940 	hammer2_key_t old_eof;
941 	hammer2_key_t new_eof;
942 	struct buf *bp;
943 	int kflags;
944 	int error;
945 	int modified;
946 
947 	/*
948 	 * Setup if append
949 	 *
950 	 * WARNING! Assumes that the kernel interlocks size changes at the
951 	 *	    vnode level.
952 	 */
953 	hammer2_mtx_ex(&ip->lock);
954 	hammer2_mtx_sh(&ip->truncate_lock);
955 	if (ioflag & IO_APPEND)
956 		uio->uio_offset = ip->meta.size;
957 	old_eof = ip->meta.size;
958 
959 	/*
960 	 * Extend the file if necessary.  If the write fails at some point
961 	 * we will truncate it back down to cover as much as we were able
962 	 * to write.
963 	 *
964 	 * Doing this now makes it easier to calculate buffer sizes in
965 	 * the loop.
966 	 */
967 	kflags = 0;
968 	error = 0;
969 	modified = 0;
970 
971 	if (uio->uio_offset + uio->uio_resid > old_eof) {
972 		new_eof = uio->uio_offset + uio->uio_resid;
973 		modified = 1;
974 		hammer2_extend_file(ip, new_eof);
975 		kflags |= NOTE_EXTEND;
976 	} else {
977 		new_eof = old_eof;
978 	}
979 	hammer2_mtx_unlock(&ip->lock);
980 
981 	/*
982 	 * UIO write loop
983 	 */
984 	while (uio->uio_resid > 0) {
985 		hammer2_key_t lbase;
986 		int trivial;
987 		int endofblk;
988 		int lblksize;
989 		int loff;
990 		int n;
991 
992 		/*
993 		 * Don't allow the buffer build to blow out the buffer
994 		 * cache.
995 		 */
996 		if ((ioflag & IO_RECURSE) == 0)
997 			bwillwrite(HAMMER2_PBUFSIZE);
998 
999 		/*
1000 		 * This nominally tells us how much we can cluster and
1001 		 * what the logical buffer size needs to be.  Currently
1002 		 * we don't try to cluster the write and just handle one
1003 		 * block at a time.
1004 		 */
1005 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
1006 						&lbase, NULL);
1007 		loff = (int)(uio->uio_offset - lbase);
1008 
1009 		KKASSERT(lblksize <= 65536);
1010 
1011 		/*
1012 		 * Calculate bytes to copy this transfer and whether the
1013 		 * copy completely covers the buffer or not.
1014 		 */
1015 		trivial = 0;
1016 		n = lblksize - loff;
1017 		if (n > uio->uio_resid) {
1018 			n = uio->uio_resid;
1019 			if (loff == lbase && uio->uio_offset + n == new_eof)
1020 				trivial = 1;
1021 			endofblk = 0;
1022 		} else {
1023 			if (loff == 0)
1024 				trivial = 1;
1025 			endofblk = 1;
1026 		}
1027 		if (lbase >= new_eof)
1028 			trivial = 1;
1029 
1030 		/*
1031 		 * Get the buffer
1032 		 */
1033 		if (uio->uio_segflg == UIO_NOCOPY) {
1034 			/*
1035 			 * Issuing a write with the same data backing the
1036 			 * buffer.  Instantiate the buffer to collect the
1037 			 * backing vm pages, then read-in any missing bits.
1038 			 *
1039 			 * This case is used by vop_stdputpages().
1040 			 */
1041 			bp = getblk(ip->vp, lbase, lblksize,
1042 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1043 			if ((bp->b_flags & B_CACHE) == 0) {
1044 				bqrelse(bp);
1045 				error = bread_kvabio(ip->vp, lbase,
1046 						     lblksize, &bp);
1047 			}
1048 		} else if (trivial) {
1049 			/*
1050 			 * Even though we are entirely overwriting the buffer
1051 			 * we may still have to zero it out to avoid a
1052 			 * mmap/write visibility issue.
1053 			 */
1054 			bp = getblk(ip->vp, lbase, lblksize,
1055 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1056 			if ((bp->b_flags & B_CACHE) == 0)
1057 				vfs_bio_clrbuf(bp);
1058 		} else {
1059 			/*
1060 			 * Partial overwrite, read in any missing bits then
1061 			 * replace the portion being written.
1062 			 *
1063 			 * (The strategy code will detect zero-fill physical
1064 			 * blocks for this case).
1065 			 */
1066 			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
1067 			if (error == 0)
1068 				bheavy(bp);
1069 		}
1070 
1071 		if (error) {
1072 			brelse(bp);
1073 			break;
1074 		}
1075 
1076 		/*
1077 		 * Ok, copy the data in
1078 		 */
1079 		bkvasync(bp);
1080 		error = uiomovebp(bp, bp->b_data + loff, n, uio);
1081 		kflags |= NOTE_WRITE;
1082 		modified = 1;
1083 		if (error) {
1084 			brelse(bp);
1085 			break;
1086 		}
1087 
1088 		/*
1089 		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1090 		 *	    with IO_SYNC or IO_ASYNC set.  These writes
1091 		 *	    must be handled as the pageout daemon expects.
1092 		 *
1093 		 * NOTE!    H2 relies on cluster_write() here because it
1094 		 *	    cannot preallocate disk blocks at the logical
1095 		 *	    level due to not knowing what the compression
1096 		 *	    size will be at this time.
1097 		 *
1098 		 *	    We must use cluster_write() here and we depend
1099 		 *	    on the write-behind feature to flush buffers
1100 		 *	    appropriately.  If we let the buffer daemons do
1101 		 *	    it the block allocations will be all over the
1102 		 *	    map.
1103 		 */
1104 		if (ioflag & IO_SYNC) {
1105 			bwrite(bp);
1106 		} else if ((ioflag & IO_DIRECT) && endofblk) {
1107 			bawrite(bp);
1108 		} else if (ioflag & IO_ASYNC) {
1109 			bawrite(bp);
1110 		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1111 			bdwrite(bp);
1112 		} else {
1113 #if 1
1114 			bp->b_flags |= B_CLUSTEROK;
1115 			cluster_write(bp, new_eof, lblksize, seqcount);
1116 #else
1117 			bp->b_flags |= B_CLUSTEROK;
1118 			bdwrite(bp);
1119 #endif
1120 		}
1121 	}
1122 
1123 	/*
1124 	 * Cleanup.  If we extended the file EOF but failed to write through,
1125 	 * the entire write is a failure and we have to back up.
1126 	 */
1127 	if (error && new_eof != old_eof) {
1128 		hammer2_mtx_unlock(&ip->truncate_lock);
1129 		hammer2_mtx_ex(&ip->lock);
1130 		hammer2_mtx_ex(&ip->truncate_lock);
1131 		hammer2_truncate_file(ip, old_eof);
1132 		if (ip->flags & HAMMER2_INODE_MODIFIED)
1133 			hammer2_inode_chain_sync(ip);
1134 		hammer2_mtx_unlock(&ip->lock);
1135 	} else if (modified) {
1136 		struct vnode *vp = ip->vp;
1137 
1138 		hammer2_mtx_ex(&ip->lock);
1139 		hammer2_inode_modify(ip);
1140 		if (uio->uio_segflg == UIO_NOCOPY) {
1141 			if (vp->v_flag & VLASTWRITETS) {
1142 				ip->meta.mtime =
1143 				    (unsigned long)vp->v_lastwrite_ts.tv_sec *
1144 				    1000000 +
1145 				    vp->v_lastwrite_ts.tv_nsec / 1000;
1146 			}
1147 		} else {
1148 			hammer2_update_time(&ip->meta.mtime);
1149 			vclrflags(vp, VLASTWRITETS);
1150 		}
1151 
1152 #if 0
1153 		/*
1154 		 * REMOVED - handled by hammer2_extend_file().  Do not issue
1155 		 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
1156 		 * state changes.
1157 		 *
1158 		 * Under normal conditions we only issue a chain_sync if
1159 		 * the inode's DIRECTDATA state changed.
1160 		 */
1161 		if (ip->flags & HAMMER2_INODE_RESIZED)
1162 			hammer2_inode_chain_sync(ip);
1163 #endif
1164 		hammer2_mtx_unlock(&ip->lock);
1165 		hammer2_knote(ip->vp, kflags);
1166 	}
1167 	hammer2_trans_assert_strategy(ip->pmp);
1168 	hammer2_mtx_unlock(&ip->truncate_lock);
1169 
1170 	return error;
1171 }
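/*
 * The trivial/endofblk computation above, worked with hypothetical
 * numbers: appending 100 bytes to an empty file gives lbase = 0,
 * loff = 0, new_eof = 100.  n = lblksize - loff exceeds uio_resid, so
 * n is clamped to 100 and endofblk = 0; loff == lbase and
 * uio_offset + n == new_eof, so trivial = 1 and the buffer can be
 * instantiated with getblk()/vfs_bio_clrbuf() instead of being read
 * from media.  A full block-aligned overwrite takes the else branch
 * instead: loff == 0 sets trivial = 1 and endofblk = 1, which allows
 * the IO_DIRECT case to flush the buffer with bawrite().
 */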
1172 
1173 /*
1174  * Truncate the size of a file.  The inode must not be locked.
1175  *
1176  * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1177  * ensure that any on-media data beyond the new file EOF has been destroyed.
1178  *
1179  * WARNING: nvtruncbuf() can only be safely called without the inode lock
1180  *	    held due to the way our write thread works.  If the truncation
1181  *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
1182  *	    for dirtying that buffer and zeroing out trailing bytes.
1183  *
1184  * WARNING! Assumes that the kernel interlocks size changes at the
1185  *	    vnode level.
1186  *
1187  * WARNING! Caller assumes responsibility for removing dead blocks
1188  *	    if INODE_RESIZED is set.
1189  */
1190 static
1191 void
1192 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1193 {
1194 	hammer2_key_t lbase;
1195 	int nblksize;
1196 
1197 	hammer2_mtx_unlock(&ip->lock);
1198 	if (ip->vp) {
1199 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1200 		nvtruncbuf(ip->vp, nsize,
1201 			   nblksize, (int)nsize & (nblksize - 1),
1202 			   0);
1203 	}
1204 	hammer2_mtx_ex(&ip->lock);
1205 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1206 	ip->osize = ip->meta.size;
1207 	ip->meta.size = nsize;
1208 	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1209 	hammer2_inode_modify(ip);
1210 }
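/*
 * The nvtruncbuf() boundary math above, with hypothetical numbers:
 * truncating to nsize = 0x12345 with an 0x10000-byte logical block
 * passes (int)nsize & (nblksize - 1) == 0x2345, telling the kernel
 * that 0x2345 bytes of the final buffer remain valid and that the
 * trailing bytes of that buffer must be zeroed and the buffer
 * redirtied.
 */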
1211 
1212 /*
1213  * Extend the size of a file.  The inode must not be locked.
1214  *
1215  * Even though the file size is changing, we do not have to set the
1216  * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1217  * boundary.  When this occurs a hammer2_inode_chain_sync() is required
1218  * to prepare the inode cluster's indirect block table, otherwise
1219  * async execution of the strategy code will implode on us.
1220  *
1221  * WARNING! Assumes that the kernel interlocks size changes at the
1222  *	    vnode level.
1223  *
1224  * WARNING! Caller assumes responsibility for transitioning out
1225  *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
1226  */
1227 static
1228 void
1229 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1230 {
1231 	hammer2_key_t lbase;
1232 	hammer2_key_t osize;
1233 	int oblksize;
1234 	int nblksize;
1235 
1236 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1237 	hammer2_inode_modify(ip);
1238 	osize = ip->meta.size;
1239 	ip->osize = osize;
1240 	ip->meta.size = nsize;
1241 
1242 	/*
1243 	 * We must issue a chain_sync() when the DIRECTDATA state changes
1244 	 * to prevent confusion between the flush code and the in-memory
1245 	 * state.  This is not perfect because we are doing it outside of
1246 	 * a sync/fsync operation, so it might not be fully synchronized
1247 	 * with the meta-data topology flush.
1248 	 */
1249 	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1250 		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1251 		hammer2_inode_chain_sync(ip);
1252 	}
1253 
1254 	hammer2_mtx_unlock(&ip->lock);
1255 	if (ip->vp) {
1256 		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1257 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1258 		nvextendbuf(ip->vp,
1259 			    osize, nsize,
1260 			    oblksize, nblksize,
1261 			    -1, -1, 0);
1262 	}
1263 	hammer2_mtx_ex(&ip->lock);
1264 }
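/*
 * Example of the DIRECTDATA transition above, assuming the usual
 * HAMMER2_EMBEDDED_BYTES of 512: extending a 100-byte file (data
 * embedded directly in the inode) to 4096 bytes crosses the boundary,
 * so INODE_RESIZED is set and hammer2_inode_chain_sync() runs
 * immediately to migrate the embedded data into a real data block
 * before the strategy code can act on stale state.  Extending from
 * 1024 to 4096 bytes does not cross the boundary and the size change
 * is picked up by the normal sync path.
 */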
1265 
1266 static
1267 int
1268 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1269 {
1270 	hammer2_xop_nresolve_t *xop;
1271 	hammer2_inode_t *ip;
1272 	hammer2_inode_t *dip;
1273 	struct namecache *ncp;
1274 	struct vnode *vp;
1275 	int error;
1276 
1277 	dip = VTOI(ap->a_dvp);
1278 	xop = hammer2_xop_alloc(dip, 0);
1279 
1280 	ncp = ap->a_nch->ncp;
1281 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1282 
1283 	/*
1284 	 * Note: In DragonFly the kernel handles '.' and '..'.
1285 	 */
1286 	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1287 	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);
1288 
1289 	error = hammer2_xop_collect(&xop->head, 0);
1290 	error = hammer2_error_to_errno(error);
1291 	if (error) {
1292 		ip = NULL;
1293 	} else {
1294 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head, -1);
1295 	}
1296 	hammer2_inode_unlock(dip);
1297 
1298 	/*
1299 	 * Acquire the related vnode
1300 	 *
1301 	 * NOTE: For error processing, only ENOENT resolves the namecache
1302 	 *	 entry to NULL; otherwise we just return the error and
1303 	 *	 leave the namecache unresolved.
1304 	 *
1305 	 * NOTE: multiple hammer2_inode structures can be aliased to the
1306 	 *	 same chain element, for example for hardlinks.  This
1307 	 *	 use case does not 'reattach' inode associations that
1308 	 *	 might already exist, but always allocates a new one.
1309 	 *
1310 	 * WARNING: inode structure is locked exclusively via inode_get
1311 	 *	    but chain was locked shared.  inode_unlock()
1312 	 *	    will handle it properly.
1313 	 */
1314 	if (ip) {
1315 		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
1316 		if (error == 0) {
1317 			vn_unlock(vp);
1318 			cache_setvp(ap->a_nch, vp);
1319 		} else if (error == ENOENT) {
1320 			cache_setvp(ap->a_nch, NULL);
1321 		}
1322 		hammer2_inode_unlock(ip);
1323 
1324 		/*
1325 		 * The vp should not be released until after we've disposed
1326 		 * of our locks, because it might cause vop_inactive() to
1327 		 * be called.
1328 		 */
1329 		if (vp)
1330 			vrele(vp);
1331 	} else {
1332 		error = ENOENT;
1333 		cache_setvp(ap->a_nch, NULL);
1334 	}
1335 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1336 	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1337 		("resolve error %d/%p ap %p\n",
1338 		 error, ap->a_nch->ncp->nc_vp, ap));
1339 
1340 	return error;
1341 }
1342 
1343 static
1344 int
1345 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1346 {
1347 	hammer2_inode_t *dip;
1348 	hammer2_tid_t inum;
1349 	int error;
1350 
1351 	dip = VTOI(ap->a_dvp);
1352 	inum = dip->meta.iparent;
1353 	*ap->a_vpp = NULL;
1354 
1355 	if (inum) {
1356 		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
1357 					 inum, ap->a_vpp);
1358 	} else {
1359 		error = ENOENT;
1360 	}
1361 	return error;
1362 }
1363 
1364 static
1365 int
1366 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1367 {
1368 	hammer2_inode_t *dip;
1369 	hammer2_inode_t *nip;
1370 	struct namecache *ncp;
1371 	const uint8_t *name;
1372 	size_t name_len;
1373 	hammer2_tid_t inum;
1374 	int error;
1375 
1376 	dip = VTOI(ap->a_dvp);
1377 	if (dip->pmp->ronly)
1378 		return (EROFS);
1379 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1380 		return (ENOSPC);
1381 
1382 	ncp = ap->a_nch->ncp;
1383 	name = ncp->nc_name;
1384 	name_len = ncp->nc_nlen;
1385 
1386 	hammer2_pfs_memory_wait(dip, 1);
1387 	hammer2_trans_init(dip->pmp, 0);
1388 
1389 	inum = hammer2_trans_newinum(dip->pmp);
1390 
1391 	/*
1392 	 * Create the actual inode as a hidden file in the iroot, then
1393 	 * create the directory entry.  The creation of the actual inode
1394 	 * sets its nlinks to 1 which is the value we desire.
1395 	 */
1396 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1397 				   NULL, 0, inum,
1398 				   inum, 0, 0,
1399 				   0, &error);
1400 	if (error) {
1401 		error = hammer2_error_to_errno(error);
1402 	} else {
1403 		error = hammer2_dirent_create(dip, name, name_len,
1404 					      nip->meta.inum, nip->meta.type);
1405 		/* returns UNIX error code */
1406 	}
1407 	if (error) {
1408 		if (nip) {
1409 			hammer2_inode_unlink_finisher(nip, 0);
1410 			hammer2_inode_unlock(nip);
1411 			nip = NULL;
1412 		}
1413 		*ap->a_vpp = NULL;
1414 	} else {
1415 		*ap->a_vpp = hammer2_igetv(nip, &error);
1416 		hammer2_inode_unlock(nip);
1417 	}
1418 
1419 	/*
1420 	 * Update dip's mtime
1421 	 *
1422 	 * We can use a shared inode lock and allow the meta.mtime update
1423 	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
1424 	 */
1425 	if (error == 0) {
1426 		uint64_t mtime;
1427 
1428 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1429 		hammer2_update_time(&mtime);
1430 		hammer2_inode_modify(dip);
1431 		dip->meta.mtime = mtime;
1432 		hammer2_inode_unlock(dip);
1433 	}
1434 
1435 	hammer2_trans_done(dip->pmp, 1);
1436 
1437 	if (error == 0) {
1438 		cache_setunresolved(ap->a_nch);
1439 		cache_setvp(ap->a_nch, *ap->a_vpp);
1440 		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1441 	}
1442 	return error;
1443 }
1444 
1445 static
1446 int
1447 hammer2_vop_open(struct vop_open_args *ap)
1448 {
1449 	return vop_stdopen(ap);
1450 }
1451 
1452 /*
1453  * hammer2_vop_advlock { vp, id, op, fl, flags }
1454  */
1455 static
1456 int
1457 hammer2_vop_advlock(struct vop_advlock_args *ap)
1458 {
1459 	hammer2_inode_t *ip = VTOI(ap->a_vp);
1460 	hammer2_off_t size;
1461 
1462 	size = ip->meta.size;
1463 	return (lf_advlock(ap, &ip->advlock, size));
1464 }
1465 
1466 static
1467 int
1468 hammer2_vop_close(struct vop_close_args *ap)
1469 {
1470 	return vop_stdclose(ap);
1471 }
1472 
1473 /*
1474  * hammer2_vop_nlink { nch, dvp, vp, cred }
1475  *
1476  * Create a hardlink from (vp) to {dvp, nch}.
1477  */
1478 static
1479 int
1480 hammer2_vop_nlink(struct vop_nlink_args *ap)
1481 {
1482 	hammer2_inode_t *tdip;	/* target directory to create link in */
1483 	hammer2_inode_t *ip;	/* inode we are hardlinking to */
1484 	struct namecache *ncp;
1485 	const uint8_t *name;
1486 	size_t name_len;
1487 	int error;
1488 
1489 	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1490 		return(EXDEV);
1491 
1492 	tdip = VTOI(ap->a_dvp);
1493 	if (tdip->pmp->ronly)
1494 		return (EROFS);
1495 	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
1496 		return (ENOSPC);
1497 
1498 	ncp = ap->a_nch->ncp;
1499 	name = ncp->nc_name;
1500 	name_len = ncp->nc_nlen;
1501 
1502 	/*
1503 	 * ip represents the file being hardlinked.  The file could be a
1504 	 * normal file or a hardlink target if it has already been hardlinked.
1505 	 * (with the new semantics, it will almost always be a hardlink
1506 	 * target).
1507 	 *
1508 	 * Bump nlinks and potentially also create or move the hardlink
1509 	 * target in the parent directory common to (ip) and (tdip).  The
1510 	 * consolidation code can modify ip->cluster.  The returned cluster
1511 	 * is locked.
1512 	 */
1513 	ip = VTOI(ap->a_vp);
1514 	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1515 	hammer2_pfs_memory_wait(ip, 0);
1516 	hammer2_trans_init(ip->pmp, 0);
1517 
1518 	/*
1519 	 * Target should be an indexed inode or there's no way we will ever
1520 	 * be able to find it!
1521 	 */
1522 	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1523 
1524 	error = 0;
1525 
1526 	/*
1527 	 * Can return NULL and error == EXDEV if the common parent
1528 	 * crosses a directory with the xlink flag set.
1529 	 */
1530 	hammer2_inode_lock(tdip, 0);
1531 	hammer2_inode_lock(ip, 0);
1532 
1533 	/*
1534 	 * Create the directory entry and bump nlinks.
1535 	 */
1536 	if (error == 0) {
1537 		error = hammer2_dirent_create(tdip, name, name_len,
1538 					      ip->meta.inum, ip->meta.type);
1539 		hammer2_inode_modify(ip);
1540 		++ip->meta.nlinks;
1541 	}
1542 	if (error == 0) {
1543 		/*
1544 		 * Update dip's mtime
1545 		 */
1546 		uint64_t mtime;
1547 
1548 		hammer2_update_time(&mtime);
1549 		hammer2_inode_modify(tdip);
1550 		tdip->meta.mtime = mtime;
1551 
1552 		cache_setunresolved(ap->a_nch);
1553 		cache_setvp(ap->a_nch, ap->a_vp);
1554 	}
1555 	hammer2_inode_unlock(ip);
1556 	hammer2_inode_unlock(tdip);
1557 
1558 	hammer2_trans_done(ip->pmp, 1);
1559 	hammer2_knote(ap->a_vp, NOTE_LINK);
1560 	hammer2_knote(ap->a_dvp, NOTE_WRITE);
1561 
1562 	return error;
1563 }
1564 
1565 /*
1566  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1567  *
1568  * The operating system has already ensured that the directory entry
1569  * does not exist and done all appropriate namespace locking.
1570  */
1571 static
1572 int
1573 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1574 {
1575 	hammer2_inode_t *dip;
1576 	hammer2_inode_t *nip;
1577 	struct namecache *ncp;
1578 	const uint8_t *name;
1579 	size_t name_len;
1580 	hammer2_tid_t inum;
1581 	int error;
1582 
1583 	dip = VTOI(ap->a_dvp);
1584 	if (dip->pmp->ronly)
1585 		return (EROFS);
1586 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1587 		return (ENOSPC);
1588 
1589 	ncp = ap->a_nch->ncp;
1590 	name = ncp->nc_name;
1591 	name_len = ncp->nc_nlen;
1592 	hammer2_pfs_memory_wait(dip, 1);
1593 	hammer2_trans_init(dip->pmp, 0);
1594 
1595 	inum = hammer2_trans_newinum(dip->pmp);
1596 
1597 	/*
1598 	 * Create the actual inode as a hidden file in the iroot, then
1599 	 * create the directory entry.  The creation of the actual inode
1600 	 * sets its nlinks to 1 which is the value we desire.
1601 	 */
1602 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1603 				   NULL, 0, inum,
1604 				   inum, 0, 0,
1605 				   0, &error);
1606 
1607 	if (error) {
1608 		error = hammer2_error_to_errno(error);
1609 	} else {
1610 		error = hammer2_dirent_create(dip, name, name_len,
1611 					      nip->meta.inum, nip->meta.type);
1612 	}
1613 	if (error) {
1614 		if (nip) {
1615 			hammer2_inode_unlink_finisher(nip, 0);
1616 			hammer2_inode_unlock(nip);
1617 			nip = NULL;
1618 		}
1619 		*ap->a_vpp = NULL;
1620 	} else {
1621 		*ap->a_vpp = hammer2_igetv(nip, &error);
1622 		hammer2_inode_unlock(nip);
1623 	}
1624 
1625 	/*
1626 	 * Update dip's mtime
1627 	 */
1628 	if (error == 0) {
1629 		uint64_t mtime;
1630 
1631 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1632 		hammer2_update_time(&mtime);
1633 		hammer2_inode_modify(dip);
1634 		dip->meta.mtime = mtime;
1635 		hammer2_inode_unlock(dip);
1636 	}
1637 
1638 	hammer2_trans_done(dip->pmp, 1);
1639 
1640 	if (error == 0) {
1641 		cache_setunresolved(ap->a_nch);
1642 		cache_setvp(ap->a_nch, *ap->a_vpp);
1643 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1644 	}
1645 	return error;
1646 }
1647 
1648 /*
1649  * Make a device node (typically a fifo)
1650  */
1651 static
1652 int
1653 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1654 {
1655 	hammer2_inode_t *dip;
1656 	hammer2_inode_t *nip;
1657 	struct namecache *ncp;
1658 	const uint8_t *name;
1659 	size_t name_len;
1660 	hammer2_tid_t inum;
1661 	int error;
1662 
1663 	dip = VTOI(ap->a_dvp);
1664 	if (dip->pmp->ronly)
1665 		return (EROFS);
1666 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1667 		return (ENOSPC);
1668 
1669 	ncp = ap->a_nch->ncp;
1670 	name = ncp->nc_name;
1671 	name_len = ncp->nc_nlen;
1672 	hammer2_pfs_memory_wait(dip, 1);
1673 	hammer2_trans_init(dip->pmp, 0);
1674 
1675 	/*
1676 	 * Create the device inode and then create the directory entry.
1677 	 */
1678 	inum = hammer2_trans_newinum(dip->pmp);
1679 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1680 				   NULL, 0, inum,
1681 				   inum, 0, 0,
1682 				   0, &error);
1683 	if (error == 0) {
1684 		error = hammer2_dirent_create(dip, name, name_len,
1685 					      nip->meta.inum, nip->meta.type);
1686 	}
1687 	if (error) {
1688 		if (nip) {
1689 			hammer2_inode_unlink_finisher(nip, 0);
1690 			hammer2_inode_unlock(nip);
1691 			nip = NULL;
1692 		}
1693 		*ap->a_vpp = NULL;
1694 	} else {
1695 		*ap->a_vpp = hammer2_igetv(nip, &error);
1696 		hammer2_inode_unlock(nip);
1697 	}
1698 
1699 	/*
1700 	 * Update dip's mtime
1701 	 */
1702 	if (error == 0) {
1703 		uint64_t mtime;
1704 
1705 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1706 		hammer2_update_time(&mtime);
1707 		hammer2_inode_modify(dip);
1708 		dip->meta.mtime = mtime;
1709 		hammer2_inode_unlock(dip);
1710 	}
1711 
1712 	hammer2_trans_done(dip->pmp, 1);
1713 
1714 	if (error == 0) {
1715 		cache_setunresolved(ap->a_nch);
1716 		cache_setvp(ap->a_nch, *ap->a_vpp);
1717 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1718 	}
1719 	return error;
1720 }
1721 
1722 /*
1723  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1724  */
1725 static
1726 int
1727 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1728 {
1729 	hammer2_inode_t *dip;
1730 	hammer2_inode_t *nip;
1731 	struct namecache *ncp;
1732 	const uint8_t *name;
1733 	size_t name_len;
1734 	hammer2_tid_t inum;
1735 	int error;
1736 
1737 	dip = VTOI(ap->a_dvp);
1738 	if (dip->pmp->ronly)
1739 		return (EROFS);
1740 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1741 		return (ENOSPC);
1742 
1743 	ncp = ap->a_nch->ncp;
1744 	name = ncp->nc_name;
1745 	name_len = ncp->nc_nlen;
1746 	hammer2_pfs_memory_wait(dip, 1);
1747 	hammer2_trans_init(dip->pmp, 0);
1748 
1749 	ap->a_vap->va_type = VLNK;	/* enforce type */
1750 
1751 	/*
1752 	 * Create the softlink as an inode and then create the directory
1753 	 * entry.
1754 	 */
1755 	inum = hammer2_trans_newinum(dip->pmp);
1756 
1757 	nip = hammer2_inode_create(dip->pmp->iroot, dip, ap->a_vap, ap->a_cred,
1758 				   NULL, 0, inum,
1759 				   inum, 0, 0,
1760 				   0, &error);
1761 	if (error == 0) {
1762 		error = hammer2_dirent_create(dip, name, name_len,
1763 					      nip->meta.inum, nip->meta.type);
1764 	}
1765 	if (error) {
1766 		if (nip) {
1767 			hammer2_inode_unlink_finisher(nip, 0);
1768 			hammer2_inode_unlock(nip);
1769 			nip = NULL;
1770 		}
1771 		*ap->a_vpp = NULL;
1772 		hammer2_trans_done(dip->pmp, 1);
1773 		return error;
1774 	}
1775 	*ap->a_vpp = hammer2_igetv(nip, &error);
1776 
1777 	/*
1778 	 * Build the softlink (~like file data) and finalize the namecache.
1779 	 */
1780 	if (error == 0) {
1781 		size_t bytes;
1782 		struct uio auio;
1783 		struct iovec aiov;
1784 
1785 		bytes = strlen(ap->a_target);
1786 
1787 		hammer2_inode_unlock(nip);
1788 		bzero(&auio, sizeof(auio));
1789 		bzero(&aiov, sizeof(aiov));
1790 		auio.uio_iov = &aiov;
1791 		auio.uio_segflg = UIO_SYSSPACE;
1792 		auio.uio_rw = UIO_WRITE;
1793 		auio.uio_resid = bytes;
1794 		auio.uio_iovcnt = 1;
1795 		auio.uio_td = curthread;
1796 		aiov.iov_base = ap->a_target;
1797 		aiov.iov_len = bytes;
1798 		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1799 		/* XXX handle error */
1800 		error = 0;
1801 	} else {
1802 		hammer2_inode_unlock(nip);
1803 	}
1804 
1805 	/*
1806 	 * Update dip's mtime
1807 	 */
1808 	if (error == 0) {
1809 		uint64_t mtime;
1810 
1811 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1812 		hammer2_update_time(&mtime);
1813 		hammer2_inode_modify(dip);
1814 		dip->meta.mtime = mtime;
1815 		hammer2_inode_unlock(dip);
1816 	}
1817 
1818 	hammer2_trans_done(dip->pmp, 1);
1819 
1820 	/*
1821 	 * Finalize namecache
1822 	 */
1823 	if (error == 0) {
1824 		cache_setunresolved(ap->a_nch);
1825 		cache_setvp(ap->a_nch, *ap->a_vpp);
1826 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1827 	}
1828 	return error;
1829 }
1830 
1831 /*
1832  * hammer2_vop_nremove { nch, dvp, cred }
1833  */
1834 static
1835 int
1836 hammer2_vop_nremove(struct vop_nremove_args *ap)
1837 {
1838 	hammer2_xop_unlink_t *xop;
1839 	hammer2_inode_t *dip;
1840 	hammer2_inode_t *ip;
1841 	struct namecache *ncp;
1842 	int error;
1843 	int isopen;
1844 
1845 	dip = VTOI(ap->a_dvp);
1846 	if (dip->pmp->ronly)
1847 		return (EROFS);
1848 #if 0
1849 	/* allow removals, but expect the user to also bulkfree */
1850 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1851 		return (ENOSPC);
1852 #endif
1853 
1854 	ncp = ap->a_nch->ncp;
1855 
1856 	hammer2_pfs_memory_wait(dip, 1);
1857 	hammer2_trans_init(dip->pmp, 0);
1858 	hammer2_inode_lock(dip, 0);
1859 
1860 	/*
1861 	 * The unlink XOP unlinks the path from the directory and
1862 	 * locates and returns the cluster associated with the real inode.
1863 	 * We have to handle nlinks here on the frontend.
1864 	 */
1865 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1866 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1867 
1868 	/*
1869 	 * The namecache entry is locked so nobody can use this namespace.
1870 	 * Calculate isopen to determine if this namespace has an open vp
1871 	 * associated with it and resolve the vp only if it does.
1872 	 *
1873 	 * We try to avoid resolving the vnode if nobody has it open, but
1874 	 * note that the test is via this namespace only.
1875 	 */
1876 	isopen = cache_isopen(ap->a_nch);
1877 	xop->isdir = 0;
1878 	xop->dopermanent = 0;
1879 	hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1880 
1881 	/*
1882 	 * Collect the real inode and adjust nlinks, destroy the real
1883 	 * inode if nlinks transitions to 0 and it was the real inode
1884 	 * (else it has already been removed).
1885 	 */
1886 	error = hammer2_xop_collect(&xop->head, 0);
1887 	error = hammer2_error_to_errno(error);
1888 	hammer2_inode_unlock(dip);
1889 
1890 	if (error == 0) {
1891 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head, -1);
1892 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1893 		if (ip) {
1894 			hammer2_inode_unlink_finisher(ip, isopen);
1895 			hammer2_inode_unlock(ip);
1896 		}
1897 	} else {
1898 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1899 	}
1900 
1901 	/*
1902 	 * Update dip's mtime
1903 	 */
1904 	if (error == 0) {
1905 		uint64_t mtime;
1906 
1907 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1908 		hammer2_update_time(&mtime);
1909 		hammer2_inode_modify(dip);
1910 		dip->meta.mtime = mtime;
1911 		hammer2_inode_unlock(dip);
1912 	}
1913 
1914 	hammer2_trans_done(dip->pmp, 1);
1915 	if (error == 0) {
1916 		cache_unlink(ap->a_nch);
1917 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1918 	}
1919 	return (error);
1920 }
1921 
1922 /*
1923  * hammer2_vop_nrmdir { nch, dvp, cred }
1924  */
1925 static
1926 int
1927 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
1928 {
1929 	hammer2_xop_unlink_t *xop;
1930 	hammer2_inode_t *dip;
1931 	hammer2_inode_t *ip;
1932 	struct namecache *ncp;
1933 	int isopen;
1934 	int error;
1935 
1936 	dip = VTOI(ap->a_dvp);
1937 	if (dip->pmp->ronly)
1938 		return (EROFS);
1939 #if 0
1940 	/* allow removals, but expect the user to also bulkfree */
1941 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1942 		return (ENOSPC);
1943 #endif
1944 
1945 	hammer2_pfs_memory_wait(dip, 1);
1946 	hammer2_trans_init(dip->pmp, 0);
1947 	hammer2_inode_lock(dip, 0);
1948 
1949 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1950 
1951 	ncp = ap->a_nch->ncp;
1952 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1953 	isopen = cache_isopen(ap->a_nch);
1954 	xop->isdir = 1;
1955 	xop->dopermanent = 0;
1956 	hammer2_xop_start(&xop->head, hammer2_xop_unlink);
1957 
1958 	/*
1959 	 * Collect the real inode and adjust nlinks; destroy the real
1960 	 * inode if nlinks transitions to 0 and it was the real inode
1961 	 * (otherwise it has already been removed).
1962 	 */
1963 	error = hammer2_xop_collect(&xop->head, 0);
1964 	error = hammer2_error_to_errno(error);
1965 	hammer2_inode_unlock(dip);
1966 
1967 	if (error == 0) {
1968 		ip = hammer2_inode_get(dip->pmp, dip, &xop->head, -1);
1969 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1970 		if (ip) {
1971 			hammer2_inode_unlink_finisher(ip, isopen);
1972 			hammer2_inode_unlock(ip);
1973 		}
1974 	} else {
1975 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1976 	}
1977 
1978 	/*
1979 	 * Update dip's mtime
1980 	 */
1981 	if (error == 0) {
1982 		uint64_t mtime;
1983 
1984 		hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1985 		hammer2_update_time(&mtime);
1986 		hammer2_inode_modify(dip);
1987 		dip->meta.mtime = mtime;
1988 		hammer2_inode_unlock(dip);
1989 	}
1990 
1991 	hammer2_trans_done(dip->pmp, 1);
1992 	if (error == 0) {
1993 		cache_unlink(ap->a_nch);
1994 		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1995 	}
1996 	return (error);
1997 }
1998 
1999 /*
2000  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
2001  */
2002 static
2003 int
2004 hammer2_vop_nrename(struct vop_nrename_args *ap)
2005 {
2006 	struct namecache *fncp;
2007 	struct namecache *tncp;
2008 	hammer2_inode_t *fdip;	/* source directory */
2009 	hammer2_inode_t *tdip;	/* target directory */
2010 	hammer2_inode_t *ip;	/* file being renamed */
2011 	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
2012 	const uint8_t *fname;
2013 	size_t fname_len;
2014 	const uint8_t *tname;
2015 	size_t tname_len;
2016 	int error;
2017 	int update_tdip;
2018 	int update_fdip;
2019 	hammer2_key_t tlhc;
2020 
2021 	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
2022 		return(EXDEV);
2023 	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
2024 		return(EXDEV);
2025 
2026 	fdip = VTOI(ap->a_fdvp);	/* source directory */
2027 	tdip = VTOI(ap->a_tdvp);	/* target directory */
2028 
2029 	if (fdip->pmp->ronly)
2030 		return (EROFS);
2031 	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
2032 		return (ENOSPC);
2033 
2034 	fncp = ap->a_fnch->ncp;		/* entry name in source */
2035 	fname = fncp->nc_name;
2036 	fname_len = fncp->nc_nlen;
2037 
2038 	tncp = ap->a_tnch->ncp;		/* entry name in target */
2039 	tname = tncp->nc_name;
2040 	tname_len = tncp->nc_nlen;
2041 
2042 	hammer2_pfs_memory_wait(tdip, 0);
2043 	hammer2_trans_init(tdip->pmp, 0);
2044 
2045 	update_tdip = 0;
2046 	update_fdip = 0;
2047 
2048 	ip = VTOI(fncp->nc_vp);
2049 	hammer2_inode_ref(ip);		/* extra ref */
2050 
2051 	/*
2052 	 * Lookup the target name to determine if a directory entry
2053 	 * is being overwritten.  We only hold related inode locks
2054 	 * temporarily; the operating system is expected to protect
2055 	 * against rename races.
2056 	 */
2057 	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
2058 	if (tip)
2059 		hammer2_inode_ref(tip);	/* extra ref */
2060 
2061 	/*
2062 	 * Lock the two directories, then the two inodes, in pointer-
2063 	 * address order.
2064 	 *
2065 	 * For now this simple, consistent lock ordering is sufficient
2066 	 * to avoid deadlocks.  (tip) can be NULL.
2067 	 */
2068 	error = 0;
2069 	if (fdip <= tdip) {
2070 		hammer2_inode_lock(fdip, 0);
2071 		hammer2_inode_lock(tdip, 0);
2072 	} else {
2073 		hammer2_inode_lock(tdip, 0);
2074 		hammer2_inode_lock(fdip, 0);
2075 	}
2076 	if (tip) {
2077 		if (ip <= tip) {
2078 			hammer2_inode_lock(ip, 0);
2079 			hammer2_inode_lock(tip, 0);
2080 		} else {
2081 			hammer2_inode_lock(tip, 0);
2082 			hammer2_inode_lock(ip, 0);
2083 		}
2084 	} else {
2085 		hammer2_inode_lock(ip, 0);
2086 	}
2087 
2088 #if 0
2089 	/*
2090 	 * Delete the target namespace.
2091 	 *
2092 	 * REMOVED - NOW FOLDED INTO XOP_NRENAME OPERATION
2093 	 */
2094 	{
2095 		hammer2_xop_unlink_t *xop2;
2096 		hammer2_inode_t *tip;
2097 		int isopen;
2098 
2099 		/*
2100 		 * The unlink XOP unlinks the path from the directory and
2101 		 * locates and returns the cluster associated with the real
2102 		 * inode.  We have to handle nlinks here on the frontend.
2103 		 */
2104 		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2105 		hammer2_xop_setname(&xop2->head, tname, tname_len);
2106 		isopen = cache_isopen(ap->a_tnch);
2107 		xop2->isdir = -1;
2108 		xop2->dopermanent = 0;
2109 		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);
2110 
2111 		/*
2112 		 * Collect the real inode and adjust nlinks, destroy the real
2113 		 * inode if nlinks transitions to 0 and it was the real inode
2114 		 * (else it has already been removed).
2115 		 */
2116 		tnch_error = hammer2_xop_collect(&xop2->head, 0);
2117 		tnch_error = hammer2_error_to_errno(tnch_error);
2118 		/* hammer2_inode_unlock(tdip); */
2119 
2120 		if (tnch_error == 0) {
2121 			tip = hammer2_inode_get(tdip->pmp, NULL,
2122 						&xop2->head, -1);
2123 			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2124 			if (tip) {
2125 				hammer2_inode_unlink_finisher(tip, isopen);
2126 				hammer2_inode_unlock(tip);
2127 			}
2128 		} else {
2129 			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
2130 		}
2131 		/* hammer2_inode_lock(tdip, 0); */
2132 
2133 		if (tnch_error && tnch_error != ENOENT) {
2134 			error = tnch_error;
2135 			goto done2;
2136 		}
2137 		update_tdip = 1;
2138 	}
2139 #endif
2140 
2141 	/*
2142 	 * Resolve the collision space for (tdip, tname, tname_len)
2143 	 *
2144 	 * tdip must be held exclusively locked to prevent races since
2145 	 * multiple filenames can end up in the same collision space.
2146 	 */
2147 	{
2148 		hammer2_xop_scanlhc_t *sxop;
2149 		hammer2_tid_t lhcbase;
2150 
2151 		tlhc = hammer2_dirhash(tname, tname_len);
2152 		lhcbase = tlhc;
2153 		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2154 		sxop->lhc = tlhc;
2155 		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
2156 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2157 			if (tlhc != sxop->head.cluster.focus->bref.key)
2158 				break;
2159 			++tlhc;
2160 		}
2161 		error = hammer2_error_to_errno(error);
2162 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2163 
2164 		if (error) {
2165 			if (error != ENOENT)
2166 				goto done2;
2167 			++tlhc;
2168 			error = 0;
2169 		}
2170 		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2171 			error = ENOSPC;
2172 			goto done2;
2173 		}
2174 	}
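
	/*
	 * tlhc now holds a free directory-hash key: the high bits come
	 * from hammer2_dirhash(tname) and the low bits (the collision
	 * space covered by HAMMER2_DIRHASH_LOMASK) were advanced past
	 * the existing entries returned by the lhc scan.  Exhausting
	 * the collision space returned ENOSPC above.
	 */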
2175 
2176 	/*
2177 	 * Ready to go: issue the rename to the backend.  Note that meta-data
2178 	 * updates to the related inodes occur separately from the rename
2179 	 * operation.
2180 	 *
2181 	 * NOTE: While it is not necessary to update ip->meta.name*, doing
2182 	 *	 so aids catastrophic recovery and debugging.
2183 	 */
2184 	if (error == 0) {
2185 		hammer2_xop_nrename_t *xop4;
2186 
2187 		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
2188 		xop4->lhc = tlhc;
2189 		xop4->ip_key = ip->meta.name_key;
2190 		hammer2_xop_setip2(&xop4->head, ip);
2191 		hammer2_xop_setip3(&xop4->head, tdip);
2192 		hammer2_xop_setname(&xop4->head, fname, fname_len);
2193 		hammer2_xop_setname2(&xop4->head, tname, tname_len);
2194 		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);
2195 
2196 		error = hammer2_xop_collect(&xop4->head, 0);
2197 		error = hammer2_error_to_errno(error);
2198 		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
2199 
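		/*
		 * XXX an ENOENT from the backend is swallowed here;
		 * presumably a node which no longer sees the source
		 * entry should not fail the rename as a whole.
		 */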
2200 		if (error == ENOENT)
2201 			error = 0;
2202 
2203 		/*
2204 		 * Update inode meta-data.
2205 		 *
2206 		 * WARNING!  The in-memory inode (ip) structure does not
2207 		 *	     maintain a copy of the inode's filename buffer.
2208 		 */
2209 		if (error == 0 &&
2210 		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
2211 			hammer2_inode_modify(ip);
2212 			ip->meta.name_len = tname_len;
2213 			ip->meta.name_key = tlhc;
2214 		}
2215 		if (error == 0) {
2216 			hammer2_inode_modify(ip);
2217 			ip->meta.iparent = tdip->meta.inum;
2218 		}
2219 		update_fdip = 1;
2220 		update_tdip = 1;
2221 	}
2222 
2223 done2:
2224 	/*
2225 	 * If no error, the backend has replaced the target directory entry.
2226 	 * We must adjust nlinks on the replaced target, if one existed.
2227 	 */
2228 	if (error == 0 && tip) {
2229 		int isopen;
2230 
2231 		isopen = cache_isopen(ap->a_tnch);
2232 		hammer2_inode_unlink_finisher(tip, isopen);
2233 	}
2234 
2235 	/*
2236 	 * Update the directory mtimes to reflect that something changed.
2237 	 */
2238 	if (update_fdip || update_tdip) {
2239 		uint64_t mtime;
2240 
2241 		hammer2_update_time(&mtime);
2242 		if (update_fdip) {
2243 			hammer2_inode_modify(fdip);
2244 			fdip->meta.mtime = mtime;
2245 		}
2246 		if (update_tdip) {
2247 			hammer2_inode_modify(tdip);
2248 			tdip->meta.mtime = mtime;
2249 		}
2250 	}
2251 	if (tip) {
2252 		hammer2_inode_unlock(tip);
2253 		hammer2_inode_drop(tip);
2254 	}
2255 	hammer2_inode_unlock(ip);
2256 	hammer2_inode_unlock(tdip);
2257 	hammer2_inode_unlock(fdip);
2258 	hammer2_inode_drop(ip);
2259 	hammer2_trans_done(tdip->pmp, 1);
2260 
2261 	/*
2262 	 * Issue the namecache update after unlocking all the internal
2263 	 * hammer2 structures; otherwise we might deadlock.
2264 	 *
2265 	 * WARNING! The target namespace must be updated atomically,
2266 	 *	    and we depend on cache_rename() to handle that for
2267 	 *	    us.  Do not do a separate cache_unlink() because
2268 	 *	    that leaves a small window of opportunity for other
2269 	 *	    threads to allocate the target namespace before we
2270 	 *	    manage to complete our rename.
2271 	 *
2272 	 * WARNING! cache_rename() (and cache_unlink()) will properly
2273 	 *	    set VREF_FINALIZE on any attached vnode.  Do not
2274 	 *	    call cache_setunresolved() manually before-hand as
2275 	 *	    this will prevent the flag from being set later via
2276 	 *	    cache_rename().  If VREF_FINALIZE is not properly set
2277 	 *	    and the inode is no longer in the topology, related
2278 	 *	    chains can remain dirty indefinitely.
2279 	 */
2280 	/*
2281 	 * NOTE: Even when (tip) was replaced, do not cache_unlink() or
2282 	 *	 cache_setunresolved() (ap->a_tnch) here; see above.
2283 	 */
2284 	if (error == 0) {
2285 		cache_rename(ap->a_fnch, ap->a_tnch);
2286 		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
2287 		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
2288 		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
2289 	}
2290 
2291 	return (error);
2292 }
2293 
2294 /*
2295  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2296  */
2297 static
2298 int
2299 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2300 {
2301 	hammer2_inode_t *ip;
2302 	int error;
2303 
2304 	ip = VTOI(ap->a_vp);
2305 
2306 	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2307 			      ap->a_fflag, ap->a_cred);
2308 	return (error);
2309 }
2310 
2311 static
2312 int
2313 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2314 {
2315 	struct mount *mp;
2316 	hammer2_pfs_t *pmp;
2317 	int rc;
2318 
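	/*
	 * Only MOUNTCTL_SET_EXPORT (NFS export configuration via
	 * vfs_export()) is handled locally; everything else falls
	 * through to vop_stdmountctl().
	 */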
2319 	switch (ap->a_op) {
2320 	case (MOUNTCTL_SET_EXPORT):
2321 		mp = ap->a_head.a_ops->head.vv_mount;
2322 		pmp = MPTOPMP(mp);
2323 
2324 		if (ap->a_ctllen != sizeof(struct export_args))
2325 			rc = (EINVAL);
2326 		else
2327 			rc = vfs_export(mp, &pmp->export,
2328 					(const struct export_args *)ap->a_ctl);
2329 		break;
2330 	default:
2331 		rc = vop_stdmountctl(ap);
2332 		break;
2333 	}
2334 	return (rc);
2335 }
2336 
2337 /*
2338  * KQFILTER
2339  */
2340 static void filt_hammer2detach(struct knote *kn);
2341 static int filt_hammer2read(struct knote *kn, long hint);
2342 static int filt_hammer2write(struct knote *kn, long hint);
2343 static int filt_hammer2vnode(struct knote *kn, long hint);
2344 
2345 static struct filterops hammer2read_filtops =
2346 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2347 	  NULL, filt_hammer2detach, filt_hammer2read };
2348 static struct filterops hammer2write_filtops =
2349 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2350 	  NULL, filt_hammer2detach, filt_hammer2write };
2351 static struct filterops hammer2vnode_filtops =
2352 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2353 	  NULL, filt_hammer2detach, filt_hammer2vnode };
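
/*
 * All three filters are FILTEROP_MPSAFE and fd-based, attach to the
 * vnode's kqinfo note list, and share a common detach routine.
 */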
2354 
2355 static
2356 int
2357 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2358 {
2359 	struct vnode *vp = ap->a_vp;
2360 	struct knote *kn = ap->a_kn;
2361 
2362 	switch (kn->kn_filter) {
2363 	case EVFILT_READ:
2364 		kn->kn_fop = &hammer2read_filtops;
2365 		break;
2366 	case EVFILT_WRITE:
2367 		kn->kn_fop = &hammer2write_filtops;
2368 		break;
2369 	case EVFILT_VNODE:
2370 		kn->kn_fop = &hammer2vnode_filtops;
2371 		break;
2372 	default:
2373 		return (EOPNOTSUPP);
2374 	}
2375 
2376 	kn->kn_hook = (caddr_t)vp;
2377 
2378 	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2379 
2380 	return(0);
2381 }
2382 
2383 static void
2384 filt_hammer2detach(struct knote *kn)
2385 {
2386 	struct vnode *vp = (void *)kn->kn_hook;
2387 
2388 	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2389 }
2390 
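/*
 * Readable when bytes remain between the descriptor's file offset and
 * EOF.  kn_data is clamped to INTPTR_MAX, and callers with NOTE_OLDAPI
 * (select/poll emulation) set always poll true.
 */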
2391 static int
2392 filt_hammer2read(struct knote *kn, long hint)
2393 {
2394 	struct vnode *vp = (void *)kn->kn_hook;
2395 	hammer2_inode_t *ip = VTOI(vp);
2396 	off_t off;
2397 
2398 	if (hint == NOTE_REVOKE) {
2399 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2400 		return(1);
2401 	}
2402 	off = ip->meta.size - kn->kn_fp->f_offset;
2403 	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2404 	if (kn->kn_sfflags & NOTE_OLDAPI)
2405 		return(1);
2406 	return (kn->kn_data != 0);
2407 }
2408 
2409 
2410 static int
2411 filt_hammer2write(struct knote *kn, long hint)
2412 {
2413 	if (hint == NOTE_REVOKE)
2414 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2415 	kn->kn_data = 0;
2416 	return (1);
2417 }
2418 
2419 static int
2420 filt_hammer2vnode(struct knote *kn, long hint)
2421 {
2422 	if (kn->kn_sfflags & hint)
2423 		kn->kn_fflags |= hint;
2424 	if (hint == NOTE_REVOKE) {
2425 		kn->kn_flags |= (EV_EOF | EV_NODATA);
2426 		return (1);
2427 	}
2428 	return (kn->kn_fflags != 0);
2429 }
2430 
2431 /*
2432  * FIFO VOPS
2433  */
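
/*
 * hammer2 does not update atime on access, so markatime only has to
 * honor the read-only check.
 */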
2434 static
2435 int
2436 hammer2_vop_markatime(struct vop_markatime_args *ap)
2437 {
2438 	hammer2_inode_t *ip;
2439 	struct vnode *vp;
2440 
2441 	vp = ap->a_vp;
2442 	ip = VTOI(vp);
2443 
2444 	if (ip->pmp->ronly)
2445 		return (EROFS);
2446 	return(0);
2447 }
2448 
2449 static
2450 int
2451 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2452 {
2453 	int error;
2454 
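	/*
	 * Give fifofs first crack at the filter; fall back to the
	 * normal hammer2 kqfilter (e.g. for EVFILT_VNODE) if fifofs
	 * does not support it.
	 */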
2455 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2456 	if (error)
2457 		error = hammer2_vop_kqfilter(ap);
2458 	return(error);
2459 }
2460 
2461 /*
2462  * VOPS vector
2463  */
2464 struct vop_ops hammer2_vnode_vops = {
2465 	.vop_default	= vop_defaultop,
2466 	.vop_fsync	= hammer2_vop_fsync,
2467 	.vop_getpages	= vop_stdgetpages,
2468 	.vop_putpages	= vop_stdputpages,
2469 	.vop_access	= hammer2_vop_access,
2470 	.vop_advlock	= hammer2_vop_advlock,
2471 	.vop_close	= hammer2_vop_close,
2472 	.vop_nlink	= hammer2_vop_nlink,
2473 	.vop_ncreate	= hammer2_vop_ncreate,
2474 	.vop_nsymlink	= hammer2_vop_nsymlink,
2475 	.vop_nremove	= hammer2_vop_nremove,
2476 	.vop_nrmdir	= hammer2_vop_nrmdir,
2477 	.vop_nrename	= hammer2_vop_nrename,
2478 	.vop_getattr	= hammer2_vop_getattr,
2479 	.vop_setattr	= hammer2_vop_setattr,
2480 	.vop_readdir	= hammer2_vop_readdir,
2481 	.vop_readlink	= hammer2_vop_readlink,
2482 	.vop_read	= hammer2_vop_read,
2483 	.vop_write	= hammer2_vop_write,
2484 	.vop_open	= hammer2_vop_open,
2485 	.vop_inactive	= hammer2_vop_inactive,
2486 	.vop_reclaim 	= hammer2_vop_reclaim,
2487 	.vop_nresolve	= hammer2_vop_nresolve,
2488 	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2489 	.vop_nmkdir 	= hammer2_vop_nmkdir,
2490 	.vop_nmknod 	= hammer2_vop_nmknod,
2491 	.vop_ioctl	= hammer2_vop_ioctl,
2492 	.vop_mountctl	= hammer2_vop_mountctl,
2493 	.vop_bmap	= hammer2_vop_bmap,
2494 	.vop_strategy	= hammer2_vop_strategy,
2495 	.vop_kqfilter	= hammer2_vop_kqfilter
2496 };
2497 
2498 struct vop_ops hammer2_spec_vops = {
2499 	.vop_default	= vop_defaultop,
2500 	.vop_fsync	= hammer2_vop_fsync,
2501 	.vop_read	= vop_stdnoread,
2502 	.vop_write	= vop_stdnowrite,
2503 	.vop_access	= hammer2_vop_access,
2504 	.vop_close	= hammer2_vop_close,
2505 	.vop_markatime	= hammer2_vop_markatime,
2506 	.vop_getattr	= hammer2_vop_getattr,
2507 	.vop_inactive	= hammer2_vop_inactive,
2508 	.vop_reclaim	= hammer2_vop_reclaim,
2509 	.vop_setattr	= hammer2_vop_setattr
2510 };
2511 
2512 struct vop_ops hammer2_fifo_vops = {
2513 	.vop_default	= fifo_vnoperate,
2514 	.vop_fsync	= hammer2_vop_fsync,
2515 #if 0
2516 	.vop_read	= hammer2_vop_fiforead,
2517 	.vop_write	= hammer2_vop_fifowrite,
2518 #endif
2519 	.vop_access	= hammer2_vop_access,
2520 #if 0
2521 	.vop_close	= hammer2_vop_fifoclose,
2522 #endif
2523 	.vop_markatime	= hammer2_vop_markatime,
2524 	.vop_getattr	= hammer2_vop_getattr,
2525 	.vop_inactive	= hammer2_vop_inactive,
2526 	.vop_reclaim	= hammer2_vop_reclaim,
2527 	.vop_setattr	= hammer2_vop_setattr,
2528 	.vop_kqfilter	= hammer2_vop_fifokqfilter
2529 };
2530 
2531