xref: /dragonfly/sys/vfs/hammer2/hammer2_vnops.c (revision 8f2ce533)
1 /*
2  * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7  * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 /*
37  * Kernel Filesystem interface
38  *
39  * NOTE! local ipdata pointers must be reloaded on any modifying operation
40  *	 to the inode as its underlying chain may have changed.
41  */
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/fcntl.h>
47 #include <sys/buf.h>
48 #include <sys/proc.h>
49 #include <sys/mount.h>
50 #include <sys/vnode.h>
51 #include <sys/mountctl.h>
52 #include <sys/dirent.h>
53 #include <sys/uio.h>
54 #include <sys/objcache.h>
55 #include <sys/event.h>
56 #include <sys/file.h>
57 #include <vfs/fifofs/fifo.h>
58 
59 #include "hammer2.h"
60 
61 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
62 				int seqcount);
63 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
64 				int ioflag, int seqcount);
65 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
66 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);
67 
68 /*
69  * Last reference to a vnode is going away but it is still cached.
70  */
71 static
72 int
73 hammer2_vop_inactive(struct vop_inactive_args *ap)
74 {
75 	hammer2_inode_t *ip;
76 	struct vnode *vp;
77 
78 	vp = ap->a_vp;
79 	ip = VTOI(vp);
80 
81 	/*
82 	 * Degenerate case
83 	 */
84 	if (ip == NULL) {
85 		vrecycle(vp);
86 		return (0);
87 	}
88 
89 	/*
90 	 * Acquire the inode lock to interlock against vp updates via
91 	 * the inode path and file deletions and such (which can be
92 	 * namespace-only operations that might not hold the vnode).
93 	 */
94 	hammer2_inode_lock(ip, 0);
95 	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
96 		hammer2_key_t lbase;
97 		int nblksize;
98 
99 		/*
100 		 * If the inode has been unlinked we can throw away all
101 		 * buffers (dirty or not) and clean the file out.
102 		 *
103 		 * Because vrecycle() calls are not guaranteed, try to
104 		 * dispose of the inode as much as possible right here.
105 		 */
106 		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
107 		nvtruncbuf(vp, 0, nblksize, 0, 0);
108 
109 		/*
110 		 * Delete the file on-media.
111 		 */
112 		if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
113 			atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
114 			hammer2_inode_delayed_sideq(ip);
115 		}
116 		hammer2_inode_unlock(ip);
117 
118 		/*
119 		 * Recycle immediately if possible
120 		 */
121 		vrecycle(vp);
122 	} else {
123 		hammer2_inode_unlock(ip);
124 	}
125 	return (0);
126 }
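
/*
 * Disposal of an unlinked inode thus occurs in two stages: the
 * inactivation above rips out the buffer cache and queues the on-media
 * deletion via the sideq, and a successful vrecycle() then drives the
 * vnode into hammer2_vop_reclaim() below to disassociate the inode.
 */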
127 
128 /*
129  * Reclaim a vnode so that it can be reused; after the inode is
130  * disassociated, the filesystem must manage it alone.
131  */
132 static
133 int
134 hammer2_vop_reclaim(struct vop_reclaim_args *ap)
135 {
136 	hammer2_inode_t *ip;
137 	hammer2_pfs_t *pmp;
138 	struct vnode *vp;
139 
140 	vp = ap->a_vp;
141 	ip = VTOI(vp);
142 	if (ip == NULL)
143 		return(0);
144 
145 	pmp = ip->pmp;
146 
147 	/*
148 	 * NOTE! We do not attempt to flush chains here; flushing is
149 	 *	 really fragile and could also deadlock.
150 	 */
151 	vclrisdirty(vp);
152 
153 	/*
154 	 * The inode lock is required to disconnect it.
155 	 */
156 	hammer2_inode_lock(ip, 0);
157 	vp->v_data = NULL;
158 	ip->vp = NULL;
159 
160 	/*
161 	 * Delete the file on-media.  This should have been handled by the
162 	 * inactivation.  The operation is likely still queued on the inode
163 	 * though, so only complain if the stars don't align.
164 	 */
165 	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED | HAMMER2_INODE_DELETING)) ==
166 	    HAMMER2_INODE_ISUNLINKED)
167 	{
168 		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
169 		hammer2_inode_delayed_sideq(ip);
170 		kprintf("hammer2: vp=%p ip=%p unlinked but not disposed\n",
171 			vp, ip);
172 	}
173 	hammer2_inode_unlock(ip);
174 
175 	/*
176 	 * Modified inodes will already be on SIDEQ or SYNCQ, no further
177 	 * action is needed.
178 	 *
179 	 * We cannot safely synchronize the inode from inside the reclaim
180 	 * due to potentially deep locks held as-of when the reclaim occurs.
181 	 * Interactions and potential deadlocks abound.  We also can't do it
182 	 * here without desynchronizing from the related directory entries.
183 	 */
184 	hammer2_inode_drop(ip);			/* vp ref */
185 
186 	/*
187 	 * XXX handle background sync when ip dirty, kernel will no longer
188 	 * notify us regarding this inode because there is no longer a
189 	 * vnode attached to it.
190 	 */
191 
192 	return (0);
193 }
194 
195 /*
196  * Currently this function synchronizes the front-end inode state to the
197  * backend chain topology, then flushes the inode's chain and sub-topology
198  * to backend media.  This function does not flush the root topology down to
199  * the inode.
200  */
201 static
202 int
203 hammer2_vop_fsync(struct vop_fsync_args *ap)
204 {
205 	hammer2_inode_t *ip;
206 	struct vnode *vp;
207 	int error1;
208 	int error2;
209 
210 	vp = ap->a_vp;
211 	ip = VTOI(vp);
212 	error1 = 0;
213 
214 	hammer2_trans_init(ip->pmp, 0);
215 
216 	/*
217 	 * Flush dirty buffers in the file's logical buffer cache.
218 	 * It is best to wait for the strategy code to commit the
219 	 * buffers to the device's backing buffer cache before
220 	 * trying to flush the inode.
221 	 *
222 	 * This should be quick, but certain inode modifications cached
223 	 * entirely in the hammer2_inode structure may not trigger a
224 	 * buffer read until the flush so the fsync can wind up also
225 	 * doing scattered reads.
226 	 */
227 	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
228 	bio_track_wait(&vp->v_track_write, 0, 0);
229 
230 	/*
231 	 * Flush any inode changes
232 	 */
233 	hammer2_inode_lock(ip, 0);
234 	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
235 		error1 = hammer2_inode_chain_sync(ip);
236 
237 	/*
238 	 * Flush dirty chains related to the inode.
239 	 *
240 	 * NOTE! We are not in a flush transaction.  The inode remains on
241 	 *	 the sideq so the filesystem syncer can synchronize it to
242 	 *	 the volume root.
243 	 */
244 	error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
245 	if (error2)
246 		error1 = error2;
247 
248 	/*
249 	 * We may be able to clear the vnode dirty flag.  The
250 	 * hammer2_pfs_moderate() code depends on this usually working.
251 	 */
252 	if ((ip->flags & (HAMMER2_INODE_MODIFIED |
253 			  HAMMER2_INODE_RESIZED |
254 			  HAMMER2_INODE_DIRTYDATA)) == 0 &&
255 	    RB_EMPTY(&vp->v_rbdirty_tree) &&
256 	    !bio_track_active(&vp->v_track_write)) {
257 		vclrisdirty(vp);
258 	}
259 	hammer2_inode_unlock(ip);
260 	hammer2_trans_done(ip->pmp, 0);
261 
262 	return (error1);
263 }
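
/*
 * Ordering summary for the fsync above: flush dirty logical buffers
 * (vfsync), wait for the strategy code to finish (bio_track_wait),
 * synchronize the frontend inode state to its chain (chain_sync), then
 * flush the chain sub-topology (chain_flush).  Synchronization of the
 * volume root itself is left to the filesystem syncer.
 */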
264 
265 /*
266  * No inode lock needed; just loop on the cluster_spin update counter.
267  */
268 static
269 int
270 hammer2_vop_access(struct vop_access_args *ap)
271 {
272 	hammer2_inode_t *ip = VTOI(ap->a_vp);
273 	uid_t uid;
274 	gid_t gid;
275 	mode_t mode;
276 	uint32_t uflags;
277 	int error;
278 	int update;
279 
280 retry:
281 	update = spin_access_start(&ip->cluster_spin);
282 
283 	/*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/
284 	uid = hammer2_to_unix_xid(&ip->meta.uid);
285 	gid = hammer2_to_unix_xid(&ip->meta.gid);
286 	mode = ip->meta.mode;
287 	uflags = ip->meta.uflags;
288 	/*hammer2_inode_unlock(ip);*/
289 
290 	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
291 		goto retry;
292 
293 	error = vop_helper_access(ap, uid, gid, mode, uflags);
294 
295 	return (error);
296 }
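
/*
 * The spin_access_start()/spin_access_end() pair used above (and in
 * the getattr code below) is an optimistic, seqlock-style read.  In
 * rough sketch form:
 *
 *	do {
 *		seq = spin_access_start(&ip->cluster_spin);
 *		(copy fields out of ip->meta)
 *	} while (spin_access_end(&ip->cluster_spin, seq) != 0);
 *
 * Writers bracket their ip->meta updates with
 * hammer2_spin_lock_update()/hammer2_spin_unlock_update() (see
 * hammer2_vop_setattr()), which forces concurrent readers to retry.
 */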
297 
298 static
299 int
300 hammer2_vop_getattr(struct vop_getattr_args *ap)
301 {
302 	hammer2_pfs_t *pmp;
303 	hammer2_inode_t *ip;
304 	struct vnode *vp;
305 	struct vattr *vap;
306 	int update;
307 
308 	vp = ap->a_vp;
309 	vap = ap->a_vap;
310 
311 	ip = VTOI(vp);
312 	pmp = ip->pmp;
313 
314 retry:
315 	update = spin_access_start(&ip->cluster_spin);
316 
317 	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
318 	vap->va_fileid = ip->meta.inum;
319 	vap->va_mode = ip->meta.mode;
320 	vap->va_nlink = ip->meta.nlinks;
321 	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
322 	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
323 	vap->va_rmajor = 0;
324 	vap->va_rminor = 0;
325 	vap->va_size = ip->meta.size;	/* protected by shared lock */
326 	vap->va_blocksize = HAMMER2_PBUFSIZE;
327 	vap->va_flags = ip->meta.uflags;
328 	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
329 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
330 	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); /* no atime */
331 	vap->va_gen = 1;
332 	vap->va_bytes = 0;
333 	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
334 		/*
335 		 * Can't really calculate directory use sans the files under
336 		 * it, just assume one block for now.
337 		 */
338 		vap->va_bytes += HAMMER2_INODE_BYTES;
339 	} else {
340 		vap->va_bytes = hammer2_inode_data_count(ip);
341 	}
342 	vap->va_type = hammer2_get_vtype(ip->meta.type);
343 	vap->va_filerev = 0;
344 	vap->va_uid_uuid = ip->meta.uid;
345 	vap->va_gid_uuid = ip->meta.gid;
346 	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
347 			  VA_FSID_UUID_VALID;
348 
349 	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
350 		goto retry;
351 
352 	return (0);
353 }
354 
355 static
356 int
357 hammer2_vop_getattr_lite(struct vop_getattr_lite_args *ap)
358 {
359 	hammer2_pfs_t *pmp;
360 	hammer2_inode_t *ip;
361 	struct vnode *vp;
362 	struct vattr_lite *lvap;
363 	int update;
364 
365 	vp = ap->a_vp;
366 	lvap = ap->a_lvap;
367 
368 	ip = VTOI(vp);
369 	pmp = ip->pmp;
370 
371 retry:
372 	update = spin_access_start(&ip->cluster_spin);
373 
374 #if 0
375 	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
376 	vap->va_fileid = ip->meta.inum;
377 #endif
378 	lvap->va_mode = ip->meta.mode;
379 	lvap->va_nlink = ip->meta.nlinks;
380 	lvap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
381 	lvap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
382 #if 0
383 	vap->va_rmajor = 0;
384 	vap->va_rminor = 0;
385 #endif
386 	lvap->va_size = ip->meta.size;
387 #if 0
388 	vap->va_blocksize = HAMMER2_PBUFSIZE;
389 #endif
390 	lvap->va_flags = ip->meta.uflags;
391 	lvap->va_type = hammer2_get_vtype(ip->meta.type);
392 #if 0
393 	vap->va_filerev = 0;
394 	vap->va_uid_uuid = ip->meta.uid;
395 	vap->va_gid_uuid = ip->meta.gid;
396 	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
397 			  VA_FSID_UUID_VALID;
398 #endif
399 
400 	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
401 		goto retry;
402 
403 	return (0);
404 }
405 
406 static
407 int
408 hammer2_vop_setattr(struct vop_setattr_args *ap)
409 {
410 	hammer2_inode_t *ip;
411 	struct vnode *vp;
412 	struct vattr *vap;
413 	int error;
414 	int kflags = 0;
415 	uint64_t ctime;
416 
417 	vp = ap->a_vp;
418 	vap = ap->a_vap;
419 	hammer2_update_time(&ctime);
420 
421 	ip = VTOI(vp);
422 
423 	if (ip->pmp->ronly)
424 		return (EROFS);
425 
426 	/*
427 	 * Normally disallow setattr if there is no space, unless we
428 	 * are in emergency mode (might be needed to chflags -R noschg
429 	 * files prior to removal).
430 	 */
431 	if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 &&
432 	    hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) {
433 		return (ENOSPC);
434 	}
435 
436 	hammer2_trans_init(ip->pmp, 0);
437 	hammer2_inode_lock(ip, 0);
438 	error = 0;
439 
440 	if (vap->va_flags != VNOVAL) {
441 		uint32_t flags;
442 
443 		flags = ip->meta.uflags;
444 		error = vop_helper_setattr_flags(&flags, vap->va_flags,
445 				     hammer2_to_unix_xid(&ip->meta.uid),
446 				     ap->a_cred);
447 		if (error == 0) {
448 			if (ip->meta.uflags != flags) {
449 				hammer2_inode_modify(ip);
450 				hammer2_spin_lock_update(&ip->cluster_spin);
451 				ip->meta.uflags = flags;
452 				ip->meta.ctime = ctime;
453 				hammer2_spin_unlock_update(&ip->cluster_spin);
454 				kflags |= NOTE_ATTRIB;
455 			}
456 			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
457 				error = 0;
458 				goto done;
459 			}
460 		}
461 		goto done;
462 	}
463 	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
464 		error = EPERM;
465 		goto done;
466 	}
467 	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
468 		mode_t cur_mode = ip->meta.mode;
469 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
470 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
471 		uuid_t uuid_uid;
472 		uuid_t uuid_gid;
473 
474 		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
475 					 ap->a_cred,
476 					 &cur_uid, &cur_gid, &cur_mode);
477 		if (error == 0) {
478 			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
479 			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
480 			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
481 			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
482 			    ip->meta.mode != cur_mode
483 			) {
484 				hammer2_inode_modify(ip);
485 				hammer2_spin_lock_update(&ip->cluster_spin);
486 				ip->meta.uid = uuid_uid;
487 				ip->meta.gid = uuid_gid;
488 				ip->meta.mode = cur_mode;
489 				ip->meta.ctime = ctime;
490 				hammer2_spin_unlock_update(&ip->cluster_spin);
491 			}
492 			kflags |= NOTE_ATTRIB;
493 		}
494 	}
495 
496 	/*
497 	 * Resize the file
498 	 */
499 	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
500 		switch(vp->v_type) {
501 		case VREG:
502 			if (vap->va_size == ip->meta.size)
503 				break;
504 			if (vap->va_size < ip->meta.size) {
505 				hammer2_mtx_ex(&ip->truncate_lock);
506 				hammer2_truncate_file(ip, vap->va_size);
507 				hammer2_mtx_unlock(&ip->truncate_lock);
508 				kflags |= NOTE_WRITE;
509 			} else {
510 				hammer2_extend_file(ip, vap->va_size);
511 				kflags |= NOTE_WRITE | NOTE_EXTEND;
512 			}
513 			hammer2_inode_modify(ip);
514 			ip->meta.mtime = ctime;
515 			vclrflags(vp, VLASTWRITETS);
516 			break;
517 		default:
518 			error = EINVAL;
519 			goto done;
520 		}
521 	}
522 #if 0
523 	/* atime not supported */
524 	if (vap->va_atime.tv_sec != VNOVAL) {
525 		hammer2_inode_modify(ip);
526 		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
527 		kflags |= NOTE_ATTRIB;
528 	}
529 #endif
530 	if (vap->va_mode != (mode_t)VNOVAL) {
531 		mode_t cur_mode = ip->meta.mode;
532 		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
533 		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
534 
535 		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
536 					 cur_uid, cur_gid, &cur_mode);
537 		if (error == 0) {
538 			hammer2_inode_modify(ip);
539 			hammer2_spin_lock_update(&ip->cluster_spin);
540 			ip->meta.mode = cur_mode;
541 			ip->meta.ctime = ctime;
542 			hammer2_spin_unlock_update(&ip->cluster_spin);
543 			kflags |= NOTE_ATTRIB;
544 		}
545 	}
546 
547 	if (vap->va_mtime.tv_sec != VNOVAL) {
548 		hammer2_inode_modify(ip);
549 		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
550 		kflags |= NOTE_ATTRIB;
551 		vclrflags(vp, VLASTWRITETS);
552 	}
553 
554 done:
555 	/*
556 	 * If a truncation occurred we must call chain_sync() now in order
557 	 * to trim the related data chains, otherwise a later expansion can
558 	 * cause havoc.
559 	 *
560 	 * If an extend occurred that changed the DIRECTDATA state, we must
561 	 * call inode_chain_sync now in order to prepare the inode's indirect
562 	 * block table.
563 	 *
564 	 * WARNING! This means we are making an adjustment to the inode's
565 	 * chain outside of sync/fsync, and not just to inode->meta, which
566 	 * may result in some consistency issues if a crash were to occur
567 	 * at just the wrong time.
568 	 */
569 	if (ip->flags & HAMMER2_INODE_RESIZED)
570 		hammer2_inode_chain_sync(ip);
571 
572 	/*
573 	 * Cleanup.
574 	 */
575 	hammer2_inode_unlock(ip);
576 	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
577 	hammer2_knote(ip->vp, kflags);
578 
579 	return (error);
580 }
581 
582 static
583 int
584 hammer2_vop_readdir(struct vop_readdir_args *ap)
585 {
586 	hammer2_xop_readdir_t *xop;
587 	hammer2_blockref_t bref;
588 	hammer2_inode_t *ip;
589 	hammer2_tid_t inum;
590 	hammer2_key_t lkey;
591 	struct uio *uio;
592 	off_t *cookies;
593 	off_t saveoff;
594 	int cookie_index;
595 	int ncookies;
596 	int error;
597 	int eofflag;
598 	int r;
599 
600 	ip = VTOI(ap->a_vp);
601 	uio = ap->a_uio;
602 	saveoff = uio->uio_offset;
603 	eofflag = 0;
604 	error = 0;
605 
606 	/*
607 	 * Set up directory entry cookies if requested
608 	 */
609 	if (ap->a_ncookies) {
610 		ncookies = uio->uio_resid / 16 + 1;
611 		if (ncookies > 1024)
612 			ncookies = 1024;
613 		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
614 	} else {
615 		ncookies = -1;
616 		cookies = NULL;
617 	}
618 	cookie_index = 0;
619 
620 	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
621 
622 	/*
623 	 * Handle artificial entries.  To ensure that only positive 64 bit
624 	 * quantities are returned to userland we always strip off bit 63.
625 	 * The hash code is designed such that codes 0x0000-0x7FFF are not
626 	 * used, allowing us to use these codes for artificial entries.
627 	 *
628 	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
629 	 * allow '..' to cross the mount point into (e.g.) the super-root.
630 	 */
631 	if (saveoff == 0) {
632 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
633 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
634 		if (r)
635 			goto done;
636 		if (cookies)
637 			cookies[cookie_index] = saveoff;
638 		++saveoff;
639 		++cookie_index;
640 		if (cookie_index == ncookies)
641 			goto done;
642 	}
643 
644 	if (saveoff == 1) {
645 		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
646 		if (ip != ip->pmp->iroot)
647 			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
648 		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
649 		if (r)
650 			goto done;
651 		if (cookies)
652 			cookies[cookie_index] = saveoff;
653 		++saveoff;
654 		++cookie_index;
655 		if (cookie_index == ncookies)
656 			goto done;
657 	}
658 
659 	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
660 	if (hammer2_debug & 0x0020)
661 		kprintf("readdir: lkey %016jx\n", lkey);
662 	if (error)
663 		goto done;
664 
665 	/*
666 	 * Use XOP for cluster scan.
667 	 *
668 	 * parent is the inode cluster, already locked for us.  Don't
669 	 * double lock shared locks as this will screw up upgrades.
670 	 */
671 	xop = hammer2_xop_alloc(ip, 0);
672 	xop->lkey = lkey;
673 	hammer2_xop_start(&xop->head, &hammer2_readdir_desc);
674 
675 	for (;;) {
676 		const hammer2_inode_data_t *ripdata;
677 		const char *dname;
678 		int dtype;
679 
680 		error = hammer2_xop_collect(&xop->head, 0);
681 		error = hammer2_error_to_errno(error);
682 		if (error) {
683 			break;
684 		}
685 		if (cookie_index == ncookies)
686 			break;
687 		if (hammer2_debug & 0x0020)
688 			kprintf("cluster chain %p %p\n",
689 				xop->head.cluster.focus,
690 				(xop->head.cluster.focus ?
691 				 xop->head.cluster.focus->data : (void *)-1));
692 		hammer2_cluster_bref(&xop->head.cluster, &bref);
693 
694 		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
695 			ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
696 			dtype = hammer2_get_dtype(ripdata->meta.type);
697 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
698 			r = vop_write_dirent(&error, uio,
699 					     ripdata->meta.inum &
700 					      HAMMER2_DIRHASH_USERMSK,
701 					     dtype,
702 					     ripdata->meta.name_len,
703 					     ripdata->filename);
704 			hammer2_xop_pdata(&xop->head);
705 			if (r)
706 				break;
707 			if (cookies)
708 				cookies[cookie_index] = saveoff;
709 			++cookie_index;
710 		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
711 			uint16_t namlen;
712 
713 			dtype = hammer2_get_dtype(bref.embed.dirent.type);
714 			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
715 			namlen = bref.embed.dirent.namlen;
716 			if (namlen <= sizeof(bref.check.buf)) {
717 				dname = bref.check.buf;
718 			} else {
719 				dname = hammer2_xop_gdata(&xop->head)->buf;
720 			}
721 			r = vop_write_dirent(&error, uio,
722 					     bref.embed.dirent.inum, dtype,
723 					     namlen, dname);
724 			if (namlen > sizeof(bref.check.buf))
725 				hammer2_xop_pdata(&xop->head);
726 			if (r)
727 				break;
728 			if (cookies)
729 				cookies[cookie_index] = saveoff;
730 			++cookie_index;
731 		} else {
732 			/* XXX chain error */
733 			kprintf("bad chain type readdir %d\n", bref.type);
734 		}
735 	}
736 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
737 	if (error == ENOENT) {
738 		error = 0;
739 		eofflag = 1;
740 		saveoff = (hammer2_key_t)-1;
741 	} else {
742 		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
743 	}
744 done:
745 	hammer2_inode_unlock(ip);
746 	if (ap->a_eofflag)
747 		*ap->a_eofflag = eofflag;
748 	if (hammer2_debug & 0x0020)
749 		kprintf("readdir: done at %016jx\n", saveoff);
750 	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
751 	if (error && cookie_index == 0) {
752 		if (cookies) {
753 			kfree(cookies, M_TEMP);
754 			*ap->a_ncookies = 0;
755 			*ap->a_cookies = NULL;
756 		}
757 	} else {
758 		if (cookies) {
759 			*ap->a_ncookies = cookie_index;
760 			*ap->a_cookies = cookies;
761 		}
762 	}
763 	return (error);
764 }
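
/*
 * Cookie layout produced by the readdir above, in sketch form: offsets
 * 0 and 1 are the artificial '.' and '..' entries, and real entries
 * resume at (bref.key & HAMMER2_DIRHASH_USERMSK), i.e. the directory
 * hash key with bit 63 (HAMMER2_DIRHASH_VISIBLE) stripped so userland
 * only ever sees positive 64 bit offsets.  On EOF saveoff is set to
 * (hammer2_key_t)-1 and masked positive on return.
 */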
765 
766 /*
767  * hammer2_vop_readlink { vp, uio, cred }
768  */
769 static
770 int
771 hammer2_vop_readlink(struct vop_readlink_args *ap)
772 {
773 	struct vnode *vp;
774 	hammer2_inode_t *ip;
775 	int error;
776 
777 	vp = ap->a_vp;
778 	if (vp->v_type != VLNK)
779 		return (EINVAL);
780 	ip = VTOI(vp);
781 
782 	error = hammer2_read_file(ip, ap->a_uio, 0);
783 	return (error);
784 }
785 
786 static
787 int
788 hammer2_vop_read(struct vop_read_args *ap)
789 {
790 	struct vnode *vp;
791 	hammer2_inode_t *ip;
792 	struct uio *uio;
793 	int error;
794 	int seqcount;
795 	int bigread;
796 
797 	/*
798 	 * Read operations supported on this vnode?
799 	 */
800 	vp = ap->a_vp;
801 	if (vp->v_type == VDIR)
802 		return (EISDIR);
803 	if (vp->v_type != VREG)
804 		return (EINVAL);
805 
806 	/*
807 	 * Misc
808 	 */
809 	ip = VTOI(vp);
810 	uio = ap->a_uio;
811 	error = 0;
812 
813 	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
814 	bigread = (uio->uio_resid > 100 * 1024 * 1024);
815 
816 	error = hammer2_read_file(ip, uio, seqcount);
817 	return (error);
818 }
819 
820 static
821 int
822 hammer2_vop_write(struct vop_write_args *ap)
823 {
824 	hammer2_inode_t *ip;
825 	thread_t td;
826 	struct vnode *vp;
827 	struct uio *uio;
828 	int error;
829 	int seqcount;
830 	int ioflag;
831 
832 	/*
833 	 * Write operations supported on this vnode?
834 	 */
835 	vp = ap->a_vp;
836 	if (vp->v_type != VREG)
837 		return (EINVAL);
838 
839 	/*
840 	 * Misc
841 	 */
842 	ip = VTOI(vp);
843 	ioflag = ap->a_ioflag;
844 	uio = ap->a_uio;
845 	error = 0;
846 	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
847 		return (EROFS);
848 	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
849 	case 2:
850 		return (ENOSPC);
851 	case 1:
852 		ioflag |= IO_DIRECT;	/* semi-synchronous */
853 		/* fall through */
854 	default:
855 		break;
856 	}
857 
858 	seqcount = ioflag >> IO_SEQSHIFT;
859 
860 	/*
861 	 * Check resource limit
862 	 */
863 	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
864 	    uio->uio_offset + uio->uio_resid >
865 	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
866 		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
867 		return (EFBIG);
868 	}
869 
870 	/*
871 	 * The transaction interlocks against flush initiations
872 	 * (note: but will run concurrently with the actual flush).
873 	 *
874 	 * To avoid deadlocking against the VM system, we must flag any
875 	 * transaction related to the buffer cache or other direct
876 	 * VM page manipulation.
877 	 */
878 	if (uio->uio_segflg == UIO_NOCOPY) {
879 		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
880 	} else {
881 		hammer2_trans_init(ip->pmp, 0);
882 	}
883 	error = hammer2_write_file(ip, uio, ioflag, seqcount);
884 	if (uio->uio_segflg == UIO_NOCOPY)
885 		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE |
886 					    HAMMER2_TRANS_SIDEQ);
887 	else
888 		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
889 
890 	return (error);
891 }
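
/*
 * Note on the hammer2_vfs_enospace() levels consumed above: 0 means
 * space is fine, 1 means space is getting low (writes are degraded to
 * semi-synchronous by forcing IO_DIRECT), and 2 means the filesystem
 * is effectively full (ENOSPC).
 */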
892 
893 /*
894  * Perform read operations on a file or symlink given an UNLOCKED
895  * inode and uio.
896  *
897  * The passed ip is not locked.
898  */
899 static
900 int
901 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
902 {
903 	hammer2_off_t size;
904 	struct buf *bp;
905 	int error;
906 
907 	error = 0;
908 
909 	/*
910 	 * UIO read loop.
911 	 *
912 	 * WARNING! Assumes that the kernel interlocks size changes at the
913 	 *	    vnode level.
914 	 */
915 	hammer2_mtx_sh(&ip->lock);
916 	hammer2_mtx_sh(&ip->truncate_lock);
917 	size = ip->meta.size;
918 	hammer2_mtx_unlock(&ip->lock);
919 
920 	while (uio->uio_resid > 0 && uio->uio_offset < size) {
921 		hammer2_key_t lbase;
922 		hammer2_key_t leof;
923 		int lblksize;
924 		int loff;
925 		int n;
926 
927 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
928 						&lbase, &leof);
929 
930 #if 1
931 		bp = NULL;
932 		error = cluster_readx(ip->vp, leof, lbase, lblksize,
933 				      B_NOTMETA | B_KVABIO,
934 				      uio->uio_resid,
935 				      seqcount * MAXBSIZE,
936 				      &bp);
937 #else
938 		if (uio->uio_segflg == UIO_NOCOPY) {
939 			bp = getblk(ip->vp, lbase, lblksize,
940 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
941 			if (bp->b_flags & B_CACHE) {
942 				int i;
943 				int j = 0;
944 				if (bp->b_xio.xio_npages != 16)
945 					kprintf("NPAGES BAD\n");
946 				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
947 					vm_page_t m;
948 					m = bp->b_xio.xio_pages[i];
949 					if (m == NULL || m->valid == 0) {
950 						kprintf("bp %016jx %016jx pg %d inv",
951 							lbase, leof, i);
952 						if (m)
953 							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
954 						kprintf("\n");
955 						j = 1;
956 					}
957 				}
958 				if (j)
959 					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
960 			}
961 			bqrelse(bp);
962 		}
963 		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
964 #endif
965 		if (error) {
966 			brelse(bp);
967 			break;
968 		}
969 		bkvasync(bp);
970 		loff = (int)(uio->uio_offset - lbase);
971 		n = lblksize - loff;
972 		if (n > uio->uio_resid)
973 			n = uio->uio_resid;
974 		if (n > size - uio->uio_offset)
975 			n = (int)(size - uio->uio_offset);
976 		bp->b_flags |= B_AGE;
977 		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
978 		bqrelse(bp);
979 	}
980 	hammer2_mtx_unlock(&ip->truncate_lock);
981 
982 	return (error);
983 }
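
/*
 * Worked example of the per-iteration arithmetic above (illustrative
 * numbers, assuming a 64KiB logical block): a read at uio_offset 70000
 * yields lbase 65536 and loff 4464, and n is clamped to
 * min(lblksize - loff, uio_resid, size - uio_offset) so the copy never
 * runs past either the buffer or the file EOF.
 */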
984 
985 /*
986  * Write to the file represented by the inode via the logical buffer cache.
987  * The inode may represent a regular file or a symlink.
988  *
989  * The inode must not be locked.
990  */
991 static
992 int
993 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
994 		   int ioflag, int seqcount)
995 {
996 	hammer2_key_t old_eof;
997 	hammer2_key_t new_eof;
998 	struct buf *bp;
999 	int kflags;
1000 	int error;
1001 	int modified;
1002 
1003 	/*
1004 	 * Setup if append
1005 	 *
1006 	 * WARNING! Assumes that the kernel interlocks size changes at the
1007 	 *	    vnode level.
1008 	 */
1009 	hammer2_mtx_ex(&ip->lock);
1010 	hammer2_mtx_sh(&ip->truncate_lock);
1011 	if (ioflag & IO_APPEND)
1012 		uio->uio_offset = ip->meta.size;
1013 	old_eof = ip->meta.size;
1014 
1015 	/*
1016 	 * Extend the file if necessary.  If the write fails at some point
1017 	 * we will truncate it back down to cover as much as we were able
1018 	 * to write.
1019 	 *
1020 	 * Doing this now makes it easier to calculate buffer sizes in
1021 	 * the loop.
1022 	 */
1023 	kflags = 0;
1024 	error = 0;
1025 	modified = 0;
1026 
1027 	if (uio->uio_offset + uio->uio_resid > old_eof) {
1028 		new_eof = uio->uio_offset + uio->uio_resid;
1029 		modified = 1;
1030 		hammer2_extend_file(ip, new_eof);
1031 		kflags |= NOTE_EXTEND;
1032 	} else {
1033 		new_eof = old_eof;
1034 	}
1035 	hammer2_mtx_unlock(&ip->lock);
1036 
1037 	/*
1038 	 * UIO write loop
1039 	 */
1040 	while (uio->uio_resid > 0) {
1041 		hammer2_key_t lbase;
1042 		int trivial;
1043 		int endofblk;
1044 		int lblksize;
1045 		int loff;
1046 		int n;
1047 
1048 		/*
1049 		 * Don't allow the buffer build to blow out the buffer
1050 		 * cache.
1051 		 */
1052 		if ((ioflag & IO_RECURSE) == 0)
1053 			bwillwrite(HAMMER2_PBUFSIZE);
1054 
1055 		/*
1056 		 * This nominally tells us how much we can cluster and
1057 		 * what the logical buffer size needs to be.  Currently
1058 		 * we don't try to cluster the write and just handle one
1059 		 * block at a time.
1060 		 */
1061 		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
1062 						&lbase, NULL);
1063 		loff = (int)(uio->uio_offset - lbase);
1064 
1065 		KKASSERT(lblksize <= MAXBSIZE);
1066 
1067 		/*
1068 		 * Calculate bytes to copy this transfer and whether the
1069 		 * copy completely covers the buffer or not.
1070 		 */
1071 		trivial = 0;
1072 		n = lblksize - loff;
1073 		if (n > uio->uio_resid) {
1074 			n = uio->uio_resid;
1075 			if (loff == lbase && uio->uio_offset + n == new_eof)
1076 				trivial = 1;
1077 			endofblk = 0;
1078 		} else {
1079 			if (loff == 0)
1080 				trivial = 1;
1081 			endofblk = 1;
1082 		}
1083 		if (lbase >= new_eof)
1084 			trivial = 1;
1085 
1086 		/*
1087 		 * Get the buffer
1088 		 */
1089 		if (uio->uio_segflg == UIO_NOCOPY) {
1090 			/*
1091 			 * Issuing a write with the same data backing the
1092 			 * buffer.  Instantiate the buffer to collect the
1093 			 * backing vm pages, then read-in any missing bits.
1094 			 *
1095 			 * This case is used by vop_stdputpages().
1096 			 */
1097 			bp = getblk(ip->vp, lbase, lblksize,
1098 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1099 			if ((bp->b_flags & B_CACHE) == 0) {
1100 				bqrelse(bp);
1101 				error = bread_kvabio(ip->vp, lbase,
1102 						     lblksize, &bp);
1103 			}
1104 		} else if (trivial) {
1105 			/*
1106 			 * Even though we are entirely overwriting the buffer
1107 			 * we may still have to zero it out to avoid an
1108 			 * mmap/write visibility issue.
1109 			 */
1110 			bp = getblk(ip->vp, lbase, lblksize,
1111 				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
1112 			if ((bp->b_flags & B_CACHE) == 0)
1113 				vfs_bio_clrbuf(bp);
1114 		} else {
1115 			/*
1116 			 * Partial overwrite, read in any missing bits then
1117 			 * replace the portion being written.
1118 			 *
1119 			 * (The strategy code will detect zero-fill physical
1120 			 * blocks for this case).
1121 			 */
1122 			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
1123 			if (error == 0)
1124 				bheavy(bp);
1125 		}
1126 
1127 		if (error) {
1128 			brelse(bp);
1129 			break;
1130 		}
1131 
1132 		/*
1133 		 * Ok, copy the data in
1134 		 */
1135 		bkvasync(bp);
1136 		error = uiomovebp(bp, bp->b_data + loff, n, uio);
1137 		kflags |= NOTE_WRITE;
1138 		modified = 1;
1139 		if (error) {
1140 			brelse(bp);
1141 			break;
1142 		}
1143 
1144 		/*
1145 		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
1146 		 *	    with IO_SYNC or IO_ASYNC set.  These writes
1147 		 *	    must be handled as the pageout daemon expects.
1148 		 *
1149 		 * NOTE!    H2 relies on cluster_write() here because it
1150 		 *	    cannot preallocate disk blocks at the logical
1151 		 *	    level due to not knowing what the compression
1152 		 *	    size will be at this time.
1153 		 *
1154 		 *	    We must use cluster_write() here and we depend
1155 		 *	    on the write-behind feature to flush buffers
1156 		 *	    appropriately.  If we let the buffer daemons do
1157 		 *	    it the block allocations will be all over the
1158 		 *	    map.
1159 		 */
1160 		if (ioflag & IO_SYNC) {
1161 			bwrite(bp);
1162 		} else if ((ioflag & IO_DIRECT) && endofblk) {
1163 			bawrite(bp);
1164 		} else if (ioflag & IO_ASYNC) {
1165 			bawrite(bp);
1166 		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
1167 			bdwrite(bp);
1168 		} else {
1169 #if 1
1170 			bp->b_flags |= B_CLUSTEROK;
1171 			cluster_write(bp, new_eof, lblksize, seqcount);
1172 #else
1173 			bp->b_flags |= B_CLUSTEROK;
1174 			bdwrite(bp);
1175 #endif
1176 		}
1177 	}
1178 
1179 	/*
1180 	 * Cleanup.  If we extended the file EOF but failed to write it
1181 	 * through, the entire write is a failure and we have to back up.
1182 	 */
1183 	if (error && new_eof != old_eof) {
1184 		hammer2_mtx_unlock(&ip->truncate_lock);
1185 		hammer2_mtx_ex(&ip->lock);		/* note lock order */
1186 		hammer2_mtx_ex(&ip->truncate_lock);	/* note lock order */
1187 		hammer2_truncate_file(ip, old_eof);
1188 		if (ip->flags & HAMMER2_INODE_MODIFIED)
1189 			hammer2_inode_chain_sync(ip);
1190 		hammer2_mtx_unlock(&ip->lock);
1191 	} else if (modified) {
1192 		struct vnode *vp = ip->vp;
1193 
1194 		hammer2_mtx_ex(&ip->lock);
1195 		hammer2_inode_modify(ip);
1196 		if (uio->uio_segflg == UIO_NOCOPY) {
1197 			if (vp->v_flag & VLASTWRITETS) {	/* meta.mtime in usec */
1198 				ip->meta.mtime =
1199 				    (unsigned long)vp->v_lastwrite_ts.tv_sec *
1200 				    1000000 +
1201 				    vp->v_lastwrite_ts.tv_nsec / 1000;
1202 			}
1203 		} else {
1204 			hammer2_update_time(&ip->meta.mtime);
1205 			vclrflags(vp, VLASTWRITETS);
1206 		}
1207 
1208 #if 0
1209 		/*
1210 		 * REMOVED - handled by hammer2_extend_file().  Do not issue
1211 		 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
1212 		 * state changes.
1213 		 *
1214 		 * Under normal conditions we only issue a chain_sync if
1215 		 * the inode's DIRECTDATA state changed.
1216 		 */
1217 		if (ip->flags & HAMMER2_INODE_RESIZED)
1218 			hammer2_inode_chain_sync(ip);
1219 #endif
1220 		hammer2_mtx_unlock(&ip->lock);
1221 		hammer2_knote(ip->vp, kflags);
1222 	}
1223 	hammer2_trans_assert_strategy(ip->pmp);
1224 	hammer2_mtx_unlock(&ip->truncate_lock);
1225 
1226 	return error;
1227 }
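
/*
 * Note on the 'trivial' flag computed in the loop above: it indicates
 * that the copy covers every byte of the buffer that matters, so the
 * buffer may be instantiated (and zero-filled) without first reading
 * the old contents from media.
 */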
1228 
1229 /*
1230  * Truncate the size of a file.  The inode must be locked.
1231  *
1232  * We must unconditionally set HAMMER2_INODE_RESIZED to properly
1233  * ensure that any on-media data beyond the new file EOF has been destroyed.
1234  *
1235  * WARNING: nvtruncbuf() can only be safely called without the inode lock
1236  *	    held due to the way our write thread works.  If the truncation
1237  *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
1238  *	    for dirtying that buffer and zeroing out trailing bytes.
1239  *
1240  * WARNING! Assumes that the kernel interlocks size changes at the
1241  *	    vnode level.
1242  *
1243  * WARNING! Caller assumes responsibility for removing dead blocks
1244  *	    if INODE_RESIZED is set.
1245  */
1246 static
1247 void
1248 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1249 {
1250 	hammer2_key_t lbase;
1251 	int nblksize;
1252 
1253 	hammer2_mtx_unlock(&ip->lock);
1254 	if (ip->vp) {
1255 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1256 		nvtruncbuf(ip->vp, nsize,
1257 			   nblksize, (int)nsize & (nblksize - 1),
1258 			   0);
1259 	}
1260 	hammer2_mtx_ex(&ip->lock);
1261 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1262 	ip->osize = ip->meta.size;
1263 	ip->meta.size = nsize;
1264 	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1265 	hammer2_inode_modify(ip);
1266 }
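
/*
 * Typical caller sequence for the truncation above (compare
 * hammer2_vop_setattr()):
 *
 *	hammer2_mtx_ex(&ip->truncate_lock);
 *	hammer2_truncate_file(ip, nsize);
 *	hammer2_mtx_unlock(&ip->truncate_lock);
 *	...
 *	if (ip->flags & HAMMER2_INODE_RESIZED)
 *		hammer2_inode_chain_sync(ip);	(trim dead data chains)
 */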
1267 
1268 /*
1269  * Extend the size of a file.  The inode must be locked.
1270  *
1271  * Even though the file size is changing, we do not have to set the
1272  * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
1273  * boundary.  When this occurs a hammer2_inode_chain_sync() is required
1274  * to prepare the inode cluster's indirect block table, otherwise
1275  * async execution of the strategy code will implode on us.
1276  *
1277  * WARNING! Assumes that the kernel interlocks size changes at the
1278  *	    vnode level.
1279  *
1280  * WARNING! Caller assumes responsibility for transitioning out
1281  *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
1282  */
1283 static
1284 void
1285 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
1286 {
1287 	hammer2_key_t lbase;
1288 	hammer2_key_t osize;
1289 	int oblksize;
1290 	int nblksize;
1291 	int error;
1292 
1293 	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
1294 	hammer2_inode_modify(ip);
1295 	osize = ip->meta.size;
1296 	ip->osize = osize;
1297 	ip->meta.size = nsize;
1298 
1299 	/*
1300 	 * We must issue a chain_sync() when the DIRECTDATA state changes
1301 	 * to prevent confusion between the flush code and the in-memory
1302 	 * state.  This is not perfect because we are doing it outside of
1303 	 * a sync/fsync operation, so it might not be fully synchronized
1304 	 * with the meta-data topology flush.
1305 	 *
1306 	 * We must retain and re-dirty the buffer cache buffer containing
1307 	 * the direct data so it can be written to a real block.  It should
1308 	 * not be possible for a bread error to occur since the original data
1309 	 * is extracted from the inode structure directly.
1310 	 */
1311 	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
1312 		if (osize) {
1313 			struct buf *bp;
1314 
1315 			oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
1316 			error = bread_kvabio(ip->vp, 0, oblksize, &bp);
1317 			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1318 			hammer2_inode_chain_sync(ip);
1319 			if (error == 0) {
1320 				bheavy(bp);
1321 				bdwrite(bp);
1322 			} else {
1323 				brelse(bp);
1324 			}
1325 		} else {
1326 			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
1327 			hammer2_inode_chain_sync(ip);
1328 		}
1329 	}
1330 	hammer2_mtx_unlock(&ip->lock);
1331 	if (ip->vp) {
1332 		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
1333 		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
1334 		nvextendbuf(ip->vp,
1335 			    osize, nsize,
1336 			    oblksize, nblksize,
1337 			    -1, -1, 0);
1338 	}
1339 	hammer2_mtx_ex(&ip->lock);
1340 }
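
/*
 * NOTE: HAMMER2_EMBEDDED_BYTES is the amount of file data which can be
 * stored directly in the inode itself (DIRECTDATA mode).  Growing a
 * file across that boundary converts the inode to an indirect block
 * table, which is why the chain_sync() above must be issued
 * immediately rather than deferred to the next sync/fsync.
 */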
1341 
1342 static
1343 int
1344 hammer2_vop_nresolve(struct vop_nresolve_args *ap)
1345 {
1346 	hammer2_xop_nresolve_t *xop;
1347 	hammer2_inode_t *ip;
1348 	hammer2_inode_t *dip;
1349 	struct namecache *ncp;
1350 	struct vnode *vp;
1351 	int error;
1352 
1353 	dip = VTOI(ap->a_dvp);
1354 	xop = hammer2_xop_alloc(dip, 0);
1355 
1356 	ncp = ap->a_nch->ncp;
1357 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1358 
1359 	/*
1360 	 * Note: In DragonFly the kernel handles '.' and '..'.
1361 	 */
1362 	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
1363 	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);
1364 
1365 	error = hammer2_xop_collect(&xop->head, 0);
1366 	error = hammer2_error_to_errno(error);
1367 	if (error) {
1368 		ip = NULL;
1369 	} else {
1370 		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
1371 	}
1372 	hammer2_inode_unlock(dip);
1373 
1374 	/*
1375 	 * Acquire the related vnode
1376 	 *
1377 	 * NOTE: For error processing, only ENOENT resolves the namecache
1378 	 *	 entry to NULL, otherwise we just return the error and
1379 	 *	 leave the namecache unresolved.
1380 	 *
1381 	 * NOTE: multiple hammer2_inode structures can be aliased to the
1382 	 *	 same chain element, for example for hardlinks.  This
1383 	 *	 use case does not 'reattach' inode associations that
1384 	 *	 might already exist, but always allocates a new one.
1385 	 *
1386 	 * WARNING: inode structure is locked exclusively via inode_get
1387 	 *	    but chain was locked shared.  inode_unlock()
1388 	 *	    will handle it properly.
1389 	 */
1390 	if (ip) {
1391 		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
1392 		if (error == 0) {
1393 			vn_unlock(vp);
1394 			cache_setvp(ap->a_nch, vp);
1395 		} else if (error == ENOENT) {
1396 			cache_setvp(ap->a_nch, NULL);
1397 		}
1398 		hammer2_inode_unlock(ip);
1399 
1400 		/*
1401 		 * The vp should not be released until after we've disposed
1402 		 * of our locks, because it might cause vop_inactive() to
1403 		 * be called.
1404 		 */
1405 		if (vp)
1406 			vrele(vp);
1407 	} else {
1408 		error = ENOENT;
1409 		cache_setvp(ap->a_nch, NULL);
1410 	}
1411 	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1412 	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
1413 		("resolve error %d/%p ap %p\n",
1414 		 error, ap->a_nch->ncp->nc_vp, ap));
1415 
1416 	return error;
1417 }
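
/*
 * The XOP pattern used by the nresolve above is the template for most
 * namespace operations in this file.  In sketch form:
 *
 *	xop = hammer2_xop_alloc(dip, flags);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, &desc);	    (dispatch to backend)
 *	error = hammer2_xop_collect(&xop->head, 0); (collect cluster result)
 *	(consume xop->head.cluster or the gdata/pdata accessors)
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */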
1418 
1419 static
1420 int
1421 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
1422 {
1423 	hammer2_inode_t *dip;
1424 	hammer2_tid_t inum;
1425 	int error;
1426 
1427 	dip = VTOI(ap->a_dvp);
1428 	inum = dip->meta.iparent;
1429 	*ap->a_vpp = NULL;
1430 
1431 	if (inum) {
1432 		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
1433 					 inum, ap->a_vpp);
1434 	} else {
1435 		error = ENOENT;
1436 	}
1437 	return error;
1438 }
1439 
1440 static
1441 int
1442 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
1443 {
1444 	hammer2_inode_t *dip;
1445 	hammer2_inode_t *nip;
1446 	struct namecache *ncp;
1447 	const uint8_t *name;
1448 	size_t name_len;
1449 	hammer2_tid_t inum;
1450 	int error;
1451 
1452 	dip = VTOI(ap->a_dvp);
1453 	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
1454 		return (EROFS);
1455 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1456 		return (ENOSPC);
1457 
1458 	ncp = ap->a_nch->ncp;
1459 	name = ncp->nc_name;
1460 	name_len = ncp->nc_nlen;
1461 
1462 	hammer2_trans_init(dip->pmp, 0);
1463 
1464 	inum = hammer2_trans_newinum(dip->pmp);
1465 
1466 	/*
1467 	 * Create the actual inode as a hidden file in the iroot, then
1468 	 * create the directory entry.  The creation of the actual inode
1469 	 * sets its nlinks to 1 which is the value we desire.
1470 	 *
1471 	 * dip must be locked before nip to avoid deadlock.
1472 	 */
1473 	hammer2_inode_lock(dip, 0);
1474 	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1475 					  inum, &error);
1476 	if (error) {
1477 		error = hammer2_error_to_errno(error);
1478 	} else {
1479 		error = hammer2_dirent_create(dip, name, name_len,
1480 					      nip->meta.inum, nip->meta.type);
1481 		/* returns UNIX error code */
1482 	}
1483 	if (error) {
1484 		if (nip) {
1485 			hammer2_inode_unlink_finisher(nip, NULL);
1486 			hammer2_inode_unlock(nip);
1487 			nip = NULL;
1488 		}
1489 		*ap->a_vpp = NULL;
1490 	} else {
1491 		/*
1492 		 * inode_depend() must occur before the igetv() because
1493 		 * the igetv() can temporarily release the inode lock.
1494 		 */
1495 		hammer2_inode_depend(dip, nip);	/* before igetv */
1496 		*ap->a_vpp = hammer2_igetv(nip, &error);
1497 		hammer2_inode_unlock(nip);
1498 	}
1499 
1500 	/*
1501 	 * Update dip's mtime
1502 	 *
1503 	 * We can use a shared inode lock and allow the meta.mtime update
1504 	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
1505 	 */
1506 	if (error == 0) {
1507 		uint64_t mtime;
1508 
1509 		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
1510 		hammer2_update_time(&mtime);
1511 		hammer2_inode_modify(dip);
1512 		dip->meta.mtime = mtime;
1513 		/*hammer2_inode_unlock(dip);*/
1514 	}
1515 	hammer2_inode_unlock(dip);
1516 
1517 	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1518 
1519 	if (error == 0) {
1520 		cache_setunresolved(ap->a_nch);
1521 		cache_setvp(ap->a_nch, *ap->a_vpp);
1522 		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
1523 	}
1524 	return error;
1525 }
1526 
1527 static
1528 int
1529 hammer2_vop_open(struct vop_open_args *ap)
1530 {
1531 	return vop_stdopen(ap);
1532 }
1533 
1534 /*
1535  * hammer2_vop_advlock { vp, id, op, fl, flags }
1536  */
1537 static
1538 int
1539 hammer2_vop_advlock(struct vop_advlock_args *ap)
1540 {
1541 	hammer2_inode_t *ip = VTOI(ap->a_vp);
1542 	hammer2_off_t size;
1543 
1544 	size = ip->meta.size;
1545 	return (lf_advlock(ap, &ip->advlock, size));
1546 }
1547 
1548 static
1549 int
1550 hammer2_vop_close(struct vop_close_args *ap)
1551 {
1552 	return vop_stdclose(ap);
1553 }
1554 
1555 /*
1556  * hammer2_vop_nlink { nch, dvp, vp, cred }
1557  *
1558  * Create a hardlink from (vp) to {dvp, nch}.
1559  */
1560 static
1561 int
1562 hammer2_vop_nlink(struct vop_nlink_args *ap)
1563 {
1564 	hammer2_inode_t *tdip;	/* target directory to create link in */
1565 	hammer2_inode_t *ip;	/* inode we are hardlinking to */
1566 	struct namecache *ncp;
1567 	const uint8_t *name;
1568 	size_t name_len;
1569 	int error;
1570 	uint64_t cmtime;
1571 
1572 	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
1573 		return(EXDEV);
1574 
1575 	tdip = VTOI(ap->a_dvp);
1576 	if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG))
1577 		return (EROFS);
1578 	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
1579 		return (ENOSPC);
1580 
1581 	ncp = ap->a_nch->ncp;
1582 	name = ncp->nc_name;
1583 	name_len = ncp->nc_nlen;
1584 
1585 	/*
1586 	 * ip represents the file being hardlinked.  The file could be a
1587 	 * normal file or a hardlink target if it has already been hardlinked.
1588 	 * (with the new semantics, it will almost always be a hardlink
1589 	 * target).
1590 	 *
1591 	 * Bump nlinks and potentially also create or move the hardlink
1592 	 * target in the parent directory common to (ip) and (tdip).  The
1593 	 * consolidation code can modify ip->cluster.  The returned cluster
1594 	 * is locked.
1595 	 */
1596 	ip = VTOI(ap->a_vp);
1597 	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
1598 	hammer2_trans_init(ip->pmp, 0);
1599 
1600 	/*
1601 	 * Target should be an indexed inode or there's no way we will ever
1602 	 * be able to find it!
1603 	 */
1604 	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);
1605 
1606 	error = 0;
1607 
1608 	/*
1609 	 * Can return NULL and error == EXDEV if the common parent
1610 	 * crosses a directory with the xlink flag set.
1611 	 */
1612 	hammer2_inode_lock4(tdip, ip, NULL, NULL);
1613 
1614 	hammer2_update_time(&cmtime);
1615 
1616 	/*
1617 	 * Create the directory entry and bump nlinks.
1618 	 * Also update ip's ctime.
1619 	 */
1620 	if (error == 0) {
1621 		error = hammer2_dirent_create(tdip, name, name_len,
1622 					      ip->meta.inum, ip->meta.type);
1623 		hammer2_inode_modify(ip);
1624 		++ip->meta.nlinks;
1625 		ip->meta.ctime = cmtime;
1626 	}
1627 	if (error == 0) {
1628 		/*
1629 		 * Update dip's [cm]time
1630 		 */
1631 		hammer2_inode_modify(tdip);
1632 		tdip->meta.mtime = cmtime;
1633 		tdip->meta.ctime = cmtime;
1634 
1635 		cache_setunresolved(ap->a_nch);
1636 		cache_setvp(ap->a_nch, ap->a_vp);
1637 	}
1638 	hammer2_inode_unlock(ip);
1639 	hammer2_inode_unlock(tdip);
1640 
1641 	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
1642 	hammer2_knote(ap->a_vp, NOTE_LINK);
1643 	hammer2_knote(ap->a_dvp, NOTE_WRITE);
1644 
1645 	return error;
1646 }
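
/*
 * NOTE: hammer2_inode_lock4() locks the related inodes in a consistent
 * order, avoiding deadlocks between concurrent namespace operations
 * that reach the same inodes from different directions.
 */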
1647 
1648 /*
1649  * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
1650  *
1651  * The operating system has already ensured that the directory entry
1652  * does not exist and done all appropriate namespace locking.
1653  */
1654 static
1655 int
1656 hammer2_vop_ncreate(struct vop_ncreate_args *ap)
1657 {
1658 	hammer2_inode_t *dip;
1659 	hammer2_inode_t *nip;
1660 	struct namecache *ncp;
1661 	const uint8_t *name;
1662 	size_t name_len;
1663 	hammer2_tid_t inum;
1664 	int error;
1665 
1666 	dip = VTOI(ap->a_dvp);
1667 	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
1668 		return (EROFS);
1669 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1670 		return (ENOSPC);
1671 
1672 	ncp = ap->a_nch->ncp;
1673 	name = ncp->nc_name;
1674 	name_len = ncp->nc_nlen;
1675 	hammer2_trans_init(dip->pmp, 0);
1676 
1677 	inum = hammer2_trans_newinum(dip->pmp);
1678 
1679 	/*
1680 	 * Create the actual inode as a hidden file in the iroot, then
1681 	 * create the directory entry.  The creation of the actual inode
1682 	 * sets its nlinks to 1 which is the value we desire.
1683 	 *
1684 	 * dip must be locked before nip to avoid deadlock.
1685 	 */
1686 	hammer2_inode_lock(dip, 0);
1687 	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1688 					  inum, &error);
1689 
1690 	if (error) {
1691 		error = hammer2_error_to_errno(error);
1692 	} else {
1693 		error = hammer2_dirent_create(dip, name, name_len,
1694 					      nip->meta.inum, nip->meta.type);
1695 	}
1696 	if (error) {
1697 		if (nip) {
1698 			hammer2_inode_unlink_finisher(nip, NULL);
1699 			hammer2_inode_unlock(nip);
1700 			nip = NULL;
1701 		}
1702 		*ap->a_vpp = NULL;
1703 	} else {
1704 		hammer2_inode_depend(dip, nip);	/* before igetv */
1705 		*ap->a_vpp = hammer2_igetv(nip, &error);
1706 		hammer2_inode_unlock(nip);
1707 	}
1708 
1709 	/*
1710 	 * Update dip's mtime
1711 	 */
1712 	if (error == 0) {
1713 		uint64_t mtime;
1714 
1715 		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
1716 		hammer2_update_time(&mtime);
1717 		hammer2_inode_modify(dip);
1718 		dip->meta.mtime = mtime;
1719 		/*hammer2_inode_unlock(dip);*/
1720 	}
1721 	hammer2_inode_unlock(dip);
1722 
1723 	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1724 
1725 	if (error == 0) {
1726 		cache_setunresolved(ap->a_nch);
1727 		cache_setvp(ap->a_nch, *ap->a_vpp);
1728 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1729 	}
1730 	return error;
1731 }
1732 
1733 /*
1734  * Make a device node (typically a fifo)
1735  */
1736 static
1737 int
1738 hammer2_vop_nmknod(struct vop_nmknod_args *ap)
1739 {
1740 	hammer2_inode_t *dip;
1741 	hammer2_inode_t *nip;
1742 	struct namecache *ncp;
1743 	const uint8_t *name;
1744 	size_t name_len;
1745 	hammer2_tid_t inum;
1746 	int error;
1747 
1748 	dip = VTOI(ap->a_dvp);
1749 	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
1750 		return (EROFS);
1751 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1752 		return (ENOSPC);
1753 
1754 	ncp = ap->a_nch->ncp;
1755 	name = ncp->nc_name;
1756 	name_len = ncp->nc_nlen;
1757 	hammer2_trans_init(dip->pmp, 0);
1758 
1759 	/*
1760 	 * Create the device inode and then create the directory entry.
1761 	 *
1762 	 * dip must be locked before nip to avoid deadlock.
1763 	 */
1764 	inum = hammer2_trans_newinum(dip->pmp);
1765 
1766 	hammer2_inode_lock(dip, 0);
1767 	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1768 					  inum, &error);
1769 	if (error == 0) {
1770 		error = hammer2_dirent_create(dip, name, name_len,
1771 					      nip->meta.inum, nip->meta.type);
1772 	}
1773 	if (error) {
1774 		if (nip) {
1775 			hammer2_inode_unlink_finisher(nip, NULL);
1776 			hammer2_inode_unlock(nip);
1777 			nip = NULL;
1778 		}
1779 		*ap->a_vpp = NULL;
1780 	} else {
1781 		hammer2_inode_depend(dip, nip);	/* before igetv */
1782 		*ap->a_vpp = hammer2_igetv(nip, &error);
1783 		hammer2_inode_unlock(nip);
1784 	}
1785 
1786 	/*
1787 	 * Update dip's mtime
1788 	 */
1789 	if (error == 0) {
1790 		uint64_t mtime;
1791 
1792 		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
1793 		hammer2_update_time(&mtime);
1794 		hammer2_inode_modify(dip);
1795 		dip->meta.mtime = mtime;
1796 		/*hammer2_inode_unlock(dip);*/
1797 	}
1798 	hammer2_inode_unlock(dip);
1799 
1800 	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1801 
1802 	if (error == 0) {
1803 		cache_setunresolved(ap->a_nch);
1804 		cache_setvp(ap->a_nch, *ap->a_vpp);
1805 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1806 	}
1807 	return error;
1808 }
1809 
1810 /*
1811  * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
1812  */
1813 static
1814 int
1815 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
1816 {
1817 	hammer2_inode_t *dip;
1818 	hammer2_inode_t *nip;
1819 	struct namecache *ncp;
1820 	const uint8_t *name;
1821 	size_t name_len;
1822 	hammer2_tid_t inum;
1823 	int error;
1824 
1825 	dip = VTOI(ap->a_dvp);
1826 	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
1827 		return (EROFS);
1828 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1829 		return (ENOSPC);
1830 
1831 	ncp = ap->a_nch->ncp;
1832 	name = ncp->nc_name;
1833 	name_len = ncp->nc_nlen;
1834 	hammer2_trans_init(dip->pmp, 0);
1835 
1836 	ap->a_vap->va_type = VLNK;	/* enforce type */
1837 
1838 	/*
1839 	 * Create the softlink as an inode and then create the directory
1840 	 * entry.
1841 	 *
1842 	 * dip must be locked before nip to avoid deadlock.
1843 	 */
1844 	inum = hammer2_trans_newinum(dip->pmp);
1845 
1846 	hammer2_inode_lock(dip, 0);
1847 	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
1848 					  inum, &error);
1849 	if (error == 0) {
1850 		error = hammer2_dirent_create(dip, name, name_len,
1851 					      nip->meta.inum, nip->meta.type);
1852 	}
1853 	if (error) {
1854 		if (nip) {
1855 			hammer2_inode_unlink_finisher(nip, NULL);
1856 			hammer2_inode_unlock(nip);
1857 			nip = NULL;
1858 		}
1859 		*ap->a_vpp = NULL;
1860 		hammer2_inode_unlock(dip);
1861 		hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1862 		return error;
1863 	}
1864 	hammer2_inode_depend(dip, nip);	/* before igetv */
1865 	*ap->a_vpp = hammer2_igetv(nip, &error);
1866 
1867 	/*
1868 	 * Build the softlink (~like file data) and finalize the namecache.
1869 	 */
1870 	if (error == 0) {
1871 		size_t bytes;
1872 		struct uio auio;
1873 		struct iovec aiov;
1874 
1875 		bytes = strlen(ap->a_target);
1876 
1877 		hammer2_inode_unlock(nip);
1878 		bzero(&auio, sizeof(auio));
1879 		bzero(&aiov, sizeof(aiov));
1880 		auio.uio_iov = &aiov;
1881 		auio.uio_segflg = UIO_SYSSPACE;
1882 		auio.uio_rw = UIO_WRITE;
1883 		auio.uio_resid = bytes;
1884 		auio.uio_iovcnt = 1;
1885 		auio.uio_td = curthread;
1886 		aiov.iov_base = ap->a_target;
1887 		aiov.iov_len = bytes;
1888 		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
1889 		/* XXX handle error */
1890 		error = 0;
1891 	} else {
1892 		hammer2_inode_unlock(nip);
1893 	}
1894 
1895 	/*
1896 	 * Update dip's mtime
1897 	 */
1898 	if (error == 0) {
1899 		uint64_t mtime;
1900 
1901 		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
1902 		hammer2_update_time(&mtime);
1903 		hammer2_inode_modify(dip);
1904 		dip->meta.mtime = mtime;
1905 		/*hammer2_inode_unlock(dip);*/
1906 	}
1907 	hammer2_inode_unlock(dip);
1908 
1909 	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
1910 
1911 	/*
1912 	 * Finalize namecache
1913 	 */
1914 	if (error == 0) {
1915 		cache_setunresolved(ap->a_nch);
1916 		cache_setvp(ap->a_nch, *ap->a_vpp);
1917 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
1918 	}
1919 	return error;
1920 }
1921 
1922 /*
1923  * hammer2_vop_nremove { nch, dvp, cred }
1924  */
1925 static
1926 int
1927 hammer2_vop_nremove(struct vop_nremove_args *ap)
1928 {
1929 	hammer2_xop_unlink_t *xop;
1930 	hammer2_inode_t *dip;
1931 	hammer2_inode_t *ip;
1932 	struct vnode *vprecycle;
1933 	struct namecache *ncp;
1934 	int error;
1935 
1936 	dip = VTOI(ap->a_dvp);
1937 	if (dip->pmp->ronly)
1938 		return (EROFS);
1939 #if 0
1940 	/* allow removals even when low on space; expect user to run bulkfree */
1941 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
1942 		return (ENOSPC);
1943 #endif
1944 
1945 	ncp = ap->a_nch->ncp;
1946 
1947 	if (hammer2_debug_inode && dip->meta.inum == hammer2_debug_inode) {
1948 		kprintf("hammer2: attempt to delete inside debug inode: %s\n",
1949 			ncp->nc_name);
1950 		while (hammer2_debug_inode &&
1951 		       dip->meta.inum == hammer2_debug_inode) {
1952 			tsleep(&hammer2_debug_inode, 0, "h2debug", hz*5);
1953 		}
1954 	}
1955 
1956 	hammer2_trans_init(dip->pmp, 0);
1957 	hammer2_inode_lock(dip, 0);
1958 
1959 	/*
1960 	 * The unlink XOP unlinks the path from the directory and
1961 	 * locates and returns the cluster associated with the real inode.
1962 	 * We have to handle nlinks here on the frontend.
1963 	 */
1964 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
1965 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
1966 
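	/*
	 * isdir == 0 means the target must not be a directory, and
	 * dopermanent == 0 requests a normal (non-permanent) deletion;
	 * this reading of the xop fields is an assumption based on how
	 * nrmdir sets isdir = 1 below.
	 */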
1967 	xop->isdir = 0;
1968 	xop->dopermanent = 0;
1969 	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
1970 
1971 	/*
1972 	 * Collect the real inode and adjust nlinks; destroy the inode
1973 	 * if nlinks transitions to 0 and this was the real inode
1974 	 * (otherwise it has already been removed).
1975 	 */
1976 	error = hammer2_xop_collect(&xop->head, 0);
1977 	error = hammer2_error_to_errno(error);
1978 	vprecycle = NULL;
1979 
1980 	if (error == 0) {
1981 		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
1982 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
1983 		if (ip) {
1984 			if (hammer2_debug_inode &&
1985 			    ip->meta.inum == hammer2_debug_inode) {
1986 				kprintf("hammer2: attempt to delete debug "
1987 					"inode!\n");
1988 				while (hammer2_debug_inode &&
1989 				       ip->meta.inum == hammer2_debug_inode) {
1990 					tsleep(&hammer2_debug_inode, 0,
1991 					       "h2debug", hz*5);
1992 				}
1993 			}
1994 			hammer2_inode_unlink_finisher(ip, &vprecycle);
1995 			hammer2_inode_depend(dip, ip); /* after modified */
1996 			hammer2_inode_unlock(ip);
1997 		}
1998 	} else {
1999 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
2000 	}
2001 
2002 	/*
2003 	 * Update dip's mtime
2004 	 */
2005 	if (error == 0) {
2006 		uint64_t mtime;
2007 
2008 		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
2009 		hammer2_update_time(&mtime);
2010 		hammer2_inode_modify(dip);
2011 		dip->meta.mtime = mtime;
2012 		/*hammer2_inode_unlock(dip);*/
2013 	}
2014 	hammer2_inode_unlock(dip);
2015 
2016 	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
2017 	if (error == 0) {
2018 		cache_unlink(ap->a_nch);
2019 		hammer2_knote(ap->a_dvp, NOTE_WRITE);
2020 	}
2021 	if (vprecycle)
2022 		hammer2_inode_vprecycle(vprecycle);
2023 
2024 	return (error);
2025 }
2026 
2027 /*
2028  * hammer2_vop_nrmdir { nch, dvp, cred }
2029  */
2030 static
2031 int
2032 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
2033 {
2034 	hammer2_xop_unlink_t *xop;
2035 	hammer2_inode_t *dip;
2036 	hammer2_inode_t *ip;
2037 	struct namecache *ncp;
2038 	struct vnode *vprecycle;
2039 	int error;
2040 
2041 	dip = VTOI(ap->a_dvp);
2042 	if (dip->pmp->ronly)
2043 		return (EROFS);
2044 #if 0
2045 	/* removals stay allowed when low on space; user should also bulkfree */
2046 	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
2047 		return (ENOSPC);
2048 #endif
2049 
2050 	hammer2_trans_init(dip->pmp, 0);
2051 	hammer2_inode_lock(dip, 0);
2052 
2053 	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
2054 
2055 	ncp = ap->a_nch->ncp;
2056 	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
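	/* isdir == 1: the target must be a directory (cf. nremove above) */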
2057 	xop->isdir = 1;
2058 	xop->dopermanent = 0;
2059 	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);
2060 
2061 	/*
2062 	 * Collect the real inode and adjust nlinks; destroy the inode
2063 	 * if nlinks transitions to 0 and this was the real inode
2064 	 * (otherwise it has already been removed).
2065 	 */
2066 	error = hammer2_xop_collect(&xop->head, 0);
2067 	error = hammer2_error_to_errno(error);
2068 	vprecycle = NULL;
2069 
2070 	if (error == 0) {
2071 		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
2072 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
2073 		if (ip) {
2074 			hammer2_inode_unlink_finisher(ip, &vprecycle);
2075 			hammer2_inode_depend(dip, ip);	/* after modified */
2076 			hammer2_inode_unlock(ip);
2077 		}
2078 	} else {
2079 		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
2080 	}
2081 
2082 	/*
2083 	 * Update dip's mtime
2084 	 */
2085 	if (error == 0) {
2086 		uint64_t mtime;
2087 
2088 		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
2089 		hammer2_update_time(&mtime);
2090 		hammer2_inode_modify(dip);
2091 		dip->meta.mtime = mtime;
2092 		/*hammer2_inode_unlock(dip);*/
2093 	}
2094 	hammer2_inode_unlock(dip);
2095 
2096 	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
2097 	if (error == 0) {
2098 		cache_unlink(ap->a_nch);
2099 		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
2100 	}
2101 	if (vprecycle)
2102 		hammer2_inode_vprecycle(vprecycle);
2103 	return (error);
2104 }
2105 
2106 /*
2107  * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
2108  */
2109 static
2110 int
2111 hammer2_vop_nrename(struct vop_nrename_args *ap)
2112 {
2113 	struct namecache *fncp;
2114 	struct namecache *tncp;
2115 	hammer2_inode_t *fdip;	/* source directory */
2116 	hammer2_inode_t *tdip;	/* target directory */
2117 	hammer2_inode_t *ip;	/* file being renamed */
2118 	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
2119 	struct vnode *vprecycle;
2120 	const uint8_t *fname;
2121 	size_t fname_len;
2122 	const uint8_t *tname;
2123 	size_t tname_len;
2124 	int error;
2125 	int update_tdip;
2126 	int update_fdip;
2127 	hammer2_key_t tlhc;
2128 
2129 	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
2130 		return(EXDEV);
2131 	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
2132 		return(EXDEV);
2133 
2134 	fdip = VTOI(ap->a_fdvp);	/* source directory */
2135 	tdip = VTOI(ap->a_tdvp);	/* target directory */
2136 
2137 	if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG))
2138 		return (EROFS);
2139 	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
2140 		return (ENOSPC);
2141 
2142 	fncp = ap->a_fnch->ncp;		/* entry name in source */
2143 	fname = fncp->nc_name;
2144 	fname_len = fncp->nc_nlen;
2145 
2146 	tncp = ap->a_tnch->ncp;		/* entry name in target */
2147 	tname = tncp->nc_name;
2148 	tname_len = tncp->nc_nlen;
2149 
2150 	hammer2_trans_init(tdip->pmp, 0);
2151 
2152 	update_tdip = 0;
2153 	update_fdip = 0;
2154 
2155 	ip = VTOI(fncp->nc_vp);
2156 	hammer2_inode_ref(ip);		/* extra ref */
2157 
2158 	/*
2159 	 * Look up the target name to determine if a directory entry
2160 	 * is being overwritten.  We only hold related inode locks
2161 	 * temporarily; the operating system is expected to protect
2162 	 * against rename races.
2163 	 */
2164 	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
2165 	if (tip)
2166 		hammer2_inode_ref(tip);	/* extra ref */
2167 
2168 	/*
2169 	 * Can return NULL and error == EXDEV if the common parent
2170 	 * crosses a directory with the xlink flag set.
2171 	 *
2172 	 * For now, try to avoid deadlocks with a simple pointer-address
2173 	 * test.  (tip) can be NULL.
2174 	 */
2175 	error = 0;
2176 	{
2177 		hammer2_inode_t *ip1 = fdip;
2178 		hammer2_inode_t *ip2 = tdip;
2179 		hammer2_inode_t *ip3 = ip;
2180 		hammer2_inode_t *ip4 = tip;	/* may be NULL */
2181 
2182 		if (fdip > tdip) {
2183 			ip1 = tdip;
2184 			ip2 = fdip;
2185 		}
2186 		if (tip && ip > tip) {
2187 			ip3 = tip;
2188 			ip4 = ip;
2189 		}
2190 		hammer2_inode_lock4(ip1, ip2, ip3, ip4);
2191 	}
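	/*
	 * Example of the ordering above: if fdip sits at a higher
	 * address than tdip, the pair is swapped so the lower address is
	 * locked first, and likewise for (ip, tip).  Acquiring the four
	 * locks in a canonical order avoids AB-BA deadlocks between
	 * concurrent renames touching the same inodes.
	 */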
2192 
2193 	/*
2194 	 * Resolve the collision space for (tdip, tname, tname_len)
2195 	 *
2196 	 * tdip must be held exclusively locked to prevent races since
2197 	 * multiple filenames can end up in the same collision space.
2198 	 */
2199 	{
2200 		hammer2_xop_scanlhc_t *sxop;
2201 		hammer2_key_t lhcbase;	/* same key space as tlhc */
2202 
2203 		tlhc = hammer2_dirhash(tname, tname_len);
2204 		lhcbase = tlhc;
2205 		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
2206 		sxop->lhc = tlhc;
2207 		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
2208 		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
2209 			if (tlhc != sxop->head.cluster.focus->bref.key)
2210 				break;
2211 			++tlhc;
2212 		}
2213 		error = hammer2_error_to_errno(error);
2214 		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);
2215 
2216 		if (error) {
2217 			if (error != ENOENT)
2218 				goto done2;
2219 			++tlhc;
2220 			error = 0;
2221 		}
2222 		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
2223 			error = ENOSPC;
2224 			goto done2;
2225 		}
2226 	}
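	/*
	 * At this point tlhc is a free key in the directory-hash
	 * collision space.  For example, if hammer2_dirhash(tname)
	 * yields key K and entries already exist at K and K+1, the scan
	 * above leaves tlhc == K+2.  The (lhcbase ^ tlhc) test only
	 * fails with ENOSPC when the increments walked past the
	 * HAMMER2_DIRHASH_LOMASK iteration space.
	 */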
2227 
2228 	/*
2229 	 * Ready to go: issue the rename to the backend.  Note that meta-data
2230 	 * updates to the related inodes occur separately from the rename
2231 	 * operation.
2232 	 *
2233 	 * NOTE: While it is not necessary to update ip->meta.name*, doing
2234 	 *	 so aids catastrophic recovery and debugging.
2235 	 */
2236 	if (error == 0) {
2237 		hammer2_xop_nrename_t *xop4;
2238 
2239 		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
2240 		xop4->lhc = tlhc;
2241 		xop4->ip_key = ip->meta.name_key;
2242 		hammer2_xop_setip2(&xop4->head, ip);
2243 		hammer2_xop_setip3(&xop4->head, tdip);
2244 		if (tip && tip->meta.type == HAMMER2_OBJTYPE_DIRECTORY)
2245 		    hammer2_xop_setip4(&xop4->head, tip);
2246 		hammer2_xop_setname(&xop4->head, fname, fname_len);
2247 		hammer2_xop_setname2(&xop4->head, tname, tname_len);
2248 		hammer2_xop_start(&xop4->head, &hammer2_nrename_desc);
2249 
2250 		error = hammer2_xop_collect(&xop4->head, 0);
2251 		error = hammer2_error_to_errno(error);
2252 		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);
2253 
2254 		if (error == ENOENT)
2255 			error = 0;
2256 
2257 		/*
2258 		 * Update inode meta-data.
2259 		 *
2260 		 * WARNING!  The in-memory inode (ip) structure does not
2261 		 *	     maintain a copy of the inode's filename buffer.
2262 		 */
2263 		if (error == 0 &&
2264 		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
2265 			hammer2_inode_modify(ip);
2266 			ip->meta.name_len = tname_len;
2267 			ip->meta.name_key = tlhc;
2268 		}
2269 		if (error == 0) {
2270 			hammer2_inode_modify(ip);
2271 			ip->meta.iparent = tdip->meta.inum;
2272 		}
2273 		update_fdip = 1;
2274 		update_tdip = 1;
2275 	}
2276 
2277 done2:
2278 	/*
2279 	 * If no error, the backend has replaced the target directory entry.
2280 	 * We must adjust nlinks on the replaced target if it exists.
2281 	 */
2282 	vprecycle = NULL;
2283 	if (error == 0 && tip) {
2284 		hammer2_inode_unlink_finisher(tip, &vprecycle);
2285 	}
2286 
2287 	/*
2288 	 * Update directory mtimes to reflect that something changed.
2289 	 */
2290 	if (update_fdip || update_tdip) {
2291 		uint64_t mtime;
2292 
2293 		hammer2_update_time(&mtime);
2294 		if (update_fdip) {
2295 			hammer2_inode_modify(fdip);
2296 			fdip->meta.mtime = mtime;
2297 		}
2298 		if (update_tdip) {
2299 			hammer2_inode_modify(tdip);
2300 			tdip->meta.mtime = mtime;
2301 		}
2302 	}
2303 	if (tip) {
2304 		hammer2_inode_unlock(tip);
2305 		hammer2_inode_drop(tip);
2306 	}
2307 	hammer2_inode_unlock(ip);
2308 	hammer2_inode_unlock(tdip);
2309 	hammer2_inode_unlock(fdip);
2310 	hammer2_inode_drop(ip);
2311 	hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ);
2312 
2313 	/*
2314 	 * Issue the namecache update after unlocking all the internal
2315 	 * hammer2 structures, otherwise we might deadlock.
2316 	 *
2317 	 * WARNING! The target namespace must be updated atomically,
2318 	 *	    and we depend on cache_rename() to handle that for
2319 	 *	    us.  Do not do a separate cache_unlink() because
2320 	 *	    that leaves a small window of opportunity for other
2321 	 *	    threads to allocate the target namespace before we
2322 	 *	    manage to complete our rename.
2323 	 *
2324 	 * WARNING! cache_rename() (and cache_unlink()) will properly
2325 	 *	    set VREF_FINALIZE on any attached vnode.  Do not
2326 	 *	    call cache_setunresolved() manually beforehand, as
2327 	 *	    this will prevent the flag from being set later via
2328 	 *	    cache_rename().  If VREF_FINALIZE is not properly set
2329 	 *	    and the inode is no longer in the topology, related
2330 	 *	    chains can remain dirty indefinitely.
2331 	 */
2332 	if (error == 0 && tip) {
2333 		/*cache_unlink(ap->a_tnch); see above */
2334 		/*cache_setunresolved(ap->a_tnch); see above */
2335 	}
2336 	if (error == 0) {
2337 		cache_rename(ap->a_fnch, ap->a_tnch);
2338 		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
2339 		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
2340 		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
2341 	}
2342 	if (vprecycle)
2343 		hammer2_inode_vprecycle(vprecycle);
2344 
2345 	return (error);
2346 }
2347 
2348 /*
2349  * hammer2_vop_ioctl { vp, command, data, fflag, cred }
2350  */
2351 static
2352 int
2353 hammer2_vop_ioctl(struct vop_ioctl_args *ap)
2354 {
2355 	hammer2_inode_t *ip;
2356 	int error;
2357 
2358 	ip = VTOI(ap->a_vp);
2359 
2360 	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
2361 			      ap->a_fflag, ap->a_cred);
2362 	return (error);
2363 }
2364 
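/*
 * MOUNTCTL_SET_EXPORT installs NFS export information for the PFS via
 * vfs_export().  It is normally driven through the mountctl(2)
 * interface (e.g. by mountd(8), an assumption about the usual caller);
 * all other ops fall through to vop_stdmountctl().
 */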
2365 static
2366 int
2367 hammer2_vop_mountctl(struct vop_mountctl_args *ap)
2368 {
2369 	struct mount *mp;
2370 	hammer2_pfs_t *pmp;
2371 	int rc;
2372 
2373 	switch (ap->a_op) {
2374 	case MOUNTCTL_SET_EXPORT:
2375 		mp = ap->a_head.a_ops->head.vv_mount;
2376 		pmp = MPTOPMP(mp);
2377 
2378 		if (ap->a_ctllen != sizeof(struct export_args))
2379 			rc = (EINVAL);
2380 		else
2381 			rc = vfs_export(mp, &pmp->export,
2382 					(const struct export_args *)ap->a_ctl);
2383 		break;
2384 	default:
2385 		rc = vop_stdmountctl(ap);
2386 		break;
2387 	}
2388 	return (rc);
2389 }
2390 
2391 /*
2392  * KQFILTER
2393  */
2394 static void filt_hammer2detach(struct knote *kn);
2395 static int filt_hammer2read(struct knote *kn, long hint);
2396 static int filt_hammer2write(struct knote *kn, long hint);
2397 static int filt_hammer2vnode(struct knote *kn, long hint);
2398 
2399 static struct filterops hammer2read_filtops =
2400 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2401 	  NULL, filt_hammer2detach, filt_hammer2read };
2402 static struct filterops hammer2write_filtops =
2403 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2404 	  NULL, filt_hammer2detach, filt_hammer2write };
2405 static struct filterops hammer2vnode_filtops =
2406 	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
2407 	  NULL, filt_hammer2detach, filt_hammer2vnode };
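/*
 * Each filterops initializer is { flags, attach, detach, event }.  The
 * three filters share filt_hammer2detach and differ only in the event
 * callback; FILTEROP_MPSAFE indicates the callbacks do not require the
 * MP lock.
 */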
2408 
2409 static
2410 int
2411 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
2412 {
2413 	struct vnode *vp = ap->a_vp;
2414 	struct knote *kn = ap->a_kn;
2415 
2416 	switch (kn->kn_filter) {
2417 	case EVFILT_READ:
2418 		kn->kn_fop = &hammer2read_filtops;
2419 		break;
2420 	case EVFILT_WRITE:
2421 		kn->kn_fop = &hammer2write_filtops;
2422 		break;
2423 	case EVFILT_VNODE:
2424 		kn->kn_fop = &hammer2vnode_filtops;
2425 		break;
2426 	default:
2427 		return (EOPNOTSUPP);
2428 	}
2429 
2430 	kn->kn_hook = (caddr_t)vp;
2431 
2432 	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2433 
2434 	return(0);
2435 }
2436 
2437 static void
2438 filt_hammer2detach(struct knote *kn)
2439 {
2440 	struct vnode *vp = (void *)kn->kn_hook;
2441 
2442 	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
2443 }
2444 
2445 static int
2446 filt_hammer2read(struct knote *kn, long hint)
2447 {
2448 	struct vnode *vp = (void *)kn->kn_hook;
2449 	hammer2_inode_t *ip = VTOI(vp);
2450 	off_t off;
2451 
2452 	if (hint == NOTE_REVOKE) {
2453 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2454 		return(1);
2455 	}
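	/*
	 * kn_data is the number of bytes readable ahead of the
	 * descriptor's current offset.  The result is clamped because
	 * kn_data cannot represent the full off_t range on 32-bit
	 * platforms.
	 */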
2456 	off = ip->meta.size - kn->kn_fp->f_offset;
2457 	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
2458 	if (kn->kn_sfflags & NOTE_OLDAPI)
2459 		return(1);
2460 	return (kn->kn_data != 0);
2461 }
2462 
2464 static int
2465 filt_hammer2write(struct knote *kn, long hint)
2466 {
2467 	if (hint == NOTE_REVOKE)
2468 		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
2469 	kn->kn_data = 0;
2470 	return (1);
2471 }
2472 
2473 static int
2474 filt_hammer2vnode(struct knote *kn, long hint)
2475 {
2476 	if (kn->kn_sfflags & hint)
2477 		kn->kn_fflags |= hint;
2478 	if (hint == NOTE_REVOKE) {
2479 		kn->kn_flags |= (EV_EOF | EV_NODATA);
2480 		return (1);
2481 	}
2482 	return (kn->kn_fflags != 0);
2483 }
2484 
2485 /*
2486  * FIFO VOPS
2487  */
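/*
 * Note that hammer2 does not record access times here: markatime only
 * enforces read-only/emergency-mount semantics and is otherwise a
 * no-op.
 */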
2488 static
2489 int
2490 hammer2_vop_markatime(struct vop_markatime_args *ap)
2491 {
2492 	hammer2_inode_t *ip;
2493 	struct vnode *vp;
2494 
2495 	vp = ap->a_vp;
2496 	ip = VTOI(vp);
2497 
2498 	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
2499 		return (EROFS);
2500 	return(0);
2501 }
2502 
2503 static
2504 int
2505 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
2506 {
2507 	int error;
2508 
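	/*
	 * Give fifofs first crack at the filter (it implements the FIFO
	 * read/write event semantics); if it rejects the filter type,
	 * fall back to the generic hammer2 vnode kqfilter, e.g. for
	 * EVFILT_VNODE.
	 */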
2509 	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
2510 	if (error)
2511 		error = hammer2_vop_kqfilter(ap);
2512 	return(error);
2513 }
2514 
2515 /*
2516  * VOPS vector
2517  */
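/*
 * Regular files and directories use hammer2_vnode_vops.  Special files
 * (devices) use hammer2_spec_vops, which stubs out read/write.  FIFOs
 * layer hammer2 attribute and fsync handling over fifofs via
 * hammer2_fifo_vops.
 */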
2518 struct vop_ops hammer2_vnode_vops = {
2519 	.vop_default	= vop_defaultop,
2520 	.vop_fsync	= hammer2_vop_fsync,
2521 	.vop_getpages	= vop_stdgetpages,
2522 	.vop_putpages	= vop_stdputpages,
2523 	.vop_access	= hammer2_vop_access,
2524 	.vop_advlock	= hammer2_vop_advlock,
2525 	.vop_close	= hammer2_vop_close,
2526 	.vop_nlink	= hammer2_vop_nlink,
2527 	.vop_ncreate	= hammer2_vop_ncreate,
2528 	.vop_nsymlink	= hammer2_vop_nsymlink,
2529 	.vop_nremove	= hammer2_vop_nremove,
2530 	.vop_nrmdir	= hammer2_vop_nrmdir,
2531 	.vop_nrename	= hammer2_vop_nrename,
2532 	.vop_getattr	= hammer2_vop_getattr,
2533 	.vop_getattr_lite = hammer2_vop_getattr_lite,
2534 	.vop_setattr	= hammer2_vop_setattr,
2535 	.vop_readdir	= hammer2_vop_readdir,
2536 	.vop_readlink	= hammer2_vop_readlink,
2537 	.vop_read	= hammer2_vop_read,
2538 	.vop_write	= hammer2_vop_write,
2539 	.vop_open	= hammer2_vop_open,
2540 	.vop_inactive	= hammer2_vop_inactive,
2541 	.vop_reclaim	= hammer2_vop_reclaim,
2542 	.vop_nresolve	= hammer2_vop_nresolve,
2543 	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
2544 	.vop_nmkdir	= hammer2_vop_nmkdir,
2545 	.vop_nmknod	= hammer2_vop_nmknod,
2546 	.vop_ioctl	= hammer2_vop_ioctl,
2547 	.vop_mountctl	= hammer2_vop_mountctl,
2548 	.vop_bmap	= hammer2_vop_bmap,
2549 	.vop_strategy	= hammer2_vop_strategy,
2550 	.vop_kqfilter	= hammer2_vop_kqfilter
2551 };
2552 
2553 struct vop_ops hammer2_spec_vops = {
2554 	.vop_default	= vop_defaultop,
2555 	.vop_fsync	= hammer2_vop_fsync,
2556 	.vop_read	= vop_stdnoread,
2557 	.vop_write	= vop_stdnowrite,
2558 	.vop_access	= hammer2_vop_access,
2559 	.vop_close	= hammer2_vop_close,
2560 	.vop_markatime	= hammer2_vop_markatime,
2561 	.vop_getattr	= hammer2_vop_getattr,
2562 	.vop_inactive	= hammer2_vop_inactive,
2563 	.vop_reclaim	= hammer2_vop_reclaim,
2564 	.vop_setattr	= hammer2_vop_setattr
2565 };
2566 
2567 struct vop_ops hammer2_fifo_vops = {
2568 	.vop_default	= fifo_vnoperate,
2569 	.vop_fsync	= hammer2_vop_fsync,
2570 #if 0
2571 	.vop_read	= hammer2_vop_fiforead,
2572 	.vop_write	= hammer2_vop_fifowrite,
2573 #endif
2574 	.vop_access	= hammer2_vop_access,
2575 #if 0
2576 	.vop_close	= hammer2_vop_fifoclose,
2577 #endif
2578 	.vop_markatime	= hammer2_vop_markatime,
2579 	.vop_getattr	= hammer2_vop_getattr,
2580 	.vop_inactive	= hammer2_vop_inactive,
2581 	.vop_reclaim	= hammer2_vop_reclaim,
2582 	.vop_setattr	= hammer2_vop_setattr,
2583 	.vop_kqfilter	= hammer2_vop_fifokqfilter
2584 };
2585 
2586