1 /*	$NetBSD: chfs_vnops.c,v 1.48 2022/03/27 16:24:58 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Department of Software Engineering,
5  *		      University of Szeged, Hungary
6  * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
7  * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
8  * All rights reserved.
9  *
10  * This code is derived from software contributed to The NetBSD Foundation
11  * by the Department of Software Engineering, University of Szeged, Hungary
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/param.h>
36 #include <miscfs/specfs/specdev.h>
37 #include <miscfs/fifofs/fifo.h>
38 #include <miscfs/genfs/genfs.h>
39 #include <ufs/ufs/dir.h>
40 #include <ufs/ufs/ufs_extern.h>
41 #include <uvm/uvm_extern.h>
42 #include <sys/namei.h>
43 #include <sys/stat.h>
44 #include <sys/fcntl.h>
45 #include <sys/buf.h>
46 #include <sys/vnode.h>
47 
48 #include "chfs.h"
49 
50 #define READ_S  "chfs_read"
51 
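/*
 * Look up the component name in directory dvp and return the matching
 * vnode in *vpp, consulting the name cache before scanning the dirents.
 */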
52 int
53 chfs_lookup(void *v)
54 {
55 	struct vnode *dvp = ((struct vop_lookup_v2_args *) v)->a_dvp;
56 	struct vnode **vpp = ((struct vop_lookup_v2_args *) v)->a_vpp;
57 	struct componentname *cnp = ((struct vop_lookup_v2_args *) v)->a_cnp;
58 
59 	int error;
60 	struct chfs_inode* ip;
61 	struct ufsmount* ump;
62 	struct chfs_mount* chmp;
63 	struct chfs_vnode_cache* chvc __diagused;
64 	struct chfs_dirent* fd;
65 
66 	dbg("lookup(): %s\n", cnp->cn_nameptr);
67 
68 	KASSERT(VOP_ISLOCKED(dvp));
69 
70 	*vpp = NULL;
71 
72 	/* Check accessibility of requested node as a first step. */
73 	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
74 	if (error != 0) {
75 		goto out;
76 	}
77 
78 	/* If requesting the last path component on a read-only file system
79 	 * with a write operation, deny it. */
80 	if ((cnp->cn_flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY)
81 	    && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
82 		error = EROFS;
83 		goto out;
84 	}
85 
86 	/* Avoid doing a linear scan of the directory if the requested
87 	 * directory/name couple is already in the cache. */
88 	if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
89 			 cnp->cn_nameiop, cnp->cn_flags, NULL, vpp)) {
90 		return (*vpp == NULLVP ? ENOENT : 0);
91 	}
92 
93 	/* May need to restart the lookup with an exclusive lock. */
94 	if (VOP_ISLOCKED(dvp) != LK_EXCLUSIVE)
95 		return ENOLCK;
96 
97 	ip = VTOI(dvp);
98 	ump = VFSTOUFS(dvp->v_mount);
99 	chmp = ump->um_chfs;
100 	if (ip->ino == 0) {
101 		ip->ino = ++chmp->chm_max_vno;
102 	}
103 	mutex_enter(&chmp->chm_lock_vnocache);
104 	chvc = chfs_vnode_cache_get(chmp, ip->ino);
105 	mutex_exit(&chmp->chm_lock_vnocache);
106 
107 	/* We cannot be requesting the parent directory of the root node. */
108 	KASSERT(IMPLIES(ip->ch_type == CHT_DIR && chvc->pvno == chvc->vno,
109 		!(cnp->cn_flags & ISDOTDOT)));
110 
111 	if (cnp->cn_flags & ISDOTDOT) {
112 		VOP_UNLOCK(dvp);
113 		error = VFS_VGET(dvp->v_mount, ip->chvc->pvno, LK_EXCLUSIVE,
114 		    vpp);
115 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
116 	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
117 		vref(dvp);
118 		*vpp = dvp;
119 		error = 0;
120 	} else {
121 		fd = chfs_dir_lookup(ip, cnp);
122 
123 		if (fd == NULL) {
124 			dbg("fd null\n");
125 			/* The entry was not found in the directory.
126 			 * This is OK if we are creating or renaming an
127 			 * entry and are working on the last component of
128 			 * the path name. */
129 			if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_nameiop == CREATE
130 				|| cnp->cn_nameiop == RENAME)) {
131 				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
132 				if (error) {
133 					dbg("after the entry was not found in dir\n");
134 					goto out;
135 				}
136 
137 				dbg("return EJUSTRETURN\n");
138 				error = EJUSTRETURN;
139 			} else {
140 				error = ENOENT;
141 			}
142 		} else {
143 			/* If we are not at the last path component and
144 			 * found a non-directory or non-link entry (which
145 			 * may itself be pointing to a directory), raise
146 			 * an error. */
147 			if ((fd->type != CHT_DIR && fd->type != CHT_LNK) && !(cnp->cn_flags
148 				& ISLASTCN)) {
149 				error = ENOTDIR;
150 				goto out;
151 			}
152 
153 			dbg("vno@allocating new vnode: %llu\n",
154 				(unsigned long long)fd->vno);
155 			error = VFS_VGET(dvp->v_mount, fd->vno, LK_EXCLUSIVE,
156 			    vpp);
157 		}
158 	}
159 	/* Store the result of this lookup in the cache.  Avoid this if the
160 	 * request was for creation, as it does not improve timings on
161 	 * empirical tests. */
162 	if (cnp->cn_nameiop != CREATE && (cnp->cn_flags & ISDOTDOT) == 0) {
163 		cache_enter(dvp, *vpp, cnp->cn_nameptr, cnp->cn_namelen,
164 			    cnp->cn_flags);
165 	}
166 
167 out:
168 	/* If there were no errors, *vpp cannot be NULL. */
169 	KASSERT(IFF(error == 0, *vpp != NULL));
170 	KASSERT(VOP_ISLOCKED(dvp));
171 
172 	if (error)
173 		return error;
174 	if (*vpp != dvp)
175 		VOP_UNLOCK(*vpp);
176 	return 0;
177 }
178 
179 /* --------------------------------------------------------------------- */
180 
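/* Create a regular file (or socket) through chfs_makeinode(). */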
181 int
182 chfs_create(void *v)
183 {
184 	struct vop_create_v3_args /* {
185 				  struct vnode *a_dvp;
186 				  struct vnode **a_vpp;
187 				  struct componentname *a_cnp;
188 				  struct vattr *a_vap;
189 				  } */*ap = v;
190 	int error, mode;
191 	dbg("create()\n");
192 
193 	mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);
194 
195 	if ((mode & IFMT) == 0) {
196 		if (ap->a_vap->va_type == VREG)
197 			mode |= IFREG;
198 		if (ap->a_vap->va_type == VSOCK)
199 			mode |= IFSOCK;
200 	}
201 
202 	error = chfs_makeinode(mode, ap->a_dvp,	ap->a_vpp, ap->a_cnp, ap->a_vap->va_type);
203 
204 	if (error) {
205 		dbg("error: %d\n", error);
206 		return error;
207 	}
208 
209 	return 0;
210 }
211 /* --------------------------------------------------------------------- */
212 
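/*
 * Create a device special file or FIFO.  The device number is stored
 * on flash as the node's first data node.
 */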
213 int
214 chfs_mknod(void *v)
215 {
216 	struct vnode *dvp = ((struct vop_mknod_v3_args *) v)->a_dvp;
217 	struct vnode **vpp = ((struct vop_mknod_v3_args *) v)->a_vpp;
218 	struct componentname *cnp = ((struct vop_mknod_v3_args *) v)->a_cnp;
219 	struct vattr *vap = ((struct vop_mknod_v3_args *) v)->a_vap;
220 	int mode, err = 0;
221 	struct chfs_inode *ip;
222 	struct vnode *vp;
223 
224 	struct ufsmount *ump;
225 	struct chfs_mount *chmp;
226 
227 	struct chfs_full_dnode *fd;
228 	struct buf *bp;
229 	int len;
230 	dbg("mknod()\n");
231 
232 	ump = VFSTOUFS(dvp->v_mount);
233 	chmp = ump->um_chfs;
234 
235 	/* Check type of node. */
236 	if (vap->va_type != VBLK && vap->va_type != VCHR && vap->va_type != VFIFO)
237 		return EINVAL;
238 
239 	vp = *vpp;
240 
241 	mode = MAKEIMODE(vap->va_type, vap->va_mode);
242 
243 	if ((mode & IFMT) == 0) {
244 		switch (vap->va_type) {
245 		case VBLK:
246 			mode |= IFBLK;
247 			break;
248 		case VCHR:
249 			mode |= IFCHR;
250 			break;
251 		case VFIFO:
252 			mode |= IFIFO;
253 			break;
254 		default:
255 			break;
256 		}
257 	}
258 
259 	/* Create a new node. */
260 	err = chfs_makeinode(mode, dvp, &vp, cnp, vap->va_type);
261 
262 	ip = VTOI(vp);
263 	if (vap->va_rdev != VNOVAL)
264 		ip->rdev = vap->va_rdev;
265 
266 	if (vap->va_type == VFIFO)
267 		vp->v_op = chfs_fifoop_p;
268 	else {
269 		vp->v_op = chfs_specop_p;
270 		spec_node_init(vp, ip->rdev);
271 	}
272 
273 	if (err)
274 		return err;
275 
276 	/* Device is written out as a data node. */
277 	len = sizeof(dev_t);
278 	chfs_set_vnode_size(vp, len);
279 	bp = getiobuf(vp, true);
280 	bp->b_bufsize = bp->b_resid = len;
281 	bp->b_data = kmem_alloc(len, KM_SLEEP);
282 	memcpy(bp->b_data, &ip->rdev, len);
283 	bp->b_blkno = 0;
284 
285 	fd = chfs_alloc_full_dnode();
286 
287 	mutex_enter(&chmp->chm_lock_mountfields);
288 
289 	err = chfs_write_flash_dnode(chmp, vp, bp, fd);
290 	if (err) {
291 		mutex_exit(&chmp->chm_lock_mountfields);
292 		kmem_free(bp->b_data, len);
293 		return err;
294 	}
295 
296 	/* Add data node to the inode. */
297 	err = chfs_add_full_dnode_to_inode(chmp, ip, fd);
298 	if (err) {
299 		mutex_exit(&chmp->chm_lock_mountfields);
300 		kmem_free(bp->b_data, len);
301 		return err;
302 	}
303 
304 	mutex_exit(&chmp->chm_lock_mountfields);
305 
306 	*vpp = vp;
307 	kmem_free(bp->b_data, len);
308 	putiobuf(bp);
309 
310 	return 0;
311 }
312 
313 /* --------------------------------------------------------------------- */
314 
315 int
316 chfs_open(void *v)
317 {
318 	struct vnode *vp = ((struct vop_open_args *) v)->a_vp;
319 	int mode = ((struct vop_open_args *) v)->a_mode;
320 	dbg("open()\n");
321 
322 	int error;
323 	struct chfs_inode *ip;
324 
325 	KASSERT(VOP_ISLOCKED(vp));
326 
327 	ip = VTOI(vp);
328 
329 	KASSERT(vp->v_size == ip->size);
330 	if (ip->chvc->nlink < 1) {
331 		error = ENOENT;
332 		goto out;
333 	}
334 
335 	/* If the file is marked append-only, deny write requests. */
336 	if (ip->flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
337 		error = EPERM;
338 	else
339 		error = 0;
340 
341 out:
342 	KASSERT(VOP_ISLOCKED(vp));
343 	return error;
344 }
345 
346 /* --------------------------------------------------------------------- */
347 
348 int
349 chfs_close(void *v)
350 {
351 	struct vnode *vp = ((struct vop_close_args *) v)->a_vp;
352 	dbg("close()\n");
353 
354 	struct chfs_inode *ip;
355 
356 	KASSERT(VOP_ISLOCKED(vp));
357 
358 	ip = VTOI(vp);
359 
360 	if (ip->chvc->nlink > 0) {
361 		chfs_update(vp, NULL, NULL, UPDATE_CLOSE);
362 	}
363 
364 	return 0;
365 }
366 
367 /* --------------------------------------------------------------------- */
368 
369 int
370 chfs_access(void *v)
371 {
372 	struct vnode *vp = ((struct vop_access_args *) v)->a_vp;
373 	accmode_t accmode = ((struct vop_access_args *) v)->a_accmode;
374 	kauth_cred_t cred = ((struct vop_access_args *) v)->a_cred;
375 
376 	dbg("access()\n");
377 	struct chfs_inode *ip = VTOI(vp);
378 
379 	if (accmode & VWRITE) {
380 		switch (vp->v_type) {
381 		case VLNK:
382 		case VDIR:
383 		case VREG:
384 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
385 				return (EROFS);
386 			break;
387 		case VBLK:
388 		case VCHR:
389 		case VSOCK:
390 		case VFIFO:
391 			break;
392 		default:
393 			break;
394 		}
395 	}
396 
397 	if (accmode & VWRITE && ip->flags & IMMUTABLE)
398 		return (EPERM);
399 
400 	return kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(accmode,
401 	    vp->v_type, ip->mode & ALLPERMS), vp, NULL, genfs_can_access(vp,
402 	    cred, ip->uid, ip->gid, ip->mode & ALLPERMS, NULL, accmode));
403 }
404 
405 /* --------------------------------------------------------------------- */
406 
407 int
408 chfs_getattr(void *v)
409 {
410 	struct vnode *vp = ((struct vop_getattr_args *) v)->a_vp;
411 	struct vattr *vap = ((struct vop_getattr_args *) v)->a_vap;
412 
413 	struct chfs_inode *ip = VTOI(vp);
414 	dbg("getattr()\n");
415 
416 	KASSERT(vp->v_size == ip->size);
417 
418 	vattr_null(vap);
419 	CHFS_ITIMES(ip, NULL, NULL, NULL);
420 
421 	vap->va_type = CHTTOVT(ip->ch_type);
422 	vap->va_mode = ip->mode & ALLPERMS;
423 	vap->va_nlink = ip->chvc->nlink;
424 	vap->va_uid = ip->uid;
425 	vap->va_gid = ip->gid;
426 	vap->va_fsid = ip->dev;
427 	vap->va_fileid = ip->ino;
428 	vap->va_size = ip->size;
429 	vap->va_blocksize = PAGE_SIZE;
430 	vap->va_atime.tv_sec = ip->atime;
431 	vap->va_atime.tv_nsec = 0;
432 	vap->va_mtime.tv_sec = ip->mtime;
433 	vap->va_mtime.tv_nsec = 0;
434 	vap->va_ctime.tv_sec = ip->ctime;
435 	vap->va_ctime.tv_nsec = 0;
436 	vap->va_gen = ip->version;
437 	vap->va_flags = ip->flags;
438 	vap->va_rdev = ip->rdev;
439 	vap->va_bytes = round_page(ip->size);
440 	vap->va_filerev = VNOVAL;
441 	vap->va_vaflags = 0;
442 	vap->va_spare = VNOVAL;
443 
444 	return 0;
445 }
446 
447 /* --------------------------------------------------------------------- */
448 
449 /* Note: modelled after the corresponding function in tmpfs. */
450 
451 int
452 chfs_setattr(void *v)
453 {
454 	struct vnode *vp = ((struct vop_setattr_args *) v)->a_vp;
455 	struct vattr *vap = ((struct vop_setattr_args *) v)->a_vap;
456 	kauth_cred_t cred = ((struct vop_setattr_args *) v)->a_cred;
457 
458 	struct chfs_inode *ip;
459 	struct ufsmount *ump = VFSTOUFS(vp->v_mount);
460 	struct chfs_mount *chmp = ump->um_chfs;
461 	int error = 0;
462 
463 	dbg("setattr()\n");
464 
465 	KASSERT(VOP_ISLOCKED(vp));
466 	ip = VTOI(vp);
467 
468 	/* Abort if any unsettable attribute is given. */
469 	if (vap->va_type != VNON || vap->va_nlink != VNOVAL ||
470 	    vap->va_fsid != VNOVAL || vap->va_fileid != VNOVAL ||
471 	    vap->va_blocksize != VNOVAL /*|| GOODTIME(&vap->va_ctime)*/ ||
472 	    vap->va_gen != VNOVAL || vap->va_rdev != VNOVAL ||
473 	    vap->va_bytes != VNOVAL) {
474 		return EINVAL;
475 	}
476 
477 	/* set flags */
478 	if (error == 0 && (vap->va_flags != VNOVAL)) {
479 		error = chfs_chflags(vp, vap->va_flags, cred);
480 		return error;
481 	}
482 
483 	if (ip->flags & (IMMUTABLE | APPEND)) {
484 		error = EPERM;
485 		return error;
486 	}
487 
488 	/* set size */
489 	if (error == 0 && (vap->va_size != VNOVAL)) {
490 		error = chfs_chsize(vp, vap->va_size, cred);
491 		if (error)
492 			return error;
493 	}
494 
495 	/* set owner */
496 	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL)) {
497 		error = chfs_chown(vp, vap->va_uid, vap->va_gid, cred);
498 		if (error)
499 			return error;
500 	}
501 
502 	/* set mode */
503 	if (error == 0 && (vap->va_mode != VNOVAL)) {
504 		error = chfs_chmod(vp, vap->va_mode, cred);
505 		if (error)
506 			return error;
507 	}
508 
509 	/* set time */
510 	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
511 		error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_TIMES, vp,
512 		    NULL, genfs_can_chtimes(vp, cred, ip->uid,
513 		    vap->va_vaflags));
514 		if (error)
515 			return error;
516 		if (vap->va_atime.tv_sec != VNOVAL)
517 			ip->iflag |= IN_ACCESS;
518 		if (vap->va_mtime.tv_sec != VNOVAL)
519 			ip->iflag |= IN_CHANGE | IN_UPDATE;
520 		error = chfs_update(vp,
521 		    &vap->va_atime, &vap->va_mtime, UPDATE_WAIT);
522 		if (error)
523 			return error;
524 	}
525 
526 	/* Write it out. */
527 	mutex_enter(&chmp->chm_lock_mountfields);
528 	error = chfs_write_flash_vnode(chmp, ip, ALLOC_NORMAL);
529 	mutex_exit(&chmp->chm_lock_mountfields);
530 
531 	return error;
532 }
533 
534 int
535 chfs_chmod(struct vnode *vp, int mode, kauth_cred_t cred)
536 {
537 	struct chfs_inode *ip = VTOI(vp);
538 	int error;
539 	dbg("chmod\n");
540 
541 	error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_SECURITY, vp,
542 	    NULL, genfs_can_chmod(vp, cred, ip->uid, ip->gid, mode));
543 	if (error)
544 		return error;
545 	ip->mode &= ~ALLPERMS;
546 	ip->mode |= (mode & ALLPERMS);
547 	ip->iflag |= IN_CHANGE;
548 
549 	error = chfs_update(vp, NULL, NULL, UPDATE_WAIT);
550 	if (error)
551 		return error;
552 
553 	return 0;
554 }
555 
556 int
557 chfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred)
558 {
559 	struct chfs_inode *ip = VTOI(vp);
560 	int error;
561 	dbg("chown\n");
562 
563 	if (uid == (uid_t)VNOVAL)
564 		uid = ip->uid;
565 	if (gid == (gid_t)VNOVAL)
566 		gid = ip->gid;
567 
568 	error = kauth_authorize_vnode(cred, KAUTH_VNODE_CHANGE_OWNERSHIP, vp,
569 	    NULL, genfs_can_chown(vp, cred, ip->uid, ip->gid, uid, gid));
570 	if (error)
571 		return error;
572 
573 	ip->gid = gid;
574 	ip->uid = uid;
575 	ip->iflag |= IN_CHANGE;
576 
577 	error = chfs_update(vp, NULL, NULL, UPDATE_WAIT);
578 	if (error)
579 		return error;
580 
581 	return 0;
582 }
583 
584 
585 /* --------------------------------------------------------------------- */
586 /* calculates ((off_t)blk * chmp->chm_fs_bsize) */
587 #define	chfs_lblktosize(chmp, blk)					      \
588 	(((off_t)(blk)) << (chmp)->chm_fs_bshift)
589 
590 /* calculates (loc % chmp->chm_fs_bsize) */
591 #define	chfs_blkoff(chmp, loc)							      \
592 	((loc) & (chmp)->chm_fs_qbmask)
593 
594 /* calculates (loc / chmp->chm_fs_bsize) */
595 #define	chfs_lblkno(chmp, loc)							      \
596 	((loc) >> (chmp)->chm_fs_bshift)
597 
598 /* calculates roundup(size, chmp->chm_fs_fsize) */
599 #define	chfs_fragroundup(chmp, size)					      \
600 	(((size) + (chmp)->chm_fs_qfmask) & (chmp)->chm_fs_fmask)
601 
602 #define	chfs_blksize(chmp, ip, lbn)					      \
603 	(((lbn) >= UFS_NDADDR || (ip)->size >= chfs_lblktosize(chmp, (lbn) + 1))	      \
604 	    ? (chmp)->chm_fs_bsize					      \
605 	    : (chfs_fragroundup(chmp, chfs_blkoff(chmp, (ip)->size))))
606 
607 /* calculates roundup(size, chmp->chm_fs_bsize) */
608 #define	chfs_blkroundup(chmp, size)					      \
609  	(((size) + (chmp)->chm_fs_qbmask) & (chmp)->chm_fs_bmask)
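/*
 * Worked example (hypothetical geometry, not taken from this file): with
 * chm_fs_bshift == 11, chm_fs_qbmask == 0x7ff and chm_fs_bmask == ~0x7ff
 * (a 2 KiB block size), an offset of 5000 gives chfs_lblkno() == 2,
 * chfs_blkoff() == 904 and chfs_blkroundup() == 6144.
 */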
610 
611 /* from ffs read */
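/*
 * Regular files are read through the page cache with ubc_uiomove();
 * directories and symlinks fall back to bread()/breadn() of file
 * system blocks.
 */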
612 int
613 chfs_read(void *v)
614 {
615 	struct vop_read_args /* {
616 				struct vnode *a_vp;
617 				struct uio *a_uio;
618 				int a_ioflag;
619 				kauth_cred_t a_cred;
620 				} */ *ap = v;
621 	struct vnode *vp;
622 	struct chfs_inode *ip;
623 	struct uio *uio;
624 	struct ufsmount *ump;
625 	struct buf *bp;
626 	struct chfs_mount *chmp;
627 	daddr_t lbn, nextlbn;
628 	off_t bytesinfile;
629 	long size, xfersize, blkoffset;
630 	int error, ioflag;
631 	vsize_t bytelen;
632 	bool usepc = false;
633 
634 	dbg("chfs_read\n");
635 
636 	vp = ap->a_vp;
637 	ip = VTOI(vp);
638 	ump = ip->ump;
639 	uio = ap->a_uio;
640 	ioflag = ap->a_ioflag;
641 	error = 0;
642 
643 	dbg("ip->size:%llu\n", (unsigned long long)ip->size);
644 
645 #ifdef DIAGNOSTIC
646 	if (uio->uio_rw != UIO_READ)
647 		panic("%s: mode", READ_S);
648 
649 	if (vp->v_type == VLNK) {
650 		if (ip->size < ump->um_maxsymlinklen)
651 			panic("%s: short symlink", READ_S);
652 	} else if (vp->v_type != VREG && vp->v_type != VDIR)
653 		panic("%s: type %d", READ_S, vp->v_type);
654 #endif
655 	chmp = ip->chmp;
656 	if ((u_int64_t)uio->uio_offset > ump->um_maxfilesize)
657 		return (EFBIG);
658 	if (uio->uio_resid == 0)
659 		return (0);
660 
661 	if (uio->uio_offset >= ip->size)
662 		goto out;
663 
664 	usepc = vp->v_type == VREG;
665 	bytelen = 0;
666 	if (usepc) {
667 		const int advice = IO_ADV_DECODE(ap->a_ioflag);
668 
669 		while (uio->uio_resid > 0) {
670 			if (ioflag & IO_DIRECT) {
671 				genfs_directio(vp, uio, ioflag);
672 			}
673 			bytelen = MIN(ip->size - uio->uio_offset,
674 			    uio->uio_resid);
675 			if (bytelen == 0)
676 				break;
677 			error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
678 			    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
679 			if (error)
680 				break;
681 
682 		}
683 		goto out;
684 	}
685 
686 	dbg("start reading\n");
687 	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
688 		bytesinfile = ip->size - uio->uio_offset;
689 		if (bytesinfile <= 0)
690 			break;
691 		lbn = chfs_lblkno(chmp, uio->uio_offset);
692 		nextlbn = lbn + 1;
693 		size = chfs_blksize(chmp, ip, lbn);
694 		blkoffset = chfs_blkoff(chmp, uio->uio_offset);
695 		xfersize = MIN(MIN(chmp->chm_fs_bsize - blkoffset, uio->uio_resid),
696 		    bytesinfile);
697 
698 		if (chfs_lblktosize(chmp, nextlbn) >= ip->size) {
699 			error = bread(vp, lbn, size, 0, &bp);
700 			dbg("after bread\n");
701 		} else {
702 			int nextsize = chfs_blksize(chmp, ip, nextlbn);
703 			dbg("size: %ld\n", size);
704 			error = breadn(vp, lbn,
705 			    size, &nextlbn, &nextsize, 1, 0, &bp);
706 			dbg("after breadN\n");
707 		}
708 		if (error)
709 			break;
710 
711 		/*
712 		 * We should only get non-zero b_resid when an I/O error
713 		 * has occurred, which should cause us to break above.
714 		 * However, if the short read did not cause an error,
715 		 * then we want to ensure that we do not uiomove bad
716 		 * or uninitialized data.
717 		 */
718 		size -= bp->b_resid;
719 		if (size < xfersize) {
720 			if (size == 0)
721 				break;
722 			xfersize = size;
723 		}
724 		dbg("uiomove\n");
725 		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
726 		if (error)
727 			break;
728 		brelse(bp, 0);
729 	}
730 
731 	if (bp != NULL)
732 		brelse(bp, 0);
733 
734 out:
735 	// FIXME HACK
736 	ip->ino = ip->chvc->vno;
737 
738 	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
739 		ip->iflag |= IN_ACCESS;
740 		if ((ap->a_ioflag & IO_SYNC) == IO_SYNC) {
741 			if (error) {
742 				return error;
743 			}
744 			error = chfs_update(vp, NULL, NULL, UPDATE_WAIT);
745 		}
746 	}
747 
748 	dbg("[END]\n");
749 
750 	return (error);
751 }
752 
753 
754 /* --------------------------------------------------------------------- */
755 
756 /* from ffs write */
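/*
 * Data is copied into the page cache with ubc_uiomove(), then the
 * updated vnode is written out to flash with chfs_write_flash_vnode().
 */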
757 int
758 chfs_write(void *v)
759 {
760 	struct vop_write_args /* {
761 				 struct vnode *a_vp;
762 				 struct uio *a_uio;
763 				 int a_ioflag;
764 				 kauth_cred_t a_cred;
765 				 } */ *ap = v;
766 	struct vnode *vp;
767 	struct uio *uio;
768 	struct chfs_inode *ip;
769 	struct chfs_mount *chmp;
770 	struct lwp *l;
771 	kauth_cred_t cred;
772 	off_t osize, origoff, oldoff, preallocoff, endallocoff, nsize;
773 	int blkoffset, error, flags, ioflag, resid;
774 	int aflag;
775 	vsize_t bytelen;
776 	bool async;
777 	struct ufsmount *ump;
778 
779 
780 	cred = ap->a_cred;
781 	ioflag = ap->a_ioflag;
782 	uio = ap->a_uio;
783 	vp = ap->a_vp;
784 	ip = VTOI(vp);
785 	ump = ip->ump;
786 
787 	dbg("write\n");
788 
789 	KASSERT(vp->v_size == ip->size);
790 
791 	switch (vp->v_type) {
792 	case VREG:
793 		if (ioflag & IO_APPEND)
794 			uio->uio_offset = ip->size;
795 		if ((ip->flags & APPEND) && uio->uio_offset != ip->size)
796 			return (EPERM);
797 		/* FALLTHROUGH */
798 	case VLNK:
799 		break;
800 	case VDIR:
801 		if ((ioflag & IO_SYNC) == 0)
802 			panic("chfs_write: nonsync dir write");
803 		break;
804 	default:
805 		panic("chfs_write: type");
806 	}
807 
808 	chmp = ip->chmp;
809 	if (uio->uio_offset < 0 ||
810 	    (u_int64_t)uio->uio_offset +
811 	    uio->uio_resid > ump->um_maxfilesize) {
812 		dbg("uio->uio_offset = %lld | uio->uio_offset + "
813 		    "uio->uio_resid (%llu) > ump->um_maxfilesize (%lld)\n",
814 		    (long long)uio->uio_offset,
815 		    (uint64_t)uio->uio_offset + uio->uio_resid,
816 		    (long long)ump->um_maxfilesize);
817 		return (EFBIG);
818 	}
819 	/*
820 	 * Maybe this should be above the vnode op call, but so long as
821 	 * file servers have no limits, I don't think it matters.
822 	 */
823 	l = curlwp;
824 	if (vp->v_type == VREG && l &&
825 	    uio->uio_offset + uio->uio_resid >
826 	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
827 		mutex_enter(&proc_lock);
828 		psignal(l->l_proc, SIGXFSZ);
829 		mutex_exit(&proc_lock);
830 		return (EFBIG);
831 	}
832 	if (uio->uio_resid == 0)
833 		return (0);
834 
835 	flags = ioflag & IO_SYNC ? B_SYNC : 0;
836 	async = vp->v_mount->mnt_flag & MNT_ASYNC;
837 	origoff = uio->uio_offset;
838 	resid = uio->uio_resid;
839 	osize = ip->size;
840 	error = 0;
841 
842 	preallocoff = round_page(chfs_blkroundup(chmp,
843 		MAX(osize, uio->uio_offset)));
844 	aflag = ioflag & IO_SYNC ? B_SYNC : 0;
845 	nsize = MAX(osize, uio->uio_offset + uio->uio_resid);
846 	endallocoff = nsize - chfs_blkoff(chmp, nsize);
847 
848 	/*
849 	 * if we're increasing the file size, deal with expanding
850 	 * the fragment if there is one.
851 	 */
852 
853 	if (nsize > osize && chfs_lblkno(chmp, osize) < UFS_NDADDR &&
854 	    chfs_lblkno(chmp, osize) != chfs_lblkno(chmp, nsize) &&
855 	    chfs_blkroundup(chmp, osize) != osize) {
856 		off_t eob;
857 
858 		eob = chfs_blkroundup(chmp, osize);
859 		uvm_vnp_setwritesize(vp, eob);
860 		error = ufs_balloc_range(vp, osize, eob - osize, cred, aflag);
861 		if (error)
862 			goto out;
863 		if (flags & B_SYNC) {
864 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
865 			VOP_PUTPAGES(vp,
866 			    trunc_page(osize & chmp->chm_fs_bmask),
867 			    round_page(eob),
868 			    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
869 		}
870 	}
871 
872 	while (uio->uio_resid > 0) {
873 		int ubc_flags = UBC_WRITE;
874 		bool overwrite; /* true if we're overwriting a whole block */
875 		off_t newoff;
876 
877 		if (ioflag & IO_DIRECT) {
878 			genfs_directio(vp, uio, ioflag | IO_JOURNALLOCKED);
879 		}
880 
881 		oldoff = uio->uio_offset;
882 		blkoffset = chfs_blkoff(chmp, uio->uio_offset);
883 		bytelen = MIN(chmp->chm_fs_bsize - blkoffset, uio->uio_resid);
884 		if (bytelen == 0) {
885 			break;
886 		}
887 
888 		/*
889 		 * if we're filling in a hole, allocate the blocks now and
890 		 * initialize the pages first.  if we're extending the file,
891 		 * we can safely allocate blocks without initializing pages
892 		 * since the new blocks will be inaccessible until the write
893 		 * is complete.
894 		 */
895 		overwrite = uio->uio_offset >= preallocoff &&
896 		    uio->uio_offset < endallocoff;
897 		if (!overwrite && (vp->v_vflag & VV_MAPPED) == 0 &&
898 		    chfs_blkoff(chmp, uio->uio_offset) == 0 &&
899 		    (uio->uio_offset & PAGE_MASK) == 0) {
900 			vsize_t len;
901 
902 			len = trunc_page(bytelen);
903 			len -= chfs_blkoff(chmp, len);
904 			if (len > 0) {
905 				overwrite = true;
906 				bytelen = len;
907 			}
908 		}
909 
910 		newoff = oldoff + bytelen;
911 		if (vp->v_size < newoff) {
912 			uvm_vnp_setwritesize(vp, newoff);
913 		}
914 
915 		if (!overwrite) {
916 			error = ufs_balloc_range(vp, uio->uio_offset, bytelen,
917 			    cred, aflag);
918 			if (error)
919 				break;
920 		} else {
921 			genfs_node_wrlock(vp);
922 			error = GOP_ALLOC(vp, uio->uio_offset, bytelen,
923 			    aflag, cred);
924 			genfs_node_unlock(vp);
925 			if (error)
926 				break;
927 			ubc_flags |= UBC_FAULTBUSY;
928 		}
929 
930 		/*
931 		 * copy the data.
932 		 */
933 
934 		ubc_flags |= UBC_VNODE_FLAGS(vp);
935 		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
936 		    IO_ADV_DECODE(ioflag), ubc_flags);
937 
938 		/*
939 		 * update UVM's notion of the size now that we've
940 		 * copied the data into the vnode's pages.
941 		 *
942 		 * we should update the size even when uiomove failed.
943 		 */
944 
945 		if (vp->v_size < newoff) {
946 			uvm_vnp_setsize(vp, newoff);
947 		}
948 
949 		if (error)
950 			break;
951 
952 		/*
953 		 * flush what we just wrote if necessary.
954 		 * XXXUBC simplistic async flushing.
955 		 */
956 
957 		if (!async && oldoff >> 16 != uio->uio_offset >> 16) {
958 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
959 			error = VOP_PUTPAGES(vp, (oldoff >> 16) << 16,
960 			    (uio->uio_offset >> 16) << 16,
961 			    PGO_CLEANIT | PGO_JOURNALLOCKED);
962 			if (error)
963 				break;
964 		}
965 	}
966 out:
967 	if (error == 0 && ioflag & IO_SYNC) {
968 		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
969 		error = VOP_PUTPAGES(vp,
970 		    trunc_page(origoff & chmp->chm_fs_bmask),
971 		    round_page(chfs_blkroundup(chmp, uio->uio_offset)),
972 		    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
973 	}
974 	ip->iflag |= IN_CHANGE | IN_UPDATE;
975 	if (resid > uio->uio_resid && ap->a_cred) {
976 		if (ip->mode & ISUID) {
977 			if (kauth_authorize_vnode(ap->a_cred,
978 			    KAUTH_VNODE_RETAIN_SUID, vp, NULL, EPERM) != 0)
979 				ip->mode &= ~ISUID;
980 		}
981 
982 		if (ip->mode & ISGID) {
983 			if (kauth_authorize_vnode(ap->a_cred,
984 			    KAUTH_VNODE_RETAIN_SGID, vp, NULL, EPERM) != 0)
985 				ip->mode &= ~ISGID;
986 		}
987 	}
988 	if (error) {
989 		(void) UFS_TRUNCATE(vp, osize, ioflag & IO_SYNC, ap->a_cred);
990 		uio->uio_offset -= resid - uio->uio_resid;
991 		uio->uio_resid = resid;
992 	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC) == IO_SYNC)
993 		error = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
994 
995 	//FIXME HACK
996 	chfs_set_vnode_size(vp, vp->v_size);
997 
998 
999 	KASSERT(vp->v_size == ip->size);
1000 
1001 	mutex_enter(&chmp->chm_lock_mountfields);
1002 	error = chfs_write_flash_vnode(chmp, ip, ALLOC_NORMAL);
1003 	mutex_exit(&chmp->chm_lock_mountfields);
1004 
1005 	return (error);
1006 }
1007 
1008 
1009 /* --------------------------------------------------------------------- */
1010 
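/* Flush the vnode's dirty buffers; FSYNC_CACHE is not supported. */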
1011 int
1012 chfs_fsync(void *v)
1013 {
1014 	struct vop_fsync_args /* {
1015 				 struct vnode *a_vp;
1016 				 kauth_cred_t a_cred;
1017 				 int a_flags;
1018 				 off_t offlo;
1019 				 off_t offhi;
1020 				 } */ *ap = v;
1021 	struct vnode *vp = ap->a_vp;
1022 
1023 	if (ap->a_flags & FSYNC_CACHE) {
1024 		return ENODEV;
1025 	}
1026  	vflushbuf(vp, ap->a_flags);
1027 
1028 	return 0;
1029 }
1030 
1031 /* --------------------------------------------------------------------- */
1032 
1033 int
1034 chfs_remove(void *v)
1035 {
1036 	struct vop_remove_v3_args /* {
1037 		struct vnode *a_dvp;
1038 		struct vnode *a_vp;
1039 		struct componentname *a_cnp;
1040 		nlink_t ctx_vp_new_nlink;
1041 	} */ *ap = v;
1042 	struct vnode *dvp = ap->a_dvp;
1043 	struct vnode *vp = ap->a_vp;
1044 	struct componentname *cnp = ap->a_cnp;
1045 	dbg("remove\n");
1046 
1047 	KASSERT(VOP_ISLOCKED(dvp));
1048 	KASSERT(VOP_ISLOCKED(vp));
1049 
1050 	struct chfs_inode *ip = VTOI(vp);
1051 	struct chfs_inode *parent = VTOI(dvp);
1052 	int error = 0;
1053 
1054 	if (vp->v_type == VDIR || (ip->flags & (IMMUTABLE | APPEND)) ||
1055 		(parent->flags & APPEND)) {
1056 		error = EPERM;
1057 		goto out;
1058 	}
1059 
1060 	KASSERT(ip->chvc->vno != ip->chvc->pvno);
1061 
1062 	error = chfs_do_unlink(ip,
1063 	    parent, cnp->cn_nameptr, cnp->cn_namelen);
1064 	if (error == 0) {
1065 		ap->ctx_vp_new_nlink = ip->chvc->nlink;
1066 	}
1067 
1068 out:
1069 	vput(vp);
1070 
1071 	return error;
1072 }
1073 
1074 /* --------------------------------------------------------------------- */
1075 
1076 int
1077 chfs_link(void *v)
1078 {
1079 	struct vnode *dvp = ((struct vop_link_v2_args *) v)->a_dvp;
1080 	struct vnode *vp = ((struct vop_link_v2_args *) v)->a_vp;
1081 	struct componentname *cnp = ((struct vop_link_v2_args *) v)->a_cnp;
1082 
1083 	struct chfs_inode *ip, *parent;
1084 	int error, abrt = 1;
1085 
1086 	if (vp->v_type == VDIR) {
1087 		error = EISDIR;
1088 		goto out;
1089 	}
1090 	if (dvp->v_mount != vp->v_mount) {
1091 		error = EXDEV;
1092 		goto out;
1093 	}
1094 	if (dvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE)))
1095 		goto out;
1096 
1097 	error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_ADD_LINK, vp,
1098 	    dvp, 0);
1099 	if (error)
1100 		goto out;
1101 
1102 	parent = VTOI(dvp);
1103 	ip = VTOI(vp);
1104 
1105 	abrt = 0;
1106 	error = chfs_do_link(ip,
1107 	    parent, cnp->cn_nameptr, cnp->cn_namelen, ip->ch_type);
1108 
1109 	if (dvp != vp)
1110 		VOP_UNLOCK(vp);
1111 out:
1112 	if (abrt)
1113 		VOP_ABORTOP(dvp, cnp);
1114 	return error;
1115 }
1116 
1117 /* --------------------------------------------------------------------- */
1118 
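/*
 * Rename is implemented as: unlink the target (if any), link the source
 * node under the new name, then unlink the old name.
 */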
1119 int
1120 chfs_rename(void *v)
1121 {
1122 	struct vop_rename_args /* {
1123 		const struct vnodeop_desc *a_desc;
1124 		struct vnode *a_fdvp;
1125 		struct vnode *a_fvp;
1126 		struct componentname *a_fcnp;
1127 		struct vnode *a_tdvp;
1128 		struct vnode *a_tvp;
1129 		struct componentname *a_tcnp;
1130 	} */ *ap = v;
1131 	struct vnode *fdvp = ap->a_fdvp;
1132 	struct vnode *fvp = ap->a_fvp;
1133 	struct componentname *fcnp = ap->a_fcnp;
1134 	struct vnode *tdvp = ap->a_tdvp;
1135 	struct vnode *tvp = ap->a_tvp;
1136 	struct componentname *tcnp = ap->a_tcnp;
1137 
1138 	struct chfs_inode *oldparent, *old;
1139 	struct chfs_inode *newparent;
1140 	struct chfs_dirent *fd;
1141 	struct chfs_inode *ip;
1142 	int error = 0;
1143 	dbg("rename\n");
1144 
1145 	KASSERT(VOP_ISLOCKED(tdvp));
1146 	KASSERT(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp) == LK_EXCLUSIVE));
1147 
1148 	oldparent = VTOI(fdvp);
1149 	old = VTOI(fvp);
1150 	newparent = VTOI(tdvp);
1151 	if (tvp) {
1152 		dbg("tvp not null\n");
1153 		ip = VTOI(tvp);
1154 		if (tvp->v_type == VDIR) {
1155 			TAILQ_FOREACH(fd, &ip->dents, fds) {
1156 				if (fd->vno) {
1157 					error = ENOTEMPTY;
1158 					goto out_unlocked;
1159 				}
1160 			}
1161 		}
1162 		error = chfs_do_unlink(ip,
1163 		    newparent, tcnp->cn_nameptr, tcnp->cn_namelen);
1164 		vput(tvp);
1165 	}
1166 	VFS_VGET(tdvp->v_mount, old->ino, LK_EXCLUSIVE, &tvp);
1167 	ip = VTOI(tvp);
1168 
1169 	/* link new */
1170 	error = chfs_do_link(ip,
1171 	    newparent, tcnp->cn_nameptr, tcnp->cn_namelen, ip->ch_type);
1172 	/* remove old */
1173 	error = chfs_do_unlink(old,
1174 	    oldparent, fcnp->cn_nameptr, fcnp->cn_namelen);
1175 
1176 out_unlocked:
1177 	/* Release target nodes. */
1178 	if (tdvp == tvp)
1179 		vrele(tdvp);
1180 	else
1181 		vput(tdvp);
1182 	if (tvp != NULL)
1183 		vput(tvp);
1184 
1185 	/* Release source nodes. */
1186 	vrele(fdvp);
1187 	vrele(fvp);
1188 
1189 	return error;
1190 }
1191 
1192 /* --------------------------------------------------------------------- */
1193 
1194 int
1195 chfs_mkdir(void *v)
1196 {
1197 	struct vnode *dvp = ((struct vop_mkdir_v3_args *) v)->a_dvp;
1198 	struct vnode **vpp = ((struct vop_mkdir_v3_args *)v)->a_vpp;
1199 	struct componentname *cnp = ((struct vop_mkdir_v3_args *) v)->a_cnp;
1200 	struct vattr *vap = ((struct vop_mkdir_v3_args *) v)->a_vap;
1201 	dbg("mkdir()\n");
1202 
1203 	int mode;
1204 
1205 	mode = vap->va_mode & ACCESSPERMS;
1206 	if ((mode & IFMT) == 0) {
1207 		mode |= IFDIR;
1208 	}
1209 
1210 	KASSERT(vap->va_type == VDIR);
1211 
1212 	return chfs_makeinode(mode, dvp, vpp, cnp, VDIR);
1213 }
1214 
1215 /* --------------------------------------------------------------------- */
1216 
1217 int
1218 chfs_rmdir(void *v)
1219 {
1220 	struct vnode *dvp = ((struct vop_rmdir_v2_args *) v)->a_dvp;
1221 	struct vnode *vp = ((struct vop_rmdir_v2_args *) v)->a_vp;
1222 	struct componentname *cnp = ((struct vop_rmdir_v2_args *) v)->a_cnp;
1223 	dbg("rmdir()\n");
1224 
1225 	KASSERT(VOP_ISLOCKED(dvp));
1226 	KASSERT(VOP_ISLOCKED(vp));
1227 
1228 	struct chfs_inode *ip = VTOI(vp);
1229 	struct chfs_inode *parent = VTOI(dvp);
1230 	struct chfs_dirent *fd;
1231 	int error = 0;
1232 
1233 	if (vp->v_type != VDIR) {
1234 		error = ENOTDIR;
1235 		goto out;
1236 	}
1237 
1238 	KASSERT(ip->chvc->vno != ip->chvc->pvno);
1239 
1240 	TAILQ_FOREACH(fd, &ip->dents, fds) {
1241 		if (fd->vno) {
1242 			error = ENOTEMPTY;
1243 			goto out;
1244 		}
1245 	}
1246 
1247 	error = chfs_do_unlink(ip,
1248 	    parent, cnp->cn_nameptr, cnp->cn_namelen);
1249 
1250 out:
1251 	vput(vp);
1252 
1253 	return error;
1254 }
1255 
1256 /* --------------------------------------------------------------------- */
1257 
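/*
 * Short targets are kept in the in-core inode and written to flash as a
 * data node; longer targets go through the buffer cache via ufs_bufio().
 */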
1258 int
1259 chfs_symlink(void *v)
1260 {
1261 	struct vnode *dvp = ((struct vop_symlink_v3_args *) v)->a_dvp;
1262 	struct vnode **vpp = ((struct vop_symlink_v3_args *) v)->a_vpp;
1263 	struct componentname *cnp = ((struct vop_symlink_v3_args *) v)->a_cnp;
1264 	struct vattr *vap = ((struct vop_symlink_v3_args *) v)->a_vap;
1265 	char *target = ((struct vop_symlink_v3_args *) v)->a_target;
1266 
1267 	struct ufsmount *ump;
1268 	struct chfs_mount *chmp;
1269 	struct vnode *vp;
1270 	struct chfs_inode *ip;
1271 	int len, err;
1272 	struct chfs_full_dnode *fd;
1273 	struct buf *bp;
1274 	dbg("symlink()\n");
1275 
1276 	ump = VFSTOUFS(dvp->v_mount);
1277 	chmp = ump->um_chfs;
1278 
1279 	err = chfs_makeinode(IFLNK | vap->va_mode, dvp, vpp, cnp, VLNK);
1280 	if (err)
1281 		return (err);
1282 	vp = *vpp;
1283 	len = strlen(target);
1284 	ip = VTOI(vp);
1285 	/* TODO max symlink len instead of "100" */
1286 	if (len < 100) {
1287 		/* symlink path stored as a data node */
1288 		ip->target = kmem_alloc(len, KM_SLEEP);
1289 		memcpy(ip->target, target, len);
1290 		chfs_set_vnode_size(vp, len);
1291 		ip->iflag |= IN_CHANGE | IN_UPDATE;
1292 
1293 		bp = getiobuf(vp, true);
1294 		bp->b_bufsize = bp->b_resid = len;
1295 		bp->b_data = kmem_alloc(len, KM_SLEEP);
1296 		memcpy(bp->b_data, target, len);
1297 		bp->b_blkno = 0;
1298 
1299 		fd = chfs_alloc_full_dnode();
1300 
1301 		mutex_enter(&chmp->chm_lock_mountfields);
1302 
1303 		/* write out the data node */
1304 		err = chfs_write_flash_dnode(chmp, vp, bp, fd);
1305 		if (err) {
1306 			mutex_exit(&chmp->chm_lock_mountfields);
1307 			goto out;
1308 		}
1309 
1310 		/* add it to the inode */
1311 		err = chfs_add_full_dnode_to_inode(chmp, ip, fd);
1312 		if (err) {
1313 			mutex_exit(&chmp->chm_lock_mountfields);
1314 			goto out;
1315 		}
1316 
1317 		mutex_exit(&chmp->chm_lock_mountfields);
1318 
1319 		kmem_free(bp->b_data, len);
1320 		putiobuf(bp);
1321 
1322 		uvm_vnp_setsize(vp, len);
1323 	} else {
1324 		err = ufs_bufio(UIO_WRITE, vp, target, len, (off_t)0,
1325 		    IO_NODELOCKED, cnp->cn_cred, (size_t *)0, NULL);
1326 	}
1327 
1328 out:
1329 	if (err)
1330 		vrele(vp);
1331 
1332 	return (err);
1333 }
1334 
1335 /* --------------------------------------------------------------------- */
1336 
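/*
 * Directory offsets are synthetic: CHFS_OFFSET_DOT and CHFS_OFFSET_DOTDOT
 * select the "." and ".." entries, child entries start at
 * CHFS_OFFSET_FIRST, and CHFS_OFFSET_EOF marks the end of the directory.
 */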
1337 int
1338 chfs_readdir(void *v)
1339 {
1340 	struct vnode *vp = ((struct vop_readdir_args *) v)->a_vp;
1341 	struct uio *uio = ((struct vop_readdir_args *) v)->a_uio;
1342 	int *eofflag = ((struct vop_readdir_args *) v)->a_eofflag;
1343 
1344 	int error = 0;
1345 	off_t skip, offset;
1346 	struct chfs_inode *ip;
1347 	struct chfs_dirent *fd;
1348 
1349 	struct ufsmount *ump;
1350 	struct chfs_mount *chmp;
1351 	struct chfs_vnode_cache *chvc;
1352 
1353 	KASSERT(VOP_ISLOCKED(vp));
1354 
1355 	/* This operation only makes sense on directory nodes. */
1356 	if (vp->v_type != VDIR) {
1357 		error = ENOTDIR;
1358 		goto out;
1359 	}
1360 
1361 	ip = VTOI(vp);
1362 
1363 	/* uiomove in chfs_filldir automatically increments the
1364 	 * uio_offset by an arbitrary size, so we discard any change
1365 	 * to uio_offset and set it to our own value on return
1366 	 */
1367 	offset = uio->uio_offset;
1368 
1369 	/* Add this entry. */
1370 	if (offset == CHFS_OFFSET_DOT) {
1371 		error = chfs_filldir(uio, ip->ino, ".", 1, CHT_DIR);
1372 		if (error == -1) {
1373 			error = 0;
1374 			goto outok;
1375 		} else if (error != 0)
1376 			goto outok;
1377 
1378 		offset = CHFS_OFFSET_DOTDOT;
1379 	}
1380 
1381 	/* Add parent entry. */
1382 	if (offset == CHFS_OFFSET_DOTDOT) {
1383 		ump = VFSTOUFS(vp->v_mount);
1384 		chmp = ump->um_chfs;
1385 		mutex_enter(&chmp->chm_lock_vnocache);
1386 		chvc = chfs_vnode_cache_get(chmp, ip->ino);
1387 		mutex_exit(&chmp->chm_lock_vnocache);
1388 
1389 		error = chfs_filldir(uio, chvc->pvno, "..", 2, CHT_DIR);
1390 		if (error == -1) {
1391 			error = 0;
1392 			goto outok;
1393 		} else if (error != 0) {
1394 			goto outok;
1395 		}
1396 
1397 		/* Does the directory have any children? */
1398 		if (TAILQ_EMPTY(&ip->dents)) {
1399 			offset = CHFS_OFFSET_EOF;
1400 		} else {
1401 			offset = CHFS_OFFSET_FIRST;
1402 		}
1403 	}
1404 
1405 	if (offset != CHFS_OFFSET_EOF) {
1406 		/* Child entries. */
1407 		skip = offset - CHFS_OFFSET_FIRST;
1408 
1409 		TAILQ_FOREACH(fd, &ip->dents, fds) {
1410 			/* seek to offset by skipping items */
1411 			/* XXX possible race with concurrently changed dirents? */
1412 			if (skip > 0) {
1413 				skip--;
1414 				continue;
1415 			}
1416 
1417 			if (fd->vno != 0) {
1418 				error = chfs_filldir(uio, fd->vno,
1419 				    fd->name, fd->nsize, fd->type);
1420 				if (error == -1) {
1421 					error = 0;
1422 					goto outok;
1423 				} else if (error != 0) {
1424 					dbg("err %d\n", error);
1425 					goto outok;
1426 				}
1427 			}
1428 			offset++;
1429 		}
1430 	}
1431 	offset = CHFS_OFFSET_EOF;
1432 
1433 outok:
1434 	uio->uio_offset = offset;
1435 
1436 	if (eofflag != NULL) {
1437 		*eofflag = (error == 0 &&
1438 		    uio->uio_offset == CHFS_OFFSET_EOF);
1439 	}
1440 
1441 out:
1442 	KASSERT(VOP_ISLOCKED(vp));
1443 
1444 	return error;
1445 }
1446 
1447 /* --------------------------------------------------------------------- */
1448 
1449 int
1450 chfs_readlink(void *v)
1451 {
1452 
1453 	struct vnode *vp = ((struct vop_readlink_args *) v)->a_vp;
1454 	struct uio *uio = ((struct vop_readlink_args *) v)->a_uio;
1455 	kauth_cred_t cred = ((struct vop_readlink_args *) v)->a_cred;
1456 
1457 	struct chfs_inode *ip = VTOI(vp);
1458 
1459 	dbg("readlink()\n");
1460 
1461 	/* TODO max symlink len instead of "100" */
1462 	if (ip->size < 100) {
1463 		uiomove(ip->target, ip->size, uio);
1464 		return (0);
1465 	}
1466 
1467 	return (UFS_BUFRD(vp, uio, 0, cred));
1468 }
1469 
1470 /* --------------------------------------------------------------------- */
1471 
1472 int
1473 chfs_inactive(void *v)
1474 {
1475 	struct vnode *vp = ((struct vop_inactive_v2_args *) v)->a_vp;
1476 	struct chfs_inode *ip = VTOI(vp);
1477 	struct chfs_vnode_cache *chvc;
1478 	dbg("inactive | vno: %llu\n", (unsigned long long)ip->ino);
1479 
1480 	KASSERT(VOP_ISLOCKED(vp));
1481 
1482 	/* Reclaim only if there is no link to the node. */
1483 	if (ip->ino) {
1484 		chvc = ip->chvc;
1485 		if (chvc->nlink)
1486 			*((struct vop_inactive_v2_args *) v)->a_recycle = 0;
1487 	} else {
1488 		*((struct vop_inactive_v2_args *) v)->a_recycle = 1;
1489 	}
1490 
1491 	return 0;
1492 }
1493 
1494 /* --------------------------------------------------------------------- */
1495 
1496 int
1497 chfs_reclaim(void *v)
1498 {
1499 	struct vop_reclaim_v2_args *ap = v;
1500 	struct vnode *vp = ap->a_vp;
1501 	struct chfs_inode *ip = VTOI(vp);
1502 	struct chfs_mount *chmp = ip->chmp;
1503 	struct chfs_dirent *fd;
1504 
1505 	VOP_UNLOCK(vp);
1506 
1507 	mutex_enter(&chmp->chm_lock_mountfields);
1508 
1509 	mutex_enter(&chmp->chm_lock_vnocache);
1510 	ip->chvc->state = VNO_STATE_CHECKEDABSENT;
1511 	mutex_exit(&chmp->chm_lock_vnocache);
1512 
1513 	chfs_update(vp, NULL, NULL, UPDATE_CLOSE);
1514 
1515 	/* Clean fragments. */
1516 	chfs_kill_fragtree(chmp, &ip->fragtree);
1517 
1518 	/* Clean dirents. */
1519 	fd = TAILQ_FIRST(&ip->dents);
1520 	while (fd) {
1521 		TAILQ_REMOVE(&ip->dents, fd, fds);
1522 		chfs_free_dirent(fd);
1523 		fd = TAILQ_FIRST(&ip->dents);
1524 	}
1525 
1526 	cache_purge(vp);
1527 	if (ip->devvp) {
1528 		vrele(ip->devvp);
1529 		ip->devvp = 0;
1530 	}
1531 
1532 	genfs_node_destroy(vp);
1533 	pool_put(&chfs_inode_pool, vp->v_data);
1534 	vp->v_data = NULL;
1535 
1536 	mutex_exit(&chmp->chm_lock_mountfields);
1537 
1538 	return (0);
1539 }
1540 
1541 /* --------------------------------------------------------------------- */
1542 
1543 int
1544 chfs_advlock(void *v)
1545 {
1546 	return 0;
1547 }
1548 
1549 /* --------------------------------------------------------------------- */
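/*
 * Reads are served from flash via chfs_read_data(); writes allocate a
 * full data node, write it to flash and attach it to the inode.
 */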
1550 int
1551 chfs_strategy(void *v)
1552 {
1553 	struct vop_strategy_args /* {
1554 				    const struct vnodeop_desc *a_desc;
1555 				    struct vnode *a_vp;
1556 				    struct buf *a_bp;
1557 				    } */ *ap = v;
1558 	struct chfs_full_dnode *fd;
1559 	struct buf *bp = ap->a_bp;
1560 	struct vnode *vp = ap->a_vp;
1561 	struct chfs_inode *ip = VTOI(vp);
1562 	struct chfs_mount *chmp = ip->chmp;
1563 	int read = (bp->b_flags & B_READ) ? 1 : 0;
1564 	int err = 0;
1565 
1566 	if (read) {
1567 		err = chfs_read_data(chmp, vp, bp);
1568 	} else {
1569 		mutex_enter(&chmp->chm_lock_mountfields);
1570 
1571 		fd = chfs_alloc_full_dnode();
1572 
1573 		err = chfs_write_flash_dnode(chmp, vp, bp, fd);
1574 		if (err) {
1575 			mutex_exit(&chmp->chm_lock_mountfields);
1576 			goto out;
1577 		}
1578 
1579 		ip = VTOI(vp);
1580 		err = chfs_add_full_dnode_to_inode(chmp, ip, fd);
1581 
1582 		mutex_exit(&chmp->chm_lock_mountfields);
1583 	}
1584 out:
1585 	biodone(bp);
1586 	return err;
1587 }
1588 
1589 int
1590 chfs_bmap(void *v)
1591 {
1592 	struct vop_bmap_args /* {
1593 				struct vnode *a_vp;
1594 				daddr_t  a_bn;
1595 				struct vnode **a_vpp;
1596 				daddr_t *a_bnp;
1597 				int *a_runp;
1598 				int *a_runb;
1599 				} */ *ap = v;
1600 	if (ap->a_vpp != NULL)
1601 		*ap->a_vpp = ap->a_vp;
1602 	if (ap->a_bnp != NULL)
1603 		*ap->a_bnp = ap->a_bn;
1604 	if (ap->a_runp != NULL)
1605 		*ap->a_runp = 0;
1606 	return (0);
1607 }
1608 
1609 /*
1610  * vnode operations vector used for files stored in a chfs file system.
1611  */
1612 int
1613 (**chfs_vnodeop_p)(void *);
1614 const struct vnodeopv_entry_desc chfs_vnodeop_entries[] =
1615 	{
1616 		{ &vop_default_desc, vn_default_error },
1617 		{ &vop_parsepath_desc, genfs_parsepath },	/* parsepath */
1618 		{ &vop_lookup_desc, chfs_lookup },
1619 		{ &vop_create_desc, chfs_create },
1620 		{ &vop_mknod_desc, chfs_mknod },
1621 		{ &vop_open_desc, chfs_open },
1622 		{ &vop_close_desc, chfs_close },
1623 		{ &vop_access_desc, chfs_access },
1624 		{ &vop_accessx_desc, genfs_accessx },
1625 		{ &vop_getattr_desc, chfs_getattr },
1626 		{ &vop_setattr_desc, chfs_setattr },
1627 		{ &vop_read_desc, chfs_read },
1628 		{ &vop_write_desc, chfs_write },
1629 		{ &vop_fallocate_desc, genfs_eopnotsupp },
1630 		{ &vop_fdiscard_desc, genfs_eopnotsupp },
1631 		{ &vop_ioctl_desc, genfs_enoioctl },
1632 		{ &vop_fcntl_desc, genfs_fcntl },
1633 		{ &vop_poll_desc, genfs_poll },
1634 		{ &vop_kqfilter_desc, genfs_kqfilter },
1635 		{ &vop_revoke_desc, genfs_revoke },
1636 		{ &vop_mmap_desc, genfs_mmap },
1637 		{ &vop_fsync_desc, chfs_fsync },
1638 		{ &vop_seek_desc, genfs_seek },
1639 		{ &vop_remove_desc, chfs_remove },
1640 		{ &vop_link_desc, chfs_link },
1641 		{ &vop_rename_desc, chfs_rename },
1642 		{ &vop_mkdir_desc, chfs_mkdir },
1643 		{ &vop_rmdir_desc, chfs_rmdir },
1644 		{ &vop_symlink_desc, chfs_symlink },
1645 		{ &vop_readdir_desc, chfs_readdir },
1646 		{ &vop_readlink_desc, chfs_readlink },
1647 		{ &vop_abortop_desc, genfs_abortop },
1648 		{ &vop_inactive_desc, chfs_inactive },
1649 		{ &vop_reclaim_desc, chfs_reclaim },
1650 		{ &vop_lock_desc, genfs_lock },
1651 		{ &vop_unlock_desc, genfs_unlock },
1652 		{ &vop_bmap_desc, chfs_bmap },
1653 		{ &vop_strategy_desc, chfs_strategy },
1654 		{ &vop_print_desc, ufs_print },
1655 		{ &vop_pathconf_desc, ufs_pathconf },
1656 		{ &vop_islocked_desc, genfs_islocked },
1657 		{ &vop_advlock_desc, chfs_advlock },
1658 		{ &vop_bwrite_desc, vn_bwrite },
1659 		{ &vop_getpages_desc, genfs_getpages },
1660 		{ &vop_putpages_desc, genfs_putpages },
1661 		{ NULL, NULL } };
1662 
1663 const struct vnodeopv_desc chfs_vnodeop_opv_desc =
1664 	{ &chfs_vnodeop_p, chfs_vnodeop_entries };
1665 
1666 /* --------------------------------------------------------------------- */
1667 
1668 /*
1669  * vnode operations vector used for special devices stored in a chfs
1670  * file system.
1671  */
1672 int
1673 (**chfs_specop_p)(void *);
1674 const struct vnodeopv_entry_desc chfs_specop_entries[] =
1675 	{
1676 		{ &vop_default_desc, vn_default_error },
1677 		GENFS_SPECOP_ENTRIES,
1678 		{ &vop_close_desc, ufsspec_close },
1679 		{ &vop_access_desc, chfs_access },
1680 		{ &vop_accessx_desc, genfs_accessx },
1681 		{ &vop_getattr_desc, chfs_getattr },
1682 		{ &vop_setattr_desc, chfs_setattr },
1683 		{ &vop_read_desc, chfs_read },
1684 		{ &vop_write_desc, chfs_write },
1685 		{ &vop_fcntl_desc, genfs_fcntl },
1686 		{ &vop_fsync_desc, spec_fsync },
1687 		{ &vop_inactive_desc, chfs_inactive },
1688 		{ &vop_reclaim_desc, chfs_reclaim },
1689 		{ &vop_lock_desc, genfs_lock },
1690 		{ &vop_unlock_desc, genfs_unlock },
1691 		{ &vop_print_desc, ufs_print },
1692 		{ &vop_islocked_desc, genfs_islocked },
1693 		{ &vop_bwrite_desc, vn_bwrite },
1694 		{ NULL, NULL } };
1695 
1696 const struct vnodeopv_desc chfs_specop_opv_desc =
1697 	{ &chfs_specop_p, chfs_specop_entries };
1698 
1699 /* --------------------------------------------------------------------- */
1700 /*
1701  * vnode operations vector used for fifos stored in a chfs file system.
1702  */
1703 int
1704 (**chfs_fifoop_p)(void *);
1705 const struct vnodeopv_entry_desc chfs_fifoop_entries[] =
1706 	{
1707 		{ &vop_default_desc, vn_default_error },
1708 		GENFS_FIFOOP_ENTRIES,
1709 		{ &vop_close_desc, ufsfifo_close },
1710 		{ &vop_access_desc, chfs_access },
1711 		{ &vop_accessx_desc, genfs_accessx },
1712 		{ &vop_getattr_desc, chfs_getattr },
1713 		{ &vop_setattr_desc, chfs_setattr },
1714 		{ &vop_read_desc, ufsfifo_read },
1715 		{ &vop_write_desc, ufsfifo_write },
1716 		{ &vop_fcntl_desc, genfs_fcntl },
1717 		{ &vop_fsync_desc, vn_fifo_bypass },
1718 		{ &vop_inactive_desc, chfs_inactive },
1719 		{ &vop_reclaim_desc, chfs_reclaim },
1720 		{ &vop_lock_desc, genfs_lock },
1721 		{ &vop_unlock_desc, genfs_unlock },
1722 		{ &vop_strategy_desc, vn_fifo_bypass },
1723 		{ &vop_print_desc, ufs_print },
1724 		{ &vop_islocked_desc, genfs_islocked },
1725 		{ &vop_bwrite_desc, genfs_nullop },
1726 		{ NULL, NULL } };
1727 
1728 const struct vnodeopv_desc chfs_fifoop_opv_desc =
1729 	{ &chfs_fifoop_p, chfs_fifoop_entries };
1730