xref: /netbsd/sys/coda/coda_vnops.c (revision 78328d4c)
1 /*	$NetBSD: coda_vnops.c,v 1.118 2022/03/27 16:24:58 christos Exp $	*/
2 
3 /*
4  *
5  *             Coda: an Experimental Distributed File System
6  *                              Release 3.1
7  *
8  *           Copyright (c) 1987-1998 Carnegie Mellon University
9  *                          All Rights Reserved
10  *
11  * Permission  to  use, copy, modify and distribute this software and its
12  * documentation is hereby granted,  provided  that  both  the  copyright
13  * notice  and  this  permission  notice  appear  in  all  copies  of the
14  * software, derivative works or  modified  versions,  and  any  portions
15  * thereof, and that both notices appear in supporting documentation, and
16  * that credit is given to Carnegie Mellon University  in  all  documents
17  * and publicity pertaining to direct or indirect use of this code or its
18  * derivatives.
19  *
20  * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS  KNOWN  TO  HAVE  BUGS,
21  * SOME  OF  WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON ALLOWS
22  * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION.   CARNEGIE  MELLON
23  * DISCLAIMS  ANY  LIABILITY  OF  ANY  KIND  FOR  ANY  DAMAGES WHATSOEVER
24  * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE  OR  OF
25  * ANY DERIVATIVE WORK.
26  *
27  * Carnegie  Mellon  encourages  users  of  this  software  to return any
28  * improvements or extensions that  they  make,  and  to  grant  Carnegie
29  * Mellon the rights to redistribute these changes without encumbrance.
30  *
31  * 	@(#) coda/coda_vnops.c,v 1.1.1.1 1998/08/29 21:26:46 rvb Exp $
32  */
33 
34 /*
35  * Mach Operating System
36  * Copyright (c) 1990 Carnegie-Mellon University
37  * Copyright (c) 1989 Carnegie-Mellon University
38  * All rights reserved.  The CMU software License Agreement specifies
39  * the terms and conditions for use and redistribution.
40  */
41 
42 /*
43  * This code was written for the Coda file system at Carnegie Mellon
44  * University.  Contributors include David Steere, James Kistler, and
45  * M. Satyanarayanan.
46  */
47 
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: coda_vnops.c,v 1.118 2022/03/27 16:24:58 christos Exp $");
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/errno.h>
55 #include <sys/acct.h>
56 #include <sys/file.h>
57 #include <sys/uio.h>
58 #include <sys/namei.h>
59 #include <sys/ioctl.h>
60 #include <sys/mount.h>
61 #include <sys/proc.h>
62 #include <sys/select.h>
63 #include <sys/vnode.h>
64 #include <sys/kauth.h>
65 #include <sys/dirent.h>
66 
67 #include <miscfs/genfs/genfs.h>
68 #include <miscfs/specfs/specdev.h>
69 
70 #include <coda/coda.h>
71 #include <coda/cnode.h>
72 #include <coda/coda_vnops.h>
73 #include <coda/coda_venus.h>
74 #include <coda/coda_opstats.h>
75 #include <coda/coda_subr.h>
76 #include <coda/coda_namecache.h>
77 #include <coda/coda_pioctl.h>
78 
79 /*
80  * These flags select various performance enhancements.
81  */
82 int coda_attr_cache  = 1;       /* Set to cache attributes in the kernel */
83 int coda_symlink_cache = 1;     /* Set to cache symbolic link information */
84 int coda_access_cache = 1;      /* Set to handle some access checks directly */
85 
86 /* structure to keep track of vfs calls */
87 
88 struct coda_op_stats coda_vnodeopstats[CODA_VNODEOPS_SIZE];
89 
90 #define MARK_ENTRY(op) (coda_vnodeopstats[op].entries++)
91 #define MARK_INT_SAT(op) (coda_vnodeopstats[op].sat_intrn++)
92 #define MARK_INT_FAIL(op) (coda_vnodeopstats[op].unsat_intrn++)
93 #define MARK_INT_GEN(op) (coda_vnodeopstats[op].gen_intrn++)
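/*
 * As used below, MARK_ENTRY counts every call to an operation,
 * MARK_INT_SAT/MARK_INT_FAIL tally satisfied and failed cases, and
 * MARK_INT_GEN marks the internally generated open/close pairs issued
 * by coda_rdwr() and coda_readdir() when no container file is open.
 */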
94 
95 /* Set to print lock/unlock attempts in coda_lock() and coda_unlock() */
96 static int coda_lockdebug = 0;
97 
98 #define ENTRY if(coda_vnop_print_entry) myprintf(("Entered %s\n",__func__))
99 
100 /* Definition of the vnode operation vector */
101 
102 const struct vnodeopv_entry_desc coda_vnodeop_entries[] = {
103     { &vop_default_desc, coda_vop_error },
104     { &vop_parsepath_desc, genfs_parsepath },	/* parsepath */
105     { &vop_lookup_desc, coda_lookup },		/* lookup */
106     { &vop_create_desc, coda_create },		/* create */
107     { &vop_mknod_desc, coda_vop_error },	/* mknod */
108     { &vop_open_desc, coda_open },		/* open */
109     { &vop_close_desc, coda_close },		/* close */
110     { &vop_access_desc, coda_access },		/* access */
111     { &vop_accessx_desc, genfs_accessx },	/* access */
112     { &vop_getattr_desc, coda_getattr },	/* getattr */
113     { &vop_setattr_desc, coda_setattr },	/* setattr */
114     { &vop_read_desc, coda_read },		/* read */
115     { &vop_write_desc, coda_write },		/* write */
116     { &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
117     { &vop_fdiscard_desc, genfs_eopnotsupp },	/* fdiscard */
118     { &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
119     { &vop_ioctl_desc, coda_ioctl },		/* ioctl */
120     { &vop_mmap_desc, genfs_mmap },		/* mmap */
121     { &vop_fsync_desc, coda_fsync },		/* fsync */
122     { &vop_remove_desc, coda_remove },		/* remove */
123     { &vop_link_desc, coda_link },		/* link */
124     { &vop_rename_desc, coda_rename },		/* rename */
125     { &vop_mkdir_desc, coda_mkdir },		/* mkdir */
126     { &vop_rmdir_desc, coda_rmdir },		/* rmdir */
127     { &vop_symlink_desc, coda_symlink },	/* symlink */
128     { &vop_readdir_desc, coda_readdir },	/* readdir */
129     { &vop_readlink_desc, coda_readlink },	/* readlink */
130     { &vop_abortop_desc, coda_abortop },	/* abortop */
131     { &vop_inactive_desc, coda_inactive },	/* inactive */
132     { &vop_reclaim_desc, coda_reclaim },	/* reclaim */
133     { &vop_lock_desc, coda_lock },		/* lock */
134     { &vop_unlock_desc, coda_unlock },		/* unlock */
135     { &vop_bmap_desc, coda_bmap },		/* bmap */
136     { &vop_strategy_desc, coda_strategy },	/* strategy */
137     { &vop_print_desc, coda_vop_error },	/* print */
138     { &vop_islocked_desc, coda_islocked },	/* islocked */
139     { &vop_pathconf_desc, coda_pathconf },	/* pathconf */
140     { &vop_advlock_desc, coda_vop_nop },	/* advlock */
141     { &vop_bwrite_desc, coda_vop_error },	/* bwrite */
142     { &vop_seek_desc, genfs_seek },		/* seek */
143     { &vop_poll_desc, genfs_poll },		/* poll */
144     { &vop_getpages_desc, coda_getpages },	/* getpages */
145     { &vop_putpages_desc, coda_putpages },	/* putpages */
146     { NULL, NULL }
147 };
148 
149 static void coda_print_vattr(struct vattr *);
150 
151 int (**coda_vnodeop_p)(void *);
152 const struct vnodeopv_desc coda_vnodeop_opv_desc =
153         { &coda_vnodeop_p, coda_vnodeop_entries };
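/*
 * A minimal sketch of how this table is typically registered: the
 * vnodeopv_desc above is listed in an array referenced from the
 * filesystem's struct vfsops (vfs_opv_descs), and vfs_attach() then
 * builds the coda_vnodeop_p vector from it.  The names below are
 * illustrative; the actual wiring lives in coda_vfsops.c.
 *
 *	const struct vnodeopv_desc * const coda_vnodeopv_descs[] = {
 *		&coda_vnodeop_opv_desc,
 *		NULL,
 *	};
 */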
154 
155 /* Definitions of NetBSD vnodeop interfaces */
156 
157 /*
158  * A generic error routine.  Return EIO without looking at arguments.
159  */
160 int
161 coda_vop_error(void *anon) {
162     struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
163 
164     if (codadebug) {
165 	myprintf(("%s: Vnode operation %s called (error).\n",
166 	    __func__, (*desc)->vdesc_name));
167     }
168 
169     return EIO;
170 }
171 
172 /* A generic do-nothing. */
173 int
174 coda_vop_nop(void *anon) {
175     struct vnodeop_desc **desc = (struct vnodeop_desc **)anon;
176 
177     if (codadebug) {
178 	myprintf(("Vnode operation %s called, but unsupported\n",
179 		  (*desc)->vdesc_name));
180     }
181    return (0);
182 }
183 
184 int
185 coda_vnodeopstats_init(void)
186 {
187 	int i;
188 
189 	for(i=0;i<CODA_VNODEOPS_SIZE;i++) {
190 		coda_vnodeopstats[i].opcode = i;
191 		coda_vnodeopstats[i].entries = 0;
192 		coda_vnodeopstats[i].sat_intrn = 0;
193 		coda_vnodeopstats[i].unsat_intrn = 0;
194 		coda_vnodeopstats[i].gen_intrn = 0;
195 	}
196 
197 	return 0;
198 }
199 
200 /*
201  * XXX The entire relationship between VOP_OPEN and having a container
202  * file (via venus_open) needs to be reexamined.  In particular, it's
203  * valid to open/mmap/close and then reference.  Instead of doing
204  * VOP_OPEN when getpages needs a container, we should do the
205  * venus_open part, and record that the vnode has opened the container
206  * for getpages, and do the matching logical close on coda_inactive.
207  * Further, coda_rdwr needs a container file, and sometimes needs to
208  * do the equivalent of open (core dumps).
209  */
210 /*
211  * coda_open calls Venus to return the device and inode of the
212  * container file, and then obtains a vnode for that file.  The
213  * container vnode is stored in the coda vnode, and a reference is
214  * added for each open file.
215  */
216 int
217 coda_open(void *v)
218 {
219     /*
220      * NetBSD can pass the O_EXCL flag in mode, even though the check
221      * has already happened.  Venus defensively assumes that if open
222      * is passed O_EXCL, it must be a bug.  We strip the flag here.
223      */
224 /* true args */
225     struct vop_open_args *ap = v;
226     vnode_t *vp = ap->a_vp;
227     struct cnode *cp = VTOC(vp);
228     int flag = ap->a_mode & (~O_EXCL);
229     kauth_cred_t cred = ap->a_cred;
230 /* locals */
231     int error;
232     dev_t dev;			/* container file device, inode, vnode */
233     ino_t inode;
234     vnode_t *container_vp;
235 
236     MARK_ENTRY(CODA_OPEN_STATS);
237 
238     KASSERT(VOP_ISLOCKED(vp));
239     /* Check for open of control file. */
240     if (IS_CTL_VP(vp)) {
241 	/* if (WRITABLE(flag)) */
242 	if (flag & (FWRITE | O_TRUNC | O_CREAT | O_EXCL)) {
243 	    MARK_INT_FAIL(CODA_OPEN_STATS);
244 	    return(EACCES);
245 	}
246 	MARK_INT_SAT(CODA_OPEN_STATS);
247 	return(0);
248     }
249 
250     error = venus_open(vtomi(vp), &cp->c_fid, flag, cred, curlwp, &dev, &inode);
251     if (error)
252 	return (error);
253     if (!error) {
254 	    CODADEBUG(CODA_OPEN, myprintf((
255 		"%s: dev 0x%llx inode %llu result %d\n", __func__,
256 		(unsigned long long)dev, (unsigned long long)inode, error));)
257     }
258 
259     /*
260      * Obtain locked and referenced container vnode from container
261      * device/inode.
262      */
263     error = coda_grab_vnode(vp, dev, inode, &container_vp);
264     if (error)
265 	return (error);
266 
267     /* Save the vnode pointer for the container file. */
268     if (cp->c_ovp == NULL) {
269 	cp->c_ovp = container_vp;
270     } else {
271 	if (cp->c_ovp != container_vp)
272 	    /*
273 	     * Perhaps venus returned a different container, or
274 	     * something else went wrong.
275 	     */
276 	    panic("%s: cp->c_ovp != container_vp", __func__);
277     }
278     cp->c_ocount++;
279 
280     /* Flush the attribute cache if writing the file. */
281     if (flag & FWRITE) {
282 	cp->c_owrite++;
283 	cp->c_flags &= ~C_VATTR;
284     }
285 
286     /*
287      * Save the <device, inode> pair for the container file to speed
288      * up subsequent reads while closed (mmap, program execution).
289      * This is perhaps safe because venus will invalidate the node
290      * before changing the container file mapping.
291      */
292     cp->c_device = dev;
293     cp->c_inode = inode;
294 
295     /* Open the container file. */
296     error = VOP_OPEN(container_vp, flag, cred);
297     /*
298      * Drop the lock on the container, after we have done VOP_OPEN
299      * (which requires a locked vnode).
300      */
301     VOP_UNLOCK(container_vp);
302     return(error);
303 }
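/*
 * Container-file bookkeeping established above and relied on by
 * coda_close(), coda_rdwr() and coda_readdir(): c_ovp caches the
 * container vnode, c_ocount counts opens, c_owrite counts opens for
 * writing, and c_device/c_inode remember the <device, inode> pair so
 * that a later read or page fault can re-derive the container via
 * coda_grab_vnode() without another VOP_OPEN.
 */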
304 
305 /*
306  * Close the cache file used for I/O and notify Venus.
307  */
308 int
309 coda_close(void *v)
310 {
311 /* true args */
312     struct vop_close_args *ap = v;
313     vnode_t *vp = ap->a_vp;
314     struct cnode *cp = VTOC(vp);
315     int flag = ap->a_fflag;
316     kauth_cred_t cred = ap->a_cred;
317 /* locals */
318     int error;
319 
320     MARK_ENTRY(CODA_CLOSE_STATS);
321 
322     /* Check for close of control file. */
323     if (IS_CTL_VP(vp)) {
324 	MARK_INT_SAT(CODA_CLOSE_STATS);
325 	return(0);
326     }
327 
328     /*
329      * XXX The IS_UNMOUNTING part of this is very suspect.
330      */
331     if (IS_UNMOUNTING(cp)) {
332 	if (cp->c_ovp) {
333 #ifdef	CODA_VERBOSE
334 	    printf("%s: destroying container %d, ufs vp %p of vp %p/cp %p\n",
335 		__func__, vrefcnt(vp), cp->c_ovp, vp, cp);
336 #endif
337 #ifdef	hmm
338 	    vgone(cp->c_ovp);
339 #else
340 	    vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
341 	    VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
342 	    vput(cp->c_ovp);
343 #endif
344 	} else {
345 #ifdef	CODA_VERBOSE
346 	    printf("%s: NO container vp %p/cp %p\n", __func__, vp, cp);
347 #endif
348 	}
349 	return ENODEV;
350     }
351 
352     /* Lock the container node, and VOP_CLOSE it. */
353     vn_lock(cp->c_ovp, LK_EXCLUSIVE | LK_RETRY);
354     VOP_CLOSE(cp->c_ovp, flag, cred); /* Do errors matter here? */
355     /*
356      * Drop the lock we just obtained, and vrele the container vnode.
357      * Decrement reference counts, and clear container vnode pointer on
358      * last close.
359      */
360     vput(cp->c_ovp);
361     if (flag & FWRITE)
362 	--cp->c_owrite;
363     if (--cp->c_ocount == 0)
364 	cp->c_ovp = NULL;
365 
366     error = venus_close(vtomi(vp), &cp->c_fid, flag, cred, curlwp);
367 
368     CODADEBUG(CODA_CLOSE, myprintf(("%s: result %d\n", __func__, error)); )
369     return(error);
370 }
371 
372 int
373 coda_read(void *v)
374 {
375     struct vop_read_args *ap = v;
376 
377     ENTRY;
378     return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_READ,
379 		    ap->a_ioflag, ap->a_cred, curlwp));
380 }
381 
382 int
383 coda_write(void *v)
384 {
385     struct vop_write_args *ap = v;
386 
387     ENTRY;
388     return(coda_rdwr(ap->a_vp, ap->a_uio, UIO_WRITE,
389 		    ap->a_ioflag, ap->a_cred, curlwp));
390 }
391 
392 int
393 coda_rdwr(vnode_t *vp, struct uio *uiop, enum uio_rw rw, int ioflag,
394 	kauth_cred_t cred, struct lwp *l)
395 {
396 /* upcall decl */
397   /* NOTE: container file operation!!! */
398 /* locals */
399     struct cnode *cp = VTOC(vp);
400     vnode_t *cfvp = cp->c_ovp;
401     struct proc *p = l->l_proc;
402     int opened_internally = 0;
403     int error = 0;
404 
405     MARK_ENTRY(CODA_RDWR_STATS);
406 
407     CODADEBUG(CODA_RDWR, myprintf(("coda_rdwr(%d, %p, %lu, %lld)\n", rw,
408 	uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
409 	(long long) uiop->uio_offset)); )
410 
411     /* Check for rdwr of control object. */
412     if (IS_CTL_VP(vp)) {
413 	MARK_INT_FAIL(CODA_RDWR_STATS);
414 	return(EINVAL);
415     }
416 
417     /* Redirect the request to UFS. */
418 
419     /*
420      * If file is not already open this must be a page
421      * {read,write} request.  Iget the cache file's inode
422      * pointer if we still have its <device, inode> pair.
423      * Otherwise, we must do an internal open to derive the
424      * pair.
425      * XXX Integrate this into a coherent strategy for container
426      * file acquisition.
427      */
428     if (cfvp == NULL) {
429 	/*
430 	 * If we're dumping core, do the internal open. Otherwise
431 	 * venus won't have the correct size of the core when
432 	 * it's completely written.
433 	 */
434 	if (cp->c_inode != 0 && !(p && (p->p_acflag & ACORE))) {
435 #ifdef CODA_VERBOSE
436 	    printf("%s: grabbing container vnode, losing reference\n",
437 		__func__);
438 #endif
439 	    /* Get locked and refed vnode. */
440 	    error = coda_grab_vnode(vp, cp->c_device, cp->c_inode, &cfvp);
441 	    if (error) {
442 		MARK_INT_FAIL(CODA_RDWR_STATS);
443 		return(error);
444 	    }
445 	    /*
446 	     * Drop lock.
447 	     * XXX Where is reference released.
448 	     */
449 	    VOP_UNLOCK(cfvp);
450 	}
451 	else {
452 #ifdef CODA_VERBOSE
453 	    printf("%s: internal VOP_OPEN\n", __func__);
454 #endif
455 	    opened_internally = 1;
456 	    MARK_INT_GEN(CODA_OPEN_STATS);
457 	    error = VOP_OPEN(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
458 #ifdef	CODA_VERBOSE
459 	    printf("%s: Internally Opening %p\n", __func__, vp);
460 #endif
461 	    if (error) {
462 		MARK_INT_FAIL(CODA_RDWR_STATS);
463 		return(error);
464 	    }
465 	    cfvp = cp->c_ovp;
466 	}
467     }
468 
469     /* Have UFS handle the call. */
470     CODADEBUG(CODA_RDWR, myprintf(("%s: fid = %s, refcnt = %d\n", __func__,
471 	coda_f2s(&cp->c_fid), vrefcnt(CTOV(cp)))); )
472 
473     if (rw == UIO_READ) {
474 	error = VOP_READ(cfvp, uiop, ioflag, cred);
475     } else {
476 	error = VOP_WRITE(cfvp, uiop, ioflag, cred);
477     }
478 
479     if (error)
480 	MARK_INT_FAIL(CODA_RDWR_STATS);
481     else
482 	MARK_INT_SAT(CODA_RDWR_STATS);
483 
484     /* Do an internal close if necessary. */
485     if (opened_internally) {
486 	MARK_INT_GEN(CODA_CLOSE_STATS);
487 	(void)VOP_CLOSE(vp, (rw == UIO_READ ? FREAD : FWRITE), cred);
488     }
489 
490     /* Invalidate cached attributes if writing. */
491     if (rw == UIO_WRITE)
492 	cp->c_flags &= ~C_VATTR;
493     return(error);
494 }
495 
496 int
497 coda_ioctl(void *v)
498 {
499 /* true args */
500     struct vop_ioctl_args *ap = v;
501     vnode_t *vp = ap->a_vp;
502     int com = ap->a_command;
503     void *data = ap->a_data;
504     int flag = ap->a_fflag;
505     kauth_cred_t cred = ap->a_cred;
506 /* locals */
507     int error;
508     vnode_t *tvp;
509     struct PioctlData *iap = (struct PioctlData *)data;
510     namei_simple_flags_t sflags;
511 
512     MARK_ENTRY(CODA_IOCTL_STATS);
513 
514     CODADEBUG(CODA_IOCTL, myprintf(("in coda_ioctl on %s\n", iap->path));)
515 
516     /* Don't check for operation on a dying object, for ctlvp it
517        shouldn't matter */
518 
519     /* Must be control object to succeed. */
520     if (!IS_CTL_VP(vp)) {
521 	MARK_INT_FAIL(CODA_IOCTL_STATS);
522 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: vp != ctlvp", __func__));)
523 	return (EOPNOTSUPP);
524     }
525     /* Look up the pathname. */
526 
527     /* Should we use the name cache here? It would get it from
528        lookupname sooner or later anyway, right? */
529 
530     sflags = iap->follow ? NSM_FOLLOW_NOEMULROOT : NSM_NOFOLLOW_NOEMULROOT;
531     error = namei_simple_user(iap->path, sflags, &tvp);
532 
533     if (error) {
534 	MARK_INT_FAIL(CODA_IOCTL_STATS);
535 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: lookup returns %d\n",
536 	    __func__, error));)
537 	return(error);
538     }
539 
540     /*
541      * Make sure this is a coda style cnode, but it may be a
542      * different vfsp
543      */
544     /* XXX: this totally violates the comment about vtagtype in vnode.h */
545     if (tvp->v_tag != VT_CODA) {
546 	vrele(tvp);
547 	MARK_INT_FAIL(CODA_IOCTL_STATS);
548 	CODADEBUG(CODA_IOCTL, myprintf(("%s error: %s not a coda object\n",
549 	    __func__, iap->path));)
550 	return(EINVAL);
551     }
552 
553     if (iap->vi.in_size > VC_MAXDATASIZE || iap->vi.out_size > VC_MAXDATASIZE) {
554 	vrele(tvp);
555 	return(EINVAL);
556     }
557     error = venus_ioctl(vtomi(tvp), &((VTOC(tvp))->c_fid), com, flag, data,
558 	cred, curlwp);
559 
560     if (error)
561 	MARK_INT_FAIL(CODA_IOCTL_STATS);
562     else
563 	CODADEBUG(CODA_IOCTL, myprintf(("Ioctl returns %d \n", error)); )
564 
565     vrele(tvp);
566     return(error);
567 }
568 
569 /*
570  * To reduce the cost of a user-level venus, we cache attributes in
571  * the kernel.  Each cnode has storage allocated for an attribute. If
572  * c_vattr is valid, return a reference to it. Otherwise, get the
573  * attributes from venus and store them in the cnode.  There is some
574  * question if this method is a security leak. But I think that in
575  * order to make this call, the user must have done a lookup and
576  * opened the file, and therefore should already have access.
577  */
578 int
579 coda_getattr(void *v)
580 {
581 /* true args */
582     struct vop_getattr_args *ap = v;
583     vnode_t *vp = ap->a_vp;
584     struct cnode *cp = VTOC(vp);
585     struct vattr *vap = ap->a_vap;
586     kauth_cred_t cred = ap->a_cred;
587 /* locals */
588     int error;
589 
590     MARK_ENTRY(CODA_GETATTR_STATS);
591 
592     /* Check for getattr of control object. */
593     if (IS_CTL_VP(vp)) {
594 	MARK_INT_FAIL(CODA_GETATTR_STATS);
595 	return(ENOENT);
596     }
597 
598     /* Check to see if the attributes have already been cached */
599     if (VALID_VATTR(cp)) {
600 	CODADEBUG(CODA_GETATTR, { myprintf(("%s: attr cache hit: %s\n",
601 	    __func__, coda_f2s(&cp->c_fid)));})
602 	CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
603 	    coda_print_vattr(&cp->c_vattr); )
604 
605 	*vap = cp->c_vattr;
606 	MARK_INT_SAT(CODA_GETATTR_STATS);
607 	return(0);
608     }
609 
610     error = venus_getattr(vtomi(vp), &cp->c_fid, cred, curlwp, vap);
611 
612     if (!error) {
613 	CODADEBUG(CODA_GETATTR, myprintf(("%s miss %s: result %d\n",
614 	    __func__, coda_f2s(&cp->c_fid), error)); )
615 
616 	CODADEBUG(CODA_GETATTR, if (!(codadebug & ~CODA_GETATTR))
617 	    coda_print_vattr(vap);	)
618 
619 	/* If not open for write, store attributes in cnode */
620 	if ((cp->c_owrite == 0) && (coda_attr_cache)) {
621 	    cp->c_vattr = *vap;
622 	    cp->c_flags |= C_VATTR;
623 	}
624 
625     }
626     return(error);
627 }
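/*
 * The C_VATTR flag set above is cleared wherever the cached attributes
 * can go stale elsewhere in this file: on open for writing and on
 * write, after a successful setattr, and when create, remove, link,
 * rename, mkdir, rmdir or symlink modify the file or its parent
 * directory.
 */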
628 
629 int
630 coda_setattr(void *v)
631 {
632 /* true args */
633     struct vop_setattr_args *ap = v;
634     vnode_t *vp = ap->a_vp;
635     struct cnode *cp = VTOC(vp);
636     struct vattr *vap = ap->a_vap;
637     kauth_cred_t cred = ap->a_cred;
638 /* locals */
639     int error;
640 
641     MARK_ENTRY(CODA_SETATTR_STATS);
642 
643     /* Check for setattr of control object. */
644     if (IS_CTL_VP(vp)) {
645 	MARK_INT_FAIL(CODA_SETATTR_STATS);
646 	return(ENOENT);
647     }
648 
649     if (codadebug & CODADBGMSK(CODA_SETATTR)) {
650 	coda_print_vattr(vap);
651     }
652     error = venus_setattr(vtomi(vp), &cp->c_fid, vap, cred, curlwp);
653 
654     if (!error)
655 	cp->c_flags &= ~C_VATTR;
656 
657     CODADEBUG(CODA_SETATTR,	myprintf(("setattr %d\n", error)); )
658     return(error);
659 }
660 
661 int
662 coda_access(void *v)
663 {
664 /* true args */
665     struct vop_access_args *ap = v;
666     vnode_t *vp = ap->a_vp;
667     struct cnode *cp = VTOC(vp);
668     accmode_t accmode = ap->a_accmode;
669     kauth_cred_t cred = ap->a_cred;
670 /* locals */
671     int error;
672 
673     MARK_ENTRY(CODA_ACCESS_STATS);
674 
675     KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0);
676     /* Check for access of control object.  Only read access is
677        allowed on it. */
678     if (IS_CTL_VP(vp)) {
679 	/* bogus hack - all will be marked as successes */
680 	MARK_INT_SAT(CODA_ACCESS_STATS);
681 	return(((accmode & VREAD) && !(accmode & (VWRITE | VEXEC)))
682 	       ? 0 : EACCES);
683     }
684 
685     /*
686      * if the file is a directory, and we are checking exec (eg lookup)
687      * access, and the file is in the namecache, then the user must have
688      * lookup access to it.
689      */
690     if (coda_access_cache) {
691 	if ((vp->v_type == VDIR) && (accmode & VEXEC)) {
692 	    if (coda_nc_lookup(cp, ".", 1, cred)) {
693 		MARK_INT_SAT(CODA_ACCESS_STATS);
694 		return(0);                     /* it was in the cache */
695 	    }
696 	}
697     }
698 
699     error = venus_access(vtomi(vp), &cp->c_fid, accmode, cred, curlwp);
700 
701     return(error);
702 }
703 
704 /*
705  * CODA abort op, called after namei() when a CREATE/DELETE isn't actually
706  * done. If a buffer has been saved in anticipation of a coda_create or
707  * a coda_remove, delete it.
708  */
709 /* ARGSUSED */
710 int
711 coda_abortop(void *v)
712 {
713 /* true args */
714     struct vop_abortop_args /* {
715 	vnode_t *a_dvp;
716 	struct componentname *a_cnp;
717     } */ *ap = v;
718 
719     (void)ap;
720 /* upcall decl */
721 /* locals */
722 
723     return (0);
724 }
725 
726 int
727 coda_readlink(void *v)
728 {
729 /* true args */
730     struct vop_readlink_args *ap = v;
731     vnode_t *vp = ap->a_vp;
732     struct cnode *cp = VTOC(vp);
733     struct uio *uiop = ap->a_uio;
734     kauth_cred_t cred = ap->a_cred;
735 /* locals */
736     struct lwp *l = curlwp;
737     int error;
738     char *str;
739     int len;
740 
741     MARK_ENTRY(CODA_READLINK_STATS);
742 
743     /* Check for readlink of control object. */
744     if (IS_CTL_VP(vp)) {
745 	MARK_INT_FAIL(CODA_READLINK_STATS);
746 	return(ENOENT);
747     }
748 
749     if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) { /* symlink was cached */
750 	uiop->uio_rw = UIO_READ;
751 	error = uiomove(cp->c_symlink, (int)cp->c_symlen, uiop);
752 	if (error)
753 	    MARK_INT_FAIL(CODA_READLINK_STATS);
754 	else
755 	    MARK_INT_SAT(CODA_READLINK_STATS);
756 	return(error);
757     }
758 
759     error = venus_readlink(vtomi(vp), &cp->c_fid, cred, l, &str, &len);
760 
761     if (!error) {
762 	uiop->uio_rw = UIO_READ;
763 	error = uiomove(str, len, uiop);
764 
765 	if (coda_symlink_cache) {
766 	    cp->c_symlink = str;
767 	    cp->c_symlen = len;
768 	    cp->c_flags |= C_SYMLINK;
769 	} else
770 	    CODA_FREE(str, len);
771     }
772 
773     CODADEBUG(CODA_READLINK, myprintf(("in readlink result %d\n",error));)
774     return(error);
775 }
776 
777 int
778 coda_fsync(void *v)
779 {
780 /* true args */
781     struct vop_fsync_args *ap = v;
782     vnode_t *vp = ap->a_vp;
783     struct cnode *cp = VTOC(vp);
784     kauth_cred_t cred = ap->a_cred;
785 /* locals */
786     vnode_t *convp = cp->c_ovp;
787     int error;
788 
789     MARK_ENTRY(CODA_FSYNC_STATS);
790 
791     /* Check for fsync on an unmounting object */
792     /* The NetBSD kernel, in its infinite wisdom, can try to fsync
793      * after an unmount has been initiated.  This is a Bad Thing,
794      * which we have to avoid.  Not a legitimate failure for stats.
795      */
796     if (IS_UNMOUNTING(cp)) {
797 	return(ENODEV);
798     }
799 
800     /* Check for fsync of control object or uninitialized cnode. */
801     if (IS_CTL_VP(vp) || vp->v_type == VNON) {
802 	MARK_INT_SAT(CODA_FSYNC_STATS);
803 	return(0);
804     }
805 
806     if (convp)
807     	VOP_FSYNC(convp, cred, MNT_WAIT, 0, 0);
808 
809     /*
810      * We can expect fsync on any vnode at all if venus is purging it.
811      * Venus can't very well answer the fsync request, now can it?
812      * Hopefully, it won't have to, because hopefully, venus preserves
813      * the (possibly untrue) invariant that it never purges an open
814      * vnode.  Hopefully.
815      */
816     if (cp->c_flags & C_PURGING) {
817 	return(0);
818     }
819 
820     error = venus_fsync(vtomi(vp), &cp->c_fid, cred, curlwp);
821 
822     CODADEBUG(CODA_FSYNC, myprintf(("in fsync result %d\n",error)); )
823     return(error);
824 }
825 
826 /*
827  * vp is locked on entry and remains locked here (vop_inactive_v2).
828  * XXX This routine is suspect and probably needs rewriting.
829  */
830 int
831 coda_inactive(void *v)
832 {
833 /* true args */
834     struct vop_inactive_v2_args *ap = v;
835     vnode_t *vp = ap->a_vp;
836     struct cnode *cp = VTOC(vp);
837     kauth_cred_t cred __unused = NULL;
838 
839     /* We don't need to send inactive to venus - DCS */
840     MARK_ENTRY(CODA_INACTIVE_STATS);
841 
842     if (IS_CTL_VP(vp)) {
843 	MARK_INT_SAT(CODA_INACTIVE_STATS);
844 	return 0;
845     }
846 
847     CODADEBUG(CODA_INACTIVE, myprintf(("in inactive, %s, vfsp %p\n",
848 				  coda_f2s(&cp->c_fid), vp->v_mount));)
849 
850     if (vp->v_mount->mnt_data == NULL) {
851 	myprintf(("Help! vfsp->vfs_data was NULL, but vnode %p wasn't dying\n", vp));
852 	panic("badness in coda_inactive");
853     }
854 
855 #ifdef CODA_VERBOSE
856     /* Sanity checks that perhaps should be panic. */
857     if (vrefcnt(vp) > 1)
858 	printf("%s: %p usecount %d\n", __func__, vp, vrefcnt(vp));
859     if (cp->c_ovp != NULL)
860 	printf("%s: %p ovp != NULL\n", __func__, vp);
861 #endif
862     /* XXX Do we need to VOP_CLOSE container vnodes? */
863     if (!IS_UNMOUNTING(cp))
864 	*ap->a_recycle = true;
865 
866     MARK_INT_SAT(CODA_INACTIVE_STATS);
867     return(0);
868 }
869 
870 /*
871  * Coda does not use the normal namecache, but a private version.
872  * Consider how to use the standard facility instead.
873  */
874 int
875 coda_lookup(void *v)
876 {
877 /* true args */
878     struct vop_lookup_v2_args *ap = v;
879     /* (locked) vnode of dir in which to do lookup */
880     vnode_t *dvp = ap->a_dvp;
881     struct cnode *dcp = VTOC(dvp);
882     /* output variable for result */
883     vnode_t **vpp = ap->a_vpp;
884     /* name to lookup */
885     struct componentname *cnp = ap->a_cnp;
886     kauth_cred_t cred = cnp->cn_cred;
887     struct lwp *l = curlwp;
888 /* locals */
889     struct cnode *cp;
890     const char *nm = cnp->cn_nameptr;
891     int len = cnp->cn_namelen;
892     CodaFid VFid;
893     int	vtype;
894     int error = 0;
895 
896     MARK_ENTRY(CODA_LOOKUP_STATS);
897 
898     CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s in %s\n", __func__,
899 	nm, coda_f2s(&dcp->c_fid)));)
900 
901     /*
902      * XXX componentname flags in MODMASK are not handled at all
903      */
904 
905     /*
906      * The overall strategy is to switch on the lookup type and get a
907      * result vnode that is vref'd but not locked.
908      */
909 
910     /* Check for lookup of control object. */
911     if (IS_CTL_NAME(dvp, nm, len)) {
912 	*vpp = coda_ctlvp;
913 	vref(*vpp);
914 	MARK_INT_SAT(CODA_LOOKUP_STATS);
915 	goto exit;
916     }
917 
918     /* Avoid trying to hand venus an unreasonably long name. */
919     if (len+1 > CODA_MAXNAMLEN) {
920 	MARK_INT_FAIL(CODA_LOOKUP_STATS);
921 	CODADEBUG(CODA_LOOKUP, myprintf(("%s: name too long: %s (%s)\n",
922 	    __func__, coda_f2s(&dcp->c_fid), nm));)
923 	*vpp = (vnode_t *)0;
924 	error = EINVAL;
925 	goto exit;
926     }
927 
928     /*
929      * Try to resolve the lookup in the minicache.  If that fails, ask
930      * venus to do the lookup.  XXX The interaction between vnode
931      * locking and any locking that coda does is not clear.
932      */
933     cp = coda_nc_lookup(dcp, nm, len, cred);
934     if (cp) {
935 	*vpp = CTOV(cp);
936 	vref(*vpp);
937 	CODADEBUG(CODA_LOOKUP,
938 		 myprintf(("lookup result %d vpp %p\n",error,*vpp));)
939     } else {
940 	/* The name wasn't cached, so ask Venus. */
941 	error = venus_lookup(vtomi(dvp), &dcp->c_fid, nm, len, cred, l, &VFid,
942 	    &vtype);
943 
944 	if (error) {
945 	    MARK_INT_FAIL(CODA_LOOKUP_STATS);
946 	    CODADEBUG(CODA_LOOKUP, myprintf(("%s: lookup error on %s (%s)%d\n",
947 		__func__, coda_f2s(&dcp->c_fid), nm, error));)
948 	    *vpp = (vnode_t *)0;
949 	} else {
950 	    MARK_INT_SAT(CODA_LOOKUP_STATS);
951 	    CODADEBUG(CODA_LOOKUP, myprintf(("%s: %s type %o result %d\n",
952 		__func__, coda_f2s(&VFid), vtype, error)); )
953 
954 	    cp = make_coda_node(&VFid, dvp->v_mount, vtype);
955 	    *vpp = CTOV(cp);
956 	    /* vpp is now vrefed. */
957 
958 	    /*
959 	     * Unless this vnode is marked CODA_NOCACHE, enter it into
960 	     * the coda name cache to avoid a future venus round-trip.
961 	     * XXX Interaction with componentname NOCACHE is unclear.
962 	     */
963 	    if (!(vtype & CODA_NOCACHE))
964 		coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
965 	}
966     }
967 
968  exit:
969     /*
970      * If we are creating, and this was the last name to be looked up,
971      * and the error was ENOENT, then make the leaf NULL and return
972      * success.
973      * XXX Check against new lookup rules.
974      */
975     if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME))
976 	&& (cnp->cn_flags & ISLASTCN)
977 	&& (error == ENOENT))
978     {
979 	error = EJUSTRETURN;
980 	*ap->a_vpp = NULL;
981     }
982 
983     return(error);
984 }
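/*
 * Note on the EJUSTRETURN conversion above: for the last component of
 * a CREATE or RENAME lookup, a missing name is not a failure; namei()
 * expects EJUSTRETURN with a NULL *vpp so the caller can go on to
 * create the entry.
 */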
985 
986 /*ARGSUSED*/
987 int
988 coda_create(void *v)
989 {
990 /* true args */
991     struct vop_create_v3_args *ap = v;
992     vnode_t *dvp = ap->a_dvp;
993     struct cnode *dcp = VTOC(dvp);
994     struct vattr *va = ap->a_vap;
995     int exclusive = 1;
996     int mode = ap->a_vap->va_mode;
997     vnode_t **vpp = ap->a_vpp;
998     struct componentname  *cnp = ap->a_cnp;
999     kauth_cred_t cred = cnp->cn_cred;
1000     struct lwp *l = curlwp;
1001 /* locals */
1002     int error;
1003     struct cnode *cp;
1004     const char *nm = cnp->cn_nameptr;
1005     int len = cnp->cn_namelen;
1006     CodaFid VFid;
1007     struct vattr attr;
1008 
1009     MARK_ENTRY(CODA_CREATE_STATS);
1010 
1011     /* All creates are exclusive XXX */
1012     /* I'm assuming the 'mode' argument is the file mode bits XXX */
1013 
1014     /* Check for create of control object. */
1015     if (IS_CTL_NAME(dvp, nm, len)) {
1016 	*vpp = (vnode_t *)0;
1017 	MARK_INT_FAIL(CODA_CREATE_STATS);
1018 	return(EACCES);
1019     }
1020 
1021     error = venus_create(vtomi(dvp), &dcp->c_fid, nm, len, exclusive, mode, va, cred, l, &VFid, &attr);
1022 
1023     if (!error) {
1024 
1025         /*
1026 	 * XXX Violation of venus/kernel invariants is a difficult case,
1027 	 * but venus should not be able to cause a panic.
1028 	 */
1029 	/* If this is an exclusive create, panic if the file already exists. */
1030 	/* Venus should have detected the file and reported EEXIST. */
1031 
1032 	if ((exclusive == 1) &&
1033 	    (coda_find(&VFid) != NULL))
1034 	    panic("cnode existed for newly created file!");
1035 
1036 	cp = make_coda_node(&VFid, dvp->v_mount, attr.va_type);
1037 	*vpp = CTOV(cp);
1038 
1039 	/* XXX vnodeops doesn't say this argument can be changed. */
1040 	/* Update va to reflect the new attributes. */
1041 	(*va) = attr;
1042 
1043 	/* Update the attribute cache and mark it as valid */
1044 	if (coda_attr_cache) {
1045 	    VTOC(*vpp)->c_vattr = attr;
1046 	    VTOC(*vpp)->c_flags |= C_VATTR;
1047 	}
1048 
1049 	/* Invalidate parent's attr cache (modification time has changed). */
1050 	VTOC(dvp)->c_flags &= ~C_VATTR;
1051 
1052 	/* enter the new vnode in the Name Cache */
1053 	coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1054 
1055 	CODADEBUG(CODA_CREATE, myprintf(("%s: %s, result %d\n", __func__,
1056 	    coda_f2s(&VFid), error)); )
1057     } else {
1058 	*vpp = (vnode_t *)0;
1059 	CODADEBUG(CODA_CREATE, myprintf(("%s: create error %d\n", __func__,
1060 	    error));)
1061     }
1062 
1063     if (!error) {
1064 #ifdef CODA_VERBOSE
1065 	if ((cnp->cn_flags & LOCKLEAF) == 0)
1066 	    /* This should not happen; flags are for lookup only. */
1067 	    printf("%s: LOCKLEAF not set!\n", __func__);
1068 #endif
1069     }
1070 
1071     return(error);
1072 }
1073 
1074 int
1075 coda_remove(void *v)
1076 {
1077 /* true args */
1078     struct vop_remove_v3_args *ap = v;
1079     vnode_t *dvp = ap->a_dvp;
1080     struct cnode *cp = VTOC(dvp);
1081     vnode_t *vp = ap->a_vp;
1082     struct componentname  *cnp = ap->a_cnp;
1083     kauth_cred_t cred = cnp->cn_cred;
1084     struct lwp *l = curlwp;
1085 /* locals */
1086     int error;
1087     const char *nm = cnp->cn_nameptr;
1088     int len = cnp->cn_namelen;
1089     struct cnode *tp;
1090 
1091     MARK_ENTRY(CODA_REMOVE_STATS);
1092 
1093     CODADEBUG(CODA_REMOVE, myprintf(("%s: %s in %s\n", __func__,
1094 	nm, coda_f2s(&cp->c_fid)));)
1095 
1096     /* Remove the file's entry from the CODA Name Cache */
1097     /* We're being conservative here, it might be that this person
1098      * doesn't really have sufficient access to delete the file
1099      * but we feel zapping the entry won't really hurt anyone -- dcs
1100      */
1101     /* I'm gonna go out on a limb here. If a file and a hardlink to it
1102      * exist, and one is removed, the link count on the other will be
1103      * off by 1. We could either invalidate the attrs if cached, or
1104      * fix them. I'll try to fix them. DCS 11/8/94
1105      */
1106     tp = coda_nc_lookup(VTOC(dvp), nm, len, cred);
1107     if (tp) {
1108 	if (VALID_VATTR(tp)) {	/* If attrs are cached */
1109 	    if (tp->c_vattr.va_nlink > 1) {	/* If it's a hard link */
1110 		tp->c_vattr.va_nlink--;
1111 	    }
1112 	}
1113 
1114 	coda_nc_zapfile(VTOC(dvp), nm, len);
1115 	/* No need to flush it if it doesn't exist! */
1116     }
1117     /* Invalidate the parent's attr cache, the modification time has changed */
1118     VTOC(dvp)->c_flags &= ~C_VATTR;
1119 
1120     /* Check for remove of control object. */
1121     if (IS_CTL_NAME(dvp, nm, len)) {
1122 	MARK_INT_FAIL(CODA_REMOVE_STATS);
1123 	return(ENOENT);
1124     }
1125 
1126     error = venus_remove(vtomi(dvp), &cp->c_fid, nm, len, cred, l);
1127 
1128     CODADEBUG(CODA_REMOVE, myprintf(("in remove result %d\n",error)); )
1129 
1130     /*
1131      * Unlock and release child (avoiding double if ".").
1132      */
1133     if (dvp == vp) {
1134 	vrele(vp);
1135     } else {
1136 	vput(vp);
1137     }
1138 
1139     return(error);
1140 }
1141 
1142 /*
1143  * dvp is the directory where the link is to go, and is locked.
1144  * vp is the object to be linked to, and is unlocked.
1145  * vp is locked around the venus_link() upcall below; dvp is left to the caller.
1146  */
1147 int
1148 coda_link(void *v)
1149 {
1150 /* true args */
1151     struct vop_link_v2_args *ap = v;
1152     vnode_t *vp = ap->a_vp;
1153     struct cnode *cp = VTOC(vp);
1154     vnode_t *dvp = ap->a_dvp;
1155     struct cnode *dcp = VTOC(dvp);
1156     struct componentname *cnp = ap->a_cnp;
1157     kauth_cred_t cred = cnp->cn_cred;
1158     struct lwp *l = curlwp;
1159 /* locals */
1160     int error;
1161     const char *nm = cnp->cn_nameptr;
1162     int len = cnp->cn_namelen;
1163 
1164     MARK_ENTRY(CODA_LINK_STATS);
1165 
1166     if (codadebug & CODADBGMSK(CODA_LINK)) {
1167 
1168 	myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1169 	myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1170 
1171     }
1172     if (codadebug & CODADBGMSK(CODA_LINK)) {
1173 	myprintf(("%s: vp fid: %s\n", __func__, coda_f2s(&cp->c_fid)));
1174 	myprintf(("%s: dvp fid: %s\n", __func__, coda_f2s(&dcp->c_fid)));
1175 
1176     }
1177 
1178     /* Check for link to/from control object. */
1179     if (IS_CTL_NAME(dvp, nm, len) || IS_CTL_VP(vp)) {
1180 	MARK_INT_FAIL(CODA_LINK_STATS);
1181 	return(EACCES);
1182     }
1183 
1184     /* If linking . to a name, error out earlier. */
1185     if (vp == dvp) {
1186 #ifdef CODA_VERBOSE
1187         printf("%s: vp == dvp\n", __func__);
1188 #endif
1189 	error = EISDIR;
1190 	goto exit;
1191     }
1192 
1193     /* XXX Why does venus_link need the vnode to be locked?*/
1194     if ((error = vn_lock(vp, LK_EXCLUSIVE)) != 0) {
1195 #ifdef CODA_VERBOSE
1196 	printf("%s: couldn't lock vnode %p\n", __func__, vp);
1197 #endif
1198 	error = EFAULT;		/* XXX better value */
1199 	goto exit;
1200     }
1201     error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_ADD_LINK, vp,
1202 	dvp, 0);
1203     if (error)
1204 	    goto exit;
1205     error = venus_link(vtomi(vp), &cp->c_fid, &dcp->c_fid, nm, len, cred, l);
1206     VOP_UNLOCK(vp);
1207 
1208     /* Invalidate parent's attr cache (the modification time has changed). */
1209     VTOC(dvp)->c_flags &= ~C_VATTR;
1210     /* Invalidate child's attr cache (XXX why). */
1211     VTOC(vp)->c_flags &= ~C_VATTR;
1212 
1213     CODADEBUG(CODA_LINK,	myprintf(("in link result %d\n",error)); )
1214 
1215 exit:
1216     return(error);
1217 }
1218 
1219 int
1220 coda_rename(void *v)
1221 {
1222 /* true args */
1223     struct vop_rename_args *ap = v;
1224     vnode_t *odvp = ap->a_fdvp;
1225     struct cnode *odcp = VTOC(odvp);
1226     struct componentname  *fcnp = ap->a_fcnp;
1227     vnode_t *ndvp = ap->a_tdvp;
1228     struct cnode *ndcp = VTOC(ndvp);
1229     struct componentname  *tcnp = ap->a_tcnp;
1230     kauth_cred_t cred = fcnp->cn_cred;
1231     struct lwp *l = curlwp;
1232 /* locals */
1233     int error;
1234     const char *fnm = fcnp->cn_nameptr;
1235     int flen = fcnp->cn_namelen;
1236     const char *tnm = tcnp->cn_nameptr;
1237     int tlen = tcnp->cn_namelen;
1238 
1239     MARK_ENTRY(CODA_RENAME_STATS);
1240 
1241     /* Hmmm.  The vnodes are already looked up.  Perhaps they are locked?
1242        This could be Bad. XXX */
1243 #ifdef OLD_DIAGNOSTIC
1244     if ((fcnp->cn_cred != tcnp->cn_cred)
1245 	|| (fcnp->cn_lwp != tcnp->cn_lwp))
1246     {
1247 	panic("%s: component names don't agree", __func__);
1248     }
1249 #endif
1250 
1251     /* Check for rename involving control object. */
1252     if (IS_CTL_NAME(odvp, fnm, flen) || IS_CTL_NAME(ndvp, tnm, tlen)) {
1253 	MARK_INT_FAIL(CODA_RENAME_STATS);
1254 	return(EACCES);
1255     }
1256 
1257     /* Problem with moving directories -- need to flush entry for .. */
1258     if (odvp != ndvp) {
1259 	struct cnode *ovcp = coda_nc_lookup(VTOC(odvp), fnm, flen, cred);
1260 	if (ovcp) {
1261 	    vnode_t *ovp = CTOV(ovcp);
1262 	    if ((ovp) &&
1263 		(ovp->v_type == VDIR)) /* If it's a directory */
1264 		coda_nc_zapfile(VTOC(ovp),"..", 2);
1265 	}
1266     }
1267 
1268     /* Remove the entries for both source and target files */
1269     coda_nc_zapfile(VTOC(odvp), fnm, flen);
1270     coda_nc_zapfile(VTOC(ndvp), tnm, tlen);
1271 
1272     /* Invalidate the parent's attr cache, the modification time has changed */
1273     VTOC(odvp)->c_flags &= ~C_VATTR;
1274     VTOC(ndvp)->c_flags &= ~C_VATTR;
1275 
1276     if (flen+1 > CODA_MAXNAMLEN) {
1277 	MARK_INT_FAIL(CODA_RENAME_STATS);
1278 	error = EINVAL;
1279 	goto exit;
1280     }
1281 
1282     if (tlen+1 > CODA_MAXNAMLEN) {
1283 	MARK_INT_FAIL(CODA_RENAME_STATS);
1284 	error = EINVAL;
1285 	goto exit;
1286     }
1287 
1288     error = venus_rename(vtomi(odvp), &odcp->c_fid, &ndcp->c_fid, fnm, flen, tnm, tlen, cred, l);
1289 
1290  exit:
1291     CODADEBUG(CODA_RENAME, myprintf(("in rename result %d\n",error));)
1292     /* XXX - do we need to call cache_purge on the moved vnode? */
1293     cache_purge(ap->a_fvp);
1294 
1295     /* It seems to be incumbent on us to drop locks on all four vnodes */
1296     /* From-vnodes are not locked, only ref'd.  To-vnodes are locked. */
1297 
1298     vrele(ap->a_fvp);
1299     vrele(odvp);
1300 
1301     if (ap->a_tvp) {
1302 	if (ap->a_tvp == ndvp) {
1303 	    vrele(ap->a_tvp);
1304 	} else {
1305 	    vput(ap->a_tvp);
1306 	}
1307     }
1308 
1309     vput(ndvp);
1310     return(error);
1311 }
1312 
1313 int
1314 coda_mkdir(void *v)
1315 {
1316 /* true args */
1317     struct vop_mkdir_v3_args *ap = v;
1318     vnode_t *dvp = ap->a_dvp;
1319     struct cnode *dcp = VTOC(dvp);
1320     struct componentname  *cnp = ap->a_cnp;
1321     struct vattr *va = ap->a_vap;
1322     vnode_t **vpp = ap->a_vpp;
1323     kauth_cred_t cred = cnp->cn_cred;
1324     struct lwp *l = curlwp;
1325 /* locals */
1326     int error;
1327     const char *nm = cnp->cn_nameptr;
1328     int len = cnp->cn_namelen;
1329     struct cnode *cp;
1330     CodaFid VFid;
1331     struct vattr ova;
1332 
1333     MARK_ENTRY(CODA_MKDIR_STATS);
1334 
1335     /* Check for mkdir of control object. */
1336     if (IS_CTL_NAME(dvp, nm, len)) {
1337 	*vpp = (vnode_t *)0;
1338 	MARK_INT_FAIL(CODA_MKDIR_STATS);
1339 	return(EACCES);
1340     }
1341 
1342     if (len+1 > CODA_MAXNAMLEN) {
1343 	*vpp = (vnode_t *)0;
1344 	MARK_INT_FAIL(CODA_MKDIR_STATS);
1345 	return(EACCES);
1346     }
1347 
1348     error = venus_mkdir(vtomi(dvp), &dcp->c_fid, nm, len, va, cred, l, &VFid, &ova);
1349 
1350     if (!error) {
1351 	if (coda_find(&VFid) != NULL)
1352 	    panic("cnode existed for newly created directory!");
1353 
1354 
1355 	cp =  make_coda_node(&VFid, dvp->v_mount, va->va_type);
1356 	*vpp = CTOV(cp);
1357 
1358 	/* enter the new vnode in the Name Cache */
1359 	coda_nc_enter(VTOC(dvp), nm, len, cred, VTOC(*vpp));
1360 
1361 	/* as a side effect, enter "." and ".." for the directory */
1362 	coda_nc_enter(VTOC(*vpp), ".", 1, cred, VTOC(*vpp));
1363 	coda_nc_enter(VTOC(*vpp), "..", 2, cred, VTOC(dvp));
1364 
1365 	if (coda_attr_cache) {
1366 	    VTOC(*vpp)->c_vattr = ova;		/* update the attr cache */
1367 	    VTOC(*vpp)->c_flags |= C_VATTR;	/* Valid attributes in cnode */
1368 	}
1369 
1370 	/* Invalidate the parent's attr cache, the modification time has changed */
1371 	VTOC(dvp)->c_flags &= ~C_VATTR;
1372 
1373 	CODADEBUG( CODA_MKDIR, myprintf(("%s: %s result %d\n", __func__,
1374 	    coda_f2s(&VFid), error)); )
1375     } else {
1376 	*vpp = (vnode_t *)0;
1377 	CODADEBUG(CODA_MKDIR, myprintf(("%s error %d\n", __func__, error));)
1378     }
1379 
1380     return(error);
1381 }
1382 
1383 int
1384 coda_rmdir(void *v)
1385 {
1386 /* true args */
1387     struct vop_rmdir_v2_args *ap = v;
1388     vnode_t *dvp = ap->a_dvp;
1389     struct cnode *dcp = VTOC(dvp);
1390     vnode_t *vp = ap->a_vp;
1391     struct componentname  *cnp = ap->a_cnp;
1392     kauth_cred_t cred = cnp->cn_cred;
1393     struct lwp *l = curlwp;
1394 /* locals */
1395     int error;
1396     const char *nm = cnp->cn_nameptr;
1397     int len = cnp->cn_namelen;
1398     struct cnode *cp;
1399 
1400     MARK_ENTRY(CODA_RMDIR_STATS);
1401 
1402     /* Check for rmdir of control object. */
1403     if (IS_CTL_NAME(dvp, nm, len)) {
1404 	MARK_INT_FAIL(CODA_RMDIR_STATS);
1405 	return(ENOENT);
1406     }
1407 
1408     /* Can't remove . in self. */
1409     if (dvp == vp) {
1410 #ifdef CODA_VERBOSE
1411 	printf("%s: dvp == vp\n", __func__);
1412 #endif
1413 	error = EINVAL;
1414 	goto exit;
1415     }
1416 
1417     /*
1418      * The caller may not have adequate permissions, and the venus
1419      * operation may fail, but it doesn't hurt from a correctness
1420      * viewpoint to invalidate cache entries.
1421      * XXX Why isn't this done after the venus_rmdir call?
1422      */
1423     /* Look up child in name cache (by name, from parent). */
1424     cp = coda_nc_lookup(dcp, nm, len, cred);
1425     /* If found, remove all children of the child (., ..). */
1426     if (cp) coda_nc_zapParentfid(&(cp->c_fid), NOT_DOWNCALL);
1427 
1428     /* Remove child's own entry. */
1429     coda_nc_zapfile(dcp, nm, len);
1430 
1431     /* Invalidate parent's attr cache (the modification time has changed). */
1432     dcp->c_flags &= ~C_VATTR;
1433 
1434     error = venus_rmdir(vtomi(dvp), &dcp->c_fid, nm, len, cred, l);
1435 
1436     CODADEBUG(CODA_RMDIR, myprintf(("in rmdir result %d\n", error)); )
1437 
1438 exit:
1439     /* unlock and release child */
1440     if (dvp == vp) {
1441 	vrele(vp);
1442     } else {
1443 	vput(vp);
1444     }
1445 
1446     return(error);
1447 }
1448 
1449 int
1450 coda_symlink(void *v)
1451 {
1452 /* true args */
1453     struct vop_symlink_v3_args *ap = v;
1454     vnode_t *dvp = ap->a_dvp;
1455     struct cnode *dcp = VTOC(dvp);
1456     /* a_vpp is used in place below */
1457     struct componentname *cnp = ap->a_cnp;
1458     struct vattr *tva = ap->a_vap;
1459     char *path = ap->a_target;
1460     kauth_cred_t cred = cnp->cn_cred;
1461     struct lwp *l = curlwp;
1462 /* locals */
1463     int error;
1464     u_long saved_cn_flags;
1465     const char *nm = cnp->cn_nameptr;
1466     int len = cnp->cn_namelen;
1467     int plen = strlen(path);
1468 
1469     /*
1470      * Here's the strategy for the moment: perform the symlink, then
1471      * do a lookup to grab the resulting vnode.  I know this requires
1472      * two communications with Venus for a new symbolic link, but
1473      * that's the way the ball bounces.  I don't yet want to change
1474      * the way the Mach symlink works.  When Mach support is
1475      * deprecated, we should change symlink so that the common case
1476      * returns the resultant vnode in a vpp argument.
1477      */
1478 
1479     MARK_ENTRY(CODA_SYMLINK_STATS);
1480 
1481     /* Check for symlink of control object. */
1482     if (IS_CTL_NAME(dvp, nm, len)) {
1483 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1484 	error = EACCES;
1485 	goto exit;
1486     }
1487 
1488     if (plen+1 > CODA_MAXPATHLEN) {
1489 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1490 	error = EINVAL;
1491 	goto exit;
1492     }
1493 
1494     if (len+1 > CODA_MAXNAMLEN) {
1495 	MARK_INT_FAIL(CODA_SYMLINK_STATS);
1496 	error = EINVAL;
1497 	goto exit;
1498     }
1499 
1500     error = venus_symlink(vtomi(dvp), &dcp->c_fid, path, plen, nm, len, tva, cred, l);
1501 
1502     /* Invalidate the parent's attr cache (modification time has changed). */
1503     dcp->c_flags &= ~C_VATTR;
1504 
1505     if (!error) {
1506 	/*
1507 	 * VOP_SYMLINK is not defined to pay attention to cnp->cn_flags;
1508 	 * these are defined only for VOP_LOOKUP.   We desire to reuse
1509 	 * cnp for a VOP_LOOKUP operation, and must be sure to not pass
1510 	 * stray flags passed to us.  Such stray flags can occur because
1511 	 * sys_symlink makes a namei call and then reuses the
1512 	 * componentname structure.
1513 	 */
1514 	/*
1515 	 * XXX Arguably we should create our own componentname structure
1516 	 * and not reuse the one that was passed in.
1517 	 */
1518 	saved_cn_flags = cnp->cn_flags;
1519 	cnp->cn_flags &= ~(MODMASK | OPMASK);
1520 	cnp->cn_flags |= LOOKUP;
1521 	error = VOP_LOOKUP(dvp, ap->a_vpp, cnp);
1522 	cnp->cn_flags = saved_cn_flags;
1523     }
1524 
1525  exit:
1526     CODADEBUG(CODA_SYMLINK, myprintf(("in symlink result %d\n",error)); )
1527     return(error);
1528 }
1529 
1530 /*
1531  * Read directory entries.
1532  */
1533 int
1534 coda_readdir(void *v)
1535 {
1536 /* true args */
1537 	struct vop_readdir_args *ap = v;
1538 	vnode_t *vp = ap->a_vp;
1539 	struct cnode *cp = VTOC(vp);
1540 	struct uio *uiop = ap->a_uio;
1541 	kauth_cred_t cred = ap->a_cred;
1542 	int *eofflag = ap->a_eofflag;
1543 /* upcall decl */
1544 /* locals */
1545 	size_t initial_resid = uiop->uio_resid;
1546 	int error = 0;
1547 	int opened_internally = 0;
1548 	int ncookies;
1549 	char *buf;
1550 	struct vnode *cvp;
1551 	struct dirent *dirp;
1552 
1553 	MARK_ENTRY(CODA_READDIR_STATS);
1554 
1555 	CODADEBUG(CODA_READDIR, myprintf(("%s: (%p, %lu, %lld)\n", __func__,
1556 	    uiop->uio_iov->iov_base, (unsigned long) uiop->uio_resid,
1557 	    (long long) uiop->uio_offset)); )
1558 
1559 	/* Check for readdir of control object. */
1560 	if (IS_CTL_VP(vp)) {
1561 		MARK_INT_FAIL(CODA_READDIR_STATS);
1562 		return ENOENT;
1563 	}
1564 
1565 	/* If directory is not already open do an "internal open" on it. */
1566 	if (cp->c_ovp == NULL) {
1567 		opened_internally = 1;
1568 		MARK_INT_GEN(CODA_OPEN_STATS);
1569 		error = VOP_OPEN(vp, FREAD, cred);
1570 #ifdef	CODA_VERBOSE
1571 		printf("%s: Internally Opening %p\n", __func__, vp);
1572 #endif
1573 		if (error)
1574 			return error;
1575 		KASSERT(cp->c_ovp != NULL);
1576 	}
1577 	cvp = cp->c_ovp;
1578 
1579 	CODADEBUG(CODA_READDIR, myprintf(("%s: fid = %s, refcnt = %d\n",
1580 	    __func__, coda_f2s(&cp->c_fid), vrefcnt(cvp))); )
1581 
1582 	if (ap->a_ncookies) {
1583 		ncookies = ap->a_uio->uio_resid / _DIRENT_RECLEN(dirp, 1);
1584 		*ap->a_ncookies = 0;
1585 		*ap->a_cookies = malloc(ncookies * sizeof (off_t),
1586 		    M_TEMP, M_WAITOK);
1587 	}
1588 	buf = kmem_alloc(CODA_DIRBLKSIZ, KM_SLEEP);
1589 	dirp = kmem_alloc(sizeof(*dirp), KM_SLEEP);
1590 	vn_lock(cvp, LK_EXCLUSIVE | LK_RETRY);
1591 
1592 	while (error == 0) {
1593 		size_t resid = 0;
1594 		char *dp, *ep;
1595 
1596 		if (!ALIGNED_POINTER(uiop->uio_offset, uint32_t)) {
1597 			error = EINVAL;
1598 			break;
1599 		}
1600 		error = vn_rdwr(UIO_READ, cvp, buf,
1601 		    CODA_DIRBLKSIZ, uiop->uio_offset,
1602 		    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, curlwp);
1603 		if (error || resid == CODA_DIRBLKSIZ)
1604 			break;
1605 		for (dp = buf, ep = dp + CODA_DIRBLKSIZ - resid; dp < ep; ) {
1606 			off_t off;
1607 			struct venus_dirent *vd = (struct venus_dirent *)dp;
1608 
1609 			if (!ALIGNED_POINTER(vd, uint32_t) ||
1610 			    !ALIGNED_POINTER(vd->d_reclen, uint32_t) ||
1611 			    vd->d_reclen == 0) {
1612 				error = EINVAL;
1613 				break;
1614 			}
1615 			if (dp + vd->d_reclen > ep) {
1616 				error = ENAMETOOLONG;
1617 				break;
1618 			}
1619 			if (vd->d_namlen == 0) {
1620 				uiop->uio_offset += vd->d_reclen;
1621 				dp += vd->d_reclen;
1622 				continue;
1623 			}
1624 
1625 			dirp->d_fileno = vd->d_fileno;
1626 			dirp->d_type = vd->d_type;
1627 			dirp->d_namlen = vd->d_namlen;
1628 			dirp->d_reclen = _DIRENT_SIZE(dirp);
1629 			strlcpy(dirp->d_name, vd->d_name, dirp->d_namlen + 1);
1630 
1631 			if (uiop->uio_resid < dirp->d_reclen) {
1632 				error = ENAMETOOLONG;
1633 				break;
1634 			}
1635 
1636 			off = uiop->uio_offset;
1637 			error = uiomove(dirp, dirp->d_reclen, uiop);
1638 			uiop->uio_offset = off;
1639 			if (error)
1640 				break;
1641 
1642 			uiop->uio_offset += vd->d_reclen;
1643 			dp += vd->d_reclen;
1644 			if (ap->a_ncookies)
1645 				(*ap->a_cookies)[(*ap->a_ncookies)++] =
1646 				    uiop->uio_offset;
1647 		}
1648 	}
1649 
1650 	VOP_UNLOCK(cvp);
1651 	kmem_free(dirp, sizeof(*dirp));
1652 	kmem_free(buf, CODA_DIRBLKSIZ);
1653 	if (eofflag && error == 0)
1654 		*eofflag = 1;
1655 	if (uiop->uio_resid < initial_resid && error == ENAMETOOLONG)
1656 		error = 0;
1657 	if (ap->a_ncookies && error) {
1658 		free(*ap->a_cookies, M_TEMP);
1659 		*ap->a_ncookies = 0;
1660 		*ap->a_cookies = NULL;
1661 	}
1662 	if (error)
1663 		MARK_INT_FAIL(CODA_READDIR_STATS);
1664 	else
1665 		MARK_INT_SAT(CODA_READDIR_STATS);
1666 
1667 	/* Do an "internal close" if necessary. */
1668 	if (opened_internally) {
1669 		MARK_INT_GEN(CODA_CLOSE_STATS);
1670 		(void)VOP_CLOSE(vp, FREAD, cred);
1671 	}
1672 
1673 	return error;
1674 }
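/*
 * Summary of the translation loop above: directory contents are read
 * from the container file in CODA_DIRBLKSIZ chunks, each struct
 * venus_dirent is repacked into a native struct dirent before being
 * copied out, and uio_offset (and hence any cookies returned) always
 * refers to offsets in the container file's record format, not in the
 * translated output stream.
 */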
1675 
1676 /*
1677  * Convert from file system blocks to device blocks
1678  */
1679 int
1680 coda_bmap(void *v)
1681 {
1682     /* XXX on the global proc */
1683 /* true args */
1684     struct vop_bmap_args *ap = v;
1685     vnode_t *vp __unused = ap->a_vp;	/* file's vnode */
1686     daddr_t bn __unused = ap->a_bn;	/* fs block number */
1687     vnode_t **vpp = ap->a_vpp;			/* RETURN vp of device */
1688     daddr_t *bnp __unused = ap->a_bnp;	/* RETURN device block number */
1689     struct lwp *l __unused = curlwp;
1690 /* upcall decl */
1691 /* locals */
1692 
1693 	*vpp = (vnode_t *)0;
1694 	myprintf(("coda_bmap called!\n"));
1695 	return(EINVAL);
1696 }
1697 
1698 /*
1699  * I don't think the following two things are used anywhere, so I've
1700  * commented them out
1701  *
1702  * struct buf *async_bufhead;
1703  * int async_daemon_count;
1704  */
1705 int
1706 coda_strategy(void *v)
1707 {
1708 /* true args */
1709     struct vop_strategy_args *ap = v;
1710     struct buf *bp __unused = ap->a_bp;
1711     struct lwp *l __unused = curlwp;
1712 /* upcall decl */
1713 /* locals */
1714 
1715 	myprintf(("coda_strategy called!  "));
1716 	return(EINVAL);
1717 }
1718 
1719 int
1720 coda_reclaim(void *v)
1721 {
1722 /* true args */
1723     struct vop_reclaim_v2_args *ap = v;
1724     vnode_t *vp = ap->a_vp;
1725     struct cnode *cp = VTOC(vp);
1726 /* upcall decl */
1727 /* locals */
1728 
1729     VOP_UNLOCK(vp);
1730 
1731 /*
1732  * Forced unmount/flush will let vnodes with non zero use be destroyed!
1733  */
1734     ENTRY;
1735 
1736     if (IS_UNMOUNTING(cp)) {
1737 #ifdef	DEBUG
1738 	if (VTOC(vp)->c_ovp) {
1739 	    if (IS_UNMOUNTING(cp))
1740 		printf("%s: c_ovp not void: vp %p, cp %p\n", __func__, vp, cp);
1741 	}
1742 #endif
1743     } else {
1744 #ifdef OLD_DIAGNOSTIC
1745 	if (vrefcnt(vp) != 0)
1746 	    printf("%s: pushing active %p\n", __func__, vp);
1747 	if (VTOC(vp)->c_ovp) {
1748 	    panic("%s: c_ovp not void", __func__);
1749 	}
1750 #endif
1751     }
1752     /* If an array has been allocated to hold the symlink, deallocate it */
1753     if ((coda_symlink_cache) && (VALID_SYMLINK(cp))) {
1754 	if (cp->c_symlink == NULL)
1755 	    panic("%s: null symlink pointer in cnode", __func__);
1756 
1757 	CODA_FREE(cp->c_symlink, cp->c_symlen);
1758 	cp->c_flags &= ~C_SYMLINK;
1759 	cp->c_symlen = 0;
1760     }
1761 
1762     mutex_enter(vp->v_interlock);
1763     mutex_enter(&cp->c_lock);
1764     SET_VTOC(vp) = NULL;
1765     mutex_exit(&cp->c_lock);
1766     mutex_exit(vp->v_interlock);
1767     mutex_destroy(&cp->c_lock);
1768     kmem_free(cp, sizeof(*cp));
1769 
1770     return (0);
1771 }
1772 
1773 int
1774 coda_lock(void *v)
1775 {
1776 /* true args */
1777     struct vop_lock_args *ap = v;
1778     vnode_t *vp = ap->a_vp;
1779     struct cnode *cp = VTOC(vp);
1780 /* upcall decl */
1781 /* locals */
1782 
1783     ENTRY;
1784 
1785     if (coda_lockdebug) {
1786 	myprintf(("Attempting lock on %s\n",
1787 		  coda_f2s(&cp->c_fid)));
1788     }
1789 
1790     return genfs_lock(v);
1791 }
1792 
1793 int
1794 coda_unlock(void *v)
1795 {
1796 /* true args */
1797     struct vop_unlock_args *ap = v;
1798     vnode_t *vp = ap->a_vp;
1799     struct cnode *cp = VTOC(vp);
1800 /* upcall decl */
1801 /* locals */
1802 
1803     ENTRY;
1804     if (coda_lockdebug) {
1805 	myprintf(("Attempting unlock on %s\n",
1806 		  coda_f2s(&cp->c_fid)));
1807     }
1808 
1809     return genfs_unlock(v);
1810 }
1811 
1812 int
1813 coda_islocked(void *v)
1814 {
1815 /* true args */
1816     ENTRY;
1817 
1818     return genfs_islocked(v);
1819 }
1820 
1821 int
1822 coda_pathconf(void *v)
1823 {
1824 	struct vop_pathconf_args *ap = v;
1825 
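	/* No pathconf variables are supported; every name is rejected. */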
1826 	switch (ap->a_name) {
1827 	default:
1828 		return EINVAL;
1829 	}
1830 	/* NOTREACHED */
1831 }
1832 
1833 /*
1834  * Given a device and inode, obtain a locked vnode.  One reference is
1835  * obtained and passed back to the caller.
1836  */
1837 int
1838 coda_grab_vnode(vnode_t *uvp, dev_t dev, ino_t ino, vnode_t **vpp)
1839 {
1840     int           error;
1841     struct mount *mp;
1842 
1843     /* Obtain mount point structure from device. */
1844     if (!(mp = devtomp(dev))) {
1845 	myprintf(("%s: devtomp(0x%llx) returns NULL\n", __func__,
1846 	    (unsigned long long)dev));
1847 	return(ENXIO);
1848     }
1849 
1850     /*
1851      * Obtain vnode from mount point and inode.
1852      */
1853     error = VFS_VGET(mp, ino, LK_EXCLUSIVE, vpp);
1854     if (error) {
1855 	myprintf(("%s: iget/vget(0x%llx, %llu) returns %p, err %d\n", __func__,
1856 	    (unsigned long long)dev, (unsigned long long)ino, *vpp, error));
1857 	return(ENOENT);
1858     }
1859     /* share the underlying vnode lock with the coda vnode */
1860     vshareilock(*vpp, uvp);
1861     KASSERT(VOP_ISLOCKED(*vpp));
1862     return(0);
1863 }
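
/*
 * Hedged usage sketch (illustrative comment only, not compiled): a
 * caller holding the coda vnode "vp" that has learned a (dev, ino)
 * pair from Venus might fetch the container vnode roughly like this;
 * the names dev, ino and container_vp are assumptions for the example:
 *
 *	vnode_t *container_vp;
 *	int err = coda_grab_vnode(vp, dev, ino, &container_vp);
 *	if (err == 0) {
 *		// container_vp is locked and holds one reference
 *		vput(container_vp);	// drop lock and reference when done
 *	}
 */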
1864 
1865 static void
1866 coda_print_vattr(struct vattr *attr)
1867 {
1868     const char *typestr;
1869 
1870     switch (attr->va_type) {
1871     case VNON:
1872 	typestr = "VNON";
1873 	break;
1874     case VREG:
1875 	typestr = "VREG";
1876 	break;
1877     case VDIR:
1878 	typestr = "VDIR";
1879 	break;
1880     case VBLK:
1881 	typestr = "VBLK";
1882 	break;
1883     case VCHR:
1884 	typestr = "VCHR";
1885 	break;
1886     case VLNK:
1887 	typestr = "VLNK";
1888 	break;
1889     case VSOCK:
1890 	typestr = "VSCK";
1891 	break;
1892     case VFIFO:
1893 	typestr = "VFFO";
1894 	break;
1895     case VBAD:
1896 	typestr = "VBAD";
1897 	break;
1898     default:
1899 	typestr = "????";
1900 	break;
1901     }
1902 
1903 
1904     myprintf(("attr: type %s mode %d uid %d gid %d fsid %d rdev %d\n",
1905 	      typestr, (int)attr->va_mode, (int)attr->va_uid,
1906 	      (int)attr->va_gid, (int)attr->va_fsid, (int)attr->va_rdev));
1907 
1908     myprintf(("      fileid %d nlink %d size %d blocksize %d bytes %d\n",
1909 	      (int)attr->va_fileid, (int)attr->va_nlink,
1910 	      (int)attr->va_size,
1911 	      (int)attr->va_blocksize,(int)attr->va_bytes));
1912     myprintf(("      gen %ld flags %ld vaflags %d\n",
1913 	      attr->va_gen, attr->va_flags, attr->va_vaflags));
1914     myprintf(("      atime sec %d nsec %d\n",
1915 	      (int)attr->va_atime.tv_sec, (int)attr->va_atime.tv_nsec));
1916     myprintf(("      mtime sec %d nsec %d\n",
1917 	      (int)attr->va_mtime.tv_sec, (int)attr->va_mtime.tv_nsec));
1918     myprintf(("      ctime sec %d nsec %d\n",
1919 	      (int)attr->va_ctime.tv_sec, (int)attr->va_ctime.tv_nsec));
1920 }
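
/*
 * Hedged usage sketch (illustrative comment only): the printer is
 * meant to sit behind a verbosity guard, as other debug output in
 * this file does, e.g.:
 *
 *	#ifdef CODA_VERBOSE
 *		coda_print_vattr(&va);
 *	#endif
 */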
1921 
1922 /*
1923  * Return the cnode (and hence the vnode) for the given fid.
1924  * If no cnode exists for this fid, vcache_get() creates one;
1925  * if the cnode for this fid is already cached, it is returned
1926  * with an additional reference.  The cnode is dropped from
1927  * the vnode cache when the vnode is reclaimed
1928  * (see coda_reclaim).
1929  */
1930 struct cnode *
1931 make_coda_node(CodaFid *fid, struct mount *fvsp, short type)
1932 {
1933 	int error __diagused;
1934 	struct vnode *vp;
1935 	struct cnode *cp;
1936 
1937 	error = vcache_get(fvsp, fid, sizeof(CodaFid), &vp);
1938 	KASSERT(error == 0);
1939 
1940 	mutex_enter(vp->v_interlock);
1941 	cp = VTOC(vp);
1942 	KASSERT(cp != NULL);
1943 	mutex_enter(&cp->c_lock);
1944 	mutex_exit(vp->v_interlock);
1945 
1946 	if (vp->v_type != type) {
1947 		if (vp->v_type == VCHR || vp->v_type == VBLK)
1948 			spec_node_destroy(vp);
1949 		vp->v_type = type;
1950 		if (type == VCHR || type == VBLK)
1951 			spec_node_init(vp, NODEV);
1952 		uvm_vnp_setsize(vp, 0);
1953 	}
1954 	mutex_exit(&cp->c_lock);
1955 
1956 	return cp;
1957 }
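
/*
 * Hedged usage sketch (illustrative comment only): code handling a
 * Venus reply that carries a CodaFid might materialize the cnode like
 * this; "fid" and "mp" (the coda mount point) are assumptions for the
 * example:
 *
 *	struct cnode *cp = make_coda_node(&fid, mp, VDIR);
 *	vnode_t *vp = CTOV(cp);	// referenced vnode for this fid
 */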
1958 
1959 /*
1960  * coda_getpages may be called on a vnode which has not been opened,
1961  * e.g. to fault in pages to execute a program.  In that case, we must
1962  * open the file to get the container.  The vnode may or may not be
1963  * locked, and we must leave it in the same state.
1964  */
1965 int
1966 coda_getpages(void *v)
1967 {
1968 	struct vop_getpages_args /* {
1969 		vnode_t *a_vp;
1970 		voff_t a_offset;
1971 		struct vm_page **a_m;
1972 		int *a_count;
1973 		int a_centeridx;
1974 		vm_prot_t a_access_type;
1975 		int a_advice;
1976 		int a_flags;
1977 	} */ *ap = v;
1978 	vnode_t *vp = ap->a_vp, *cvp;
1979 	struct cnode *cp = VTOC(vp);
1980 	struct lwp *l = curlwp;
1981 	kauth_cred_t cred = l->l_cred;
1982 	int error, cerror;
1983 	int waslocked;	       /* 1 if vnode lock was held on entry */
1984 	int didopen = 0;	/* 1 if we opened container file */
1985 	krw_t op;
1986 
1987 	/*
1988 	 * Handle a case that uvm_fault doesn't quite use yet.
1989 	 * See layer_vnops.c for inspiration.
1990 	 */
1991 	if (ap->a_flags & PGO_LOCKED) {
1992 		return EBUSY;
1993 	}
1994 
1995 	KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));
1996 
1997 	/* Check for control object. */
1998 	if (IS_CTL_VP(vp)) {
1999 #ifdef CODA_VERBOSE
2000 		printf("%s: control object %p\n", __func__, vp);
2001 #endif
2002 		return(EINVAL);
2003 	}
2004 
2005 	/*
2006 	 * XXX It's really not ok to be releasing the lock we get,
2007 	 * because we could be overlapping with another call to
2008 	 * getpages and drop a lock they are relying on.  We need to
2009 	 * figure out whether getpages ever is called holding the
2010 	 * lock, and if we should serialize getpages calls by some
2011 	 * mechanism.
2012 	 */
2013 	/* XXX VOP_ISLOCKED() may not be used for lock decisions. */
2014 	op = rw_lock_op(vp->v_uobj.vmobjlock);
2015 	waslocked = VOP_ISLOCKED(vp);
2016 
2017 	/* Get container file if not already present. */
2018 	cvp = cp->c_ovp;
2019 	if (cvp == NULL) {
2020 		/*
2021 		 * VOP_OPEN requires a locked vnode.  We must avoid
2022 		 * locking the vnode if it is already locked, and
2023 		 * leave it in the same state on exit.
2024 		 */
2025 		if (waslocked == 0) {
2026 			rw_exit(vp->v_uobj.vmobjlock);
2027 			cerror = vn_lock(vp, LK_EXCLUSIVE);
2028 			if (cerror) {
2029 #ifdef CODA_VERBOSE
2030 				printf("%s: can't lock vnode %p\n",
2031 				    __func__, vp);
2032 #endif
2033 				return cerror;
2034 			}
2035 #ifdef CODA_VERBOSE
2036 			printf("%s: locked vnode %p\n", __func__, vp);
2037 #endif
2038 		}
2039 
2040 		/*
2041 		 * Open file (causes upcall to venus).
2042 		 * XXX Perhaps we should not fully open the file, but
2043 		 * simply obtain a container file.
2044 		 */
2045 		/* XXX Is it ok to do this while holding the mutex? */
2046 		cerror = VOP_OPEN(vp, FREAD, cred);
2047 
2048 		if (cerror) {
2049 #ifdef CODA_VERBOSE
2050 			printf("%s: cannot open vnode %p => %d\n", __func__,
2051 			    vp, cerror);
2052 #endif
2053 			if (waslocked == 0)
2054 				VOP_UNLOCK(vp);
2055 			return cerror;
2056 		}
2057 
2058 #ifdef CODA_VERBOSE
2059 		printf("%s: opened vnode %p\n", __func__, vp);
2060 #endif
2061 		cvp = cp->c_ovp;
2062 		didopen = 1;
2063 		if (waslocked == 0)
2064 			rw_enter(vp->v_uobj.vmobjlock, op);
2065 	}
2066 	KASSERT(cvp != NULL);
2067 
2068 	/* Munge the arg structure to refer to the container vnode. */
2069 	KASSERT(cvp->v_uobj.vmobjlock == vp->v_uobj.vmobjlock);
2070 	ap->a_vp = cp->c_ovp;
2071 
2072 	/* Finally, call getpages on it. */
2073 	error = VCALL(ap->a_vp, VOFFSET(vop_getpages), ap);
2074 
2075 	/* If we opened the vnode, we must close it. */
2076 	if (didopen) {
2077 		/*
2078 		 * VOP_CLOSE requires a locked vnode, but we are still
2079 		 * holding the lock (or riding a caller's lock).
2080 		 */
2081 		cerror = VOP_CLOSE(vp, FREAD, cred);
2082 #ifdef CODA_VERBOSE
2083 		if (cerror != 0)
2084 			/* XXX How should we handle this? */
2085 			printf("%s: closed vnode %p -> %d\n", __func__,
2086 			    vp, cerror);
2087 #endif
2088 
2089 		/* If we obtained a lock, drop it. */
2090 		if (waslocked == 0)
2091 			VOP_UNLOCK(vp);
2092 	}
2093 
2094 	return error;
2095 }
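
/*
 * Hedged caller sketch (illustrative comment only): getpages is
 * reached from the paging path with the object lock held on entry,
 * and (unless PGO_LOCKED is set) the lock is released by the callee;
 * "pgs" and "npages" are assumed local names:
 *
 *	rw_enter(vp->v_uobj.vmobjlock, RW_READER);
 *	error = VOP_GETPAGES(vp, offset, pgs, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 */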
2096 
2097 /*
2098  * The protocol requires the vmobjlock to be write-held by the caller.
2099  */
2100 int
2101 coda_putpages(void *v)
2102 {
2103 	struct vop_putpages_args /* {
2104 		vnode_t *a_vp;
2105 		voff_t a_offlo;
2106 		voff_t a_offhi;
2107 		int a_flags;
2108 	} */ *ap = v;
2109 	vnode_t *vp = ap->a_vp, *cvp;
2110 	struct cnode *cp = VTOC(vp);
2111 	int error;
2112 
2113 	KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
2114 
2115 	/* Check for control object. */
2116 	if (IS_CTL_VP(vp)) {
2117 		rw_exit(vp->v_uobj.vmobjlock);
2118 #ifdef CODA_VERBOSE
2119 		printf("%s: control object %p\n", __func__, vp);
2120 #endif
2121 		return 0;
2122 	}
2123 
2124 	/*
2125 	 * If container object is not present, then there are no pages
2126 	 * to put; just return without error.  This happens all the
2127 	 * time, apparently during discard of a closed vnode (which
2128 	 * trivially can't have dirty pages).
2129 	 */
2130 	cvp = cp->c_ovp;
2131 	if (cvp == NULL) {
2132 		rw_exit(vp->v_uobj.vmobjlock);
2133 		return 0;
2134 	}
2135 
2136 	/* Munge the arg structure to refer to the container vnode. */
2137 	KASSERT(cvp->v_uobj.vmobjlock == vp->v_uobj.vmobjlock);
2138 	ap->a_vp = cvp;
2139 
2140 	/* Finally, call putpages on it. */
2141 	error = VCALL(ap->a_vp, VOFFSET(vop_putpages), ap);
2142 
2143 	return error;
2144 }
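
/*
 * Hedged caller sketch (illustrative comment only): putpages is
 * entered with the object lock write-held, and the lock is released
 * before return; "offlo" and "offhi" are assumed local names:
 *
 *	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
 *	error = VOP_PUTPAGES(vp, offlo, offhi, PGO_CLEANIT);
 */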
2145