xref: /illumos-gate/usr/src/uts/common/fs/nfs/nfs_client.c (revision da6c28aa)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
55dae4443Sdh145677  * Common Development and Distribution License (the "License").
65dae4443Sdh145677  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22*da6c28aaSamw  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  *
257c478bd9Sstevel@tonic-gate  *  	Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
267c478bd9Sstevel@tonic-gate  *	All rights reserved.
277c478bd9Sstevel@tonic-gate  */
287c478bd9Sstevel@tonic-gate 
297c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
307c478bd9Sstevel@tonic-gate 
317c478bd9Sstevel@tonic-gate #include <sys/param.h>
327c478bd9Sstevel@tonic-gate #include <sys/types.h>
337c478bd9Sstevel@tonic-gate #include <sys/systm.h>
347c478bd9Sstevel@tonic-gate #include <sys/thread.h>
357c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
367c478bd9Sstevel@tonic-gate #include <sys/time.h>
377c478bd9Sstevel@tonic-gate #include <sys/vnode.h>
387c478bd9Sstevel@tonic-gate #include <sys/vfs.h>
397c478bd9Sstevel@tonic-gate #include <sys/errno.h>
407c478bd9Sstevel@tonic-gate #include <sys/buf.h>
417c478bd9Sstevel@tonic-gate #include <sys/stat.h>
427c478bd9Sstevel@tonic-gate #include <sys/cred.h>
437c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
447c478bd9Sstevel@tonic-gate #include <sys/debug.h>
457c478bd9Sstevel@tonic-gate #include <sys/dnlc.h>
467c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h>
477c478bd9Sstevel@tonic-gate #include <sys/flock.h>
487c478bd9Sstevel@tonic-gate #include <sys/share.h>
497c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
507c478bd9Sstevel@tonic-gate #include <sys/tiuser.h>
517c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
527c478bd9Sstevel@tonic-gate #include <sys/callb.h>
537c478bd9Sstevel@tonic-gate #include <sys/acl.h>
547c478bd9Sstevel@tonic-gate #include <sys/kstat.h>
557c478bd9Sstevel@tonic-gate #include <sys/signal.h>
567c478bd9Sstevel@tonic-gate #include <sys/list.h>
577c478bd9Sstevel@tonic-gate #include <sys/zone.h>
587c478bd9Sstevel@tonic-gate 
597c478bd9Sstevel@tonic-gate #include <rpc/types.h>
607c478bd9Sstevel@tonic-gate #include <rpc/xdr.h>
617c478bd9Sstevel@tonic-gate #include <rpc/auth.h>
627c478bd9Sstevel@tonic-gate #include <rpc/clnt.h>
637c478bd9Sstevel@tonic-gate 
647c478bd9Sstevel@tonic-gate #include <nfs/nfs.h>
657c478bd9Sstevel@tonic-gate #include <nfs/nfs_clnt.h>
667c478bd9Sstevel@tonic-gate 
677c478bd9Sstevel@tonic-gate #include <nfs/rnode.h>
687c478bd9Sstevel@tonic-gate #include <nfs/nfs_acl.h>
697c478bd9Sstevel@tonic-gate #include <nfs/lm.h>
707c478bd9Sstevel@tonic-gate 
717c478bd9Sstevel@tonic-gate #include <vm/hat.h>
727c478bd9Sstevel@tonic-gate #include <vm/as.h>
737c478bd9Sstevel@tonic-gate #include <vm/page.h>
747c478bd9Sstevel@tonic-gate #include <vm/pvn.h>
757c478bd9Sstevel@tonic-gate #include <vm/seg.h>
767c478bd9Sstevel@tonic-gate #include <vm/seg_map.h>
777c478bd9Sstevel@tonic-gate #include <vm/seg_vn.h>
787c478bd9Sstevel@tonic-gate 
797c478bd9Sstevel@tonic-gate static void	nfs3_attr_cache(vnode_t *, vattr_t *, vattr_t *, hrtime_t,
807c478bd9Sstevel@tonic-gate 			cred_t *);
817c478bd9Sstevel@tonic-gate static int	nfs_getattr_cache(vnode_t *, struct vattr *);
827c478bd9Sstevel@tonic-gate static int	nfs_remove_locking_id(vnode_t *, int, char *, char *, int *);
837c478bd9Sstevel@tonic-gate 
847c478bd9Sstevel@tonic-gate struct mi_globals {
857c478bd9Sstevel@tonic-gate 	kmutex_t	mig_lock;  /* lock protecting mig_list */
867c478bd9Sstevel@tonic-gate 	list_t		mig_list;  /* list of NFS v2 or v3 mounts in zone */
877c478bd9Sstevel@tonic-gate 	boolean_t	mig_destructor_called;
887c478bd9Sstevel@tonic-gate };
897c478bd9Sstevel@tonic-gate 
907c478bd9Sstevel@tonic-gate static zone_key_t mi_list_key;
917c478bd9Sstevel@tonic-gate 
927c478bd9Sstevel@tonic-gate /* Debugging flag for PC file shares. */
937c478bd9Sstevel@tonic-gate extern int	share_debug;
947c478bd9Sstevel@tonic-gate 
957c478bd9Sstevel@tonic-gate /*
967c478bd9Sstevel@tonic-gate  * Attributes caching:
977c478bd9Sstevel@tonic-gate  *
987c478bd9Sstevel@tonic-gate  * Attributes are cached in the rnode in struct vattr form.
997c478bd9Sstevel@tonic-gate  * There is a time associated with the cached attributes (r_attrtime)
1007c478bd9Sstevel@tonic-gate  * which tells whether the attributes are valid. The time is initialized
1017c478bd9Sstevel@tonic-gate  * to the difference between current time and the modify time of the vnode
1027c478bd9Sstevel@tonic-gate  * when new attributes are cached. This allows the attributes for
1037c478bd9Sstevel@tonic-gate  * files that have changed recently to be timed out sooner than for files
1047c478bd9Sstevel@tonic-gate  * that have not changed for a long time. There are minimum and maximum
1057c478bd9Sstevel@tonic-gate  * timeout values that can be set per mount point.
1067c478bd9Sstevel@tonic-gate  */
1077c478bd9Sstevel@tonic-gate 
1087c478bd9Sstevel@tonic-gate int
1097c478bd9Sstevel@tonic-gate nfs_waitfor_purge_complete(vnode_t *vp)
1107c478bd9Sstevel@tonic-gate {
1117c478bd9Sstevel@tonic-gate 	rnode_t *rp;
1127c478bd9Sstevel@tonic-gate 	k_sigset_t smask;
1137c478bd9Sstevel@tonic-gate 
1147c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
1157c478bd9Sstevel@tonic-gate 	if (rp->r_serial != NULL && rp->r_serial != curthread) {
1167c478bd9Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
1177c478bd9Sstevel@tonic-gate 		sigintr(&smask, VTOMI(vp)->mi_flags & MI_INT);
1187c478bd9Sstevel@tonic-gate 		while (rp->r_serial != NULL) {
1197c478bd9Sstevel@tonic-gate 			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
1207c478bd9Sstevel@tonic-gate 				sigunintr(&smask);
1217c478bd9Sstevel@tonic-gate 				mutex_exit(&rp->r_statelock);
1227c478bd9Sstevel@tonic-gate 				return (EINTR);
1237c478bd9Sstevel@tonic-gate 			}
1247c478bd9Sstevel@tonic-gate 		}
1257c478bd9Sstevel@tonic-gate 		sigunintr(&smask);
1267c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
1277c478bd9Sstevel@tonic-gate 	}
1287c478bd9Sstevel@tonic-gate 	return (0);
1297c478bd9Sstevel@tonic-gate }
1307c478bd9Sstevel@tonic-gate 
1317c478bd9Sstevel@tonic-gate /*
1327c478bd9Sstevel@tonic-gate  * Validate caches by checking cached attributes. If the cached
1337c478bd9Sstevel@tonic-gate  * attributes have timed out, then get new attributes from the server.
1347c478bd9Sstevel@tonic-gate  * As a side affect, this will do cache invalidation if the attributes
1357c478bd9Sstevel@tonic-gate  * have changed.
1367c478bd9Sstevel@tonic-gate  *
1377c478bd9Sstevel@tonic-gate  * If the attributes have not timed out and if there is a cache
1387c478bd9Sstevel@tonic-gate  * invalidation being done by some other thread, then wait until that
1397c478bd9Sstevel@tonic-gate  * thread has completed the cache invalidation.
1407c478bd9Sstevel@tonic-gate  */
1417c478bd9Sstevel@tonic-gate int
1427c478bd9Sstevel@tonic-gate nfs_validate_caches(vnode_t *vp, cred_t *cr)
1437c478bd9Sstevel@tonic-gate {
1447c478bd9Sstevel@tonic-gate 	int error;
1457c478bd9Sstevel@tonic-gate 	struct vattr va;
1467c478bd9Sstevel@tonic-gate 
1477c478bd9Sstevel@tonic-gate 	if (ATTRCACHE_VALID(vp)) {
1487c478bd9Sstevel@tonic-gate 		error = nfs_waitfor_purge_complete(vp);
1497c478bd9Sstevel@tonic-gate 		if (error)
1507c478bd9Sstevel@tonic-gate 			return (error);
1517c478bd9Sstevel@tonic-gate 		return (0);
1527c478bd9Sstevel@tonic-gate 	}
1537c478bd9Sstevel@tonic-gate 
1547c478bd9Sstevel@tonic-gate 	va.va_mask = AT_ALL;
1557c478bd9Sstevel@tonic-gate 	return (nfs_getattr_otw(vp, &va, cr));
1567c478bd9Sstevel@tonic-gate }
1577c478bd9Sstevel@tonic-gate 
1587c478bd9Sstevel@tonic-gate /*
1597c478bd9Sstevel@tonic-gate  * Validate caches by checking cached attributes. If the cached
1607c478bd9Sstevel@tonic-gate  * attributes have timed out, then get new attributes from the server.
1617c478bd9Sstevel@tonic-gate  * As a side affect, this will do cache invalidation if the attributes
1627c478bd9Sstevel@tonic-gate  * have changed.
1637c478bd9Sstevel@tonic-gate  *
1647c478bd9Sstevel@tonic-gate  * If the attributes have not timed out and if there is a cache
1657c478bd9Sstevel@tonic-gate  * invalidation being done by some other thread, then wait until that
1667c478bd9Sstevel@tonic-gate  * thread has completed the cache invalidation.
1677c478bd9Sstevel@tonic-gate  */
1687c478bd9Sstevel@tonic-gate int
1697c478bd9Sstevel@tonic-gate nfs3_validate_caches(vnode_t *vp, cred_t *cr)
1707c478bd9Sstevel@tonic-gate {
1717c478bd9Sstevel@tonic-gate 	int error;
1727c478bd9Sstevel@tonic-gate 	struct vattr va;
1737c478bd9Sstevel@tonic-gate 
1747c478bd9Sstevel@tonic-gate 	if (ATTRCACHE_VALID(vp)) {
1757c478bd9Sstevel@tonic-gate 		error = nfs_waitfor_purge_complete(vp);
1767c478bd9Sstevel@tonic-gate 		if (error)
1777c478bd9Sstevel@tonic-gate 			return (error);
1787c478bd9Sstevel@tonic-gate 		return (0);
1797c478bd9Sstevel@tonic-gate 	}
1807c478bd9Sstevel@tonic-gate 
1817c478bd9Sstevel@tonic-gate 	va.va_mask = AT_ALL;
1827c478bd9Sstevel@tonic-gate 	return (nfs3_getattr_otw(vp, &va, cr));
1837c478bd9Sstevel@tonic-gate }
1847c478bd9Sstevel@tonic-gate 
1857c478bd9Sstevel@tonic-gate /*
1867c478bd9Sstevel@tonic-gate  * Purge all of the various NFS `data' caches.
1877c478bd9Sstevel@tonic-gate  */
1887c478bd9Sstevel@tonic-gate void
1897c478bd9Sstevel@tonic-gate nfs_purge_caches(vnode_t *vp, int purge_dnlc, cred_t *cr)
1907c478bd9Sstevel@tonic-gate {
1917c478bd9Sstevel@tonic-gate 	rnode_t *rp;
1927c478bd9Sstevel@tonic-gate 	char *contents;
1937c478bd9Sstevel@tonic-gate 	int size;
1947c478bd9Sstevel@tonic-gate 	int error;
1957c478bd9Sstevel@tonic-gate 
1967c478bd9Sstevel@tonic-gate 	/*
1977c478bd9Sstevel@tonic-gate 	 * Purge the DNLC for any entries which refer to this file.
1987c478bd9Sstevel@tonic-gate 	 * Avoid recursive entry into dnlc_purge_vp() in case of a directory.
1997c478bd9Sstevel@tonic-gate 	 */
2007c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
2017c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
2027c478bd9Sstevel@tonic-gate 	if (vp->v_count > 1 &&
2037c478bd9Sstevel@tonic-gate 	    (vp->v_type == VDIR || purge_dnlc == NFS_PURGE_DNLC) &&
2047c478bd9Sstevel@tonic-gate 	    !(rp->r_flags & RINDNLCPURGE)) {
2057c478bd9Sstevel@tonic-gate 		/*
2067c478bd9Sstevel@tonic-gate 		 * Set the RINDNLCPURGE flag to prevent recursive entry
2077c478bd9Sstevel@tonic-gate 		 * into dnlc_purge_vp()
2087c478bd9Sstevel@tonic-gate 		 */
2097c478bd9Sstevel@tonic-gate 		if (vp->v_type == VDIR)
2107c478bd9Sstevel@tonic-gate 			rp->r_flags |= RINDNLCPURGE;
2117c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
2127c478bd9Sstevel@tonic-gate 		dnlc_purge_vp(vp);
2137c478bd9Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
2147c478bd9Sstevel@tonic-gate 		if (rp->r_flags & RINDNLCPURGE)
2157c478bd9Sstevel@tonic-gate 			rp->r_flags &= ~RINDNLCPURGE;
2167c478bd9Sstevel@tonic-gate 	}
2177c478bd9Sstevel@tonic-gate 
2187c478bd9Sstevel@tonic-gate 	/*
2197c478bd9Sstevel@tonic-gate 	 * Clear any readdir state bits and purge the readlink response cache.
2207c478bd9Sstevel@tonic-gate 	 */
2217c478bd9Sstevel@tonic-gate 	contents = rp->r_symlink.contents;
2227c478bd9Sstevel@tonic-gate 	size = rp->r_symlink.size;
2237c478bd9Sstevel@tonic-gate 	rp->r_symlink.contents = NULL;
2247c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
2257c478bd9Sstevel@tonic-gate 
2267c478bd9Sstevel@tonic-gate 	if (contents != NULL) {
2277c478bd9Sstevel@tonic-gate 
2287c478bd9Sstevel@tonic-gate 		kmem_free((void *)contents, size);
2297c478bd9Sstevel@tonic-gate 	}
2307c478bd9Sstevel@tonic-gate 
2317c478bd9Sstevel@tonic-gate 	/*
2327c478bd9Sstevel@tonic-gate 	 * Flush the page cache.
2337c478bd9Sstevel@tonic-gate 	 */
2347c478bd9Sstevel@tonic-gate 	if (vn_has_cached_data(vp)) {
235*da6c28aaSamw 		error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_INVAL, cr, NULL);
2367c478bd9Sstevel@tonic-gate 		if (error && (error == ENOSPC || error == EDQUOT)) {
2377c478bd9Sstevel@tonic-gate 			mutex_enter(&rp->r_statelock);
2387c478bd9Sstevel@tonic-gate 			if (!rp->r_error)
2397c478bd9Sstevel@tonic-gate 				rp->r_error = error;
2407c478bd9Sstevel@tonic-gate 			mutex_exit(&rp->r_statelock);
2417c478bd9Sstevel@tonic-gate 		}
2427c478bd9Sstevel@tonic-gate 	}
2437c478bd9Sstevel@tonic-gate 
2447c478bd9Sstevel@tonic-gate 	/*
2457c478bd9Sstevel@tonic-gate 	 * Flush the readdir response cache.
2467c478bd9Sstevel@tonic-gate 	 */
2477c478bd9Sstevel@tonic-gate 	if (HAVE_RDDIR_CACHE(rp))
2487c478bd9Sstevel@tonic-gate 		nfs_purge_rddir_cache(vp);
2497c478bd9Sstevel@tonic-gate }
2507c478bd9Sstevel@tonic-gate 
2517c478bd9Sstevel@tonic-gate /*
2527c478bd9Sstevel@tonic-gate  * Purge the readdir cache of all entries
2537c478bd9Sstevel@tonic-gate  */
2547c478bd9Sstevel@tonic-gate void
2557c478bd9Sstevel@tonic-gate nfs_purge_rddir_cache(vnode_t *vp)
2567c478bd9Sstevel@tonic-gate {
2577c478bd9Sstevel@tonic-gate 	rnode_t *rp;
2587c478bd9Sstevel@tonic-gate 	rddir_cache *rdc;
2597c478bd9Sstevel@tonic-gate 	rddir_cache *nrdc;
2607c478bd9Sstevel@tonic-gate 
2617c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
2627c478bd9Sstevel@tonic-gate top:
2637c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
2647c478bd9Sstevel@tonic-gate 	rp->r_direof = NULL;
2657c478bd9Sstevel@tonic-gate 	rp->r_flags &= ~RLOOKUP;
2667c478bd9Sstevel@tonic-gate 	rp->r_flags |= RREADDIRPLUS;
2677c478bd9Sstevel@tonic-gate 	rdc = avl_first(&rp->r_dir);
2687c478bd9Sstevel@tonic-gate 	while (rdc != NULL) {
2697c478bd9Sstevel@tonic-gate 		nrdc = AVL_NEXT(&rp->r_dir, rdc);
2707c478bd9Sstevel@tonic-gate 		avl_remove(&rp->r_dir, rdc);
2717c478bd9Sstevel@tonic-gate 		rddir_cache_rele(rdc);
2727c478bd9Sstevel@tonic-gate 		rdc = nrdc;
2737c478bd9Sstevel@tonic-gate 	}
2747c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
2757c478bd9Sstevel@tonic-gate }
2767c478bd9Sstevel@tonic-gate 
2777c478bd9Sstevel@tonic-gate /*
2787c478bd9Sstevel@tonic-gate  * Do a cache check based on the post-operation attributes.
2797c478bd9Sstevel@tonic-gate  * Then make them the new cached attributes.  If no attributes
2807c478bd9Sstevel@tonic-gate  * were returned, then mark the attributes as timed out.
2817c478bd9Sstevel@tonic-gate  */
2827c478bd9Sstevel@tonic-gate void
2837c478bd9Sstevel@tonic-gate nfs3_cache_post_op_attr(vnode_t *vp, post_op_attr *poap, hrtime_t t, cred_t *cr)
2847c478bd9Sstevel@tonic-gate {
2857c478bd9Sstevel@tonic-gate 	vattr_t attr;
2867c478bd9Sstevel@tonic-gate 
2877c478bd9Sstevel@tonic-gate 	if (!poap->attributes) {
2887c478bd9Sstevel@tonic-gate 		PURGE_ATTRCACHE(vp);
2897c478bd9Sstevel@tonic-gate 		return;
2907c478bd9Sstevel@tonic-gate 	}
2917c478bd9Sstevel@tonic-gate 	(void) nfs3_cache_fattr3(vp, &poap->attr, &attr, t, cr);
2927c478bd9Sstevel@tonic-gate }
2937c478bd9Sstevel@tonic-gate 
2947c478bd9Sstevel@tonic-gate /*
2957c478bd9Sstevel@tonic-gate  * Same as above, but using a vattr
2967c478bd9Sstevel@tonic-gate  */
2977c478bd9Sstevel@tonic-gate void
2987c478bd9Sstevel@tonic-gate nfs3_cache_post_op_vattr(vnode_t *vp, post_op_vattr *poap, hrtime_t t,
2997c478bd9Sstevel@tonic-gate     cred_t *cr)
3007c478bd9Sstevel@tonic-gate {
3017c478bd9Sstevel@tonic-gate 	if (!poap->attributes) {
3027c478bd9Sstevel@tonic-gate 		PURGE_ATTRCACHE(vp);
3037c478bd9Sstevel@tonic-gate 		return;
3047c478bd9Sstevel@tonic-gate 	}
3057c478bd9Sstevel@tonic-gate 	nfs_attr_cache(vp, poap->fres.vap, t, cr);
3067c478bd9Sstevel@tonic-gate }
3077c478bd9Sstevel@tonic-gate 
3087c478bd9Sstevel@tonic-gate /*
3097c478bd9Sstevel@tonic-gate  * Do a cache check based on the weak cache consistency attributes.
3107c478bd9Sstevel@tonic-gate  * These consist of a small set of pre-operation attributes and the
3117c478bd9Sstevel@tonic-gate  * full set of post-operation attributes.
3127c478bd9Sstevel@tonic-gate  *
3137c478bd9Sstevel@tonic-gate  * If we are given the pre-operation attributes, then use them to
3147c478bd9Sstevel@tonic-gate  * check the validity of the various caches.  Then, if we got the
3157c478bd9Sstevel@tonic-gate  * post-operation attributes, make them the new cached attributes.
3167c478bd9Sstevel@tonic-gate  * If we didn't get the post-operation attributes, then mark the
3177c478bd9Sstevel@tonic-gate  * attribute cache as timed out so that the next reference will
3187c478bd9Sstevel@tonic-gate  * cause a GETATTR to the server to refresh with the current
3197c478bd9Sstevel@tonic-gate  * attributes.
3207c478bd9Sstevel@tonic-gate  *
3217c478bd9Sstevel@tonic-gate  * Otherwise, if we didn't get the pre-operation attributes, but
3227c478bd9Sstevel@tonic-gate  * we did get the post-operation attributes, then use these
3237c478bd9Sstevel@tonic-gate  * attributes to check the validity of the various caches.  This
3247c478bd9Sstevel@tonic-gate  * will probably cause a flush of the caches because if the
3257c478bd9Sstevel@tonic-gate  * operation succeeded, the attributes of the object were changed
3267c478bd9Sstevel@tonic-gate  * in some way from the old post-operation attributes.  This
3277c478bd9Sstevel@tonic-gate  * should be okay because it is the safe thing to do.  After
3287c478bd9Sstevel@tonic-gate  * checking the data caches, then we make these the new cached
3297c478bd9Sstevel@tonic-gate  * attributes.
3307c478bd9Sstevel@tonic-gate  *
3317c478bd9Sstevel@tonic-gate  * Otherwise, we didn't get either the pre- or post-operation
3327c478bd9Sstevel@tonic-gate  * attributes.  Simply mark the attribute cache as timed out so
3337c478bd9Sstevel@tonic-gate  * the next reference will cause a GETATTR to the server to
3347c478bd9Sstevel@tonic-gate  * refresh with the current attributes.
3357c478bd9Sstevel@tonic-gate  *
3367c478bd9Sstevel@tonic-gate  * If an error occurred trying to convert the over the wire
3377c478bd9Sstevel@tonic-gate  * attributes to a vattr, then simply mark the attribute cache as
3387c478bd9Sstevel@tonic-gate  * timed out.
3397c478bd9Sstevel@tonic-gate  */
3407c478bd9Sstevel@tonic-gate void
3417c478bd9Sstevel@tonic-gate nfs3_cache_wcc_data(vnode_t *vp, wcc_data *wccp, hrtime_t t, cred_t *cr)
3427c478bd9Sstevel@tonic-gate {
3437c478bd9Sstevel@tonic-gate 	vattr_t bva;
3447c478bd9Sstevel@tonic-gate 	vattr_t ava;
3457c478bd9Sstevel@tonic-gate 
3467c478bd9Sstevel@tonic-gate 	if (wccp->after.attributes) {
3477c478bd9Sstevel@tonic-gate 		if (fattr3_to_vattr(vp, &wccp->after.attr, &ava)) {
3487c478bd9Sstevel@tonic-gate 			PURGE_ATTRCACHE(vp);
3497c478bd9Sstevel@tonic-gate 			return;
3507c478bd9Sstevel@tonic-gate 		}
3517c478bd9Sstevel@tonic-gate 		if (wccp->before.attributes) {
3527c478bd9Sstevel@tonic-gate 			bva.va_ctime.tv_sec = wccp->before.attr.ctime.seconds;
3537c478bd9Sstevel@tonic-gate 			bva.va_ctime.tv_nsec = wccp->before.attr.ctime.nseconds;
3547c478bd9Sstevel@tonic-gate 			bva.va_mtime.tv_sec = wccp->before.attr.mtime.seconds;
3557c478bd9Sstevel@tonic-gate 			bva.va_mtime.tv_nsec = wccp->before.attr.mtime.nseconds;
3567c478bd9Sstevel@tonic-gate 			bva.va_size = wccp->before.attr.size;
3577c478bd9Sstevel@tonic-gate 			nfs3_attr_cache(vp, &bva, &ava, t, cr);
3587c478bd9Sstevel@tonic-gate 		} else
3597c478bd9Sstevel@tonic-gate 			nfs_attr_cache(vp, &ava, t, cr);
3607c478bd9Sstevel@tonic-gate 	} else {
3617c478bd9Sstevel@tonic-gate 		PURGE_ATTRCACHE(vp);
3627c478bd9Sstevel@tonic-gate 	}
3637c478bd9Sstevel@tonic-gate }
3647c478bd9Sstevel@tonic-gate 
3657c478bd9Sstevel@tonic-gate /*
3667c478bd9Sstevel@tonic-gate  * Set attributes cache for given vnode using nfsattr.
3677c478bd9Sstevel@tonic-gate  *
3687c478bd9Sstevel@tonic-gate  * This routine does not do cache validation with the attributes.
3697c478bd9Sstevel@tonic-gate  *
3707c478bd9Sstevel@tonic-gate  * If an error occurred trying to convert the over the wire
3717c478bd9Sstevel@tonic-gate  * attributes to a vattr, then simply mark the attribute cache as
3727c478bd9Sstevel@tonic-gate  * timed out.
3737c478bd9Sstevel@tonic-gate  */
3747c478bd9Sstevel@tonic-gate void
3757c478bd9Sstevel@tonic-gate nfs_attrcache(vnode_t *vp, struct nfsfattr *na, hrtime_t t)
3767c478bd9Sstevel@tonic-gate {
3777c478bd9Sstevel@tonic-gate 	rnode_t *rp;
3787c478bd9Sstevel@tonic-gate 	struct vattr va;
3797c478bd9Sstevel@tonic-gate 
3807c478bd9Sstevel@tonic-gate 	if (!nattr_to_vattr(vp, na, &va)) {
3817c478bd9Sstevel@tonic-gate 		rp = VTOR(vp);
3827c478bd9Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
3837c478bd9Sstevel@tonic-gate 		if (rp->r_mtime <= t)
3847c478bd9Sstevel@tonic-gate 			nfs_attrcache_va(vp, &va);
3857c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
3867c478bd9Sstevel@tonic-gate 	} else {
3877c478bd9Sstevel@tonic-gate 		PURGE_ATTRCACHE(vp);
3887c478bd9Sstevel@tonic-gate 	}
3897c478bd9Sstevel@tonic-gate }
3907c478bd9Sstevel@tonic-gate 
3917c478bd9Sstevel@tonic-gate /*
3927c478bd9Sstevel@tonic-gate  * Set attributes cache for given vnode using fattr3.
3937c478bd9Sstevel@tonic-gate  *
3947c478bd9Sstevel@tonic-gate  * This routine does not do cache validation with the attributes.
3957c478bd9Sstevel@tonic-gate  *
3967c478bd9Sstevel@tonic-gate  * If an error occurred trying to convert the over the wire
3977c478bd9Sstevel@tonic-gate  * attributes to a vattr, then simply mark the attribute cache as
3987c478bd9Sstevel@tonic-gate  * timed out.
3997c478bd9Sstevel@tonic-gate  */
4007c478bd9Sstevel@tonic-gate void
4017c478bd9Sstevel@tonic-gate nfs3_attrcache(vnode_t *vp, fattr3 *na, hrtime_t t)
4027c478bd9Sstevel@tonic-gate {
4037c478bd9Sstevel@tonic-gate 	rnode_t *rp;
4047c478bd9Sstevel@tonic-gate 	struct vattr va;
4057c478bd9Sstevel@tonic-gate 
4067c478bd9Sstevel@tonic-gate 	if (!fattr3_to_vattr(vp, na, &va)) {
4077c478bd9Sstevel@tonic-gate 		rp = VTOR(vp);
4087c478bd9Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
4097c478bd9Sstevel@tonic-gate 		if (rp->r_mtime <= t)
4107c478bd9Sstevel@tonic-gate 			nfs_attrcache_va(vp, &va);
4117c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
4127c478bd9Sstevel@tonic-gate 	} else {
4137c478bd9Sstevel@tonic-gate 		PURGE_ATTRCACHE(vp);
4147c478bd9Sstevel@tonic-gate 	}
4157c478bd9Sstevel@tonic-gate }
4167c478bd9Sstevel@tonic-gate 
4177c478bd9Sstevel@tonic-gate /*
4187c478bd9Sstevel@tonic-gate  * Do a cache check based on attributes returned over the wire.  The
4197c478bd9Sstevel@tonic-gate  * new attributes are cached.
4207c478bd9Sstevel@tonic-gate  *
4217c478bd9Sstevel@tonic-gate  * If an error occurred trying to convert the over the wire attributes
4227c478bd9Sstevel@tonic-gate  * to a vattr, then just return that error.
4237c478bd9Sstevel@tonic-gate  *
4247c478bd9Sstevel@tonic-gate  * As a side affect, the vattr argument is filled in with the converted
4257c478bd9Sstevel@tonic-gate  * attributes.
4267c478bd9Sstevel@tonic-gate  */
4277c478bd9Sstevel@tonic-gate int
4287c478bd9Sstevel@tonic-gate nfs_cache_fattr(vnode_t *vp, struct nfsfattr *na, vattr_t *vap, hrtime_t t,
4297c478bd9Sstevel@tonic-gate     cred_t *cr)
4307c478bd9Sstevel@tonic-gate {
4317c478bd9Sstevel@tonic-gate 	int error;
4327c478bd9Sstevel@tonic-gate 
4337c478bd9Sstevel@tonic-gate 	error = nattr_to_vattr(vp, na, vap);
4347c478bd9Sstevel@tonic-gate 	if (error)
4357c478bd9Sstevel@tonic-gate 		return (error);
4367c478bd9Sstevel@tonic-gate 	nfs_attr_cache(vp, vap, t, cr);
4377c478bd9Sstevel@tonic-gate 	return (0);
4387c478bd9Sstevel@tonic-gate }
4397c478bd9Sstevel@tonic-gate 
4407c478bd9Sstevel@tonic-gate /*
4417c478bd9Sstevel@tonic-gate  * Do a cache check based on attributes returned over the wire.  The
4427c478bd9Sstevel@tonic-gate  * new attributes are cached.
4437c478bd9Sstevel@tonic-gate  *
4447c478bd9Sstevel@tonic-gate  * If an error occurred trying to convert the over the wire attributes
4457c478bd9Sstevel@tonic-gate  * to a vattr, then just return that error.
4467c478bd9Sstevel@tonic-gate  *
4477c478bd9Sstevel@tonic-gate  * As a side affect, the vattr argument is filled in with the converted
4487c478bd9Sstevel@tonic-gate  * attributes.
4497c478bd9Sstevel@tonic-gate  */
4507c478bd9Sstevel@tonic-gate int
4517c478bd9Sstevel@tonic-gate nfs3_cache_fattr3(vnode_t *vp, fattr3 *na, vattr_t *vap, hrtime_t t, cred_t *cr)
4527c478bd9Sstevel@tonic-gate {
4537c478bd9Sstevel@tonic-gate 	int error;
4547c478bd9Sstevel@tonic-gate 
4557c478bd9Sstevel@tonic-gate 	error = fattr3_to_vattr(vp, na, vap);
4567c478bd9Sstevel@tonic-gate 	if (error)
4577c478bd9Sstevel@tonic-gate 		return (error);
4587c478bd9Sstevel@tonic-gate 	nfs_attr_cache(vp, vap, t, cr);
4597c478bd9Sstevel@tonic-gate 	return (0);
4607c478bd9Sstevel@tonic-gate }
4617c478bd9Sstevel@tonic-gate 
4627c478bd9Sstevel@tonic-gate /*
4637c478bd9Sstevel@tonic-gate  * Use the passed in virtual attributes to check to see whether the
4647c478bd9Sstevel@tonic-gate  * data and metadata caches are valid, cache the new attributes, and
4657c478bd9Sstevel@tonic-gate  * then do the cache invalidation if required.
4667c478bd9Sstevel@tonic-gate  *
4677c478bd9Sstevel@tonic-gate  * The cache validation and caching of the new attributes is done
4687c478bd9Sstevel@tonic-gate  * atomically via the use of the mutex, r_statelock.  If required,
4697c478bd9Sstevel@tonic-gate  * the cache invalidation is done atomically w.r.t. the cache
4707c478bd9Sstevel@tonic-gate  * validation and caching of the attributes via the pseudo lock,
4717c478bd9Sstevel@tonic-gate  * r_serial.
4727c478bd9Sstevel@tonic-gate  *
4737c478bd9Sstevel@tonic-gate  * This routine is used to do cache validation and attributes caching
4747c478bd9Sstevel@tonic-gate  * for operations with a single set of post operation attributes.
4757c478bd9Sstevel@tonic-gate  */
4767c478bd9Sstevel@tonic-gate void
4777c478bd9Sstevel@tonic-gate nfs_attr_cache(vnode_t *vp, vattr_t *vap, hrtime_t t, cred_t *cr)
4787c478bd9Sstevel@tonic-gate {
4797c478bd9Sstevel@tonic-gate 	rnode_t *rp;
4807c478bd9Sstevel@tonic-gate 	int mtime_changed;
4817c478bd9Sstevel@tonic-gate 	int ctime_changed;
4827c478bd9Sstevel@tonic-gate 	vsecattr_t *vsp;
4837c478bd9Sstevel@tonic-gate 	int was_serial;
4847c478bd9Sstevel@tonic-gate 
4857c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
4867c478bd9Sstevel@tonic-gate 
4877c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
4887c478bd9Sstevel@tonic-gate 
4897c478bd9Sstevel@tonic-gate 	if (rp->r_serial != curthread) {
4907c478bd9Sstevel@tonic-gate 		klwp_t *lwp = ttolwp(curthread);
4917c478bd9Sstevel@tonic-gate 
4927c478bd9Sstevel@tonic-gate 		was_serial = 0;
4937c478bd9Sstevel@tonic-gate 		if (lwp != NULL)
4947c478bd9Sstevel@tonic-gate 			lwp->lwp_nostop++;
4957c478bd9Sstevel@tonic-gate 		while (rp->r_serial != NULL) {
4967c478bd9Sstevel@tonic-gate 			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
4977c478bd9Sstevel@tonic-gate 				mutex_exit(&rp->r_statelock);
4987c478bd9Sstevel@tonic-gate 				if (lwp != NULL)
4997c478bd9Sstevel@tonic-gate 					lwp->lwp_nostop--;
5007c478bd9Sstevel@tonic-gate 				return;
5017c478bd9Sstevel@tonic-gate 			}
5027c478bd9Sstevel@tonic-gate 		}
5037c478bd9Sstevel@tonic-gate 		if (lwp != NULL)
5047c478bd9Sstevel@tonic-gate 			lwp->lwp_nostop--;
5057c478bd9Sstevel@tonic-gate 	} else
5067c478bd9Sstevel@tonic-gate 		was_serial = 1;
5077c478bd9Sstevel@tonic-gate 
5087c478bd9Sstevel@tonic-gate 	if (rp->r_mtime > t) {
50900fdf600Smaheshvs 		if (!CACHE_VALID(rp, vap->va_mtime, vap->va_size))
51000fdf600Smaheshvs 			PURGE_ATTRCACHE_LOCKED(rp);
5117c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
5127c478bd9Sstevel@tonic-gate 		return;
5137c478bd9Sstevel@tonic-gate 	}
5147c478bd9Sstevel@tonic-gate 
5157c478bd9Sstevel@tonic-gate 	if (!(rp->r_flags & RWRITEATTR)) {
5167c478bd9Sstevel@tonic-gate 		if (!CACHE_VALID(rp, vap->va_mtime, vap->va_size))
5177c478bd9Sstevel@tonic-gate 			mtime_changed = 1;
5187c478bd9Sstevel@tonic-gate 		else
5197c478bd9Sstevel@tonic-gate 			mtime_changed = 0;
5207c478bd9Sstevel@tonic-gate 		if (rp->r_attr.va_ctime.tv_sec != vap->va_ctime.tv_sec ||
5217c478bd9Sstevel@tonic-gate 		    rp->r_attr.va_ctime.tv_nsec != vap->va_ctime.tv_nsec)
5227c478bd9Sstevel@tonic-gate 			ctime_changed = 1;
5237c478bd9Sstevel@tonic-gate 		else
5247c478bd9Sstevel@tonic-gate 			ctime_changed = 0;
5257c478bd9Sstevel@tonic-gate 	} else if (rp->r_size != vap->va_size &&
5267c478bd9Sstevel@tonic-gate 		    (!vn_has_cached_data(vp) ||
5277c478bd9Sstevel@tonic-gate 		    (!(rp->r_flags & RDIRTY) && rp->r_count == 0))) {
5287c478bd9Sstevel@tonic-gate 		mtime_changed = 1;
5297c478bd9Sstevel@tonic-gate 		ctime_changed = 0;
5307c478bd9Sstevel@tonic-gate 	} else {
5317c478bd9Sstevel@tonic-gate 		mtime_changed = 0;
5327c478bd9Sstevel@tonic-gate 		ctime_changed = 0;
5337c478bd9Sstevel@tonic-gate 	}
5347c478bd9Sstevel@tonic-gate 
5357c478bd9Sstevel@tonic-gate 	nfs_attrcache_va(vp, vap);
5367c478bd9Sstevel@tonic-gate 
5377c478bd9Sstevel@tonic-gate 	if (!mtime_changed && !ctime_changed) {
5387c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
5397c478bd9Sstevel@tonic-gate 		return;
5407c478bd9Sstevel@tonic-gate 	}
5417c478bd9Sstevel@tonic-gate 
5427c478bd9Sstevel@tonic-gate 	rp->r_serial = curthread;
5437c478bd9Sstevel@tonic-gate 
5447c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
5457c478bd9Sstevel@tonic-gate 
5467c478bd9Sstevel@tonic-gate 	if (mtime_changed)
5477c478bd9Sstevel@tonic-gate 		nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);
5487c478bd9Sstevel@tonic-gate 
5497c478bd9Sstevel@tonic-gate 	if (ctime_changed) {
5507c478bd9Sstevel@tonic-gate 		(void) nfs_access_purge_rp(rp);
5517c478bd9Sstevel@tonic-gate 		if (rp->r_secattr != NULL) {
5527c478bd9Sstevel@tonic-gate 			mutex_enter(&rp->r_statelock);
5537c478bd9Sstevel@tonic-gate 			vsp = rp->r_secattr;
5547c478bd9Sstevel@tonic-gate 			rp->r_secattr = NULL;
5557c478bd9Sstevel@tonic-gate 			mutex_exit(&rp->r_statelock);
5567c478bd9Sstevel@tonic-gate 			if (vsp != NULL)
5577c478bd9Sstevel@tonic-gate 				nfs_acl_free(vsp);
5587c478bd9Sstevel@tonic-gate 		}
5597c478bd9Sstevel@tonic-gate 	}
5607c478bd9Sstevel@tonic-gate 
5617c478bd9Sstevel@tonic-gate 	if (!was_serial) {
5627c478bd9Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
5637c478bd9Sstevel@tonic-gate 		rp->r_serial = NULL;
5647c478bd9Sstevel@tonic-gate 		cv_broadcast(&rp->r_cv);
5657c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
5667c478bd9Sstevel@tonic-gate 	}
5677c478bd9Sstevel@tonic-gate }
5687c478bd9Sstevel@tonic-gate 
5697c478bd9Sstevel@tonic-gate /*
5707c478bd9Sstevel@tonic-gate  * Use the passed in "before" virtual attributes to check to see
5717c478bd9Sstevel@tonic-gate  * whether the data and metadata caches are valid, cache the "after"
5727c478bd9Sstevel@tonic-gate  * new attributes, and then do the cache invalidation if required.
5737c478bd9Sstevel@tonic-gate  *
5747c478bd9Sstevel@tonic-gate  * The cache validation and caching of the new attributes is done
5757c478bd9Sstevel@tonic-gate  * atomically via the use of the mutex, r_statelock.  If required,
5767c478bd9Sstevel@tonic-gate  * the cache invalidation is done atomically w.r.t. the cache
5777c478bd9Sstevel@tonic-gate  * validation and caching of the attributes via the pseudo lock,
5787c478bd9Sstevel@tonic-gate  * r_serial.
5797c478bd9Sstevel@tonic-gate  *
5807c478bd9Sstevel@tonic-gate  * This routine is used to do cache validation and attributes caching
5817c478bd9Sstevel@tonic-gate  * for operations with both pre operation attributes and post operation
5827c478bd9Sstevel@tonic-gate  * attributes.
5837c478bd9Sstevel@tonic-gate  */
static void
nfs3_attr_cache(vnode_t *vp, vattr_t *bvap, vattr_t *avap, hrtime_t t,
    cred_t *cr)
{
	rnode_t *rp;
	int mtime_changed;
	int ctime_changed;
	vsecattr_t *vsp;
	int was_serial;

	rp = VTOR(vp);

	mutex_enter(&rp->r_statelock);

	/*
	 * Acquire the r_serial pseudo lock unless this thread already
	 * holds it, in which case this call is nested under the outer
	 * holder's serialization and must not release it on the way out.
	 */
	if (rp->r_serial != curthread) {
		klwp_t *lwp = ttolwp(curthread);

		was_serial = 0;
		/*
		 * Bump lwp_nostop so the lwp is not stopped while we
		 * sleep on r_cv waiting for the pseudo lock holder.
		 */
		if (lwp != NULL)
			lwp->lwp_nostop++;
		while (rp->r_serial != NULL) {
			if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
				/* Interrupted by a signal: skip the update. */
				mutex_exit(&rp->r_statelock);
				if (lwp != NULL)
					lwp->lwp_nostop--;
				return;
			}
		}
		if (lwp != NULL)
			lwp->lwp_nostop--;
	} else
		was_serial = 1;

	/*
	 * If a change was detected (r_mtime updated) after this request
	 * was started at time t, the "before" attributes are too stale
	 * to compare against.  Just drop the attribute cache if the
	 * "after" attributes disagree with what is cached, and bail.
	 */
	if (rp->r_mtime > t) {
		if (!CACHE_VALID(rp, avap->va_mtime, avap->va_size))
			PURGE_ATTRCACHE_LOCKED(rp);
		mutex_exit(&rp->r_statelock);
		return;
	}

	/*
	 * While RWRITEATTR is set a local write is the source of truth
	 * for the cached attributes (nfs_attrcache_va() clears the flag),
	 * so suppress server-side change detection in that case.
	 */
	if (!(rp->r_flags & RWRITEATTR)) {
		/* "before" mtime/size mismatch => file data changed. */
		if (!CACHE_VALID(rp, bvap->va_mtime, bvap->va_size))
			mtime_changed = 1;
		else
			mtime_changed = 0;
		/* "before" ctime mismatch => metadata/ACL changed. */
		if (rp->r_attr.va_ctime.tv_sec != bvap->va_ctime.tv_sec ||
		    rp->r_attr.va_ctime.tv_nsec != bvap->va_ctime.tv_nsec)
			ctime_changed = 1;
		else
			ctime_changed = 0;
	} else {
		mtime_changed = 0;
		ctime_changed = 0;
	}

	/* Cache the "after" attributes; r_statelock is still held. */
	nfs_attrcache_va(vp, avap);

	if (!mtime_changed && !ctime_changed) {
		mutex_exit(&rp->r_statelock);
		return;
	}

	/*
	 * Take the serializing pseudo lock before dropping r_statelock so
	 * the cache invalidation below stays atomic w.r.t. other updaters.
	 */
	rp->r_serial = curthread;

	mutex_exit(&rp->r_statelock);

	/* Data changed on the server: purge cached pages (keep the DNLC). */
	if (mtime_changed)
		nfs_purge_caches(vp, NFS_NOPURGE_DNLC, cr);

	if (ctime_changed) {
		/* Metadata changed: drop cached access info and ACL. */
		(void) nfs_access_purge_rp(rp);
		if (rp->r_secattr != NULL) {
			mutex_enter(&rp->r_statelock);
			vsp = rp->r_secattr;
			rp->r_secattr = NULL;
			mutex_exit(&rp->r_statelock);
			if (vsp != NULL)
				nfs_acl_free(vsp);
		}
	}

	/* Release the pseudo lock (unless held by an outer caller). */
	if (!was_serial) {
		mutex_enter(&rp->r_statelock);
		rp->r_serial = NULL;
		cv_broadcast(&rp->r_cv);
		mutex_exit(&rp->r_statelock);
	}
}
6727c478bd9Sstevel@tonic-gate 
6737c478bd9Sstevel@tonic-gate /*
6747c478bd9Sstevel@tonic-gate  * Set attributes cache for given vnode using virtual attributes.
6757c478bd9Sstevel@tonic-gate  *
6767c478bd9Sstevel@tonic-gate  * Set the timeout value on the attribute cache and fill it
6777c478bd9Sstevel@tonic-gate  * with the passed in attributes.
6787c478bd9Sstevel@tonic-gate  *
6797c478bd9Sstevel@tonic-gate  * The caller must be holding r_statelock.
6807c478bd9Sstevel@tonic-gate  */
void
nfs_attrcache_va(vnode_t *vp, struct vattr *va)
{
	rnode_t *rp;
	mntinfo_t *mi;
	hrtime_t delta;		/* attribute cache lifetime, in nanoseconds */
	hrtime_t now;

	rp = VTOR(vp);

	ASSERT(MUTEX_HELD(&rp->r_statelock));

	now = gethrtime();

	mi = VTOMI(vp);

	/*
	 * Delta is the number of nanoseconds that we will
	 * cache the attributes of the file.  It is based on
	 * the number of nanoseconds since the last time that
	 * we detected a change.  The assumption is that files
	 * that changed recently are likely to change again.
	 * There is a minimum and a maximum for regular files
	 * and for directories which is enforced though.
	 *
	 * Using the time since last change was detected
	 * eliminates direct comparison or calculation
	 * using mixed client and server times.  NFS does
	 * not make any assumptions regarding the client
	 * and server clocks being synchronized.
	 */
	/*
	 * Record "change detected now" if mtime or size moved.  This
	 * must happen before delta is computed below, since delta is
	 * derived from r_mtime.
	 */
	if (va->va_mtime.tv_sec != rp->r_attr.va_mtime.tv_sec ||
	    va->va_mtime.tv_nsec != rp->r_attr.va_mtime.tv_nsec ||
	    va->va_size != rp->r_attr.va_size)
		rp->r_mtime = now;

	if ((mi->mi_flags & MI_NOAC) || (vp->v_flag & VNOCACHE))
		delta = 0;	/* caching disabled: expire immediately */
	else {
		delta = now - rp->r_mtime;
		/* Clamp to the mount's per-type min/max bounds. */
		if (vp->v_type == VDIR) {
			if (delta < mi->mi_acdirmin)
				delta = mi->mi_acdirmin;
			else if (delta > mi->mi_acdirmax)
				delta = mi->mi_acdirmax;
		} else {
			if (delta < mi->mi_acregmin)
				delta = mi->mi_acregmin;
			else if (delta > mi->mi_acregmax)
				delta = mi->mi_acregmax;
		}
	}
	rp->r_attrtime = now + delta;	/* expiry time of the cached attrs */
	rp->r_attr = *va;
	/*
	 * Update the size of the file if there is no cached data or if
	 * the cached data is clean and there is no data being written
	 * out.
	 */
	if (rp->r_size != va->va_size &&
	    (!vn_has_cached_data(vp) ||
	    (!(rp->r_flags & RDIRTY) && rp->r_count == 0)))
		rp->r_size = va->va_size;
	nfs_setswaplike(vp, va);
	/* Fresh attributes are now cached; local-write override ends. */
	rp->r_flags &= ~RWRITEATTR;
}
7477c478bd9Sstevel@tonic-gate 
7487c478bd9Sstevel@tonic-gate /*
7497c478bd9Sstevel@tonic-gate  * Fill in attribute from the cache.
7507c478bd9Sstevel@tonic-gate  * If valid, then return 0 to indicate that no error occurred,
7517c478bd9Sstevel@tonic-gate  * otherwise return 1 to indicate that an error occurred.
7527c478bd9Sstevel@tonic-gate  */
7537c478bd9Sstevel@tonic-gate static int
7547c478bd9Sstevel@tonic-gate nfs_getattr_cache(vnode_t *vp, struct vattr *vap)
7557c478bd9Sstevel@tonic-gate {
7567c478bd9Sstevel@tonic-gate 	rnode_t *rp;
7577c478bd9Sstevel@tonic-gate 
7587c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
7597c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
7607c478bd9Sstevel@tonic-gate 	if (ATTRCACHE_VALID(vp)) {
7617c478bd9Sstevel@tonic-gate 		/*
7627c478bd9Sstevel@tonic-gate 		 * Cached attributes are valid
7637c478bd9Sstevel@tonic-gate 		 */
7647c478bd9Sstevel@tonic-gate 		*vap = rp->r_attr;
7657c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
7667c478bd9Sstevel@tonic-gate 		return (0);
7677c478bd9Sstevel@tonic-gate 	}
7687c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
7697c478bd9Sstevel@tonic-gate 	return (1);
7707c478bd9Sstevel@tonic-gate }
7717c478bd9Sstevel@tonic-gate 
7727c478bd9Sstevel@tonic-gate /*
7737c478bd9Sstevel@tonic-gate  * Get attributes over-the-wire and update attributes cache
7747c478bd9Sstevel@tonic-gate  * if no error occurred in the over-the-wire operation.
7757c478bd9Sstevel@tonic-gate  * Return 0 if successful, otherwise error.
7767c478bd9Sstevel@tonic-gate  */
int
nfs_getattr_otw(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
	int error;
	struct nfsattrstat ns;
	int douprintf;
	mntinfo_t *mi;
	failinfo_t fi;
	hrtime_t t;

	mi = VTOMI(vp);
	/* Failover information passed through to rfs2call(). */
	fi.vp = vp;
	fi.fhp = NULL;		/* no need to update, filehandle not copied */
	fi.copyproc = nfscopyfh;
	fi.lookupproc = nfslookup;
	fi.xattrdirproc = acl_getxattrdir2;

	/*
	 * Prefer the ACL protocol's getattr when the mount supports it.
	 * The MI_ACL re-test after the call is intentional, not a typo:
	 * presumably acl_getattr2_otw() can clear MI_ACL when the server
	 * turns out not to speak the ACL protocol, in which case we fall
	 * through to the plain NFS GETATTR below.  NOTE(review): confirm
	 * against acl_getattr2_otw().
	 */
	if (mi->mi_flags & MI_ACL) {
		error = acl_getattr2_otw(vp, vap, cr);
		if (mi->mi_flags & MI_ACL)
			return (error);
	}

	douprintf = 1;

	/* Sample the time before the call, for attribute cache validation. */
	t = gethrtime();

	error = rfs2call(mi, RFS_GETATTR,
			xdr_fhandle, (caddr_t)VTOFH(vp),
			xdr_attrstat, (caddr_t)&ns, cr,
			&douprintf, &ns.ns_status, 0, &fi);

	if (!error) {
		error = geterrno(ns.ns_status);
		if (!error)
			error = nfs_cache_fattr(vp, &ns.ns_attr, vap, t, cr);
		else {
			/* NFS-level error; drop stale filehandles if any. */
			PURGE_STALE_FH(error, vp, cr);
		}
	}

	return (error);
}
8207c478bd9Sstevel@tonic-gate 
8217c478bd9Sstevel@tonic-gate /*
8227c478bd9Sstevel@tonic-gate  * Return either cached ot remote attributes. If get remote attr
8237c478bd9Sstevel@tonic-gate  * use them to check and invalidate caches, then cache the new attributes.
8247c478bd9Sstevel@tonic-gate  */
8257c478bd9Sstevel@tonic-gate int
8267c478bd9Sstevel@tonic-gate nfsgetattr(vnode_t *vp, struct vattr *vap, cred_t *cr)
8277c478bd9Sstevel@tonic-gate {
8287c478bd9Sstevel@tonic-gate 	int error;
8297c478bd9Sstevel@tonic-gate 	rnode_t *rp;
8307c478bd9Sstevel@tonic-gate 
8317c478bd9Sstevel@tonic-gate 	/*
8327c478bd9Sstevel@tonic-gate 	 * If we've got cached attributes, we're done, otherwise go
8337c478bd9Sstevel@tonic-gate 	 * to the server to get attributes, which will update the cache
8347c478bd9Sstevel@tonic-gate 	 * in the process.
8357c478bd9Sstevel@tonic-gate 	 */
8367c478bd9Sstevel@tonic-gate 	error = nfs_getattr_cache(vp, vap);
8377c478bd9Sstevel@tonic-gate 	if (error)
8387c478bd9Sstevel@tonic-gate 		error = nfs_getattr_otw(vp, vap, cr);
8397c478bd9Sstevel@tonic-gate 
8407c478bd9Sstevel@tonic-gate 	/* Return the client's view of file size */
8417c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
8427c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
8437c478bd9Sstevel@tonic-gate 	vap->va_size = rp->r_size;
8447c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
8457c478bd9Sstevel@tonic-gate 
8467c478bd9Sstevel@tonic-gate 	return (error);
8477c478bd9Sstevel@tonic-gate }
8487c478bd9Sstevel@tonic-gate 
8497c478bd9Sstevel@tonic-gate /*
8507c478bd9Sstevel@tonic-gate  * Get attributes over-the-wire and update attributes cache
8517c478bd9Sstevel@tonic-gate  * if no error occurred in the over-the-wire operation.
8527c478bd9Sstevel@tonic-gate  * Return 0 if successful, otherwise error.
8537c478bd9Sstevel@tonic-gate  */
8547c478bd9Sstevel@tonic-gate int
8557c478bd9Sstevel@tonic-gate nfs3_getattr_otw(vnode_t *vp, struct vattr *vap, cred_t *cr)
8567c478bd9Sstevel@tonic-gate {
8577c478bd9Sstevel@tonic-gate 	int error;
8587c478bd9Sstevel@tonic-gate 	GETATTR3args args;
8597c478bd9Sstevel@tonic-gate 	GETATTR3vres res;
8607c478bd9Sstevel@tonic-gate 	int douprintf;
8617c478bd9Sstevel@tonic-gate 	failinfo_t fi;
8627c478bd9Sstevel@tonic-gate 	hrtime_t t;
8637c478bd9Sstevel@tonic-gate 
8647c478bd9Sstevel@tonic-gate 	args.object = *VTOFH3(vp);
8657c478bd9Sstevel@tonic-gate 	fi.vp = vp;
8667c478bd9Sstevel@tonic-gate 	fi.fhp = (caddr_t)&args.object;
8677c478bd9Sstevel@tonic-gate 	fi.copyproc = nfs3copyfh;
8687c478bd9Sstevel@tonic-gate 	fi.lookupproc = nfs3lookup;
8697c478bd9Sstevel@tonic-gate 	fi.xattrdirproc = acl_getxattrdir3;
8707c478bd9Sstevel@tonic-gate 	res.fres.vp = vp;
8717c478bd9Sstevel@tonic-gate 	res.fres.vap = vap;
8727c478bd9Sstevel@tonic-gate 
8737c478bd9Sstevel@tonic-gate 	douprintf = 1;
8747c478bd9Sstevel@tonic-gate 
8757c478bd9Sstevel@tonic-gate 	t = gethrtime();
8767c478bd9Sstevel@tonic-gate 
8777c478bd9Sstevel@tonic-gate 	error = rfs3call(VTOMI(vp), NFSPROC3_GETATTR,
8787c478bd9Sstevel@tonic-gate 	    xdr_nfs_fh3, (caddr_t)&args,
8797c478bd9Sstevel@tonic-gate 	    xdr_GETATTR3vres, (caddr_t)&res, cr,
8807c478bd9Sstevel@tonic-gate 	    &douprintf, &res.status, 0, &fi);
8817c478bd9Sstevel@tonic-gate 
8827c478bd9Sstevel@tonic-gate 	if (error)
8837c478bd9Sstevel@tonic-gate 		return (error);
8847c478bd9Sstevel@tonic-gate 
8857c478bd9Sstevel@tonic-gate 	error = geterrno3(res.status);
8867c478bd9Sstevel@tonic-gate 	if (error) {
8877c478bd9Sstevel@tonic-gate 		PURGE_STALE_FH(error, vp, cr);
8887c478bd9Sstevel@tonic-gate 		return (error);
8897c478bd9Sstevel@tonic-gate 	}
8907c478bd9Sstevel@tonic-gate 
8917c478bd9Sstevel@tonic-gate 	/*
8927c478bd9Sstevel@tonic-gate 	 * Catch status codes that indicate fattr3 to vattr translation failure
8937c478bd9Sstevel@tonic-gate 	 */
8947c478bd9Sstevel@tonic-gate 	if (res.fres.status)
8957c478bd9Sstevel@tonic-gate 		return (res.fres.status);
8967c478bd9Sstevel@tonic-gate 
8977c478bd9Sstevel@tonic-gate 	nfs_attr_cache(vp, vap, t, cr);
8987c478bd9Sstevel@tonic-gate 	return (0);
8997c478bd9Sstevel@tonic-gate }
9007c478bd9Sstevel@tonic-gate 
9017c478bd9Sstevel@tonic-gate /*
9027c478bd9Sstevel@tonic-gate  * Return either cached or remote attributes. If get remote attr
9037c478bd9Sstevel@tonic-gate  * use them to check and invalidate caches, then cache the new attributes.
9047c478bd9Sstevel@tonic-gate  */
9057c478bd9Sstevel@tonic-gate int
9067c478bd9Sstevel@tonic-gate nfs3getattr(vnode_t *vp, struct vattr *vap, cred_t *cr)
9077c478bd9Sstevel@tonic-gate {
9087c478bd9Sstevel@tonic-gate 	int error;
9097c478bd9Sstevel@tonic-gate 	rnode_t *rp;
9107c478bd9Sstevel@tonic-gate 
9117c478bd9Sstevel@tonic-gate 	/*
9127c478bd9Sstevel@tonic-gate 	 * If we've got cached attributes, we're done, otherwise go
9137c478bd9Sstevel@tonic-gate 	 * to the server to get attributes, which will update the cache
9147c478bd9Sstevel@tonic-gate 	 * in the process.
9157c478bd9Sstevel@tonic-gate 	 */
9167c478bd9Sstevel@tonic-gate 	error = nfs_getattr_cache(vp, vap);
9177c478bd9Sstevel@tonic-gate 	if (error)
9187c478bd9Sstevel@tonic-gate 		error = nfs3_getattr_otw(vp, vap, cr);
9197c478bd9Sstevel@tonic-gate 
9207c478bd9Sstevel@tonic-gate 	/* Return the client's view of file size */
9217c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
9227c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
9237c478bd9Sstevel@tonic-gate 	vap->va_size = rp->r_size;
9247c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
9257c478bd9Sstevel@tonic-gate 
9267c478bd9Sstevel@tonic-gate 	return (error);
9277c478bd9Sstevel@tonic-gate }
9287c478bd9Sstevel@tonic-gate 
/*
 * NFS Version 2 over-the-wire file type (na_type) to local vnode type,
 * indexed by na_type after range checking in nattr_to_vattr().
 */
vtype_t nf_to_vt[] = {
	VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK
};
9327c478bd9Sstevel@tonic-gate /*
9337c478bd9Sstevel@tonic-gate  * Convert NFS Version 2 over the network attributes to the local
9347c478bd9Sstevel@tonic-gate  * virtual attributes.  The mapping between the UID_NOBODY/GID_NOBODY
9357c478bd9Sstevel@tonic-gate  * network representation and the local representation is done here.
9367c478bd9Sstevel@tonic-gate  * Returns 0 for success, error if failed due to overflow.
9377c478bd9Sstevel@tonic-gate  */
9387c478bd9Sstevel@tonic-gate int
9397c478bd9Sstevel@tonic-gate nattr_to_vattr(vnode_t *vp, struct nfsfattr *na, struct vattr *vap)
9407c478bd9Sstevel@tonic-gate {
9417c478bd9Sstevel@tonic-gate 	/* overflow in time attributes? */
9427c478bd9Sstevel@tonic-gate #ifndef _LP64
9437c478bd9Sstevel@tonic-gate 	if (!NFS2_FATTR_TIME_OK(na))
9447c478bd9Sstevel@tonic-gate 		return (EOVERFLOW);
9457c478bd9Sstevel@tonic-gate #endif
9467c478bd9Sstevel@tonic-gate 
9477c478bd9Sstevel@tonic-gate 	if (na->na_type < NFNON || na->na_type > NFSOC)
9487c478bd9Sstevel@tonic-gate 		vap->va_type = VBAD;
9497c478bd9Sstevel@tonic-gate 	else
9507c478bd9Sstevel@tonic-gate 		vap->va_type = nf_to_vt[na->na_type];
9517c478bd9Sstevel@tonic-gate 	vap->va_mode = na->na_mode;
9527c478bd9Sstevel@tonic-gate 	vap->va_uid = (na->na_uid == NFS_UID_NOBODY) ? UID_NOBODY : na->na_uid;
9537c478bd9Sstevel@tonic-gate 	vap->va_gid = (na->na_gid == NFS_GID_NOBODY) ? GID_NOBODY : na->na_gid;
9547c478bd9Sstevel@tonic-gate 	vap->va_fsid = vp->v_vfsp->vfs_dev;
9557c478bd9Sstevel@tonic-gate 	vap->va_nodeid = na->na_nodeid;
9567c478bd9Sstevel@tonic-gate 	vap->va_nlink = na->na_nlink;
9577c478bd9Sstevel@tonic-gate 	vap->va_size = na->na_size;	/* keep for cache validation */
9587c478bd9Sstevel@tonic-gate 	/*
9597c478bd9Sstevel@tonic-gate 	 * nfs protocol defines times as unsigned so don't extend sign,
9607c478bd9Sstevel@tonic-gate 	 * unless sysadmin set nfs_allow_preepoch_time.
9617c478bd9Sstevel@tonic-gate 	 */
9627c478bd9Sstevel@tonic-gate 	NFS_TIME_T_CONVERT(vap->va_atime.tv_sec, na->na_atime.tv_sec);
9637c478bd9Sstevel@tonic-gate 	vap->va_atime.tv_nsec = (uint32_t)(na->na_atime.tv_usec * 1000);
9647c478bd9Sstevel@tonic-gate 	NFS_TIME_T_CONVERT(vap->va_mtime.tv_sec, na->na_mtime.tv_sec);
9657c478bd9Sstevel@tonic-gate 	vap->va_mtime.tv_nsec = (uint32_t)(na->na_mtime.tv_usec * 1000);
9667c478bd9Sstevel@tonic-gate 	NFS_TIME_T_CONVERT(vap->va_ctime.tv_sec, na->na_ctime.tv_sec);
9677c478bd9Sstevel@tonic-gate 	vap->va_ctime.tv_nsec = (uint32_t)(na->na_ctime.tv_usec * 1000);
9687c478bd9Sstevel@tonic-gate 	/*
9697c478bd9Sstevel@tonic-gate 	 * Shannon's law - uncompress the received dev_t
9707c478bd9Sstevel@tonic-gate 	 * if the top half of is zero indicating a response
9717c478bd9Sstevel@tonic-gate 	 * from an `older style' OS. Except for when it is a
9727c478bd9Sstevel@tonic-gate 	 * `new style' OS sending the maj device of zero,
9737c478bd9Sstevel@tonic-gate 	 * in which case the algorithm still works because the
9747c478bd9Sstevel@tonic-gate 	 * fact that it is a new style server
9757c478bd9Sstevel@tonic-gate 	 * is hidden by the minor device not being greater
9767c478bd9Sstevel@tonic-gate 	 * than 255 (a requirement in this case).
9777c478bd9Sstevel@tonic-gate 	 */
9787c478bd9Sstevel@tonic-gate 	if ((na->na_rdev & 0xffff0000) == 0)
9797c478bd9Sstevel@tonic-gate 		vap->va_rdev = nfsv2_expdev(na->na_rdev);
9807c478bd9Sstevel@tonic-gate 	else
9817c478bd9Sstevel@tonic-gate 		vap->va_rdev = expldev(na->na_rdev);
9827c478bd9Sstevel@tonic-gate 
9837c478bd9Sstevel@tonic-gate 	vap->va_nblocks = na->na_blocks;
9847c478bd9Sstevel@tonic-gate 	switch (na->na_type) {
9857c478bd9Sstevel@tonic-gate 	case NFBLK:
9867c478bd9Sstevel@tonic-gate 		vap->va_blksize = DEV_BSIZE;
9877c478bd9Sstevel@tonic-gate 		break;
9887c478bd9Sstevel@tonic-gate 
9897c478bd9Sstevel@tonic-gate 	case NFCHR:
9907c478bd9Sstevel@tonic-gate 		vap->va_blksize = MAXBSIZE;
9917c478bd9Sstevel@tonic-gate 		break;
9927c478bd9Sstevel@tonic-gate 
9937c478bd9Sstevel@tonic-gate 	case NFSOC:
9947c478bd9Sstevel@tonic-gate 	default:
9957c478bd9Sstevel@tonic-gate 		vap->va_blksize = na->na_blocksize;
9967c478bd9Sstevel@tonic-gate 		break;
9977c478bd9Sstevel@tonic-gate 	}
9987c478bd9Sstevel@tonic-gate 	/*
9997c478bd9Sstevel@tonic-gate 	 * This bit of ugliness is a hack to preserve the
10007c478bd9Sstevel@tonic-gate 	 * over-the-wire protocols for named-pipe vnodes.
10017c478bd9Sstevel@tonic-gate 	 * It remaps the special over-the-wire type to the
10027c478bd9Sstevel@tonic-gate 	 * VFIFO type. (see note in nfs.h)
10037c478bd9Sstevel@tonic-gate 	 */
10047c478bd9Sstevel@tonic-gate 	if (NA_ISFIFO(na)) {
10057c478bd9Sstevel@tonic-gate 		vap->va_type = VFIFO;
10067c478bd9Sstevel@tonic-gate 		vap->va_mode = (vap->va_mode & ~S_IFMT) | S_IFIFO;
10077c478bd9Sstevel@tonic-gate 		vap->va_rdev = 0;
10087c478bd9Sstevel@tonic-gate 		vap->va_blksize = na->na_blocksize;
10097c478bd9Sstevel@tonic-gate 	}
10107c478bd9Sstevel@tonic-gate 	vap->va_seq = 0;
10117c478bd9Sstevel@tonic-gate 	return (0);
10127c478bd9Sstevel@tonic-gate }
10137c478bd9Sstevel@tonic-gate 
10147c478bd9Sstevel@tonic-gate /*
10157c478bd9Sstevel@tonic-gate  * Convert NFS Version 3 over the network attributes to the local
10167c478bd9Sstevel@tonic-gate  * virtual attributes.  The mapping between the UID_NOBODY/GID_NOBODY
10177c478bd9Sstevel@tonic-gate  * network representation and the local representation is done here.
10187c478bd9Sstevel@tonic-gate  */
/*
 * NFS Version 3 over-the-wire file type (ftype3) to local vnode type,
 * indexed by na->type after range checking in fattr3_to_vattr().
 * Index 0 maps to VBAD because valid ftype3 values start at NF3REG.
 */
vtype_t nf3_to_vt[] = {
	VBAD, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO
};
10227c478bd9Sstevel@tonic-gate 
10237c478bd9Sstevel@tonic-gate int
10247c478bd9Sstevel@tonic-gate fattr3_to_vattr(vnode_t *vp, fattr3 *na, struct vattr *vap)
10257c478bd9Sstevel@tonic-gate {
10267c478bd9Sstevel@tonic-gate 
10277c478bd9Sstevel@tonic-gate #ifndef _LP64
10287c478bd9Sstevel@tonic-gate 	/* overflow in time attributes? */
10297c478bd9Sstevel@tonic-gate 	if (!NFS3_FATTR_TIME_OK(na))
10307c478bd9Sstevel@tonic-gate 		return (EOVERFLOW);
10317c478bd9Sstevel@tonic-gate #endif
10327c478bd9Sstevel@tonic-gate 	if (!NFS3_SIZE_OK(na->size))
10337c478bd9Sstevel@tonic-gate 		/* file too big */
10347c478bd9Sstevel@tonic-gate 		return (EFBIG);
10357c478bd9Sstevel@tonic-gate 
10367c478bd9Sstevel@tonic-gate 	vap->va_mask = AT_ALL;
10377c478bd9Sstevel@tonic-gate 
10387c478bd9Sstevel@tonic-gate 	if (na->type < NF3REG || na->type > NF3FIFO)
10397c478bd9Sstevel@tonic-gate 		vap->va_type = VBAD;
10407c478bd9Sstevel@tonic-gate 	else
10417c478bd9Sstevel@tonic-gate 		vap->va_type = nf3_to_vt[na->type];
10427c478bd9Sstevel@tonic-gate 	vap->va_mode = na->mode;
10437c478bd9Sstevel@tonic-gate 	vap->va_uid = (na->uid == NFS_UID_NOBODY) ? UID_NOBODY : (uid_t)na->uid;
10447c478bd9Sstevel@tonic-gate 	vap->va_gid = (na->gid == NFS_GID_NOBODY) ? GID_NOBODY : (gid_t)na->gid;
10457c478bd9Sstevel@tonic-gate 	vap->va_fsid = vp->v_vfsp->vfs_dev;
10467c478bd9Sstevel@tonic-gate 	vap->va_nodeid = na->fileid;
10477c478bd9Sstevel@tonic-gate 	vap->va_nlink = na->nlink;
10487c478bd9Sstevel@tonic-gate 	vap->va_size = na->size;
10497c478bd9Sstevel@tonic-gate 
10507c478bd9Sstevel@tonic-gate 	/*
10517c478bd9Sstevel@tonic-gate 	 * nfs protocol defines times as unsigned so don't extend sign,
10527c478bd9Sstevel@tonic-gate 	 * unless sysadmin set nfs_allow_preepoch_time.
10537c478bd9Sstevel@tonic-gate 	 */
10547c478bd9Sstevel@tonic-gate 	NFS_TIME_T_CONVERT(vap->va_atime.tv_sec, na->atime.seconds);
10557c478bd9Sstevel@tonic-gate 	vap->va_atime.tv_nsec = (uint32_t)na->atime.nseconds;
10567c478bd9Sstevel@tonic-gate 	NFS_TIME_T_CONVERT(vap->va_mtime.tv_sec, na->mtime.seconds);
10577c478bd9Sstevel@tonic-gate 	vap->va_mtime.tv_nsec = (uint32_t)na->mtime.nseconds;
10587c478bd9Sstevel@tonic-gate 	NFS_TIME_T_CONVERT(vap->va_ctime.tv_sec, na->ctime.seconds);
10597c478bd9Sstevel@tonic-gate 	vap->va_ctime.tv_nsec = (uint32_t)na->ctime.nseconds;
10607c478bd9Sstevel@tonic-gate 
10617c478bd9Sstevel@tonic-gate 	switch (na->type) {
10627c478bd9Sstevel@tonic-gate 	case NF3BLK:
10637c478bd9Sstevel@tonic-gate 		vap->va_rdev = makedevice(na->rdev.specdata1,
10647c478bd9Sstevel@tonic-gate 					na->rdev.specdata2);
10657c478bd9Sstevel@tonic-gate 		vap->va_blksize = DEV_BSIZE;
10667c478bd9Sstevel@tonic-gate 		vap->va_nblocks = 0;
10677c478bd9Sstevel@tonic-gate 		break;
10687c478bd9Sstevel@tonic-gate 	case NF3CHR:
10697c478bd9Sstevel@tonic-gate 		vap->va_rdev = makedevice(na->rdev.specdata1,
10707c478bd9Sstevel@tonic-gate 					na->rdev.specdata2);
10717c478bd9Sstevel@tonic-gate 		vap->va_blksize = MAXBSIZE;
10727c478bd9Sstevel@tonic-gate 		vap->va_nblocks = 0;
10737c478bd9Sstevel@tonic-gate 		break;
10747c478bd9Sstevel@tonic-gate 	case NF3REG:
10757c478bd9Sstevel@tonic-gate 	case NF3DIR:
10767c478bd9Sstevel@tonic-gate 	case NF3LNK:
10777c478bd9Sstevel@tonic-gate 		vap->va_rdev = 0;
10787c478bd9Sstevel@tonic-gate 		vap->va_blksize = MAXBSIZE;
10797c478bd9Sstevel@tonic-gate 		vap->va_nblocks = (u_longlong_t)
10807c478bd9Sstevel@tonic-gate 		    ((na->used + (size3)DEV_BSIZE - (size3)1) /
10817c478bd9Sstevel@tonic-gate 		    (size3)DEV_BSIZE);
10827c478bd9Sstevel@tonic-gate 		break;
10837c478bd9Sstevel@tonic-gate 	case NF3SOCK:
10847c478bd9Sstevel@tonic-gate 	case NF3FIFO:
10857c478bd9Sstevel@tonic-gate 	default:
10867c478bd9Sstevel@tonic-gate 		vap->va_rdev = 0;
10877c478bd9Sstevel@tonic-gate 		vap->va_blksize = MAXBSIZE;
10887c478bd9Sstevel@tonic-gate 		vap->va_nblocks = 0;
10897c478bd9Sstevel@tonic-gate 		break;
10907c478bd9Sstevel@tonic-gate 	}
10917c478bd9Sstevel@tonic-gate 	vap->va_seq = 0;
10927c478bd9Sstevel@tonic-gate 	return (0);
10937c478bd9Sstevel@tonic-gate }
10947c478bd9Sstevel@tonic-gate 
10957c478bd9Sstevel@tonic-gate /*
10967c478bd9Sstevel@tonic-gate  * Asynchronous I/O parameters.  nfs_async_threads is the high-water mark
10977c478bd9Sstevel@tonic-gate  * for the demand-based allocation of async threads per-mount.  The
10987c478bd9Sstevel@tonic-gate  * nfs_async_timeout is the amount of time a thread will live after it
10997c478bd9Sstevel@tonic-gate  * becomes idle, unless new I/O requests are received before the thread
11007c478bd9Sstevel@tonic-gate  * dies.  See nfs_async_putpage and nfs_async_start.
11017c478bd9Sstevel@tonic-gate  */
11027c478bd9Sstevel@tonic-gate 
11037c478bd9Sstevel@tonic-gate int nfs_async_timeout = -1;	/* uninitialized */
11047c478bd9Sstevel@tonic-gate 
11057c478bd9Sstevel@tonic-gate static void	nfs_async_start(struct vfs *);
11067c478bd9Sstevel@tonic-gate 
11077c478bd9Sstevel@tonic-gate static void
11087c478bd9Sstevel@tonic-gate free_async_args(struct nfs_async_reqs *args)
11097c478bd9Sstevel@tonic-gate {
11107c478bd9Sstevel@tonic-gate 	rnode_t *rp;
11117c478bd9Sstevel@tonic-gate 
11127c478bd9Sstevel@tonic-gate 	if (args->a_io != NFS_INACTIVE) {
11137c478bd9Sstevel@tonic-gate 		rp = VTOR(args->a_vp);
11147c478bd9Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
11157c478bd9Sstevel@tonic-gate 		rp->r_count--;
11167c478bd9Sstevel@tonic-gate 		if (args->a_io == NFS_PUTAPAGE ||
11177c478bd9Sstevel@tonic-gate 		    args->a_io == NFS_PAGEIO)
11187c478bd9Sstevel@tonic-gate 			rp->r_awcount--;
11197c478bd9Sstevel@tonic-gate 		cv_broadcast(&rp->r_cv);
11207c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
11217c478bd9Sstevel@tonic-gate 		VN_RELE(args->a_vp);
11227c478bd9Sstevel@tonic-gate 	}
11237c478bd9Sstevel@tonic-gate 	crfree(args->a_cred);
11247c478bd9Sstevel@tonic-gate 	kmem_free(args, sizeof (*args));
11257c478bd9Sstevel@tonic-gate }
11267c478bd9Sstevel@tonic-gate 
/*
 * Cross-zone thread creation and NFS access is disallowed, yet fsflush() and
 * pageout(), running in the global zone, have legitimate reasons to do
 * VOP_PUTPAGE(B_ASYNC) on other zones' NFS mounts.  We avoid the problem by
 * use of a per-mount "asynchronous requests manager thread" which is
 * signaled by the various asynchronous work routines when there is
 * asynchronous work to be done.  It is responsible for creating new
 * worker threads if necessary, and notifying existing worker threads
 * that there is work to be done.
 *
 * In other words, it will "take the specifications from the customers and
 * give them to the engineers."
 *
 * Worker threads die off of their own accord if they are no longer
 * needed.
 *
 * This thread is killed when the zone is going away or the filesystem
 * is being unmounted.
 */
void
nfs_async_manager(vfs_t *vfsp)
{
	callb_cpr_t cprinfo;
	mntinfo_t *mi;
	uint_t max_threads;

	mi = VFTOMI(vfsp);

	/*
	 * Register with CPR (suspend/resume).  Note that 'mi_async_lock'
	 * is handed to CALLB_CPR_INIT: CALLB_CPR_EXIT at the bottom of
	 * this function is responsible for dropping it.
	 */
	CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
		    "nfs_async_manager");

	mutex_enter(&mi->mi_async_lock);
	/*
	 * We want to stash the max number of threads that this mount was
	 * allowed so we can use it later when the variable is set to zero as
	 * part of the zone/mount going away.
	 *
	 * We want to be able to create at least one thread to handle
	 * asynchronous inactive calls.
	 */
	max_threads = MAX(mi->mi_max_threads, 1);
	mutex_enter(&mi->mi_lock);
	/*
	 * We don't want to wait for mi_max_threads to go to zero, since that
	 * happens as part of a failed unmount, but this thread should only
	 * exit when the mount/zone is really going away.
	 *
	 * Once MI_ASYNC_MGR_STOP is set, no more async operations will be
	 * attempted: the various _async_*() functions know to do things
	 * inline if mi_max_threads == 0.  Henceforth we just drain out the
	 * outstanding requests.
	 *
	 * Note that we still create zthreads even if we notice the zone is
	 * shutting down (MI_ASYNC_MGR_STOP is set); this may cause the zone
	 * shutdown sequence to take slightly longer in some cases, but
	 * doesn't violate the protocol, as all threads will exit as soon as
	 * they're done processing the remaining requests.
	 */
	while (!(mi->mi_flags & MI_ASYNC_MGR_STOP) ||
	    mi->mi_async_req_count > 0) {
		mutex_exit(&mi->mi_lock);
		/*
		 * Sleep until a request is queued (or we're told to stop);
		 * mark ourselves CPR-safe around the wait so a system
		 * suspend doesn't have to wait on us.
		 */
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		cv_wait(&mi->mi_async_reqs_cv, &mi->mi_async_lock);
		CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
		while (mi->mi_async_req_count > 0) {
			/*
			 * Paranoia: If the mount started out having
			 * (mi->mi_max_threads == 0), and the value was
			 * later changed (via a debugger or somesuch),
			 * we could be confused since we will think we
			 * can't create any threads, and the calling
			 * code (which looks at the current value of
			 * mi->mi_max_threads, now non-zero) thinks we
			 * can.
			 *
			 * So, because we're paranoid, we create threads
			 * up to the maximum of the original and the
			 * current value. This means that future
			 * (debugger-induced) lowerings of
			 * mi->mi_max_threads are ignored for our
			 * purposes, but who told them they could change
			 * random values on a live kernel anyhow?
			 */
			if (mi->mi_threads <
			    MAX(mi->mi_max_threads, max_threads)) {
				mi->mi_threads++;
				/*
				 * Drop mi_async_lock across the (possibly
				 * blocking) thread creation; re-take it
				 * before touching the counters again.
				 */
				mutex_exit(&mi->mi_async_lock);
				VFS_HOLD(vfsp);	/* hold for new thread */
				(void) zthread_create(NULL, 0, nfs_async_start,
				    vfsp, 0, minclsyspri);
				mutex_enter(&mi->mi_async_lock);
			}
			/* Wake one worker per queued request. */
			cv_signal(&mi->mi_async_work_cv);
			ASSERT(mi->mi_async_req_count != 0);
			mi->mi_async_req_count--;
		}
		mutex_enter(&mi->mi_lock);
	}
	mutex_exit(&mi->mi_lock);
	/*
	 * Let everyone know we're done.
	 */
	mi->mi_manager_thread = NULL;
	cv_broadcast(&mi->mi_async_cv);

	/*
	 * There is no explicit call to mutex_exit(&mi->mi_async_lock)
	 * since CALLB_CPR_EXIT is actually responsible for releasing
	 * 'mi_async_lock'.
	 */
	CALLB_CPR_EXIT(&cprinfo);
	VFS_RELE(vfsp);	/* release thread's hold */
	zthread_exit();
}
12417c478bd9Sstevel@tonic-gate 
/*
 * Signal (and wait for) the async manager thread to clean up and go away.
 */
void
nfs_async_manager_stop(vfs_t *vfsp)
{
	mntinfo_t *mi = VFTOMI(vfsp);

	/*
	 * Set MI_ASYNC_MGR_STOP (mi_lock is taken nested inside
	 * mi_async_lock, matching the manager's lock ordering) and wake
	 * the manager via mi_async_reqs_cv.  The manager clears
	 * mi_manager_thread and broadcasts mi_async_cv just before it
	 * exits, which is what ends our wait loop below.
	 */
	mutex_enter(&mi->mi_async_lock);
	mutex_enter(&mi->mi_lock);
	mi->mi_flags |= MI_ASYNC_MGR_STOP;
	mutex_exit(&mi->mi_lock);
	cv_broadcast(&mi->mi_async_reqs_cv);
	while (mi->mi_manager_thread != NULL)
		cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
	mutex_exit(&mi->mi_async_lock);
}
12597c478bd9Sstevel@tonic-gate 
/*
 * Queue an asynchronous readahead of the block at 'blkoff' (to be
 * performed by the supplied 'readahead' routine) for the worker threads
 * of vp's mount.
 *
 * Returns 0 if the request was queued, -1 if the readahead was not
 * initiated (caller simply proceeds without readahead; this is purely
 * an optimization, so there is no synchronous fallback here).
 */
int
nfs_async_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr,
	struct seg *seg, cred_t *cr, void (*readahead)(vnode_t *,
	u_offset_t, caddr_t, struct seg *, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	rp = VTOR(vp);
	ASSERT(rp->r_freef == NULL);

	mi = VTOMI(vp);

	/*
	 * If addr falls in a different segment, don't bother doing readahead.
	 */
	if (addr >= seg->s_base + seg->s_size)
		return (-1);

	/*
	 * If we can't allocate a request structure, punt on the readahead.
	 */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		return (-1);

	/*
	 * If a lock operation is pending, don't initiate any new
	 * readaheads.  Otherwise, bump r_count to indicate the new
	 * asynchronous I/O.
	 */
	if (!nfs_rw_tryenter(&rp->r_lkserlock, RW_READER)) {
		kmem_free(args, sizeof (*args));
		return (-1);
	}
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	mutex_exit(&rp->r_statelock);
	nfs_rw_exit(&rp->r_lkserlock);

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credentials for the lifetime of the request. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_READ_AHEAD;
	args->a_nfs_readahead = readahead;
	args->a_nfs_blkoff = blkoff;
	args->a_nfs_seg = seg;
	args->a_nfs_addr = addr;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * If asyncio has been disabled, don't bother readahead.
	 */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/*
	 * Link request structure into the async list and
	 * wakeup async thread to do the i/o.
	 */
	if (mi->mi_async_reqs[NFS_READ_AHEAD] == NULL) {
		mi->mi_async_reqs[NFS_READ_AHEAD] = args;
		mi->mi_async_tail[NFS_READ_AHEAD] = args;
	} else {
		mi->mi_async_tail[NFS_READ_AHEAD]->a_next = args;
		mi->mi_async_tail[NFS_READ_AHEAD] = args;
	}

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return (0);

noasync:
	/*
	 * Undo the r_count bump and the vnode/cred holds taken above,
	 * waking anyone waiting for r_count to drop.
	 */
	mutex_enter(&rp->r_statelock);
	rp->r_count--;
	cv_broadcast(&rp->r_cv);
	mutex_exit(&rp->r_statelock);
	VN_RELE(vp);
	crfree(cr);
	kmem_free(args, sizeof (*args));
	return (-1);
}
13597c478bd9Sstevel@tonic-gate 
/*
 * Queue an asynchronous putpage of 'pp' (performed by the supplied
 * 'putapage' routine) for the worker threads of vp's mount.
 *
 * Returns 0 if the request was queued.  If async queueing is not
 * possible, falls back as follows: for pageout()/fsflush() the pages
 * are simply re-marked dirty and 0 is returned (a sync write here
 * could hang pageout); for a cross-zone caller the pages are re-marked
 * dirty and EPERM is returned; otherwise the putpage is performed
 * synchronously in this thread and its result returned.
 */
int
nfs_async_putapage(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
	int flags, cred_t *cr, int (*putapage)(vnode_t *, page_t *,
	u_offset_t, size_t, int, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	ASSERT(flags & B_ASYNC);
	ASSERT(vp->v_vfsp != NULL);

	rp = VTOR(vp);
	ASSERT(rp->r_count > 0);

	mi = VTOMI(vp);

	/*
	 * If we can't allocate a request structure, do the putpage
	 * operation synchronously in this thread's context.
	 */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credentials for the lifetime of the request. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_PUTAPAGE;
	args->a_nfs_putapage = putapage;
	args->a_nfs_pp = pp;
	args->a_nfs_off = off;
	args->a_nfs_len = (uint_t)len;
	args->a_nfs_flags = flags;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * If asyncio has been disabled, then make a synchronous request.
	 * This check is done a second time in case async io was disabled
	 * while this thread was blocked waiting for memory pressure to
	 * reduce or for the queue to drain.
	 */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/*
	 * Link request structure into the async list and
	 * wakeup async thread to do the i/o.
	 */
	if (mi->mi_async_reqs[NFS_PUTAPAGE] == NULL) {
		mi->mi_async_reqs[NFS_PUTAPAGE] = args;
		mi->mi_async_tail[NFS_PUTAPAGE] = args;
	} else {
		mi->mi_async_tail[NFS_PUTAPAGE]->a_next = args;
		mi->mi_async_tail[NFS_PUTAPAGE] = args;
	}

	/* Account for the outstanding async write against the rnode. */
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	rp->r_awcount++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return (0);

noasync:
	/* args is NULL here only if the kmem_alloc above failed. */
	if (args != NULL) {
		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
	}

	if (curproc == proc_pageout || curproc == proc_fsflush) {
		/*
		 * If we get here in the context of the pageout/fsflush,
		 * we refuse to do a sync write, because this may hang
		 * pageout (and the machine). In this case, we just
		 * re-mark the page as dirty and punt on the page.
		 *
		 * Make sure B_FORCE isn't set.  We can re-mark the
		 * pages as dirty and unlock the pages in one swoop by
		 * passing in B_ERROR to pvn_write_done().  However,
		 * we should make sure B_FORCE isn't set - we don't
		 * want the page tossed before it gets written out.
		 */
		if (flags & B_FORCE)
			flags &= ~(B_INVAL | B_FORCE);
		pvn_write_done(pp, flags | B_ERROR);
		return (0);
	}
	if (nfs_zone() != mi->mi_zone) {
		/*
		 * So this was a cross-zone sync putpage.  We pass in B_ERROR
		 * to pvn_write_done() to re-mark the pages as dirty and unlock
		 * them.
		 *
		 * We don't want to clear B_FORCE here as the caller presumably
		 * knows what they're doing if they set it.
		 */
		pvn_write_done(pp, flags | B_ERROR);
		return (EPERM);
	}
	return ((*putapage)(vp, pp, off, len, flags, cr));
}
14817c478bd9Sstevel@tonic-gate 
/*
 * Queue an asynchronous pageio request for [io_off, io_off + io_len)
 * of 'pp' (performed by the supplied 'pageio' routine) for the worker
 * threads of vp's mount.
 *
 * Returns 0 if the request was queued.  If async queueing is not
 * possible: for reads the page list is cleaned up with an error and 0
 * is returned; for writes from pageout()/fsflush() the pages are
 * re-marked dirty and 0 is returned; for a cross-zone caller the pages
 * are re-marked dirty and EPERM is returned; otherwise the pageio is
 * performed synchronously in this thread and its result returned.
 */
int
nfs_async_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
	int flags, cred_t *cr, int (*pageio)(vnode_t *, page_t *, u_offset_t,
	size_t, int, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	ASSERT(flags & B_ASYNC);
	ASSERT(vp->v_vfsp != NULL);

	rp = VTOR(vp);
	ASSERT(rp->r_count > 0);

	mi = VTOMI(vp);

	/*
	 * If we can't allocate a request structure, do the pageio
	 * request synchronously in this thread's context.
	 */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credentials for the lifetime of the request. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_PAGEIO;
	args->a_nfs_pageio = pageio;
	args->a_nfs_pp = pp;
	args->a_nfs_off = io_off;
	args->a_nfs_len = (uint_t)io_len;
	args->a_nfs_flags = flags;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * If asyncio has been disabled, then make a synchronous request.
	 * This check is done a second time in case async io was disabled
	 * while this thread was blocked waiting for memory pressure to
	 * reduce or for the queue to drain.
	 */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/*
	 * Link request structure into the async list and
	 * wakeup async thread to do the i/o.
	 */
	if (mi->mi_async_reqs[NFS_PAGEIO] == NULL) {
		mi->mi_async_reqs[NFS_PAGEIO] = args;
		mi->mi_async_tail[NFS_PAGEIO] = args;
	} else {
		mi->mi_async_tail[NFS_PAGEIO]->a_next = args;
		mi->mi_async_tail[NFS_PAGEIO] = args;
	}

	/* Account for the outstanding async I/O against the rnode. */
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	rp->r_awcount++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return (0);

noasync:
	/* args is NULL here only if the kmem_alloc above failed. */
	if (args != NULL) {
		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
	}

	/*
	 * If we can't do it ASYNC, for reads we do nothing (but cleanup
	 * the page list), for writes we do it synchronously, except for
	 * proc_pageout/proc_fsflush as described below.
	 */
	if (flags & B_READ) {
		pvn_read_done(pp, flags | B_ERROR);
		return (0);
	}

	if (curproc == proc_pageout || curproc == proc_fsflush) {
		/*
		 * If we get here in the context of the pageout/fsflush,
		 * we refuse to do a sync write, because this may hang
		 * pageout/fsflush (and the machine). In this case, we just
		 * re-mark the page as dirty and punt on the page.
		 *
		 * Make sure B_FORCE isn't set.  We can re-mark the
		 * pages as dirty and unlock the pages in one swoop by
		 * passing in B_ERROR to pvn_write_done().  However,
		 * we should make sure B_FORCE isn't set - we don't
		 * want the page tossed before it gets written out.
		 */
		if (flags & B_FORCE)
			flags &= ~(B_INVAL | B_FORCE);
		pvn_write_done(pp, flags | B_ERROR);
		return (0);
	}

	if (nfs_zone() != mi->mi_zone) {
		/*
		 * So this was a cross-zone sync pageio.  We pass in B_ERROR
		 * to pvn_write_done() to re-mark the pages as dirty and unlock
		 * them.
		 *
		 * We don't want to clear B_FORCE here as the caller presumably
		 * knows what they're doing if they set it.
		 */
		pvn_write_done(pp, flags | B_ERROR);
		return (EPERM);
	}
	return ((*pageio)(vp, pp, io_off, io_len, flags, cr));
}
16147c478bd9Sstevel@tonic-gate 
/*
 * Queue an asynchronous fill of readdir cache entry 'rdc' (performed
 * by the supplied 'readdir' routine) for the worker threads of vp's
 * mount.
 *
 * If the request cannot be queued, the cache entry is reset to
 * RDDIRREQ state, any thread sleeping on it (RDDIRWAIT) is woken so it
 * can retry the readdir itself, and the entry's reference is released.
 */
void
nfs_async_readdir(vnode_t *vp, rddir_cache *rdc, cred_t *cr,
	int (*readdir)(vnode_t *, rddir_cache *, cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	rp = VTOR(vp);
	ASSERT(rp->r_freef == NULL);

	mi = VTOMI(vp);

	/*
	 * If we can't allocate a request structure, do the readdir
	 * operation synchronously in this thread's context.
	 */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* Hold the vnode and credentials for the lifetime of the request. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_READDIR;
	args->a_nfs_readdir = readdir;
	args->a_nfs_rdc = rdc;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * If asyncio has been disabled, then make a synchronous request.
	 */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/*
	 * Link request structure into the async list and
	 * wakeup async thread to do the i/o.
	 */
	if (mi->mi_async_reqs[NFS_READDIR] == NULL) {
		mi->mi_async_reqs[NFS_READDIR] = args;
		mi->mi_async_tail[NFS_READDIR] = args;
	} else {
		mi->mi_async_tail[NFS_READDIR]->a_next = args;
		mi->mi_async_tail[NFS_READDIR] = args;
	}

	/* Account for the outstanding async I/O against the rnode. */
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return;

noasync:
	/* args is NULL here only if the kmem_alloc above failed. */
	if (args != NULL) {
		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
	}

	rdc->entries = NULL;
	mutex_enter(&rp->r_statelock);
	ASSERT(rdc->flags & RDDIR);
	rdc->flags &= ~RDDIR;
	rdc->flags |= RDDIRREQ;
	/*
	 * Check the flag to see if RDDIRWAIT is set. If RDDIRWAIT
	 * is set, wakeup the thread sleeping in cv_wait_sig().
	 * The woken up thread will reset the flag to RDDIR and will
	 * continue with the readdir operation.
	 */
	if (rdc->flags & RDDIRWAIT) {
		rdc->flags &= ~RDDIRWAIT;
		cv_broadcast(&rdc->cv);
	}
	mutex_exit(&rp->r_statelock);
	rddir_cache_rele(rdc);
}
17117c478bd9Sstevel@tonic-gate 
/*
 * Queue an asynchronous COMMIT request for the pages on plist,
 * covering [offset, offset + count) of vp.  The over-the-wire work is
 * performed later by an async worker thread invoking the supplied
 * commit routine.  If no request structure can be allocated, or async
 * i/o has been disabled, fall back to doing the commit in this
 * thread's context — except for pageout/fsflush or a thread in a
 * foreign zone, which instead just tag the pages for a later commit
 * and unlock them.
 */
void
nfs_async_commit(vnode_t *vp, page_t *plist, offset3 offset, count3 count,
	cred_t *cr, void (*commit)(vnode_t *, page_t *, offset3, count3,
	cred_t *))
{
	rnode_t *rp;
	mntinfo_t *mi;
	struct nfs_async_reqs *args;
	page_t *pp;

	rp = VTOR(vp);
	mi = VTOMI(vp);

	/*
	 * If we can't allocate a request structure, do the commit
	 * operation synchronously in this thread's context.
	 */
	if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
		goto noasync;

	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* The queued request keeps holds on both the vnode and the cred. */
	VN_HOLD(vp);
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_COMMIT;
	args->a_nfs_commit = commit;
	args->a_nfs_plist = plist;
	args->a_nfs_offset = offset;
	args->a_nfs_count = count;

	mutex_enter(&mi->mi_async_lock);

	/*
	 * If asyncio has been disabled, then make a synchronous request.
	 * This check is done a second time in case async io was disabled
	 * while this thread was blocked waiting for memory pressure to
	 * reduce or for the queue to drain.
	 */
	if (mi->mi_max_threads == 0) {
		mutex_exit(&mi->mi_async_lock);
		goto noasync;
	}

	/*
	 * Link request structure into the async list and
	 * wakeup async thread to do the i/o.
	 */
	if (mi->mi_async_reqs[NFS_COMMIT] == NULL) {
		mi->mi_async_reqs[NFS_COMMIT] = args;
		mi->mi_async_tail[NFS_COMMIT] = args;
	} else {
		mi->mi_async_tail[NFS_COMMIT]->a_next = args;
		mi->mi_async_tail[NFS_COMMIT] = args;
	}

	/* Bump r_count so the rnode stays busy until the i/o completes. */
	mutex_enter(&rp->r_statelock);
	rp->r_count++;
	mutex_exit(&rp->r_statelock);

	if (mi->mi_io_kstats) {
		mutex_enter(&mi->mi_lock);
		kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
		mutex_exit(&mi->mi_lock);
	}

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
	return;

noasync:
	/*
	 * args is non-NULL only when we got here via the disabled-async
	 * path above; undo the VN_HOLD/crhold taken when it was filled in.
	 */
	if (args != NULL) {
		VN_RELE(vp);
		crfree(cr);
		kmem_free(args, sizeof (*args));
	}

	/*
	 * pageout, fsflush, and threads from another zone do not run the
	 * commit routine synchronously (presumably to keep over-the-wire
	 * work out of those contexts); instead each page is tagged
	 * C_COMMIT for a later commit and released.
	 */
	if (curproc == proc_pageout || curproc == proc_fsflush ||
	    nfs_zone() != mi->mi_zone) {
		while (plist != NULL) {
			pp = plist;
			page_sub(&plist, pp);
			pp->p_fsdata = C_COMMIT;
			page_unlock(pp);
		}
		return;
	}
	(*commit)(vp, plist, offset, count, cr);
}
18077c478bd9Sstevel@tonic-gate 
/*
 * Queue an asynchronous request to run the given inactive routine on
 * vp.  Unlike the other async operations, this request is always
 * queued (mi_max_threads is deliberately not checked) because the
 * vnode must be disposed of regardless; the async manager thread will
 * create at least one worker to handle it.  If the manager thread is
 * already gone, clean up what we can locally and free the rnode
 * directly.
 */
void
nfs_async_inactive(vnode_t *vp, cred_t *cr,
    void (*inactive)(vnode_t *, cred_t *, caller_context_t *))
{
	mntinfo_t *mi;
	struct nfs_async_reqs *args;

	mi = VTOMI(vp);

	/* KM_SLEEP: this allocation may block but cannot fail. */
	args = kmem_alloc(sizeof (*args), KM_SLEEP);
	args->a_next = NULL;
#ifdef DEBUG
	args->a_queuer = curthread;
#endif
	/* No VN_HOLD here: this call is disposing of vp's last reference. */
	args->a_vp = vp;
	ASSERT(cr != NULL);
	crhold(cr);
	args->a_cred = cr;
	args->a_io = NFS_INACTIVE;
	args->a_nfs_inactive = inactive;

	/*
	 * Note that we don't check mi->mi_max_threads here, since we
	 * *need* to get rid of this vnode regardless of whether someone
	 * set nfs3_max_threads/nfs_max_threads to zero in /etc/system.
	 *
	 * The manager thread knows about this and is willing to create
	 * at least one thread to accommodate us.
	 */
	mutex_enter(&mi->mi_async_lock);
	if (mi->mi_manager_thread == NULL) {
		rnode_t *rp = VTOR(vp);

		mutex_exit(&mi->mi_async_lock);
		crfree(cr);	/* drop our reference */
		kmem_free(args, sizeof (*args));
		/*
		 * We can't do an over-the-wire call since we're in the wrong
		 * zone, so we need to clean up state as best we can and then
		 * throw away the vnode.
		 */
		mutex_enter(&rp->r_statelock);
		if (rp->r_unldvp != NULL) {
			vnode_t *unldvp;
			char *unlname;
			cred_t *unlcred;

			/*
			 * Detach the r_unl* state (a file removed while
			 * still open) under r_statelock, then drop its
			 * holds after releasing the lock.
			 */
			unldvp = rp->r_unldvp;
			rp->r_unldvp = NULL;
			unlname = rp->r_unlname;
			rp->r_unlname = NULL;
			unlcred = rp->r_unlcred;
			rp->r_unlcred = NULL;
			mutex_exit(&rp->r_statelock);

			VN_RELE(unldvp);
			kmem_free(unlname, MAXNAMELEN);
			crfree(unlcred);
		} else {
			mutex_exit(&rp->r_statelock);
		}
		/*
		 * No need to explicitly throw away any cached pages.  The
		 * eventual rinactive() will attempt a synchronous
		 * VOP_PUTPAGE() which will immediately fail since the request
		 * is coming from the wrong zone, and then will proceed to call
		 * nfs_invalidate_pages() which will clean things up for us.
		 */
		rp_addfree(VTOR(vp), cr);
		return;
	}

	if (mi->mi_async_reqs[NFS_INACTIVE] == NULL) {
		mi->mi_async_reqs[NFS_INACTIVE] = args;
	} else {
		mi->mi_async_tail[NFS_INACTIVE]->a_next = args;
	}
	mi->mi_async_tail[NFS_INACTIVE] = args;
	/*
	 * Don't increment r_count, since we're trying to get rid of the vnode.
	 */

	mi->mi_async_req_count++;
	ASSERT(mi->mi_async_req_count != 0);
	cv_signal(&mi->mi_async_reqs_cv);
	mutex_exit(&mi->mi_async_lock);
}
18957c478bd9Sstevel@tonic-gate 
18967c478bd9Sstevel@tonic-gate /*
18977c478bd9Sstevel@tonic-gate  * The async queues for each mounted file system are arranged as a
18987c478bd9Sstevel@tonic-gate  * set of queues, one for each async i/o type.  Requests are taken
18997c478bd9Sstevel@tonic-gate  * from the queues in a round-robin fashion.  A number of consecutive
19007c478bd9Sstevel@tonic-gate  * requests are taken from each queue before moving on to the next
19017c478bd9Sstevel@tonic-gate  * queue.  This functionality may allow the NFS Version 2 server to do
19027c478bd9Sstevel@tonic-gate  * write clustering, even if the client is mixing writes and reads
19037c478bd9Sstevel@tonic-gate  * because it will take multiple write requests from the queue
19047c478bd9Sstevel@tonic-gate  * before processing any of the other async i/o types.
19057c478bd9Sstevel@tonic-gate  *
19067c478bd9Sstevel@tonic-gate  * XXX The nfs_async_start thread is unsafe in the light of the present
19077c478bd9Sstevel@tonic-gate  * model defined by cpr to suspend the system. Specifically over the
19087c478bd9Sstevel@tonic-gate  * wire calls are cpr-unsafe. The thread should be reevaluated in
19097c478bd9Sstevel@tonic-gate  * case of future updates to the cpr model.
19107c478bd9Sstevel@tonic-gate  */
static void
nfs_async_start(struct vfs *vfsp)
{
	struct nfs_async_reqs *args;
	mntinfo_t *mi = VFTOMI(vfsp);
	clock_t time_left = 1;
	callb_cpr_t cprinfo;
	int i;

	/*
	 * Dynamic initialization of nfs_async_timeout to allow nfs to be
	 * built in an implementation independent manner.
	 */
	if (nfs_async_timeout == -1)
		nfs_async_timeout = NFS_ASYNC_TIMEOUT;

	CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr, "nas");

	mutex_enter(&mi->mi_async_lock);
	for (;;) {
		/*
		 * Find the next queue containing an entry.  We start
		 * at the current queue pointer and then round robin
		 * through all of them until we either find a non-empty
		 * queue or have looked through all of them.
		 */
		for (i = 0; i < NFS_ASYNC_TYPES; i++) {
			args = *mi->mi_async_curr;
			if (args != NULL)
				break;
			mi->mi_async_curr++;
			if (mi->mi_async_curr ==
			    &mi->mi_async_reqs[NFS_ASYNC_TYPES])
				mi->mi_async_curr = &mi->mi_async_reqs[0];
		}
		/*
		 * If we didn't find a entry, then block until woken up
		 * again and then look through the queues again.
		 */
		if (args == NULL) {
			/*
			 * Exiting is considered to be safe for CPR as well
			 */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);

			/*
			 * Wakeup thread waiting to unmount the file
			 * system only if all async threads are inactive.
			 *
			 * If we've timed-out (time_left <= 0 from the
			 * previous cv_timedwait) and there's nothing to
			 * do, then get rid of this thread.
			 */
			if (mi->mi_max_threads == 0 || time_left <= 0) {
				if (--mi->mi_threads == 0)
					cv_signal(&mi->mi_async_cv);
				CALLB_CPR_EXIT(&cprinfo);
				VFS_RELE(vfsp);	/* release thread's hold */
				zthread_exit();
				/* NOTREACHED */
			}
			time_left = cv_timedwait(&mi->mi_async_work_cv,
			    &mi->mi_async_lock, nfs_async_timeout + lbolt);

			CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);

			continue;
		}
		time_left = 1;	/* found work: reset the idle-exit timer */

		/*
		 * Remove the request from the async queue and then
		 * update the current async request queue pointer.  If
		 * the current queue is empty or we have removed enough
		 * consecutive entries from it, then reset the counter
		 * for this queue and then move the current pointer to
		 * the next queue.
		 */
		*mi->mi_async_curr = args->a_next;
		if (*mi->mi_async_curr == NULL ||
		    --mi->mi_async_clusters[args->a_io] == 0) {
			mi->mi_async_clusters[args->a_io] =
						mi->mi_async_init_clusters;
			mi->mi_async_curr++;
			if (mi->mi_async_curr ==
			    &mi->mi_async_reqs[NFS_ASYNC_TYPES])
				mi->mi_async_curr = &mi->mi_async_reqs[0];
		}

		/*
		 * NFS_INACTIVE requests were never entered into the i/o
		 * wait-queue kstat (see nfs_async_inactive), so don't
		 * exit it for them.
		 */
		if (args->a_io != NFS_INACTIVE && mi->mi_io_kstats) {
			mutex_enter(&mi->mi_lock);
			kstat_waitq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
			mutex_exit(&mi->mi_lock);
		}

		mutex_exit(&mi->mi_async_lock);

		/*
		 * Obtain arguments from the async request structure.
		 * Read-aheads are optional and are silently dropped if
		 * async i/o has been disabled (mi_max_threads == 0).
		 */
		if (args->a_io == NFS_READ_AHEAD && mi->mi_max_threads > 0) {
			(*args->a_nfs_readahead)(args->a_vp, args->a_nfs_blkoff,
					args->a_nfs_addr, args->a_nfs_seg,
					args->a_cred);
		} else if (args->a_io == NFS_PUTAPAGE) {
			(void) (*args->a_nfs_putapage)(args->a_vp,
					args->a_nfs_pp, args->a_nfs_off,
					args->a_nfs_len, args->a_nfs_flags,
					args->a_cred);
		} else if (args->a_io == NFS_PAGEIO) {
			(void) (*args->a_nfs_pageio)(args->a_vp,
					args->a_nfs_pp, args->a_nfs_off,
					args->a_nfs_len, args->a_nfs_flags,
					args->a_cred);
		} else if (args->a_io == NFS_READDIR) {
			(void) ((*args->a_nfs_readdir)(args->a_vp,
					args->a_nfs_rdc, args->a_cred));
		} else if (args->a_io == NFS_COMMIT) {
			(*args->a_nfs_commit)(args->a_vp, args->a_nfs_plist,
					args->a_nfs_offset, args->a_nfs_count,
					args->a_cred);
		} else if (args->a_io == NFS_INACTIVE) {
			(*args->a_nfs_inactive)(args->a_vp, args->a_cred, NULL);
		}

		/*
		 * Now, release the vnode and free the credentials
		 * structure.
		 */
		free_async_args(args);
		/*
		 * Reacquire the mutex because it will be needed above.
		 */
		mutex_enter(&mi->mi_async_lock);
	}
}
20467c478bd9Sstevel@tonic-gate 
20477c478bd9Sstevel@tonic-gate void
20487c478bd9Sstevel@tonic-gate nfs_async_stop(struct vfs *vfsp)
20497c478bd9Sstevel@tonic-gate {
20507c478bd9Sstevel@tonic-gate 	mntinfo_t *mi = VFTOMI(vfsp);
20517c478bd9Sstevel@tonic-gate 
20527c478bd9Sstevel@tonic-gate 	/*
20537c478bd9Sstevel@tonic-gate 	 * Wait for all outstanding async operations to complete and for the
20547c478bd9Sstevel@tonic-gate 	 * worker threads to exit.
20557c478bd9Sstevel@tonic-gate 	 */
20567c478bd9Sstevel@tonic-gate 	mutex_enter(&mi->mi_async_lock);
20577c478bd9Sstevel@tonic-gate 	mi->mi_max_threads = 0;
20587c478bd9Sstevel@tonic-gate 	cv_broadcast(&mi->mi_async_work_cv);
20597c478bd9Sstevel@tonic-gate 	while (mi->mi_threads != 0)
20607c478bd9Sstevel@tonic-gate 		cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
20617c478bd9Sstevel@tonic-gate 	mutex_exit(&mi->mi_async_lock);
20627c478bd9Sstevel@tonic-gate }
20637c478bd9Sstevel@tonic-gate 
20647c478bd9Sstevel@tonic-gate /*
20657c478bd9Sstevel@tonic-gate  * nfs_async_stop_sig:
20667c478bd9Sstevel@tonic-gate  * Wait for all outstanding putpage operation to complete. If a signal
20677c478bd9Sstevel@tonic-gate  * is deliver we will abort and return non-zero. If we can put all the
20687c478bd9Sstevel@tonic-gate  * pages we will return 0. This routine is called from nfs_unmount and
2069*da6c28aaSamw  * nfs3_unmount to make these operations interruptible.
20707c478bd9Sstevel@tonic-gate  */
20717c478bd9Sstevel@tonic-gate int
20727c478bd9Sstevel@tonic-gate nfs_async_stop_sig(struct vfs *vfsp)
20737c478bd9Sstevel@tonic-gate {
20747c478bd9Sstevel@tonic-gate 	mntinfo_t *mi = VFTOMI(vfsp);
20757c478bd9Sstevel@tonic-gate 	ushort_t omax;
20767c478bd9Sstevel@tonic-gate 	int rval;
20777c478bd9Sstevel@tonic-gate 
20787c478bd9Sstevel@tonic-gate 	/*
20797c478bd9Sstevel@tonic-gate 	 * Wait for all outstanding async operations to complete and for the
20807c478bd9Sstevel@tonic-gate 	 * worker threads to exit.
20817c478bd9Sstevel@tonic-gate 	 */
20827c478bd9Sstevel@tonic-gate 	mutex_enter(&mi->mi_async_lock);
20837c478bd9Sstevel@tonic-gate 	omax = mi->mi_max_threads;
20847c478bd9Sstevel@tonic-gate 	mi->mi_max_threads = 0;
20857c478bd9Sstevel@tonic-gate 	/*
20867c478bd9Sstevel@tonic-gate 	 * Tell all the worker threads to exit.
20877c478bd9Sstevel@tonic-gate 	 */
20887c478bd9Sstevel@tonic-gate 	cv_broadcast(&mi->mi_async_work_cv);
20897c478bd9Sstevel@tonic-gate 	while (mi->mi_threads != 0) {
20907c478bd9Sstevel@tonic-gate 		if (!cv_wait_sig(&mi->mi_async_cv, &mi->mi_async_lock))
20917c478bd9Sstevel@tonic-gate 			break;
20927c478bd9Sstevel@tonic-gate 	}
20937c478bd9Sstevel@tonic-gate 	rval = (mi->mi_threads != 0);	/* Interrupted */
20947c478bd9Sstevel@tonic-gate 	if (rval)
20957c478bd9Sstevel@tonic-gate 		mi->mi_max_threads = omax;
20967c478bd9Sstevel@tonic-gate 	mutex_exit(&mi->mi_async_lock);
20977c478bd9Sstevel@tonic-gate 
20987c478bd9Sstevel@tonic-gate 	return (rval);
20997c478bd9Sstevel@tonic-gate }
21007c478bd9Sstevel@tonic-gate 
/*
 * Copy tcount bytes of write data from uio into vp's cached pages
 * mapped at base (or through the VPM interfaces when vpm_enable is
 * set), creating pages without reading them in when an entire page
 * will be overwritten or the write starts a page at/beyond EOF.
 * Returns 0 on success or an errno from the data copy or the segmap
 * unlock.
 *
 * Caller must hold r_rwlock as writer, and tcount must not exceed
 * MAXBSIZE or the uio residual (asserted below).  When pgcreated is
 * nonzero the caller has already created and locked the first page
 * at base.
 */
int
writerp(rnode_t *rp, caddr_t base, int tcount, struct uio *uio, int pgcreated)
{
	int pagecreate;
	int n;
	int saved_n;
	caddr_t saved_base;
	u_offset_t offset;
	int error;
	int sm_error;
	vnode_t *vp = RTOV(rp);

	ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
	ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_WRITER));
	if (!vpm_enable) {
		/* base..base+tcount must stay inside one MAXBSIZE window */
		ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
	}

	/*
	 * Move bytes in at most PAGESIZE chunks. We must avoid
	 * spanning pages in uiomove() because page faults may cause
	 * the cache to be invalidated out from under us. The r_size is not
	 * updated until after the uiomove. If we push the last page of a
	 * file before r_size is correct, we will lose the data written past
	 * the current (and invalid) r_size.
	 */
	do {
		offset = uio->uio_loffset;
		pagecreate = 0;

		/*
		 * n is the number of bytes required to satisfy the request
		 *   or the number of bytes to fill out the page.
		 */
		n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);

		/*
		 * Check to see if we can skip reading in the page
		 * and just allocate the memory.  We can do this
		 * if we are going to rewrite the entire mapping
		 * or if we are going to write to or beyond the current
		 * end of file from the beginning of the mapping.
		 *
		 * The read of r_size is now protected by r_statelock.
		 */
		mutex_enter(&rp->r_statelock);
		/*
		 * When pgcreated is nonzero the caller has already done
		 * a segmap_getmapflt with forcefault 0 and S_WRITE. With
		 * segkpm this means we already have at least one page
		 * created and mapped at base.
		 */
		pagecreate = pgcreated ||
			((offset & PAGEOFFSET) == 0 &&
			(n == PAGESIZE || ((offset + n) >= rp->r_size)));

		mutex_exit(&rp->r_statelock);
		if (!vpm_enable && pagecreate) {
			/*
			 * The last argument tells segmap_pagecreate() to
			 * always lock the page, as opposed to sometimes
			 * returning with the page locked. This way we avoid a
			 * fault on the ensuing uiomove(), but also
			 * more importantly (to fix bug 1094402) we can
			 * call segmap_fault() to unlock the page in all
			 * cases. An alternative would be to modify
			 * segmap_pagecreate() to tell us when it is
			 * locking a page, but that's a fairly major
			 * interface change.
			 */
			if (pgcreated == 0)
				(void) segmap_pagecreate(segkmap, base,
							(uint_t)n, 1);
			/* remember the mapping so we can unlock it below */
			saved_base = base;
			saved_n = n;
		}

		/*
		 * The number of bytes of data in the last page cannot
		 * be accurately determined while the page is being
		 * uiomove'd to and the size of the file being updated.
		 * Thus, inform threads which need to know accurately
		 * how much data is in the last page of the file.  They
		 * will not do the i/o immediately, but will arrange for
		 * the i/o to happen later when this modify operation
		 * will have finished.
		 */
		ASSERT(!(rp->r_flags & RMODINPROGRESS));
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= RMODINPROGRESS;
		rp->r_modaddr = (offset & MAXBMASK);
		mutex_exit(&rp->r_statelock);

		if (vpm_enable) {
			/*
			 * Copy data. If new pages are created, part of
			 * the page that is not written will be initialized
			 * with zeros.
			 */
			error = vpm_data_copy(vp, offset, n, uio,
				!pagecreate, NULL, 0, S_WRITE);
		} else {
			error = uiomove(base, n, UIO_WRITE, uio);
		}

		/*
		 * r_size is the maximum number of
		 * bytes known to be in the file.
		 * Make sure it is at least as high as the
		 * first unwritten byte pointed to by uio_loffset.
		 */
		mutex_enter(&rp->r_statelock);
		if (rp->r_size < uio->uio_loffset)
			rp->r_size = uio->uio_loffset;
		rp->r_flags &= ~RMODINPROGRESS;
		rp->r_flags |= RDIRTY;
		mutex_exit(&rp->r_statelock);

		/* n = # of bytes written */
		n = (int)(uio->uio_loffset - offset);

		if (!vpm_enable) {
			base += n;
		}
		tcount -= n;
		/*
		 * If we created pages w/o initializing them completely,
		 * we need to zero the part that wasn't set up.
		 * This happens on most EOF write cases and if
		 * we had some sort of error during the uiomove.
		 */
		if (!vpm_enable && pagecreate) {
			if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
				(void) kzero(base, PAGESIZE - n);

			if (pgcreated) {
				/*
				 * Caller is responsible for this page,
				 * it was not created in this loop.
				 */
				pgcreated = 0;
			} else {
				/*
				 * For bug 1094402: segmap_pagecreate locks
				 * page. Unlock it. This also unlocks the
				 * pages allocated by page_create_va() in
				 * segmap_pagecreate().
				 */
				sm_error = segmap_fault(kas.a_hat, segkmap,
					saved_base, saved_n,
					F_SOFTUNLOCK, S_WRITE);
				if (error == 0)
					error = sm_error;
			}
		}
	} while (tcount > 0 && error == 0);

	return (error);
}
22607c478bd9Sstevel@tonic-gate 
22617c478bd9Sstevel@tonic-gate int
22627c478bd9Sstevel@tonic-gate nfs_putpages(vnode_t *vp, u_offset_t off, size_t len, int flags, cred_t *cr)
22637c478bd9Sstevel@tonic-gate {
22647c478bd9Sstevel@tonic-gate 	rnode_t *rp;
22657c478bd9Sstevel@tonic-gate 	page_t *pp;
22667c478bd9Sstevel@tonic-gate 	u_offset_t eoff;
22677c478bd9Sstevel@tonic-gate 	u_offset_t io_off;
22687c478bd9Sstevel@tonic-gate 	size_t io_len;
22697c478bd9Sstevel@tonic-gate 	int error;
22707c478bd9Sstevel@tonic-gate 	int rdirty;
22717c478bd9Sstevel@tonic-gate 	int err;
22727c478bd9Sstevel@tonic-gate 
22737c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
22747c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_count > 0);
22757c478bd9Sstevel@tonic-gate 
22767c478bd9Sstevel@tonic-gate 	if (!vn_has_cached_data(vp))
22777c478bd9Sstevel@tonic-gate 		return (0);
22787c478bd9Sstevel@tonic-gate 
22797c478bd9Sstevel@tonic-gate 	ASSERT(vp->v_type != VCHR);
22807c478bd9Sstevel@tonic-gate 
22817c478bd9Sstevel@tonic-gate 	/*
22827c478bd9Sstevel@tonic-gate 	 * If ROUTOFSPACE is set, then all writes turn into B_INVAL
22837c478bd9Sstevel@tonic-gate 	 * writes.  B_FORCE is set to force the VM system to actually
22847c478bd9Sstevel@tonic-gate 	 * invalidate the pages, even if the i/o failed.  The pages
22857c478bd9Sstevel@tonic-gate 	 * need to get invalidated because they can't be written out
22867c478bd9Sstevel@tonic-gate 	 * because there isn't any space left on either the server's
22877c478bd9Sstevel@tonic-gate 	 * file system or in the user's disk quota.  The B_FREE bit
22887c478bd9Sstevel@tonic-gate 	 * is cleared to avoid confusion as to whether this is a
22897c478bd9Sstevel@tonic-gate 	 * request to place the page on the freelist or to destroy
22907c478bd9Sstevel@tonic-gate 	 * it.
22917c478bd9Sstevel@tonic-gate 	 */
22927c478bd9Sstevel@tonic-gate 	if ((rp->r_flags & ROUTOFSPACE) ||
22937c478bd9Sstevel@tonic-gate 	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
22947c478bd9Sstevel@tonic-gate 		flags = (flags & ~B_FREE) | B_INVAL | B_FORCE;
22957c478bd9Sstevel@tonic-gate 
22967c478bd9Sstevel@tonic-gate 	if (len == 0) {
22977c478bd9Sstevel@tonic-gate 		/*
22987c478bd9Sstevel@tonic-gate 		 * If doing a full file synchronous operation, then clear
22997c478bd9Sstevel@tonic-gate 		 * the RDIRTY bit.  If a page gets dirtied while the flush
23007c478bd9Sstevel@tonic-gate 		 * is happening, then RDIRTY will get set again.  The
23017c478bd9Sstevel@tonic-gate 		 * RDIRTY bit must get cleared before the flush so that
23027c478bd9Sstevel@tonic-gate 		 * we don't lose this information.
23035dae4443Sdh145677 		 *
23045dae4443Sdh145677 		 * If there are no full file async write operations
23055dae4443Sdh145677 		 * pending and RDIRTY bit is set, clear it.
23067c478bd9Sstevel@tonic-gate 		 */
23077c478bd9Sstevel@tonic-gate 		if (off == (u_offset_t)0 &&
23087c478bd9Sstevel@tonic-gate 		    !(flags & B_ASYNC) &&
23097c478bd9Sstevel@tonic-gate 		    (rp->r_flags & RDIRTY)) {
23107c478bd9Sstevel@tonic-gate 			mutex_enter(&rp->r_statelock);
23117c478bd9Sstevel@tonic-gate 			rdirty = (rp->r_flags & RDIRTY);
23127c478bd9Sstevel@tonic-gate 			rp->r_flags &= ~RDIRTY;
23137c478bd9Sstevel@tonic-gate 			mutex_exit(&rp->r_statelock);
23145dae4443Sdh145677 		} else if (flags & B_ASYNC && off == (u_offset_t)0) {
23155dae4443Sdh145677 			mutex_enter(&rp->r_statelock);
23165dae4443Sdh145677 			if (rp->r_flags & RDIRTY && rp->r_awcount == 0) {
23175dae4443Sdh145677 				rdirty = (rp->r_flags & RDIRTY);
23185dae4443Sdh145677 				rp->r_flags &= ~RDIRTY;
23195dae4443Sdh145677 			}
23205dae4443Sdh145677 			mutex_exit(&rp->r_statelock);
23217c478bd9Sstevel@tonic-gate 		} else
23227c478bd9Sstevel@tonic-gate 			rdirty = 0;
23237c478bd9Sstevel@tonic-gate 
23247c478bd9Sstevel@tonic-gate 		/*
23257c478bd9Sstevel@tonic-gate 		 * Search the entire vp list for pages >= off, and flush
23267c478bd9Sstevel@tonic-gate 		 * the dirty pages.
23277c478bd9Sstevel@tonic-gate 		 */
23287c478bd9Sstevel@tonic-gate 		error = pvn_vplist_dirty(vp, off, rp->r_putapage,
23297c478bd9Sstevel@tonic-gate 					flags, cr);
23307c478bd9Sstevel@tonic-gate 
23317c478bd9Sstevel@tonic-gate 		/*
2332*da6c28aaSamw 		 * If an error occurred and the file was marked as dirty
23337c478bd9Sstevel@tonic-gate 		 * before and we aren't forcibly invalidating pages, then
23347c478bd9Sstevel@tonic-gate 		 * reset the RDIRTY flag.
23357c478bd9Sstevel@tonic-gate 		 */
23367c478bd9Sstevel@tonic-gate 		if (error && rdirty &&
23377c478bd9Sstevel@tonic-gate 		    (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) {
23387c478bd9Sstevel@tonic-gate 			mutex_enter(&rp->r_statelock);
23397c478bd9Sstevel@tonic-gate 			rp->r_flags |= RDIRTY;
23407c478bd9Sstevel@tonic-gate 			mutex_exit(&rp->r_statelock);
23417c478bd9Sstevel@tonic-gate 		}
23427c478bd9Sstevel@tonic-gate 	} else {
23437c478bd9Sstevel@tonic-gate 		/*
23447c478bd9Sstevel@tonic-gate 		 * Do a range from [off...off + len) looking for pages
23457c478bd9Sstevel@tonic-gate 		 * to deal with.
23467c478bd9Sstevel@tonic-gate 		 */
23477c478bd9Sstevel@tonic-gate 		error = 0;
23487c478bd9Sstevel@tonic-gate #ifdef lint
23497c478bd9Sstevel@tonic-gate 		io_len = 0;
23507c478bd9Sstevel@tonic-gate #endif
23517c478bd9Sstevel@tonic-gate 		eoff = off + len;
23527c478bd9Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
23537c478bd9Sstevel@tonic-gate 		for (io_off = off; io_off < eoff && io_off < rp->r_size;
23547c478bd9Sstevel@tonic-gate 		    io_off += io_len) {
23557c478bd9Sstevel@tonic-gate 			mutex_exit(&rp->r_statelock);
23567c478bd9Sstevel@tonic-gate 			/*
23577c478bd9Sstevel@tonic-gate 			 * If we are not invalidating, synchronously
23587c478bd9Sstevel@tonic-gate 			 * freeing or writing pages use the routine
23597c478bd9Sstevel@tonic-gate 			 * page_lookup_nowait() to prevent reclaiming
23607c478bd9Sstevel@tonic-gate 			 * them from the free list.
23617c478bd9Sstevel@tonic-gate 			 */
23627c478bd9Sstevel@tonic-gate 			if ((flags & B_INVAL) || !(flags & B_ASYNC)) {
23637c478bd9Sstevel@tonic-gate 				pp = page_lookup(vp, io_off,
23647c478bd9Sstevel@tonic-gate 				    (flags & (B_INVAL | B_FREE)) ?
23657c478bd9Sstevel@tonic-gate 				    SE_EXCL : SE_SHARED);
23667c478bd9Sstevel@tonic-gate 			} else {
23677c478bd9Sstevel@tonic-gate 				pp = page_lookup_nowait(vp, io_off,
23687c478bd9Sstevel@tonic-gate 				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
23697c478bd9Sstevel@tonic-gate 			}
23707c478bd9Sstevel@tonic-gate 
23717c478bd9Sstevel@tonic-gate 			if (pp == NULL || !pvn_getdirty(pp, flags))
23727c478bd9Sstevel@tonic-gate 				io_len = PAGESIZE;
23737c478bd9Sstevel@tonic-gate 			else {
23747c478bd9Sstevel@tonic-gate 				err = (*rp->r_putapage)(vp, pp, &io_off,
23757c478bd9Sstevel@tonic-gate 				    &io_len, flags, cr);
23767c478bd9Sstevel@tonic-gate 				if (!error)
23777c478bd9Sstevel@tonic-gate 					error = err;
23787c478bd9Sstevel@tonic-gate 				/*
23797c478bd9Sstevel@tonic-gate 				 * "io_off" and "io_len" are returned as
23807c478bd9Sstevel@tonic-gate 				 * the range of pages we actually wrote.
23817c478bd9Sstevel@tonic-gate 				 * This allows us to skip ahead more quickly
23827c478bd9Sstevel@tonic-gate 				 * since several pages may've been dealt
23837c478bd9Sstevel@tonic-gate 				 * with by this iteration of the loop.
23847c478bd9Sstevel@tonic-gate 				 */
23857c478bd9Sstevel@tonic-gate 			}
23867c478bd9Sstevel@tonic-gate 			mutex_enter(&rp->r_statelock);
23877c478bd9Sstevel@tonic-gate 		}
23887c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
23897c478bd9Sstevel@tonic-gate 	}
23907c478bd9Sstevel@tonic-gate 
23917c478bd9Sstevel@tonic-gate 	return (error);
23927c478bd9Sstevel@tonic-gate }
23937c478bd9Sstevel@tonic-gate 
23947c478bd9Sstevel@tonic-gate void
23957c478bd9Sstevel@tonic-gate nfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr)
23967c478bd9Sstevel@tonic-gate {
23977c478bd9Sstevel@tonic-gate 	rnode_t *rp;
23987c478bd9Sstevel@tonic-gate 
23997c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
24007c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
24017c478bd9Sstevel@tonic-gate 	while (rp->r_flags & RTRUNCATE)
24027c478bd9Sstevel@tonic-gate 		cv_wait(&rp->r_cv, &rp->r_statelock);
24037c478bd9Sstevel@tonic-gate 	rp->r_flags |= RTRUNCATE;
24047c478bd9Sstevel@tonic-gate 	if (off == (u_offset_t)0) {
24057c478bd9Sstevel@tonic-gate 		rp->r_flags &= ~RDIRTY;
24067c478bd9Sstevel@tonic-gate 		if (!(rp->r_flags & RSTALE))
24077c478bd9Sstevel@tonic-gate 			rp->r_error = 0;
24087c478bd9Sstevel@tonic-gate 	}
24097c478bd9Sstevel@tonic-gate 	rp->r_truncaddr = off;
24107c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
24117c478bd9Sstevel@tonic-gate 	(void) pvn_vplist_dirty(vp, off, rp->r_putapage,
24127c478bd9Sstevel@tonic-gate 		B_INVAL | B_TRUNC, cr);
24137c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
24147c478bd9Sstevel@tonic-gate 	rp->r_flags &= ~RTRUNCATE;
24157c478bd9Sstevel@tonic-gate 	cv_broadcast(&rp->r_cv);
24167c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
24177c478bd9Sstevel@tonic-gate }
24187c478bd9Sstevel@tonic-gate 
24197c478bd9Sstevel@tonic-gate static int nfs_write_error_to_cons_only = 0;
24207c478bd9Sstevel@tonic-gate #define	MSG(x)	(nfs_write_error_to_cons_only ? (x) : (x) + 1)
24217c478bd9Sstevel@tonic-gate 
24227c478bd9Sstevel@tonic-gate /*
24237c478bd9Sstevel@tonic-gate  * Print a file handle
24247c478bd9Sstevel@tonic-gate  */
24257c478bd9Sstevel@tonic-gate void
24267c478bd9Sstevel@tonic-gate nfs_printfhandle(nfs_fhandle *fhp)
24277c478bd9Sstevel@tonic-gate {
24287c478bd9Sstevel@tonic-gate 	int *ip;
24297c478bd9Sstevel@tonic-gate 	char *buf;
24307c478bd9Sstevel@tonic-gate 	size_t bufsize;
24317c478bd9Sstevel@tonic-gate 	char *cp;
24327c478bd9Sstevel@tonic-gate 
24337c478bd9Sstevel@tonic-gate 	/*
24347c478bd9Sstevel@tonic-gate 	 * 13 == "(file handle:"
24357c478bd9Sstevel@tonic-gate 	 * maximum of NFS_FHANDLE / sizeof (*ip) elements in fh_buf times
24367c478bd9Sstevel@tonic-gate 	 *	1 == ' '
24377c478bd9Sstevel@tonic-gate 	 *	8 == maximum strlen of "%x"
24387c478bd9Sstevel@tonic-gate 	 * 3 == ")\n\0"
24397c478bd9Sstevel@tonic-gate 	 */
24407c478bd9Sstevel@tonic-gate 	bufsize = 13 + ((NFS_FHANDLE_LEN / sizeof (*ip)) * (1 + 8)) + 3;
24417c478bd9Sstevel@tonic-gate 	buf = kmem_alloc(bufsize, KM_NOSLEEP);
24427c478bd9Sstevel@tonic-gate 	if (buf == NULL)
24437c478bd9Sstevel@tonic-gate 		return;
24447c478bd9Sstevel@tonic-gate 
24457c478bd9Sstevel@tonic-gate 	cp = buf;
24467c478bd9Sstevel@tonic-gate 	(void) strcpy(cp, "(file handle:");
24477c478bd9Sstevel@tonic-gate 	while (*cp != '\0')
24487c478bd9Sstevel@tonic-gate 		cp++;
24497c478bd9Sstevel@tonic-gate 	for (ip = (int *)fhp->fh_buf;
24507c478bd9Sstevel@tonic-gate 	    ip < (int *)&fhp->fh_buf[fhp->fh_len];
24517c478bd9Sstevel@tonic-gate 	    ip++) {
24527c478bd9Sstevel@tonic-gate 		(void) sprintf(cp, " %x", *ip);
24537c478bd9Sstevel@tonic-gate 		while (*cp != '\0')
24547c478bd9Sstevel@tonic-gate 			cp++;
24557c478bd9Sstevel@tonic-gate 	}
24567c478bd9Sstevel@tonic-gate 	(void) strcpy(cp, ")\n");
24577c478bd9Sstevel@tonic-gate 
24587c478bd9Sstevel@tonic-gate 	zcmn_err(getzoneid(), CE_CONT, MSG("^%s"), buf);
24597c478bd9Sstevel@tonic-gate 
24607c478bd9Sstevel@tonic-gate 	kmem_free(buf, bufsize);
24617c478bd9Sstevel@tonic-gate }
24627c478bd9Sstevel@tonic-gate 
24637c478bd9Sstevel@tonic-gate /*
24647c478bd9Sstevel@tonic-gate  * Notify the system administrator that an NFS write error has
24657c478bd9Sstevel@tonic-gate  * occurred.
24667c478bd9Sstevel@tonic-gate  */
24677c478bd9Sstevel@tonic-gate 
24687c478bd9Sstevel@tonic-gate /* seconds between ENOSPC/EDQUOT messages */
24697c478bd9Sstevel@tonic-gate clock_t nfs_write_error_interval = 5;
24707c478bd9Sstevel@tonic-gate 
24717c478bd9Sstevel@tonic-gate void
24727c478bd9Sstevel@tonic-gate nfs_write_error(vnode_t *vp, int error, cred_t *cr)
24737c478bd9Sstevel@tonic-gate {
24747c478bd9Sstevel@tonic-gate 	mntinfo_t *mi;
24757c478bd9Sstevel@tonic-gate 
24767c478bd9Sstevel@tonic-gate 	mi = VTOMI(vp);
24777c478bd9Sstevel@tonic-gate 	/*
24787c478bd9Sstevel@tonic-gate 	 * In case of forced unmount or zone shutdown, do not print any
24797c478bd9Sstevel@tonic-gate 	 * messages since it can flood the console with error messages.
24807c478bd9Sstevel@tonic-gate 	 */
24817c478bd9Sstevel@tonic-gate 	if (FS_OR_ZONE_GONE(mi->mi_vfsp))
24827c478bd9Sstevel@tonic-gate 		return;
24837c478bd9Sstevel@tonic-gate 
24847c478bd9Sstevel@tonic-gate 	/*
24857c478bd9Sstevel@tonic-gate 	 * No use in flooding the console with ENOSPC
24867c478bd9Sstevel@tonic-gate 	 * messages from the same file system.
24877c478bd9Sstevel@tonic-gate 	 */
24887c478bd9Sstevel@tonic-gate 	if ((error != ENOSPC && error != EDQUOT) ||
24897c478bd9Sstevel@tonic-gate 	    lbolt - mi->mi_printftime > 0) {
24907c478bd9Sstevel@tonic-gate 		zoneid_t zoneid = mi->mi_zone->zone_id;
24917c478bd9Sstevel@tonic-gate 
24927c478bd9Sstevel@tonic-gate #ifdef DEBUG
24937c478bd9Sstevel@tonic-gate 		nfs_perror(error, "NFS%ld write error on host %s: %m.\n",
24947c478bd9Sstevel@tonic-gate 		    mi->mi_vers, VTOR(vp)->r_server->sv_hostname, NULL);
24957c478bd9Sstevel@tonic-gate #else
24967c478bd9Sstevel@tonic-gate 		nfs_perror(error, "NFS write error on host %s: %m.\n",
24977c478bd9Sstevel@tonic-gate 		    VTOR(vp)->r_server->sv_hostname, NULL);
24987c478bd9Sstevel@tonic-gate #endif
24997c478bd9Sstevel@tonic-gate 		if (error == ENOSPC || error == EDQUOT) {
25007c478bd9Sstevel@tonic-gate 			zcmn_err(zoneid, CE_CONT,
25017c478bd9Sstevel@tonic-gate 			    MSG("^File: userid=%d, groupid=%d\n"),
25027c478bd9Sstevel@tonic-gate 			    crgetuid(cr), crgetgid(cr));
25037c478bd9Sstevel@tonic-gate 			if (crgetuid(CRED()) != crgetuid(cr) ||
25047c478bd9Sstevel@tonic-gate 			    crgetgid(CRED()) != crgetgid(cr)) {
25057c478bd9Sstevel@tonic-gate 				zcmn_err(zoneid, CE_CONT,
25067c478bd9Sstevel@tonic-gate 				    MSG("^User: userid=%d, groupid=%d\n"),
25077c478bd9Sstevel@tonic-gate 				    crgetuid(CRED()), crgetgid(CRED()));
25087c478bd9Sstevel@tonic-gate 			}
25097c478bd9Sstevel@tonic-gate 			mi->mi_printftime = lbolt +
25107c478bd9Sstevel@tonic-gate 			    nfs_write_error_interval * hz;
25117c478bd9Sstevel@tonic-gate 		}
25127c478bd9Sstevel@tonic-gate 		nfs_printfhandle(&VTOR(vp)->r_fh);
25137c478bd9Sstevel@tonic-gate #ifdef DEBUG
25147c478bd9Sstevel@tonic-gate 		if (error == EACCES) {
25157c478bd9Sstevel@tonic-gate 			zcmn_err(zoneid, CE_CONT,
25167c478bd9Sstevel@tonic-gate 			    MSG("^nfs_bio: cred is%s kcred\n"),
25177c478bd9Sstevel@tonic-gate 			    cr == kcred ? "" : " not");
25187c478bd9Sstevel@tonic-gate 		}
25197c478bd9Sstevel@tonic-gate #endif
25207c478bd9Sstevel@tonic-gate 	}
25217c478bd9Sstevel@tonic-gate }
25227c478bd9Sstevel@tonic-gate 
25237c478bd9Sstevel@tonic-gate /* ARGSUSED */
25247c478bd9Sstevel@tonic-gate static void *
25257c478bd9Sstevel@tonic-gate nfs_mi_init(zoneid_t zoneid)
25267c478bd9Sstevel@tonic-gate {
25277c478bd9Sstevel@tonic-gate 	struct mi_globals *mig;
25287c478bd9Sstevel@tonic-gate 
25297c478bd9Sstevel@tonic-gate 	mig = kmem_alloc(sizeof (*mig), KM_SLEEP);
25307c478bd9Sstevel@tonic-gate 	mutex_init(&mig->mig_lock, NULL, MUTEX_DEFAULT, NULL);
25317c478bd9Sstevel@tonic-gate 	list_create(&mig->mig_list, sizeof (mntinfo_t),
25327c478bd9Sstevel@tonic-gate 	    offsetof(mntinfo_t, mi_zone_node));
25337c478bd9Sstevel@tonic-gate 	mig->mig_destructor_called = B_FALSE;
25347c478bd9Sstevel@tonic-gate 	return (mig);
25357c478bd9Sstevel@tonic-gate }
25367c478bd9Sstevel@tonic-gate 
25377c478bd9Sstevel@tonic-gate /*
25387c478bd9Sstevel@tonic-gate  * Callback routine to tell all NFS mounts in the zone to stop creating new
25397c478bd9Sstevel@tonic-gate  * threads.  Existing threads should exit.
25407c478bd9Sstevel@tonic-gate  */
25417c478bd9Sstevel@tonic-gate /* ARGSUSED */
25427c478bd9Sstevel@tonic-gate static void
25437c478bd9Sstevel@tonic-gate nfs_mi_shutdown(zoneid_t zoneid, void *data)
25447c478bd9Sstevel@tonic-gate {
25457c478bd9Sstevel@tonic-gate 	struct mi_globals *mig = data;
25467c478bd9Sstevel@tonic-gate 	mntinfo_t *mi;
25477c478bd9Sstevel@tonic-gate 
25487c478bd9Sstevel@tonic-gate 	ASSERT(mig != NULL);
25493fd6cc29Sthurlow again:
25507c478bd9Sstevel@tonic-gate 	mutex_enter(&mig->mig_lock);
25517c478bd9Sstevel@tonic-gate 	for (mi = list_head(&mig->mig_list); mi != NULL;
25527c478bd9Sstevel@tonic-gate 	    mi = list_next(&mig->mig_list, mi)) {
25533fd6cc29Sthurlow 
25543fd6cc29Sthurlow 		/*
25553fd6cc29Sthurlow 		 * If we've done the shutdown work for this FS, skip.
25563fd6cc29Sthurlow 		 * Once we go off the end of the list, we're done.
25573fd6cc29Sthurlow 		 */
25583fd6cc29Sthurlow 		if (mi->mi_flags & MI_DEAD)
25593fd6cc29Sthurlow 			continue;
25603fd6cc29Sthurlow 
25613fd6cc29Sthurlow 		/*
25623fd6cc29Sthurlow 		 * We will do work, so not done.  Get a hold on the FS.
25633fd6cc29Sthurlow 		 */
25643fd6cc29Sthurlow 		VFS_HOLD(mi->mi_vfsp);
25653fd6cc29Sthurlow 
25667c478bd9Sstevel@tonic-gate 		/*
25677c478bd9Sstevel@tonic-gate 		 * purge the DNLC for this filesystem
25687c478bd9Sstevel@tonic-gate 		 */
25697c478bd9Sstevel@tonic-gate 		(void) dnlc_purge_vfsp(mi->mi_vfsp, 0);
25707c478bd9Sstevel@tonic-gate 
25717c478bd9Sstevel@tonic-gate 		mutex_enter(&mi->mi_async_lock);
25727c478bd9Sstevel@tonic-gate 		/*
25737c478bd9Sstevel@tonic-gate 		 * Tell existing async worker threads to exit.
25747c478bd9Sstevel@tonic-gate 		 */
25757c478bd9Sstevel@tonic-gate 		mi->mi_max_threads = 0;
25767c478bd9Sstevel@tonic-gate 		cv_broadcast(&mi->mi_async_work_cv);
25777c478bd9Sstevel@tonic-gate 		/*
25787c478bd9Sstevel@tonic-gate 		 * Set MI_ASYNC_MGR_STOP so the async manager thread starts
25797c478bd9Sstevel@tonic-gate 		 * getting ready to exit when it's done with its current work.
25803fd6cc29Sthurlow 		 * Also set MI_DEAD to note we've acted on this FS.
25817c478bd9Sstevel@tonic-gate 		 */
25827c478bd9Sstevel@tonic-gate 		mutex_enter(&mi->mi_lock);
25833fd6cc29Sthurlow 		mi->mi_flags |= (MI_ASYNC_MGR_STOP|MI_DEAD);
25847c478bd9Sstevel@tonic-gate 		mutex_exit(&mi->mi_lock);
25857c478bd9Sstevel@tonic-gate 		/*
25867c478bd9Sstevel@tonic-gate 		 * Wake up the async manager thread.
25877c478bd9Sstevel@tonic-gate 		 */
25887c478bd9Sstevel@tonic-gate 		cv_broadcast(&mi->mi_async_reqs_cv);
25897c478bd9Sstevel@tonic-gate 		mutex_exit(&mi->mi_async_lock);
25903fd6cc29Sthurlow 
25913fd6cc29Sthurlow 		/*
25923fd6cc29Sthurlow 		 * Drop lock and release FS, which may change list, then repeat.
25933fd6cc29Sthurlow 		 * We're done when every mi has been done or the list is empty.
25943fd6cc29Sthurlow 		 */
25953fd6cc29Sthurlow 		mutex_exit(&mig->mig_lock);
25963fd6cc29Sthurlow 		VFS_RELE(mi->mi_vfsp);
25973fd6cc29Sthurlow 		goto again;
25987c478bd9Sstevel@tonic-gate 	}
25997c478bd9Sstevel@tonic-gate 	mutex_exit(&mig->mig_lock);
26007c478bd9Sstevel@tonic-gate }
26017c478bd9Sstevel@tonic-gate 
26027c478bd9Sstevel@tonic-gate static void
26037c478bd9Sstevel@tonic-gate nfs_mi_free_globals(struct mi_globals *mig)
26047c478bd9Sstevel@tonic-gate {
26057c478bd9Sstevel@tonic-gate 	list_destroy(&mig->mig_list);	/* makes sure the list is empty */
26067c478bd9Sstevel@tonic-gate 	mutex_destroy(&mig->mig_lock);
26077c478bd9Sstevel@tonic-gate 	kmem_free(mig, sizeof (*mig));
26087c478bd9Sstevel@tonic-gate 
26097c478bd9Sstevel@tonic-gate }
26107c478bd9Sstevel@tonic-gate 
26117c478bd9Sstevel@tonic-gate /* ARGSUSED */
26127c478bd9Sstevel@tonic-gate static void
26137c478bd9Sstevel@tonic-gate nfs_mi_destroy(zoneid_t zoneid, void *data)
26147c478bd9Sstevel@tonic-gate {
26157c478bd9Sstevel@tonic-gate 	struct mi_globals *mig = data;
26167c478bd9Sstevel@tonic-gate 
26177c478bd9Sstevel@tonic-gate 	ASSERT(mig != NULL);
26187c478bd9Sstevel@tonic-gate 	mutex_enter(&mig->mig_lock);
26197c478bd9Sstevel@tonic-gate 	if (list_head(&mig->mig_list) != NULL) {
26207c478bd9Sstevel@tonic-gate 		/* Still waiting for VFS_FREEVFS() */
26217c478bd9Sstevel@tonic-gate 		mig->mig_destructor_called = B_TRUE;
26227c478bd9Sstevel@tonic-gate 		mutex_exit(&mig->mig_lock);
26237c478bd9Sstevel@tonic-gate 		return;
26247c478bd9Sstevel@tonic-gate 	}
26257c478bd9Sstevel@tonic-gate 	nfs_mi_free_globals(mig);
26267c478bd9Sstevel@tonic-gate }
26277c478bd9Sstevel@tonic-gate 
26287c478bd9Sstevel@tonic-gate /*
26297c478bd9Sstevel@tonic-gate  * Add an NFS mount to the per-zone list of NFS mounts.
26307c478bd9Sstevel@tonic-gate  */
26317c478bd9Sstevel@tonic-gate void
26327c478bd9Sstevel@tonic-gate nfs_mi_zonelist_add(mntinfo_t *mi)
26337c478bd9Sstevel@tonic-gate {
26347c478bd9Sstevel@tonic-gate 	struct mi_globals *mig;
26357c478bd9Sstevel@tonic-gate 
26367c478bd9Sstevel@tonic-gate 	mig = zone_getspecific(mi_list_key, mi->mi_zone);
26377c478bd9Sstevel@tonic-gate 	mutex_enter(&mig->mig_lock);
26387c478bd9Sstevel@tonic-gate 	list_insert_head(&mig->mig_list, mi);
26397c478bd9Sstevel@tonic-gate 	mutex_exit(&mig->mig_lock);
26407c478bd9Sstevel@tonic-gate }
26417c478bd9Sstevel@tonic-gate 
26427c478bd9Sstevel@tonic-gate /*
26437c478bd9Sstevel@tonic-gate  * Remove an NFS mount from the per-zone list of NFS mounts.
26447c478bd9Sstevel@tonic-gate  */
26457c478bd9Sstevel@tonic-gate static void
26467c478bd9Sstevel@tonic-gate nfs_mi_zonelist_remove(mntinfo_t *mi)
26477c478bd9Sstevel@tonic-gate {
26487c478bd9Sstevel@tonic-gate 	struct mi_globals *mig;
26497c478bd9Sstevel@tonic-gate 
26507c478bd9Sstevel@tonic-gate 	mig = zone_getspecific(mi_list_key, mi->mi_zone);
26517c478bd9Sstevel@tonic-gate 	mutex_enter(&mig->mig_lock);
26527c478bd9Sstevel@tonic-gate 	list_remove(&mig->mig_list, mi);
26537c478bd9Sstevel@tonic-gate 	/*
26547c478bd9Sstevel@tonic-gate 	 * We can be called asynchronously by VFS_FREEVFS() after the zone
26557c478bd9Sstevel@tonic-gate 	 * shutdown/destroy callbacks have executed; if so, clean up the zone's
26567c478bd9Sstevel@tonic-gate 	 * mi globals.
26577c478bd9Sstevel@tonic-gate 	 */
26587c478bd9Sstevel@tonic-gate 	if (list_head(&mig->mig_list) == NULL &&
26597c478bd9Sstevel@tonic-gate 	    mig->mig_destructor_called == B_TRUE) {
26607c478bd9Sstevel@tonic-gate 		nfs_mi_free_globals(mig);
26617c478bd9Sstevel@tonic-gate 		return;
26627c478bd9Sstevel@tonic-gate 	}
26637c478bd9Sstevel@tonic-gate 	mutex_exit(&mig->mig_lock);
26647c478bd9Sstevel@tonic-gate }
26657c478bd9Sstevel@tonic-gate 
26667c478bd9Sstevel@tonic-gate /*
26677c478bd9Sstevel@tonic-gate  * NFS Client initialization routine.  This routine should only be called
26687c478bd9Sstevel@tonic-gate  * once.  It performs the following tasks:
26697c478bd9Sstevel@tonic-gate  *	- Initalize all global locks
26707c478bd9Sstevel@tonic-gate  * 	- Call sub-initialization routines (localize access to variables)
26717c478bd9Sstevel@tonic-gate  */
26727c478bd9Sstevel@tonic-gate int
26737c478bd9Sstevel@tonic-gate nfs_clntinit(void)
26747c478bd9Sstevel@tonic-gate {
26757c478bd9Sstevel@tonic-gate #ifdef DEBUG
26767c478bd9Sstevel@tonic-gate 	static boolean_t nfs_clntup = B_FALSE;
26777c478bd9Sstevel@tonic-gate #endif
26787c478bd9Sstevel@tonic-gate 	int error;
26797c478bd9Sstevel@tonic-gate 
26807c478bd9Sstevel@tonic-gate #ifdef DEBUG
26817c478bd9Sstevel@tonic-gate 	ASSERT(nfs_clntup == B_FALSE);
26827c478bd9Sstevel@tonic-gate #endif
26837c478bd9Sstevel@tonic-gate 
26847c478bd9Sstevel@tonic-gate 	error = nfs_subrinit();
26857c478bd9Sstevel@tonic-gate 	if (error)
26867c478bd9Sstevel@tonic-gate 		return (error);
26877c478bd9Sstevel@tonic-gate 
26887c478bd9Sstevel@tonic-gate 	error = nfs_vfsinit();
26897c478bd9Sstevel@tonic-gate 	if (error) {
26907c478bd9Sstevel@tonic-gate 		/*
26917c478bd9Sstevel@tonic-gate 		 * Cleanup nfs_subrinit() work
26927c478bd9Sstevel@tonic-gate 		 */
26937c478bd9Sstevel@tonic-gate 		nfs_subrfini();
26947c478bd9Sstevel@tonic-gate 		return (error);
26957c478bd9Sstevel@tonic-gate 	}
26967c478bd9Sstevel@tonic-gate 	zone_key_create(&mi_list_key, nfs_mi_init, nfs_mi_shutdown,
26977c478bd9Sstevel@tonic-gate 	    nfs_mi_destroy);
26987c478bd9Sstevel@tonic-gate 
26997c478bd9Sstevel@tonic-gate 	nfs4_clnt_init();
27007c478bd9Sstevel@tonic-gate 
27017c478bd9Sstevel@tonic-gate #ifdef DEBUG
27027c478bd9Sstevel@tonic-gate 	nfs_clntup = B_TRUE;
27037c478bd9Sstevel@tonic-gate #endif
27047c478bd9Sstevel@tonic-gate 
27057c478bd9Sstevel@tonic-gate 	return (0);
27067c478bd9Sstevel@tonic-gate }
27077c478bd9Sstevel@tonic-gate 
27087c478bd9Sstevel@tonic-gate /*
27097c478bd9Sstevel@tonic-gate  * This routine is only called if the NFS Client has been initialized but
27107c478bd9Sstevel@tonic-gate  * the module failed to be installed. This routine will cleanup the previously
27117c478bd9Sstevel@tonic-gate  * allocated/initialized work.
27127c478bd9Sstevel@tonic-gate  */
27137c478bd9Sstevel@tonic-gate void
27147c478bd9Sstevel@tonic-gate nfs_clntfini(void)
27157c478bd9Sstevel@tonic-gate {
27167c478bd9Sstevel@tonic-gate 	(void) zone_key_delete(mi_list_key);
27177c478bd9Sstevel@tonic-gate 	nfs_subrfini();
27187c478bd9Sstevel@tonic-gate 	nfs_vfsfini();
27197c478bd9Sstevel@tonic-gate 	nfs4_clnt_fini();
27207c478bd9Sstevel@tonic-gate }
27217c478bd9Sstevel@tonic-gate 
27227c478bd9Sstevel@tonic-gate /*
27237c478bd9Sstevel@tonic-gate  * nfs_lockrelease:
27247c478bd9Sstevel@tonic-gate  *
27257c478bd9Sstevel@tonic-gate  * Release any locks on the given vnode that are held by the current
27267c478bd9Sstevel@tonic-gate  * process.
27277c478bd9Sstevel@tonic-gate  */
27287c478bd9Sstevel@tonic-gate void
27297c478bd9Sstevel@tonic-gate nfs_lockrelease(vnode_t *vp, int flag, offset_t offset, cred_t *cr)
27307c478bd9Sstevel@tonic-gate {
27317c478bd9Sstevel@tonic-gate 	flock64_t ld;
27327c478bd9Sstevel@tonic-gate 	struct shrlock shr;
27337c478bd9Sstevel@tonic-gate 	char *buf;
27347c478bd9Sstevel@tonic-gate 	int remote_lock_possible;
27357c478bd9Sstevel@tonic-gate 	int ret;
27367c478bd9Sstevel@tonic-gate 
27377c478bd9Sstevel@tonic-gate 	ASSERT((uintptr_t)vp > KERNELBASE);
27387c478bd9Sstevel@tonic-gate 
27397c478bd9Sstevel@tonic-gate 	/*
27407c478bd9Sstevel@tonic-gate 	 * Generate an explicit unlock operation for the entire file.  As a
27417c478bd9Sstevel@tonic-gate 	 * partial optimization, only generate the unlock if there is a
27427c478bd9Sstevel@tonic-gate 	 * lock registered for the file.  We could check whether this
27437c478bd9Sstevel@tonic-gate 	 * particular process has any locks on the file, but that would
27447c478bd9Sstevel@tonic-gate 	 * require the local locking code to provide yet another query
27457c478bd9Sstevel@tonic-gate 	 * routine.  Note that no explicit synchronization is needed here.
27467c478bd9Sstevel@tonic-gate 	 * At worst, flk_has_remote_locks() will return a false positive,
27477c478bd9Sstevel@tonic-gate 	 * in which case the unlock call wastes time but doesn't harm
27487c478bd9Sstevel@tonic-gate 	 * correctness.
27497c478bd9Sstevel@tonic-gate 	 *
27507c478bd9Sstevel@tonic-gate 	 * In addition, an unlock request is generated if the process
27517c478bd9Sstevel@tonic-gate 	 * is listed as possibly having a lock on the file because the
27527c478bd9Sstevel@tonic-gate 	 * server and client lock managers may have gotten out of sync.
27537c478bd9Sstevel@tonic-gate 	 * N.B. It is important to make sure nfs_remove_locking_id() is
27547c478bd9Sstevel@tonic-gate 	 * called here even if flk_has_remote_locks(vp) reports true.
27557c478bd9Sstevel@tonic-gate 	 * If it is not called and there is an entry on the process id
27567c478bd9Sstevel@tonic-gate 	 * list, that entry will never get removed.
27577c478bd9Sstevel@tonic-gate 	 */
27587c478bd9Sstevel@tonic-gate 	remote_lock_possible = nfs_remove_locking_id(vp, RLMPL_PID,
27597c478bd9Sstevel@tonic-gate 	    (char *)&(ttoproc(curthread)->p_pid), NULL, NULL);
27607c478bd9Sstevel@tonic-gate 	if (remote_lock_possible || flk_has_remote_locks(vp)) {
27617c478bd9Sstevel@tonic-gate 		ld.l_type = F_UNLCK;	/* set to unlock entire file */
27627c478bd9Sstevel@tonic-gate 		ld.l_whence = 0;	/* unlock from start of file */
27637c478bd9Sstevel@tonic-gate 		ld.l_start = 0;
27647c478bd9Sstevel@tonic-gate 		ld.l_len = 0;		/* do entire file */
2765*da6c28aaSamw 		ret = VOP_FRLOCK(vp, F_SETLK, &ld, flag, offset, NULL, cr,
2766*da6c28aaSamw 			NULL);
27677c478bd9Sstevel@tonic-gate 
27687c478bd9Sstevel@tonic-gate 		if (ret != 0) {
27697c478bd9Sstevel@tonic-gate 			/*
27707c478bd9Sstevel@tonic-gate 			 * If VOP_FRLOCK fails, make sure we unregister
27717c478bd9Sstevel@tonic-gate 			 * local locks before we continue.
27727c478bd9Sstevel@tonic-gate 			 */
27737c478bd9Sstevel@tonic-gate 			ld.l_pid = ttoproc(curthread)->p_pid;
27747c478bd9Sstevel@tonic-gate 			lm_register_lock_locally(vp, NULL, &ld, flag, offset);
27757c478bd9Sstevel@tonic-gate #ifdef DEBUG
27767c478bd9Sstevel@tonic-gate 			nfs_perror(ret,
27777c478bd9Sstevel@tonic-gate 			    "NFS lock release error on vp %p: %m.\n",
27787c478bd9Sstevel@tonic-gate 			    (void *)vp, NULL);
27797c478bd9Sstevel@tonic-gate #endif
27807c478bd9Sstevel@tonic-gate 		}
27817c478bd9Sstevel@tonic-gate 
27827c478bd9Sstevel@tonic-gate 		/*
27837c478bd9Sstevel@tonic-gate 		 * The call to VOP_FRLOCK may put the pid back on the
27847c478bd9Sstevel@tonic-gate 		 * list.  We need to remove it.
27857c478bd9Sstevel@tonic-gate 		 */
27867c478bd9Sstevel@tonic-gate 		(void) nfs_remove_locking_id(vp, RLMPL_PID,
27877c478bd9Sstevel@tonic-gate 		    (char *)&(ttoproc(curthread)->p_pid), NULL, NULL);
27887c478bd9Sstevel@tonic-gate 	}
27897c478bd9Sstevel@tonic-gate 
27907c478bd9Sstevel@tonic-gate 	/*
27917c478bd9Sstevel@tonic-gate 	 * As long as the vp has a share matching our pid,
27927c478bd9Sstevel@tonic-gate 	 * pluck it off and unshare it.  There are circumstances in
27937c478bd9Sstevel@tonic-gate 	 * which the call to nfs_remove_locking_id() may put the
27947c478bd9Sstevel@tonic-gate 	 * owner back on the list, in which case we simply do a
27957c478bd9Sstevel@tonic-gate 	 * redundant and harmless unshare.
27967c478bd9Sstevel@tonic-gate 	 */
27977c478bd9Sstevel@tonic-gate 	buf = kmem_alloc(MAX_SHR_OWNER_LEN, KM_SLEEP);
27987c478bd9Sstevel@tonic-gate 	while (nfs_remove_locking_id(vp, RLMPL_OWNER,
27997c478bd9Sstevel@tonic-gate 	    (char *)NULL, buf, &shr.s_own_len)) {
28007c478bd9Sstevel@tonic-gate 		shr.s_owner = buf;
28017c478bd9Sstevel@tonic-gate 		shr.s_access = 0;
28027c478bd9Sstevel@tonic-gate 		shr.s_deny = 0;
28037c478bd9Sstevel@tonic-gate 		shr.s_sysid = 0;
28047c478bd9Sstevel@tonic-gate 		shr.s_pid = curproc->p_pid;
28057c478bd9Sstevel@tonic-gate 
2806*da6c28aaSamw 		ret = VOP_SHRLOCK(vp, F_UNSHARE, &shr, flag, cr, NULL);
28077c478bd9Sstevel@tonic-gate #ifdef DEBUG
28087c478bd9Sstevel@tonic-gate 		if (ret != 0) {
28097c478bd9Sstevel@tonic-gate 			nfs_perror(ret,
28107c478bd9Sstevel@tonic-gate 			    "NFS share release error on vp %p: %m.\n",
28117c478bd9Sstevel@tonic-gate 			    (void *)vp, NULL);
28127c478bd9Sstevel@tonic-gate 		}
28137c478bd9Sstevel@tonic-gate #endif
28147c478bd9Sstevel@tonic-gate 	}
28157c478bd9Sstevel@tonic-gate 	kmem_free(buf, MAX_SHR_OWNER_LEN);
28167c478bd9Sstevel@tonic-gate }
28177c478bd9Sstevel@tonic-gate 
28187c478bd9Sstevel@tonic-gate /*
28197c478bd9Sstevel@tonic-gate  * nfs_lockcompletion:
28207c478bd9Sstevel@tonic-gate  *
28217c478bd9Sstevel@tonic-gate  * If the vnode has a lock that makes it unsafe to cache the file, mark it
28227c478bd9Sstevel@tonic-gate  * as non cachable (set VNOCACHE bit).
28237c478bd9Sstevel@tonic-gate  */
28247c478bd9Sstevel@tonic-gate 
28257c478bd9Sstevel@tonic-gate void
28267c478bd9Sstevel@tonic-gate nfs_lockcompletion(vnode_t *vp, int cmd)
28277c478bd9Sstevel@tonic-gate {
28287c478bd9Sstevel@tonic-gate #ifdef DEBUG
28297c478bd9Sstevel@tonic-gate 	rnode_t *rp = VTOR(vp);
28307c478bd9Sstevel@tonic-gate 
28317c478bd9Sstevel@tonic-gate 	ASSERT(nfs_rw_lock_held(&rp->r_lkserlock, RW_WRITER));
28327c478bd9Sstevel@tonic-gate #endif
28337c478bd9Sstevel@tonic-gate 
28347c478bd9Sstevel@tonic-gate 	if (cmd == F_SETLK || cmd == F_SETLKW) {
28357c478bd9Sstevel@tonic-gate 		if (!lm_safemap(vp)) {
28367c478bd9Sstevel@tonic-gate 			mutex_enter(&vp->v_lock);
28377c478bd9Sstevel@tonic-gate 			vp->v_flag |= VNOCACHE;
28387c478bd9Sstevel@tonic-gate 			mutex_exit(&vp->v_lock);
28397c478bd9Sstevel@tonic-gate 		} else {
28407c478bd9Sstevel@tonic-gate 			mutex_enter(&vp->v_lock);
28417c478bd9Sstevel@tonic-gate 			vp->v_flag &= ~VNOCACHE;
28427c478bd9Sstevel@tonic-gate 			mutex_exit(&vp->v_lock);
28437c478bd9Sstevel@tonic-gate 		}
28447c478bd9Sstevel@tonic-gate 	}
28457c478bd9Sstevel@tonic-gate 	/*
28467c478bd9Sstevel@tonic-gate 	 * The cached attributes of the file are stale after acquiring
28477c478bd9Sstevel@tonic-gate 	 * the lock on the file. They were updated when the file was
28487c478bd9Sstevel@tonic-gate 	 * opened, but not updated when the lock was acquired. Therefore the
28497c478bd9Sstevel@tonic-gate 	 * cached attributes are invalidated after the lock is obtained.
28507c478bd9Sstevel@tonic-gate 	 */
28517c478bd9Sstevel@tonic-gate 	PURGE_ATTRCACHE(vp);
28527c478bd9Sstevel@tonic-gate }
28537c478bd9Sstevel@tonic-gate 
28547c478bd9Sstevel@tonic-gate /*
28557c478bd9Sstevel@tonic-gate  * The lock manager holds state making it possible for the client
28567c478bd9Sstevel@tonic-gate  * and server to be out of sync.  For example, if the response from
28577c478bd9Sstevel@tonic-gate  * the server granting a lock request is lost, the server will think
28587c478bd9Sstevel@tonic-gate  * the lock is granted and the client will think the lock is lost.
28597c478bd9Sstevel@tonic-gate  * The client can tell when it is not positive if it is in sync with
28607c478bd9Sstevel@tonic-gate  * the server.
28617c478bd9Sstevel@tonic-gate  *
28627c478bd9Sstevel@tonic-gate  * To deal with this, a list of processes for which the client is
28637c478bd9Sstevel@tonic-gate  * not sure if the server holds a lock is attached to the rnode.
28647c478bd9Sstevel@tonic-gate  * When such a process closes the rnode, an unlock request is sent
28657c478bd9Sstevel@tonic-gate  * to the server to unlock the entire file.
28667c478bd9Sstevel@tonic-gate  *
 * The list is kept as a singly linked, NULL-terminated list.
 * Because it is only added to under extreme error conditions, the
 * list shouldn't get very big.  DEBUG kernels print a message if
 * the list gets bigger than nfs_lmpl_high_water.  This is arbitrarily
 * chosen to be 8, but can be tuned at runtime.
28727c478bd9Sstevel@tonic-gate  */
#ifdef DEBUG
/* int nfs_lmpl_high_water = 8; */
int nfs_lmpl_high_water = 128;	/* warn when an r_lmpl list exceeds this */
int nfs_cnt_add_locking_id = 0;	/* total calls to nfs_add_locking_id() */
int nfs_len_add_locking_id = 0;	/* longest r_lmpl list seen at add time */
#endif /* DEBUG */
28797c478bd9Sstevel@tonic-gate 
/*
 * Record that the nfs lock manager server may be holding a lock on
 * a vnode for a process.
 *
 * Because the nfs lock manager server holds state, it is possible
 * for the server to get out of sync with the client.  This routine is
 * called from the client when it is no longer sure if the server is in
 * sync with the client.  nfs_lockrelease() will then notice this and
 * send an unlock request when the file is closed.
 *
 *	vp	vnode the lock applies to
 *	pid	process on whose behalf the lock was taken
 *	type	RLMPL_PID or RLMPL_OWNER
 *	id	opaque lock-owner identifier (copied, caller keeps ownership)
 *	len	length of id in bytes; must be < MAX_SHR_OWNER_LEN
 */
void
nfs_add_locking_id(vnode_t *vp, pid_t pid, int type, char *id, int len)
{
	rnode_t *rp;
	lmpl_t *new;
	lmpl_t *cur;
	lmpl_t **lmplp;
#ifdef DEBUG
	int list_len = 1;
#endif /* DEBUG */

#ifdef DEBUG
	++nfs_cnt_add_locking_id;
#endif /* DEBUG */
	/*
	 * allocate new lmpl_t now so we don't sleep
	 * later after grabbing mutexes
	 */
	ASSERT(len < MAX_SHR_OWNER_LEN);
	new = kmem_alloc(sizeof (*new), KM_SLEEP);
	new->lmpl_type = type;
	new->lmpl_pid = pid;
	new->lmpl_owner = kmem_alloc(len, KM_SLEEP);
	bcopy(id, new->lmpl_owner, len);
	new->lmpl_own_len = len;
	new->lmpl_next = (lmpl_t *)NULL;
#ifdef DEBUG
	/* For RLMPL_PID entries the owner id must be the pid itself. */
	if (type == RLMPL_PID) {
		ASSERT(len == sizeof (pid_t));
		ASSERT(pid == *(pid_t *)new->lmpl_owner);
	} else {
		ASSERT(type == RLMPL_OWNER);
	}
#endif

	rp = VTOR(vp);
	mutex_enter(&rp->r_statelock);

	/*
	 * Add this id to the list for this rnode only if the
	 * rnode is active and the id is not already there.
	 */
	ASSERT(rp->r_flags & RHASHED);
	lmplp = &(rp->r_lmpl);
	for (cur = rp->r_lmpl; cur != (lmpl_t *)NULL; cur = cur->lmpl_next) {
		if (cur->lmpl_pid == pid &&
		    cur->lmpl_type == type &&
		    cur->lmpl_own_len == len &&
		    bcmp(cur->lmpl_owner, new->lmpl_owner, len) == 0) {
			/* Duplicate entry: discard the one we preallocated. */
			kmem_free(new->lmpl_owner, len);
			kmem_free(new, sizeof (*new));
			break;
		}
		lmplp = &cur->lmpl_next;
#ifdef DEBUG
		++list_len;
#endif /* DEBUG */
	}
	if (cur == (lmpl_t *)NULL) {
		/* Not found: lmplp points at the tail link, append there. */
		*lmplp = new;
#ifdef DEBUG
		if (list_len > nfs_len_add_locking_id) {
			nfs_len_add_locking_id = list_len;
		}
		if (list_len > nfs_lmpl_high_water) {
			cmn_err(CE_WARN, "nfs_add_locking_id: long list "
			    "vp=%p is %d", (void *)vp, list_len);
		}
#endif /* DEBUG */
	}

#ifdef DEBUG
	if (share_debug) {
		int nitems = 0;
		int npids = 0;
		int nowners = 0;

		/*
		 * Count the number of things on r_lmpl after the add.
		 */
		for (cur = rp->r_lmpl; cur != (lmpl_t *)NULL;
		    cur = cur->lmpl_next) {
			nitems++;
			if (cur->lmpl_type == RLMPL_PID) {
				npids++;
			} else if (cur->lmpl_type == RLMPL_OWNER) {
				nowners++;
			} else {
				cmn_err(CE_PANIC, "nfs_add_locking_id: "
				    "unrecognized lmpl_type %d",
				    cur->lmpl_type);
			}
		}

		cmn_err(CE_CONT, "nfs_add_locking_id(%s): %d PIDs + %d "
		    "OWNs = %d items left on r_lmpl\n",
		    (type == RLMPL_PID) ? "P" : "O", npids, nowners, nitems);
	}
#endif

	mutex_exit(&rp->r_statelock);
}
29927c478bd9Sstevel@tonic-gate 
29937c478bd9Sstevel@tonic-gate /*
29947c478bd9Sstevel@tonic-gate  * Remove an id from the lock manager id list.
29957c478bd9Sstevel@tonic-gate  *
29967c478bd9Sstevel@tonic-gate  * If the id is not in the list return 0.  If it was found and
29977c478bd9Sstevel@tonic-gate  * removed, return 1.
29987c478bd9Sstevel@tonic-gate  */
29997c478bd9Sstevel@tonic-gate static int
30007c478bd9Sstevel@tonic-gate nfs_remove_locking_id(vnode_t *vp, int type, char *id, char *rid, int *rlen)
30017c478bd9Sstevel@tonic-gate {
30027c478bd9Sstevel@tonic-gate 	lmpl_t *cur;
30037c478bd9Sstevel@tonic-gate 	lmpl_t **lmplp;
30047c478bd9Sstevel@tonic-gate 	rnode_t *rp;
30057c478bd9Sstevel@tonic-gate 	int rv = 0;
30067c478bd9Sstevel@tonic-gate 
30077c478bd9Sstevel@tonic-gate 	ASSERT(type == RLMPL_PID || type == RLMPL_OWNER);
30087c478bd9Sstevel@tonic-gate 
30097c478bd9Sstevel@tonic-gate 	rp = VTOR(vp);
30107c478bd9Sstevel@tonic-gate 
30117c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
30127c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_flags & RHASHED);
30137c478bd9Sstevel@tonic-gate 	lmplp = &(rp->r_lmpl);
30147c478bd9Sstevel@tonic-gate 
30157c478bd9Sstevel@tonic-gate 	/*
30167c478bd9Sstevel@tonic-gate 	 * Search through the list and remove the entry for this id
30177c478bd9Sstevel@tonic-gate 	 * if it is there.  The special case id == NULL allows removal
30187c478bd9Sstevel@tonic-gate 	 * of the first share on the r_lmpl list belonging to the
30197c478bd9Sstevel@tonic-gate 	 * current process (if any), without regard to further details
30207c478bd9Sstevel@tonic-gate 	 * of its identity.
30217c478bd9Sstevel@tonic-gate 	 */
30227c478bd9Sstevel@tonic-gate 	for (cur = rp->r_lmpl; cur != (lmpl_t *)NULL; cur = cur->lmpl_next) {
30237c478bd9Sstevel@tonic-gate 		if (cur->lmpl_type == type &&
30247c478bd9Sstevel@tonic-gate 		    cur->lmpl_pid == curproc->p_pid &&
30257c478bd9Sstevel@tonic-gate 		    (id == (char *)NULL ||
30267c478bd9Sstevel@tonic-gate 		    bcmp(cur->lmpl_owner, id, cur->lmpl_own_len) == 0)) {
30277c478bd9Sstevel@tonic-gate 			*lmplp = cur->lmpl_next;
30287c478bd9Sstevel@tonic-gate 			ASSERT(cur->lmpl_own_len < MAX_SHR_OWNER_LEN);
30297c478bd9Sstevel@tonic-gate 			if (rid != NULL) {
30307c478bd9Sstevel@tonic-gate 				bcopy(cur->lmpl_owner, rid, cur->lmpl_own_len);
30317c478bd9Sstevel@tonic-gate 				*rlen = cur->lmpl_own_len;
30327c478bd9Sstevel@tonic-gate 			}
30337c478bd9Sstevel@tonic-gate 			kmem_free(cur->lmpl_owner, cur->lmpl_own_len);
30347c478bd9Sstevel@tonic-gate 			kmem_free(cur, sizeof (*cur));
30357c478bd9Sstevel@tonic-gate 			rv = 1;
30367c478bd9Sstevel@tonic-gate 			break;
30377c478bd9Sstevel@tonic-gate 		}
30387c478bd9Sstevel@tonic-gate 		lmplp = &cur->lmpl_next;
30397c478bd9Sstevel@tonic-gate 	}
30407c478bd9Sstevel@tonic-gate 
30417c478bd9Sstevel@tonic-gate #ifdef DEBUG
30427c478bd9Sstevel@tonic-gate 	if (share_debug) {
30437c478bd9Sstevel@tonic-gate 		int nitems = 0;
30447c478bd9Sstevel@tonic-gate 		int npids = 0;
30457c478bd9Sstevel@tonic-gate 		int nowners = 0;
30467c478bd9Sstevel@tonic-gate 
30477c478bd9Sstevel@tonic-gate 		/*
30487c478bd9Sstevel@tonic-gate 		 * Count the number of things left on r_lmpl after the remove.
30497c478bd9Sstevel@tonic-gate 		 */
30507c478bd9Sstevel@tonic-gate 		for (cur = rp->r_lmpl; cur != (lmpl_t *)NULL;
30517c478bd9Sstevel@tonic-gate 				cur = cur->lmpl_next) {
30527c478bd9Sstevel@tonic-gate 			nitems++;
30537c478bd9Sstevel@tonic-gate 			if (cur->lmpl_type == RLMPL_PID) {
30547c478bd9Sstevel@tonic-gate 				npids++;
30557c478bd9Sstevel@tonic-gate 			} else if (cur->lmpl_type == RLMPL_OWNER) {
30567c478bd9Sstevel@tonic-gate 				nowners++;
30577c478bd9Sstevel@tonic-gate 			} else {
30587c478bd9Sstevel@tonic-gate 				cmn_err(CE_PANIC,
3059*da6c28aaSamw 					"nrli: unrecognized lmpl_type %d",
30607c478bd9Sstevel@tonic-gate 					cur->lmpl_type);
30617c478bd9Sstevel@tonic-gate 			}
30627c478bd9Sstevel@tonic-gate 		}
30637c478bd9Sstevel@tonic-gate 
30647c478bd9Sstevel@tonic-gate 		cmn_err(CE_CONT,
30657c478bd9Sstevel@tonic-gate 		"nrli(%s): %d PIDs + %d OWNs = %d items left on r_lmpl\n",
30667c478bd9Sstevel@tonic-gate 			(type == RLMPL_PID) ? "P" : "O",
30677c478bd9Sstevel@tonic-gate 			npids,
30687c478bd9Sstevel@tonic-gate 			nowners,
30697c478bd9Sstevel@tonic-gate 			nitems);
30707c478bd9Sstevel@tonic-gate 	}
30717c478bd9Sstevel@tonic-gate #endif
30727c478bd9Sstevel@tonic-gate 
30737c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
30747c478bd9Sstevel@tonic-gate 	return (rv);
30757c478bd9Sstevel@tonic-gate }
30767c478bd9Sstevel@tonic-gate 
/*
 * Tear down and free a mount's private data.  The caller must already
 * have stopped the async manager thread and all async worker threads,
 * as asserted below.
 */
void
nfs_free_mi(mntinfo_t *mi)
{
	ASSERT(mi->mi_flags & MI_ASYNC_MGR_STOP);
	ASSERT(mi->mi_manager_thread == NULL);
	ASSERT(mi->mi_threads == 0);

	/*
	 * Remove the node from the global list before we start tearing it down.
	 */
	nfs_mi_zonelist_remove(mi);
	if (mi->mi_klmconfig) {
		lm_free_config(mi->mi_klmconfig);
		kmem_free(mi->mi_klmconfig, sizeof (struct knetconfig));
	}
	mutex_destroy(&mi->mi_lock);
	mutex_destroy(&mi->mi_remap_lock);
	mutex_destroy(&mi->mi_async_lock);
	cv_destroy(&mi->mi_failover_cv);
	cv_destroy(&mi->mi_async_work_cv);
	cv_destroy(&mi->mi_async_reqs_cv);
	cv_destroy(&mi->mi_async_cv);
	/* Drop the zone reference taken at mount time, then free mi itself. */
	zone_rele(mi->mi_zone);
	kmem_free(mi, sizeof (*mi));
}
31027c478bd9Sstevel@tonic-gate 
31037c478bd9Sstevel@tonic-gate static int
31047c478bd9Sstevel@tonic-gate mnt_kstat_update(kstat_t *ksp, int rw)
31057c478bd9Sstevel@tonic-gate {
31067c478bd9Sstevel@tonic-gate 	mntinfo_t *mi;
31077c478bd9Sstevel@tonic-gate 	struct mntinfo_kstat *mik;
31087c478bd9Sstevel@tonic-gate 	vfs_t *vfsp;
31097c478bd9Sstevel@tonic-gate 	int i;
31107c478bd9Sstevel@tonic-gate 
31117c478bd9Sstevel@tonic-gate 	/* this is a read-only kstat. Bail out on a write */
31127c478bd9Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
31137c478bd9Sstevel@tonic-gate 		return (EACCES);
31147c478bd9Sstevel@tonic-gate 
31157c478bd9Sstevel@tonic-gate 	/*
31167c478bd9Sstevel@tonic-gate 	 * We don't want to wait here as kstat_chain_lock could be held by
31177c478bd9Sstevel@tonic-gate 	 * dounmount(). dounmount() takes vfs_reflock before the chain lock
31187c478bd9Sstevel@tonic-gate 	 * and thus could lead to a deadlock.
31197c478bd9Sstevel@tonic-gate 	 */
31207c478bd9Sstevel@tonic-gate 	vfsp = (struct vfs *)ksp->ks_private;
31217c478bd9Sstevel@tonic-gate 
31227c478bd9Sstevel@tonic-gate 
31237c478bd9Sstevel@tonic-gate 	mi = VFTOMI(vfsp);
31247c478bd9Sstevel@tonic-gate 
31257c478bd9Sstevel@tonic-gate 	mik = (struct mntinfo_kstat *)ksp->ks_data;
31267c478bd9Sstevel@tonic-gate 
31277c478bd9Sstevel@tonic-gate 	(void) strcpy(mik->mik_proto, mi->mi_curr_serv->sv_knconf->knc_proto);
31287c478bd9Sstevel@tonic-gate 	mik->mik_vers = (uint32_t)mi->mi_vers;
31297c478bd9Sstevel@tonic-gate 	mik->mik_flags = mi->mi_flags;
31307c478bd9Sstevel@tonic-gate 	mik->mik_secmod = mi->mi_curr_serv->sv_secdata->secmod;
31317c478bd9Sstevel@tonic-gate 	mik->mik_curread = (uint32_t)mi->mi_curread;
31327c478bd9Sstevel@tonic-gate 	mik->mik_curwrite = (uint32_t)mi->mi_curwrite;
31337c478bd9Sstevel@tonic-gate 	mik->mik_retrans = mi->mi_retrans;
31347c478bd9Sstevel@tonic-gate 	mik->mik_timeo = mi->mi_timeo;
31357c478bd9Sstevel@tonic-gate 	mik->mik_acregmin = HR2SEC(mi->mi_acregmin);
31367c478bd9Sstevel@tonic-gate 	mik->mik_acregmax = HR2SEC(mi->mi_acregmax);
31377c478bd9Sstevel@tonic-gate 	mik->mik_acdirmin = HR2SEC(mi->mi_acdirmin);
31387c478bd9Sstevel@tonic-gate 	mik->mik_acdirmax = HR2SEC(mi->mi_acdirmax);
31397c478bd9Sstevel@tonic-gate 	for (i = 0; i < NFS_CALLTYPES + 1; i++) {
31407c478bd9Sstevel@tonic-gate 		mik->mik_timers[i].srtt = (uint32_t)mi->mi_timers[i].rt_srtt;
31417c478bd9Sstevel@tonic-gate 		mik->mik_timers[i].deviate =
31427c478bd9Sstevel@tonic-gate 		    (uint32_t)mi->mi_timers[i].rt_deviate;
31437c478bd9Sstevel@tonic-gate 		mik->mik_timers[i].rtxcur =
31447c478bd9Sstevel@tonic-gate 		    (uint32_t)mi->mi_timers[i].rt_rtxcur;
31457c478bd9Sstevel@tonic-gate 	}
31467c478bd9Sstevel@tonic-gate 	mik->mik_noresponse = (uint32_t)mi->mi_noresponse;
31477c478bd9Sstevel@tonic-gate 	mik->mik_failover = (uint32_t)mi->mi_failover;
31487c478bd9Sstevel@tonic-gate 	mik->mik_remap = (uint32_t)mi->mi_remap;
31497c478bd9Sstevel@tonic-gate 	(void) strcpy(mik->mik_curserver, mi->mi_curr_serv->sv_hostname);
31507c478bd9Sstevel@tonic-gate 
31517c478bd9Sstevel@tonic-gate 	return (0);
31527c478bd9Sstevel@tonic-gate }
31537c478bd9Sstevel@tonic-gate 
31547c478bd9Sstevel@tonic-gate void
31557c478bd9Sstevel@tonic-gate nfs_mnt_kstat_init(struct vfs *vfsp)
31567c478bd9Sstevel@tonic-gate {
31577c478bd9Sstevel@tonic-gate 	mntinfo_t *mi = VFTOMI(vfsp);
31587c478bd9Sstevel@tonic-gate 
31597c478bd9Sstevel@tonic-gate 	/*
31607c478bd9Sstevel@tonic-gate 	 * Create the version specific kstats.
31617c478bd9Sstevel@tonic-gate 	 *
31627c478bd9Sstevel@tonic-gate 	 * PSARC 2001/697 Contract Private Interface
31637c478bd9Sstevel@tonic-gate 	 * All nfs kstats are under SunMC contract
31647c478bd9Sstevel@tonic-gate 	 * Please refer to the PSARC listed above and contact
31657c478bd9Sstevel@tonic-gate 	 * SunMC before making any changes!
31667c478bd9Sstevel@tonic-gate 	 *
31677c478bd9Sstevel@tonic-gate 	 * Changes must be reviewed by Solaris File Sharing
31687c478bd9Sstevel@tonic-gate 	 * Changes must be communicated to contract-2001-697@sun.com
31697c478bd9Sstevel@tonic-gate 	 *
31707c478bd9Sstevel@tonic-gate 	 */
31717c478bd9Sstevel@tonic-gate 
31727c478bd9Sstevel@tonic-gate 	mi->mi_io_kstats = kstat_create_zone("nfs", getminor(vfsp->vfs_dev),
31737c478bd9Sstevel@tonic-gate 	    NULL, "nfs", KSTAT_TYPE_IO, 1, 0, mi->mi_zone->zone_id);
31747c478bd9Sstevel@tonic-gate 	if (mi->mi_io_kstats) {
31757c478bd9Sstevel@tonic-gate 		if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
31767c478bd9Sstevel@tonic-gate 			kstat_zone_add(mi->mi_io_kstats, GLOBAL_ZONEID);
31777c478bd9Sstevel@tonic-gate 		mi->mi_io_kstats->ks_lock = &mi->mi_lock;
31787c478bd9Sstevel@tonic-gate 		kstat_install(mi->mi_io_kstats);
31797c478bd9Sstevel@tonic-gate 	}
31807c478bd9Sstevel@tonic-gate 
31817c478bd9Sstevel@tonic-gate 	if ((mi->mi_ro_kstats = kstat_create_zone("nfs",
31827c478bd9Sstevel@tonic-gate 	    getminor(vfsp->vfs_dev), "mntinfo", "misc", KSTAT_TYPE_RAW,
31837c478bd9Sstevel@tonic-gate 	    sizeof (struct mntinfo_kstat), 0, mi->mi_zone->zone_id)) != NULL) {
31847c478bd9Sstevel@tonic-gate 		if (mi->mi_zone->zone_id != GLOBAL_ZONEID)
31857c478bd9Sstevel@tonic-gate 			kstat_zone_add(mi->mi_ro_kstats, GLOBAL_ZONEID);
31867c478bd9Sstevel@tonic-gate 		mi->mi_ro_kstats->ks_update = mnt_kstat_update;
31877c478bd9Sstevel@tonic-gate 		mi->mi_ro_kstats->ks_private = (void *)vfsp;
31887c478bd9Sstevel@tonic-gate 		kstat_install(mi->mi_ro_kstats);
31897c478bd9Sstevel@tonic-gate 	}
31907c478bd9Sstevel@tonic-gate }
31917c478bd9Sstevel@tonic-gate 
31927c478bd9Sstevel@tonic-gate nfs_delmapcall_t *
31937c478bd9Sstevel@tonic-gate nfs_init_delmapcall()
31947c478bd9Sstevel@tonic-gate {
31957c478bd9Sstevel@tonic-gate 	nfs_delmapcall_t	*delmap_call;
31967c478bd9Sstevel@tonic-gate 
31977c478bd9Sstevel@tonic-gate 	delmap_call = kmem_alloc(sizeof (nfs_delmapcall_t), KM_SLEEP);
31987c478bd9Sstevel@tonic-gate 	delmap_call->call_id = curthread;
31997c478bd9Sstevel@tonic-gate 	delmap_call->error = 0;
32007c478bd9Sstevel@tonic-gate 
32017c478bd9Sstevel@tonic-gate 	return (delmap_call);
32027c478bd9Sstevel@tonic-gate }
32037c478bd9Sstevel@tonic-gate 
/*
 * Release a delmap caller record allocated by nfs_init_delmapcall().
 */
void
nfs_free_delmapcall(nfs_delmapcall_t *delmap_call)
{
	kmem_free(delmap_call, sizeof (nfs_delmapcall_t));
}
32097c478bd9Sstevel@tonic-gate 
32107c478bd9Sstevel@tonic-gate /*
32117c478bd9Sstevel@tonic-gate  * Searches for the current delmap caller (based on curthread) in the list of
32127c478bd9Sstevel@tonic-gate  * callers.  If it is found, we remove it and free the delmap caller.
32137c478bd9Sstevel@tonic-gate  * Returns:
32147c478bd9Sstevel@tonic-gate  *	0 if the caller wasn't found
32157c478bd9Sstevel@tonic-gate  *	1 if the caller was found, removed and freed.  *errp is set to what
32167c478bd9Sstevel@tonic-gate  * 	the result of the delmap was.
32177c478bd9Sstevel@tonic-gate  */
32187c478bd9Sstevel@tonic-gate int
32197c478bd9Sstevel@tonic-gate nfs_find_and_delete_delmapcall(rnode_t *rp, int *errp)
32207c478bd9Sstevel@tonic-gate {
32217c478bd9Sstevel@tonic-gate 	nfs_delmapcall_t	*delmap_call;
32227c478bd9Sstevel@tonic-gate 
32237c478bd9Sstevel@tonic-gate 	/*
32247c478bd9Sstevel@tonic-gate 	 * If the list doesn't exist yet, we create it and return
32257c478bd9Sstevel@tonic-gate 	 * that the caller wasn't found.  No list = no callers.
32267c478bd9Sstevel@tonic-gate 	 */
32277c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
32287c478bd9Sstevel@tonic-gate 	if (!(rp->r_flags & RDELMAPLIST)) {
32297c478bd9Sstevel@tonic-gate 		/* The list does not exist */
32307c478bd9Sstevel@tonic-gate 		list_create(&rp->r_indelmap, sizeof (nfs_delmapcall_t),
32317c478bd9Sstevel@tonic-gate 		    offsetof(nfs_delmapcall_t, call_node));
32327c478bd9Sstevel@tonic-gate 		rp->r_flags |= RDELMAPLIST;
32337c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
32347c478bd9Sstevel@tonic-gate 		return (0);
32357c478bd9Sstevel@tonic-gate 	} else {
32367c478bd9Sstevel@tonic-gate 		/* The list exists so search it */
32377c478bd9Sstevel@tonic-gate 		for (delmap_call = list_head(&rp->r_indelmap);
32387c478bd9Sstevel@tonic-gate 		    delmap_call != NULL;
32397c478bd9Sstevel@tonic-gate 		    delmap_call = list_next(&rp->r_indelmap, delmap_call)) {
32407c478bd9Sstevel@tonic-gate 			if (delmap_call->call_id == curthread) {
32417c478bd9Sstevel@tonic-gate 				/* current caller is in the list */
32427c478bd9Sstevel@tonic-gate 				*errp = delmap_call->error;
32437c478bd9Sstevel@tonic-gate 				list_remove(&rp->r_indelmap, delmap_call);
32447c478bd9Sstevel@tonic-gate 				mutex_exit(&rp->r_statelock);
32457c478bd9Sstevel@tonic-gate 				nfs_free_delmapcall(delmap_call);
32467c478bd9Sstevel@tonic-gate 				return (1);
32477c478bd9Sstevel@tonic-gate 			}
32487c478bd9Sstevel@tonic-gate 		}
32497c478bd9Sstevel@tonic-gate 	}
32507c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
32517c478bd9Sstevel@tonic-gate 	return (0);
32527c478bd9Sstevel@tonic-gate }
3253