1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  *
25  *	Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
26  *	All rights reserved.
27  */
28 
29 #pragma ident	"%Z%%M%	%I%	%E% SMI"
30 
31 /*
32  * Node hash implementation borrowed from NFS.
33  * See: uts/common/fs/nfs/nfs_subr.c
34  */
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/time.h>
39 #include <sys/vnode.h>
40 #include <sys/bitmap.h>
41 #include <sys/dnlc.h>
42 #include <sys/kmem.h>
43 #include <sys/sunddi.h>
44 
45 #ifdef APPLE
46 #include <sys/smb_apple.h>
47 #include <sys/utfconv.h>
48 #include <sys/smb_iconv.h>
49 #else
50 #include <netsmb/smb_osdep.h>
51 #endif
52 
53 #include <netsmb/smb.h>
54 #include <netsmb/smb_conn.h>
55 #include <netsmb/smb_subr.h>
56 #include <netsmb/smb_rq.h>
57 
58 #include <smbfs/smbfs.h>
59 #include <smbfs/smbfs_node.h>
60 #include <smbfs/smbfs_subr.h>
61 
62 /*
 * The hash queues for access to active and cached smbnodes
64  * are organized as doubly linked lists.  A reader/writer lock
65  * for each hash bucket is used to control access and to synchronize
66  * lookups, additions, and deletions from the hash queue.
67  *
68  * The smbnode freelist is organized as a doubly linked list with
69  * a head pointer.  Additions and deletions are synchronized via
70  * a single mutex.
71  *
72  * In order to add an smbnode to the free list, it must be hashed into
 * a hash queue and the exclusive lock to the hash queue must be held.
74  * If an smbnode is not hashed into a hash queue, then it is destroyed
75  * because it represents no valuable information that can be reused
76  * about the file.  The exclusive lock to the hash queue must be
77  * held in order to prevent a lookup in the hash queue from finding
78  * the smbnode and using it and assuming that the smbnode is not on the
79  * freelist.  The lookup in the hash queue will have the hash queue
80  * locked, either exclusive or shared.
81  *
82  * The vnode reference count for each smbnode is not allowed to drop
83  * below 1.  This prevents external entities, such as the VM
84  * subsystem, from acquiring references to vnodes already on the
85  * freelist and then trying to place them back on the freelist
 * when their reference is released.  This means that when an
 * smbnode is looked up in the hash queues, then either the smbnode
 * is removed from the freelist and that reference is transferred to
 * the new reference or the vnode reference count must be incremented
 * accordingly.  The mutex for the freelist must be held in order to
 * accurately test to see if the smbnode is on the freelist or not.
 * The hash queue lock might be held shared and it is possible that
 * two different threads may race to remove the smbnode from the
 * freelist.  This race can be resolved by holding the mutex for the
 * freelist.  Please note that the mutex for the freelist does not
 * need to be held if the smbnode is not on the freelist.  It cannot be
97  * placed on the freelist due to the requirement that the thread
98  * putting the smbnode on the freelist must hold the exclusive lock
99  * to the hash queue and the thread doing the lookup in the hash
100  * queue is holding either a shared or exclusive lock to the hash
101  * queue.
102  *
103  * The lock ordering is:
104  *
105  *	hash bucket lock -> vnode lock
106  *	hash bucket lock -> freelist lock
107  */
108 static rhashq_t *smbtable;
109 
110 static kmutex_t smbfreelist_lock;
111 static smbnode_t *smbfreelist = NULL;
112 static ulong_t	smbnodenew = 0;
113 long	nsmbnode = 0;
114 
115 static int smbtablesize;
116 static int smbtablemask;
117 static int smbhashlen = 4;
118 
119 static struct kmem_cache *smbnode_cache;
120 
121 /*
122  * Mutex to protect the following variables:
123  *	smbfs_major
124  *	smbfs_minor
125  */
126 kmutex_t smbfs_minor_lock;
127 int smbfs_major;
128 int smbfs_minor;
129 
130 /*
131  * Local functions.
132  * Not static, to aid debugging.
133  */
134 void smb_rmfree(smbnode_t *);
135 void smbinactive(smbnode_t *);
136 void smb_rmhash_locked(smbnode_t *);
137 void smb_destroy_node(smbnode_t *);
138 void smbfs_kmem_reclaim(void *cdrarg);
139 
140 smbnode_t *smbhashfind(struct vfs *, const char *, int, rhashq_t *);
141 static vnode_t *make_smbnode(vfs_t *, char *, int, rhashq_t *, int *);
142 
143 
144 /*
145  * Free the resources associated with an smbnode.
146  * Note: This is different from smbfs_inactive
147  *
148  * NFS: nfs_subr.c:rinactive
149  */
150 void
151 smbinactive(smbnode_t *np)
152 {
153 
154 	if (np->n_rpath) {
155 		kmem_free(np->n_rpath, np->n_rplen + 1);
156 		np->n_rpath = NULL;
157 	}
158 }
159 
160 /*
161  * Return a vnode for the given CIFS directory and filename.
 * If no smbnode exists for this pathname, create one and put it
 * into the hash queues.  If an smbnode for this pathname
 * already exists, return it.
165  *
166  * Note: make_smbnode() may upgrade the hash bucket lock to exclusive.
167  *
168  * NFS: nfs_subr.c:makenfsnode
169  */
170 vnode_t *
171 smbfs_make_node(
172 	vfs_t *vfsp,
173 	const char *dir,
174 	int dirlen,
175 	const char *name,
176 	int nmlen,
177 	struct smbfattr *fap)
178 {
179 	char *rpath;
180 	int rplen, idx;
181 	uint32_t hash;
182 	rhashq_t *rhtp;
183 	smbnode_t *np;
184 	vnode_t *vp;
185 #ifdef NOT_YET
186 	vattr_t va;
187 #endif
188 	int newnode;
189 
190 	/*
191 	 * Build the full path name in allocated memory
192 	 * so we have it for lookup, etc.
193 	 *
194 	 * ToDo:  Would prefer to allocate a remote path
195 	 * only when we will create a new node.
196 	 */
197 	rplen = dirlen;
198 	if (name) {
199 		/* If not at root, we'll add a slash. */
200 		if (dirlen > 1)
201 			rplen++;
202 		rplen += nmlen;
203 	}
204 	rpath = kmem_alloc(rplen + 1, KM_SLEEP);
205 
206 	bcopy(dir, rpath, dirlen);
207 	if (name) {
208 		if (dirlen > 1)
209 			rpath[dirlen++] = '\\';
210 		bcopy(name, &rpath[dirlen], nmlen);
211 	}
212 	rpath[rplen] = 0;
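	/*
	 * Example: dir="\dir1" (dirlen 5) with name="file1" (nmlen 5)
	 * gives rplen 11 and rpath "\dir1\file1"; at the share root,
	 * dir="\" (dirlen 1) gives rplen 6 and rpath "\file1" with no
	 * extra separator.
	 */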
213 
214 	hash = smbfs_hash(rpath, rplen);
215 	idx = hash & smbtablemask;
216 	rhtp = &smbtable[idx];
217 	rw_enter(&rhtp->r_lock, RW_READER);
218 
219 	vp = make_smbnode(vfsp, rpath, rplen, rhtp, &newnode);
220 	np = VTOSMB(vp);
221 	np->n_ino = hash;	/* Equivalent to: smbfs_getino() */
222 
223 	/*
224 	 * Note: make_smbnode keeps a reference to rpath in
225 	 * new nodes it creates, so only free when we found
226 	 * an existing node.
227 	 */
228 	if (!newnode) {
229 		kmem_free(rpath, rplen + 1);
230 		rpath = NULL;
231 	}
232 
233 	if (fap == NULL) {
234 #ifdef NOT_YET
235 		if (newnode) {
236 			PURGE_ATTRCACHE(vp);
237 		}
238 #endif
239 		rw_exit(&rhtp->r_lock);
240 		return (vp);
241 	}
242 
243 	/* Have SMB attributes. */
244 	vp->v_type = (fap->fa_attr & SMB_FA_DIR) ? VDIR : VREG;
245 	/* XXX: np->n_ino = fap->fa_ino; see above */
246 	np->r_size = fap->fa_size;
247 	/* XXX: np->r_attr = *fap here instead? */
248 	np->r_atime = fap->fa_atime;
	np->r_ctime = fap->fa_ctime;
	np->r_mtime = fap->fa_mtime;
251 
252 #ifdef NOT_YET
253 	if (!newnode) {
254 		rw_exit(&rhtp->r_lock);
255 		(void) nfs_cache_fattr(vp, attr, &va, t, cr);
256 	} else {
257 		if (attr->na_type < NFNON || attr->na_type > NFSOC)
258 			vp->v_type = VBAD;
259 		else
260 			vp->v_type = n2v_type(attr);
261 		vp->v_rdev = makedevice(attr->rdev.specdata1,
262 		    attr->rdev.specdata2);
263 		nfs_attrcache(vp, attr, t);
264 		rw_exit(&rhtp->r_lock);
265 	}
266 #else
267 	rw_exit(&rhtp->r_lock);
268 #endif
269 
270 	return (vp);
271 }
272 
273 /*
274  * NFS: nfs_subr.c:rtablehash
275  * We use smbfs_hash().
276  */
277 
278 /*
279  * Find or create an smbnode.
280  * NFS: nfs_subr.c:make_rnode
281  */
282 static vnode_t *
283 make_smbnode(
284 	vfs_t *vfsp,
285 	char *rpath,
286 	int rplen,
287 	rhashq_t *rhtp,
288 	int *newnode)
289 {
290 	smbnode_t *np;
291 	smbnode_t *tnp;
292 	vnode_t *vp;
293 	smbmntinfo_t *mi;
294 
295 	ASSERT(RW_READ_HELD(&rhtp->r_lock));
296 
297 	mi = VFTOSMI(vfsp);
298 
299 start:
300 	np = smbhashfind(vfsp, rpath, rplen, rhtp);
301 	if (np != NULL) {
302 		vp = SMBTOV(np);
303 		*newnode = 0;
304 		return (vp);
305 	}
306 
307 	/* Note: will retake this lock below. */
308 	rw_exit(&rhtp->r_lock);
309 
310 	/*
311 	 * see if we can find something on the freelist
312 	 */
313 	mutex_enter(&smbfreelist_lock);
314 	if (smbfreelist != NULL && smbnodenew >= nsmbnode) {
315 		np = smbfreelist;
316 		smb_rmfree(np);
317 		mutex_exit(&smbfreelist_lock);
318 
319 		vp = SMBTOV(np);
320 
321 		if (np->r_flags & RHASHED) {
322 			rw_enter(&np->r_hashq->r_lock, RW_WRITER);
323 			mutex_enter(&vp->v_lock);
324 			if (vp->v_count > 1) {
325 				vp->v_count--;
326 				mutex_exit(&vp->v_lock);
327 				rw_exit(&np->r_hashq->r_lock);
328 				rw_enter(&rhtp->r_lock, RW_READER);
329 				goto start;
330 			}
331 			mutex_exit(&vp->v_lock);
332 			smb_rmhash_locked(np);
333 			rw_exit(&np->r_hashq->r_lock);
334 		}
335 
336 		smbinactive(np);
337 
338 		mutex_enter(&vp->v_lock);
339 		if (vp->v_count > 1) {
340 			vp->v_count--;
341 			mutex_exit(&vp->v_lock);
342 			rw_enter(&rhtp->r_lock, RW_READER);
343 			goto start;
344 		}
345 		mutex_exit(&vp->v_lock);
346 		vn_invalid(vp);
347 		/*
348 		 * destroy old locks before bzero'ing and
349 		 * recreating the locks below.
350 		 */
351 		smbfs_rw_destroy(&np->r_rwlock);
352 		smbfs_rw_destroy(&np->r_lkserlock);
353 		mutex_destroy(&np->r_statelock);
354 		cv_destroy(&np->r_cv);
355 		/*
356 		 * Make sure that if smbnode is recycled then
357 		 * VFS count is decremented properly before
358 		 * reuse.
359 		 */
360 		VFS_RELE(vp->v_vfsp);
361 		vn_reinit(vp);
362 	} else {
363 		/*
364 		 * allocate and initialize a new smbnode
365 		 */
366 		vnode_t *new_vp;
367 
368 		mutex_exit(&smbfreelist_lock);
369 
370 		np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
371 		new_vp = vn_alloc(KM_SLEEP);
372 
373 		atomic_add_long((ulong_t *)&smbnodenew, 1);
374 		vp = new_vp;
375 	}
376 
377 	/* Initialize smbnode_t */
378 	bzero(np, sizeof (*np));
379 
380 	smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
381 	smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
382 	mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
383 	cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
384 	/* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */
385 
386 	np->r_vnode = vp;
387 	np->n_mount = mi;
388 	np->r_hashq = rhtp;
389 	np->n_direof = -1;
390 	np->n_fid = SMB_FID_UNUSED;
391 	np->n_uid = UID_NOBODY;
392 	np->n_gid = GID_NOBODY;
393 	/* XXX: make attributes stale? */
394 
395 #if 0 /* XXX dircache */
396 	/*
397 	 * We don't know if it's a directory yet.
398 	 * Let the caller do this?  XXX
399 	 */
400 	avl_create(&np->r_dir, compar, sizeof (rddir_cache),
401 	    offsetof(rddir_cache, tree));
402 #endif
403 
404 	/* Now fill in the vnode. */
405 	vn_setops(vp, smbfs_vnodeops);
406 	vp->v_data = (caddr_t)np;
407 	VFS_HOLD(vfsp);
408 	vp->v_vfsp = vfsp;
409 	vp->v_type = VNON;
410 
411 	/*
412 	 * There is a race condition if someone else
	 * allocates the smbnode while no locks are held, so we
414 	 * check again and recover if found.
415 	 */
416 	rw_enter(&rhtp->r_lock, RW_WRITER);
417 	tnp = smbhashfind(vfsp, rpath, rplen, rhtp);
418 	if (tnp != NULL) {
419 		vp = SMBTOV(tnp);
420 		*newnode = 0;
421 		rw_exit(&rhtp->r_lock);
422 		/* The node we were building goes on the free list. */
423 		smb_addfree(np);
424 		rw_enter(&rhtp->r_lock, RW_READER);
425 		return (vp);
426 	}
427 
428 	/*
429 	 * Hash search identifies nodes by the full pathname,
430 	 * so store that before linking in the hash list.
431 	 * Note: caller allocates the rpath, and knows
432 	 * about this reference when *newnode is set.
433 	 */
434 	np->n_rpath = rpath;
435 	np->n_rplen = rplen;
436 
437 	smb_addhash(np);
438 	*newnode = 1;
439 	return (vp);
440 }
441 
442 /*
443  * smb_addfree
444  * Put a smbnode on the free list.
445  *
446  * Normally called by smbfs_inactive, but also
447  * called in here during cleanup operations.
448  *
449  * Smbnodes which were allocated above and beyond the normal limit
450  * are immediately freed.
451  *
452  * NFS: nfs_subr.c:rp_addfree
453  */
454 void
455 smb_addfree(smbnode_t *np)
456 {
457 	vnode_t *vp;
458 	struct vfs *vfsp;
459 
460 	vp = SMBTOV(np);
461 	ASSERT(vp->v_count >= 1);
462 	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
463 
464 	/*
465 	 * If we have too many smbnodes allocated and there are no
466 	 * references to this smbnode, or if the smbnode is no longer
	 * accessible because it does not reside in the hash queues,
468 	 * or if an i/o error occurred while writing to the file,
469 	 * then just free it instead of putting it on the smbnode
470 	 * freelist.
471 	 */
472 	vfsp = vp->v_vfsp;
473 	if (((smbnodenew > nsmbnode || !(np->r_flags & RHASHED) ||
474 	    np->r_error || (vfsp->vfs_flag & VFS_UNMOUNTED)) &&
475 	    np->r_count == 0)) {
476 		if (np->r_flags & RHASHED) {
477 			rw_enter(&np->r_hashq->r_lock, RW_WRITER);
478 			mutex_enter(&vp->v_lock);
479 			if (vp->v_count > 1) {
480 				vp->v_count--;
481 				mutex_exit(&vp->v_lock);
482 				rw_exit(&np->r_hashq->r_lock);
				/*
				 * Will get another call later,
				 * via smbfs_inactive.
				 */
				return;
488 			}
489 			mutex_exit(&vp->v_lock);
490 			smb_rmhash_locked(np);
491 			rw_exit(&np->r_hashq->r_lock);
492 		}
493 
494 		smbinactive(np);
495 
496 		/*
497 		 * Recheck the vnode reference count.  We need to
498 		 * make sure that another reference has not been
499 		 * acquired while we were not holding v_lock.  The
500 		 * smbnode is not in the smbnode hash queues, so the
501 		 * only way for a reference to have been acquired
502 		 * is for a VOP_PUTPAGE because the smbnode was marked
503 		 * with RDIRTY or for a modified page.  This
504 		 * reference may have been acquired before our call
505 		 * to smbinactive.  The i/o may have been completed,
506 		 * thus allowing smbinactive to complete, but the
507 		 * reference to the vnode may not have been released
		 * yet.  In any case, the smbnode cannot be destroyed
509 		 * until the other references to this vnode have been
510 		 * released.  The other references will take care of
511 		 * either destroying the smbnode or placing it on the
512 		 * smbnode freelist.  If there are no other references,
513 		 * then the smbnode may be safely destroyed.
514 		 */
515 		mutex_enter(&vp->v_lock);
516 		if (vp->v_count > 1) {
517 			vp->v_count--;
518 			mutex_exit(&vp->v_lock);
519 			return;
520 		}
521 		mutex_exit(&vp->v_lock);
522 
523 		smb_destroy_node(np);
524 		return;
525 	}
526 	/*
	 * Lock the hash queue and then recheck the reference count.
	 * If another thread has acquired a reference in the meantime,
	 * the smbnode should not be placed on the freelist yet; just
	 * release this reference and let the other thread add the
	 * smbnode to the freelist when its reference is released.
533 	 */
534 	rw_enter(&np->r_hashq->r_lock, RW_WRITER);
535 
536 	mutex_enter(&vp->v_lock);
537 	if (vp->v_count > 1) {
538 		vp->v_count--;
539 		mutex_exit(&vp->v_lock);
540 		rw_exit(&np->r_hashq->r_lock);
541 		return;
542 	}
543 	mutex_exit(&vp->v_lock);
544 
545 	/*
	 * Put the smbnode on the freelist.  New entries are always
	 * linked in just before the head pointer, i.e. at the tail of
	 * the circular list, so the head is the longest-idle node and
	 * is the first to be reused.  (The NFS code this derives from
	 * also had a front-insertion path for nodes with no cached
	 * data; smbfs inserts at the tail unconditionally.)
550 	 */
551 	mutex_enter(&smbfreelist_lock);
552 	if (smbfreelist == NULL) {
553 		np->r_freef = np;
554 		np->r_freeb = np;
555 		smbfreelist = np;
556 	} else {
557 		np->r_freef = smbfreelist;
558 		np->r_freeb = smbfreelist->r_freeb;
559 		smbfreelist->r_freeb->r_freef = np;
560 		smbfreelist->r_freeb = np;
561 	}
562 	mutex_exit(&smbfreelist_lock);
563 
564 	rw_exit(&np->r_hashq->r_lock);
565 }
566 
567 /*
568  * Remove an smbnode from the free list.
569  *
570  * The caller must be holding smbfreelist_lock and the smbnode
571  * must be on the freelist.
572  *
573  * NFS: nfs_subr.c:rp_rmfree
574  */
575 void
576 smb_rmfree(smbnode_t *np)
577 {
578 
579 	ASSERT(MUTEX_HELD(&smbfreelist_lock));
580 	ASSERT(np->r_freef != NULL && np->r_freeb != NULL);
581 
582 	if (np == smbfreelist) {
583 		smbfreelist = np->r_freef;
584 		if (np == smbfreelist)
585 			smbfreelist = NULL;
586 	}
587 
588 	np->r_freeb->r_freef = np->r_freef;
589 	np->r_freef->r_freeb = np->r_freeb;
590 
591 	np->r_freef = np->r_freeb = NULL;
592 }
593 
594 /*
595  * Put a smbnode in the hash table.
596  *
597  * The caller must be holding the exclusive hash queue lock.
598  *
599  * NFS: nfs_subr.c:rp_addhash
600  */
601 void
602 smb_addhash(smbnode_t *np)
603 {
604 
605 	ASSERT(RW_WRITE_HELD(&np->r_hashq->r_lock));
606 	ASSERT(!(np->r_flags & RHASHED));
607 
608 	np->r_hashf = np->r_hashq->r_hashf;
609 	np->r_hashq->r_hashf = np;
610 	np->r_hashb = (smbnode_t *)np->r_hashq;
611 	np->r_hashf->r_hashb = np;
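
	/*
	 * The cast above relies on r_hashf/r_hashb being the first
	 * members of both rhashq_t and smbnode_t (as in the NFS code
	 * this derives from), so the bucket header itself acts as the
	 * list sentinel that smbhashfind() terminates on.
	 */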
612 
613 	mutex_enter(&np->r_statelock);
614 	np->r_flags |= RHASHED;
615 	mutex_exit(&np->r_statelock);
616 }
617 
618 /*
619  * Remove a smbnode from the hash table.
620  *
621  * The caller must be holding the hash queue lock.
622  *
623  * NFS: nfs_subr.c:rp_rmhash_locked
624  */
625 void
626 smb_rmhash_locked(smbnode_t *np)
627 {
628 
629 	ASSERT(RW_WRITE_HELD(&np->r_hashq->r_lock));
630 	ASSERT(np->r_flags & RHASHED);
631 
632 	np->r_hashb->r_hashf = np->r_hashf;
633 	np->r_hashf->r_hashb = np->r_hashb;
634 
635 	mutex_enter(&np->r_statelock);
636 	np->r_flags &= ~RHASHED;
637 	mutex_exit(&np->r_statelock);
638 }
639 
640 /*
641  * Remove a smbnode from the hash table.
642  *
643  * The caller must not be holding the hash queue lock.
644  */
645 void
646 smb_rmhash(smbnode_t *np)
647 {
648 
649 	rw_enter(&np->r_hashq->r_lock, RW_WRITER);
650 	smb_rmhash_locked(np);
651 	rw_exit(&np->r_hashq->r_lock);
652 }
653 
654 /*
 * Look up an smbnode by its remote pathname.
656  *
657  * The caller must be holding the hash queue lock, either shared or exclusive.
658  * XXX: make static?
659  *
660  * NFS: nfs_subr.c:rfind
661  */
662 smbnode_t *
663 smbhashfind(
664 	struct vfs *vfsp,
665 	const char *rpath,
666 	int rplen,
667 	rhashq_t *rhtp)
668 {
669 	smbnode_t *np;
670 	vnode_t *vp;
671 
672 	ASSERT(RW_LOCK_HELD(&rhtp->r_lock));
673 
674 	for (np = rhtp->r_hashf; np != (smbnode_t *)rhtp; np = np->r_hashf) {
675 		vp = SMBTOV(np);
676 		if (vp->v_vfsp == vfsp &&
677 		    np->n_rplen == rplen &&
678 		    bcmp(np->n_rpath, rpath, rplen) == 0) {
679 			/*
680 			 * remove smbnode from free list, if necessary.
681 			 */
682 			if (np->r_freef != NULL) {
683 				mutex_enter(&smbfreelist_lock);
684 				/*
685 				 * If the smbnode is on the freelist,
686 				 * then remove it and use that reference
687 				 * as the new reference.  Otherwise,
688 				 * need to increment the reference count.
689 				 */
690 				if (np->r_freef != NULL) {
691 					smb_rmfree(np);
692 					mutex_exit(&smbfreelist_lock);
693 				} else {
694 					mutex_exit(&smbfreelist_lock);
695 					VN_HOLD(vp);
696 				}
697 			} else
698 				VN_HOLD(vp);
699 			return (np);
700 		}
701 	}
702 	return (NULL);
703 }
704 
705 #ifdef SMB_VNODE_DEBUG
706 int smb_check_table_debug = 1;
707 #else /* SMB_VNODE_DEBUG */
708 int smb_check_table_debug = 0;
709 #endif /* SMB_VNODE_DEBUG */
710 
711 
712 /*
 * Return nonzero (a count of busy nodes) if there is an active
 * vnode belonging to this vfs in the smbtable cache.
715  *
716  * Several of these checks are done without holding the usual
717  * locks.  This is safe because destroy_smbtable(), smb_addfree(),
718  * etc. will redo the necessary checks before actually destroying
719  * any smbnodes.
720  *
721  * NFS: nfs_subr.c:check_rtable
722  *
723  * Debugging changes here relative to NFS.
724  * Relatively harmless, so left 'em in.
725  */
726 int
727 smb_check_table(struct vfs *vfsp, smbnode_t *rtnp)
728 {
729 	smbnode_t *np;
730 	vnode_t *vp;
731 	int index;
732 	int busycnt = 0;
733 
734 	for (index = 0; index < smbtablesize; index++) {
735 		rw_enter(&smbtable[index].r_lock, RW_READER);
736 		for (np = smbtable[index].r_hashf;
737 		    np != (smbnode_t *)(&smbtable[index]);
738 		    np = np->r_hashf) {
739 			if (np == rtnp)
740 				continue; /* skip the root */
741 			vp = SMBTOV(np);
742 			if (vp->v_vfsp != vfsp)
743 				continue; /* skip other mount */
744 
745 			/* Now the 'busy' checks: */
746 			/* Not on the free list? */
747 			if (np->r_freef == NULL) {
748 				SMBVDEBUG("!r_freef: node=0x%p, v_path=%s\n",
749 				    (void *)np, vp->v_path);
750 				busycnt++;
751 			}
752 
753 			/* Has dirty pages? */
754 			if (vn_has_cached_data(vp) &&
755 			    (np->r_flags & RDIRTY)) {
756 				SMBVDEBUG("is dirty: node=0x%p, v_path=%s\n",
757 				    (void *)np, vp->v_path);
758 				busycnt++;
759 			}
760 
761 			/* Other refs? (not reflected in v_count) */
762 			if (np->r_count > 0) {
763 				SMBVDEBUG("+r_count: node=0x%p, v_path=%s\n",
764 				    (void *)np, vp->v_path);
765 				busycnt++;
766 			}
767 
768 			if (busycnt && !smb_check_table_debug)
769 				break;
770 
771 		}
772 		rw_exit(&smbtable[index].r_lock);
773 	}
774 	return (busycnt);
775 }
776 
777 /*
778  * Destroy inactive vnodes from the hash queues which belong to this
779  * vfs.  It is essential that we destroy all inactive vnodes during a
780  * forced unmount as well as during a normal unmount.
781  *
782  * NFS: nfs_subr.c:destroy_rtable
783  */
784 void
785 smbfs_destroy_table(struct vfs *vfsp)
786 {
787 	int index;
788 	smbnode_t *np;
789 	smbnode_t *rlist;
790 	smbnode_t *r_hashf;
791 	vnode_t *vp;
792 
793 	rlist = NULL;
794 
795 	for (index = 0; index < smbtablesize; index++) {
796 		rw_enter(&smbtable[index].r_lock, RW_WRITER);
797 		for (np = smbtable[index].r_hashf;
798 		    np != (smbnode_t *)(&smbtable[index]);
799 		    np = r_hashf) {
800 			/* save the hash pointer before destroying */
801 			r_hashf = np->r_hashf;
802 			vp = SMBTOV(np);
803 			if (vp->v_vfsp == vfsp) {
804 				mutex_enter(&smbfreelist_lock);
805 				if (np->r_freef != NULL) {
806 					smb_rmfree(np);
807 					mutex_exit(&smbfreelist_lock);
808 					smb_rmhash_locked(np);
809 					np->r_hashf = rlist;
810 					rlist = np;
811 				} else
812 					mutex_exit(&smbfreelist_lock);
813 			}
814 		}
815 		rw_exit(&smbtable[index].r_lock);
816 	}
817 
818 	for (np = rlist; np != NULL; np = rlist) {
819 		rlist = np->r_hashf;
820 		/*
821 		 * This call to smb_addfree will end up destroying the
822 		 * smbnode, but in a safe way with the appropriate set
823 		 * of checks done.
824 		 */
825 		smb_addfree(np);
826 	}
827 
828 }
829 
830 /*
831  * This routine destroys all the resources associated with the smbnode
832  * and then the smbnode itself.
833  *
834  * NFS: nfs_subr.c:destroy_rnode
835  */
836 void
837 smb_destroy_node(smbnode_t *np)
838 {
839 	vnode_t *vp;
840 	vfs_t *vfsp;
841 
842 	vp = SMBTOV(np);
843 	vfsp = vp->v_vfsp;
844 
845 	ASSERT(vp->v_count == 1);
846 	ASSERT(np->r_count == 0);
847 	ASSERT(np->r_mapcnt == 0);
848 	ASSERT(!(np->r_flags & RHASHED));
849 	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
850 	atomic_add_long((ulong_t *)&smbnodenew, -1);
851 	vn_invalid(vp);
852 	vn_free(vp);
853 	kmem_cache_free(smbnode_cache, np);
854 	VFS_RELE(vfsp);
855 }
856 
857 /* rflush? */
858 /* access cache */
859 /* client handles */
860 
861 /*
862  * initialize resources that are used by smbfs_subr.c
 * this is called from the _init() routine (by way of smbfs_clntinit())
 *
 * allocate and initialize the smbfs hash table
866  * NFS: nfs_subr.c:nfs_subrinit
867  */
868 int
869 smbfs_subrinit(void)
870 {
871 	int i;
872 	ulong_t nsmbnode_max;
873 
874 	/*
875 	 * Allocate and initialize the smbnode hash queues
876 	 */
877 	if (nsmbnode <= 0)
878 		nsmbnode = ncsize; /* dnlc.h */
879 	nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
880 	    sizeof (struct smbnode));
881 	if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
882 		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
883 		    "setting nsmbnode to max value of %ld", nsmbnode_max);
884 		nsmbnode = nsmbnode_max;
885 	}
886 
887 	smbtablesize = 1 << highbit(nsmbnode / smbhashlen);
888 	smbtablemask = smbtablesize - 1;
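	/*
	 * Example: if nsmbnode were 6000 with smbhashlen = 4, then
	 * highbit(6000 / 4) = highbit(1500) = 11, so smbtablesize
	 * becomes 2048 and smbtablemask 0x7ff, for roughly three
	 * nodes per hash chain when the table is full.
	 */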
889 	smbtable = kmem_alloc(smbtablesize * sizeof (*smbtable), KM_SLEEP);
890 	for (i = 0; i < smbtablesize; i++) {
891 		smbtable[i].r_hashf = (smbnode_t *)(&smbtable[i]);
892 		smbtable[i].r_hashb = (smbnode_t *)(&smbtable[i]);
893 		rw_init(&smbtable[i].r_lock, NULL, RW_DEFAULT, NULL);
894 	}
895 	smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
896 	    0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0);
897 
898 	/*
899 	 * Initialize the various mutexes and reader/writer locks
900 	 */
901 	mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
902 	mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);
903 
904 	/*
905 	 * Assign unique major number for all smbfs mounts
906 	 */
907 	if ((smbfs_major = getudev()) == -1) {
908 		zcmn_err(GLOBAL_ZONEID, CE_WARN,
909 		    "smbfs: init: can't get unique device number");
910 		smbfs_major = 0;
911 	}
912 	smbfs_minor = 0;
913 
914 	return (0);
915 }
916 
917 /*
918  * free smbfs hash table, etc.
919  * NFS: nfs_subr.c:nfs_subrfini
920  */
921 void
922 smbfs_subrfini(void)
923 {
924 	int i;
925 
926 	/*
927 	 * Deallocate the smbnode hash queues
928 	 */
929 	kmem_cache_destroy(smbnode_cache);
930 
931 	for (i = 0; i < smbtablesize; i++)
932 		rw_destroy(&smbtable[i].r_lock);
933 	kmem_free(smbtable, smbtablesize * sizeof (*smbtable));
934 
935 	/*
936 	 * Destroy the various mutexes and reader/writer locks
937 	 */
938 	mutex_destroy(&smbfreelist_lock);
939 	mutex_destroy(&smbfs_minor_lock);
940 }
941 
942 /* rddir_cache ? */
943 
944 /*
945  * Support functions for smbfs_kmem_reclaim
946  */
947 
948 static int
949 smbfs_node_reclaim(void)
950 {
951 	int freed;
952 	smbnode_t *np;
953 	vnode_t *vp;
954 
955 	freed = 0;
956 	mutex_enter(&smbfreelist_lock);
957 	while ((np = smbfreelist) != NULL) {
958 		smb_rmfree(np);
959 		mutex_exit(&smbfreelist_lock);
960 		if (np->r_flags & RHASHED) {
961 			vp = SMBTOV(np);
962 			rw_enter(&np->r_hashq->r_lock, RW_WRITER);
963 			mutex_enter(&vp->v_lock);
964 			if (vp->v_count > 1) {
965 				vp->v_count--;
966 				mutex_exit(&vp->v_lock);
967 				rw_exit(&np->r_hashq->r_lock);
968 				mutex_enter(&smbfreelist_lock);
969 				continue;
970 			}
971 			mutex_exit(&vp->v_lock);
972 			smb_rmhash_locked(np);
973 			rw_exit(&np->r_hashq->r_lock);
974 		}
975 		/*
976 		 * This call to smb_addfree will end up destroying the
977 		 * smbnode, but in a safe way with the appropriate set
978 		 * of checks done.
979 		 */
		smb_addfree(np);
		freed++;	/* node handed off for destruction */
981 		mutex_enter(&smbfreelist_lock);
982 	}
983 	mutex_exit(&smbfreelist_lock);
984 	return (freed);
985 }
986 
987 /*
 * Called via the smbnode_cache reclaim callback (registered in
 * smbfs_subrinit above) when the system wants memory back:
989  * "Please give back some memory!"
990  *
991  * Todo: dump nodes from the free list?
992  */
993 /*ARGSUSED*/
994 void
995 smbfs_kmem_reclaim(void *cdrarg)
996 {
997 	(void) smbfs_node_reclaim();
998 }
999 
1000 /* nfs failover stuff */
1001 /* nfs_rw_xxx - see smbfs_rwlock.c */
1002