xref: /freebsd/sys/ufs/ufs/ufs_dirhash.c (revision 3157ba21)
1 /*-
2  * Copyright (c) 2001, 2002 Ian Dowse.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 /*
27  * This implements a hash-based lookup scheme for UFS directories.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_ufs.h"
34 
35 #ifdef UFS_DIRHASH
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/malloc.h>
43 #include <sys/fnv_hash.h>
44 #include <sys/proc.h>
45 #include <sys/bio.h>
46 #include <sys/buf.h>
47 #include <sys/vnode.h>
48 #include <sys/mount.h>
49 #include <sys/refcount.h>
50 #include <sys/sysctl.h>
51 #include <sys/sx.h>
52 #include <sys/eventhandler.h>
53 #include <sys/time.h>
54 #include <vm/uma.h>
55 
56 #include <ufs/ufs/quota.h>
57 #include <ufs/ufs/inode.h>
58 #include <ufs/ufs/dir.h>
59 #include <ufs/ufs/dirhash.h>
60 #include <ufs/ufs/extattr.h>
61 #include <ufs/ufs/ufsmount.h>
62 #include <ufs/ufs/ufs_extern.h>
63 
64 #define WRAPINCR(val, limit)	(((val) + 1 == (limit)) ? 0 : ((val) + 1))
65 #define WRAPDECR(val, limit)	(((val) == 0) ? ((limit) - 1) : ((val) - 1))
66 #define OFSFMT(vp)		((vp)->v_mount->mnt_maxsymlinklen <= 0)
67 #define BLKFREE2IDX(n)		((n) > DH_NFSTATS ? DH_NFSTATS : (n))
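
/*
 * For example, with a table of length 8, WRAPINCR(7, 8) == 0 and
 * WRAPDECR(0, 8) == 7, so linear probing wraps around the hash array.
 * BLKFREE2IDX() clamps a block's free-slot count into the range of
 * dh_firstfree[] buckets: any count above DH_NFSTATS shares the last,
 * "lots of free space" bucket.
 */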
68 
69 static MALLOC_DEFINE(M_DIRHASH, "ufs_dirhash", "UFS directory hash tables");
70 
71 static int ufs_mindirhashsize = DIRBLKSIZ * 5;
72 SYSCTL_INT(_vfs_ufs, OID_AUTO, dirhash_minsize, CTLFLAG_RW,
73     &ufs_mindirhashsize,
74     0, "minimum directory size in bytes for which to use hashed lookup");
75 static int ufs_dirhashmaxmem = 2 * 1024 * 1024;
76 SYSCTL_INT(_vfs_ufs, OID_AUTO, dirhash_maxmem, CTLFLAG_RW, &ufs_dirhashmaxmem,
77     0, "maximum allowed dirhash memory usage");
78 static int ufs_dirhashmem;
79 SYSCTL_INT(_vfs_ufs, OID_AUTO, dirhash_mem, CTLFLAG_RD, &ufs_dirhashmem,
80     0, "current dirhash memory usage");
81 static int ufs_dirhashcheck = 0;
82 SYSCTL_INT(_vfs_ufs, OID_AUTO, dirhash_docheck, CTLFLAG_RW, &ufs_dirhashcheck,
83     0, "enable extra sanity tests");
84 static int ufs_dirhashlowmemcount = 0;
85 SYSCTL_INT(_vfs_ufs, OID_AUTO, dirhash_lowmemcount, CTLFLAG_RD,
86     &ufs_dirhashlowmemcount, 0, "number of times low memory hook called");
87 static int ufs_dirhashreclaimage = 5;
88 SYSCTL_INT(_vfs_ufs, OID_AUTO, dirhash_reclaimage, CTLFLAG_RW,
89     &ufs_dirhashreclaimage, 0,
90     "max time in seconds of hash inactivity before deletion in low VM events");
91 
92 
93 static int ufsdirhash_hash(struct dirhash *dh, char *name, int namelen);
94 static void ufsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff);
95 static void ufsdirhash_delslot(struct dirhash *dh, int slot);
96 static int ufsdirhash_findslot(struct dirhash *dh, char *name, int namelen,
97 	   doff_t offset);
98 static doff_t ufsdirhash_getprev(struct direct *dp, doff_t offset);
99 static int ufsdirhash_recycle(int wanted);
100 static void ufsdirhash_lowmem(void);
101 static void ufsdirhash_free_locked(struct inode *ip);
102 
103 static uma_zone_t	ufsdirhash_zone;
104 
105 #define DIRHASHLIST_LOCK() 		mtx_lock(&ufsdirhash_mtx)
106 #define DIRHASHLIST_UNLOCK() 		mtx_unlock(&ufsdirhash_mtx)
107 #define DIRHASH_BLKALLOC_WAITOK() 	uma_zalloc(ufsdirhash_zone, M_WAITOK)
108 #define DIRHASH_BLKFREE(ptr) 		uma_zfree(ufsdirhash_zone, (ptr))
109 #define	DIRHASH_ASSERT_LOCKED(dh)					\
110     sx_assert(&(dh)->dh_lock, SA_LOCKED)
111 
112 /* Dirhash list; recently-used entries are near the tail. */
113 static TAILQ_HEAD(, dirhash) ufsdirhash_list;
114 
115 /* Protects: ufsdirhash_list, `dh_list' field, ufs_dirhashmem. */
116 static struct mtx	ufsdirhash_mtx;
117 
118 /*
119  * Locking:
120  *
121  * The relationship between inode and dirhash is protected either by an
122  * exclusive vnode lock or the vnode interlock where a shared vnode lock
123  * may be used.  The dirhash_mtx is acquired after the dirhash lock.  To
124  * handle teardown races, code wishing to lock the dirhash for an inode
125  * when using a shared vnode lock must obtain a private reference on the
126  * dirhash while holding the vnode interlock.  They can drop it once they
127  * have obtained the dirhash lock and verified that the dirhash wasn't
128  * recycled while they waited for the dirhash lock.
129  *
130  * ufsdirhash_build() acquires a shared lock on the dirhash when it is
131  * successful.  This lock is released after a call to ufsdirhash_lookup().
132  *
133  * Functions requiring exclusive access use ufsdirhash_acquire() which may
134  * free a dirhash structure that was recycled by ufsdirhash_recycle().
135  *
136  * The dirhash lock may be held across I/O operations.
137  *
138  * WITNESS reports a lock order reversal between the "bufwait" lock
139  * and the "dirhash" lock.  However, this specific reversal will not
140  * cause a deadlock.  To get a deadlock, one would have to lock a
141  * buffer followed by the dirhash while a second thread locked a
142  * buffer while holding the dirhash lock.  The second order can happen
143  * under a shared or exclusive vnode lock for the associated directory
144  * in lookup().  The first order, however, can only happen under an
145  * exclusive vnode lock (e.g. unlink(), rename(), etc.).  Thus, for
146  * a thread to be doing a "bufwait" -> "dirhash" order, it has to hold
147  * an exclusive vnode lock.  That exclusive vnode lock will prevent
148  * any other threads from doing a "dirhash" -> "bufwait" order.
149  */
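
/*
 * In outline, a caller holding only a shared vnode lock does:
 *
 *	VI_LOCK(vp); dh = ip->i_dirhash; ufsdirhash_hold(dh); VI_UNLOCK(vp);
 *	sx_slock(&dh->dh_lock);
 *	VI_LOCK(vp); recheck that ip->i_dirhash == dh; VI_UNLOCK(vp);
 *	ufsdirhash_drop(dh);	(the private reference is no longer needed)
 *
 * and retries if the dirhash was replaced while it slept on dh_lock.
 * This is the pattern followed by ufsdirhash_create(); ufsdirhash_free()
 * does the same but with an exclusive dirhash lock.
 */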
150 
151 static void
152 ufsdirhash_hold(struct dirhash *dh)
153 {
154 
155 	refcount_acquire(&dh->dh_refcount);
156 }
157 
158 static void
159 ufsdirhash_drop(struct dirhash *dh)
160 {
161 
162 	if (refcount_release(&dh->dh_refcount)) {
163 		sx_destroy(&dh->dh_lock);
164 		free(dh, M_DIRHASH);
165 	}
166 }
167 
168 /*
169  * Release the lock on a dirhash.
170  */
171 static void
172 ufsdirhash_release(struct dirhash *dh)
173 {
174 
175 	sx_unlock(&dh->dh_lock);
176 }
177 
178 /*
179  * Either acquire an existing hash locked shared or create a new hash and
180  * return it exclusively locked.  May return NULL if the allocation fails.
181  *
182  * The vnode interlock is used to protect the i_dirhash pointer from
183  * simultaneous access while only a shared vnode lock is held.
184  */
185 static struct dirhash *
186 ufsdirhash_create(struct inode *ip)
187 {
188 	struct dirhash *ndh;
189 	struct dirhash *dh;
190 	struct vnode *vp;
191 	int error;
192 
193 	error = 0;
194 	ndh = dh = NULL;
195 	vp = ip->i_vnode;
196 	for (;;) {
197 		/* Racy check for i_dirhash to prefetch a dirhash structure. */
198 		if (ip->i_dirhash == NULL && ndh == NULL) {
199 			ndh = malloc(sizeof *dh, M_DIRHASH,
200 			    M_NOWAIT | M_ZERO);
201 			if (ndh == NULL)
202 				return (NULL);
203 			refcount_init(&ndh->dh_refcount, 1);
204 
205 			/*
206 			 * The DUPOK is to prevent warnings from the
207 			 * sx_slock() a few lines down.  That lock is
208 			 * safe because the duplicate, in that case, is
209 			 * the lock of the dirhash we are creating
210 			 * now, which has no external references until
211 			 * after this function returns.
212 			 */
213 			sx_init_flags(&ndh->dh_lock, "dirhash", SX_DUPOK);
214 			sx_xlock(&ndh->dh_lock);
215 		}
216 		/*
217 		 * Check i_dirhash.  If it's NULL just try to use a
218 		 * preallocated structure.  If none exists loop and try again.
219 		 */
220 		VI_LOCK(vp);
221 		dh = ip->i_dirhash;
222 		if (dh == NULL) {
223 			ip->i_dirhash = ndh;
224 			VI_UNLOCK(vp);
225 			if (ndh == NULL)
226 				continue;
227 			return (ndh);
228 		}
229 		ufsdirhash_hold(dh);
230 		VI_UNLOCK(vp);
231 
232 		/* Acquire a shared lock on existing hashes. */
233 		sx_slock(&dh->dh_lock);
234 
235 		/* The hash could've been recycled while we were waiting. */
236 		VI_LOCK(vp);
237 		if (ip->i_dirhash != dh) {
238 			VI_UNLOCK(vp);
239 			ufsdirhash_release(dh);
240 			ufsdirhash_drop(dh);
241 			continue;
242 		}
243 		VI_UNLOCK(vp);
244 		ufsdirhash_drop(dh);
245 
246 		/* If the hash is still valid we've succeeded. */
247 		if (dh->dh_hash != NULL)
248 			break;
249 		/*
250 		 * If the hash is NULL it has been recycled.  Try to upgrade
251 		 * so we can recreate it.  If we fail the upgrade, drop our
252 		 * lock and try again.
253 		 */
254 		if (sx_try_upgrade(&dh->dh_lock))
255 			break;
256 		sx_sunlock(&dh->dh_lock);
257 	}
258 	/* Free the preallocated structure if it was not necessary. */
259 	if (ndh) {
260 		ufsdirhash_release(ndh);
261 		ufsdirhash_drop(ndh);
262 	}
263 	return (dh);
264 }
265 
266 /*
267  * Acquire an exclusive lock on an existing hash.  Requires an exclusive
268  * vnode lock to protect the i_dirhash pointer.  Hashes that have been
269  * recycled are reclaimed here and NULL is returned.
270  */
271 static struct dirhash *
272 ufsdirhash_acquire(struct inode *ip)
273 {
274 	struct dirhash *dh;
275 	struct vnode *vp;
276 
277 	ASSERT_VOP_ELOCKED(ip->i_vnode, __FUNCTION__);
278 
279 	vp = ip->i_vnode;
280 	dh = ip->i_dirhash;
281 	if (dh == NULL)
282 		return (NULL);
283 	sx_xlock(&dh->dh_lock);
284 	if (dh->dh_hash != NULL)
285 		return (dh);
286 	ufsdirhash_free_locked(ip);
287 	return (NULL);
288 }
289 
290 /*
291  * Acquire exclusively and free the hash pointed to by ip.  Works with a
292  * shared or exclusive vnode lock.
293  */
294 void
295 ufsdirhash_free(struct inode *ip)
296 {
297 	struct dirhash *dh;
298 	struct vnode *vp;
299 
300 	vp = ip->i_vnode;
301 	for (;;) {
302 		/* Grab a reference on this inode's dirhash if it has one. */
303 		VI_LOCK(vp);
304 		dh = ip->i_dirhash;
305 		if (dh == NULL) {
306 			VI_UNLOCK(vp);
307 			return;
308 		}
309 		ufsdirhash_hold(dh);
310 		VI_UNLOCK(vp);
311 
312 		/* Exclusively lock the dirhash. */
313 		sx_xlock(&dh->dh_lock);
314 
315 		/* If this dirhash still belongs to this inode, then free it. */
316 		VI_LOCK(vp);
317 		if (ip->i_dirhash == dh) {
318 			VI_UNLOCK(vp);
319 			ufsdirhash_drop(dh);
320 			break;
321 		}
322 		VI_UNLOCK(vp);
323 
324 		/*
325 		 * This inode's dirhash has changed while we were
326 		 * waiting for the dirhash lock, so try again.
327 		 */
328 		ufsdirhash_release(dh);
329 		ufsdirhash_drop(dh);
330 	}
331 	ufsdirhash_free_locked(ip);
332 }
333 
334 /*
335  * Attempt to build up a hash table for the directory contents in
336  * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
337  */
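/*
 * Sizing: the hash gets roughly 1.5 slots per smallest possible entry
 * (the directory size divided by DIRECTSIZ(1)), and the free-space map
 * gets roughly 1.5 dh_blkfree entries per existing DIRBLKSIZ block, so
 * both can absorb some directory growth before a rebuild is needed.
 */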
338 int
339 ufsdirhash_build(struct inode *ip)
340 {
341 	struct dirhash *dh;
342 	struct buf *bp = NULL;
343 	struct direct *ep;
344 	struct vnode *vp;
345 	doff_t bmask, pos;
346 	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;
347 
348 	/* Take care of a decreased sysctl value. */
349 	while (ufs_dirhashmem > ufs_dirhashmaxmem) {
350 		if (ufsdirhash_recycle(0) != 0)
351 			return (-1);
352 		/* Recycled enough memory, so unlock the list. */
353 		DIRHASHLIST_UNLOCK();
354 	}
355 
356 	/* Check if we can/should use dirhash. */
357 	if (ip->i_size < ufs_mindirhashsize || OFSFMT(ip->i_vnode) ||
358 	    ip->i_effnlink == 0) {
359 		if (ip->i_dirhash)
360 			ufsdirhash_free(ip);
361 		return (-1);
362 	}
363 	dh = ufsdirhash_create(ip);
364 	if (dh == NULL)
365 		return (-1);
366 	if (dh->dh_hash != NULL)
367 		return (0);
368 
369 	vp = ip->i_vnode;
370 	/* Allocate 50% more entries than this dir size could ever need. */
371 	KASSERT(ip->i_size >= DIRBLKSIZ, ("ufsdirhash_build size"));
372 	nslots = ip->i_size / DIRECTSIZ(1);
373 	nslots = (nslots * 3 + 1) / 2;
374 	narrays = howmany(nslots, DH_NBLKOFF);
375 	nslots = narrays * DH_NBLKOFF;
376 	dirblocks = howmany(ip->i_size, DIRBLKSIZ);
377 	nblocks = (dirblocks * 3 + 1) / 2;
378 	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
379 	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
380 	    nblocks * sizeof(*dh->dh_blkfree);
381 	DIRHASHLIST_LOCK();
382 	if (memreqd + ufs_dirhashmem > ufs_dirhashmaxmem) {
383 		DIRHASHLIST_UNLOCK();
384 		if (memreqd > ufs_dirhashmaxmem / 2)
385 			goto fail;
386 		/* Try to free some space. */
387 		if (ufsdirhash_recycle(memreqd) != 0)
388 			goto fail;
389 		/* Enough was freed, and list has been locked. */
390 	}
391 	ufs_dirhashmem += memreqd;
392 	DIRHASHLIST_UNLOCK();
393 
394 	/* Initialise the hash table and block statistics. */
395 	dh->dh_memreq = memreqd;
396 	dh->dh_narrays = narrays;
397 	dh->dh_hlen = nslots;
398 	dh->dh_nblk = nblocks;
399 	dh->dh_dirblks = dirblocks;
400 	for (i = 0; i < DH_NFSTATS; i++)
401 		dh->dh_firstfree[i] = -1;
402 	dh->dh_firstfree[DH_NFSTATS] = 0;
403 	dh->dh_hused = 0;
404 	dh->dh_seqopt = 0;
405 	dh->dh_seqoff = 0;
406 	dh->dh_score = DH_SCOREINIT;
407 	dh->dh_lastused = time_second;
408 
409 	/*
410 	 * Use non-blocking mallocs so that we will revert to a linear
411 	 * lookup on failure rather than potentially blocking forever.
412 	 */
413 	dh->dh_hash = malloc(narrays * sizeof(dh->dh_hash[0]),
414 	    M_DIRHASH, M_NOWAIT | M_ZERO);
415 	if (dh->dh_hash == NULL)
416 		goto fail;
417 	dh->dh_blkfree = malloc(nblocks * sizeof(dh->dh_blkfree[0]),
418 	    M_DIRHASH, M_NOWAIT);
419 	if (dh->dh_blkfree == NULL)
420 		goto fail;
421 	for (i = 0; i < narrays; i++) {
422 		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC_WAITOK()) == NULL)
423 			goto fail;
424 		for (j = 0; j < DH_NBLKOFF; j++)
425 			dh->dh_hash[i][j] = DIRHASH_EMPTY;
426 	}
427 	for (i = 0; i < dirblocks; i++)
428 		dh->dh_blkfree[i] = DIRBLKSIZ / DIRALIGN;
429 	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
430 	pos = 0;
431 	while (pos < ip->i_size) {
432 		/* If necessary, get the next directory block. */
433 		if ((pos & bmask) == 0) {
434 			if (bp != NULL)
435 				brelse(bp);
436 			if (UFS_BLKATOFF(vp, (off_t)pos, NULL, &bp) != 0)
437 				goto fail;
438 		}
439 
440 		/* Add this entry to the hash. */
441 		ep = (struct direct *)((char *)bp->b_data + (pos & bmask));
442 		if (ep->d_reclen == 0 || ep->d_reclen >
443 		    DIRBLKSIZ - (pos & (DIRBLKSIZ - 1))) {
444 			/* Corrupted directory. */
445 			brelse(bp);
446 			goto fail;
447 		}
448 		if (ep->d_ino != 0) {
449 			/* Add the entry (simplified ufsdirhash_add). */
450 			slot = ufsdirhash_hash(dh, ep->d_name, ep->d_namlen);
451 			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
452 				slot = WRAPINCR(slot, dh->dh_hlen);
453 			dh->dh_hused++;
454 			DH_ENTRY(dh, slot) = pos;
455 			ufsdirhash_adjfree(dh, pos, -DIRSIZ(0, ep));
456 		}
457 		pos += ep->d_reclen;
458 	}
459 
460 	if (bp != NULL)
461 		brelse(bp);
462 	DIRHASHLIST_LOCK();
463 	TAILQ_INSERT_TAIL(&ufsdirhash_list, dh, dh_list);
464 	dh->dh_onlist = 1;
465 	DIRHASHLIST_UNLOCK();
466 	sx_downgrade(&dh->dh_lock);
467 	return (0);
468 
469 fail:
470 	ufsdirhash_free_locked(ip);
471 	return (-1);
472 }
473 
474 /*
475  * Free any hash table associated with inode 'ip'.
476  */
477 static void
478 ufsdirhash_free_locked(struct inode *ip)
479 {
480 	struct dirhash *dh;
481 	struct vnode *vp;
482 	int i;
483 
484 	DIRHASH_ASSERT_LOCKED(ip->i_dirhash);
485 
486 	/*
487 	 * Clear the pointer in the inode to prevent new threads from
488 	 * finding the dead structure.
489 	 */
490 	vp = ip->i_vnode;
491 	VI_LOCK(vp);
492 	dh = ip->i_dirhash;
493 	ip->i_dirhash = NULL;
494 	VI_UNLOCK(vp);
495 
496 	/*
497 	 * Remove the hash from the list since we are going to free its
498 	 * memory.
499 	 */
500 	DIRHASHLIST_LOCK();
501 	if (dh->dh_onlist)
502 		TAILQ_REMOVE(&ufsdirhash_list, dh, dh_list);
503 	ufs_dirhashmem -= dh->dh_memreq;
504 	DIRHASHLIST_UNLOCK();
505 
506 	/*
507 	 * At this point, any waiters for the lock should hold their
508 	 * own reference on the dirhash structure.  They will drop
509 	 * that reference once they grab the vnode interlock and see
510 	 * that ip->i_dirhash is NULL.
511 	 */
512 	sx_xunlock(&dh->dh_lock);
513 
514 	/*
515 	 * Handle partially recycled as well as fully constructed hashes.
516 	 */
517 	if (dh->dh_hash != NULL) {
518 		for (i = 0; i < dh->dh_narrays; i++)
519 			if (dh->dh_hash[i] != NULL)
520 				DIRHASH_BLKFREE(dh->dh_hash[i]);
521 		free(dh->dh_hash, M_DIRHASH);
522 		if (dh->dh_blkfree != NULL)
523 			free(dh->dh_blkfree, M_DIRHASH);
524 	}
525 
526 	/*
527 	 * Drop the inode's reference to the data structure.
528 	 */
529 	ufsdirhash_drop(dh);
530 }
531 
532 /*
533  * Find the offset of the specified name within the given inode.
534  * Returns 0 on success, ENOENT if the entry does not exist, or
535  * EJUSTRETURN if the caller should revert to a linear search.
536  *
537  * If successful, the directory offset is stored in *offp, and a
538  * pointer to a struct buf containing the entry is stored in *bpp. If
539  * prevoffp is non-NULL, the offset of the previous entry within
540  * the DIRBLKSIZ-sized block is stored in *prevoffp (if the entry
541  * is the first in a block, the start of the block is used).
542  *
543  * Must be called with the hash locked.  Returns with the hash unlocked.
544  */
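/*
 * The search itself is a linear probe: starting at the name's hash slot,
 * each slot supplies a candidate directory offset whose on-disk entry is
 * read and compared against the name.  DIRHASH_DEL slots are skipped and
 * the probe stops at the first DIRHASH_EMPTY slot.
 */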
545 int
546 ufsdirhash_lookup(struct inode *ip, char *name, int namelen, doff_t *offp,
547     struct buf **bpp, doff_t *prevoffp)
548 {
549 	struct dirhash *dh, *dh_next;
550 	struct direct *dp;
551 	struct vnode *vp;
552 	struct buf *bp;
553 	doff_t blkoff, bmask, offset, prevoff;
554 	int i, slot;
555 	int error;
556 
557 	dh = ip->i_dirhash;
558 	KASSERT(dh != NULL && dh->dh_hash != NULL,
559 	    ("ufsdirhash_lookup: Invalid dirhash %p\n", dh));
560 	DIRHASH_ASSERT_LOCKED(dh);
561 	/*
562 	 * Move this dirhash towards the end of the list if it has a
563 	 * score higher than the next entry; the list lock protects the move.
564 	 */
565 	DIRHASHLIST_LOCK();
566 	if (TAILQ_NEXT(dh, dh_list) != NULL) {
567 		/*
568 		 * If the new score will be greater than that of the next
569 		 * entry, then move this entry past it. With both mutexes
570 		 * held, dh_next won't go away, but its dh_score could
571 		 * change; that's not important since it is just a hint.
572 		 */
573 		if ((dh_next = TAILQ_NEXT(dh, dh_list)) != NULL &&
574 		    dh->dh_score >= dh_next->dh_score) {
575 			KASSERT(dh->dh_onlist, ("dirhash: not on list"));
576 			TAILQ_REMOVE(&ufsdirhash_list, dh, dh_list);
577 			TAILQ_INSERT_AFTER(&ufsdirhash_list, dh_next, dh,
578 			    dh_list);
579 		}
580 	}
581 	/* Update the score. */
582 	if (dh->dh_score < DH_SCOREMAX)
583 		dh->dh_score++;
584 
585 	/* Update last used time. */
586 	dh->dh_lastused = time_second;
587 	DIRHASHLIST_UNLOCK();
588 
589 	vp = ip->i_vnode;
590 	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
591 	blkoff = -1;
592 	bp = NULL;
593 restart:
594 	slot = ufsdirhash_hash(dh, name, namelen);
595 
596 	if (dh->dh_seqopt) {
597 		/*
598 		 * Sequential access optimisation. dh_seqoff contains the
599 		 * offset of the directory entry immediately following
600 		 * the last entry that was looked up. Check if this offset
601 		 * appears in the hash chain for the name we are looking for.
602 		 */
603 		for (i = slot; (offset = DH_ENTRY(dh, i)) != DIRHASH_EMPTY;
604 		    i = WRAPINCR(i, dh->dh_hlen))
605 			if (offset == dh->dh_seqoff)
606 				break;
607 		if (offset == dh->dh_seqoff) {
608 			/*
609 			 * We found an entry with the expected offset. This
610 			 * is probably the entry we want, but if not, the
611 			 * code below will turn off seqopt and retry.
612 			 */
613 			slot = i;
614 		} else
615 			dh->dh_seqopt = 0;
616 	}
617 
618 	for (; (offset = DH_ENTRY(dh, slot)) != DIRHASH_EMPTY;
619 	    slot = WRAPINCR(slot, dh->dh_hlen)) {
620 		if (offset == DIRHASH_DEL)
621 			continue;
622 		if (offset < 0 || offset >= ip->i_size)
623 			panic("ufsdirhash_lookup: bad offset in hash array");
624 		if ((offset & ~bmask) != blkoff) {
625 			if (bp != NULL)
626 				brelse(bp);
627 			blkoff = offset & ~bmask;
628 			if (UFS_BLKATOFF(vp, (off_t)blkoff, NULL, &bp) != 0) {
629 				error = EJUSTRETURN;
630 				goto fail;
631 			}
632 		}
633 		KASSERT(bp != NULL, ("no buffer allocated"));
634 		dp = (struct direct *)(bp->b_data + (offset & bmask));
635 		if (dp->d_reclen == 0 || dp->d_reclen >
636 		    DIRBLKSIZ - (offset & (DIRBLKSIZ - 1))) {
637 			/* Corrupted directory. */
638 			error = EJUSTRETURN;
639 			goto fail;
640 		}
641 		if (dp->d_namlen == namelen &&
642 		    bcmp(dp->d_name, name, namelen) == 0) {
643 			/* Found. Get the prev offset if needed. */
644 			if (prevoffp != NULL) {
645 				if (offset & (DIRBLKSIZ - 1)) {
646 					prevoff = ufsdirhash_getprev(dp,
647 					    offset);
648 					if (prevoff == -1) {
649 						error = EJUSTRETURN;
650 						goto fail;
651 					}
652 				} else
653 					prevoff = offset;
654 				*prevoffp = prevoff;
655 			}
656 
657 			/* Check for sequential access, and update offset. */
658 			if (dh->dh_seqopt == 0 && dh->dh_seqoff == offset)
659 				dh->dh_seqopt = 1;
660 			dh->dh_seqoff = offset + DIRSIZ(0, dp);
661 			*bpp = bp;
662 			*offp = offset;
663 			ufsdirhash_release(dh);
664 			return (0);
665 		}
666 
667 		/*
668 		 * When the name doesn't match in the seqopt case, go back
669 		 * and search normally.
670 		 */
671 		if (dh->dh_seqopt) {
672 			dh->dh_seqopt = 0;
673 			goto restart;
674 		}
675 	}
676 	error = ENOENT;
677 fail:
678 	ufsdirhash_release(dh);
679 	if (bp != NULL)
680 		brelse(bp);
681 	return (error);
682 }
683 
684 /*
685  * Find a directory block with room for 'slotneeded' bytes. Returns
686  * the offset of the directory entry that begins the free space.
687  * This will either be the offset of an existing entry that has free
688  * space at the end, or the offset of an entry with d_ino == 0 at
689  * the start of a DIRBLKSIZ block.
690  *
691  * To use the space, the caller may need to compact existing entries in
692  * the directory. The total number of bytes in all of the entries involved
693  * in the compaction is stored in *slotsize. In other words, all of
694  * the entries that must be compacted are exactly contained in the
695  * region beginning at the returned offset and spanning *slotsize bytes.
696  *
697  * Returns -1 if no space was found, indicating that the directory
698  * must be extended.
699  */
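/*
 * For example, a request for slotneeded == 16 bytes starts the search at
 * dh_firstfree[howmany(16, DIRALIGN)] and walks up through the buckets
 * for larger free counts until a block is found; that block is then
 * scanned for the run of entries whose combined slack covers the 16
 * bytes.
 */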
700 doff_t
701 ufsdirhash_findfree(struct inode *ip, int slotneeded, int *slotsize)
702 {
703 	struct direct *dp;
704 	struct dirhash *dh;
705 	struct buf *bp;
706 	doff_t pos, slotstart;
707 	int dirblock, error, freebytes, i;
708 
709 	dh = ip->i_dirhash;
710 	KASSERT(dh != NULL && dh->dh_hash != NULL,
711 	    ("ufsdirhash_findfree: Invalid dirhash %p\n", dh));
712 	DIRHASH_ASSERT_LOCKED(dh);
713 
714 	/* Find a directory block with the desired free space. */
715 	dirblock = -1;
716 	for (i = howmany(slotneeded, DIRALIGN); i <= DH_NFSTATS; i++)
717 		if ((dirblock = dh->dh_firstfree[i]) != -1)
718 			break;
719 	if (dirblock == -1)
720 		return (-1);
721 
722 	KASSERT(dirblock < dh->dh_nblk &&
723 	    dh->dh_blkfree[dirblock] >= howmany(slotneeded, DIRALIGN),
724 	    ("ufsdirhash_findfree: bad stats"));
725 	pos = dirblock * DIRBLKSIZ;
726 	error = UFS_BLKATOFF(ip->i_vnode, (off_t)pos, (char **)&dp, &bp);
727 	if (error)
728 		return (-1);
729 
730 	/* Find the first entry with free space. */
731 	for (i = 0; i < DIRBLKSIZ; ) {
732 		if (dp->d_reclen == 0) {
733 			brelse(bp);
734 			return (-1);
735 		}
736 		if (dp->d_ino == 0 || dp->d_reclen > DIRSIZ(0, dp))
737 			break;
738 		i += dp->d_reclen;
739 		dp = (struct direct *)((char *)dp + dp->d_reclen);
740 	}
741 	if (i > DIRBLKSIZ) {
742 		brelse(bp);
743 		return (-1);
744 	}
745 	slotstart = pos + i;
746 
747 	/* Find the range of entries needed to get enough space */
748 	freebytes = 0;
749 	while (i < DIRBLKSIZ && freebytes < slotneeded) {
750 		freebytes += dp->d_reclen;
751 		if (dp->d_ino != 0)
752 			freebytes -= DIRSIZ(0, dp);
753 		if (dp->d_reclen == 0) {
754 			brelse(bp);
755 			return (-1);
756 		}
757 		i += dp->d_reclen;
758 		dp = (struct direct *)((char *)dp + dp->d_reclen);
759 	}
760 	if (i > DIRBLKSIZ) {
761 		brelse(bp);
762 		return (-1);
763 	}
764 	if (freebytes < slotneeded)
765 		panic("ufsdirhash_findfree: free mismatch");
766 	brelse(bp);
767 	*slotsize = pos + i - slotstart;
768 	return (slotstart);
769 }
770 
771 /*
772  * Return the start of the unused space at the end of a directory, or
773  * -1 if there are no trailing unused blocks.
774  */
775 doff_t
776 ufsdirhash_enduseful(struct inode *ip)
777 {
778 
779 	struct dirhash *dh;
780 	int i;
781 
782 	dh = ip->i_dirhash;
783 	KASSERT(dh != NULL && dh->dh_hash != NULL,
784 	    ("ufsdirhash_enduseful: Invalid dirhash %p\n", dh));
785 	DIRHASH_ASSERT_LOCKED(dh);
786 
787 	if (dh->dh_blkfree[dh->dh_dirblks - 1] != DIRBLKSIZ / DIRALIGN)
788 		return (-1);
789 
790 	for (i = dh->dh_dirblks - 1; i >= 0; i--)
791 		if (dh->dh_blkfree[i] != DIRBLKSIZ / DIRALIGN)
792 			break;
793 
794 	return ((doff_t)(i + 1) * DIRBLKSIZ);
795 }
796 
797 /*
798  * Insert information into the hash about a new directory entry. dirp
799  * points to a struct direct containing the entry, and offset specifies
800  * the offset of this entry.
801  */
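/*
 * The offset is stored in the first empty or deleted slot found by
 * linear probing from the name's hash slot.  Only filling a previously
 * DIRHASH_EMPTY slot increases dh_hused, which is what the 3/4 load
 * factor check below compares against dh_hlen.
 */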
802 void
803 ufsdirhash_add(struct inode *ip, struct direct *dirp, doff_t offset)
804 {
805 	struct dirhash *dh;
806 	int slot;
807 
808 	if ((dh = ufsdirhash_acquire(ip)) == NULL)
809 		return;
810 
811 	KASSERT(offset < dh->dh_dirblks * DIRBLKSIZ,
812 	    ("ufsdirhash_add: bad offset"));
813 	/*
814 	 * Normal hash usage is < 66%. If the usage gets too high then
815 	 * remove the hash entirely and let it be rebuilt later.
816 	 */
817 	if (dh->dh_hused >= (dh->dh_hlen * 3) / 4) {
818 		ufsdirhash_free_locked(ip);
819 		return;
820 	}
821 
822 	/* Find a free hash slot (empty or deleted), and add the entry. */
823 	slot = ufsdirhash_hash(dh, dirp->d_name, dirp->d_namlen);
824 	while (DH_ENTRY(dh, slot) >= 0)
825 		slot = WRAPINCR(slot, dh->dh_hlen);
826 	if (DH_ENTRY(dh, slot) == DIRHASH_EMPTY)
827 		dh->dh_hused++;
828 	DH_ENTRY(dh, slot) = offset;
829 
830 	/* Update last used time. */
831 	dh->dh_lastused = time_second;
832 
833 	/* Update the per-block summary info. */
834 	ufsdirhash_adjfree(dh, offset, -DIRSIZ(0, dirp));
835 	ufsdirhash_release(dh);
836 }
837 
838 /*
839  * Remove the specified directory entry from the hash. The entry to remove
840  * is defined by the name in `dirp', which must exist at the specified
841  * `offset' within the directory.
842  */
843 void
844 ufsdirhash_remove(struct inode *ip, struct direct *dirp, doff_t offset)
845 {
846 	struct dirhash *dh;
847 	int slot;
848 
849 	if ((dh = ufsdirhash_acquire(ip)) == NULL)
850 		return;
851 
852 	KASSERT(offset < dh->dh_dirblks * DIRBLKSIZ,
853 	    ("ufsdirhash_remove: bad offset"));
854 	/* Find the entry */
855 	slot = ufsdirhash_findslot(dh, dirp->d_name, dirp->d_namlen, offset);
856 
857 	/* Remove the hash entry. */
858 	ufsdirhash_delslot(dh, slot);
859 
860 	/* Update the per-block summary info. */
861 	ufsdirhash_adjfree(dh, offset, DIRSIZ(0, dirp));
862 	ufsdirhash_release(dh);
863 }
864 
865 /*
866  * Change the offset associated with a directory entry in the hash. Used
867  * when compacting directory blocks.
868  */
869 void
870 ufsdirhash_move(struct inode *ip, struct direct *dirp, doff_t oldoff,
871     doff_t newoff)
872 {
873 	struct dirhash *dh;
874 	int slot;
875 
876 	if ((dh = ufsdirhash_acquire(ip)) == NULL)
877 		return;
878 
879 	KASSERT(oldoff < dh->dh_dirblks * DIRBLKSIZ &&
880 	    newoff < dh->dh_dirblks * DIRBLKSIZ,
881 	    ("ufsdirhash_move: bad offset"));
882 	/* Find the entry, and update the offset. */
883 	slot = ufsdirhash_findslot(dh, dirp->d_name, dirp->d_namlen, oldoff);
884 	DH_ENTRY(dh, slot) = newoff;
885 	ufsdirhash_release(dh);
886 }
887 
888 /*
889  * Inform dirhash that the directory has grown by one block that
890  * begins at offset (i.e. the new length is offset + DIRBLKSIZ).
891  */
892 void
893 ufsdirhash_newblk(struct inode *ip, doff_t offset)
894 {
895 	struct dirhash *dh;
896 	int block;
897 
898 	if ((dh = ufsdirhash_acquire(ip)) == NULL)
899 		return;
900 
901 	KASSERT(offset == dh->dh_dirblks * DIRBLKSIZ,
902 	    ("ufsdirhash_newblk: bad offset"));
903 	block = offset / DIRBLKSIZ;
904 	if (block >= dh->dh_nblk) {
905 		/* Out of space; must rebuild. */
906 		ufsdirhash_free_locked(ip);
907 		return;
908 	}
909 	dh->dh_dirblks = block + 1;
910 
911 	/* Account for the new free block. */
912 	dh->dh_blkfree[block] = DIRBLKSIZ / DIRALIGN;
913 	if (dh->dh_firstfree[DH_NFSTATS] == -1)
914 		dh->dh_firstfree[DH_NFSTATS] = block;
915 	ufsdirhash_release(dh);
916 }
917 
918 /*
919  * Inform dirhash that the directory is being truncated.
920  */
921 void
922 ufsdirhash_dirtrunc(struct inode *ip, doff_t offset)
923 {
924 	struct dirhash *dh;
925 	int block, i;
926 
927 	if ((dh = ufsdirhash_acquire(ip)) == NULL)
928 		return;
929 
930 	KASSERT(offset <= dh->dh_dirblks * DIRBLKSIZ,
931 	    ("ufsdirhash_dirtrunc: bad offset"));
932 	block = howmany(offset, DIRBLKSIZ);
933 	/*
934 	 * If the directory shrinks to less than 1/8 of dh_nblk blocks
935 	 * (about 20% of its original size due to the 50% extra added in
936 	 * ufsdirhash_build) then free it, and let the caller rebuild
937 	 * if necessary.
938 	 */
939 	if (block < dh->dh_nblk / 8 && dh->dh_narrays > 1) {
940 		ufsdirhash_free_locked(ip);
941 		return;
942 	}
943 
944 	/*
945 	 * Remove any `first free' information pertaining to the
946 	 * truncated blocks. All blocks we're removing should be
947 	 * completely unused.
948 	 */
949 	if (dh->dh_firstfree[DH_NFSTATS] >= block)
950 		dh->dh_firstfree[DH_NFSTATS] = -1;
951 	for (i = block; i < dh->dh_dirblks; i++)
952 		if (dh->dh_blkfree[i] != DIRBLKSIZ / DIRALIGN)
953 			panic("ufsdirhash_dirtrunc: blocks in use");
954 	for (i = 0; i < DH_NFSTATS; i++)
955 		if (dh->dh_firstfree[i] >= block)
956 			panic("ufsdirhash_dirtrunc: first free corrupt");
957 	dh->dh_dirblks = block;
958 	ufsdirhash_release(dh);
959 }
960 
961 /*
962  * Debugging function to check that the dirhash information about
963  * a directory block matches its actual contents. Panics if a mismatch
964  * is detected.
965  *
966  * On entry, `buf' should point to the start of an in-core
967  * DIRBLKSIZ-sized directory block, and `offset' should contain the
968  * offset from the start of the directory of that block.
969  */
970 void
971 ufsdirhash_checkblock(struct inode *ip, char *buf, doff_t offset)
972 {
973 	struct dirhash *dh;
974 	struct direct *dp;
975 	int block, ffslot, i, nfree;
976 
977 	if (!ufs_dirhashcheck)
978 		return;
979 	if ((dh = ufsdirhash_acquire(ip)) == NULL)
980 		return;
981 
982 	block = offset / DIRBLKSIZ;
983 	if ((offset & (DIRBLKSIZ - 1)) != 0 || block >= dh->dh_dirblks)
984 		panic("ufsdirhash_checkblock: bad offset");
985 
986 	nfree = 0;
987 	for (i = 0; i < DIRBLKSIZ; i += dp->d_reclen) {
988 		dp = (struct direct *)(buf + i);
989 		if (dp->d_reclen == 0 || i + dp->d_reclen > DIRBLKSIZ)
990 			panic("ufsdirhash_checkblock: bad dir");
991 
992 		if (dp->d_ino == 0) {
993 #if 0
994 			/*
995 			 * XXX entries with d_ino == 0 should only occur
996 			 * at the start of a DIRBLKSIZ block. However the
997 			 * ufs code is tolerant of such entries at other
998 			 * offsets, and fsck does not fix them.
999 			 */
1000 			if (i != 0)
1001 				panic("ufsdirhash_checkblock: bad dir inode");
1002 #endif
1003 			nfree += dp->d_reclen;
1004 			continue;
1005 		}
1006 
1007 		/* Check that the entry exists (will panic if it doesn't). */
1008 		ufsdirhash_findslot(dh, dp->d_name, dp->d_namlen, offset + i);
1009 
1010 		nfree += dp->d_reclen - DIRSIZ(0, dp);
1011 	}
1012 	if (i != DIRBLKSIZ)
1013 		panic("ufsdirhash_checkblock: bad dir end");
1014 
1015 	if (dh->dh_blkfree[block] * DIRALIGN != nfree)
1016 		panic("ufsdirhash_checkblock: bad free count");
1017 
1018 	ffslot = BLKFREE2IDX(nfree / DIRALIGN);
1019 	for (i = 0; i <= DH_NFSTATS; i++)
1020 		if (dh->dh_firstfree[i] == block && i != ffslot)
1021 			panic("ufsdirhash_checkblock: bad first-free");
1022 	if (dh->dh_firstfree[ffslot] == -1)
1023 		panic("ufsdirhash_checkblock: missing first-free entry");
1024 	ufsdirhash_release(dh);
1025 }
1026 
1027 /*
1028  * Hash the specified filename into a dirhash slot.
1029  */
1030 static int
1031 ufsdirhash_hash(struct dirhash *dh, char *name, int namelen)
1032 {
1033 	u_int32_t hash;
1034 
1035 	/*
1036 	 * We hash the name and then some other bit of data that is
1037 	 * invariant over the dirhash's lifetime. Otherwise names
1038 	 * differing only in the last byte are placed close to one
1039 	 * another in the table, which is bad for linear probing.
1040 	 */
1041 	hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
1042 	hash = fnv_32_buf(&dh, sizeof(dh), hash);
1043 	return (hash % dh->dh_hlen);
1044 }
1045 
1046 /*
1047  * Adjust the number of free bytes in the block containing `offset'
1048  * by the value specified by `diff'.
1049  *
1050  * The caller must ensure we have exclusive access to `dh'; normally
1051  * that means that dh_lock should be held, but this is also called
1052  * from ufsdirhash_build() where exclusive access can be assumed.
1053  */
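/*
 * For example, when ufsdirhash_add() records a new entry it passes
 * diff == -DIRSIZ(0, dirp), so the block's dh_blkfree count shrinks by
 * that many DIRALIGN-sized units.  If the block then falls into a
 * different dh_firstfree bucket, the old bucket is rescanned for its
 * next candidate block and this block may become the new bucket's head.
 */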
1054 static void
1055 ufsdirhash_adjfree(struct dirhash *dh, doff_t offset, int diff)
1056 {
1057 	int block, i, nfidx, ofidx;
1058 
1059 	/* Update the per-block summary info. */
1060 	block = offset / DIRBLKSIZ;
1061 	KASSERT(block < dh->dh_nblk && block < dh->dh_dirblks,
1062 	     ("dirhash bad offset"));
1063 	ofidx = BLKFREE2IDX(dh->dh_blkfree[block]);
1064 	dh->dh_blkfree[block] = (int)dh->dh_blkfree[block] + (diff / DIRALIGN);
1065 	nfidx = BLKFREE2IDX(dh->dh_blkfree[block]);
1066 
1067 	/* Update the `first free' list if necessary. */
1068 	if (ofidx != nfidx) {
1069 		/* If removing, scan forward for the next block. */
1070 		if (dh->dh_firstfree[ofidx] == block) {
1071 			for (i = block + 1; i < dh->dh_dirblks; i++)
1072 				if (BLKFREE2IDX(dh->dh_blkfree[i]) == ofidx)
1073 					break;
1074 			dh->dh_firstfree[ofidx] = (i < dh->dh_dirblks) ? i : -1;
1075 		}
1076 
1077 		/* Make this the new `first free' if necessary */
1078 		if (dh->dh_firstfree[nfidx] > block ||
1079 		    dh->dh_firstfree[nfidx] == -1)
1080 			dh->dh_firstfree[nfidx] = block;
1081 	}
1082 }
1083 
1084 /*
1085  * Find the specified name which should have the specified offset.
1086  * Returns a slot number, and panics on failure.
1087  *
1088  * `dh' must be locked on entry and remains so on return.
1089  */
1090 static int
1091 ufsdirhash_findslot(struct dirhash *dh, char *name, int namelen, doff_t offset)
1092 {
1093 	int slot;
1094 
1095 	DIRHASH_ASSERT_LOCKED(dh);
1096 
1097 	/* Find the entry. */
1098 	KASSERT(dh->dh_hused < dh->dh_hlen, ("dirhash find full"));
1099 	slot = ufsdirhash_hash(dh, name, namelen);
1100 	while (DH_ENTRY(dh, slot) != offset &&
1101 	    DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
1102 		slot = WRAPINCR(slot, dh->dh_hlen);
1103 	if (DH_ENTRY(dh, slot) != offset)
1104 		panic("ufsdirhash_findslot: '%.*s' not found", namelen, name);
1105 
1106 	return (slot);
1107 }
1108 
1109 /*
1110  * Remove the entry corresponding to the specified slot from the hash array.
1111  *
1112  * `dh' must be locked on entry and remains so on return.
1113  */
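/*
 * Deleted slots are left as DIRHASH_DEL tombstones so that probes for
 * other names keep walking past them.  A run of tombstones is converted
 * back to DIRHASH_EMPTY only when it is immediately followed by an empty
 * slot; the loop below then walks the run backwards, clearing each slot
 * and decrementing dh_hused.
 */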
1114 static void
1115 ufsdirhash_delslot(struct dirhash *dh, int slot)
1116 {
1117 	int i;
1118 
1119 	DIRHASH_ASSERT_LOCKED(dh);
1120 
1121 	/* Mark the entry as deleted. */
1122 	DH_ENTRY(dh, slot) = DIRHASH_DEL;
1123 
1124 	/* If this is the end of a chain of DIRHASH_DEL slots, remove them. */
1125 	for (i = slot; DH_ENTRY(dh, i) == DIRHASH_DEL; )
1126 		i = WRAPINCR(i, dh->dh_hlen);
1127 	if (DH_ENTRY(dh, i) == DIRHASH_EMPTY) {
1128 		i = WRAPDECR(i, dh->dh_hlen);
1129 		while (DH_ENTRY(dh, i) == DIRHASH_DEL) {
1130 			DH_ENTRY(dh, i) = DIRHASH_EMPTY;
1131 			dh->dh_hused--;
1132 			i = WRAPDECR(i, dh->dh_hlen);
1133 		}
1134 		KASSERT(dh->dh_hused >= 0, ("ufsdirhash_delslot neg hused"));
1135 	}
1136 }
1137 
1138 /*
1139  * Given a directory entry and its offset, find the offset of the
1140  * previous entry in the same DIRBLKSIZ-sized block. Returns an
1141  * offset, or -1 if there is no previous entry in the block or some
1142  * other problem occurred.
1143  */
1144 static doff_t
1145 ufsdirhash_getprev(struct direct *dirp, doff_t offset)
1146 {
1147 	struct direct *dp;
1148 	char *blkbuf;
1149 	doff_t blkoff, prevoff;
1150 	int entrypos, i;
1151 
1152 	blkoff = offset & ~(DIRBLKSIZ - 1);	/* offset of start of block */
1153 	entrypos = offset & (DIRBLKSIZ - 1);	/* entry relative to block */
1154 	blkbuf = (char *)dirp - entrypos;
1155 	prevoff = blkoff;
1156 
1157 	/* If `offset' is the start of a block, there is no previous entry. */
1158 	if (entrypos == 0)
1159 		return (-1);
1160 
1161 	/* Scan from the start of the block until we get to the entry. */
1162 	for (i = 0; i < entrypos; i += dp->d_reclen) {
1163 		dp = (struct direct *)(blkbuf + i);
1164 		if (dp->d_reclen == 0 || i + dp->d_reclen > entrypos)
1165 			return (-1);	/* Corrupted directory. */
1166 		prevoff = blkoff + i;
1167 	}
1168 	return (prevoff);
1169 }
1170 
1171 /*
1172  * Delete the given dirhash and reclaim its memory. Assumes that
1173  * ufsdirhash_list is locked, and leaves it locked. Also assumes
1174  * that dh is locked. Returns the amount of memory freed.
1175  */
1176 static int
1177 ufsdirhash_destroy(struct dirhash *dh)
1178 {
1179 	doff_t **hash;
1180 	u_int8_t *blkfree;
1181 	int i, mem, narrays;
1182 
1183 	KASSERT(dh->dh_hash != NULL, ("dirhash: NULL hash on list"));
1184 
1185 	/* Remove it from the list and detach its memory. */
1186 	TAILQ_REMOVE(&ufsdirhash_list, dh, dh_list);
1187 	dh->dh_onlist = 0;
1188 	hash = dh->dh_hash;
1189 	dh->dh_hash = NULL;
1190 	blkfree = dh->dh_blkfree;
1191 	dh->dh_blkfree = NULL;
1192 	narrays = dh->dh_narrays;
1193 	mem = dh->dh_memreq;
1194 	dh->dh_memreq = 0;
1195 
1196 	/* Unlock dirhash and free the detached memory. */
1197 	ufsdirhash_release(dh);
1198 	for (i = 0; i < narrays; i++)
1199 		DIRHASH_BLKFREE(hash[i]);
1200 	free(hash, M_DIRHASH);
1201 	free(blkfree, M_DIRHASH);
1202 
1203 	/* Account for the returned memory. */
1204 	ufs_dirhashmem -= mem;
1205 
1206 	return (mem);
1207 }
1208 
1209 /*
1210  * Try to free up `wanted' bytes by stealing memory from existing
1211  * dirhashes. Returns zero with list locked if successful.
1212  */
1213 static int
1214 ufsdirhash_recycle(int wanted)
1215 {
1216 	struct dirhash *dh;
1217 
1218 	DIRHASHLIST_LOCK();
1219 	dh = TAILQ_FIRST(&ufsdirhash_list);
1220 	while (wanted + ufs_dirhashmem > ufs_dirhashmaxmem) {
1221 		/* Decrement the score; only recycle if it becomes zero. */
1222 		if (dh == NULL || --dh->dh_score > 0) {
1223 			DIRHASHLIST_UNLOCK();
1224 			return (-1);
1225 		}
1226 		/*
1227 		 * If we can't lock it, it's in use and we don't want to
1228 		 * recycle it anyway.
1229 		 */
1230 		if (!sx_try_xlock(&dh->dh_lock)) {
1231 			dh = TAILQ_NEXT(dh, dh_list);
1232 			continue;
1233 		}
1234 
1235 		ufsdirhash_destroy(dh);
1236 
1237 		/* Repeat if necessary. */
1238 		dh = TAILQ_FIRST(&ufsdirhash_list);
1239 	}
1240 	/* Success; return with list locked. */
1241 	return (0);
1242 }
1243 
1244 /*
1245  * Callback that frees some dirhashes when the system is low on virtual memory.
1246  */
1247 static void
1248 ufsdirhash_lowmem(void)
1249 {
1250 	struct dirhash *dh, *dh_temp;
1251 	int memfreed = 0;
1252 	/* XXX: this 10% may need to be adjusted */
1253 	int memwanted = ufs_dirhashmem / 10;
1254 
1255 	ufs_dirhashlowmemcount++;
1256 
1257 	DIRHASHLIST_LOCK();
1258 	/*
1259 	 * Delete dirhashes not used for more than ufs_dirhashreclaimage
1260 	 * seconds. If we can't get a lock on the dirhash, it will be skipped.
1261 	 */
1262 	TAILQ_FOREACH_SAFE(dh, &ufsdirhash_list, dh_list, dh_temp) {
1263 		if (!sx_try_xlock(&dh->dh_lock))
1264 			continue;
1265 		if (time_second - dh->dh_lastused > ufs_dirhashreclaimage)
1266 			memfreed += ufsdirhash_destroy(dh);
1267 		/* Unlock if we didn't delete the dirhash */
1268 		else
1269 			ufsdirhash_release(dh);
1270 	}
1271 
1272 	/*
1273 	 * If not enough memory was freed, keep deleting hashes from the head
1274 	 * of the dirhash list. The ones closest to the head should be the
1275 	 * oldest.
1276 	 */
1277 	if (memfreed < memwanted) {
1278 		TAILQ_FOREACH_SAFE(dh, &ufsdirhash_list, dh_list, dh_temp) {
1279 			if (!sx_try_xlock(&dh->dh_lock))
1280 				continue;
1281 			memfreed += ufsdirhash_destroy(dh);
1282 			if (memfreed >= memwanted)
1283 				break;
1284 		}
1285 	}
1286 	DIRHASHLIST_UNLOCK();
1287 }
1288 
1289 
1290 void
1291 ufsdirhash_init(void)
1292 {
1293 	ufsdirhash_zone = uma_zcreate("DIRHASH", DH_NBLKOFF * sizeof(doff_t),
1294 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1295 	mtx_init(&ufsdirhash_mtx, "dirhash list", NULL, MTX_DEF);
1296 	TAILQ_INIT(&ufsdirhash_list);
1297 
1298 	/* Register a callback function to handle low memory signals */
1299 	EVENTHANDLER_REGISTER(vm_lowmem, ufsdirhash_lowmem, NULL,
1300 	    EVENTHANDLER_PRI_FIRST);
1301 }
1302 
1303 void
1304 ufsdirhash_uninit(void)
1305 {
1306 	KASSERT(TAILQ_EMPTY(&ufsdirhash_list), ("ufsdirhash_uninit"));
1307 	uma_zdestroy(ufsdirhash_zone);
1308 	mtx_destroy(&ufsdirhash_mtx);
1309 }
1310 
1311 #endif /* UFS_DIRHASH */
1312