/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 *
 */

/*-
 * The following functions are based on the vn(4) driver: mdstart_swap(),
 * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
 * and as such are under the following copyright:
 *
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah Hdr: vn.c 1.13 94/04/02
 *
 *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
 * From: src/sys/dev/vn/vn.c,v 1.122 2000/12/16 16:06:03
 */

#include "opt_geom.h"
#include "opt_md.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mdioctl.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <geom/geom.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

#define MD_MODVER 1

#define MD_SHUTDOWN	0x10000		/* Tell worker thread to terminate. */
#define	MD_EXITING	0x20000		/* Worker thread is exiting. */

#ifndef MD_NSECT
#define MD_NSECT (10000 * 2)
#endif

static MALLOC_DEFINE(M_MD, "md_disk", "Memory Disk");
static MALLOC_DEFINE(M_MDSECT, "md_sectors", "Memory Disk Sectors");

static int md_debug;
SYSCTL_INT(_debug, OID_AUTO, mddebug, CTLFLAG_RW, &md_debug, 0, "");

#if defined(MD_ROOT) && defined(MD_ROOT_SIZE)
/*
 * Preloaded image gets put here.
 * Applications that patch the object with the image can determine
 * the size by looking at the start and end markers (strings),
 * so we want them contiguous.
 */
static struct {
	u_char start[MD_ROOT_SIZE*1024];
	u_char end[128];
} mfs_root = {
	.start = "MFS Filesystem goes here",
	.end = "MFS Filesystem had better STOP here",
};
#endif

static g_init_t g_md_init;
static g_fini_t g_md_fini;
static g_start_t g_md_start;
static g_access_t g_md_access;
static void g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp);

static int	mdunits;
static struct cdev *status_dev = NULL;
static struct sx md_sx;

static d_ioctl_t mdctlioctl;

static struct cdevsw mdctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_ioctl =	mdctlioctl,
	.d_name =	MD_NAME,
};

struct g_class g_md_class = {
	.name = "MD",
	.version = G_VERSION,
	.init = g_md_init,
	.fini = g_md_fini,
	.start = g_md_start,
	.access = g_md_access,
	.dumpconf = g_md_dumpconf,
};

DECLARE_GEOM_CLASS(g_md_class, g_md);

static LIST_HEAD(, md_s) md_softc_list = LIST_HEAD_INITIALIZER(&md_softc_list);

#define NINDIR	(PAGE_SIZE / sizeof(uintptr_t))
#define NMASK	(NINDIR-1)
static int nshift;

struct indir {
	uintptr_t	*array;
	u_int		total;
	u_int		used;
	u_int		shift;
};
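
/*
 * MD_MALLOC backing store is kept in a radix tree of "struct indir"
 * nodes.  Each node has NINDIR slots; interior nodes (shift != 0)
 * point at child nodes, while leaf nodes (shift == 0) hold per-sector
 * values.  A leaf value of 0 is an unallocated sector (read back as
 * zeros), a value in 1..255 encodes a sector filled entirely with
 * that byte (the MD_COMPRESS trick), and anything larger is a pointer
 * to a uma-allocated sector buffer.  See s_read(), s_write() and
 * mdstart_malloc() below.
 */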

struct md_s {
	int unit;
	LIST_ENTRY(md_s) list;
	struct bio_queue_head bio_queue;
	struct mtx queue_mtx;
	struct cdev *dev;
	enum md_types type;
	off_t mediasize;
	unsigned sectorsize;
	unsigned opencount;
	unsigned fwheads;
	unsigned fwsectors;
	unsigned flags;
	char name[20];
	struct proc *procp;
	struct g_geom *gp;
	struct g_provider *pp;
	int (*start)(struct md_s *sc, struct bio *bp);
	struct devstat *devstat;

	/* MD_MALLOC related fields */
	struct indir *indir;
	uma_zone_t uma;

	/* MD_PRELOAD related fields */
	u_char *pl_ptr;
	size_t pl_len;

	/* MD_VNODE related fields */
	struct vnode *vnode;
	char file[PATH_MAX];
	struct ucred *cred;

	/* MD_SWAP related fields */
	vm_object_t object;
};

static struct indir *
new_indir(u_int shift)
{
	struct indir *ip;

	ip = malloc(sizeof *ip, M_MD, M_NOWAIT | M_ZERO);
	if (ip == NULL)
		return (NULL);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_NOWAIT | M_ZERO);
	if (ip->array == NULL) {
		free(ip, M_MD);
		return (NULL);
	}
	ip->total = NINDIR;
	ip->shift = shift;
	return (ip);
}

static void
del_indir(struct indir *ip)
{

	free(ip->array, M_MDSECT);
	free(ip, M_MD);
}

static void
destroy_indir(struct md_s *sc, struct indir *ip)
{
	int i;

	for (i = 0; i < NINDIR; i++) {
		if (!ip->array[i])
			continue;
		if (ip->shift)
			destroy_indir(sc, (struct indir*)(ip->array[i]));
		else if (ip->array[i] > 255)
			uma_zfree(sc->uma, (void *)(ip->array[i]));
	}
	del_indir(ip);
}

/*
 * This function does the math and allocates the top level "indir" structure
 * for a device of "size" sectors.
 */

static struct indir *
dimension(off_t size)
{
	off_t rcnt;
	struct indir *ip;
	int i, layer;

	rcnt = size;
	layer = 0;
	while (rcnt > NINDIR) {
		rcnt /= NINDIR;
		layer++;
	}
	/* figure out log2(NINDIR) */
	for (i = NINDIR, nshift = -1; i; nshift++)
		i >>= 1;

	/*
	 * XXX: the top layer is probably not fully populated, so we allocate
	 * too much space for ip->array in here.
	 */
	ip = malloc(sizeof *ip, M_MD, M_WAITOK | M_ZERO);
	ip->array = malloc(sizeof(uintptr_t) * NINDIR,
	    M_MDSECT, M_WAITOK | M_ZERO);
	ip->total = NINDIR;
	ip->shift = layer * nshift;
	return (ip);
}
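
/*
 * For example, with 4 KB pages on a 64-bit machine NINDIR is 512 and
 * nshift comes out as 9.  A 1 GB device with 512-byte sectors has
 * 2^21 sectors, so the loop above settles on layer = 2 and the tree
 * ends up three levels deep: two indirect levels above the leaves.
 */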

/*
 * Read a given sector
 */

static uintptr_t
s_read(struct indir *ip, off_t offset)
{
	struct indir *cip;
	int idx;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_read(%jd)\n", (intmax_t)offset);
	up = 0;
	for (cip = ip; cip != NULL;) {
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		idx = offset & NMASK;
		return (cip->array[idx]);
	}
	return (0);
}

/*
 * Write a given sector, prune the tree if the value is 0
 */

static int
s_write(struct indir *ip, off_t offset, uintptr_t ptr)
{
	struct indir *cip, *lip[10];
	int idx, li;
	uintptr_t up;

	if (md_debug > 1)
		printf("s_write(%jd, %p)\n", (intmax_t)offset, (void *)ptr);
	up = 0;
	li = 0;
	cip = ip;
	for (;;) {
		lip[li++] = cip;
		if (cip->shift) {
			idx = (offset >> cip->shift) & NMASK;
			up = cip->array[idx];
			if (up != 0) {
				cip = (struct indir *)up;
				continue;
			}
			/* Allocate branch */
			cip->array[idx] =
			    (uintptr_t)new_indir(cip->shift - nshift);
			if (cip->array[idx] == 0)
				return (ENOSPC);
			cip->used++;
			up = cip->array[idx];
			cip = (struct indir *)up;
			continue;
		}
		/* leafnode */
		idx = offset & NMASK;
		up = cip->array[idx];
		if (up != 0)
			cip->used--;
		cip->array[idx] = ptr;
		if (ptr != 0)
			cip->used++;
		break;
	}
	if (cip->used != 0 || li == 1)
		return (0);
	li--;
	while (cip->used == 0 && cip != ip) {
		li--;
		idx = (offset >> lip[li]->shift) & NMASK;
		up = lip[li]->array[idx];
		KASSERT(up == (uintptr_t)cip, ("md screwed up"));
		del_indir(cip);
		lip[li]->array[idx] = 0;
		lip[li]->used--;
		cip = lip[li];
	}
	return (0);
}
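
/*
 * Note the pruning pass above: once a leaf's "used" count drops to
 * zero, the lip[] stack is walked back towards the root, each empty
 * node is freed with del_indir() and its slot in the parent cleared,
 * stopping at the first node that is still in use (or at the root).
 */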

static int
g_md_access(struct g_provider *pp, int r, int w, int e)
{
	struct md_s *sc;

	sc = pp->geom->softc;
	if (sc == NULL)
		return (ENXIO);
	r += pp->acr;
	w += pp->acw;
	e += pp->ace;
	if ((sc->flags & MD_READONLY) != 0 && w > 0)
		return (EROFS);
	if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) {
		sc->opencount = 1;
	} else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) {
		sc->opencount = 0;
	}
	return (0);
}

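/*
 * bio requests arrive here from a GEOM down thread; they are only
 * queued and the per-device worker (md_kthread() below) is woken, so
 * the potentially sleeping backing-store work never runs in the
 * g_down path.
 */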
static void
g_md_start(struct bio *bp)
{
	struct md_s *sc;

	sc = bp->bio_to->geom->softc;
	if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
		devstat_start_transaction_bio(sc->devstat, bp);
	mtx_lock(&sc->queue_mtx);
	bioq_disksort(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);
	wakeup(sc);
}

static int
mdstart_malloc(struct md_s *sc, struct bio *bp)
{
	int i, error;
	u_char *dst;
	off_t secno, nsec, uc;
	uintptr_t sp, osp;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	nsec = bp->bio_length / sc->sectorsize;
	secno = bp->bio_offset / sc->sectorsize;
	dst = bp->bio_data;
	error = 0;
	while (nsec--) {
		osp = s_read(sc->indir, secno);
		if (bp->bio_cmd == BIO_DELETE) {
			if (osp != 0)
				error = s_write(sc->indir, secno, 0);
		} else if (bp->bio_cmd == BIO_READ) {
			if (osp == 0)
				bzero(dst, sc->sectorsize);
			else if (osp <= 255)
				for (i = 0; i < sc->sectorsize; i++)
					dst[i] = osp;
			else
				bcopy((void *)osp, dst, sc->sectorsize);
			osp = 0;
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (sc->flags & MD_COMPRESS) {
				uc = dst[0];
				for (i = 1; i < sc->sectorsize; i++)
					if (dst[i] != uc)
						break;
			} else {
				i = 0;
				uc = 0;
			}
			if (i == sc->sectorsize) {
				if (osp != uc)
					error = s_write(sc->indir, secno, uc);
			} else {
				if (osp <= 255) {
					sp = (uintptr_t)uma_zalloc(sc->uma,
					    M_NOWAIT);
					if (sp == 0) {
						error = ENOSPC;
						break;
					}
					bcopy(dst, (void *)sp, sc->sectorsize);
					error = s_write(sc->indir, secno, sp);
				} else {
					bcopy(dst, (void *)osp, sc->sectorsize);
					osp = 0;
				}
			}
		} else {
			error = EOPNOTSUPP;
		}
		if (osp > 255)
			uma_zfree(sc->uma, (void*)osp);
		if (error != 0)
			break;
		secno++;
		dst += sc->sectorsize;
	}
	bp->bio_resid = 0;
	return (error);
}

static int
mdstart_preload(struct md_s *sc, struct bio *bp)
{

	switch (bp->bio_cmd) {
	case BIO_READ:
		bcopy(sc->pl_ptr + bp->bio_offset, bp->bio_data,
		    bp->bio_length);
		break;
	case BIO_WRITE:
		bcopy(bp->bio_data, sc->pl_ptr + bp->bio_offset,
		    bp->bio_length);
		break;
	}
	bp->bio_resid = 0;
	return (0);
}

static int
mdstart_vnode(struct md_s *sc, struct bio *bp)
{
	int error, vfslocked;
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct vnode *vp;
	struct thread *td;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		break;
	default:
		return (EOPNOTSUPP);
	}

	td = curthread;
	vp = sc->vnode;

	/*
	 * VNODE I/O
	 *
	 * If an error occurs, we set BIO_ERROR but we do not set
	 * B_INVAL because (for a write anyway), the buffer is
	 * still valid.
	 */

	if (bp->bio_cmd == BIO_FLUSH) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(vp, MNT_WAIT, td);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}

	bzero(&auio, sizeof(auio));

	aiov.iov_base = bp->bio_data;
	aiov.iov_len = bp->bio_length;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
	auio.uio_segflg = UIO_SYSSPACE;
	if (bp->bio_cmd == BIO_READ)
		auio.uio_rw = UIO_READ;
	else if (bp->bio_cmd == BIO_WRITE)
		auio.uio_rw = UIO_WRITE;
	else
		panic("wrong BIO_OP in mdstart_vnode");
	auio.uio_resid = bp->bio_length;
	auio.uio_td = td;
	/*
	 * When reading set IO_DIRECT to try to avoid double-caching
	 * the data.  When writing IO_DIRECT is not optimal.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	if (bp->bio_cmd == BIO_READ) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_WRITE(vp, &auio, sc->flags & MD_ASYNC ? 0 : IO_SYNC,
		    sc->cred);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	bp->bio_resid = auio.uio_resid;
	return (error);
}

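/*
 * Swap-backed I/O goes through a transient kernel mapping: each page
 * in the request is grabbed (busied) from the backing VM object,
 * mapped with an sf_buf, and copied to or from the bio's data buffer.
 * Pages that are not fully valid are filled from the pager first.
 */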
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
	struct sf_buf *sf;
	int rv, offs, len, lastend;
	vm_pindex_t i, lastp;
	vm_page_t m;
	u_char *p;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	default:
		return (EOPNOTSUPP);
	}

	p = bp->bio_data;

	/*
	 * offs is the offset at which to start operating on the
	 * next (ie, first) page.  lastp is the last page on
	 * which we're going to operate.  lastend is the ending
	 * position within that last page (ie, PAGE_SIZE if
	 * we're operating on complete aligned pages).
	 */
	offs = bp->bio_offset % PAGE_SIZE;
	lastp = (bp->bio_offset + bp->bio_length - 1) / PAGE_SIZE;
	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;

	rv = VM_PAGER_OK;
	VM_OBJECT_LOCK(sc->object);
	vm_object_pip_add(sc->object, 1);
	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;

		m = vm_page_grab(sc->object, i,
		    VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(sc->object);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		VM_OBJECT_LOCK(sc->object);
		if (bp->bio_cmd == BIO_READ) {
			if (m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				break;
			}
			bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
		} else if (bp->bio_cmd == BIO_WRITE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				break;
			}
			bcopy(p, (void *)(sf_buf_kva(sf) + offs), len);
			m->valid = VM_PAGE_BITS_ALL;
#if 0
		} else if (bp->bio_cmd == BIO_DELETE) {
			if (len != PAGE_SIZE && m->valid != VM_PAGE_BITS_ALL)
				rv = vm_pager_get_pages(sc->object, &m, 1, 0);
			if (rv == VM_PAGER_ERROR) {
				sf_buf_free(sf);
				sched_unpin();
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				break;
			}
			bzero((void *)(sf_buf_kva(sf) + offs), len);
			vm_page_dirty(m);
			m->valid = VM_PAGE_BITS_ALL;
#endif
		}
		sf_buf_free(sf);
		sched_unpin();
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_activate(m);
		if (bp->bio_cmd == BIO_WRITE)
			vm_page_dirty(m);
		vm_page_unlock_queues();

		/* Actions on further pages start at offset 0 */
		p += PAGE_SIZE - offs;
		offs = 0;
#if 0
		if (bootverbose || bp->bio_offset / PAGE_SIZE < 17)
			printf("wire_count %d busy %d flags %x hold_count %d act_count %d queue %d valid %d dirty %d @ %d\n",
			    m->wire_count, m->busy, m->flags, m->hold_count,
			    m->act_count, m->queue, m->valid, m->dirty, i);
#endif
	}
	vm_object_pip_subtract(sc->object, 1);
	vm_object_set_writeable_dirty(sc->object);
	VM_OBJECT_UNLOCK(sc->object);
	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}

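/*
 * Per-device worker loop.  An error value of -1 is a private
 * convention meaning the bio has already been delivered
 * (g_handleattr_int() completes BIO_GETATTR requests itself), so it
 * must not be passed to g_io_deliver() a second time.
 */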
static void
md_kthread(void *arg)
{
	struct md_s *sc;
	struct bio *bp;
	int error;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	if (sc->type == MD_VNODE)
		curthread->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->flags & MD_SHUTDOWN) {
			sc->flags |= MD_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (bp == NULL) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP, "mdwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		if (bp->bio_cmd == BIO_GETATTR) {
			if (sc->fwsectors && sc->fwheads &&
			    (g_handleattr_int(bp, "GEOM::fwsectors",
			    sc->fwsectors) ||
			    g_handleattr_int(bp, "GEOM::fwheads",
			    sc->fwheads)))
				error = -1;
			else
				error = EOPNOTSUPP;
		} else {
			error = sc->start(sc, bp);
		}

		if (error != -1) {
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, error);
			if ((bp->bio_cmd == BIO_READ) || (bp->bio_cmd == BIO_WRITE))
				devstat_end_transaction_bio(sc->devstat, bp);
		}
	}
}

static struct md_s *
mdfind(int unit)
{
	struct md_s *sc;

	LIST_FOREACH(sc, &md_softc_list, list) {
		if (sc->unit == unit)
			break;
	}
	return (sc);
}

static struct md_s *
mdnew(int unit, int *errp, enum md_types type)
{
	struct md_s *sc, *sc2;
	int error, max = -1;

	*errp = 0;
	LIST_FOREACH(sc2, &md_softc_list, list) {
		if (unit == sc2->unit) {
			*errp = EBUSY;
			return (NULL);
		}
		if (unit == -1 && sc2->unit > max)
			max = sc2->unit;
	}
	if (unit == -1)
		unit = max + 1;
	sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
	sc->type = type;
	bioq_init(&sc->bio_queue);
	mtx_init(&sc->queue_mtx, "md bio queue", NULL, MTX_DEF);
	sc->unit = unit;
	sprintf(sc->name, "md%d", unit);
	LIST_INSERT_HEAD(&md_softc_list, sc, list);
	error = kproc_create(md_kthread, sc, &sc->procp, 0, 0, "%s", sc->name);
	if (error == 0)
		return (sc);
	LIST_REMOVE(sc, list);
	mtx_destroy(&sc->queue_mtx);
	free(sc, M_MD);
	*errp = error;
	return (NULL);
}

static void
mdinit(struct md_s *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;

	g_topology_lock();
	gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
	gp->softc = sc;
	pp = g_new_providerf(gp, "md%d", sc->unit);
	pp->mediasize = sc->mediasize;
	pp->sectorsize = sc->sectorsize;
	sc->gp = gp;
	sc->pp = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	sc->devstat = devstat_new_entry("md", sc->unit, sc->sectorsize,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX);
}

/*
 * XXX: we should check that the range they feed us is mapped.
 * XXX: we should implement read-only.
 */

static int
mdcreate_preload(struct md_s *sc, struct md_ioctl *mdio)
{

	if (mdio->md_options & ~(MD_AUTOUNIT | MD_FORCE))
		return (EINVAL);
	sc->flags = mdio->md_options & MD_FORCE;
	/* Cast to pointer size, then to pointer to avoid warning */
	sc->pl_ptr = (u_char *)(uintptr_t)mdio->md_base;
	sc->pl_len = (size_t)sc->mediasize;
	return (0);
}

static int
mdcreate_malloc(struct md_s *sc, struct md_ioctl *mdio)
{
	uintptr_t sp;
	int error;
	off_t u;

	error = 0;
	if (mdio->md_options & ~(MD_AUTOUNIT | MD_COMPRESS | MD_RESERVE))
		return (EINVAL);
	if (mdio->md_sectorsize != 0 && !powerof2(mdio->md_sectorsize))
		return (EINVAL);
	/* Compression doesn't make sense if we have reserved space */
	if (mdio->md_options & MD_RESERVE)
		mdio->md_options &= ~MD_COMPRESS;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_COMPRESS | MD_FORCE);
	sc->indir = dimension(sc->mediasize / sc->sectorsize);
	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
	    0x1ff, 0);
	if (mdio->md_options & MD_RESERVE) {
		off_t nsectors;

		nsectors = sc->mediasize / sc->sectorsize;
		for (u = 0; u < nsectors; u++) {
			sp = (uintptr_t)uma_zalloc(sc->uma, M_NOWAIT | M_ZERO);
			if (sp != 0)
				error = s_write(sc->indir, u, sp);
			else
				error = ENOMEM;
			if (error != 0)
				break;
		}
	}
	return (error);
}
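
/*
 * With MD_RESERVE, every sector was just preallocated and zeroed via
 * s_write() above, so the full mediasize is committed at attach time
 * and a shortage surfaces as ENOMEM here rather than as ENOSPC on a
 * later write.
 */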

static int
mdsetcred(struct md_s *sc, struct ucred *cred)
{
	char *tmpbuf;
	int error = 0;

	/*
	 * Set credentials in our softc.
	 */

	if (sc->cred)
		crfree(sc->cred);
	sc->cred = crhold(cred);

	/*
	 * Horrible kludge to establish credentials for NFS  XXX.
	 */

	if (sc->vnode) {
		struct uio auio;
		struct iovec aiov;

		tmpbuf = malloc(sc->sectorsize, M_TEMP, M_WAITOK);
		bzero(&auio, sizeof(auio));

		aiov.iov_base = tmpbuf;
		aiov.iov_len = sc->sectorsize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_resid = aiov.iov_len;
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
		VOP_UNLOCK(sc->vnode, 0);
		free(tmpbuf, M_TEMP);
	}
	return (error);
}

static int
mdcreate_vnode(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	struct vattr vattr;
	struct nameidata nd;
	int error, flags, vfslocked;

	error = copyinstr(mdio->md_file, sc->file, sizeof(sc->file), NULL);
	if (error != 0)
		return (error);
	flags = FREAD|FWRITE;
	/*
	 * If the user specified that this is a read only device, unset the
	 * FWRITE mask before trying to open the backing store.
	 */
	if ((mdio->md_options & MD_READONLY) != 0)
		flags &= ~FWRITE;
	NDINIT(&nd, LOOKUP, FOLLOW | MPSAFE, UIO_SYSSPACE, sc->file, td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (nd.ni_vp->v_type != VREG ||
	    (error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred, td))) {
		VOP_UNLOCK(nd.ni_vp, 0);
		(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error ? error : EINVAL);
	}
	nd.ni_vp->v_vflag |= VV_MD;
	VOP_UNLOCK(nd.ni_vp, 0);

	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->flags = mdio->md_options & (MD_FORCE | MD_ASYNC);
	if (!(flags & FWRITE))
		sc->flags |= MD_READONLY;
	sc->vnode = nd.ni_vp;

	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		sc->vnode = NULL;
		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
		nd.ni_vp->v_vflag &= ~VV_MD;
		VOP_UNLOCK(nd.ni_vp, 0);
		(void)vn_close(nd.ni_vp, flags, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	VFS_UNLOCK_GIANT(vfslocked);
	return (0);
}

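/*
 * Teardown order matters here: the geom is withered first so no new
 * bios can arrive, then the worker is told to exit (MD_SHUTDOWN) and
 * waited for (MD_EXITING) before the queue mutex and the backing
 * store are torn down.
 */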
static int
mddestroy(struct md_s *sc, struct thread *td)
{
	int vfslocked;

	if (sc->gp) {
		sc->gp->softc = NULL;
		g_topology_lock();
		g_wither_geom(sc->gp, ENXIO);
		g_topology_unlock();
		sc->gp = NULL;
		sc->pp = NULL;
	}
	if (sc->devstat) {
		devstat_remove_entry(sc->devstat);
		sc->devstat = NULL;
	}
	mtx_lock(&sc->queue_mtx);
	sc->flags |= MD_SHUTDOWN;
	wakeup(sc);
	while (!(sc->flags & MD_EXITING))
		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
	mtx_unlock(&sc->queue_mtx);
	mtx_destroy(&sc->queue_mtx);
	if (sc->vnode != NULL) {
		vfslocked = VFS_LOCK_GIANT(sc->vnode->v_mount);
		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
		sc->vnode->v_vflag &= ~VV_MD;
		VOP_UNLOCK(sc->vnode, 0);
		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
		    FREAD : (FREAD|FWRITE), sc->cred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (sc->cred != NULL)
		crfree(sc->cred);
	if (sc->object != NULL)
		vm_object_deallocate(sc->object);
	if (sc->indir)
		destroy_indir(sc, sc->indir);
	if (sc->uma)
		uma_zdestroy(sc->uma);

	LIST_REMOVE(sc, list);
	free(sc, M_MD);
	return (0);
}

static int
mdcreate_swap(struct md_s *sc, struct md_ioctl *mdio, struct thread *td)
{
	vm_ooffset_t npage;
	int error;

	/*
	 * Range check.  Reject a size of zero or any size that is not
	 * a multiple of the page size.
	 */
	if (sc->mediasize == 0 || (sc->mediasize % PAGE_SIZE) != 0)
		return (EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * Note the truncation.
	 */

	npage = mdio->md_mediasize / PAGE_SIZE;
	if (mdio->md_fwsectors != 0)
		sc->fwsectors = mdio->md_fwsectors;
	if (mdio->md_fwheads != 0)
		sc->fwheads = mdio->md_fwheads;
	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
	    VM_PROT_DEFAULT, 0);
	if (sc->object == NULL)
		return (ENOMEM);
	sc->flags = mdio->md_options & MD_FORCE;
	if (mdio->md_options & MD_RESERVE) {
		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
			vm_object_deallocate(sc->object);
			sc->object = NULL;
			return (EDOM);
		}
	}
	error = mdsetcred(sc, td->td_ucred);
	if (error != 0) {
		vm_object_deallocate(sc->object);
		sc->object = NULL;
	}
	return (error);
}
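
/*
 * For swap-backed devices, MD_RESERVE reserves actual swap space up
 * front via swap_pager_reserve(); without it, swap is allocated
 * lazily as pages are first written.
 */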

static int
xmdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	struct md_ioctl *mdio;
	struct md_s *sc;
	int error, i;

	if (md_debug)
		printf("mdctlioctl(%s %lx %p %x %p)\n",
			devtoname(dev), cmd, addr, flags, td);

	mdio = (struct md_ioctl *)addr;
	if (mdio->md_version != MDIOVERSION)
		return (EINVAL);

	/*
	 * Ideally the version number would be checked in the
	 * individual ioctl handlers rather than up front: (a) a future
	 * ioctl might not read an mdio at all, and (b) the correct
	 * return value for an unknown ioctl is ENOIOCTL, not EINVAL.
	 * For now every supported ioctl takes an mdio, so the check is
	 * done above, before dispatching.
	 */
	error = 0;
	switch (cmd) {
	case MDIOCATTACH:
		switch (mdio->md_type) {
		case MD_MALLOC:
		case MD_PRELOAD:
		case MD_VNODE:
		case MD_SWAP:
			break;
		default:
			return (EINVAL);
		}
		if (mdio->md_options & MD_AUTOUNIT)
			sc = mdnew(-1, &error, mdio->md_type);
		else
			sc = mdnew(mdio->md_unit, &error, mdio->md_type);
		if (sc == NULL)
			return (error);
		if (mdio->md_options & MD_AUTOUNIT)
			mdio->md_unit = sc->unit;
		sc->mediasize = mdio->md_mediasize;
		if (mdio->md_sectorsize == 0)
			sc->sectorsize = DEV_BSIZE;
		else
			sc->sectorsize = mdio->md_sectorsize;
		error = EDOOFUS;
		switch (sc->type) {
		case MD_MALLOC:
			sc->start = mdstart_malloc;
			error = mdcreate_malloc(sc, mdio);
			break;
		case MD_PRELOAD:
			sc->start = mdstart_preload;
			error = mdcreate_preload(sc, mdio);
			break;
		case MD_VNODE:
			sc->start = mdstart_vnode;
			error = mdcreate_vnode(sc, mdio, td);
			break;
		case MD_SWAP:
			sc->start = mdstart_swap;
			error = mdcreate_swap(sc, mdio, td);
			break;
		}
		if (error != 0) {
			mddestroy(sc, td);
			return (error);
		}

		/* Prune off any residual fractional sector */
		i = sc->mediasize % sc->sectorsize;
		sc->mediasize -= i;

		mdinit(sc);
		return (0);
	case MDIOCDETACH:
		if (mdio->md_mediasize != 0 || mdio->md_options != 0)
			return (EINVAL);

		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		if (sc->opencount != 0 && !(sc->flags & MD_FORCE))
			return (EBUSY);
		return (mddestroy(sc, td));
	case MDIOCQUERY:
		sc = mdfind(mdio->md_unit);
		if (sc == NULL)
			return (ENOENT);
		mdio->md_type = sc->type;
		mdio->md_options = sc->flags;
		mdio->md_mediasize = sc->mediasize;
		mdio->md_sectorsize = sc->sectorsize;
		if (sc->type == MD_VNODE)
			error = copyout(sc->file, mdio->md_file,
			    strlen(sc->file) + 1);
		return (error);
	case MDIOCLIST:
		i = 1;
		LIST_FOREACH(sc, &md_softc_list, list) {
			if (i == MDNPAD - 1)
				mdio->md_pad[i] = -1;
			else
				mdio->md_pad[i++] = sc->unit;
		}
		mdio->md_pad[0] = i - 1;
		return (0);
	default:
		return (ENOIOCTL);
	}
}
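
/*
 * These ioctls are normally reached through mdconfig(8); illustrative
 * invocations (not exhaustive):
 *
 *	mdconfig -a -t swap -s 64m	# MDIOCATTACH with MD_AUTOUNIT
 *	mdconfig -d -u 0		# MDIOCDETACH of md0
 *	mdconfig -l			# MDIOCLIST
 */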

static int
mdctlioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
{
	int error;

	sx_xlock(&md_sx);
	error = xmdctlioctl(dev, cmd, addr, flags, td);
	sx_xunlock(&md_sx);
	return (error);
}

static void
md_preloaded(u_char *image, size_t length)
{
	struct md_s *sc;
	int error;

	sc = mdnew(-1, &error, MD_PRELOAD);
	if (sc == NULL)
		return;
	sc->mediasize = length;
	sc->sectorsize = DEV_BSIZE;
	sc->pl_ptr = image;
	sc->pl_len = length;
	sc->start = mdstart_preload;
#ifdef MD_ROOT
	if (sc->unit == 0)
		rootdevnames[0] = "ufs:/dev/md0";
#endif
	mdinit(sc);
}
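
/*
 * Preloaded images are announced by the boot loader.  An illustrative
 * loader.conf(5) fragment that produces an "md_image" module entry
 * (the names are examples, not fixed):
 *
 *	mfsroot_load="YES"
 *	mfsroot_type="md_image"
 *	mfsroot_name="/boot/mfsroot"
 */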

static void
g_md_init(struct g_class *mp __unused)
{
	caddr_t mod;
	caddr_t c;
	u_char *ptr, *name, *type;
	unsigned len;

	mod = NULL;
	sx_init(&md_sx, "MD config lock");
	g_topology_unlock();
#ifdef MD_ROOT_SIZE
	sx_xlock(&md_sx);
	md_preloaded(mfs_root.start, sizeof(mfs_root.start));
	sx_xunlock(&md_sx);
#endif
	/* XXX: are preload_* static or do they need Giant ? */
	while ((mod = preload_search_next_name(mod)) != NULL) {
		name = (char *)preload_search_info(mod, MODINFO_NAME);
		if (name == NULL)
			continue;
		type = (char *)preload_search_info(mod, MODINFO_TYPE);
		if (type == NULL)
			continue;
		if (strcmp(type, "md_image") && strcmp(type, "mfs_root"))
			continue;
		c = preload_search_info(mod, MODINFO_ADDR);
		ptr = *(u_char **)c;
		c = preload_search_info(mod, MODINFO_SIZE);
		len = *(size_t *)c;
		printf("%s%d: Preloaded image <%s> %d bytes at %p\n",
		    MD_NAME, mdunits, name, len, ptr);
		sx_xlock(&md_sx);
		md_preloaded(ptr, len);
		sx_xunlock(&md_sx);
	}
	status_dev = make_dev(&mdctl_cdevsw, MAXMINOR, UID_ROOT, GID_WHEEL,
	    0600, MDCTL_NAME);
	g_topology_lock();
}

static void
g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp __unused, struct g_provider *pp)
{
	struct md_s *mp;
	char *type;

	mp = gp->softc;
	if (mp == NULL)
		return;

	switch (mp->type) {
	case MD_MALLOC:
		type = "malloc";
		break;
	case MD_PRELOAD:
		type = "preload";
		break;
	case MD_VNODE:
		type = "vnode";
		break;
	case MD_SWAP:
		type = "swap";
		break;
	default:
		type = "unknown";
		break;
	}

	if (pp != NULL) {
		if (indent == NULL) {
			sbuf_printf(sb, " u %d", mp->unit);
			sbuf_printf(sb, " s %ju", (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, " f %ju", (uintmax_t) mp->fwheads);
			sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
			sbuf_printf(sb, " t %s", type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, " file %s", mp->file);
		} else {
			sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
			    mp->unit);
			sbuf_printf(sb, "%s<sectorsize>%ju</sectorsize>\n",
			    indent, (uintmax_t) mp->sectorsize);
			sbuf_printf(sb, "%s<fwheads>%ju</fwheads>\n",
			    indent, (uintmax_t) mp->fwheads);
			sbuf_printf(sb, "%s<fwsectors>%ju</fwsectors>\n",
			    indent, (uintmax_t) mp->fwsectors);
			sbuf_printf(sb, "%s<length>%ju</length>\n",
			    indent, (uintmax_t) mp->mediasize);
			sbuf_printf(sb, "%s<type>%s</type>\n", indent,
			    type);
			if (mp->type == MD_VNODE && mp->vnode != NULL)
				sbuf_printf(sb, "%s<file>%s</file>\n",
				    indent, mp->file);
		}
	}
}

static void
g_md_fini(struct g_class *mp __unused)
{
	sx_destroy(&md_sx);
	if (status_dev != NULL)
		destroy_dev(status_dev);
}