xref: /dragonfly/sys/dev/disk/vn/vn.c (revision 28feafc7)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * from: Utah Hdr: vn.c 1.13 94/04/02
39  *
40  *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
41  * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $
42  * $DragonFly: src/sys/dev/disk/vn/vn.c,v 1.34 2007/05/17 21:08:49 dillon Exp $
43  */
44 
45 /*
46  * Vnode disk driver.
47  *
48  * Block/character interface to a vnode.  Allows one to treat a file
49  * as a disk (e.g. build a filesystem in it, mount it, etc.).
50  *
51  * NOTE 1: There is a security issue involved with this driver.
52  * Once mounted, all access to the contents of the "mapped" file via
53  * the special file is controlled by the permissions on the special
54  * file; the protection of the mapped file is ignored (effectively,
55  * by using root credentials in all transactions).
56  *
57  * NOTE 2: Doesn't interact with leases, should it?
58  */
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/proc.h>
64 #include <sys/nlookup.h>
65 #include <sys/buf.h>
66 #include <sys/malloc.h>
67 #include <sys/mount.h>
68 #include <sys/vnode.h>
69 #include <sys/fcntl.h>
70 #include <sys/conf.h>
71 #include <sys/diskslice.h>
72 #include <sys/disk.h>
73 #include <sys/stat.h>
74 #include <sys/module.h>
75 #include <sys/vnioctl.h>
76 
77 #include <vm/vm.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_pager.h>
81 #include <vm/vm_pageout.h>
82 #include <vm/swap_pager.h>
83 #include <vm/vm_extern.h>
84 #include <vm/vm_zone.h>
85 
86 static	d_ioctl_t	vnioctl;
87 static	d_open_t	vnopen;
88 static	d_close_t	vnclose;
89 static	d_psize_t	vnsize;
90 static	d_strategy_t	vnstrategy;
91 
92 #define CDEV_MAJOR 43
93 
94 #define VN_BSIZE_BEST	8192
95 
96 /*
97  * dev_ops
98  *	D_DISK		we want to look like a disk
99  *	D_CANFREE	We support BUF_CMD_FREEBLKS
100  */
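/*
 * Raw read/write on the character device go through physread/physwrite,
 * the generic physio wrappers, which turn read(2)/write(2) requests into
 * BUF_CMD_READ/BUF_CMD_WRITE buffers and hand them to vnstrategy().
 */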
101 
102 static struct dev_ops vn_ops = {
103 	{ "vn", CDEV_MAJOR, D_DISK | D_CANFREE },
104 	.d_open =	vnopen,
105 	.d_close =	vnclose,
106 	.d_read =	physread,
107 	.d_write =	physwrite,
108 	.d_ioctl =	vnioctl,
109 	.d_strategy =	vnstrategy,
110 	.d_psize =	vnsize
111 };
112 
113 struct vn_softc {
114 	int		sc_unit;
115 	int		sc_flags;	/* flags 			*/
116 	u_int64_t	sc_size;	/* size of vn, sc_secsize scale	*/
117 	int		sc_secsize;	/* sector size			*/
118 	struct diskslices *sc_slices;	/* XXX fields from struct disk  */
119 	struct disk_info sc_info;	/* XXX fields from struct disk  */
120 	struct vnode	*sc_vp;		/* vnode if not NULL		*/
121 	vm_object_t	sc_object;	/* backing object if not NULL	*/
122 	struct ucred	*sc_cred;	/* credentials 			*/
123 	int		 sc_maxactive;	/* max # of active requests 	*/
124 	struct buf	 sc_tab;	/* transfer queue 		*/
125 	u_long		 sc_options;	/* options 			*/
126 	cdev_t		 sc_devlist;	/* devices that refer to this unit */
127 	SLIST_ENTRY(vn_softc) sc_list;
128 };
129 
130 static SLIST_HEAD(, vn_softc) vn_list;
131 
132 /* sc_flags */
133 #define VNF_INITED	0x01
134 #define	VNF_READONLY	0x02
135 
136 static u_long	vn_options;
137 
138 #define IFOPT(vn,opt) if (((vn)->sc_options|vn_options) & (opt))
139 #define TESTOPT(vn,opt) (((vn)->sc_options|vn_options) & (opt))
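/*
 * IFOPT expands to a bare 'if' testing the union of the per-unit options
 * (sc_options) and the global options (vn_options), so it must be followed
 * by a statement or block; TESTOPT yields the same test as an expression.
 * Typical use:
 *
 *	IFOPT(vn, VN_FOLLOW)
 *		kprintf("...");
 */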
140 
141 static int	vnsetcred (struct vn_softc *vn, struct ucred *cred);
142 static void	vnclear (struct vn_softc *vn);
143 static int	vn_modevent (module_t, int, void *);
144 static int 	vniocattach_file (struct vn_softc *, struct vn_ioctl *, cdev_t dev, int flag, struct ucred *cred);
145 static int 	vniocattach_swap (struct vn_softc *, struct vn_ioctl *, cdev_t dev, int flag, struct ucred *cred);
146 
147 static	int
148 vnclose(struct dev_close_args *ap)
149 {
150 	cdev_t dev = ap->a_head.a_dev;
151 	struct vn_softc *vn = dev->si_drv1;
152 
153 	IFOPT(vn, VN_LABELS)
154 		if (vn->sc_slices != NULL)
155 			dsclose(dev, ap->a_devtype, vn->sc_slices);
156 	return (0);
157 }
158 
159 /*
160  * Called only when si_drv1 is NULL.  Locate the associated vn node and
161  * attach the device to it.
162  */
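/*
 * All cdev_t's referring to a unit are chained through si_drv2, with
 * vn->sc_devlist as the head of the chain and si_drv1 pointing back at
 * the softc; vn_modevent() walks this chain to destroy the devices when
 * the module is unloaded.
 */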
163 static struct vn_softc *
164 vnfindvn(cdev_t dev)
165 {
166 	int unit;
167 	struct vn_softc *vn;
168 
169 	unit = dkunit(dev);
170 	SLIST_FOREACH(vn, &vn_list, sc_list) {
171 		if (vn->sc_unit == unit) {
172 			dev->si_drv1 = vn;
173 			dev->si_drv2 = vn->sc_devlist;
174 			vn->sc_devlist = dev;
175 			reference_dev(dev);
176 			break;
177 		}
178 	}
179 	if (vn == NULL) {
180 		vn = kmalloc(sizeof *vn, M_DEVBUF, M_WAITOK | M_ZERO);
181 		vn->sc_unit = unit;
182 		dev->si_drv1 = vn;
183 		vn->sc_devlist = make_dev(&vn_ops, 0, UID_ROOT,
184 					GID_OPERATOR, 0640, "vn%d", unit);
185 		if (vn->sc_devlist->si_drv1 == NULL) {
186 			reference_dev(vn->sc_devlist);
187 			vn->sc_devlist->si_drv1 = vn;
188 			vn->sc_devlist->si_drv2 = NULL;
189 		}
190 		if (vn->sc_devlist != dev) {
191 			dev->si_drv1 = vn;
192 			dev->si_drv2 = vn->sc_devlist;
193 			vn->sc_devlist = dev;
194 			reference_dev(dev);
195 		}
196 		SLIST_INSERT_HEAD(&vn_list, vn, sc_list);
197 	}
198 	return (vn);
199 }
200 
201 static	int
202 vnopen(struct dev_open_args *ap)
203 {
204 	cdev_t dev = ap->a_head.a_dev;
205 	struct vn_softc *vn;
206 	struct disk_info *info;
207 
208 	/*
209 	 * Locate preexisting device
210 	 */
211 
212 	if ((vn = dev->si_drv1) == NULL)
213 		vn = vnfindvn(dev);
214 
215 	/*
216 	 * Update si_bsize fields for the device.  This data will be overridden
217 	 * by the slice/partition code for vn accesses through partitions, and
218 	 * used directly if you open the 'whole disk' device.
219 	 *
220 	 * si_bsize_best must be reinitialized in case VN has been
221 	 * reconfigured, and is raised to at least VN_BSIZE_BEST for efficiency.
222 	 */
223 	dev->si_bsize_phys = vn->sc_secsize;
224 	dev->si_bsize_best = vn->sc_secsize;
225 	if (dev->si_bsize_best < VN_BSIZE_BEST)
226 		dev->si_bsize_best = VN_BSIZE_BEST;
227 
228 	if ((ap->a_oflags & FWRITE) && (vn->sc_flags & VNF_READONLY))
229 		return (EACCES);
230 
231 	IFOPT(vn, VN_FOLLOW)
232 		kprintf("vnopen(%s, 0x%x, 0x%x)\n",
233 		    devtoname(dev), ap->a_oflags, ap->a_devtype);
234 
235 	/*
236 	 * Initialize label
237 	 */
238 
239 	IFOPT(vn, VN_LABELS) {
240 		if (vn->sc_flags & VNF_INITED) {
241 			info = &vn->sc_info;
242 			bzero(info, sizeof(*info));
243 			info->d_media_blksize = vn->sc_secsize;
244 			info->d_media_blocks = vn->sc_size;
245 			/*
246 			 * reserve mbr sector for backwards compatibility
247 			 * when no slices exist.
248 			 */
249 			info->d_dsflags = DSO_COMPATMBR;
250 
251 			info->d_secpertrack = 32;
252 			info->d_nheads = 64 / (vn->sc_secsize / DEV_BSIZE);
253 			info->d_secpercyl = info->d_secpertrack *
254 					    info->d_nheads;
255 			info->d_ncylinders = vn->sc_size / info->d_secpercyl;
256 
257 			return (dsopen(dev, ap->a_devtype, 0,
258 					&vn->sc_slices, info));
259 		}
260 		if (dkslice(dev) != WHOLE_DISK_SLICE ||
261 		    dkpart(dev) != RAW_PART ||
262 		    ap->a_devtype != S_IFCHR) {
263 			return (ENXIO);
264 		}
265 	}
266 	return(0);
267 }
268 
269 /*
270  *	vnstrategy:
271  *
272  *	Run strategy routine for VN device.  We use VOP_READ/VOP_WRITE calls
273  *	for vnode-backed vn's, and the new vm_pager_strategy() call for
274  *	vm_object-backed vn's.
275  *
276  *	Currently B_ASYNC is only partially handled - for OBJT_SWAP I/O only.
277  */
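/*
 * For the non-label path the transfer must be sector aligned and is
 * clipped at EOF.  As an illustration (the numbers are arbitrary): with
 * sc_secsize = 512 and sc_size = 100, a 4096 byte request at bio_offset
 * 49664 maps to pbn 97, sz 8; since 97 + 8 > 100 the transfer is clipped
 * to (100 - 97) * 512 = 1536 bytes, or fails with EINVAL if B_BNOCLIP
 * is set.
 */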
278 static int
279 vnstrategy(struct dev_strategy_args *ap)
280 {
281 	cdev_t dev = ap->a_head.a_dev;
282 	struct bio *bio = ap->a_bio;
283 	struct buf *bp;
284 	struct bio *nbio;
285 	int unit;
286 	struct vn_softc *vn;
287 	int error;
288 
289 	unit = dkunit(dev);
290 	if ((vn = dev->si_drv1) == NULL)
291 		vn = vnfindvn(dev);
292 
293 	bp = bio->bio_buf;
294 
295 	IFOPT(vn, VN_DEBUG)
296 		kprintf("vnstrategy(%p): unit %d\n", bp, unit);
297 
298 	if ((vn->sc_flags & VNF_INITED) == 0) {
299 		bp->b_error = ENXIO;
300 		bp->b_flags |= B_ERROR;
301 		biodone(bio);
302 		return(0);
303 	}
304 
305 	bp->b_resid = bp->b_bcount;
306 
307 	IFOPT(vn, VN_LABELS) {
308 		/*
309 		 * The vnode device is using disk/slice label support.
310 		 *
311 		 * The dscheck() function is called to validate the
312 		 * slices that exist ON the vnode device itself and to
313 		 * translate the "slice-relative" block number (again).
314 		 * dscheck() will call biodone() and return NULL if
315 		 * we are at EOF or beyond the device size.
316 		 */
317 		if (vn->sc_slices == NULL) {
318 			nbio = bio;
319 		} else if ((nbio = dscheck(dev, bio, vn->sc_slices)) == NULL) {
320 			goto done;
321 		}
322 	} else {
323 		int64_t pbn;	/* in sc_secsize chunks */
324 		long sz;	/* in sc_secsize chunks */
325 
326 		/*
327 		 * Check for required alignment.  Transfers must be a valid
328 		 * multiple of the sector size.
329 		 */
330 		if (bp->b_bcount % vn->sc_secsize != 0 ||
331 		    bio->bio_offset % vn->sc_secsize != 0) {
332 			goto bad;
333 		}
334 
335 		pbn = bio->bio_offset / vn->sc_secsize;
336 		sz = howmany(bp->b_bcount, vn->sc_secsize);
337 
338 		/*
339 		 * Check for an illegal pbn or EOF truncation
340 		 */
341 		if (pbn < 0)
342 			goto bad;
343 		if (pbn + sz > vn->sc_size) {
344 			if (pbn > vn->sc_size || (bp->b_flags & B_BNOCLIP))
345 				goto bad;
346 			if (pbn == vn->sc_size) {
347 				bp->b_resid = bp->b_bcount;
348 				bp->b_flags |= B_INVAL;
349 				goto done;
350 			}
351 			bp->b_bcount = (vn->sc_size - pbn) * vn->sc_secsize;
352 		}
353 		nbio = push_bio(bio);
354 		nbio->bio_offset = pbn * vn->sc_secsize;
355 	}
356 
357 	/*
358 	 * Use the translated nbio from this point on
359 	 */
360 	if (vn->sc_vp && bp->b_cmd == BUF_CMD_FREEBLKS) {
361 		/*
362 		 * Freeblks is not handled for vnode-backed elements yet.
363 		 */
364 		bp->b_resid = 0;
365 		/* operation complete */
366 	} else if (vn->sc_vp) {
367 		/*
368 		 * VNODE I/O
369 		 *
370 		 * If an error occurs, we set B_ERROR but we do not set
371 		 * B_INVAL because (for a write anyway), the buffer is
372 		 * still valid.
373 		 */
374 		struct uio auio;
375 		struct iovec aiov;
376 
377 		bzero(&auio, sizeof(auio));
378 
379 		aiov.iov_base = bp->b_data;
380 		aiov.iov_len = bp->b_bcount;
381 		auio.uio_iov = &aiov;
382 		auio.uio_iovcnt = 1;
383 		auio.uio_offset = nbio->bio_offset;
384 		auio.uio_segflg = UIO_SYSSPACE;
385 		if (bp->b_cmd == BUF_CMD_READ)
386 			auio.uio_rw = UIO_READ;
387 		else
388 			auio.uio_rw = UIO_WRITE;
389 		auio.uio_resid = bp->b_bcount;
390 		auio.uio_td = curthread;
391 		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
392 		if (bp->b_cmd == BUF_CMD_READ)
393 			error = VOP_READ(vn->sc_vp, &auio, IO_DIRECT, vn->sc_cred);
394 		else
395 			error = VOP_WRITE(vn->sc_vp, &auio, IO_NOWDRAIN, vn->sc_cred);
396 		vn_unlock(vn->sc_vp);
397 		bp->b_resid = auio.uio_resid;
398 		if (error) {
399 			bp->b_error = error;
400 			bp->b_flags |= B_ERROR;
401 		}
402 		/* operation complete */
403 	} else if (vn->sc_object) {
404 		/*
405 		 * OBJT_SWAP I/O (handles read, write, freeblks)
406 		 *
407 		 * We have nothing to do if freeing blocks on a reserved
408 		 * swap area; otherwise execute the op.
409 		 */
410 		if (bp->b_cmd == BUF_CMD_FREEBLKS && TESTOPT(vn, VN_RESERVE)) {
411 			bp->b_resid = 0;
412 			/* operation complete */
413 		} else {
414 			vm_pager_strategy(vn->sc_object, nbio);
415 			return(0);
416 			/* NOT REACHED */
417 		}
418 	} else {
419 		bp->b_resid = bp->b_bcount;
420 		bp->b_flags |= B_ERROR | B_INVAL;
421 		bp->b_error = EINVAL;
422 		/* operation complete */
423 	}
424 	biodone(nbio);
425 	return(0);
426 
427 	/*
428 	 * Shortcuts / check failures on the original bio (not nbio).
429 	 */
430 bad:
431 	bp->b_error = EINVAL;
432 	bp->b_flags |= B_ERROR | B_INVAL;
433 done:
434 	biodone(bio);
435 	return(0);
436 }
437 
438 /* ARGSUSED */
439 static	int
440 vnioctl(struct dev_ioctl_args *ap)
441 {
442 	cdev_t dev = ap->a_head.a_dev;
443 	struct vn_softc *vn;
444 	struct vn_ioctl *vio;
445 	int error;
446 	u_long *f;
447 
448 	vn = dev->si_drv1;
449 	IFOPT(vn,VN_FOLLOW) {
450 		kprintf("vnioctl(%s, 0x%lx, %p, 0x%x): unit %d\n",
451 		    devtoname(dev), ap->a_cmd, ap->a_data, ap->a_fflag,
452 		    dkunit(dev));
453 	}
454 
455 	switch (ap->a_cmd) {
456 	case VNIOCATTACH:
457 	case VNIOCDETACH:
458 	case VNIOCGSET:
459 	case VNIOCGCLEAR:
460 	case VNIOCUSET:
461 	case VNIOCUCLEAR:
462 		goto vn_specific;
463 	}
464 
465 	IFOPT(vn,VN_LABELS) {
466 		if (vn->sc_slices != NULL) {
467 			error = dsioctl(dev, ap->a_cmd, ap->a_data,
468 					ap->a_fflag,
469 					&vn->sc_slices, &vn->sc_info);
470 			if (error != ENOIOCTL)
471 				return (error);
472 		}
473 		if (dkslice(dev) != WHOLE_DISK_SLICE ||
474 		    dkpart(dev) != RAW_PART)
475 			return (ENOTTY);
476 	}
477 
478     vn_specific:
479 
480 	error = suser_cred(ap->a_cred, 0);
481 	if (error)
482 		return (error);
483 
484 	vio = (struct vn_ioctl *)ap->a_data;
485 	f = (u_long*)ap->a_data;
486 
487 	switch (ap->a_cmd) {
488 	case VNIOCATTACH:
489 		if (vn->sc_flags & VNF_INITED)
490 			return(EBUSY);
491 
492 		if (vio->vn_file == NULL)
493 			error = vniocattach_swap(vn, vio, dev, ap->a_fflag, ap->a_cred);
494 		else
495 			error = vniocattach_file(vn, vio, dev, ap->a_fflag, ap->a_cred);
496 		break;
497 
498 	case VNIOCDETACH:
499 		if ((vn->sc_flags & VNF_INITED) == 0)
500 			return(ENXIO);
501 		/*
502 		 * XXX handle i/o in progress.  Return EBUSY, or wait, or
503 		 * flush the i/o.
504 		 * XXX handle multiple opens of the device.  Return EBUSY,
505 		 * or revoke the fd's.
506 		 * How are these problems handled for removable and failing
507 		 * hardware devices? (Hint: They are not)
508 		 */
509 		vnclear(vn);
510 		IFOPT(vn, VN_FOLLOW)
511 			kprintf("vnioctl: CLRed\n");
512 		break;
513 
514 	case VNIOCGSET:
515 		vn_options |= *f;
516 		*f = vn_options;
517 		break;
518 
519 	case VNIOCGCLEAR:
520 		vn_options &= ~(*f);
521 		*f = vn_options;
522 		break;
523 
524 	case VNIOCUSET:
525 		vn->sc_options |= *f;
526 		*f = vn->sc_options;
527 		break;
528 
529 	case VNIOCUCLEAR:
530 		vn->sc_options &= ~(*f);
531 		*f = vn->sc_options;
532 		break;
533 
534 	default:
535 		error = ENOTTY;
536 		break;
537 	}
538 	return(error);
539 }
540 
541 /*
542  *	vniocattach_file:
543  *
544  *	Attach a file to a VN partition.  Return the size in the vn_size
545  *	field.
546  */
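/*
 * Illustrative sketch of the userland side of a file attach (the device
 * node and path are arbitrary, error handling is omitted, and vnconfig(8)
 * is the usual front end).  A vn_size of 0 means the size is taken from
 * the file itself:
 *
 *	struct vn_ioctl vio;
 *	int fd = open("/dev/vn0", O_RDWR);
 *
 *	bzero(&vio, sizeof(vio));
 *	vio.vn_file = "/var/tmp/diskimage";
 *	vio.vn_size = 0;
 *	ioctl(fd, VNIOCATTACH, &vio);
 */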
547 
548 static int
549 vniocattach_file(struct vn_softc *vn, struct vn_ioctl *vio, cdev_t dev,
550 		 int flag, struct ucred *cred)
551 {
552 	struct vattr vattr;
553 	struct nlookupdata nd;
554 	int error, flags;
555 	struct vnode *vp;
556 
557 	flags = FREAD|FWRITE;
558 	error = nlookup_init(&nd, vio->vn_file,
559 				UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
560 	if (error)
561 		return (error);
562 	if ((error = vn_open(&nd, NULL, flags, 0)) != 0) {
563 		if (error != EACCES && error != EPERM && error != EROFS)
564 			goto done;
565 		flags &= ~FWRITE;
566 		nlookup_done(&nd);
567 		error = nlookup_init(&nd, vio->vn_file, UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
568 		if (error)
569 			return (error);
570 		if ((error = vn_open(&nd, NULL, flags, 0)) != 0)
571 			goto done;
572 	}
573 	vp = nd.nl_open_vp;
574 	if (vp->v_type != VREG ||
575 	    (error = VOP_GETATTR(vp, &vattr))) {
576 		if (error == 0)
577 			error = EINVAL;
578 		goto done;
579 	}
580 	vn_unlock(vp);
581 	vn->sc_secsize = DEV_BSIZE;
582 	vn->sc_vp = vp;
583 	nd.nl_open_vp = NULL;
584 
585 	/*
586 	 * If the size is specified, override the file attributes.  Note that
587 	 * the vn_size argument is in PAGE_SIZE sized blocks.
588 	 */
589 	if (vio->vn_size)
590 		vn->sc_size = vio->vn_size * PAGE_SIZE / vn->sc_secsize;
591 	else
592 		vn->sc_size = vattr.va_size / vn->sc_secsize;
593 	error = vnsetcred(vn, cred);
594 	if (error) {
595 		vn->sc_vp = NULL;
596 		vn_close(vp, flags);
597 		goto done;
598 	}
599 	vn->sc_flags |= VNF_INITED;
600 	if (flags == FREAD)
601 		vn->sc_flags |= VNF_READONLY;
602 	IFOPT(vn, VN_LABELS) {
603 		/*
604 		 * Reopen so that `ds' knows which devices are open.
605 		 * If this is the first VNIOCSET, then we've
606 		 * guaranteed that the device is the cdev and that
607 		 * no other slices or labels are open.  Otherwise,
608 		 * we rely on VNIOCCLR not being abused.
609 		 */
610 		error = dev_dopen(dev, flag, S_IFCHR, cred);
611 		if (error)
612 			vnclear(vn);
613 	}
614 	IFOPT(vn, VN_FOLLOW)
615 		kprintf("vnioctl: SET vp %p size %llx blks\n",
616 		       vn->sc_vp, vn->sc_size);
617 done:
618 	nlookup_done(&nd);
619 	return(error);
620 }
621 
622 /*
623  *	vniocattach_swap:
624  *
625  *	Attach swap backing store to a VN partition of the size specified
626  *	in vn_size.
627  */
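/*
 * A swap-backed attach is selected by passing a NULL vn_file and a
 * non-zero vn_size; for example (figure arbitrary), vn_size = 8192 with
 * 4KB pages yields a 32MB swap-backed device.
 */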
628 
629 static int
630 vniocattach_swap(struct vn_softc *vn, struct vn_ioctl *vio, cdev_t dev,
631 		 int flag, struct ucred *cred)
632 {
633 	int error;
634 
635 	/*
636 	 * Range check.  Disallow negative sizes and any size less than
637 	 * one page; vn_size is specified in PAGE_SIZE'd chunks.
638 	 */
639 
640 	if (vio->vn_size <= 0)
641 		return(EDOM);
642 
643 	/*
644 	 * Allocate an OBJT_SWAP object.
645 	 *
646 	 * sc_secsize is PAGE_SIZE'd
647 	 *
648 	 * vio->vn_size is in PAGE_SIZE'd chunks.
649 	 * sc_size must be in PAGE_SIZE'd chunks.
650 	 * Note the truncation.
651 	 */
652 
653 	vn->sc_secsize = PAGE_SIZE;
654 	vn->sc_size = vio->vn_size;
655 	vn->sc_object = vm_pager_allocate(OBJT_SWAP, NULL,
656 					  vn->sc_secsize * (off_t)vio->vn_size,
657 					  VM_PROT_DEFAULT, 0);
658 	IFOPT(vn, VN_RESERVE) {
659 		if (swap_pager_reserve(vn->sc_object, 0, vn->sc_size) < 0) {
660 			vm_pager_deallocate(vn->sc_object);
661 			vn->sc_object = NULL;
662 			return(EDOM);
663 		}
664 	}
665 	vn->sc_flags |= VNF_INITED;
666 
667 	error = vnsetcred(vn, cred);
668 	if (error == 0) {
669 		IFOPT(vn, VN_LABELS) {
670 			/*
671 			 * Reopen so that `ds' knows which devices are open.
672 			 * If this is the first VNIOCSET, then we've
673 			 * guaranteed that the device is the cdev and that
674 			 * no other slices or labels are open.  Otherwise,
675 			 * we rely on VNIOCCLR not being abused.
676 			 */
677 			error = dev_dopen(dev, flag, S_IFCHR, cred);
678 		}
679 	}
680 	if (error == 0) {
681 		IFOPT(vn, VN_FOLLOW) {
682 			kprintf("vnioctl: SET vp %p size %llx\n",
683 			       vn->sc_vp, vn->sc_size);
684 		}
685 	}
686 	if (error)
687 		vnclear(vn);
688 	return(error);
689 }
690 
691 /*
692  * Duplicate the current process's credentials.  Since we are called only
693  * as the result of a SET ioctl and only root can do that, any future access
694  * to this "disk" is essentially as root.  Note that credentials may change
695  * if some other uid can write directly to the mapped file (NFS).
696  */
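/*
 * The dummy sector-sized read performed below is presumably there so
 * that, for NFS-backed files, the new credentials are exercised right
 * away and any access problem shows up at attach time rather than later
 * during block I/O.
 */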
697 int
698 vnsetcred(struct vn_softc *vn, struct ucred *cred)
699 {
700 	char *tmpbuf;
701 	int error = 0;
702 
703 	/*
704 	 * Set credentials in our softc
705 	 */
706 
707 	if (vn->sc_cred)
708 		crfree(vn->sc_cred);
709 	vn->sc_cred = crdup(cred);
710 
711 	/*
712 	 * Horrible kludge to establish credentials for NFS  XXX.
713 	 */
714 
715 	if (vn->sc_vp) {
716 		struct uio auio;
717 		struct iovec aiov;
718 
719 		tmpbuf = kmalloc(vn->sc_secsize, M_TEMP, M_WAITOK);
720 		bzero(&auio, sizeof(auio));
721 
722 		aiov.iov_base = tmpbuf;
723 		aiov.iov_len = vn->sc_secsize;
724 		auio.uio_iov = &aiov;
725 		auio.uio_iovcnt = 1;
726 		auio.uio_offset = 0;
727 		auio.uio_rw = UIO_READ;
728 		auio.uio_segflg = UIO_SYSSPACE;
729 		auio.uio_resid = aiov.iov_len;
730 		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
731 		error = VOP_READ(vn->sc_vp, &auio, 0, vn->sc_cred);
732 		vn_unlock(vn->sc_vp);
733 		kfree(tmpbuf, M_TEMP);
734 	}
735 	return (error);
736 }
737 
738 void
739 vnclear(struct vn_softc *vn)
740 {
741 	IFOPT(vn, VN_FOLLOW)
742 		kprintf("vnclear(%p): vp=%p\n", vn, vn->sc_vp);
743 	if (vn->sc_slices != NULL)
744 		dsgone(&vn->sc_slices);
745 	vn->sc_flags &= ~VNF_INITED;
746 	if (vn->sc_vp != NULL) {
747 		vn_close(vn->sc_vp,
748 		    (vn->sc_flags & VNF_READONLY) ?  FREAD : (FREAD|FWRITE));
749 		vn->sc_vp = NULL;
750 	}
751 	vn->sc_flags &= ~VNF_READONLY;
752 	if (vn->sc_cred) {
753 		crfree(vn->sc_cred);
754 		vn->sc_cred = NULL;
755 	}
756 	if (vn->sc_object != NULL) {
757 		vm_pager_deallocate(vn->sc_object);
758 		vn->sc_object = NULL;
759 	}
760 	vn->sc_size = 0;
761 }
762 
763 static int
764 vnsize(struct dev_psize_args *ap)
765 {
766 	cdev_t dev = ap->a_head.a_dev;
767 	struct vn_softc *vn;
768 
769 	vn = dev->si_drv1;
770 	if (!vn)
771 		return(ENXIO);
772 	if ((vn->sc_flags & VNF_INITED) == 0)
773 		return(ENXIO);
774 	ap->a_result = (int64_t)vn->sc_size;
775 	return(0);
776 }
777 
778 static int
779 vn_modevent(module_t mod, int type, void *data)
780 {
781 	struct vn_softc *vn;
782 	cdev_t dev;
783 
784 	switch (type) {
785 	case MOD_LOAD:
786 		dev_ops_add(&vn_ops, 0, 0);
787 		break;
788 	case MOD_UNLOAD:
789 		/* fall through */
790 	case MOD_SHUTDOWN:
791 		while ((vn = SLIST_FIRST(&vn_list)) != NULL) {
792 			SLIST_REMOVE_HEAD(&vn_list, sc_list);
793 			if (vn->sc_flags & VNF_INITED)
794 				vnclear(vn);
795 			/* Cleanup all cdev_t's that refer to this unit */
796 			while ((dev = vn->sc_devlist) != NULL) {
797 				vn->sc_devlist = dev->si_drv2;
798 				dev->si_drv1 = dev->si_drv2 = NULL;
799 				destroy_dev(dev);
800 			}
801 			kfree(vn, M_DEVBUF);
802 		}
803 		dev_ops_remove(&vn_ops, 0, 0);
804 		break;
805 	default:
806 		break;
807 	}
808 	return 0;
809 }
810 
811 DEV_MODULE(vn, vn_modevent, 0);
812