xref: /dragonfly/sys/dev/disk/vn/vn.c (revision 685c703c)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * from: Utah Hdr: vn.c 1.13 94/04/02
39  *
40  *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
41  * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $
42  * $DragonFly: src/sys/dev/disk/vn/vn.c,v 1.25 2006/07/28 02:17:35 dillon Exp $
43  */
44 
45 /*
46  * Vnode disk driver.
47  *
48  * Block/character interface to a vnode.  Allows one to treat a file
49  * as a disk (e.g. build a filesystem in it, mount it, etc.).
50  *
51  * NOTE 1: There is a security issue involved with this driver.
52  * Once mounted all access to the contents of the "mapped" file via
53  * the special file is controlled by the permissions on the special
54  * file, the protection of the mapped file is ignored (effectively,
55  * by using root credentials in all transactions).
56  *
57  * NOTE 2: Doesn't interact with leases, should it?
58  */
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/proc.h>
64 #include <sys/nlookup.h>
65 #include <sys/buf.h>
66 #include <sys/malloc.h>
67 #include <sys/mount.h>
68 #include <sys/vnode.h>
69 #include <sys/fcntl.h>
70 #include <sys/conf.h>
71 #include <sys/disklabel.h>
72 #include <sys/diskslice.h>
73 #include <sys/stat.h>
74 #include <sys/module.h>
75 #include <sys/vnioctl.h>
76 
77 #include <vm/vm.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_pager.h>
81 #include <vm/vm_pageout.h>
82 #include <vm/swap_pager.h>
83 #include <vm/vm_extern.h>
84 #include <vm/vm_zone.h>
85 
/* Forward declarations for the dev_ops entry points implemented below. */
static	d_ioctl_t	vnioctl;
static	d_open_t	vnopen;
static	d_close_t	vnclose;
static	d_psize_t	vnsize;
static	d_strategy_t	vnstrategy;

/* Character device major number reserved for the vn driver. */
#define CDEV_MAJOR 43

/* Floor advertised in si_bsize_best on open (see vnopen()). */
#define VN_BSIZE_BEST	8192
95 
96 /*
97  * dev_ops
98  *	D_DISK		we want to look like a disk
99  *	D_CANFREE	We support BUF_CMD_FREEBLKS
100  */
101 
static struct dev_ops vn_ops = {
	{ "vn", CDEV_MAJOR, D_DISK | D_CANFREE },
	.d_open =	vnopen,
	.d_close =	vnclose,
	.d_read =	physread,	/* generic raw-device read via physio */
	.d_write =	physwrite,	/* generic raw-device write via physio */
	.d_ioctl =	vnioctl,
	.d_strategy =	vnstrategy,
	.d_psize =	vnsize
};
112 
/*
 * Per-unit softc.  A unit is backed either by a vnode (sc_vp, file-backed)
 * or by an OBJT_SWAP VM object (sc_object, swap-backed), never both.
 * Several dev_t's may refer to one unit; they are chained through si_drv2
 * with the head kept in sc_devlist (see vnfindvn()).
 */
struct vn_softc {
	int		sc_unit;	/* unit number (from dkunit())	*/
	int		sc_flags;	/* flags 			*/
	int		sc_size;	/* size of vn, sc_secsize scale	*/
	int		sc_secsize;	/* sector size			*/
	struct diskslices *sc_slices;	/* slice/label state (VN_LABELS) */
	struct vnode	*sc_vp;		/* vnode if not NULL		*/
	vm_object_t	sc_object;	/* backing object if not NULL	*/
	struct ucred	*sc_cred;	/* credentials 			*/
	int		 sc_maxactive;	/* max # of active requests 	*/
	struct buf	 sc_tab;	/* transfer queue 		*/
	u_long		 sc_options;	/* options 			*/
	dev_t		 sc_devlist;	/* devices that refer to this unit */
	SLIST_ENTRY(vn_softc) sc_list;	/* link on global vn_list	*/
};

/* All softc's ever created; torn down in vn_modevent(). */
static SLIST_HEAD(, vn_softc) vn_list;

/* sc_flags */
#define VNF_INITED	0x01		/* backing store is attached	*/
#define	VNF_READONLY	0x02		/* backing file opened w/o FWRITE */

/* Driver-global option bits, OR'd with each unit's sc_options. */
static u_long	vn_options;

/* Test option bits against the union of per-unit and global options. */
#define IFOPT(vn,opt) if (((vn)->sc_options|vn_options) & (opt))
#define TESTOPT(vn,opt) (((vn)->sc_options|vn_options) & (opt))

static int	vnsetcred (struct vn_softc *vn, struct ucred *cred);
static void	vnclear (struct vn_softc *vn);
static int	vn_modevent (module_t, int, void *);
static int 	vniocattach_file (struct vn_softc *, struct vn_ioctl *, dev_t dev, int flag, struct ucred *cred);
static int 	vniocattach_swap (struct vn_softc *, struct vn_ioctl *, dev_t dev, int flag, struct ucred *cred);
145 
146 static	int
147 vnclose(struct dev_close_args *ap)
148 {
149 	dev_t dev = ap->a_head.a_dev;
150 	struct vn_softc *vn = dev->si_drv1;
151 
152 	IFOPT(vn, VN_LABELS)
153 		if (vn->sc_slices != NULL)
154 			dsclose(dev, ap->a_devtype, vn->sc_slices);
155 	return (0);
156 }
157 
/*
 * Called only when si_drv1 is NULL.  Locate the associated vn node and
 * attach the device to it, creating the per-unit softc on first use.
 *
 * Every dev_t referring to a unit is chained onto sc_devlist via its
 * si_drv2 field and holds a device reference; the chain is torn down
 * in vn_modevent().
 */
static struct vn_softc *
vnfindvn(dev_t dev)
{
	int unit;
	struct vn_softc *vn;

	unit = dkunit(dev);
	SLIST_FOREACH(vn, &vn_list, sc_list) {
		if (vn->sc_unit == unit) {
			/* known unit: push dev onto its device chain */
			dev->si_drv1 = vn;
			dev->si_drv2 = vn->sc_devlist;
			vn->sc_devlist = dev;
			reference_dev(dev);
			break;
		}
	}
	if (vn == NULL) {
		/* first reference to this unit: create the softc */
		vn = malloc(sizeof *vn, M_DEVBUF, M_WAITOK | M_ZERO);
		vn->sc_unit = unit;
		dev->si_drv1 = vn;
		/*
		 * Create the canonical "vn%d" device so the unit is
		 * reachable by its base name even when entered via a
		 * slice/partition dev_t.
		 * NOTE(review): minor number passed is 0 for every unit
		 * rather than being derived from `unit` - looks
		 * intentional in this revision but worth confirming.
		 */
		vn->sc_devlist = make_dev(&vn_ops, 0, UID_ROOT,
					GID_OPERATOR, 0640, "vn%d", unit);
		if (vn->sc_devlist->si_drv1 == NULL) {
			reference_dev(vn->sc_devlist);
			vn->sc_devlist->si_drv1 = vn;
			vn->sc_devlist->si_drv2 = NULL;
		}
		/*
		 * If make_dev() returned a device other than the one we
		 * were entered with, chain ours in behind it.
		 */
		if (vn->sc_devlist != dev) {
			dev->si_drv1 = vn;
			dev->si_drv2 = vn->sc_devlist;
			vn->sc_devlist = dev;
			reference_dev(dev);
		}
		SLIST_INSERT_HEAD(&vn_list, vn, sc_list);
	}
	return (vn);
}
199 
/*
 * vnopen() - d_open entry point.
 *
 * Publishes the unit's block-size hints on the dev_t, enforces the
 * read-only restriction, and (with VN_LABELS) either hands a synthetic
 * whole-disk label to dsopen() or, for an unconfigured unit, permits
 * only the raw character device so the attach ioctl can be issued.
 */
static	int
vnopen(struct dev_open_args *ap)
{
	dev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn;

	/*
	 * Locate preexisting device
	 */

	if ((vn = dev->si_drv1) == NULL)
		vn = vnfindvn(dev);

	/*
	 * Update si_bsize fields for device.  This data will be overridden
	 * by the slice/partition code for vn accesses through partitions,
	 * and used directly if you open the 'whole disk' device.
	 *
	 * si_bsize_best must be reinitialized in case VN has been
	 * reconfigured, plus make it at least VN_BSIZE_BEST for efficiency.
	 */
	dev->si_bsize_phys = vn->sc_secsize;
	dev->si_bsize_best = vn->sc_secsize;
	if (dev->si_bsize_best < VN_BSIZE_BEST)
		dev->si_bsize_best = VN_BSIZE_BEST;

	/* refuse writable opens of a unit backed by a read-only file */
	if ((ap->a_oflags & FWRITE) && (vn->sc_flags & VNF_READONLY))
		return (EACCES);

	IFOPT(vn, VN_FOLLOW)
		printf("vnopen(%s, 0x%x, 0x%x)\n",
		    devtoname(dev), ap->a_oflags, ap->a_devtype);

	/*
	 * Initialize label
	 */

	IFOPT(vn, VN_LABELS) {
		if (vn->sc_flags & VNF_INITED) {
			struct disklabel label;

			/*
			 * Build label for whole disk.  The geometry is
			 * synthetic (32 sectors, tracks scaled so a
			 * cylinder is a fixed byte size); only the total
			 * size really matters.
			 */
			bzero(&label, sizeof label);
			label.d_secsize = vn->sc_secsize;
			label.d_nsectors = 32;
			label.d_ntracks = 64 / (vn->sc_secsize / DEV_BSIZE);
			label.d_secpercyl = label.d_nsectors * label.d_ntracks;
			label.d_ncylinders = vn->sc_size / label.d_secpercyl;
			label.d_secperunit = vn->sc_size;
			label.d_partitions[RAW_PART].p_size = vn->sc_size;

			return (dsopen(dev, ap->a_devtype, 0, &vn->sc_slices, &label));
		}
		/*
		 * Not configured yet: only the raw character device of
		 * the whole-disk slice may be opened.
		 */
		if (dkslice(dev) != WHOLE_DISK_SLICE ||
		    dkpart(dev) != RAW_PART ||
		    ap->a_devtype != S_IFCHR) {
			return (ENXIO);
		}
	}
	return(0);
}
261 
262 /*
263  *	vnstrategy:
264  *
265  *	Run strategy routine for VN device.  We use VOP_READ/VOP_WRITE calls
266  *	for vnode-backed vn's, and the new vm_pager_strategy() call for
267  *	vm_object-backed vn's.
268  *
269  *	Currently B_ASYNC is only partially handled - for OBJT_SWAP I/O only.
270  */
271 static int
272 vnstrategy(struct dev_strategy_args *ap)
273 {
274 	dev_t dev = ap->a_head.a_dev;
275 	struct bio *bio = ap->a_bio;
276 	struct buf *bp;
277 	struct bio *nbio;
278 	int unit;
279 	struct vn_softc *vn;
280 	int error;
281 
282 	unit = dkunit(dev);
283 	if ((vn = dev->si_drv1) == NULL)
284 		vn = vnfindvn(dev);
285 
286 	bp = bio->bio_buf;
287 
288 	IFOPT(vn, VN_DEBUG)
289 		printf("vnstrategy(%p): unit %d\n", bp, unit);
290 
291 	if ((vn->sc_flags & VNF_INITED) == 0) {
292 		bp->b_error = ENXIO;
293 		bp->b_flags |= B_ERROR;
294 		biodone(bio);
295 		return(0);
296 	}
297 
298 	bp->b_resid = bp->b_bcount;
299 
300 	IFOPT(vn, VN_LABELS) {
301 	    	/*
302 		 * The vnode device is using disk/slice label support.
303 		 *
304 		 * The dscheck() function is called for validating the
305 		 * slices that exist ON the vnode device itself, and
306 		 * translate the "slice-relative" block number, again.
307 		 * dscheck() will call biodone() and return NULL if
308 		 * we are at EOF or beyond the device size.
309 		 */
310 		if (vn->sc_slices == NULL) {
311 			nbio = bio;
312 		} else if ((nbio = dscheck(dev, bio, vn->sc_slices)) == NULL) {
313 			goto done;
314 		}
315 	} else {
316 		int pbn;	/* in sc_secsize chunks */
317 		long sz;	/* in sc_secsize chunks */
318 
319 		/*
320 		 * Check for required alignment.  Transfers must be a valid
321 		 * multiple of the sector size.
322 		 */
323 		if (bp->b_bcount % vn->sc_secsize != 0 ||
324 		    bio->bio_offset % vn->sc_secsize != 0) {
325 			goto bad;
326 		}
327 
328 		pbn = bio->bio_offset / vn->sc_secsize;
329 		sz = howmany(bp->b_bcount, vn->sc_secsize);
330 
331 		/*
332 		 * Check for an illegal pbn or EOF truncation
333 		 */
334 		if (pbn < 0)
335 			goto bad;
336 		if (pbn + sz > vn->sc_size) {
337 			if (pbn > vn->sc_size || (bp->b_flags & B_BNOCLIP))
338 				goto bad;
339 			if (pbn == vn->sc_size) {
340 				bp->b_resid = bp->b_bcount;
341 				bp->b_flags |= B_INVAL;
342 				goto done;
343 			}
344 			bp->b_bcount = (vn->sc_size - pbn) * vn->sc_secsize;
345 		}
346 		nbio = push_bio(bio);
347 		nbio->bio_offset = pbn * vn->sc_secsize;
348 	}
349 
350 	/*
351 	 * Use the translated nbio from this point on
352 	 */
353 	if (vn->sc_vp && bp->b_cmd == BUF_CMD_FREEBLKS) {
354 		/*
355 		 * Freeblks is not handled for vnode-backed elements yet.
356 		 */
357 		bp->b_resid = 0;
358 		/* operation complete */
359 	} else if (vn->sc_vp) {
360 		/*
361 		 * VNODE I/O
362 		 *
363 		 * If an error occurs, we set B_ERROR but we do not set
364 		 * B_INVAL because (for a write anyway), the buffer is
365 		 * still valid.
366 		 */
367 		struct uio auio;
368 		struct iovec aiov;
369 
370 		bzero(&auio, sizeof(auio));
371 
372 		aiov.iov_base = bp->b_data;
373 		aiov.iov_len = bp->b_bcount;
374 		auio.uio_iov = &aiov;
375 		auio.uio_iovcnt = 1;
376 		auio.uio_offset = nbio->bio_offset;
377 		auio.uio_segflg = UIO_SYSSPACE;
378 		if (bp->b_cmd == BUF_CMD_READ)
379 			auio.uio_rw = UIO_READ;
380 		else
381 			auio.uio_rw = UIO_WRITE;
382 		auio.uio_resid = bp->b_bcount;
383 		auio.uio_td = curthread;
384 		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
385 		if (bp->b_cmd == BUF_CMD_READ)
386 			error = VOP_READ(vn->sc_vp, &auio, IO_DIRECT, vn->sc_cred);
387 		else
388 			error = VOP_WRITE(vn->sc_vp, &auio, IO_NOWDRAIN, vn->sc_cred);
389 		VOP_UNLOCK(vn->sc_vp, 0);
390 		bp->b_resid = auio.uio_resid;
391 		if (error) {
392 			bp->b_error = error;
393 			bp->b_flags |= B_ERROR;
394 		}
395 		/* operation complete */
396 	} else if (vn->sc_object) {
397 		/*
398 		 * OBJT_SWAP I/O (handles read, write, freebuf)
399 		 *
400 		 * We have nothing to do if freeing  blocks on a reserved
401 		 * swap area, othrewise execute the op.
402 		 */
403 		if (bp->b_cmd == BUF_CMD_FREEBLKS && TESTOPT(vn, VN_RESERVE)) {
404 			bp->b_resid = 0;
405 			/* operation complete */
406 		} else {
407 			vm_pager_strategy(vn->sc_object, nbio);
408 			return(0);
409 			/* NOT REACHED */
410 		}
411 	} else {
412 		bp->b_resid = bp->b_bcount;
413 		bp->b_flags |= B_ERROR | B_INVAL;
414 		bp->b_error = EINVAL;
415 		/* operation complete */
416 	}
417 	biodone(nbio);
418 	return(0);
419 
420 	/*
421 	 * Shortcuts / check failures on the original bio (not nbio).
422 	 */
423 bad:
424 	bp->b_error = EINVAL;
425 	bp->b_flags |= B_ERROR | B_INVAL;
426 done:
427 	biodone(bio);
428 	return(0);
429 }
430 
/*
 * vnioctl() - d_ioctl entry point.
 *
 * vn-specific attach/detach/option ioctls are handled here (root only);
 * everything else is forwarded to the slice/label code when VN_LABELS
 * is in effect.
 */
/* ARGSUSED */
static	int
vnioctl(struct dev_ioctl_args *ap)
{
	dev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn;
	struct vn_ioctl *vio;
	int error;
	u_long *f;

	vn = dev->si_drv1;
	IFOPT(vn,VN_FOLLOW) {
		printf("vnioctl(%s, 0x%lx, %p, 0x%x): unit %d\n",
		    devtoname(dev), ap->a_cmd, ap->a_data, ap->a_fflag,
		    dkunit(dev));
	}

	/* vn-specific commands bypass the slice/label code entirely */
	switch (ap->a_cmd) {
	case VNIOCATTACH:
	case VNIOCDETACH:
	case VNIOCGSET:
	case VNIOCGCLEAR:
	case VNIOCUSET:
	case VNIOCUCLEAR:
		goto vn_specific;
	}

	IFOPT(vn,VN_LABELS) {
		if (vn->sc_slices != NULL) {
			error = dsioctl(dev, ap->a_cmd, ap->a_data,
					ap->a_fflag, &vn->sc_slices);
			if (error != ENOIOCTL)
				return (error);
		}
		if (dkslice(dev) != WHOLE_DISK_SLICE ||
		    dkpart(dev) != RAW_PART)
			return (ENOTTY);
	}

    vn_specific:

	/* all vn-specific ioctls are privileged */
	error = suser_cred(ap->a_cred, 0);
	if (error)
		return (error);

	vio = (struct vn_ioctl *)ap->a_data;
	f = (u_long*)ap->a_data;	/* option-word view of the argument */

	switch (ap->a_cmd) {
	case VNIOCATTACH:
		if (vn->sc_flags & VNF_INITED)
			return(EBUSY);

		/* NULL file path selects swap backing, otherwise a file */
		if (vio->vn_file == NULL)
			error = vniocattach_swap(vn, vio, dev, ap->a_fflag, ap->a_cred);
		else
			error = vniocattach_file(vn, vio, dev, ap->a_fflag, ap->a_cred);
		break;

	case VNIOCDETACH:
		if ((vn->sc_flags & VNF_INITED) == 0)
			return(ENXIO);
		/*
		 * XXX handle i/o in progress.  Return EBUSY, or wait, or
		 * flush the i/o.
		 * XXX handle multiple opens of the device.  Return EBUSY,
		 * or revoke the fd's.
		 * How are these problems handled for removable and failing
		 * hardware devices? (Hint: They are not)
		 */
		vnclear(vn);
		IFOPT(vn, VN_FOLLOW)
			printf("vnioctl: CLRed\n");
		break;

	case VNIOCGSET:
		/* set bits in the driver-global option word */
		vn_options |= *f;
		*f = vn_options;
		break;

	case VNIOCGCLEAR:
		/* clear bits in the driver-global option word */
		vn_options &= ~(*f);
		*f = vn_options;
		break;

	case VNIOCUSET:
		/* set bits in this unit's option word */
		vn->sc_options |= *f;
		*f = vn->sc_options;
		break;

	case VNIOCUCLEAR:
		/* clear bits in this unit's option word */
		vn->sc_options &= ~(*f);
		*f = vn->sc_options;
		break;

	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
532 
/*
 *	vniocattach_file:
 *
 *	Attach a file to a VN partition.  Return the size in the vn_size
 *	field.
 *
 *	On success the unit owns a reference on the backing vnode and is
 *	marked VNF_INITED (plus VNF_READONLY if the file could only be
 *	opened for reading).
 */

static int
vniocattach_file(struct vn_softc *vn, struct vn_ioctl *vio, dev_t dev,
		 int flag, struct ucred *cred)
{
	struct vattr vattr;
	struct nlookupdata nd;
	int error, flags;
	struct vnode *vp;

	/*
	 * Try a read/write open first; on a permission or read-only
	 * filesystem failure, retry the lookup and open read-only.
	 */
	flags = FREAD|FWRITE;
	error = nlookup_init(&nd, vio->vn_file,
				UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
	if (error)
		return (error);
	if ((error = vn_open(&nd, NULL, flags, 0)) != 0) {
		if (error != EACCES && error != EPERM && error != EROFS)
			goto done;
		flags &= ~FWRITE;
		nlookup_done(&nd);
		error = nlookup_init(&nd, vio->vn_file, UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
		if (error)
			return (error);
		if ((error = vn_open(&nd, NULL, flags, 0)) != 0)
			goto done;
	}
	/* only regular files may back a vn device */
	vp = nd.nl_open_vp;
	if (vp->v_type != VREG ||
	    (error = VOP_GETATTR(vp, &vattr))) {
		if (error == 0)
			error = EINVAL;
		goto done;
	}
	VOP_UNLOCK(vp, 0);
	vn->sc_secsize = DEV_BSIZE;
	vn->sc_vp = vp;
	nd.nl_open_vp = NULL;	/* steal the open vnode from the nlookup */

	/*
	 * If the size is specified, override the file attributes.  Note that
	 * the vn_size argument is in PAGE_SIZE sized blocks.
	 */
	if (vio->vn_size)
		vn->sc_size = (quad_t)vio->vn_size * PAGE_SIZE / vn->sc_secsize;
	else
		vn->sc_size = vattr.va_size / vn->sc_secsize;
	error = vnsetcred(vn, cred);
	if (error) {
		vn->sc_vp = NULL;
		vn_close(vp, flags);
		goto done;
	}
	vn->sc_flags |= VNF_INITED;
	if (flags == FREAD)
		vn->sc_flags |= VNF_READONLY;
	IFOPT(vn, VN_LABELS) {
		/*
		 * Reopen so that `ds' knows which devices are open.
		 * If this is the first VNIOCSET, then we've
		 * guaranteed that the device is the cdev and that
		 * no other slices or labels are open.  Otherwise,
		 * we rely on VNIOCCLR not being abused.
		 */
		error = dev_dopen(dev, flag, S_IFCHR, cred);
		if (error)
			vnclear(vn);
	}
	IFOPT(vn, VN_FOLLOW)
		printf("vnioctl: SET vp %p size %x blks\n",
		       vn->sc_vp, vn->sc_size);
done:
	nlookup_done(&nd);
	return(error);
}
613 
/*
 *	vniocattach_swap:
 *
 *	Attach swap backing store to a VN partition of the size specified
 *	in vn_size.
 *
 *	With VN_RESERVE the swap blocks are preallocated up front so later
 *	I/O cannot fail for lack of swap space.
 */

static int
vniocattach_swap(struct vn_softc *vn, struct vn_ioctl *vio, dev_t dev,
		 int flag, struct ucred *cred)
{
	int error;

	/*
	 * Range check.  Disallow negative sizes or any size less than the
	 * size of a page.  Then round to a page.
	 */

	if (vio->vn_size <= 0)
		return(EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * sc_secsize is PAGE_SIZE'd
	 *
	 * vio->vn_size is in PAGE_SIZE'd chunks.
	 * sc_size must be in PAGE_SIZE'd chunks.
	 * Note the truncation.
	 */

	vn->sc_secsize = PAGE_SIZE;
	vn->sc_size = vio->vn_size;
	vn->sc_object = vm_pager_allocate(OBJT_SWAP, NULL,
					  vn->sc_secsize * (off_t)vio->vn_size,
					  VM_PROT_DEFAULT, 0);
	IFOPT(vn, VN_RESERVE) {
		/* preallocate the swap; undo everything on failure */
		if (swap_pager_reserve(vn->sc_object, 0, vn->sc_size) < 0) {
			vm_pager_deallocate(vn->sc_object);
			vn->sc_object = NULL;
			return(EDOM);
		}
	}
	vn->sc_flags |= VNF_INITED;

	error = vnsetcred(vn, cred);
	if (error == 0) {
		IFOPT(vn, VN_LABELS) {
			/*
			 * Reopen so that `ds' knows which devices are open.
			 * If this is the first VNIOCSET, then we've
			 * guaranteed that the device is the cdev and that
			 * no other slices or labels are open.  Otherwise,
			 * we rely on VNIOCCLR not being abused.
			 */
			error = dev_dopen(dev, flag, S_IFCHR, cred);
		}
	}
	if (error == 0) {
		IFOPT(vn, VN_FOLLOW) {
			printf("vnioctl: SET vp %p size %x\n",
			       vn->sc_vp, vn->sc_size);
		}
	}
	/* any failure above fully detaches the unit again */
	if (error)
		vnclear(vn);
	return(error);
}
682 
683 /*
684  * Duplicate the current processes' credentials.  Since we are called only
685  * as the result of a SET ioctl and only root can do that, any future access
686  * to this "disk" is essentially as root.  Note that credentials may change
687  * if some other uid can write directly to the mapped file (NFS).
688  */
689 int
690 vnsetcred(struct vn_softc *vn, struct ucred *cred)
691 {
692 	char *tmpbuf;
693 	int error = 0;
694 
695 	/*
696 	 * Set credits in our softc
697 	 */
698 
699 	if (vn->sc_cred)
700 		crfree(vn->sc_cred);
701 	vn->sc_cred = crdup(cred);
702 
703 	/*
704 	 * Horrible kludge to establish credentials for NFS  XXX.
705 	 */
706 
707 	if (vn->sc_vp) {
708 		struct uio auio;
709 		struct iovec aiov;
710 
711 		tmpbuf = malloc(vn->sc_secsize, M_TEMP, M_WAITOK);
712 		bzero(&auio, sizeof(auio));
713 
714 		aiov.iov_base = tmpbuf;
715 		aiov.iov_len = vn->sc_secsize;
716 		auio.uio_iov = &aiov;
717 		auio.uio_iovcnt = 1;
718 		auio.uio_offset = 0;
719 		auio.uio_rw = UIO_READ;
720 		auio.uio_segflg = UIO_SYSSPACE;
721 		auio.uio_resid = aiov.iov_len;
722 		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
723 		error = VOP_READ(vn->sc_vp, &auio, 0, vn->sc_cred);
724 		VOP_UNLOCK(vn->sc_vp, 0);
725 		free(tmpbuf, M_TEMP);
726 	}
727 	return (error);
728 }
729 
730 void
731 vnclear(struct vn_softc *vn)
732 {
733 	IFOPT(vn, VN_FOLLOW)
734 		printf("vnclear(%p): vp=%p\n", vn, vn->sc_vp);
735 	if (vn->sc_slices != NULL)
736 		dsgone(&vn->sc_slices);
737 	vn->sc_flags &= ~VNF_INITED;
738 	if (vn->sc_vp != NULL) {
739 		vn_close(vn->sc_vp,
740 		    (vn->sc_flags & VNF_READONLY) ?  FREAD : (FREAD|FWRITE));
741 		vn->sc_vp = NULL;
742 	}
743 	vn->sc_flags &= ~VNF_READONLY;
744 	if (vn->sc_cred) {
745 		crfree(vn->sc_cred);
746 		vn->sc_cred = NULL;
747 	}
748 	if (vn->sc_object != NULL) {
749 		vm_pager_deallocate(vn->sc_object);
750 		vn->sc_object = NULL;
751 	}
752 	vn->sc_size = 0;
753 }
754 
755 static int
756 vnsize(struct dev_psize_args *ap)
757 {
758 	dev_t dev = ap->a_head.a_dev;
759 	struct vn_softc *vn;
760 
761 	vn = dev->si_drv1;
762 	if (!vn)
763 		return(ENXIO);
764 	if ((vn->sc_flags & VNF_INITED) == 0)
765 		return(ENXIO);
766 	ap->a_result = vn->sc_size;
767 	return(0);
768 }
769 
/*
 * Module event handler.  MOD_LOAD registers the dev_ops; MOD_UNLOAD and
 * MOD_SHUTDOWN detach every configured unit, destroy all dev_t's that
 * refer to it, and free the softc's.
 */
static int
vn_modevent(module_t mod, int type, void *data)
{
	struct vn_softc *vn;
	dev_t dev;

	switch (type) {
	case MOD_LOAD:
		dev_ops_add(&vn_ops, 0, 0);
		break;
	case MOD_UNLOAD:
		/* fall through */
	case MOD_SHUTDOWN:
		while ((vn = SLIST_FIRST(&vn_list)) != NULL) {
			SLIST_REMOVE_HEAD(&vn_list, sc_list);
			if (vn->sc_flags & VNF_INITED)
				vnclear(vn);
			/* Cleanup all dev_t's that refer to this unit */
			while ((dev = vn->sc_devlist) != NULL) {
				vn->sc_devlist = dev->si_drv2;
				dev->si_drv1 = dev->si_drv2 = NULL;
				destroy_dev(dev);
			}
			free(vn, M_DEVBUF);
		}
		dev_ops_remove(&vn_ops, 0, 0);
		break;
	default:
		break;
	}
	return 0;
}

/* Register the driver with the module system. */
DEV_MODULE(vn, vn_modevent, 0);
804