xref: /dragonfly/sys/dev/disk/vn/vn.c (revision 19fe1c42)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * from: Utah Hdr: vn.c 1.13 94/04/02
39  *
40  *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
41  * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $
42  * $DragonFly: src/sys/dev/disk/vn/vn.c,v 1.38 2008/07/01 02:02:53 dillon Exp $
43  */
44 
45 /*
46  * Vnode disk driver.
47  *
48  * Block/character interface to a vnode.  Allows one to treat a file
49  * as a disk (e.g. build a filesystem in it, mount it, etc.).
50  *
51  * NOTE 1: There is a security issue involved with this driver.
52  * Once mounted all access to the contents of the "mapped" file via
53  * the special file is controlled by the permissions on the special
54  * file, the protection of the mapped file is ignored (effectively,
55  * by using root credentials in all transactions).
56  *
57  * NOTE 2: Doesn't interact with leases, should it?
58  */
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/proc.h>
64 #include <sys/priv.h>
65 #include <sys/nlookup.h>
66 #include <sys/buf.h>
67 #include <sys/malloc.h>
68 #include <sys/mount.h>
69 #include <sys/vnode.h>
70 #include <sys/fcntl.h>
71 #include <sys/conf.h>
72 #include <sys/diskslice.h>
73 #include <sys/disk.h>
74 #include <sys/stat.h>
75 #include <sys/module.h>
76 #include <sys/vnioctl.h>
77 
78 #include <vm/vm.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_pager.h>
82 #include <vm/vm_pageout.h>
83 #include <vm/swap_pager.h>
84 #include <vm/vm_extern.h>
85 #include <vm/vm_zone.h>
86 
/*
 * Forward declarations for the cdev entry points implemented below.
 */
static	d_ioctl_t	vnioctl;
static	d_open_t	vnopen;
static	d_close_t	vnclose;
static	d_psize_t	vnsize;
static	d_strategy_t	vnstrategy;

/* Character device major number for the vn driver. */
#define CDEV_MAJOR 43

/* Lower bound applied to si_bsize_best in vnopen(). */
#define VN_BSIZE_BEST	8192
96 
/*
 * dev_ops
 *	D_DISK		we want to look like a disk
 *	D_CANFREE	We support BUF_CMD_FREEBLKS
 *
 * Raw read/write requests are serviced by the generic physread/
 * physwrite helpers, which funnel into vnstrategy().
 */

static struct dev_ops vn_ops = {
	{ "vn", CDEV_MAJOR, D_DISK | D_CANFREE },
	.d_open =	vnopen,
	.d_close =	vnclose,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	vnioctl,
	.d_strategy =	vnstrategy,
	.d_psize =	vnsize
};
113 
/*
 * Per-unit softc.  Once configured (VNF_INITED), a unit is backed
 * either by a vnode (sc_vp, file backing via vniocattach_file) or by
 * an anonymous OBJT_SWAP VM object (sc_object, swap backing via
 * vniocattach_swap) -- never both.
 */
struct vn_softc {
	int		sc_unit;	/* unit number			*/
	int		sc_flags;	/* flags 			*/
	u_int64_t	sc_size;	/* size of vn, sc_secsize scale	*/
	int		sc_secsize;	/* sector size			*/
	struct diskslices *sc_slices;	/* XXX fields from struct disk  */
	struct disk_info sc_info;	/* XXX fields from struct disk  */
	struct vnode	*sc_vp;		/* vnode if not NULL		*/
	vm_object_t	sc_object;	/* backing object if not NULL	*/
	struct ucred	*sc_cred;	/* credentials 			*/
	int		 sc_maxactive;	/* max # of active requests 	*/
	struct buf	 sc_tab;	/* transfer queue 		*/
	u_long		 sc_options;	/* options 			*/
	cdev_t		 sc_devlist;	/* devices that refer to this unit */
	SLIST_ENTRY(vn_softc) sc_list;	/* entry on the global vn_list	*/
};
130 
/* All configured softcs, searched by unit number in vnfindvn(). */
static SLIST_HEAD(, vn_softc) vn_list;

/* sc_flags */
#define VNF_INITED	0x01	/* backing store attached		*/
#define	VNF_READONLY	0x02	/* backing file opened without FWRITE	*/

/* Global option bits, ORed with each unit's sc_options. */
static u_long	vn_options;

/* Test the combined (per-unit | global) option mask. */
#define IFOPT(vn,opt) if (((vn)->sc_options|vn_options) & (opt))
#define TESTOPT(vn,opt) (((vn)->sc_options|vn_options) & (opt))

static int	vnsetcred (struct vn_softc *vn, struct ucred *cred);
static void	vnclear (struct vn_softc *vn);
static int	vnget (cdev_t dev, struct vn_softc *vn , struct vn_user *vnu);
static int	vn_modevent (module_t, int, void *);
static int 	vniocattach_file (struct vn_softc *, struct vn_ioctl *, cdev_t dev, int flag, struct ucred *cred);
static int 	vniocattach_swap (struct vn_softc *, struct vn_ioctl *, cdev_t dev, int flag, struct ucred *cred);
148 
149 static	int
150 vnclose(struct dev_close_args *ap)
151 {
152 	cdev_t dev = ap->a_head.a_dev;
153 	struct vn_softc *vn = dev->si_drv1;
154 
155 	IFOPT(vn, VN_LABELS)
156 		if (vn->sc_slices != NULL)
157 			dsclose(dev, ap->a_devtype, vn->sc_slices);
158 	return (0);
159 }
160 
/*
 * Called only when si_drv1 is NULL.  Locate the associated vn node and
 * attach the device to it.
 *
 * Each unit keeps a singly linked list of the cdevs that reference it,
 * headed by sc_devlist and chained through each cdev's si_drv2.  If no
 * softc exists for the unit yet, one is allocated and a canonical
 * "vn%d" device node is created for it as well.
 */
static struct vn_softc *
vnfindvn(cdev_t dev)
{
	int unit;
	struct vn_softc *vn;

	unit = dkunit(dev);
	/* Search for an existing softc with this unit number. */
	SLIST_FOREACH(vn, &vn_list, sc_list) {
		if (vn->sc_unit == unit) {
			/* Push dev onto the unit's device list. */
			dev->si_drv1 = vn;
			dev->si_drv2 = vn->sc_devlist;
			vn->sc_devlist = dev;
			reference_dev(dev);
			break;
		}
	}
	if (vn == NULL) {
		/* No softc yet: allocate one and create the canonical node. */
		vn = kmalloc(sizeof *vn, M_DEVBUF, M_WAITOK | M_ZERO);
		vn->sc_unit = unit;
		dev->si_drv1 = vn;
		vn->sc_devlist = make_dev(&vn_ops, 0, UID_ROOT,
					GID_OPERATOR, 0640, "vn%d", unit);
		if (vn->sc_devlist->si_drv1 == NULL) {
			/* Fresh cdev from make_dev: claim it for this unit. */
			reference_dev(vn->sc_devlist);
			vn->sc_devlist->si_drv1 = vn;
			vn->sc_devlist->si_drv2 = NULL;
		}
		if (vn->sc_devlist != dev) {
			/*
			 * The cdev being opened differs from the canonical
			 * node; chain it onto the device list as well.
			 */
			dev->si_drv1 = vn;
			dev->si_drv2 = vn->sc_devlist;
			vn->sc_devlist = dev;
			reference_dev(dev);
		}
		SLIST_INSERT_HEAD(&vn_list, vn, sc_list);
	}
	return (vn);
}
202 
/*
 * Device open entry point.  Publishes block-size hints on the cdev,
 * enforces read-only backing, and hands label/slice handling off to
 * dsopen() when VN_LABELS is enabled.
 */
static	int
vnopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn;
	struct disk_info *info;

	/*
	 * Locate preexisting device
	 */

	if ((vn = dev->si_drv1) == NULL)
		vn = vnfindvn(dev);

	/*
	 * Update si_bsize fields for device.  This data will be overridden by
	 * the slice/partition code for vn accesses through partitions, and
	 * used directly if you open the 'whole disk' device.
	 *
	 * si_bsize_best must be reinitialized in case VN has been
	 * reconfigured, plus make it at least VN_BSIZE_BEST for efficiency.
	 */
	dev->si_bsize_phys = vn->sc_secsize;
	dev->si_bsize_best = vn->sc_secsize;
	if (dev->si_bsize_best < VN_BSIZE_BEST)
		dev->si_bsize_best = VN_BSIZE_BEST;

	/* Deny writable opens of a unit backed by a read-only file. */
	if ((ap->a_oflags & FWRITE) && (vn->sc_flags & VNF_READONLY))
		return (EACCES);

	IFOPT(vn, VN_FOLLOW)
		kprintf("vnopen(%s, 0x%x, 0x%x)\n",
		    devtoname(dev), ap->a_oflags, ap->a_devtype);

	/*
	 * Initialize label
	 */

	IFOPT(vn, VN_LABELS) {
		if (vn->sc_flags & VNF_INITED) {
			/* Build synthetic geometry for the slice code. */
			info = &vn->sc_info;
			bzero(info, sizeof(*info));
			info->d_media_blksize = vn->sc_secsize;
			info->d_media_blocks = vn->sc_size;
			/*
			 * reserve mbr sector for backwards compatibility
			 * when no slices exist.
			 */
			info->d_dsflags = DSO_COMPATMBR;

			info->d_secpertrack = 32;
			info->d_nheads = 64 / (vn->sc_secsize / DEV_BSIZE);
			info->d_secpercyl = info->d_secpertrack *
					    info->d_nheads;
			info->d_ncylinders = vn->sc_size / info->d_secpercyl;

			return (dsopen(dev, ap->a_devtype, 0,
					&vn->sc_slices, info));
		}
		/*
		 * Unconfigured unit: only the raw whole-disk character
		 * device may be opened (to issue the attach ioctl).
		 */
		if (dkslice(dev) != WHOLE_DISK_SLICE ||
		    dkpart(dev) != WHOLE_SLICE_PART ||
		    ap->a_devtype != S_IFCHR) {
			return (ENXIO);
		}
	}
	return(0);
}
270 
/*
 *	vnstrategy:
 *
 *	Run strategy routine for VN device.  We use VOP_READ/VOP_WRITE calls
 *	for vnode-backed vn's, and the new vm_pager_strategy() call for
 *	vm_object-backed vn's.
 *
 *	Currently B_ASYNC is only partially handled - for OBJT_SWAP I/O only.
 */
static int
vnstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp;
	struct bio *nbio;
	int unit;
	struct vn_softc *vn;
	int error;

	unit = dkunit(dev);
	if ((vn = dev->si_drv1) == NULL)
		vn = vnfindvn(dev);

	bp = bio->bio_buf;

	IFOPT(vn, VN_DEBUG)
		kprintf("vnstrategy(%p): unit %d\n", bp, unit);

	/* Reject I/O on an unconfigured unit. */
	if ((vn->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}

	bp->b_resid = bp->b_bcount;

	IFOPT(vn, VN_LABELS) {
		/*
		 * The vnode device is using disk/slice label support.
		 *
		 * The dscheck() function is called for validating the
		 * slices that exist ON the vnode device itself, and
		 * translate the "slice-relative" block number, again.
		 * dscheck() will call biodone() and return NULL if
		 * we are at EOF or beyond the device size.
		 */
		if (vn->sc_slices == NULL) {
			nbio = bio;
		} else if ((nbio = dscheck(dev, bio, vn->sc_slices)) == NULL) {
			goto done;
		}
	} else {
		int64_t pbn;	/* in sc_secsize chunks */
		long sz;	/* in sc_secsize chunks */

		/*
		 * Check for required alignment.  Transfers must be a valid
		 * multiple of the sector size.
		 */
		if (bp->b_bcount % vn->sc_secsize != 0 ||
		    bio->bio_offset % vn->sc_secsize != 0) {
			goto bad;
		}

		pbn = bio->bio_offset / vn->sc_secsize;
		sz = howmany(bp->b_bcount, vn->sc_secsize);

		/*
		 * Check for an illegal pbn or EOF truncation
		 */
		if (pbn < 0)
			goto bad;
		if (pbn + sz > vn->sc_size) {
			if (pbn > vn->sc_size || (bp->b_flags & B_BNOCLIP))
				goto bad;
			if (pbn == vn->sc_size) {
				/* Request begins exactly at EOF. */
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_INVAL;
				goto done;
			}
			/* Clip the transfer to the device size. */
			bp->b_bcount = (vn->sc_size - pbn) * vn->sc_secsize;
		}
		nbio = push_bio(bio);
		nbio->bio_offset = pbn * vn->sc_secsize;
	}

	/*
	 * Use the translated nbio from this point on
	 */
	if (vn->sc_vp && bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * Freeblks is not handled for vnode-backed elements yet.
		 */
		bp->b_resid = 0;
		/* operation complete */
	} else if (vn->sc_vp) {
		/*
		 * VNODE I/O
		 *
		 * If an error occurs, we set B_ERROR but we do not set
		 * B_INVAL because (for a write anyway), the buffer is
		 * still valid.
		 */
		struct uio auio;
		struct iovec aiov;

		bzero(&auio, sizeof(auio));

		aiov.iov_base = bp->b_data;
		aiov.iov_len = bp->b_bcount;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = nbio->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		if (bp->b_cmd == BUF_CMD_READ)
			auio.uio_rw = UIO_READ;
		else
			auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bp->b_bcount;
		auio.uio_td = curthread;
		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
		if (bp->b_cmd == BUF_CMD_READ)
			error = VOP_READ(vn->sc_vp, &auio, IO_DIRECT | IO_RECURSE, vn->sc_cred);
		else
			error = VOP_WRITE(vn->sc_vp, &auio, IO_DIRECT | IO_RECURSE, vn->sc_cred);
		vn_unlock(vn->sc_vp);
		bp->b_resid = auio.uio_resid;
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
		}
		/* operation complete */
	} else if (vn->sc_object) {
		/*
		 * OBJT_SWAP I/O (handles read, write, freebuf)
		 *
		 * We have nothing to do if freeing blocks on a reserved
		 * swap area, otherwise execute the op.
		 */
		if (bp->b_cmd == BUF_CMD_FREEBLKS && TESTOPT(vn, VN_RESERVE)) {
			bp->b_resid = 0;
			/* operation complete */
		} else {
			/* The pager completes the bio asynchronously. */
			vm_pager_strategy(vn->sc_object, nbio);
			return(0);
			/* NOT REACHED */
		}
	} else {
		/* Neither vnode nor swap backing: should not happen. */
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		/* operation complete */
	}
	biodone(nbio);
	return(0);

	/*
	 * Shortcuts / check failures on the original bio (not nbio).
	 */
bad:
	bp->b_error = EINVAL;
	bp->b_flags |= B_ERROR | B_INVAL;
done:
	biodone(bio);
	return(0);
}
439 
440 /* ARGSUSED */
441 static	int
442 vnioctl(struct dev_ioctl_args *ap)
443 {
444 	cdev_t dev = ap->a_head.a_dev;
445 	struct vn_softc *vn;
446 	struct vn_ioctl *vio;
447 	int error;
448 	u_long *f;
449 
450 	vn = dev->si_drv1;
451 	IFOPT(vn,VN_FOLLOW) {
452 		kprintf("vnioctl(%s, 0x%lx, %p, 0x%x): unit %d\n",
453 		    devtoname(dev), ap->a_cmd, ap->a_data, ap->a_fflag,
454 		    dkunit(dev));
455 	}
456 
457 	switch (ap->a_cmd) {
458 	case VNIOCATTACH:
459 	case VNIOCDETACH:
460 	case VNIOCGSET:
461 	case VNIOCGCLEAR:
462 	case VNIOCGET:
463 	case VNIOCUSET:
464 	case VNIOCUCLEAR:
465 		goto vn_specific;
466 	}
467 
468 	IFOPT(vn,VN_LABELS) {
469 		if (vn->sc_slices != NULL) {
470 			error = dsioctl(dev, ap->a_cmd, ap->a_data,
471 					ap->a_fflag,
472 					&vn->sc_slices, &vn->sc_info);
473 			if (error != ENOIOCTL)
474 				return (error);
475 		}
476 		if (dkslice(dev) != WHOLE_DISK_SLICE ||
477 		    dkpart(dev) != WHOLE_SLICE_PART)
478 			return (ENOTTY);
479 	}
480 
481     vn_specific:
482 
483 	error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
484 	if (error)
485 		return (error);
486 
487 	vio = (struct vn_ioctl *)ap->a_data;
488 	f = (u_long*)ap->a_data;
489 
490 	switch (ap->a_cmd) {
491 	case VNIOCATTACH:
492 		if (vn->sc_flags & VNF_INITED)
493 			return(EBUSY);
494 
495 		if (vio->vn_file == NULL)
496 			error = vniocattach_swap(vn, vio, dev, ap->a_fflag, ap->a_cred);
497 		else
498 			error = vniocattach_file(vn, vio, dev, ap->a_fflag, ap->a_cred);
499 		break;
500 
501 	case VNIOCDETACH:
502 		if ((vn->sc_flags & VNF_INITED) == 0)
503 			return(ENXIO);
504 		/*
505 		 * XXX handle i/o in progress.  Return EBUSY, or wait, or
506 		 * flush the i/o.
507 		 * XXX handle multiple opens of the device.  Return EBUSY,
508 		 * or revoke the fd's.
509 		 * How are these problems handled for removable and failing
510 		 * hardware devices? (Hint: They are not)
511 		 */
512 		vnclear(vn);
513 		IFOPT(vn, VN_FOLLOW)
514 			kprintf("vnioctl: CLRed\n");
515 		break;
516 
517 	case VNIOCGET:
518 		error = vnget(dev, vn, (struct vn_user *) ap->a_data);
519 		break;
520 
521 	case VNIOCGSET:
522 		vn_options |= *f;
523 		*f = vn_options;
524 		break;
525 
526 	case VNIOCGCLEAR:
527 		vn_options &= ~(*f);
528 		*f = vn_options;
529 		break;
530 
531 	case VNIOCUSET:
532 		vn->sc_options |= *f;
533 		*f = vn->sc_options;
534 		break;
535 
536 	case VNIOCUCLEAR:
537 		vn->sc_options &= ~(*f);
538 		*f = vn->sc_options;
539 		break;
540 
541 	default:
542 		error = ENOTTY;
543 		break;
544 	}
545 	return(error);
546 }
547 
/*
 *	vniocattach_file:
 *
 *	Attach a file to a VN partition.  Return the size in the vn_size
 *	field.
 */

static int
vniocattach_file(struct vn_softc *vn, struct vn_ioctl *vio, cdev_t dev,
		 int flag, struct ucred *cred)
{
	struct vattr vattr;
	struct nlookupdata nd;
	int error, flags;
	struct vnode *vp;

	flags = FREAD|FWRITE;
	error = nlookup_init(&nd, vio->vn_file,
				UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
	if (error)
		return (error);
	/*
	 * Try a read/write open first; on a permission-style failure
	 * (EACCES/EPERM/EROFS) retry the open read-only.
	 */
	if ((error = vn_open(&nd, NULL, flags, 0)) != 0) {
		if (error != EACCES && error != EPERM && error != EROFS)
			goto done;
		flags &= ~FWRITE;
		nlookup_done(&nd);
		error = nlookup_init(&nd, vio->vn_file, UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
		if (error)
			return (error);
		if ((error = vn_open(&nd, NULL, flags, 0)) != 0)
			goto done;
	}
	/* Only regular files may back a vn device. */
	vp = nd.nl_open_vp;
	if (vp->v_type != VREG ||
	    (error = VOP_GETATTR(vp, &vattr))) {
		if (error == 0)
			error = EINVAL;
		goto done;
	}
	vn_unlock(vp);
	vn->sc_secsize = DEV_BSIZE;
	vn->sc_vp = vp;
	/*
	 * Clear nl_open_vp so nlookup_done() below does not release the
	 * vnode we have just taken ownership of.
	 */
	nd.nl_open_vp = NULL;

	/*
	 * If the size is specified, override the file attributes.  Note that
	 * the vn_size argument is in PAGE_SIZE sized blocks.
	 */
	if (vio->vn_size)
		vn->sc_size = vio->vn_size * PAGE_SIZE / vn->sc_secsize;
	else
		vn->sc_size = vattr.va_size / vn->sc_secsize;
	error = vnsetcred(vn, cred);
	if (error) {
		vn->sc_vp = NULL;
		vn_close(vp, flags);
		goto done;
	}
	vn->sc_flags |= VNF_INITED;
	/* flags == FREAD means the FWRITE retry path was taken above. */
	if (flags == FREAD)
		vn->sc_flags |= VNF_READONLY;
	IFOPT(vn, VN_LABELS) {
		/*
		 * Reopen so that `ds' knows which devices are open.
		 * If this is the first VNIOCSET, then we've
		 * guaranteed that the device is the cdev and that
		 * no other slices or labels are open.  Otherwise,
		 * we rely on VNIOCCLR not being abused.
		 */
		error = dev_dopen(dev, flag, S_IFCHR, cred);
		if (error)
			vnclear(vn);
	}
	IFOPT(vn, VN_FOLLOW)
		kprintf("vnioctl: SET vp %p size %llx blks\n",
		       vn->sc_vp, vn->sc_size);
done:
	nlookup_done(&nd);
	return(error);
}
628 
/*
 *	vniocattach_swap:
 *
 *	Attach swap backing store to a VN partition of the size specified
 *	in vn_size.
 */

static int
vniocattach_swap(struct vn_softc *vn, struct vn_ioctl *vio, cdev_t dev,
		 int flag, struct ucred *cred)
{
	int error;

	/*
	 * Range check.  Disallow negative sizes or any size less then the
	 * size of a page.  Then round to a page.
	 */

	if (vio->vn_size <= 0)
		return(EDOM);

	/*
	 * Allocate an OBJT_SWAP object.
	 *
	 * sc_secsize is PAGE_SIZE'd
	 *
	 * vio->vn_size is in PAGE_SIZE'd chunks.
	 * sc_size must be in PAGE_SIZE'd chunks.
	 * Note the truncation.
	 */

	vn->sc_secsize = PAGE_SIZE;
	vn->sc_size = vio->vn_size;
	vn->sc_object = vm_pager_allocate(OBJT_SWAP, NULL,
					  vn->sc_secsize * (off_t)vio->vn_size,
					  VM_PROT_DEFAULT, 0);
	IFOPT(vn, VN_RESERVE) {
		/* Pre-reserve the swap backing the entire object. */
		if (swap_pager_reserve(vn->sc_object, 0, vn->sc_size) < 0) {
			vm_pager_deallocate(vn->sc_object);
			vn->sc_object = NULL;
			return(EDOM);
		}
	}
	vn->sc_flags |= VNF_INITED;

	error = vnsetcred(vn, cred);
	if (error == 0) {
		IFOPT(vn, VN_LABELS) {
			/*
			 * Reopen so that `ds' knows which devices are open.
			 * If this is the first VNIOCSET, then we've
			 * guaranteed that the device is the cdev and that
			 * no other slices or labels are open.  Otherwise,
			 * we rely on VNIOCCLR not being abused.
			 */
			error = dev_dopen(dev, flag, S_IFCHR, cred);
		}
	}
	if (error == 0) {
		IFOPT(vn, VN_FOLLOW) {
			kprintf("vnioctl: SET vp %p size %llx\n",
			       vn->sc_vp, vn->sc_size);
		}
	}
	/* Any failure above tears the unit back down. */
	if (error)
		vnclear(vn);
	return(error);
}
697 
698 /*
699  * Duplicate the current processes' credentials.  Since we are called only
700  * as the result of a SET ioctl and only root can do that, any future access
701  * to this "disk" is essentially as root.  Note that credentials may change
702  * if some other uid can write directly to the mapped file (NFS).
703  */
704 int
705 vnsetcred(struct vn_softc *vn, struct ucred *cred)
706 {
707 	char *tmpbuf;
708 	int error = 0;
709 
710 	/*
711 	 * Set credits in our softc
712 	 */
713 
714 	if (vn->sc_cred)
715 		crfree(vn->sc_cred);
716 	vn->sc_cred = crdup(cred);
717 
718 	/*
719 	 * Horrible kludge to establish credentials for NFS  XXX.
720 	 */
721 
722 	if (vn->sc_vp) {
723 		struct uio auio;
724 		struct iovec aiov;
725 
726 		tmpbuf = kmalloc(vn->sc_secsize, M_TEMP, M_WAITOK);
727 		bzero(&auio, sizeof(auio));
728 
729 		aiov.iov_base = tmpbuf;
730 		aiov.iov_len = vn->sc_secsize;
731 		auio.uio_iov = &aiov;
732 		auio.uio_iovcnt = 1;
733 		auio.uio_offset = 0;
734 		auio.uio_rw = UIO_READ;
735 		auio.uio_segflg = UIO_SYSSPACE;
736 		auio.uio_resid = aiov.iov_len;
737 		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
738 		error = VOP_READ(vn->sc_vp, &auio, 0, vn->sc_cred);
739 		vn_unlock(vn->sc_vp);
740 		kfree(tmpbuf, M_TEMP);
741 	}
742 	return (error);
743 }
744 
745 void
746 vnclear(struct vn_softc *vn)
747 {
748 	IFOPT(vn, VN_FOLLOW)
749 		kprintf("vnclear(%p): vp=%p\n", vn, vn->sc_vp);
750 	if (vn->sc_slices != NULL)
751 		dsgone(&vn->sc_slices);
752 	vn->sc_flags &= ~VNF_INITED;
753 	if (vn->sc_vp != NULL) {
754 		vn_close(vn->sc_vp,
755 		    (vn->sc_flags & VNF_READONLY) ?  FREAD : (FREAD|FWRITE));
756 		vn->sc_vp = NULL;
757 	}
758 	vn->sc_flags &= ~VNF_READONLY;
759 	if (vn->sc_cred) {
760 		crfree(vn->sc_cred);
761 		vn->sc_cred = NULL;
762 	}
763 	if (vn->sc_object != NULL) {
764 		vm_pager_deallocate(vn->sc_object);
765 		vn->sc_object = NULL;
766 	}
767 	vn->sc_size = 0;
768 }
769 
/*
 * 	vnget:
 *
 *	populate a struct vn_user for the VNIOCGET ioctl.
 *	interface conventions defined in sys/sys/vnioctl.h.
 */

static int
vnget(cdev_t dev, struct vn_softc *vn, struct vn_user *vnu)
{
	int error, found = 0;
	char *freepath, *fullpath;
	struct vattr vattr;

	/* A unit of -1 means "the unit of the device being queried". */
	if (vnu->vnu_unit == -1) {
		vnu->vnu_unit = dkunit(dev);
	}
	else if (vnu->vnu_unit < 0)
		return (EINVAL);

	SLIST_FOREACH(vn, &vn_list, sc_list) {

		if(vn->sc_unit != vnu->vnu_unit)
			continue;

		found = 1;

		if (vn->sc_flags & VNF_INITED && vn->sc_vp != NULL) {
			/* File-backed unit: report path, fsid and inode. */

			/* note: u_cred checked in vnioctl above */
			error = VOP_GETATTR(vn->sc_vp, &vattr);
			if (error) {
				kprintf("vnget: VOP_GETATTR for %p failed\n",
					vn->sc_vp);
				return (error);
			}

			error = vn_fullpath(curproc, vn->sc_vp,
						&fullpath, &freepath);

			if (error) {
				kprintf("vnget: unable to resolve vp %p\n",
					vn->sc_vp);
				return(error);
			}

			strlcpy(vnu->vnu_file, fullpath,
				sizeof(vnu->vnu_file));
			kfree(freepath, M_TEMP);
			vnu->vnu_dev = vattr.va_fsid;
			vnu->vnu_ino = vattr.va_fileid;

		}
		else if (vn->sc_flags & VNF_INITED && vn->sc_object != NULL){
			/* Swap-backed unit: report the marker and sizes. */

			strlcpy(vnu->vnu_file, _VN_USER_SWAP,
				sizeof(vnu->vnu_file));
			vnu->vnu_size = vn->sc_size;
			vnu->vnu_secsize = vn->sc_secsize;

		} else {
			/* Unconfigured unit: zero the returned fields. */

			bzero(vnu->vnu_file, sizeof(vnu->vnu_file));
			vnu->vnu_dev = 0;
			vnu->vnu_ino = 0;

		}
		break;
	}

	if (!found)
		return(ENXIO);

	return(0);
}
845 
846 static int
847 vnsize(struct dev_psize_args *ap)
848 {
849 	cdev_t dev = ap->a_head.a_dev;
850 	struct vn_softc *vn;
851 
852 	vn = dev->si_drv1;
853 	if (!vn)
854 		return(ENXIO);
855 	if ((vn->sc_flags & VNF_INITED) == 0)
856 		return(ENXIO);
857 	ap->a_result = (int64_t)vn->sc_size;
858 	return(0);
859 }
860 
/*
 * Module event handler.  On load, register the dev_ops.  On unload or
 * shutdown, clear every configured unit, destroy all cdevs that refer
 * to it, free the softcs, and unregister the dev_ops.
 */
static int
vn_modevent(module_t mod, int type, void *data)
{
	struct vn_softc *vn;
	cdev_t dev;

	switch (type) {
	case MOD_LOAD:
		dev_ops_add(&vn_ops, 0, 0);
		break;
	case MOD_UNLOAD:
		/* fall through */
	case MOD_SHUTDOWN:
		while ((vn = SLIST_FIRST(&vn_list)) != NULL) {
			SLIST_REMOVE_HEAD(&vn_list, sc_list);
			if (vn->sc_flags & VNF_INITED)
				vnclear(vn);
			/* Cleanup all cdev_t's that refer to this unit */
			while ((dev = vn->sc_devlist) != NULL) {
				vn->sc_devlist = dev->si_drv2;
				dev->si_drv1 = dev->si_drv2 = NULL;
				destroy_dev(dev);
			}
			kfree(vn, M_DEVBUF);
		}
		dev_ops_remove(&vn_ops, 0, 0);
		break;
	default:
		break;
	}
	return 0;
}
893 
/* Register the vn module; vn_modevent handles load/unload/shutdown. */
DEV_MODULE(vn, vn_modevent, 0);
895