xref: /dragonfly/sys/dev/disk/vn/vn.c (revision 3cb6a21a)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1990, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  * from: Utah Hdr: vn.c 1.13 94/04/02
39  *
40  *	from: @(#)vn.c	8.6 (Berkeley) 4/1/94
41  * $FreeBSD: src/sys/dev/vn/vn.c,v 1.105.2.4 2001/11/18 07:11:00 dillon Exp $
42  * $DragonFly: src/sys/dev/disk/vn/vn.c,v 1.38 2008/07/01 02:02:53 dillon Exp $
43  */
44 
45 /*
46  * Vnode disk driver.
47  *
48  * Block/character interface to a vnode.  Allows one to treat a file
49  * as a disk (e.g. build a filesystem in it, mount it, etc.).
50  *
51  * NOTE 1: There is a security issue involved with this driver.
52  * Once mounted all access to the contents of the "mapped" file via
53  * the special file is controlled by the permissions on the special
54  * file, the protection of the mapped file is ignored (effectively,
55  * by using root credentials in all transactions).
56  *
57  * NOTE 2: Doesn't interact with leases, should it?
58  */
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/kernel.h>
63 #include <sys/proc.h>
64 #include <sys/priv.h>
65 #include <sys/nlookup.h>
66 #include <sys/buf.h>
67 #include <sys/malloc.h>
68 #include <sys/mount.h>
69 #include <sys/vnode.h>
70 #include <sys/fcntl.h>
71 #include <sys/conf.h>
72 #include <sys/diskslice.h>
73 #include <sys/disk.h>
74 #include <sys/stat.h>
75 #include <sys/module.h>
76 #include <sys/vnioctl.h>
77 
78 #include <vm/vm.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_pager.h>
82 #include <vm/vm_pageout.h>
83 #include <vm/swap_pager.h>
84 #include <vm/vm_extern.h>
85 #include <vm/vm_zone.h>
86 
87 static	d_ioctl_t	vnioctl;
88 static	d_open_t	vnopen;
89 static	d_close_t	vnclose;
90 static	d_psize_t	vnsize;
91 static	d_strategy_t	vnstrategy;
92 
93 #define CDEV_MAJOR 43
94 
95 #define VN_BSIZE_BEST	8192
96 
/*
 * dev_ops
 *	D_DISK		we want to look like a disk
 *	D_CANFREE	We support BUF_CMD_FREEBLKS
 */

static struct dev_ops vn_ops = {
	{ "vn", CDEV_MAJOR, D_DISK | D_CANFREE },
	.d_open =	vnopen,
	.d_close =	vnclose,
	.d_read =	physread,	/* generic physio-based raw read */
	.d_write =	physwrite,	/* generic physio-based raw write */
	.d_ioctl =	vnioctl,
	.d_strategy =	vnstrategy,
	.d_psize =	vnsize
};
113 
/*
 * Per-unit softc.  A configured unit is backed either by a vnode
 * (sc_vp, set by vniocattach_file) or by an anonymous OBJT_SWAP VM
 * object (sc_object, set by vniocattach_swap).
 */
struct vn_softc {
	int		sc_unit;	/* unit number (dkunit of minor)*/
	int		sc_flags;	/* flags (VNF_*)		*/
	u_int64_t	sc_size;	/* size of vn, sc_secsize scale	*/
	int		sc_secsize;	/* sector size			*/
	struct diskslices *sc_slices;	/* XXX fields from struct disk  */
	struct disk_info sc_info;	/* XXX fields from struct disk  */
	struct vnode	*sc_vp;		/* vnode if not NULL		*/
	vm_object_t	sc_object;	/* backing object if not NULL	*/
	struct ucred	*sc_cred;	/* credentials 			*/
	int		 sc_maxactive;	/* max # of active requests 	*/
	struct buf	 sc_tab;	/* transfer queue 		*/
	u_long		 sc_options;	/* options 			*/
	cdev_t		 sc_devlist;	/* devices that refer to this unit */
	SLIST_ENTRY(vn_softc) sc_list;	/* link on global vn_list	*/
};
130 
/* Global list of all vn unit softcs. */
static SLIST_HEAD(, vn_softc) vn_list;

/* sc_flags */
#define VNF_INITED	0x01	/* backing store attached (VNIOCATTACH) */
#define	VNF_READONLY	0x02	/* backing file opened without FWRITE	*/

/* Global option bits, OR'd with each unit's sc_options (see IFOPT). */
static u_long	vn_options;

#define IFOPT(vn,opt) if (((vn)->sc_options|vn_options) & (opt))
#define TESTOPT(vn,opt) (((vn)->sc_options|vn_options) & (opt))

static int	vnsetcred (struct vn_softc *vn, struct ucred *cred);
static void	vnclear (struct vn_softc *vn);
static int	vnget (cdev_t dev, struct vn_softc *vn , struct vn_user *vnu);
static int	vn_modevent (module_t, int, void *);
static int 	vniocattach_file (struct vn_softc *, struct vn_ioctl *, cdev_t dev, int flag, struct ucred *cred);
static int 	vniocattach_swap (struct vn_softc *, struct vn_ioctl *, cdev_t dev, int flag, struct ucred *cred);
148 
149 static	int
150 vnclose(struct dev_close_args *ap)
151 {
152 	cdev_t dev = ap->a_head.a_dev;
153 	struct vn_softc *vn = dev->si_drv1;
154 
155 	IFOPT(vn, VN_LABELS)
156 		if (vn->sc_slices != NULL)
157 			dsclose(dev, ap->a_devtype, vn->sc_slices);
158 	return (0);
159 }
160 
/*
 * Called only when si_drv1 is NULL.  Locate the associated vn node and
 * attach the device to it.
 *
 * All cdevs referring to a unit are chained from sc_devlist through
 * their si_drv2 fields; each chained cdev holds a device reference.
 */
static struct vn_softc *
vnfindvn(cdev_t dev)
{
	int unit;
	struct vn_softc *vn;

	unit = dkunit(dev);
	/*
	 * If a softc for this unit already exists, push this cdev onto
	 * the unit's device chain and reference it.
	 */
	SLIST_FOREACH(vn, &vn_list, sc_list) {
		if (vn->sc_unit == unit) {
			dev->si_drv1 = vn;
			dev->si_drv2 = vn->sc_devlist;
			vn->sc_devlist = dev;
			reference_dev(dev);
			break;
		}
	}
	if (vn == NULL) {
		/*
		 * First sighting of this unit: allocate a softc and
		 * create the canonical "vn%d" device node.
		 */
		vn = kmalloc(sizeof *vn, M_DEVBUF, M_WAITOK | M_ZERO);
		vn->sc_unit = unit;
		dev->si_drv1 = vn;
		vn->sc_devlist = make_dev(&vn_ops, 0, UID_ROOT,
					GID_OPERATOR, 0640, "vn%d", unit);
		if (vn->sc_devlist->si_drv1 == NULL) {
			/* freshly created node: bind it to this softc */
			reference_dev(vn->sc_devlist);
			vn->sc_devlist->si_drv1 = vn;
			vn->sc_devlist->si_drv2 = NULL;
		}
		if (vn->sc_devlist != dev) {
			/*
			 * We were entered via a different cdev (e.g. an
			 * alias); chain it ahead of the canonical node.
			 */
			dev->si_drv1 = vn;
			dev->si_drv2 = vn->sc_devlist;
			vn->sc_devlist = dev;
			reference_dev(dev);
		}
		SLIST_INSERT_HEAD(&vn_list, vn, sc_list);
	}
	return (vn);
}
202 
/*
 * Device open entry point.  Publishes block-size hints on the cdev and,
 * when label support is enabled, hands a configured unit to the disk
 * slice layer.  An unconfigured unit may only be opened as the raw
 * whole-disk character device (so the attach ioctl can be issued).
 */
static	int
vnopen(struct dev_open_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn;
	struct disk_info *info;

	/*
	 * Locate preexisting device
	 */

	if ((vn = dev->si_drv1) == NULL)
		vn = vnfindvn(dev);

	/*
	 * Update si_bsize fields for device.  This data will be overridden by
	 * the slice/partition code for vn accesses through partitions, and
	 * used directly if you open the 'whole disk' device.
	 *
	 * si_bsize_best must be reinitialized in case VN has been
	 * reconfigured, plus make it at least VN_BSIZE_BEST for efficiency.
	 */
	dev->si_bsize_phys = vn->sc_secsize;
	dev->si_bsize_best = vn->sc_secsize;
	if (dev->si_bsize_best < VN_BSIZE_BEST)
		dev->si_bsize_best = VN_BSIZE_BEST;

	/* A unit attached read-only cannot be opened for writing. */
	if ((ap->a_oflags & FWRITE) && (vn->sc_flags & VNF_READONLY))
		return (EACCES);

	IFOPT(vn, VN_FOLLOW)
		kprintf("vnopen(%s, 0x%x, 0x%x)\n",
		    devtoname(dev), ap->a_oflags, ap->a_devtype);

	/*
	 * Initialize label
	 */

	IFOPT(vn, VN_LABELS) {
		if (vn->sc_flags & VNF_INITED) {
			/*
			 * Synthesize a disk_info from the configured size
			 * and sector size, then let the slice layer
			 * complete the open.
			 */
			info = &vn->sc_info;
			bzero(info, sizeof(*info));
			info->d_media_blksize = vn->sc_secsize;
			info->d_media_blocks = vn->sc_size;
			/*
			 * reserve mbr sector for backwards compatibility
			 * when no slices exist.
			 */
			info->d_dsflags = DSO_COMPATMBR;

			/*
			 * Fabricated geometry.  nheads scales inversely
			 * with the sector size so a cylinder stays
			 * 32 * 64 * DEV_BSIZE bytes regardless of
			 * sc_secsize.
			 */
			info->d_secpertrack = 32;
			info->d_nheads = 64 / (vn->sc_secsize / DEV_BSIZE);
			info->d_secpercyl = info->d_secpertrack *
					    info->d_nheads;
			info->d_ncylinders = vn->sc_size / info->d_secpercyl;

			return (dsopen(dev, ap->a_devtype, 0,
					&vn->sc_slices, info));
		}
		/*
		 * Not configured: refuse everything except the raw
		 * whole-disk character device.
		 */
		if (dkslice(dev) != WHOLE_DISK_SLICE ||
		    dkpart(dev) != WHOLE_SLICE_PART ||
		    ap->a_devtype != S_IFCHR) {
			return (ENXIO);
		}
	}
	return(0);
}
270 
/*
 *	vnstrategy:
 *
 *	Run strategy routine for VN device.  We use VOP_READ/VOP_WRITE calls
 *	for vnode-backed vn's, and the new vm_pager_strategy() call for
 *	vm_object-backed vn's.
 *
 *	Currently B_ASYNC is only partially handled - for OBJT_SWAP I/O only.
 */
static int
vnstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp;
	struct bio *nbio;
	int unit;
	struct vn_softc *vn;
	int error;

	unit = dkunit(dev);
	if ((vn = dev->si_drv1) == NULL)
		vn = vnfindvn(dev);

	bp = bio->bio_buf;

	IFOPT(vn, VN_DEBUG)
		kprintf("vnstrategy(%p): unit %d\n", bp, unit);

	/* Reject I/O against a unit with no backing store attached. */
	if ((vn->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}

	bp->b_resid = bp->b_bcount;

	IFOPT(vn, VN_LABELS) {
	    	/*
		 * The vnode device is using disk/slice label support.
		 *
		 * The dscheck() function is called for validating the
		 * slices that exist ON the vnode device itself, and
		 * translate the "slice-relative" block number, again.
		 * dscheck() will call biodone() and return NULL if
		 * we are at EOF or beyond the device size.
		 */
		if (vn->sc_slices == NULL) {
			nbio = bio;
		} else if ((nbio = dscheck(dev, bio, vn->sc_slices)) == NULL) {
			goto done;
		}
	} else {
		int64_t pbn;	/* in sc_secsize chunks */
		long sz;	/* in sc_secsize chunks */

		/*
		 * Check for required alignment.  Transfers must be a valid
		 * multiple of the sector size.
		 */
		if (bp->b_bcount % vn->sc_secsize != 0 ||
		    bio->bio_offset % vn->sc_secsize != 0) {
			goto bad;
		}

		pbn = bio->bio_offset / vn->sc_secsize;
		sz = howmany(bp->b_bcount, vn->sc_secsize);

		/*
		 * Check for an illegal pbn or EOF truncation
		 */
		if (pbn < 0)
			goto bad;
		if (pbn + sz > vn->sc_size) {
			/* past-EOF start or callers that forbid clipping */
			if (pbn > vn->sc_size || (bp->b_flags & B_BNOCLIP))
				goto bad;
			if (pbn == vn->sc_size) {
				/* exactly at EOF: succeed with no data */
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_INVAL;
				goto done;
			}
			/* clip the transfer to end at the device size */
			bp->b_bcount = (vn->sc_size - pbn) * vn->sc_secsize;
		}
		/* push a translated bio carrying the byte offset */
		nbio = push_bio(bio);
		nbio->bio_offset = pbn * vn->sc_secsize;
	}

	/*
	 * Use the translated nbio from this point on
	 */
	if (vn->sc_vp && bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * Freeblks is not handled for vnode-backed elements yet.
		 */
		bp->b_resid = 0;
		/* operation complete */
	} else if (vn->sc_vp) {
		/*
		 * VNODE I/O
		 *
		 * If an error occurs, we set B_ERROR but we do not set
		 * B_INVAL because (for a write anyway), the buffer is
		 * still valid.
		 */
		struct uio auio;
		struct iovec aiov;

		bzero(&auio, sizeof(auio));

		aiov.iov_base = bp->b_data;
		aiov.iov_len = bp->b_bcount;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = nbio->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		if (bp->b_cmd == BUF_CMD_READ)
			auio.uio_rw = UIO_READ;
		else
			auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bp->b_bcount;
		auio.uio_td = curthread;
		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
		if (bp->b_cmd == BUF_CMD_READ)
			error = VOP_READ(vn->sc_vp, &auio, IO_DIRECT | IO_RECURSE, vn->sc_cred);
		else
			error = VOP_WRITE(vn->sc_vp, &auio, IO_DIRECT | IO_RECURSE, vn->sc_cred);
		vn_unlock(vn->sc_vp);
		bp->b_resid = auio.uio_resid;
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
		}
		/* operation complete */
	} else if (vn->sc_object) {
		/*
		 * OBJT_SWAP I/O (handles read, write, freebuf)
		 *
		 * We have nothing to do if freeing  blocks on a reserved
		 * swap area, otherwise execute the op.
		 */
		if (bp->b_cmd == BUF_CMD_FREEBLKS && TESTOPT(vn, VN_RESERVE)) {
			bp->b_resid = 0;
			/* operation complete */
		} else {
			/* the pager completes the bio, possibly async */
			vm_pager_strategy(vn->sc_object, nbio);
			return(0);
			/* NOT REACHED */
		}
	} else {
		/* neither vnode nor VM object backing: should not happen */
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		/* operation complete */
	}
	biodone(nbio);
	return(0);

	/*
	 * Shortcuts / check failures on the original bio (not nbio).
	 */
bad:
	bp->b_error = EINVAL;
	bp->b_flags |= B_ERROR | B_INVAL;
done:
	biodone(bio);
	return(0);
}
439 
/*
 * Ioctl entry point.  vn-specific ioctls (attach/detach, option get/set,
 * status query) bypass the slice layer; everything else is offered to
 * dsioctl() first when label support is enabled.
 */
/* ARGSUSED */
static	int
vnioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct vn_softc *vn;
	struct vn_ioctl *vio;
	int error;
	u_long *f;

	vn = dev->si_drv1;
	IFOPT(vn,VN_FOLLOW) {
		kprintf("vnioctl(%s, 0x%lx, %p, 0x%x): unit %d\n",
		    devtoname(dev), ap->a_cmd, ap->a_data, ap->a_fflag,
		    dkunit(dev));
	}

	switch (ap->a_cmd) {
	case VNIOCATTACH:
	case VNIOCDETACH:
	case VNIOCGSET:
	case VNIOCGCLEAR:
	case VNIOCGET:
	case VNIOCUSET:
	case VNIOCUCLEAR:
		goto vn_specific;
	}

	/*
	 * Non-vn ioctls go to the slice layer; only the raw whole-disk
	 * device may fall through past it.
	 */
	IFOPT(vn,VN_LABELS) {
		if (vn->sc_slices != NULL) {
			error = dsioctl(dev, ap->a_cmd, ap->a_data,
					ap->a_fflag,
					&vn->sc_slices, &vn->sc_info);
			if (error != ENOIOCTL)
				return (error);
		}
		if (dkslice(dev) != WHOLE_DISK_SLICE ||
		    dkpart(dev) != WHOLE_SLICE_PART)
			return (ENOTTY);
	}

    vn_specific:

	/* All vn-specific operations require root privilege. */
	error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0);
	if (error)
		return (error);

	/* error is 0 here, so cases that just 'break' return success */
	vio = (struct vn_ioctl *)ap->a_data;
	f = (u_long*)ap->a_data;

	switch (ap->a_cmd) {
	case VNIOCATTACH:
		if (vn->sc_flags & VNF_INITED)
			return(EBUSY);

		/* a NULL filename selects anonymous swap backing */
		if (vio->vn_file == NULL)
			error = vniocattach_swap(vn, vio, dev, ap->a_fflag, ap->a_cred);
		else
			error = vniocattach_file(vn, vio, dev, ap->a_fflag, ap->a_cred);
		break;

	case VNIOCDETACH:
		if ((vn->sc_flags & VNF_INITED) == 0)
			return(ENXIO);
		/*
		 * XXX handle i/o in progress.  Return EBUSY, or wait, or
		 * flush the i/o.
		 * XXX handle multiple opens of the device.  Return EBUSY,
		 * or revoke the fd's.
		 * How are these problems handled for removable and failing
		 * hardware devices? (Hint: They are not)
		 */
		if (count_dev(vn->sc_devlist) > 1)
			return (EBUSY);

		vnclear(vn);
		IFOPT(vn, VN_FOLLOW)
			kprintf("vnioctl: CLRed\n");
		break;

	case VNIOCGET:
		error = vnget(dev, vn, (struct vn_user *) ap->a_data);
		break;

	case VNIOCGSET:
		/* set global option bits, return the resulting mask */
		vn_options |= *f;
		*f = vn_options;
		break;

	case VNIOCGCLEAR:
		/* clear global option bits, return the resulting mask */
		vn_options &= ~(*f);
		*f = vn_options;
		break;

	case VNIOCUSET:
		/* set per-unit option bits, return the resulting mask */
		vn->sc_options |= *f;
		*f = vn->sc_options;
		break;

	case VNIOCUCLEAR:
		/* clear per-unit option bits, return the resulting mask */
		vn->sc_options &= ~(*f);
		*f = vn->sc_options;
		break;

	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
550 
551 /*
552  *	vniocattach_file:
553  *
554  *	Attach a file to a VN partition.  Return the size in the vn_size
555  *	field.
556  */
557 
558 static int
559 vniocattach_file(struct vn_softc *vn, struct vn_ioctl *vio, cdev_t dev,
560 		 int flag, struct ucred *cred)
561 {
562 	struct vattr vattr;
563 	struct nlookupdata nd;
564 	int error, flags;
565 	struct vnode *vp;
566 
567 	flags = FREAD|FWRITE;
568 	error = nlookup_init(&nd, vio->vn_file,
569 				UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
570 	if (error)
571 		return (error);
572 	if ((error = vn_open(&nd, NULL, flags, 0)) != 0) {
573 		if (error != EACCES && error != EPERM && error != EROFS)
574 			goto done;
575 		flags &= ~FWRITE;
576 		nlookup_done(&nd);
577 		error = nlookup_init(&nd, vio->vn_file, UIO_USERSPACE, NLC_FOLLOW|NLC_LOCKVP);
578 		if (error)
579 			return (error);
580 		if ((error = vn_open(&nd, NULL, flags, 0)) != 0)
581 			goto done;
582 	}
583 	vp = nd.nl_open_vp;
584 	if (vp->v_type != VREG ||
585 	    (error = VOP_GETATTR(vp, &vattr))) {
586 		if (error == 0)
587 			error = EINVAL;
588 		goto done;
589 	}
590 	vn_unlock(vp);
591 	vn->sc_secsize = DEV_BSIZE;
592 	vn->sc_vp = vp;
593 	nd.nl_open_vp = NULL;
594 
595 	/*
596 	 * If the size is specified, override the file attributes.  Note that
597 	 * the vn_size argument is in PAGE_SIZE sized blocks.
598 	 */
599 	if (vio->vn_size)
600 		vn->sc_size = vio->vn_size * PAGE_SIZE / vn->sc_secsize;
601 	else
602 		vn->sc_size = vattr.va_size / vn->sc_secsize;
603 	error = vnsetcred(vn, cred);
604 	if (error) {
605 		vn->sc_vp = NULL;
606 		vn_close(vp, flags);
607 		goto done;
608 	}
609 	vn->sc_flags |= VNF_INITED;
610 	if (flags == FREAD)
611 		vn->sc_flags |= VNF_READONLY;
612 	IFOPT(vn, VN_LABELS) {
613 		/*
614 		 * Reopen so that `ds' knows which devices are open.
615 		 * If this is the first VNIOCSET, then we've
616 		 * guaranteed that the device is the cdev and that
617 		 * no other slices or labels are open.  Otherwise,
618 		 * we rely on VNIOCCLR not being abused.
619 		 */
620 		error = dev_dopen(dev, flag, S_IFCHR, cred);
621 		if (error)
622 			vnclear(vn);
623 	}
624 	IFOPT(vn, VN_FOLLOW)
625 		kprintf("vnioctl: SET vp %p size %llx blks\n",
626 		       vn->sc_vp, vn->sc_size);
627 done:
628 	nlookup_done(&nd);
629 	return(error);
630 }
631 
632 /*
633  *	vniocattach_swap:
634  *
635  *	Attach swap backing store to a VN partition of the size specified
636  *	in vn_size.
637  */
638 
639 static int
640 vniocattach_swap(struct vn_softc *vn, struct vn_ioctl *vio, cdev_t dev,
641 		 int flag, struct ucred *cred)
642 {
643 	int error;
644 
645 	/*
646 	 * Range check.  Disallow negative sizes or any size less then the
647 	 * size of a page.  Then round to a page.
648 	 */
649 
650 	if (vio->vn_size <= 0)
651 		return(EDOM);
652 
653 	/*
654 	 * Allocate an OBJT_SWAP object.
655 	 *
656 	 * sc_secsize is PAGE_SIZE'd
657 	 *
658 	 * vio->vn_size is in PAGE_SIZE'd chunks.
659 	 * sc_size must be in PAGE_SIZE'd chunks.
660 	 * Note the truncation.
661 	 */
662 
663 	vn->sc_secsize = PAGE_SIZE;
664 	vn->sc_size = vio->vn_size;
665 	vn->sc_object = vm_pager_allocate(OBJT_SWAP, NULL,
666 					  vn->sc_secsize * (off_t)vio->vn_size,
667 					  VM_PROT_DEFAULT, 0);
668 	IFOPT(vn, VN_RESERVE) {
669 		if (swap_pager_reserve(vn->sc_object, 0, vn->sc_size) < 0) {
670 			vm_pager_deallocate(vn->sc_object);
671 			vn->sc_object = NULL;
672 			return(EDOM);
673 		}
674 	}
675 	vn->sc_flags |= VNF_INITED;
676 
677 	error = vnsetcred(vn, cred);
678 	if (error == 0) {
679 		IFOPT(vn, VN_LABELS) {
680 			/*
681 			 * Reopen so that `ds' knows which devices are open.
682 			 * If this is the first VNIOCSET, then we've
683 			 * guaranteed that the device is the cdev and that
684 			 * no other slices or labels are open.  Otherwise,
685 			 * we rely on VNIOCCLR not being abused.
686 			 */
687 			error = dev_dopen(dev, flag, S_IFCHR, cred);
688 		}
689 	}
690 	if (error == 0) {
691 		IFOPT(vn, VN_FOLLOW) {
692 			kprintf("vnioctl: SET vp %p size %llx\n",
693 			       vn->sc_vp, vn->sc_size);
694 		}
695 	}
696 	if (error)
697 		vnclear(vn);
698 	return(error);
699 }
700 
701 /*
702  * Duplicate the current processes' credentials.  Since we are called only
703  * as the result of a SET ioctl and only root can do that, any future access
704  * to this "disk" is essentially as root.  Note that credentials may change
705  * if some other uid can write directly to the mapped file (NFS).
706  */
707 int
708 vnsetcred(struct vn_softc *vn, struct ucred *cred)
709 {
710 	char *tmpbuf;
711 	int error = 0;
712 
713 	/*
714 	 * Set credits in our softc
715 	 */
716 
717 	if (vn->sc_cred)
718 		crfree(vn->sc_cred);
719 	vn->sc_cred = crdup(cred);
720 
721 	/*
722 	 * Horrible kludge to establish credentials for NFS  XXX.
723 	 */
724 
725 	if (vn->sc_vp) {
726 		struct uio auio;
727 		struct iovec aiov;
728 
729 		tmpbuf = kmalloc(vn->sc_secsize, M_TEMP, M_WAITOK);
730 		bzero(&auio, sizeof(auio));
731 
732 		aiov.iov_base = tmpbuf;
733 		aiov.iov_len = vn->sc_secsize;
734 		auio.uio_iov = &aiov;
735 		auio.uio_iovcnt = 1;
736 		auio.uio_offset = 0;
737 		auio.uio_rw = UIO_READ;
738 		auio.uio_segflg = UIO_SYSSPACE;
739 		auio.uio_resid = aiov.iov_len;
740 		vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
741 		error = VOP_READ(vn->sc_vp, &auio, 0, vn->sc_cred);
742 		vn_unlock(vn->sc_vp);
743 		kfree(tmpbuf, M_TEMP);
744 	}
745 	return (error);
746 }
747 
748 void
749 vnclear(struct vn_softc *vn)
750 {
751 	IFOPT(vn, VN_FOLLOW)
752 		kprintf("vnclear(%p): vp=%p\n", vn, vn->sc_vp);
753 	if (vn->sc_slices != NULL)
754 		dsgone(&vn->sc_slices);
755 	vn->sc_flags &= ~VNF_INITED;
756 	if (vn->sc_vp != NULL) {
757 		vn_close(vn->sc_vp,
758 		    (vn->sc_flags & VNF_READONLY) ?  FREAD : (FREAD|FWRITE));
759 		vn->sc_vp = NULL;
760 	}
761 	vn->sc_flags &= ~VNF_READONLY;
762 	if (vn->sc_cred) {
763 		crfree(vn->sc_cred);
764 		vn->sc_cred = NULL;
765 	}
766 	if (vn->sc_object != NULL) {
767 		vm_pager_deallocate(vn->sc_object);
768 		vn->sc_object = NULL;
769 	}
770 	vn->sc_size = 0;
771 }
772 
/*
 * 	vnget:
 *
 *	populate a struct vn_user for the VNIOCGET ioctl.
 *	interface conventions defined in sys/sys/vnioctl.h.
 */

static int
vnget(cdev_t dev, struct vn_softc *vn, struct vn_user *vnu)
{
	int error, found = 0;
	char *freepath, *fullpath;
	struct vattr vattr;

	/* vnu_unit == -1 means "the unit this ioctl was issued on" */
	if (vnu->vnu_unit == -1) {
		vnu->vnu_unit = dkunit(dev);
	}
	else if (vnu->vnu_unit < 0)
		return (EINVAL);

	SLIST_FOREACH(vn, &vn_list, sc_list) {

		if(vn->sc_unit != vnu->vnu_unit)
			continue;

		found = 1;

		if (vn->sc_flags & VNF_INITED && vn->sc_vp != NULL) {

			/* note: u_cred checked in vnioctl above */
			error = VOP_GETATTR(vn->sc_vp, &vattr);
			if (error) {
				kprintf("vnget: VOP_GETATTR for %p failed\n",
					vn->sc_vp);
				return (error);
			}

			/* resolve the backing vnode to a path for the user */
			error = vn_fullpath(curproc, vn->sc_vp,
						&fullpath, &freepath);

			if (error) {
				kprintf("vnget: unable to resolve vp %p\n",
					vn->sc_vp);
				return(error);
			}

			strlcpy(vnu->vnu_file, fullpath,
				sizeof(vnu->vnu_file));
			kfree(freepath, M_TEMP);
			vnu->vnu_dev = vattr.va_fsid;
			vnu->vnu_ino = vattr.va_fileid;
			/*
			 * NOTE(review): vnu_size/vnu_secsize are only set
			 * for swap-backed units below — confirm whether
			 * file-backed units should report them as well.
			 */

		}
		else if (vn->sc_flags & VNF_INITED && vn->sc_object != NULL){

			/* swap-backed: report the magic name plus size */
			strlcpy(vnu->vnu_file, _VN_USER_SWAP,
				sizeof(vnu->vnu_file));
			vnu->vnu_size = vn->sc_size;
			vnu->vnu_secsize = vn->sc_secsize;

		} else {

			/* unconfigured unit: zero the identification */
			bzero(vnu->vnu_file, sizeof(vnu->vnu_file));
			vnu->vnu_dev = 0;
			vnu->vnu_ino = 0;

		}
		break;
	}

	if (!found)
		return(ENXIO);

	return(0);
}
848 
849 static int
850 vnsize(struct dev_psize_args *ap)
851 {
852 	cdev_t dev = ap->a_head.a_dev;
853 	struct vn_softc *vn;
854 
855 	vn = dev->si_drv1;
856 	if (!vn)
857 		return(ENXIO);
858 	if ((vn->sc_flags & VNF_INITED) == 0)
859 		return(ENXIO);
860 	ap->a_result = (int64_t)vn->sc_size;
861 	return(0);
862 }
863 
/*
 * Module event handler: register the vn dev_ops on load; on unload or
 * shutdown, detach backing store and destroy every cdev of every unit,
 * then remove the dev_ops entry.
 */
static int
vn_modevent(module_t mod, int type, void *data)
{
	struct vn_softc *vn;
	cdev_t dev;

	switch (type) {
	case MOD_LOAD:
		dev_ops_add(&vn_ops, 0, 0);
		break;
	case MOD_UNLOAD:
		/* fall through */
	case MOD_SHUTDOWN:
		while ((vn = SLIST_FIRST(&vn_list)) != NULL) {
			SLIST_REMOVE_HEAD(&vn_list, sc_list);
			if (vn->sc_flags & VNF_INITED)
				vnclear(vn);
			/* Cleanup all cdev_t's that refer to this unit */
			while ((dev = vn->sc_devlist) != NULL) {
				/* devices are chained through si_drv2 */
				vn->sc_devlist = dev->si_drv2;
				dev->si_drv1 = dev->si_drv2 = NULL;
				destroy_dev(dev);
			}
			kfree(vn, M_DEVBUF);
		}
		dev_ops_remove(&vn_ops, 0, 0);
		break;
	default:
		break;
	}
	return 0;
}
896 
897 DEV_MODULE(vn, vn_modevent, 0);
898