/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_subr.c	2.1 (Berkeley) 02/10/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include "union.h" /*<miscfs/union/union.h>*/

#ifdef DIAGNOSTIC
#include <sys/proc.h>
#endif

static struct union_node *unhead;
static int unvplock;

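/*
 * Initialize the state private to the union filesystem:
 * the list of union nodes starts out empty and the list
 * lock is clear.
 */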
int
union_init()
{

	unhead = 0;
	unvplock = 0;
	return (0);
}

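/*
 * Remove a union node from the list of all union nodes.
 * If the node is not on the list this is a no-op.
 */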
static void
union_remlist(un)
	struct union_node *un;
{
	struct union_node **unpp;

	for (unpp = &unhead; *unpp != 0; unpp = &(*unpp)->un_next) {
		if (*unpp == un) {
			*unpp = un->un_next;
			break;
		}
	}
}

/*
 * allocate a union_node/vnode pair.  the vnode is
 * referenced and locked.  the new vnode is returned
 * via (vpp).  (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time.  (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped.  either, but not both, can be nil.
 * if supplied, (uppervp) is locked.
 * the references are either maintained in the newly allocated
 * union_node object, or they are vrele'd.
 *
 * all union_nodes are maintained on a singly-linked
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list.  this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode.  this lock is only taken if we are going to
 * call getnewvnode, since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference.  this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
	struct vnode **vpp;
	struct mount *mp;
	struct vnode *undvp;
	struct vnode *dvp;		/* may be null */
	struct componentname *cnp;	/* may be null */
	struct vnode *uppervp;		/* may be null */
	struct vnode *lowervp;		/* may be null */
{
	int error;
	struct union_node *un;
	struct union_node **pp;
	struct vnode *xlowervp = 0;

	if (uppervp == 0 && lowervp == 0)
		panic("union: unidentifiable allocation");

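	/*
	 * If the upper and lower layers refer to objects of
	 * different types, map only the upper layer.  The lower
	 * vnode is remembered in xlowervp so that the reference
	 * it carries can be dropped once the union node is set up.
	 */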
	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = 0;
	}

loop:
	for (un = unhead; un != 0; un = un->un_next) {
		if ((un->un_lowervp == lowervp ||
		     un->un_lowervp == 0) &&
		    (un->un_uppervp == uppervp ||
		     un->un_uppervp == 0) &&
		    (UNIONTOV(un)->v_mount == mp)) {
			if (vget(UNIONTOV(un), 0))
				goto loop;
			break;
		}
	}

	if (un) {
		/*
		 * Obtain a lock on the union_node.
		 * uppervp is locked, though un->un_uppervp
		 * may not be.  this doesn't break the locking
		 * hierarchy since in the case that un->un_uppervp
		 * is not yet locked it will be vrele'd and replaced
		 * with uppervp.
		 */

		if ((dvp != NULLVP) && (uppervp == dvp)) {
			/*
			 * Access ``.'', so (un) will already
			 * be locked.  Since this process has
			 * the lock on (uppervp) no other
			 * process can hold the lock on (un).
			 */
#ifdef DIAGNOSTIC
			if ((un->un_flags & UN_LOCKED) == 0)
				panic("union: . not locked");
			else if (curproc && un->un_pid != curproc->p_pid &&
				    un->un_pid > -1 && curproc->p_pid > -1)
				panic("union: allocvp not lock owner");
#endif
		} else {
			if (un->un_flags & UN_LOCKED) {
				vrele(UNIONTOV(un));
				un->un_flags |= UN_WANT;
				sleep((caddr_t) &un->un_flags, PINOD);
				goto loop;
			}
			un->un_flags |= UN_LOCKED;

#ifdef DIAGNOSTIC
			if (curproc)
				un->un_pid = curproc->p_pid;
			else
				un->un_pid = -1;
#endif
		}

		/*
		 * At this point, the union_node is locked,
		 * un->un_uppervp may not be locked, and uppervp
		 * is locked or nil.
		 */

		/*
		 * Save information about the upper layer.
		 */
		if (uppervp != un->un_uppervp) {
			if (un->un_uppervp)
				vrele(un->un_uppervp);
			un->un_uppervp = uppervp;
		} else if (uppervp) {
			vrele(uppervp);
		}

		if (un->un_uppervp) {
			un->un_flags |= UN_ULOCK;
			un->un_flags &= ~UN_KLOCK;
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			if (un->un_lowervp) {
				vrele(un->un_lowervp);
				if (un->un_path) {
					free(un->un_path, M_TEMP);
					un->un_path = 0;
				}
				if (un->un_dirvp) {
					vrele(un->un_dirvp);
					un->un_dirvp = 0;
				}
			}
			un->un_lowervp = lowervp;
			if (cnp && (lowervp != NULLVP) &&
			    (lowervp->v_type == VREG)) {
				un->un_hash = cnp->cn_hash;
				un->un_path = malloc(cnp->cn_namelen+1,
						M_TEMP, M_WAITOK);
				bcopy(cnp->cn_nameptr, un->un_path,
						cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
				VREF(dvp);
				un->un_dirvp = dvp;
			}
		} else if (lowervp) {
			vrele(lowervp);
		}
		/* release the spare lower layer reference, if any */
		if (xlowervp)
			vrele(xlowervp);
		*vpp = UNIONTOV(un);
		return (0);
	}

	/*
	 * otherwise lock the vp list while we call getnewvnode
	 * since that can block.
	 */
	if (unvplock & UN_LOCKED) {
		unvplock |= UN_WANT;
		sleep((caddr_t) &unvplock, PINOD);
		goto loop;
	}
	unvplock |= UN_LOCKED;

	error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
	if (error) {
		if (uppervp) {
			if (dvp == uppervp)
				vrele(uppervp);
			else
				vput(uppervp);
		}
		if (lowervp)
			vrele(lowervp);

		goto out;
	}

	MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
		M_TEMP, M_WAITOK);

	if (uppervp)
		(*vpp)->v_type = uppervp->v_type;
	else
		(*vpp)->v_type = lowervp->v_type;
	un = VTOUNION(*vpp);
	un->un_vnode = *vpp;
	un->un_next = 0;
	un->un_uppervp = uppervp;
	un->un_lowervp = lowervp;
	un->un_openl = 0;
	un->un_flags = UN_LOCKED;
	if (un->un_uppervp)
		un->un_flags |= UN_ULOCK;
#ifdef DIAGNOSTIC
	if (curproc)
		un->un_pid = curproc->p_pid;
	else
		un->un_pid = -1;
#endif
	if (cnp && (lowervp != NULLVP) && (lowervp->v_type == VREG)) {
		un->un_hash = cnp->cn_hash;
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		VREF(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_hash = 0;
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	/* add to union vnode list */
	for (pp = &unhead; *pp; pp = &(*pp)->un_next)
		continue;
	*pp = un;

	if (xlowervp)
		vrele(xlowervp);

out:
	unvplock &= ~UN_LOCKED;

	if (unvplock & UN_WANT) {
		unvplock &= ~UN_WANT;
		wakeup((caddr_t) &unvplock);
	}

	return (error);
}

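/*
 * Dispose of the union filesystem private data attached to a
 * union vnode.  The node is first removed from the list of all
 * union nodes, then the vnode's v_data is released.  Called
 * from the vfs reclaim entry.
 */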
int
union_freevp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	union_remlist(un);

	FREE(vp->v_data, M_TEMP);
	vp->v_data = 0;
	return (0);
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(p, cred, fvp, tvp)
	struct proc *p;
	struct ucred *cred;
	struct vnode *fvp;
	struct vnode *tvp;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;
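
	/*
	 * XXX - drop and reacquire the vnode locks around the
	 * lease checks, which may block.
	 */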
	VOP_UNLOCK(fvp);				/* XXX */
	LEASE_CHECK(fvp, p, cred, LEASE_READ);
	VOP_LOCK(fvp);					/* XXX */
	VOP_UNLOCK(tvp);				/* XXX */
	LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
	VOP_LOCK(tvp);					/* XXX */

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	free(buf, M_TEMP);
	return (error);
}

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 */
int
union_mkshadow(um, dvp, cnp, vpp)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vattr va;
	struct proc *p = cnp->cn_proc;
	struct componentname cn;

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (i.e. mostly identical to the
	 * mkdir syscall).  (jsp, kb)
	 */

	/*
	 * A new componentname structure must be faked up because
	 * there is no way to know where the upper level cnp came
	 * from or what it is being used for.  This must duplicate
	 * some of the work done by NDINIT, some of the work done
	 * by namei, some of the work done by lookup and some of
	 * the work done by VOP_LOOKUP when given a CREATE flag.
	 * Conclusion: Horrible.
	 *
	 * The pathname buffer will be FREEed by VOP_MKDIR.
	 */
	cn.cn_pnbuf = malloc(cnp->cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(cnp->cn_nameptr, cn.cn_pnbuf, cnp->cn_namelen);
	cn.cn_pnbuf[cnp->cn_namelen] = '\0';

	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|ISLASTCN);
	cn.cn_proc = cnp->cn_proc;
	cn.cn_cred = um->um_cred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_namelen = cnp->cn_namelen;
	cn.cn_hash = cnp->cn_hash;
	cn.cn_consume = cnp->cn_consume;

	VREF(dvp);
	if (error = relookup(dvp, vpp, &cn))
		return (error);
	vrele(dvp);

	if (*vpp) {
		VOP_ABORTOP(dvp, &cn);
		VOP_UNLOCK(dvp);
		vrele(*vpp);
		*vpp = NULLVP;
		return (EEXIST);
	}

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = um->um_cmode;

	/* LEASE_CHECK: dvp is locked */
	LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);

	VREF(dvp);
	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	return (error);
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 */
int
union_vn_create(vpp, un, p)
	struct vnode **vpp;
	struct union_node *un;
	struct proc *p;
{
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
	char *cp;
	struct componentname cn;

	*vpp = NULLVP;

	/*
	 * Build a new componentname structure (for the same
	 * reasons outlined in union_mkshadow).
	 * The difference here is that the file is owned by
	 * the current user, rather than by the person who
	 * did the mount, since the current user needs to be
	 * able to write the file (that's why it is being
	 * copied in the first place).
	 */
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = (caddr_t) malloc(cn.cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKLEAF|LOCKPARENT|HASBUF|SAVENAME|ISLASTCN);
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_hash = un->un_hash;
	cn.cn_consume = 0;

	VREF(un->un_dirvp);
	if (error = relookup(un->un_dirvp, &vp, &cn))
		return (error);
	vrele(un->un_dirvp);

	if (vp == NULLVP) {
		/*
		 * Good - there was no race to create the file
		 * so go ahead and create it.  The permissions
		 * on the file will be 0666 modified by the
		 * current user's umask.  Access to the file, while
		 * it is unioned, will require access to the top *and*
		 * bottom files.  Access when not unioned will simply
		 * require access to the top-level file.
		 * TODO: confirm choice of access permissions.
		 */
		VATTR_NULL(vap);
		vap->va_type = VREG;
		vap->va_mode = cmode;
		LEASE_CHECK(un->un_dirvp, p, cred, LEASE_WRITE);
		if (error = VOP_CREATE(un->un_dirvp, &vp,
		    &cn, vap))
			return (error);
	} else {
		/*
		 * The file already exists.  Release the references
		 * gained by relookup and fail with EEXIST; do not
		 * fall through to the cleanup code below, which
		 * would vput(vp) a second time.
		 */
		VOP_ABORTOP(un->un_dirvp, &cn);
		if (un->un_dirvp == vp)
			vrele(un->un_dirvp);
		else
			vput(vp);
		return (EEXIST);
	}

	if (vp->v_type != VREG) {
		error = EOPNOTSUPP;
		goto bad;
	}

	VOP_UNLOCK(vp);				/* XXX */
	LEASE_CHECK(vp, p, cred, LEASE_WRITE);
	VOP_LOCK(vp);				/* XXX */
	VATTR_NULL(vap);
	vap->va_size = 0;
	if (error = VOP_SETATTR(vp, vap, cred, p))
		goto bad;

	if (error = VOP_OPEN(vp, fmode, cred, p))
		goto bad;

	vp->v_writecount++;
	*vpp = vp;
	return (0);
bad:
	vput(vp);
	return (error);
}

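/*
 * Close a file opened by union_vn_create.  The write count
 * taken on the vnode when it was opened for writing is dropped
 * before the close is passed on to the underlying filesystem.
 */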
int
union_vn_close(vp, fmode, cred, p)
	struct vnode *vp;
	int fmode;
	struct ucred *cred;
	struct proc *p;
{
	if (fmode & FWRITE)
		--vp->v_writecount;
	return (VOP_CLOSE(vp, fmode, cred, p));
}

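/*
 * The upper layer object has been removed.  Release the lock
 * and/or reference held on the upper vnode and clear the
 * union node's pointer to it.
 */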
void
union_removed_upper(un)
	struct union_node *un;
{
	if (un->un_flags & UN_ULOCK) {
		un->un_flags &= ~UN_ULOCK;
		vput(un->un_uppervp);
	} else {
		vrele(un->un_uppervp);
	}
	un->un_uppervp = NULLVP;
}

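/*
 * Return the lower layer vnode of a union vnode, or NULLVP if
 * there is none.  When the lower vnode's type matches that of
 * the union vnode an additional reference is taken with vget();
 * NULLVP is returned if that fails.
 */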
struct vnode *
union_lowervp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if (un->un_lowervp && (vp->v_type == un->un_lowervp->v_type)) {
		if (vget(un->un_lowervp, 0))
			return (NULLVP);
	}

	return (un->un_lowervp);
}