xref: /original-bsd/sys/miscfs/union/union_subr.c (revision ab1360c4)
/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_subr.c	1.6 (Berkeley) 02/04/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include "union.h" /*<miscfs/union/union.h>*/

#ifdef DIAGNOSTIC
#include <sys/proc.h>
#endif

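/*
 * All union_nodes live on a single, singly-linked list headed by
 * unhead.  unvplock holds the UN_LOCKED/UN_WANT bits used to
 * serialise calls to getnewvnode() in union_allocvp() below.
 */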
static struct union_node *unhead;
static int unvplock;

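/*
 * Reset the union_node list and its lock; called when the union
 * filesystem is initialised.
 */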
int
union_init()
{

	unhead = 0;
	unvplock = 0;
	return (0);
}

/*
 * allocate a union_node/vnode pair.  the vnode is
 * referenced and locked.  the new vnode is returned
 * via (vpp).  (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time.  (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped.  either, but not both, can be nil.
 * the references are either maintained in the new union_node
 * object which is allocated, or they are vrele'd.
 *
 * all union_nodes are maintained on a singly-linked
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list.  this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode.  this lock is only taken if we are going to
 * call getnewvnode, since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference.  this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
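/*
 * A typical (hypothetical) call from the union lookup path would be
 *
 *	error = union_allocvp(&vp, mp, dvp, dvp, cnp, uppervp, lowervp);
 *
 * where uppervp and lowervp are whatever the lookup found in the
 * upper and lower layers; either may be nil, but not both.
 */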
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
	struct vnode **vpp;
	struct mount *mp;
	struct vnode *undvp;
	struct vnode *dvp;		/* may be null */
	struct componentname *cnp;	/* may be null */
	struct vnode *uppervp;		/* may be null */
	struct vnode *lowervp;		/* may be null */
{
	int error;
	struct union_node *un;
	struct union_node **pp;
	struct vnode *xlowervp = 0;

	if (uppervp == 0 && lowervp == 0)
		panic("union: unidentifiable allocation");

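	/*
	 * If the upper and lower layers resolve to objects of different
	 * types, set the lower vnode aside (xlowervp) and map only the
	 * upper layer.
	 */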
	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = 0;
	}

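	/*
	 * Look for an existing node on this mount whose upper and lower
	 * vnodes are compatible with those given.  vget() may sleep, so
	 * the scan is restarted if it fails.
	 */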
loop:
	for (un = unhead; un != 0; un = un->un_next) {
		if ((un->un_lowervp == lowervp ||
		     un->un_lowervp == 0) &&
		    (un->un_uppervp == uppervp ||
		     un->un_uppervp == 0) &&
		    (UNIONTOV(un)->v_mount == mp)) {
			if (vget(UNIONTOV(un), 0))
				goto loop;
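			/*
			 * Don't lock undvp again here; the caller is
			 * assumed to hold it locked already.
			 */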
			if (UNIONTOV(un) != undvp)
				VOP_LOCK(UNIONTOV(un));
			if (uppervp != un->un_uppervp) {
				if (un->un_uppervp)
					vrele(un->un_uppervp);
				un->un_uppervp = uppervp;
			} else if (uppervp) {
				vrele(uppervp);
			}
			if (lowervp != un->un_lowervp) {
				if (un->un_lowervp)
					vrele(un->un_lowervp);
				un->un_lowervp = lowervp;
			} else if (lowervp) {
				vrele(lowervp);
			}
			*vpp = UNIONTOV(un);
			return (0);
		}
	}

	/*
	 * otherwise lock the vp list while we call getnewvnode
	 * since that can block.
	 */
	if (unvplock & UN_LOCKED) {
		unvplock |= UN_WANT;
		sleep((caddr_t) &unvplock, PINOD);
		goto loop;
	}
	unvplock |= UN_LOCKED;

	error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
	if (error)
		goto out;

	MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
		M_TEMP, M_WAITOK);

	if (uppervp)
		(*vpp)->v_type = uppervp->v_type;
	else
		(*vpp)->v_type = lowervp->v_type;
	un = VTOUNION(*vpp);
	un->un_vnode = *vpp;
	un->un_next = 0;
	un->un_uppervp = uppervp;
	un->un_lowervp = lowervp;
	un->un_open = 0;
	un->un_flags = 0;
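	/*
	 * If there is no upper layer object yet, save a copy of the
	 * component name and a reference to the parent directory so
	 * that the upper object can be created later.
	 */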
	if (uppervp == 0 && cnp) {
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		VREF(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	/* add to union vnode list */
	for (pp = &unhead; *pp; pp = &(*pp)->un_next)
		continue;
	*pp = un;

	un->un_flags |= UN_LOCKED;

#ifdef DIAGNOSTIC
	un->un_pid = curproc->p_pid;
#endif

	if (xlowervp)
		vrele(xlowervp);

out:
	unvplock &= ~UN_LOCKED;

	if (unvplock & UN_WANT) {
		unvplock &= ~UN_WANT;
		wakeup((caddr_t) &unvplock);
	}

	return (error);
}

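/*
 * Remove the union_node from the list and release its private
 * storage.  Called from the vfs reclaim entry.
 */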
int
union_freevp(vp)
	struct vnode *vp;
{
	struct union_node **unpp;
	struct union_node *un = VTOUNION(vp);

	for (unpp = &unhead; *unpp != 0; unpp = &(*unpp)->un_next) {
		if (*unpp == un) {
			*unpp = un->un_next;
			break;
		}
	}

	FREE(vp->v_data, M_TEMP);
	vp->v_data = 0;
	return (0);
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(p, cred, fvp, tvp)
	struct proc *p;
	struct ucred *cred;
	struct vnode *fvp;
	struct vnode *tvp;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;

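	/*
	 * The lease checks are made with each vnode temporarily unlocked,
	 * presumably to avoid holding a vnode lock while the lease check
	 * sleeps (hence the XXX markers).
	 */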
	VOP_UNLOCK(fvp);				/* XXX */
	LEASE_CHECK(fvp, p, cred, LEASE_READ);
	VOP_LOCK(fvp);					/* XXX */
	VOP_UNLOCK(tvp);				/* XXX */
	LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
	VOP_LOCK(tvp);					/* XXX */

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

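			/* a read that returned no data means end of file */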
			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	free(buf, M_TEMP);
	return (error);
}

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to
 * the mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 */
int
union_mkshadow(um, dvp, cnp, vpp)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vattr va;
	struct proc *p = cnp->cn_proc;
	struct componentname cn;

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the current user,
	 * group from parent directory, and mode 777 modified
	 * by umask (i.e. mostly identical to the mkdir syscall).
	 * (jsp, kb)
	 * TODO: create the directory owned by the user who
	 * did the mount (um->um_cred).
	 */

	/*
	 * A new componentname structure must be faked up because
	 * there is no way to know where the upper level cnp came
	 * from or what it is being used for.  This must duplicate
	 * some of the work done by NDINIT, some of the work done
	 * by namei, some of the work done by lookup and some of
	 * the work done by VOP_LOOKUP when given a CREATE flag.
	 * Conclusion: Horrible.
	 *
	 * The pathname buffer will be FREEed by VOP_MKDIR.
	 */
	cn.cn_pnbuf = malloc(cnp->cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(cnp->cn_nameptr, cn.cn_pnbuf, cnp->cn_namelen);
	cn.cn_pnbuf[cnp->cn_namelen] = '\0';

	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|ISLASTCN);
	cn.cn_proc = cnp->cn_proc;
	cn.cn_cred = cnp->cn_cred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_namelen = cnp->cn_namelen;
	cn.cn_hash = cnp->cn_hash;
	cn.cn_consume = cnp->cn_consume;

	if (error = relookup(dvp, vpp, &cn))
		return (error);

	if (*vpp) {
		VOP_ABORTOP(dvp, &cn);
		VOP_UNLOCK(dvp);
		vrele(*vpp);
		*vpp = NULLVP;
		return (EEXIST);
	}

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = UN_DIRMODE &~ p->p_fd->fd_cmask;

	/* LEASE_CHECK: dvp is locked */
	LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);

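	/*
	 * VOP_MKDIR releases the reference on dvp, so take an extra
	 * one here to preserve the reference the caller holds.
	 */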
	VREF(dvp);
	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	return (error);
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 */
int
union_vn_create(vpp, un, p)
	struct vnode **vpp;
	struct union_node *un;
	struct proc *p;
{
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int hash;
	int cmode = UN_FILEMODE &~ p->p_fd->fd_cmask;
	char *cp;
	struct componentname cn;

	*vpp = NULLVP;

	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = (caddr_t) malloc(cn.cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKLEAF|LOCKPARENT|HASBUF|SAVENAME|ISLASTCN);
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_nameptr = cn.cn_pnbuf;
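	/*
	 * The component hash is computed the same way namei/lookup
	 * would compute it, since relookup expects cn_hash to be set.
	 */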
	for (hash = 0, cp = cn.cn_nameptr; *cp != 0 && *cp != '/'; cp++)
		hash += (unsigned char)*cp;
	cn.cn_hash = hash;
	cn.cn_consume = 0;

	if (error = relookup(un->un_dirvp, &vp, &cn))
		return (error);
	if (vp == NULLVP) {
		VATTR_NULL(vap);
		vap->va_type = VREG;
		vap->va_mode = cmode;
		LEASE_CHECK(un->un_dirvp, p, cred, LEASE_WRITE);
		if (error = VOP_CREATE(un->un_dirvp, &vp,
		    &cn, vap))
			return (error);
	} else {
		VOP_ABORTOP(un->un_dirvp, &cn);
		if (un->un_dirvp == vp)
			vrele(un->un_dirvp);
		else
			vput(vp);
		return (EEXIST);
	}

	if (vp->v_type != VREG) {
		error = EOPNOTSUPP;
		goto bad;
	}

	VOP_UNLOCK(vp);				/* XXX */
	LEASE_CHECK(vp, p, cred, LEASE_WRITE);
	VOP_LOCK(vp);				/* XXX */
	VATTR_NULL(vap);
	vap->va_size = 0;
	if (error = VOP_SETATTR(vp, vap, cred, p))
		goto bad;

	if (error = VOP_OPEN(vp, fmode, cred, p))
		goto bad;

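	/*
	 * Account for the write reference, as vn_open would have
	 * done for an open for writing.
	 */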
	vp->v_writecount++;
	*vpp = vp;
	return (0);
bad:
	vput(vp);
	return (error);
}