xref: /original-bsd/sys/miscfs/union/union_subr.c (revision 9979a570)
/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_subr.c	1.5 (Berkeley) 02/03/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/file.h>
#include "union.h" /*<miscfs/union/union.h>*/

#ifdef DIAGNOSTIC
#include <sys/proc.h>
#endif

static struct union_node *unhead;
static int unvplock;

int
union_init()
{

	unhead = 0;
	unvplock = 0;
	return (0);
}

/*
 * allocate a union_node/vnode pair.  the vnode is
 * referenced and locked.  the new vnode is returned
 * via (vpp).  (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time.  (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped.  either, but not both, can be nil.
 *
 * all union_nodes are maintained on a singly-linked
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * a single lock is kept for the entire list.  this is
 * needed because the getnewvnode() function can block
 * waiting for a vnode to become free, in which case there
 * may be more than one process trying to get the same
 * vnode.  this lock is only taken if we are going to
 * call getnewvnode, since the kernel itself is single-threaded.
 *
 * if an entry is found on the list, then call vget() to
 * take a reference.  this is done because there may be
 * zero references to it and so it needs to be removed from
 * the vnode free list.
 */
int
union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp)
	struct vnode **vpp;
	struct mount *mp;
	struct vnode *undvp;
	struct vnode *dvp;		/* may be null */
	struct componentname *cnp;	/* may be null */
	struct vnode *uppervp;		/* may be null */
	struct vnode *lowervp;		/* may be null */
{
	int error;
	struct union_node *un;
	struct union_node **pp;
	struct vnode *xlowervp = 0;

	if (uppervp == 0 && lowervp == 0)
		panic("union: unidentifiable allocation");

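	/*
	 * when the upper and lower vnodes have different types only
	 * the upper layer object is mapped; the lower vnode is set
	 * aside in (xlowervp) so that the caller's reference on it
	 * can be dropped once the union node has been set up.
	 */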
	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = 0;
	}

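	/*
	 * scan the existing union nodes for one on this mount which
	 * already maps this (uppervp, lowervp) pair.  a node with a
	 * missing layer also matches and has that layer filled in
	 * below.  vget() can sleep, so if it fails the scan is
	 * restarted from the top.
	 */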
loop:
	for (un = unhead; un != 0; un = un->un_next) {
		if ((un->un_lowervp == lowervp ||
		     un->un_lowervp == 0) &&
		    (un->un_uppervp == uppervp ||
		     un->un_uppervp == 0) &&
		    (UNIONTOV(un)->v_mount == mp)) {
			if (vget(UNIONTOV(un), 0))
				goto loop;
			if (UNIONTOV(un) != undvp)
				VOP_LOCK(UNIONTOV(un));
			if (uppervp != un->un_uppervp) {
				if (un->un_uppervp)
					vrele(un->un_uppervp);
				un->un_uppervp = uppervp;
			}
			if (lowervp != un->un_lowervp) {
				if (un->un_lowervp)
					vrele(un->un_lowervp);
				un->un_lowervp = lowervp;
			}
			*vpp = UNIONTOV(un);
			return (0);
		}
	}


	/*
	 * otherwise lock the vp list while we call getnewvnode
	 * since that can block.
	 */
	if (unvplock & UN_LOCKED) {
		unvplock |= UN_WANT;
		sleep((caddr_t) &unvplock, PINOD);
		goto loop;
	}
	unvplock |= UN_LOCKED;

	error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
	if (error)
		goto out;

	MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
		M_TEMP, M_WAITOK);

	if (uppervp)
		(*vpp)->v_type = uppervp->v_type;
	else
		(*vpp)->v_type = lowervp->v_type;
	un = VTOUNION(*vpp);
	un->un_vnode = *vpp;
	un->un_next = 0;
	un->un_uppervp = uppervp;
	un->un_lowervp = lowervp;
	un->un_flags = 0;
	if (uppervp == 0 && cnp) {
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		VREF(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	/* add to union vnode list */
	for (pp = &unhead; *pp; pp = &(*pp)->un_next)
		continue;
	*pp = un;

	un->un_flags |= UN_LOCKED;

#ifdef DIAGNOSTIC
	un->un_pid = curproc->p_pid;
#endif

	if (xlowervp)
		vrele(xlowervp);

out:
	unvplock &= ~UN_LOCKED;

	if (unvplock & UN_WANT) {
		unvplock &= ~UN_WANT;
		wakeup((caddr_t) &unvplock);
	}

	return (error);
}
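
/*
 * illustrative only (not part of the original file): a caller in the
 * union lookup path which has resolved the upper and/or lower vnodes
 * for a component might allocate the shared union node with something
 * like
 *
 *	error = union_allocvp(&vp, dvp->v_mount, dvp, upperdvp, cnp,
 *	    uppervp, lowervp);
 *
 * where (dvp) is the union directory being searched, (upperdvp) is the
 * matching upper layer directory and (cnp) is the componentname from
 * the lookup.  on success (vp) comes back referenced and locked, as
 * described above.
 */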

int
union_freevp(vp)
	struct vnode *vp;
{
	struct union_node **unpp;
	struct union_node *un = VTOUNION(vp);

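	/* unlink the node from the singly-linked list of union nodes */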
	for (unpp = &unhead; *unpp != 0; unpp = &(*unpp)->un_next) {
		if (*unpp == un) {
			*unpp = un->un_next;
			break;
		}
	}

	if (un->un_path)
		FREE(un->un_path, M_TEMP);

	FREE(vp->v_data, M_TEMP);
	vp->v_data = 0;
	return (0);
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(p, cred, fvp, tvp)
	struct proc *p;
	struct ucred *cred;
	struct vnode *fvp;
	struct vnode *tvp;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;

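	/*
	 * the lease checks may sleep, which is presumably why the
	 * vnode locks are dropped and retaken around them (XXX).
	 */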
	VOP_UNLOCK(fvp);				/* XXX */
	LEASE_CHECK(fvp, p, cred, LEASE_READ);
	VOP_LOCK(fvp);					/* XXX */
	VOP_UNLOCK(tvp);				/* XXX */
	LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
	VOP_LOCK(tvp);					/* XXX */

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

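			/*
			 * a zero-length read means end of file:
			 * there is nothing left to copy.
			 */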
			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	free(buf, M_TEMP);
	return (error);
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 */
int
union_vn_create(vpp, un, cmode, p)
	struct vnode **vpp;
	struct union_node *un;
	int cmode;
	struct proc *p;
{
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int hash;
	char *cp;
	struct componentname cn;

	*vpp = NULLVP;

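	/*
	 * build the componentname by hand, filling in the fields
	 * which relookup() and VOP_CREATE() would normally find
	 * already set up by namei(): a private copy of the name,
	 * the CREATE operation and its flags, and the name hash.
	 */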
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = (caddr_t) malloc(cn.cn_namelen+1, M_NAMEI, M_WAITOK);
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKLEAF|LOCKPARENT|HASBUF|SAVENAME|ISLASTCN);
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_nameptr = cn.cn_pnbuf;
	for (hash = 0, cp = cn.cn_nameptr; *cp != 0 && *cp != '/'; cp++)
		hash += (unsigned char)*cp;
	cn.cn_hash = hash;
	cn.cn_consume = 0;

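	/*
	 * look the name up again, starting from the saved upper
	 * directory.  if nothing is found the shadow file is created
	 * below; if an object already exists there the operation is
	 * aborted and EEXIST returned.
	 */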
	if (error = relookup(un->un_dirvp, &vp, &cn))
		return (error);
	if (vp == NULLVP) {
		VATTR_NULL(vap);
		vap->va_type = VREG;
		vap->va_mode = cmode;
		LEASE_CHECK(un->un_dirvp, p, cred, LEASE_WRITE);
		if (error = VOP_CREATE(un->un_dirvp, &vp,
		    &cn, vap))
			return (error);
	} else {
		VOP_ABORTOP(un->un_dirvp, &cn);
		if (un->un_dirvp == vp)
			vrele(un->un_dirvp);
		else
			vput(vp);
		error = EEXIST;
		goto bad;
	}

	if (vp->v_type != VREG) {
		error = EOPNOTSUPP;
		goto bad;
	}

	VOP_UNLOCK(vp);				/* XXX */
	LEASE_CHECK(vp, p, cred, LEASE_WRITE);
	VOP_LOCK(vp);				/* XXX */
	VATTR_NULL(vap);
	vap->va_size = 0;
	if (error = VOP_SETATTR(vp, vap, cred, p))
		goto bad;

	if (error = VOP_OPEN(vp, fmode, cred, p))
		goto bad;

	vp->v_writecount++;
	*vpp = vp;
	return (0);
bad:
	vput(vp);
	return (error);
}