/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * the UCLA Ficus project.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)umap_vnops.c	8.6 (Berkeley) 05/22/95
 */

/*
 * Umap Layer
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <miscfs/umapfs/umap.h>


int umap_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

/*
 * This is the 10-Apr-92 bypass routine.
 * See null_vnops.c:null_bypass for more details.
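 *
 * In outline: substitute the lower vnodes for our own in the argument
 * structure, map the credentials to the ids used by the lower layer,
 * invoke the operation on the lower layer, and then restore the
 * arguments (and credentials) so that the caller sees call-by-value
 * semantics.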
 */
int
umap_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		<other random data follows, presumably>
	} */ *ap;
{
	extern int (**umap_vnodeop_p)();  /* not extern, really "forward" */
	struct ucred **credpp = 0, *credp = 0;
	struct ucred *savecredp, *savecompcredp = 0;
	struct ucred *compcredp = 0;
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode *vp1 = 0;
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;
	struct componentname **compnamepp = 0;

	if (umap_bug_bypass)
		printf("umap_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("umap_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap);

		if (i == 0) {
			vp1 = *vps_p[0];
		}

		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (Must map first vp or vclean fails.)
		 */

		if (i && (*this_vp_p)->v_op != umap_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
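			/*
			 * Several ops vrele their vnode arguments as a
			 * side effect.  Since we have substituted the
			 * lower vnode, take an extra reference on it
			 * here and drop the corresponding reference on
			 * our own vnode in the restore loop after the
			 * call.
			 */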
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Fix the credentials.  (That's the purpose of this layer.)
	 */

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(struct ucred**,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

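		/*
		 * Work on a private copy of the credential so the
		 * caller's ucred is never modified; the copy is freed
		 * and the original restored at "out" below.
		 */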
		savecredp = (*credpp);
		(*credpp) = crdup(savecredp);
		credp = *credpp;

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp1->v_mount, credp);

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);
	}

	/*
	 * BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		compcredp = (*compnamepp)->cn_cred;
		savecompcredp = compcredp;
		compcredp = (*compnamepp)->cn_cred = crdup(savecompcredp);

		if (umap_bug_bypass && compcredp->cr_uid != 0)
			printf("umap_bypass: component cred user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp1->v_mount, compcredp);

		if (umap_bug_bypass && compcredp->cr_uid != 0)
			printf("umap_bypass: component cred user now %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
		    descp->vdesc_vpp_offset, ap);
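		/*
		 * Replace the vnode handed back by the lower layer with
		 * a vnode of our own layer (umap_node_create() in
		 * umap_subr.c supplies the aliasing umap node).
		 */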
		error = umap_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	/*
	 * Free duplicate cred structure and restore old one.
	 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if (umap_bug_bypass && credp && credp->cr_uid != 0)
			printf("umap_bypass: returning-user was %d\n",
			    credp->cr_uid);

		crfree(credp);
		(*credpp) = savecredp;
		if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
			printf("umap_bypass: returning-user now %d\n\n",
			    (*credpp)->cr_uid);
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if (umap_bug_bypass && compcredp && compcredp->cr_uid != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    compcredp->cr_uid);

		crfree(compcredp);
		(*compnamepp)->cn_cred = savecompcredp;
		if (umap_bug_bypass && savecompcredp &&
		    savecompcredp->cr_uid != 0)
			printf("umap_bypass: returning-component-user now %d\n",
			    savecompcredp->cr_uid);
	}

	return (error);
}


/*
 * We handle getattr to change the fsid.
 */
int
umap_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	short uid, gid;
	int error, tmpid, nentries, gnentries;
	u_long (*mapdata)[2], (*gmapdata)[2];
	struct vnode **vp1p;
	struct vnodeop_desc *descp = ap->a_desc;

	if (error = umap_bypass(ap))
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups will be
	 * translated to NULLGROUP.
	 */

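	/*
	 * The mount point carries arrays of id pairs for users and
	 * groups.  umap_mapids() translates ids on the way down to the
	 * lower layer; umap_reverse_findid() performs the inverse
	 * lookup and returns -1 when the lower layer's id has no
	 * mapping, hence the NOBODY/NULLGROUP defaults below.
	 */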
	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if (umap_bug_bypass)
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata;
	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata;

	/*
	 * Reverse map the uid for the vnode.  Since it's a reverse
	 * map, we can't use umap_mapids() to do it.
	 */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if (umap_bug_bypass)
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if (umap_bug_bypass)
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

/*
 * We need to process our own vnode lock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
umap_lock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

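	/*
	 * Lock our own vnode first.  An LK_DRAIN request is handled
	 * entirely at this level; any other request is passed to the
	 * lower vnode with LK_INTERLOCK cleared, since the interlock
	 * applies only to our vnode.
	 */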
	vop_nolock(ap);
	if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	ap->a_flags &= ~LK_INTERLOCK;
	return (umap_bypass(ap));
}

/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
int
umap_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{

	vop_nounlock(ap);
	ap->a_flags &= ~LK_INTERLOCK;
	return (umap_bypass(ap));
}

int
umap_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our umap_node is in the
	 * cache and reusable.
	 */
	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

int
umap_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct umap_node *xp = VTOUMAP(vp);
	struct vnode *lowervp = xp->umap_lowervp;

	/* After this assignment, this node will not be re-used. */
	xp->umap_lowervp = NULL;
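	/*
	 * Pull the node out of the umap node hash, free its private
	 * data, and drop the node's reference on the lower vnode.
	 */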
	LIST_REMOVE(xp, umap_hash);
	FREE(vp->v_data, M_TEMP);
	vp->v_data = NULL;
	vrele(lowervp);
	return (0);
}

int
umap_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

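	/*
	 * Point the buffer at the lower vnode for the duration of the
	 * I/O so that it is carried out by the underlying file system,
	 * then put our own vnode back.
	 */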
	savedvp = bp->b_vp;
	bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp);

	error = VOP_STRATEGY(ap->a_bp);

	bp->b_vp = savedvp;

	return (error);
}

int
umap_bwrite(ap)
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

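	/* Same vnode substitution as in umap_strategy(), but for VOP_BWRITE(). */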
	savedvp = bp->b_vp;
	bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp);

	error = VOP_BWRITE(ap->a_bp);

	bp->b_vp = savedvp;

	return (error);
}


int
umap_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UMAPFS, vp=%x, lowervp=%x\n", vp, UMAPVPTOLOWERVP(vp));
	return (0);
}

int
umap_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;
	struct componentname *compnamep;
	struct ucred *compcredp, *savecompcredp;
	struct vnode *vp;

	/*
	 * Rename is irregular, having two componentname structures.
	 * We need to map the cred in the second structure,
	 * and then bypass takes care of the rest.
	 */
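	/*
	 * (umap_bypass() below handles the credential in the other
	 * componentname through the operation descriptor; only a_tcnp
	 * needs explicit treatment here.)
	 */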

	vp = ap->a_fdvp;
	compnamep = ap->a_tcnp;
	compcredp = compnamep->cn_cred;

	savecompcredp = compcredp;
	compcredp = compnamep->cn_cred = crdup(savecompcredp);

	if (umap_bug_bypass && compcredp->cr_uid != 0)
		printf("umap_rename: rename component cred user was %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	/* Map all ids in the credential structure. */

	umap_mapids(vp->v_mount, compcredp);

	if (umap_bug_bypass && compcredp->cr_uid != 0)
		printf("umap_rename: rename component cred user now %d, group %d\n",
		    compcredp->cr_uid, compcredp->cr_gid);

	error = umap_bypass(ap);

	/* Restore the additional mapped componentname cred structure. */

	crfree(compcredp);
	compnamep->cn_cred = savecompcredp;

	return (error);
}

/*
 * Global vfs data structures
 */
/*
 * XXX - strategy, bwrite are hand coded currently.  They should
 * go away with a merged buffer/block cache.
 */
int (**umap_vnodeop_p)();
struct vnodeopv_entry_desc umap_vnodeop_entries[] = {
	{ &vop_default_desc, umap_bypass },

	{ &vop_getattr_desc, umap_getattr },
	{ &vop_lock_desc, umap_lock },
	{ &vop_unlock_desc, umap_unlock },
	{ &vop_inactive_desc, umap_inactive },
	{ &vop_reclaim_desc, umap_reclaim },
	{ &vop_print_desc, umap_print },
	{ &vop_rename_desc, umap_rename },

	{ &vop_strategy_desc, umap_strategy },
	{ &vop_bwrite_desc, umap_bwrite },

	{ (struct vnodeop_desc*) NULL, (int(*)()) NULL }
};
struct vnodeopv_desc umap_vnodeop_opv_desc =
	{ &umap_vnodeop_p, umap_vnodeop_entries };