/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_varsym.c,v 1.9 2007/04/30 07:18:54 dillon Exp $
 */

/*
 * This module implements variable storage and management for variant
 * symlinks.  These variables may also be used for general purposes.
 */

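/*
 * Illustrative example (not part of the original source): a symlink
 * whose stored contents are "docs-${LANG}" resolves to "docs-en" for
 * a process whose LANG variable expands to "en", because namei runs
 * the link contents through varsymreplace() below.  Variables are set
 * at process, user, prison or system scope via the varsym_set()
 * system call.
 */
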
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ucred.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/jail.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/varsym.h>
#include <sys/sysproto.h>

#include <sys/mplock2.h>

MALLOC_DEFINE(M_VARSYM, "varsym", "variable sets for variant symlinks");

struct varsymset	varsymset_sys;

/*
 * Initialize the variant symlink subsystem
 */
static void
varsym_sysinit(void *dummy)
{
    varsymset_init(&varsymset_sys, NULL);
}
SYSINIT(announce, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, varsym_sysinit, NULL);

/*
 * varsymreplace() - called from namei
 *
 *	Do variant symlink variable substitution
 */
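/*
 * Illustrative example (not from the original source): given link
 * contents "/usr/obj/${MACHINE}/bin" and a variable MACHINE resolving
 * to "x86_64" at the caller's scope, the buffer is rewritten in place
 * to "/usr/obj/x86_64/bin" and the new total length is returned.
 * -1 is returned if an expansion would overflow maxlen.
 */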
int
varsymreplace(char *cp, int linklen, int maxlen)
{
    int rlen;
    int xlen;
    int nlen;
    int i;
    varsym_t var;

    rlen = linklen;
    while (linklen > 1) {
	if (cp[0] == '$' && cp[1] == '{') {
	    for (i = 2; i < linklen; ++i) {
		if (cp[i] == '}')
		    break;
	    }
	    if (i < linklen &&
		(var = varsymfind(VARSYM_ALL_MASK, cp + 2, i - 2)) != NULL
	    ) {
		xlen = i + 1;			/* bytes to strike */
		nlen = strlen(var->vs_data);	/* bytes to add */
		if (linklen + nlen - xlen >= maxlen) {
		    varsymdrop(var);
		    return(-1);
		}
		KKASSERT(linklen >= xlen);
		if (linklen != xlen)
		    bcopy(cp + xlen, cp + nlen, linklen - xlen);
		bcopy(var->vs_data, cp, nlen);
		varsymdrop(var);	/* release ref acquired by varsymfind() */
		linklen += nlen - xlen;	/* new relative length */
		rlen += nlen - xlen;	/* returned total length */
		cp += nlen;		/* adjust past replacement */
		linklen -= nlen;	/* adjust past replacement */
		maxlen -= nlen;		/* adjust past replacement */
	    } else {
		/*
		 * It's ok if i points to the '}', it will simply be
		 * skipped.  i could also have hit linklen.
		 */
		cp += i;
		linklen -= i;
		maxlen -= i;
	    }
	} else {
	    ++cp;
	    --linklen;
	    --maxlen;
	}
    }
    return(rlen);
}

/*
 * varsym_set() system call
 *
 * (int level, const char *name, const char *data)
 *
 * MPALMOSTSAFE
 */
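/*
 * Note: a NULL data pointer deletes the variable at the given level.
 * When data is supplied, any existing variable of the same name is
 * deleted first, since varsymmake() appends rather than replaces.
 */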
int
sys_varsym_set(struct varsym_set_args *uap)
{
    char name[MAXVARSYM_NAME];
    char *buf;
    struct thread *td;
    struct proc *p;
    struct lwp *lp;
    int error;

    td = curthread;
    lp = td->td_lwp;
    p = lp ? lp->lwp_proc : NULL;

    if ((error = copyinstr(uap->name, name, sizeof(name), NULL)) != 0)
	goto done2;
    buf = kmalloc(MAXVARSYM_DATA, M_TEMP, M_WAITOK);
    if (uap->data &&
	(error = copyinstr(uap->data, buf, MAXVARSYM_DATA, NULL)) != 0)
    {
	goto done1;
    }

    get_mplock();

    switch(uap->level) {
    case VARSYM_SYS:
	if (lp != NULL && td->td_ucred->cr_prison != NULL)
	    uap->level = VARSYM_PRISON;
	/* fall through */
    case VARSYM_PRISON:
	if (lp != NULL &&
	    (error = priv_check_cred(td->td_ucred, PRIV_VARSYM_SYS, 0)) != 0)
	    break;
	/* fall through */
    case VARSYM_USER:
	/* XXX check jail / implement per-jail user */
	/* fall through */
    case VARSYM_PROC:
	if (uap->data) {
	    (void)varsymmake(uap->level, name, NULL);
	    error = varsymmake(uap->level, name, buf);
	} else {
	    error = varsymmake(uap->level, name, NULL);
	}
	break;
    }
    rel_mplock();
done1:
    kfree(buf, M_TEMP);
done2:
    return(error);
}

/*
 * varsym_get() system call
 *
 * (int mask, const char *wild, char *buf, int bufsize)
 *
 * MPALMOSTSAFE
 */
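/*
 * Note: sysmsg_result is set to the length of the variable's data,
 * including the terminating NUL, whether or not it fit in the user
 * buffer, so a caller can detect truncation and retry with a larger
 * buffer.
 */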
int
sys_varsym_get(struct varsym_get_args *uap)
{
    char wild[MAXVARSYM_NAME];
    varsym_t sym;
    int error;
    int dlen;

    get_mplock();
    if ((error = copyinstr(uap->wild, wild, sizeof(wild), NULL)) != 0)
	goto done;
    sym = varsymfind(uap->mask, wild, strlen(wild));
    if (sym == NULL) {
	error = ENOENT;
	goto done;
    }
    dlen = strlen(sym->vs_data);
    if (dlen < uap->bufsize) {
	copyout(sym->vs_data, uap->buf, dlen + 1);
    } else if (uap->bufsize) {
	copyout("", uap->buf, 1);
    }
    uap->sysmsg_result = dlen + 1;
    varsymdrop(sym);
done:
    rel_mplock();
    return(error);
}

/*
 * varsym_list() system call
 *
 * (int level, char *buf, int maxsize, int *marker)
 *
 * MPALMOSTSAFE
 */
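/*
 * The caller iterates by starting with *marker == 0.  Each call copies
 * out as many name/data string pairs (each NUL-terminated) as fit in
 * buf and advances the marker; a marker of -1 on return indicates the
 * set has been exhausted.  EOVERFLOW is returned if not even one pair
 * fits.
 */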
int
sys_varsym_list(struct varsym_list_args *uap)
{
	struct varsymset *vss;
	struct varsyment *ve;
	struct thread *td;
	struct proc *p;
	struct lwp *lp;
	int i;
	int error;
	int bytes;
	int earlyterm;
	int marker;

	/*
	 * Get the marker from userspace.
	 */
	get_mplock();
	if ((error = copyin(uap->marker, &marker, sizeof(marker))) != 0)
		goto done;

	/*
	 * Figure out the varsym set.
	 */
	td = curthread;
	lp = td->td_lwp;
	p = lp ? lp->lwp_proc : NULL;

	vss = NULL;

	switch (uap->level) {
	case VARSYM_PROC:
		if (p)
			vss = &p->p_varsymset;
		break;
	case VARSYM_USER:
		if (lp)
			vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
		break;
	case VARSYM_SYS:
		vss = &varsymset_sys;
		break;
	case VARSYM_PRISON:
		if (lp && td->td_ucred->cr_prison)
			vss = &td->td_ucred->cr_prison->pr_varsymset;
		break;
	}
	if (vss == NULL) {
		error = EINVAL;
		goto done;
	}

	/*
	 * Loop through the variables and dump them to uap->buf
	 */
	i = 0;
	bytes = 0;
	earlyterm = 0;

	lockmgr(&vss->vx_lock, LK_SHARED);
	TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
		varsym_t sym = ve->ve_sym;
		int namelen = strlen(sym->vs_name);
		int datalen = strlen(sym->vs_data);
		int totlen = namelen + datalen + 2;

		/*
		 * Skip to our index point
		 */
		if (i < marker) {
			++i;
			continue;
		}

		/*
		 * Stop if there is insufficient space in the user buffer.
		 * If we haven't stored anything yet return EOVERFLOW.
		 * Note that the marker index (i) does not change.
		 */
		if (bytes + totlen > uap->maxsize) {
			if (bytes == 0)
				error = EOVERFLOW;
			earlyterm = 1;
			break;
		}

		error = copyout(sym->vs_name, uap->buf + bytes, namelen + 1);
		if (error == 0) {
			bytes += namelen + 1;
			error = copyout(sym->vs_data, uap->buf + bytes, datalen + 1);
			if (error == 0)
				bytes += datalen + 1;
			else
				bytes -= namelen + 1;	/* revert if error */
		}
		if (error) {
			earlyterm = 1;
			break;
		}
		++i;
	}
	lockmgr(&vss->vx_lock, LK_RELEASE);

	/*
	 * Save the marker back.  If no error occurred and earlyterm is
	 * clear, the marker is set to -1, indicating that the variable
	 * list has been exhausted.  If no error occurred the number of
	 * bytes loaded into the buffer will be returned, otherwise the
	 * syscall code returns -1.
	 */
	if (error == 0 && earlyterm == 0)
		marker = -1;
	else
		marker = i;
	if (error == 0)
		error = copyout(&marker, uap->marker, sizeof(marker));
	uap->sysmsg_result = bytes;
done:
	rel_mplock();
	return(error);
}

/*
 * Look up a variant symlink variable in the specified set.  The caller
 * must hold the set's lock.  XXX use a hash table.
 */
static
struct varsyment *
varsymlookup(struct varsymset *vss, const char *name, int namelen)
{
    struct varsyment *ve;

    KKASSERT(lockstatus(&vss->vx_lock, curthread) != 0);
    TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
	varsym_t var = ve->ve_sym;
	if (var->vs_namelen == namelen &&
	    bcmp(name, var->vs_name, namelen) == 0
	) {
	    return(ve);
	}
    }
    return(NULL);
}

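/*
 * Helper for varsymfind(): hand-over-hand switch to the next set,
 * releasing the lock on the previously examined set (if any) and
 * acquiring a shared lock on the new one.
 */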
static
void
vsslock(struct varsymset **vss, struct varsymset *n)
{
	if (*vss) {
		lockmgr(&(*vss)->vx_lock, LK_RELEASE);
	}
	lockmgr(&n->vx_lock, LK_SHARED);
	*vss = n;
}

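/*
 * Locate a variable by name.  The sets selected by mask are searched
 * in order of decreasing specificity: per-process, then per-user, then
 * the prison set (for jailed processes) or the system set.  On success
 * the symbol is returned with an additional reference which the caller
 * must release with varsymdrop().
 */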
varsym_t
varsymfind(int mask, const char *name, int namelen)
{
    struct varsyment *ve = NULL;
    struct varsymset *vss = NULL;
    struct thread *td;
    struct lwp *lp;
    struct proc *p;
    varsym_t sym;

    td = curthread;
    lp = td->td_lwp;
    p = lp ? lp->lwp_proc : NULL;

    if ((mask & (VARSYM_PROC_MASK|VARSYM_USER_MASK)) && lp != NULL) {
	if (mask & VARSYM_PROC_MASK) {
	    vsslock(&vss, &p->p_varsymset);
	    ve = varsymlookup(vss, name, namelen);
	}
	if (ve == NULL && (mask & VARSYM_USER_MASK)) {
	    vsslock(&vss, &td->td_ucred->cr_uidinfo->ui_varsymset);
	    ve = varsymlookup(vss, name, namelen);
	}
    }
    if (ve == NULL && (mask & VARSYM_SYS_MASK)) {
	if (lp != NULL && td->td_ucred->cr_prison) {
	    vsslock(&vss, &td->td_ucred->cr_prison->pr_varsymset);
	    ve = varsymlookup(vss, name, namelen);
	} else {
	    vsslock(&vss, &varsymset_sys);
	    ve = varsymlookup(vss, name, namelen);
	}
    }
    if (ve) {
	sym = ve->ve_sym;
	atomic_add_int(&sym->vs_refs, 1);
    } else {
	sym = NULL;
    }
    if (vss)
	lockmgr(&vss->vx_lock, LK_RELEASE);
    return sym;
}

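/*
 * Create or delete a variable at the specified level.  A non-NULL data
 * argument creates (appends) a new entry; a NULL data argument deletes
 * an existing entry.  Returns 0 on success, EINVAL if no set exists at
 * that level for the caller, E2BIG if the set has reached its size
 * limit, or ENOENT when deleting a variable that does not exist.
 */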
int
varsymmake(int level, const char *name, const char *data)
{
    struct varsymset *vss = NULL;
    struct varsyment *ve;
    struct thread *td;
    struct proc *p;
    struct lwp *lp;
    varsym_t sym;
    int namelen = strlen(name);
    int datalen;
    int error;

    td = curthread;
    lp = td->td_lwp;
    p = lp ? lp->lwp_proc : NULL;

    switch(level) {
    case VARSYM_PROC:
	if (p)
	    vss = &p->p_varsymset;
	break;
    case VARSYM_USER:
	if (lp)
	    vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
	break;
    case VARSYM_SYS:
	vss = &varsymset_sys;
	break;
    case VARSYM_PRISON:
	if (lp && td->td_ucred->cr_prison)
	    vss = &td->td_ucred->cr_prison->pr_varsymset;
	break;
    }
    if (vss == NULL) {
	return EINVAL;
    }
    lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
    if (data && vss->vx_setsize >= MAXVARSYM_SET) {
	error = E2BIG;
    } else if (data) {
	datalen = strlen(data);
	ve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
	sym = kmalloc(sizeof(struct varsym) + namelen + datalen + 2, M_VARSYM, M_WAITOK);
	ve->ve_sym = sym;
	sym->vs_refs = 1;
	sym->vs_namelen = namelen;
	sym->vs_name = (char *)(sym + 1);
	sym->vs_data = sym->vs_name + namelen + 1;
	strcpy(sym->vs_name, name);
	strcpy(sym->vs_data, data);
	TAILQ_INSERT_TAIL(&vss->vx_queue, ve, ve_entry);
	vss->vx_setsize += sizeof(struct varsyment) + sizeof(struct varsym) + namelen + datalen + 8;
	error = 0;
    } else {
	if ((ve = varsymlookup(vss, name, namelen)) != NULL) {
	    TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
	    vss->vx_setsize -= sizeof(struct varsyment) + sizeof(struct varsym) + namelen + strlen(ve->ve_sym->vs_data) + 8;
	    varsymdrop(ve->ve_sym);
	    kfree(ve, M_VARSYM);
	    error = 0;
	} else {
	    error = ENOENT;
	}
    }
    lockmgr(&vss->vx_lock, LK_RELEASE);
    return(error);
}

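/*
 * Release a reference on a variable, freeing it when the last
 * reference goes away.
 */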
void
varsymdrop(varsym_t sym)
{
    KKASSERT(sym->vs_refs > 0);
    if (atomic_fetchadd_int(&sym->vs_refs, -1) == 1) {
	kfree(sym, M_VARSYM);
    }
}

/*
 * Insert a duplicate of ve in vss.  Does not do any locking, so it is
 * the caller's responsibility to make sure nobody else can mess with
 * the TAILQ in vss at the same time.
 */
static void
varsymdup(struct varsymset *vss, struct varsyment *ve)
{
    struct varsyment *nve;

    nve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
    nve->ve_sym = ve->ve_sym;
    ++nve->ve_sym->vs_refs;	/* can't be reached, no need for atomic add */
    /*
     * We're only called through varsymset_init() so vss is not yet reachable,
     * no need to lock.
     */
    TAILQ_INSERT_TAIL(&vss->vx_queue, nve, ve_entry);
}

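/*
 * Initialize a variable set, optionally populating it with duplicates
 * of the entries in an existing set.
 */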
void
varsymset_init(struct varsymset *vss, struct varsymset *copy)
{
    struct varsyment *ve;

    TAILQ_INIT(&vss->vx_queue);
    lockinit(&vss->vx_lock, "vx", 0, 0);
    if (copy) {
	TAILQ_FOREACH(ve, &copy->vx_queue, ve_entry) {
	    varsymdup(vss, ve);
	}
	vss->vx_setsize = copy->vx_setsize;
    }
}

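/*
 * Remove and drop all entries in a variable set and reset its size
 * accounting.  The set's lock remains initialized, so the set may be
 * reused.
 */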
void
varsymset_clean(struct varsymset *vss)
{
    struct varsyment *ve;

    lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
    while ((ve = TAILQ_FIRST(&vss->vx_queue)) != NULL) {
	TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
	varsymdrop(ve->ve_sym);
	kfree(ve, M_VARSYM);
    }
    vss->vx_setsize = 0;
    lockmgr(&vss->vx_lock, LK_RELEASE);
}