xref: /dragonfly/sys/kern/imgact_resident.c (revision 2b3f93ea)
1 /*
2  * (MPSAFE)
3  *
4  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
5  *
6  * This code is derived from software contributed to The DragonFly Project
7  * by Matthew Dillon <dillon@backplane.com>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in
17  *    the documentation and/or other materials provided with the
18  *    distribution.
19  * 3. Neither the name of The DragonFly Project nor the names of its
20  *    contributors may be used to endorse or promote products derived
21  *    from this software without specific, prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
27  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/sysmsg.h>
41 #include <sys/exec.h>
42 #include <sys/imgact.h>
43 #include <sys/mman.h>
44 #include <sys/proc.h>
45 #include <sys/caps.h>
46 #include <sys/resourcevar.h>
47 #include <sys/sysent.h>
48 #include <sys/stat.h>
49 #include <sys/vnode.h>
50 #include <sys/sysctl.h>
51 #include <sys/lock.h>
52 #include <sys/resident.h>
53 #include <sys/malloc.h>
54 
55 #include <vm/vm.h>
56 #include <vm/vm_param.h>
57 #include <vm/pmap.h>
58 #include <vm/vm_map.h>
59 #include <vm/vm_kern.h>
60 #include <vm/vm_extern.h>
61 
/*
 * Count of currently-registered resident images.  Incremented in
 * sys_exec_sys_register() (where it also serves as the id source) and
 * decremented in sys_exec_sys_unregister().
 */
static int exec_res_id = 0;

/* Global list of all registered resident images, protected by exec_list_lock */
static TAILQ_HEAD(,vmresident) exec_res_list;

static MALLOC_DEFINE(M_EXEC_RES, "vmresident", "resident execs");

/* lockmgr lock for protecting the exec_res_list */
static struct lock exec_list_lock;
70 
71 static void
vm_resident_init(void * __dummy)72 vm_resident_init(void *__dummy)
73 {
74 	lockinit(&exec_list_lock, "vmres", 0, 0);
75 	TAILQ_INIT(&exec_res_list);
76 }
77 SYSINIT(vmres, SI_BOOT1_LOCK, SI_ORDER_ANY, vm_resident_init, 0);
78 
79 static int
fill_xresident(struct vmresident * vr,struct xresident * in,struct thread * td)80 fill_xresident(struct vmresident *vr, struct xresident *in, struct thread *td)
81 {
82 	struct stat st;
83 	struct vnode *vrtmp;
84 	int error = 0;
85 
86 	vrtmp = vr->vr_vnode;
87 
88 	in->res_entry_addr = vr->vr_entry_addr;
89 	in->res_id = vr->vr_id;
90 	if (vrtmp) {
91 		char *freepath, *fullpath;
92 		error = vn_fullpath(td->td_proc, vrtmp, &fullpath, &freepath, 0);
93 		if (error != 0) {
94 			/* could not retrieve cached path, return zero'ed string */
95 			bzero(in->res_file, MAXPATHLEN);
96 			error = 0;
97 		} else {
98 			strlcpy(in->res_file, fullpath, sizeof(in->res_file));
99 			kfree(freepath, M_TEMP);
100 		}
101 
102 		/* indicate that we are using the vnode */
103 		error = vget(vrtmp, LK_EXCLUSIVE);
104 		if (error)
105 			goto done;
106 
107 		/* retrieve underlying stat information and release vnode */
108 		error = vn_stat(vrtmp, &st, td->td_ucred);
109 		vput(vrtmp);
110 		if (error)
111 			goto done;
112 
113 		in->res_stat = st;
114 	}
115 
116 done:
117 	if (error)
118 		kprintf("fill_xresident, error = %d\n", error);
119 	return (error);
120 }
121 
122 static int
sysctl_vm_resident(SYSCTL_HANDLER_ARGS)123 sysctl_vm_resident(SYSCTL_HANDLER_ARGS)
124 {
125 	struct vmresident *vmres;
126 	struct thread *td;
127 	int error;
128 	int count;
129 
130 	/* only super-user should call this sysctl */
131 	td = req->td;
132 	error = caps_priv_check_td(td, SYSCAP_NOVM_RESIDENT);
133 	if (error)
134 		return error;
135 
136 	count = 0;
137 
138 	if (exec_res_id == 0)
139 	    return error;
140 
141 	/* client queried for number of resident binaries */
142 	if (!req->oldptr)
143 	    return SYSCTL_OUT(req, 0, exec_res_id);
144 
145 	lockmgr(&exec_list_lock, LK_SHARED);
146 
147 	TAILQ_FOREACH(vmres, &exec_res_list, vr_link) {
148 		struct xresident xres;
149 		error = fill_xresident(vmres, &xres, td);
150 		if (error != 0)
151 			break;
152 
153 		error = SYSCTL_OUT(req, (void *)&xres,
154 				sizeof(struct xresident));
155 		if (error != 0)
156 			break;
157 	}
158 	lockmgr(&exec_list_lock, LK_RELEASE);
159 
160 	return (error);
161 }
162 SYSCTL_PROC(_vm, OID_AUTO, resident, CTLTYPE_OPAQUE|CTLFLAG_RD, 0, 0,
163   sysctl_vm_resident, "S,xresident", "resident executables (sys/resident.h)");
164 
/*
 * Resident image activator: if the vnode being exec'd has a resident
 * vmspace snapshot registered, activate directly from the snapshot
 * instead of loading and relocating the binary.
 *
 * Returns -1 when no snapshot is registered for the vnode (not our
 * image; let other activators try), 0 on success.
 */
int
exec_resident_imgact(struct image_params *imgp)
{
	struct vmresident *vmres;

	/*
	 * resident image activator
	 */
	lockmgr(&exec_list_lock, LK_SHARED);
	if ((vmres = imgp->vp->v_resident) == NULL) {
	    lockmgr(&exec_list_lock, LK_RELEASE);
	    return(-1);
	}
	/*
	 * Bump vr_refs while still holding the list lock so a concurrent
	 * sys_exec_sys_unregister() (which waits for vr_refs to reach 0
	 * before freeing) cannot destroy vmres out from under us.
	 */
	atomic_add_int(&vmres->vr_refs, 1);
	lockmgr(&exec_list_lock, LK_RELEASE);

	/*
	 * We want to exec the new vmspace without holding the lock to
	 * improve concurrency.
	 */
	exec_new_vmspace(imgp, vmres->vr_vmspace);
	imgp->resident = 1;
	imgp->interpreted = 0;
	imgp->proc->p_sysent = vmres->vr_sysent;
	imgp->entry_addr = vmres->vr_entry_addr;
	atomic_subtract_int(&vmres->vr_refs, 1);

	return(0);
}
194 
195 /*
196  * exec_sys_register(entry)
197  *
198  * Register ourselves for resident execution.  Only root (i.e. a process with
199  * PRIV_VM_RESIDENT credentials) can do this.  This
200  * will snapshot the vmspace and cause future exec's of the specified binary
201  * to use the snapshot directly rather then load & relocate a new copy.
202  *
203  * MPALMOSTSAFE
204  */
205 int
sys_exec_sys_register(struct sysmsg * sysmsg,const struct exec_sys_register_args * uap)206 sys_exec_sys_register(struct sysmsg *sysmsg,
207 		      const struct exec_sys_register_args *uap)
208 {
209     struct thread *td = curthread;
210     struct vmresident *vmres;
211     struct vnode *vp;
212     struct proc *p;
213     int error;
214 
215     p = td->td_proc;
216     error = caps_priv_check_td(td, SYSCAP_NOVM_RESIDENT);
217     if (error)
218 	return(error);
219 
220     if ((vp = p->p_textvp) == NULL)
221 	return(ENOENT);
222 
223     lockmgr(&exec_list_lock, LK_EXCLUSIVE);
224 
225     if (vp->v_resident) {
226 	lockmgr(&exec_list_lock, LK_RELEASE);
227 	return(EEXIST);
228     }
229 
230     vhold(vp);
231     vmres = kmalloc(sizeof(*vmres), M_EXEC_RES, M_WAITOK | M_ZERO);
232     vmres->vr_vnode = vp;
233     vmres->vr_sysent = p->p_sysent;
234     vmres->vr_id = ++exec_res_id;
235     vmres->vr_entry_addr = (intptr_t)uap->entry;
236     vmres->vr_vmspace = vmspace_fork(p->p_vmspace, NULL, NULL); /* XXX order */
237     pmap_pinit2(vmspace_pmap(vmres->vr_vmspace));
238     vp->v_resident = vmres;
239 
240     TAILQ_INSERT_TAIL(&exec_res_list, vmres, vr_link);
241     lockmgr(&exec_list_lock, LK_RELEASE);
242 
243     return(0);
244 }
245 
/*
 * exec_sys_unregister(id)
 *
 *	Unregister the specified id.  If an id of -1 is used unregister
 *	the registration associated with the current process.  An id of -2
 *	unregisters everything.
 *
 *	On success sysmsg_result holds the number of registrations
 *	removed; returns ENOENT if nothing matched.
 *
 * MPALMOSTSAFE
 */
int
sys_exec_sys_unregister(struct sysmsg *sysmsg,
			const struct exec_sys_unregister_args *uap)
{
    struct thread *td = curthread;
    struct vmresident *vmres;
    struct proc *p;
    int error;
    int id;
    int count;

    p = td->td_proc;
    error = caps_priv_check_td(td, SYSCAP_NOVM_RESIDENT);
    if (error)
	return(error);

    /*
     * If id is -1, unregister ourselves
     */
    lockmgr(&exec_list_lock, LK_EXCLUSIVE);

    if ((id = uap->id) == -1 && p->p_textvp && p->p_textvp->v_resident)
	id = p->p_textvp->v_resident->vr_id;

    /*
     * Look for the registration
     */
    error = ENOENT;
    count = 0;

restart:
    TAILQ_FOREACH(vmres, &exec_res_list, vr_link) {
	if (id == -2 || vmres->vr_id == id) {
	    /*
	     * Check race against exec: an exec in progress holds a
	     * transient reference (vr_refs) on this entry.  Drop the
	     * list lock, sleep one tick, and rescan from the top --
	     * the list may have changed while it was unlocked.
	     */
	    if (vmres->vr_refs) {
		lockmgr(&exec_list_lock, LK_RELEASE);
		tsleep(vmres, 0, "vmres", 1);
		lockmgr(&exec_list_lock, LK_EXCLUSIVE);
		goto restart;
	    }

	    /*
	     * Remove it: unhook from the vnode and the list, drop the
	     * vnode hold and vmspace reference, then free.  Restart
	     * the scan afterwards -- id == -2 may match more entries,
	     * and TAILQ_FOREACH cannot safely continue past a removal.
	     */
	    TAILQ_REMOVE(&exec_res_list, vmres, vr_link);
	    if (vmres->vr_vnode) {
		vmres->vr_vnode->v_resident = NULL;
		vdrop(vmres->vr_vnode);
		vmres->vr_vnode = NULL;
	    }
	    if (vmres->vr_vmspace) {
		vmspace_rel(vmres->vr_vmspace);
		vmres->vr_vmspace = NULL;
	    }
	    kfree(vmres, M_EXEC_RES);
	    exec_res_id--;
	    error = 0;
	    ++count;
	    goto restart;
	}
    }
    lockmgr(&exec_list_lock, LK_RELEASE);

    /* report how many registrations were removed */
    if (error == 0)
	sysmsg->sysmsg_result = count;
    return(error);
}
324 
325