/*
 * (MPSAFE)
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
35 */ 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/kernel.h> 40 #include <sys/sysproto.h> 41 #include <sys/exec.h> 42 #include <sys/imgact.h> 43 #include <sys/imgact_aout.h> 44 #include <sys/mman.h> 45 #include <sys/proc.h> 46 #include <sys/priv.h> 47 #include <sys/resourcevar.h> 48 #include <sys/sysent.h> 49 #include <sys/stat.h> 50 #include <sys/vnode.h> 51 #include <sys/sysctl.h> 52 #include <sys/lock.h> 53 #include <sys/resident.h> 54 55 #include <vm/vm.h> 56 #include <vm/vm_param.h> 57 #include <vm/pmap.h> 58 #include <vm/vm_map.h> 59 #include <vm/vm_kern.h> 60 #include <vm/vm_extern.h> 61 62 static int exec_res_id = 0; 63 64 static TAILQ_HEAD(,vmresident) exec_res_list; 65 66 static MALLOC_DEFINE(M_EXEC_RES, "vmresident", "resident execs"); 67 68 /* lockmgr lock for protecting the exec_res_list */ 69 static struct lock exec_list_lock; 70 71 static void 72 vm_resident_init(void *__dummy) 73 { 74 lockinit(&exec_list_lock, "vmres", 0, 0); 75 TAILQ_INIT(&exec_res_list); 76 } 77 SYSINIT(vmres, SI_BOOT1_LOCK, SI_ORDER_ANY, vm_resident_init, 0); 78 79 static int 80 fill_xresident(struct vmresident *vr, struct xresident *in, struct thread *td) 81 { 82 struct stat st; 83 struct vnode *vrtmp; 84 int error = 0; 85 86 vrtmp = vr->vr_vnode; 87 88 in->res_entry_addr = vr->vr_entry_addr; 89 in->res_id = vr->vr_id; 90 if (vrtmp) { 91 char *freepath, *fullpath; 92 error = vn_fullpath(td->td_proc, vrtmp, &fullpath, &freepath, 0); 93 if (error != 0) { 94 /* could not retrieve cached path, return zero'ed string */ 95 bzero(in->res_file, MAXPATHLEN); 96 error = 0; 97 } else { 98 strlcpy(in->res_file, fullpath, sizeof(in->res_file)); 99 kfree(freepath, M_TEMP); 100 } 101 102 /* indicate that we are using the vnode */ 103 error = vget(vrtmp, LK_EXCLUSIVE); 104 if (error) 105 goto done; 106 107 /* retrieve underlying stat information and release vnode */ 108 error = vn_stat(vrtmp, &st, td->td_ucred); 109 vput(vrtmp); 110 if (error) 111 goto done; 112 113 
in->res_stat = st; 114 } 115 116 done: 117 if (error) 118 kprintf("fill_xresident, error = %d\n", error); 119 return (error); 120 } 121 122 static int 123 sysctl_vm_resident(SYSCTL_HANDLER_ARGS) 124 { 125 struct vmresident *vmres; 126 struct thread *td; 127 int error; 128 int count; 129 130 /* only super-user should call this sysctl */ 131 td = req->td; 132 if ((priv_check(td, PRIV_VM_RESIDENT)) != 0) 133 return EPERM; 134 135 error = count = 0; 136 137 if (exec_res_id == 0) 138 return error; 139 140 /* client queried for number of resident binaries */ 141 if (!req->oldptr) 142 return SYSCTL_OUT(req, 0, exec_res_id); 143 144 lockmgr(&exec_list_lock, LK_SHARED); 145 146 TAILQ_FOREACH(vmres, &exec_res_list, vr_link) { 147 struct xresident xres; 148 error = fill_xresident(vmres, &xres, td); 149 if (error != 0) 150 break; 151 152 error = SYSCTL_OUT(req, (void *)&xres, 153 sizeof(struct xresident)); 154 if (error != 0) 155 break; 156 } 157 lockmgr(&exec_list_lock, LK_RELEASE); 158 159 return (error); 160 } 161 SYSCTL_PROC(_vm, OID_AUTO, resident, CTLTYPE_OPAQUE|CTLFLAG_RD, 0, 0, 162 sysctl_vm_resident, "S,xresident", "resident executables (sys/resident.h)"); 163 164 int 165 exec_resident_imgact(struct image_params *imgp) 166 { 167 struct vmresident *vmres; 168 169 /* 170 * resident image activator 171 */ 172 lockmgr(&exec_list_lock, LK_SHARED); 173 if ((vmres = imgp->vp->v_resident) == NULL) { 174 lockmgr(&exec_list_lock, LK_RELEASE); 175 return(-1); 176 } 177 atomic_add_int(&vmres->vr_refs, 1); 178 lockmgr(&exec_list_lock, LK_RELEASE); 179 180 /* 181 * We want to exec the new vmspace without holding the lock to 182 * improve concurrency. 
183 */ 184 exec_new_vmspace(imgp, vmres->vr_vmspace); 185 imgp->resident = 1; 186 imgp->interpreted = 0; 187 imgp->proc->p_sysent = vmres->vr_sysent; 188 imgp->entry_addr = vmres->vr_entry_addr; 189 atomic_subtract_int(&vmres->vr_refs, 1); 190 191 return(0); 192 } 193 194 /* 195 * exec_sys_register(entry) 196 * 197 * Register ourselves for resident execution. Only root (i.e. a process with 198 * PRIV_VM_RESIDENT credentials) can do this. This 199 * will snapshot the vmspace and cause future exec's of the specified binary 200 * to use the snapshot directly rather then load & relocate a new copy. 201 * 202 * MPALMOSTSAFE 203 */ 204 int 205 sys_exec_sys_register(struct exec_sys_register_args *uap) 206 { 207 struct thread *td = curthread; 208 struct vmresident *vmres; 209 struct vnode *vp; 210 struct proc *p; 211 int error; 212 213 p = td->td_proc; 214 error = priv_check_cred(td->td_ucred, PRIV_VM_RESIDENT, 0); 215 if (error) 216 return(error); 217 218 if ((vp = p->p_textvp) == NULL) 219 return(ENOENT); 220 221 lockmgr(&exec_list_lock, LK_EXCLUSIVE); 222 223 if (vp->v_resident) { 224 lockmgr(&exec_list_lock, LK_RELEASE); 225 return(EEXIST); 226 } 227 228 vhold(vp); 229 vmres = kmalloc(sizeof(*vmres), M_EXEC_RES, M_WAITOK | M_ZERO); 230 vmres->vr_vnode = vp; 231 vmres->vr_sysent = p->p_sysent; 232 vmres->vr_id = ++exec_res_id; 233 vmres->vr_entry_addr = (intptr_t)uap->entry; 234 vmres->vr_vmspace = vmspace_fork(p->p_vmspace); /* XXX order */ 235 pmap_pinit2(vmspace_pmap(vmres->vr_vmspace)); 236 vp->v_resident = vmres; 237 238 TAILQ_INSERT_TAIL(&exec_res_list, vmres, vr_link); 239 lockmgr(&exec_list_lock, LK_RELEASE); 240 241 return(0); 242 } 243 244 /* 245 * exec_sys_unregister(id) 246 * 247 * Unregister the specified id. If an id of -1 is used unregister 248 * the registration associated with the current process. An id of -2 249 * unregisters everything. 
 *
 * MPALMOSTSAFE
 */
int
sys_exec_sys_unregister(struct exec_sys_unregister_args *uap)
{
	struct thread *td = curthread;
	struct vmresident *vmres;
	struct proc *p;
	int error;
	int id;
	int count;

	p = td->td_proc;
	/* only privileged (PRIV_VM_RESIDENT) callers may unregister */
	error = priv_check_cred(td->td_ucred, PRIV_VM_RESIDENT, 0);
	if (error)
		return(error);

	/*
	 * If id is -1, unregister ourselves
	 */
	lockmgr(&exec_list_lock, LK_EXCLUSIVE);

	/* resolve -1 to the id registered on our own text vnode, if any */
	if ((id = uap->id) == -1 && p->p_textvp && p->p_textvp->v_resident)
		id = p->p_textvp->v_resident->vr_id;

	/*
	 * Look for the registration
	 */
	error = ENOENT;
	count = 0;

restart:
	TAILQ_FOREACH(vmres, &exec_res_list, vr_link) {
		/* id == -2 means unregister every entry */
		if (id == -2 || vmres->vr_id == id) {
			/*
			 * Check race against exec: an exec in progress holds
			 * a ref on this entry (vr_refs).  Drop the list lock,
			 * sleep briefly, and rescan from the head -- the list
			 * may have changed while we slept.
			 */
			if (vmres->vr_refs) {
				lockmgr(&exec_list_lock, LK_RELEASE);
				tsleep(vmres, 0, "vmres", 1);
				lockmgr(&exec_list_lock, LK_EXCLUSIVE);
				goto restart;
			}

			/*
			 * Remove it
			 */
			TAILQ_REMOVE(&exec_res_list, vmres, vr_link);
			if (vmres->vr_vnode) {
				/* undo the vhold taken at registration */
				vmres->vr_vnode->v_resident = NULL;
				vdrop(vmres->vr_vnode);
				vmres->vr_vnode = NULL;
			}
			if (vmres->vr_vmspace) {
				/* release the snapshotted vmspace */
				vmspace_rel(vmres->vr_vmspace);
				vmres->vr_vmspace = NULL;
			}
			kfree(vmres, M_EXEC_RES);
			/* keep the registered-binary count in sync */
			exec_res_id--;
			error = 0;
			++count;
			/* removal invalidated the iterator; rescan */
			goto restart;
		}
	}
	lockmgr(&exec_list_lock, LK_RELEASE);

	/* report how many registrations were removed */
	if (error == 0)
		uap->sysmsg_result = count;
	return(error);
}