/* $NetBSD: cpu_exec.c,v 1.60 2011/01/16 09:50:44 tsutsui Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by Ralph
 * Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	8.3 (Berkeley) 1/12/94
 */

/*
 * Machine-dependent exec(2) support for NetBSD/mips: ECOFF hooks, a
 * legacy 32-bit ELF loader, and the MD probe/coredump hooks used by the
 * machine-independent ELF32/ELF64 exec code to select a MIPS ABI.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_exec.c,v 1.60 2011/01/16 09:50:44 tsutsui Exp $");

#include "opt_compat_netbsd.h"
#include "opt_compat_ultrix.h"
#include "opt_execfmt.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>

#include <uvm/uvm_extern.h>

#include <compat/common/compat_util.h>

#ifdef EXEC_ECOFF
#include <sys/exec_ecoff.h>
#endif
#include <sys/exec_elf.h>			/* mandatory */
#include <machine/reg.h>
#include <mips/regnum.h>			/* symbolic register indices */

int	mips_elf_makecmds(struct lwp *, struct exec_package *);

#ifdef EXEC_ECOFF
/*
 * cpu_exec_ecoff_setregs:
 *
 *	MD register fixup after ECOFF exec: load the gp_value from the
 *	ECOFF a.out header into the new image's GP register so
 *	gp-relative addressing works from the first instruction.
 *	("stack" is unused here.)
 */
void
cpu_exec_ecoff_setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack)
{
	struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
	struct frame *f = l->l_md.md_regs;

	f->f_regs[_R_GP] = (register_t)execp->a.gp_value;
}

/*
 * cpu_exec_ecoff_probe()
 *	cpu-dependent ECOFF format hook for execve().
 *
 * Do any machine-dependent diddling of the exec package when doing ECOFF.
 */
int
cpu_exec_ecoff_probe(struct lwp *l, struct exec_package *epp)
{

	/* NetBSD/mips does not have native ECOFF binaries. */
	return ENOEXEC;
}
#endif /* EXEC_ECOFF */

/*
 * mips_elf_makecmds (l, epp)
 *
 *	Test if an executable is a MIPS ELF executable.  If it is,
 *	try to load it.
 *
 *	Builds the exec package by hand from the 32-bit ELF program
 *	headers: one vmcmd per PT_LOAD segment (paged-in text, read-in
 *	writable data, zero-fill for any bss residue) plus the two
 *	stack vmcmds.  Returns 0 on success or ENOEXEC/another errno
 *	if the image is rejected or a read fails.
 *
 *	NOTE(review): this is the legacy ("obsolete elf" in the debug
 *	printfs) loader; the generic sys/kern ELF code normally handles
 *	native binaries -- confirm which execsw entry points here.
 */

int
mips_elf_makecmds (struct lwp *l, struct exec_package *epp)
{
	Elf32_Ehdr *ex = (Elf32_Ehdr *)epp->ep_hdr;
	Elf32_Phdr ph;
	int i, error;
	size_t resid;

	/* Make sure we got enough data to check magic numbers... */
	if (epp->ep_hdrvalid < sizeof (Elf32_Ehdr)) {
#ifdef DIAGNOSTIC
		if (epp->ep_hdrlen < sizeof (Elf32_Ehdr))
			printf ("mips_elf_makecmds: execsw hdrsize too short!\n");
#endif
		return ENOEXEC;
	}

	/* See if it's got the basic elf magic number leadin... */
	if (memcmp(ex->e_ident, ELFMAG, SELFMAG) != 0) {
		return ENOEXEC;
	}

	/* XXX: Check other magic numbers here. */
	/* Only 32-bit class objects are handled by this loader. */
	if (ex->e_ident[EI_CLASS] != ELFCLASS32) {
		return ENOEXEC;
	}

	/* See if we got any program header information... */
	if (!ex->e_phoff || !ex->e_phnum) {
		return ENOEXEC;
	}

	/* Refuse to exec a file open for writing (ETXTBSY semantics). */
	error = vn_marktext(epp->ep_vp);
	if (error)
		return (error);

	/* Set the entry point... */
	epp->ep_entry = ex->e_entry;
	epp->ep_taddr = 0;
	epp->ep_tsize = 0;
	epp->ep_daddr = 0;
	epp->ep_dsize = 0;

	/*
	 * Walk the program header table, reading one Elf32_Phdr at a
	 * time from the vnode and queueing a vmcmd for each PT_LOAD.
	 */
	for (i = 0; i < ex->e_phnum; i++) {
#ifdef DEBUG
		/*printf("obsolete elf: mapping %x %x %x\n", resid);*/
#endif
		if ((error = vn_rdwr(UIO_READ, epp->ep_vp, (void *)&ph,
		    sizeof ph, ex->e_phoff + i * sizeof ph,
		    UIO_SYSSPACE, IO_NODELOCKED,
		    l->l_cred, &resid, NULL))
		    != 0)
			return error;

		/* A short read means a truncated program header table. */
		if (resid != 0) {
			return ENOEXEC;
		}

		/* We only care about loadable sections... */
		if (ph.p_type == PT_LOAD) {
			int prot = VM_PROT_READ | VM_PROT_EXECUTE;
			int residue;
			unsigned vaddr, offset, length;

			vaddr = ph.p_vaddr;
			offset = ph.p_offset;
			length = ph.p_filesz;
			/* Bytes of p_memsz beyond p_filesz are bss. */
			residue = ph.p_memsz - ph.p_filesz;

			if (ph.p_flags & PF_W) {
				/*
				 * Writable segment: track the lowest
				 * vaddr as ep_daddr and read the file
				 * contents in (readvn), since a paged
				 * mapping can't be private-writable
				 * here.
				 */
				prot |= VM_PROT_WRITE;
				if (!epp->ep_daddr || vaddr < epp->ep_daddr)
					epp->ep_daddr = vaddr;
				epp->ep_dsize += ph.p_memsz;
				/* Read the data from the file... */
				NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
				    length, vaddr,
				    epp->ep_vp, offset, prot);
#ifdef OLD_ELF_DEBUG
/*XXX*/		printf(
	"obsolete elf: NEW_VMCMD len %x va %x off %x prot %x residue %x\n",
			length, vaddr, offset, prot, residue);
#endif /*ELF_DEBUG*/

				if (residue) {
					/*
					 * Round the mapping out to page
					 * boundaries and recompute how
					 * much bss still needs a
					 * zero-fill vmcmd below.
					 */
					vaddr &= ~(PAGE_SIZE - 1);
					offset &= ~(PAGE_SIZE - 1);
					length = roundup (length + ph.p_vaddr
					    - vaddr, PAGE_SIZE);
					residue = (ph.p_vaddr + ph.p_memsz)
					    - (vaddr + length);
				}
			} else {
				/*
				 * Read-only (text) segment: page-align
				 * the mapping and demand-page it
				 * directly from the vnode (pagedvn).
				 */
				vaddr &= ~(PAGE_SIZE - 1);
				offset &= ~(PAGE_SIZE - 1);
				length = roundup (length + ph.p_vaddr - vaddr,
				    PAGE_SIZE);
				residue = (ph.p_vaddr + ph.p_memsz)
				    - (vaddr + length);
				if (!epp->ep_taddr || vaddr < epp->ep_taddr)
					epp->ep_taddr = vaddr;
				epp->ep_tsize += ph.p_memsz;
				/* Map the data from the file... */
				NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn,
				    length, vaddr,
				    epp->ep_vp, offset, prot);
			}
			/* If part of the segment is just zeros (e.g., bss),
			   map that. */
			if (residue > 0) {
#ifdef OLD_ELF_DEBUG
/*XXX*/		printf(
	"old elf:resid NEW_VMCMD len %x va %x off %x prot %x residue %x\n",
			length, vaddr + length, offset, prot, residue);
#endif /*ELF_DEBUG*/

				NEW_VMCMD (&epp->ep_vmcmds, vmcmd_map_zero,
				    residue, vaddr + length,
				    NULLVP, 0, prot);
			}
		}
	}

	/* Stack grows down from USRSTACK; size capped by RLIMIT_STACK. */
	epp->ep_maxsaddr = USRSTACK - MAXSSIZ;
	epp->ep_minsaddr = USRSTACK;
	epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 *
	 * note that in memory, things assumed to be: 0 ....... ep_maxsaddr
	 * <stack> ep_minsaddr
	 */
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr, NULLVP, 0, VM_PROT_NONE, VMCMD_STACK);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    (epp->ep_minsaddr - epp->ep_ssize), NULLVP, 0,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, VMCMD_STACK);

	return 0;
}

#if EXEC_ELF32
/*
 * mips_netbsd_elf32_probe:
 *
 *	MD probe hook for the MI 32-bit ELF exec code.  Rejects binaries
 *	whose e_flags architecture level exceeds the running CPU
 *	(cpu_arch), then selects the process ABI (N32 or O32) from the
 *	EF_MIPS_ABI/EF_MIPS_ABI2 bits, recording it in p_md.md_abi and
 *	choosing the matching interpreter suffix for
 *	compat_elf_check_interp().  Returns 0 if runnable, ENOEXEC
 *	otherwise.
 */
int
mips_netbsd_elf32_probe(struct lwp *l, struct exec_package *epp, void *eh0,
	char *itp, vaddr_t *start_p)
{
	struct proc * const p = l->l_proc;
	const Elf32_Ehdr * const eh = eh0;
	int old_abi = p->p_md.md_abi;
	const char *itp_suffix = NULL;

	/*
	 * Verify we can support the architecture.
	 */
	switch (eh->e_flags & EF_MIPS_ARCH) {
	case EF_MIPS_ARCH_1:
		break;
	case EF_MIPS_ARCH_2:
		if (cpu_arch < CPU_ARCH_MIPS2)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_3:
		if (cpu_arch < CPU_ARCH_MIPS3)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_4:
		if (cpu_arch < CPU_ARCH_MIPS4)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_5:
		if (cpu_arch < CPU_ARCH_MIPS5)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_32:
	case EF_MIPS_ARCH_64:
		/* MIPS32/MIPS64 binaries need a MIPS32/MIPS64 CPU. */
		if (!CPUISMIPSNN)
			return ENOEXEC;
		break;
	}

	switch (eh->e_flags & (EF_MIPS_ABI|EF_MIPS_ABI2)) {
#if !defined(__mips_o32)
	case EF_MIPS_ABI2:
		/* N32 ABI -- only when the kernel itself is not O32. */
		itp_suffix = "n32";
		p->p_md.md_abi = _MIPS_BSD_API_N32;
		if (old_abi != p->p_md.md_abi)
			printf("pid %d(%s): ABI set to N32 (e_flags=%#x)\n", p->p_pid, p->p_comm, eh->e_flags);
		break;
#endif
#ifdef COMPAT_16
	case 0:
		/*
		 * Old binaries with no ABI flag: relocate the start
		 * address for compatibility, then treat as O32.
		 */
		*start_p = ELF32_LINK_ADDR;
		/* FALLTHROUGH */
#endif
	case EF_MIPS_ABI_O32:
		itp_suffix = "o32";
		p->p_md.md_abi = _MIPS_BSD_API_O32;
		if (old_abi != p->p_md.md_abi)
			printf("pid %d(%s): ABI set to O32 (e_flags=%#x)\n", p->p_pid, p->p_comm, eh->e_flags);
		break;
	default:
		return ENOEXEC;
	}

	(void)compat_elf_check_interp(epp, itp, itp_suffix);
	return 0;
}

/*
 * coredump_elf32_setup:
 *
 *	Fill in the MIPS-specific e_flags of a 32-bit ELF core dump
 *	header: the highest architecture level the CPU supports
 *	(from cpu_arch) and the ABI bits matching the process's
 *	p_md.md_abi.
 */
void
coredump_elf32_setup(struct lwp *l, void *eh0)
{
	struct proc * const p = l->l_proc;
	Elf32_Ehdr * const eh = eh0;

	/*
	 * Mark the type of CPU that the dump happened on.
	 */
	if (cpu_arch & CPU_ARCH_MIPS64) {
		eh->e_flags |= EF_MIPS_ARCH_64;
	} else if (cpu_arch & CPU_ARCH_MIPS32) {
		eh->e_flags |= EF_MIPS_ARCH_32;
	} else if (cpu_arch & CPU_ARCH_MIPS5) {
		eh->e_flags |= EF_MIPS_ARCH_5;
	} else if (cpu_arch & CPU_ARCH_MIPS4) {
		eh->e_flags |= EF_MIPS_ARCH_4;
	} else if (cpu_arch & CPU_ARCH_MIPS3) {
		eh->e_flags |= EF_MIPS_ARCH_3;
	} else if (cpu_arch & CPU_ARCH_MIPS2) {
		eh->e_flags |= EF_MIPS_ARCH_2;
	} else {
		eh->e_flags |= EF_MIPS_ARCH_1;
	}

	/* Record the process ABI in the header's ABI flag bits. */
	switch (p->p_md.md_abi) {
	case _MIPS_BSD_API_N32:
		eh->e_flags |= EF_MIPS_ABI2;
		break;
	case _MIPS_BSD_API_O32:
		eh->e_flags |= EF_MIPS_ABI_O32;
		break;
	}
}
#endif

#if EXEC_ELF64
/*
 * mips_netbsd_elf64_probe:
 *
 *	MD probe hook for the MI 64-bit ELF exec code.  Same structure
 *	as the 32-bit probe, but MIPS-I and MIPS32 architecture levels
 *	are rejected outright (no 64-bit support there), MIPS64 requires
 *	a MIPS64 CPU, and the ABI choice is between N64 (no ABI flags)
 *	and O64.
 */
int
mips_netbsd_elf64_probe(struct lwp *l, struct exec_package *epp, void *eh0,
	char *itp, vaddr_t *start_p)
{
	struct proc * const p = l->l_proc;
	const Elf64_Ehdr * const eh = eh0;
	int old_abi = p->p_md.md_abi;
	const char *itp_suffix = NULL;

	switch (eh->e_flags & EF_MIPS_ARCH) {
	case EF_MIPS_ARCH_1:
		/* MIPS-I has no 64-bit mode. */
		return ENOEXEC;
	case EF_MIPS_ARCH_2:
		if (cpu_arch < CPU_ARCH_MIPS2)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_3:
		if (cpu_arch < CPU_ARCH_MIPS3)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_4:
		if (cpu_arch < CPU_ARCH_MIPS4)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_5:
		if (cpu_arch < CPU_ARCH_MIPS5)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_32:
		/* MIPS32 is 32-bit only; no 64-bit binaries. */
		return ENOEXEC;
	case EF_MIPS_ARCH_64:
		if (!CPUISMIPS64)
			return ENOEXEC;
		break;
	}

	switch (eh->e_flags & (EF_MIPS_ABI|EF_MIPS_ABI2)) {
	case 0:
		/* No ABI flags set means the N64 ABI for 64-bit ELF. */
		itp_suffix = "64";
		p->p_md.md_abi = _MIPS_BSD_API_N64;
		if (old_abi != p->p_md.md_abi)
			printf("pid %d(%s): ABI set to N64 (e_flags=%#x)\n", p->p_pid, p->p_comm, eh->e_flags);
		break;
	case EF_MIPS_ABI_O64:
		itp_suffix = "o64";
		p->p_md.md_abi = _MIPS_BSD_API_O64;
		if (old_abi != p->p_md.md_abi)
			printf("pid %d(%s): ABI set to O64 (e_flags=%#x)\n", p->p_pid, p->p_comm, eh->e_flags);
		break;
	default:
		return ENOEXEC;
	}

	(void)compat_elf_check_interp(epp, itp, itp_suffix);
	return 0;
}

/*
 * coredump_elf64_setup:
 *
 *	64-bit counterpart of coredump_elf32_setup: record the CPU's
 *	architecture level and the process ABI (N64/O64) in the core
 *	dump header's e_flags.
 */
void
coredump_elf64_setup(struct lwp *l, void *eh0)
{
	struct proc * const p = l->l_proc;
	Elf64_Ehdr * const eh = eh0;

	/*
	 * Mark the type of CPU that the dump happened on.
	 */
	if (cpu_arch & CPU_ARCH_MIPS64) {
		eh->e_flags |= EF_MIPS_ARCH_64;
	} else if (cpu_arch & CPU_ARCH_MIPS32) {
		eh->e_flags |= EF_MIPS_ARCH_32;
	} else if (cpu_arch & CPU_ARCH_MIPS5) {
		eh->e_flags |= EF_MIPS_ARCH_5;
	} else if (cpu_arch & CPU_ARCH_MIPS4) {
		eh->e_flags |= EF_MIPS_ARCH_4;
	} else if (cpu_arch & CPU_ARCH_MIPS3) {
		eh->e_flags |= EF_MIPS_ARCH_3;
	} else if (cpu_arch & CPU_ARCH_MIPS2) {
		eh->e_flags |= EF_MIPS_ARCH_2;
	} else {
		eh->e_flags |= EF_MIPS_ARCH_1;
	}
	/* Record the process ABI in the header's ABI flag bits. */
	switch (p->p_md.md_abi) {
	case _MIPS_BSD_API_N64:
		eh->e_flags |= EF_MIPS_ABI2;
		break;
	case _MIPS_BSD_API_O64:
		eh->e_flags |= EF_MIPS_ABI_O64;
		break;
	}
}
#endif