1 /*	$NetBSD: cpu_exec.c,v 1.64 2011/07/10 23:21:58 matt Exp $	*/
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by Ralph
8  * Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)machdep.c	8.3 (Berkeley) 1/12/94
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: cpu_exec.c,v 1.64 2011/07/10 23:21:58 matt Exp $");
39 
40 #include "opt_compat_netbsd.h"
41 #include "opt_compat_ultrix.h"
42 #include "opt_execfmt.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/malloc.h>
48 #include <sys/vnode.h>
49 #include <sys/exec.h>
50 #include <sys/namei.h>
51 #include <sys/resourcevar.h>
52 
53 #include <uvm/uvm_extern.h>
54 
55 #include <compat/common/compat_util.h>
56 
57 #ifdef EXEC_ECOFF
58 #include <sys/exec_ecoff.h>
59 #endif
60 #include <sys/exec_elf.h>			/* mandatory */
61 #include <mips/locore.h>
62 #include <mips/reg.h>
63 #include <mips/regnum.h>			/* symbolic register indices */
64 
65 #include <compat/common/compat_util.h>
66 
67 int	mips_elf_makecmds(struct lwp *, struct exec_package *);
68 
69 #ifdef EXEC_ECOFF
70 void
cpu_exec_ecoff_setregs(struct lwp * l,struct exec_package * epp,vaddr_t stack)71 cpu_exec_ecoff_setregs(struct lwp *l, struct exec_package *epp, vaddr_t stack)
72 {
73 	struct ecoff_exechdr *execp = (struct ecoff_exechdr *)epp->ep_hdr;
74 	struct trapframe *tf = l->l_md.md_utf;
75 
76 	tf->tf_regs[_R_GP] = (register_t)execp->a.gp_value;
77 }
78 
79 /*
80  * cpu_exec_ecoff_probe()
81  *	cpu-dependent ECOFF format hook for execve().
82  *
83  * Do any machine-dependent diddling of the exec package when doing ECOFF.
84  */
int
cpu_exec_ecoff_probe(struct lwp *l, struct exec_package *epp)
{
	/*
	 * There are no native ECOFF binaries on NetBSD/mips, so the
	 * native probe unconditionally declines; ECOFF images are
	 * only handled by compat layers.
	 */
	return ENOEXEC;
}
92 #endif /* EXEC_ECOFF */
93 
94 /*
95  * mips_elf_makecmds (l, epp)
96  *
97  * Test if an executable is a MIPS ELF executable.   If it is,
98  * try to load it.
99  */
100 
int
mips_elf_makecmds(struct lwp *l, struct exec_package *epp)
{
	Elf32_Ehdr *ex = (Elf32_Ehdr *)epp->ep_hdr;
	Elf32_Phdr ph;		/* scratch: one program header at a time */
	int i, error;
	size_t resid;		/* bytes vn_rdwr() failed to transfer */

	/* Make sure we got enough data to check magic numbers... */
	if (epp->ep_hdrvalid < sizeof (Elf32_Ehdr)) {
#ifdef DIAGNOSTIC
		if (epp->ep_hdrlen < sizeof (Elf32_Ehdr))
			printf ("mips_elf_makecmds: execsw hdrsize too short!\n");
#endif
	    return ENOEXEC;
	}

	/* See if it's got the basic elf magic number leadin... */
	if (memcmp(ex->e_ident, ELFMAG, SELFMAG) != 0) {
		return ENOEXEC;
	}

	/* XXX: Check other magic numbers here. */
	if (ex->e_ident[EI_CLASS] != ELFCLASS32) {
		return ENOEXEC;
	}

	/* See if we got any program header information... */
	if (!ex->e_phoff || !ex->e_phnum) {
		return ENOEXEC;
	}

	/* Mark the vnode as an executable so open-for-write is refused. */
	error = vn_marktext(epp->ep_vp);
	if (error)
		return (error);

	/* Set the entry point... */
	epp->ep_entry = ex->e_entry;
	epp->ep_taddr = 0;
	epp->ep_tsize = 0;
	epp->ep_daddr = 0;
	epp->ep_dsize = 0;

	/* Walk the program header table, one header per iteration. */
	for (i = 0; i < ex->e_phnum; i++) {
#ifdef DEBUG
		/*printf("obsolete elf: mapping %x %x %x\n", resid);*/
#endif
		/* Read the i'th program header from the file into ph. */
		if ((error = vn_rdwr(UIO_READ, epp->ep_vp, (void *)&ph,
				    sizeof ph, ex->e_phoff + i * sizeof ph,
				    UIO_SYSSPACE, IO_NODELOCKED,
				    l->l_cred, &resid, NULL))
		    != 0)
			return error;

		/* A short read means a truncated header table. */
		if (resid != 0) {
			return ENOEXEC;
		}

		/* We only care about loadable sections... */
		if (ph.p_type == PT_LOAD) {
			int prot = VM_PROT_READ | VM_PROT_EXECUTE;
			int residue;	/* zero-fill (bss) bytes past file data */
			unsigned vaddr, offset, length;

			vaddr = ph.p_vaddr;
			offset = ph.p_offset;
			length = ph.p_filesz;
			residue = ph.p_memsz - ph.p_filesz;

			if (ph.p_flags & PF_W) {
				/*
				 * Writable segment: copied in with
				 * vmcmd_map_readvn (not demand paged),
				 * tracked as the data segment.
				 */
				prot |= VM_PROT_WRITE;
				if (!epp->ep_daddr || vaddr < epp->ep_daddr)
					epp->ep_daddr = vaddr;
				epp->ep_dsize += ph.p_memsz;
				/* Read the data from the file... */
				NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn,
					  length, vaddr,
					  epp->ep_vp, offset, prot);
#ifdef OLD_ELF_DEBUG
/*XXX*/		printf(
	"obsolete elf: NEW_VMCMD len %x va %x off %x prot %x residue %x\n",
			length, vaddr, offset, prot, residue);
#endif /*ELF_DEBUG*/

				if (residue) {
					/*
					 * Page-align the region, then
					 * recompute how much of p_memsz
					 * still needs zero-fill beyond it.
					 */
					vaddr &= ~(PAGE_SIZE - 1);
					offset &= ~(PAGE_SIZE - 1);
					length = roundup (length + ph.p_vaddr
							  - vaddr, PAGE_SIZE);
					residue = (ph.p_vaddr + ph.p_memsz)
						  - (vaddr + length);
				}
			} else {
				/*
				 * Read-only segment: page-align so it can
				 * be demand paged straight from the vnode
				 * (vmcmd_map_pagedvn), tracked as text.
				 */
				vaddr &= ~(PAGE_SIZE - 1);
				offset &= ~(PAGE_SIZE - 1);
				length = roundup (length + ph.p_vaddr - vaddr,
						  PAGE_SIZE);
				residue = (ph.p_vaddr + ph.p_memsz)
					  - (vaddr + length);
				if (!epp->ep_taddr || vaddr < epp->ep_taddr)
					epp->ep_taddr = vaddr;
				epp->ep_tsize += ph.p_memsz;
				/* Map the data from the file... */
				NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn,
					  length, vaddr,
					  epp->ep_vp, offset, prot);
			}
			/* If part of the segment is just zeros (e.g., bss),
			   map that. */
			if (residue > 0) {
#ifdef OLD_ELF_DEBUG
/*XXX*/			printf(
	"old elf:resid NEW_VMCMD len %x va %x off %x prot %x residue %x\n",
				length, vaddr + length, offset, prot, residue);
#endif /*ELF_DEBUG*/

				NEW_VMCMD (&epp->ep_vmcmds, vmcmd_map_zero,
					   residue, vaddr + length,
					   NULLVP, 0, prot);
			}
		}
	}

	/* Stack geometry: rlimit-sized accessible region below USRSTACK. */
	epp->ep_maxsaddr = USRSTACK - MAXSSIZ;
	epp->ep_minsaddr = USRSTACK;
	epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur;

	/*
	 * set up commands for stack.  note that this takes *two*, one to
	 * map the part of the stack which we can access, and one to map
	 * the part which we can't.
	 *
	 * arguably, it could be made into one, but that would require the
	 * addition of another mapping proc, which is unnecessary
	 *
	 * note that in memory, things assumed to be: 0 ....... ep_maxsaddr
	 * <stack> ep_minsaddr
	 */
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero,
	    ((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
	    epp->ep_maxsaddr, NULLVP, 0, VM_PROT_NONE, VMCMD_STACK);
	NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
	    (epp->ep_minsaddr - epp->ep_ssize), NULLVP, 0,
	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, VMCMD_STACK);

	return 0;
}
248 
249 #if EXEC_ELF32
/*
 * mips_netbsd_elf32_probe:
 *	Machine-dependent ELF32 probe hook for execve().  Rejects images
 *	whose e_flags ISA field requires a newer CPU than we have, records
 *	the process ABI (O32/N32) in p_md.md_abi, and selects the
 *	interpreter-name suffix used to locate the matching dynamic linker.
 */
int
mips_netbsd_elf32_probe(struct lwp *l, struct exec_package *epp, void *eh0,
	char *itp, vaddr_t *start_p)
{
	struct proc * const p = l->l_proc;
	const Elf32_Ehdr * const eh = eh0;
	int old_abi = p->p_md.md_abi;	/* remembered so ABI changes get logged */
	const char *itp_suffix = NULL;

	/*
	 * Verify we can support the architecture.
	 */
	switch (eh->e_flags & EF_MIPS_ARCH) {
	case EF_MIPS_ARCH_1:
		break;			/* MIPS-I runs everywhere */
	case EF_MIPS_ARCH_2:
		if (mips_options.mips_cpu_arch < CPU_ARCH_MIPS2)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_3:
		if (mips_options.mips_cpu_arch < CPU_ARCH_MIPS3)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_4:
		if (mips_options.mips_cpu_arch < CPU_ARCH_MIPS4)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_5:
		if (mips_options.mips_cpu_arch < CPU_ARCH_MIPS5)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_32:
	case EF_MIPS_ARCH_64:
		/* r1 binaries also run on r2 CPUs */
		if (!CPUISMIPSNN && !CPUISMIPS32R2 && !CPUISMIPS64R2)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_32R2:
	case EF_MIPS_ARCH_64R2:
		if (!CPUISMIPS32R2 && !CPUISMIPS64R2)
			return ENOEXEC;
		break;
	}
	/*
	 * NOTE(review): there is no default case, so e_flags arch values
	 * beyond EF_MIPS_ARCH_64R2 are silently accepted — confirm intended.
	 */

	switch (eh->e_flags & (EF_MIPS_ABI|EF_MIPS_ABI2)) {
#if !defined(__mips_o32)
	case EF_MIPS_ABI2:
		itp_suffix = "n32";
		p->p_md.md_abi = _MIPS_BSD_API_N32;
		if (old_abi != p->p_md.md_abi)
			printf("pid %d(%s): ABI set to N32 (e_flags=%#x)\n", p->p_pid, p->p_comm, eh->e_flags);
		break;
#endif
#ifdef COMPAT_16
	case 0:
		/*
		 * No ABI flags at all: assume an old binary from before the
		 * ABI tagging, linked at the historical base address.
		 */
		*start_p = ELF32_LINK_ADDR;
		/* FALLTHROUGH */
#endif
	case EF_MIPS_ABI_O32:
		itp_suffix = "o32";
		p->p_md.md_abi = _MIPS_BSD_API_O32;
		if (old_abi != p->p_md.md_abi)
			printf("pid %d(%s): ABI set to O32 (e_flags=%#x)\n", p->p_pid, p->p_comm, eh->e_flags);
		break;
	default:
		return ENOEXEC;
	}

	/* Append the ABI suffix to the interpreter path (best effort). */
	(void)compat_elf_check_interp(epp, itp, itp_suffix);
	return 0;
}
320 
321 void
coredump_elf32_setup(struct lwp * l,void * eh0)322 coredump_elf32_setup(struct lwp *l, void *eh0)
323 {
324 	struct proc * const p = l->l_proc;
325 	Elf32_Ehdr * const eh = eh0;
326 
327 	/*
328 	 * Mark the type of CPU that the dump happened on.
329 	 */
330 	if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS64R2) {
331 		eh->e_flags |= EF_MIPS_ARCH_64R2;
332 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS64) {
333 		eh->e_flags |= EF_MIPS_ARCH_64;
334 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS32R2) {
335 		eh->e_flags |= EF_MIPS_ARCH_32R2;
336 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS32) {
337 		eh->e_flags |= EF_MIPS_ARCH_32;
338 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS5) {
339 		eh->e_flags |= EF_MIPS_ARCH_5;
340 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS4) {
341 		eh->e_flags |= EF_MIPS_ARCH_4;
342 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS3) {
343 		eh->e_flags |= EF_MIPS_ARCH_3;
344 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS2) {
345 		eh->e_flags |= EF_MIPS_ARCH_2;
346 	} else {
347 		eh->e_flags |= EF_MIPS_ARCH_1;
348 	}
349 
350 	switch (p->p_md.md_abi) {
351 	case _MIPS_BSD_API_N32:
352 		eh->e_flags |= EF_MIPS_ABI2;
353 		break;
354 	case _MIPS_BSD_API_O32:
355 		eh->e_flags |=EF_MIPS_ABI_O32;
356 		break;
357 	}
358 }
359 #endif
360 
361 #if EXEC_ELF64
/*
 * mips_netbsd_elf64_probe:
 *	Machine-dependent ELF64 probe hook for execve().  Rejects images
 *	whose e_flags ISA field cannot execute on this CPU, records the
 *	process ABI (N64/O64) in p_md.md_abi, and selects the
 *	interpreter-name suffix used to locate the matching dynamic linker.
 */
int
mips_netbsd_elf64_probe(struct lwp *l, struct exec_package *epp, void *eh0,
	char *itp, vaddr_t *start_p)
{
	struct proc * const p = l->l_proc;
	const Elf64_Ehdr * const eh = eh0;
	int old_abi = p->p_md.md_abi;	/* remembered so ABI changes get logged */
	const char *itp_suffix = NULL;

	switch (eh->e_flags & EF_MIPS_ARCH) {
	case EF_MIPS_ARCH_1:
		/* 64-bit binaries cannot target the 32-bit MIPS-I ISA. */
		return ENOEXEC;
	case EF_MIPS_ARCH_2:
		if (mips_options.mips_cpu_arch < CPU_ARCH_MIPS2)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_3:
		if (mips_options.mips_cpu_arch < CPU_ARCH_MIPS3)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_4:
		if (mips_options.mips_cpu_arch < CPU_ARCH_MIPS4)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_5:
		if (mips_options.mips_cpu_arch < CPU_ARCH_MIPS5)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_32:
	case EF_MIPS_ARCH_32R2:
		/* 32-bit-only ISAs cannot carry a 64-bit image. */
		return ENOEXEC;
	case EF_MIPS_ARCH_64:
		if (!CPUISMIPS64 && !CPUISMIPS64R2)
			return ENOEXEC;
		break;
	case EF_MIPS_ARCH_64R2:
		if (!CPUISMIPS64R2)
			return ENOEXEC;
		break;
	}
	/*
	 * NOTE(review): there is no default case, so e_flags arch values
	 * beyond EF_MIPS_ARCH_64R2 are silently accepted — confirm intended.
	 */

	switch (eh->e_flags & (EF_MIPS_ABI|EF_MIPS_ABI2)) {
	case 0:
		/* No ABI flag bits set on a 64-bit image means N64. */
		itp_suffix = "64";
		p->p_md.md_abi = _MIPS_BSD_API_N64;
		if (old_abi != p->p_md.md_abi)
			printf("pid %d(%s): ABI set to N64 (e_flags=%#x)\n", p->p_pid, p->p_comm, eh->e_flags);
		break;
	case EF_MIPS_ABI_O64:
		itp_suffix = "o64";
		p->p_md.md_abi = _MIPS_BSD_API_O64;
		if (old_abi != p->p_md.md_abi)
			printf("pid %d(%s): ABI set to O64 (e_flags=%#x)\n", p->p_pid, p->p_comm, eh->e_flags);
		break;
	default:
		return ENOEXEC;
	}

	/* Append the ABI suffix to the interpreter path (best effort). */
	(void)compat_elf_check_interp(epp, itp, itp_suffix);
	return 0;
}
423 
424 void
coredump_elf64_setup(struct lwp * l,void * eh0)425 coredump_elf64_setup(struct lwp *l, void *eh0)
426 {
427 	struct proc * const p = l->l_proc;
428 	Elf64_Ehdr * const eh = eh0;
429 
430 	/*
431 	 * Mark the type of CPU that the dump happened on.
432 	 */
433 	if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS64) {
434 		eh->e_flags |= EF_MIPS_ARCH_64;
435 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS32) {
436 		eh->e_flags |= EF_MIPS_ARCH_32;
437 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS5) {
438 		eh->e_flags |= EF_MIPS_ARCH_5;
439 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS4) {
440 		eh->e_flags |= EF_MIPS_ARCH_4;
441 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS3) {
442 		eh->e_flags |= EF_MIPS_ARCH_3;
443 	} else if (mips_options.mips_cpu_arch & CPU_ARCH_MIPS2) {
444 		eh->e_flags |= EF_MIPS_ARCH_2;
445 	} else {
446 		eh->e_flags |= EF_MIPS_ARCH_1;
447 	}
448 	switch (p->p_md.md_abi) {
449 	case _MIPS_BSD_API_N64:
450 		eh->e_flags |= EF_MIPS_ABI2;
451 		break;
452 	case _MIPS_BSD_API_O64:
453 		eh->e_flags |= EF_MIPS_ABI_O64;
454 		break;
455 	}
456 }
457 #endif
458