/*	$OpenBSD: trap.c,v 1.54 2023/02/11 23:07:27 deraadt Exp $	*/
/*	$NetBSD: exception.c,v 1.32 2006/09/04 23:57:52 uwe Exp $	*/
/*	$NetBSD: syscall.c,v 1.6 2006/03/07 07:21:50 thorpej Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)trap.c	7.4 (Berkeley) 5/13/91
 */

/*-
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)trap.c	7.4 (Berkeley) 5/13/91
 */

/*
 * SH3 Trap and System call handling
 *
 * T.Horiuchi 1998.06.8
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/signal.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <uvm/uvm_extern.h>
#include <sys/syscall.h>
#include <sys/syscall_mi.h>

#include <sh/cache.h>
#include <sh/cpu.h>
#include <sh/mmu.h>
#include <sh/pcb.h>
#include <sh/trap.h>
#ifdef SH4
#include <sh/fpu.h>
#endif

#ifdef DDB
#include <machine/db_machdep.h>
#endif

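/*
 * EXPEVT codes are assigned in steps of 0x20, so "expevt >> 5" indexes
 * this table; it is only consulted to print a readable name on the
 * do_panic path in general_exception().
 */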
const char * const exp_type[] = {
	NULL,					/* 000 (reset vector) */
	NULL,					/* 020 (reset vector) */
	"TLB miss/invalid (load)",		/* 040 EXPEVT_TLB_MISS_LD */
	"TLB miss/invalid (store)",		/* 060 EXPEVT_TLB_MISS_ST */
	"initial page write",			/* 080 EXPEVT_TLB_MOD */
	"TLB protection violation (load)",	/* 0a0 EXPEVT_TLB_PROT_LD */
	"TLB protection violation (store)",	/* 0c0 EXPEVT_TLB_PROT_ST */
	"address error (load)",			/* 0e0 EXPEVT_ADDR_ERR_LD */
	"address error (store)",		/* 100 EXPEVT_ADDR_ERR_ST */
	"FPU",					/* 120 EXPEVT_FPU */
	NULL,					/* 140 (reset vector) */
	"unconditional trap (TRAPA)",		/* 160 EXPEVT_TRAPA */
	"reserved instruction code exception",	/* 180 EXPEVT_RES_INST */
	"illegal slot instruction exception",	/* 1a0 EXPEVT_SLOT_INST */
	NULL,					/* 1c0 (external interrupt) */
	"user break point trap",		/* 1e0 EXPEVT_BREAK */
	NULL, NULL, NULL, NULL,			/* 200-260 */
	NULL, NULL, NULL, NULL,			/* 280-2e0 */
	NULL, NULL, NULL, NULL,			/* 300-360 */
	NULL, NULL, NULL, NULL,			/* 380-3e0 */
	NULL, NULL, NULL, NULL,			/* 400-460 */
	NULL, NULL, NULL, NULL,			/* 480-4e0 */
	NULL, NULL, NULL, NULL,			/* 500-560 */
	NULL, NULL, NULL, NULL,			/* 580-5e0 */
	NULL, NULL, NULL, NULL,			/* 600-660 */
	NULL, NULL, NULL, NULL,			/* 680-6e0 */
	NULL, NULL, NULL, NULL,			/* 700-760 */
	NULL, NULL, NULL, NULL,			/* 780-7e0 */
	"FPU disabled",				/* 800 EXPEVT_FPU_DISABLE */
	"slot FPU disabled"			/* 820 EXPEVT_FPU_SLOT_DISABLE */
};
const int exp_types = sizeof exp_type / sizeof exp_type[0];

void general_exception(struct proc *, struct trapframe *, uint32_t);
void tlb_exception(struct proc *, struct trapframe *, uint32_t);
void ast(struct proc *, struct trapframe *);
void syscall(struct proc *, struct trapframe *);
void cachectl(struct proc *, struct trapframe *);

/*
 * void general_exception(struct proc *p, struct trapframe *tf, uint32_t va):
 *	p  ... curproc when exception occurred.
 *	tf ... full user context.
 *	va ... fault address for user mode EXPEVT_ADDR_ERR_{LD,ST}.
 */
void
general_exception(struct proc *p, struct trapframe *tf, uint32_t va)
{
	int expevt = tf->tf_expevt;
	int tra = _reg_read_4(SH_(TRA));
	int usermode = !KERNELMODE(tf->tf_ssr);
	union sigval sv;

	uvmexp.traps++;

	/*
	 * This function is entered at splhigh. Restore the interrupt
	 * level to what it was when the trap occurred.
	 */
	splx(tf->tf_ssr & PSL_IMASK);

	if (usermode) {
		if (p == NULL)
			goto do_panic;
		KDASSERT(p->p_md.md_regs == tf); /* check exception depth */
		expevt |= EXP_USER;
		refreshcreds(p);
	}

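	/*
	 * Traps taken in user mode had EXP_USER folded into the event
	 * code above, so kernel- and user-mode cases can be dispatched
	 * from a single switch.
	 */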
	switch (expevt) {
	case EXPEVT_BREAK:
#ifdef DDB
		if (db_ktrap(EXPEVT_BREAK, 0, tf))
			return;
		else
#endif
			goto do_panic;
		break;
	case EXPEVT_TRAPA:
#ifdef DDB
		/* Check for ddb request */
		if (tra == (_SH_TRA_BREAK << 2) &&
		    db_ktrap(expevt, tra, tf))
			return;
		else
#endif
			goto do_panic;
		break;
	case EXPEVT_TRAPA | EXP_USER:
		/* Check for debugger break */
		switch (tra) {
		case _SH_TRA_BREAK << 2:
			tf->tf_spc -= 2; /* back to the breakpoint address */
			sv.sival_ptr = (void *)tf->tf_spc;
			trapsignal(p, SIGTRAP, expevt & ~EXP_USER, TRAP_BRKPT,
			    sv);
			goto out;
		case _SH_TRA_SYSCALL << 2:
			syscall(p, tf);
			return;
		case _SH_TRA_CACHECTL << 2:
			cachectl(p, tf);
			return;
		default:
			sv.sival_ptr = (void *)tf->tf_spc;
			trapsignal(p, SIGILL, expevt & ~EXP_USER, ILL_ILLTRP,
			    sv);
			goto out;
		}
		break;

	case EXPEVT_ADDR_ERR_LD: /* FALLTHROUGH */
	case EXPEVT_ADDR_ERR_ST:
		KDASSERT(p && p->p_md.md_pcb->pcb_onfault != NULL);
		if (p == NULL || p->p_md.md_pcb->pcb_onfault == 0)
			goto do_panic;
		tf->tf_spc = (int)p->p_md.md_pcb->pcb_onfault;
		break;

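	/*
	 * User virtual addresses live below 0x80000000 on sh, so a
	 * fault address with the sign bit set means a user-mode access
	 * to kernel space (SIGSEGV); anything else is a misaligned
	 * user access (SIGBUS).
	 */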
	case EXPEVT_ADDR_ERR_LD | EXP_USER: /* FALLTHROUGH */
	case EXPEVT_ADDR_ERR_ST | EXP_USER:
		sv.sival_ptr = (void *)va;
		if (((int)va) < 0)
			trapsignal(p, SIGSEGV, expevt & ~EXP_USER, SEGV_ACCERR,
			    sv);
		else
			trapsignal(p, SIGBUS, expevt & ~EXP_USER, BUS_ADRALN,
			    sv);
		goto out;

	case EXPEVT_RES_INST | EXP_USER: /* FALLTHROUGH */
	case EXPEVT_SLOT_INST | EXP_USER:
		sv.sival_ptr = (void *)tf->tf_spc;
		trapsignal(p, SIGILL, expevt & ~EXP_USER, ILL_ILLOPC, sv);
		goto out;

	case EXPEVT_BREAK | EXP_USER:
		sv.sival_ptr = (void *)tf->tf_spc;
		trapsignal(p, SIGTRAP, expevt & ~EXP_USER, TRAP_TRACE, sv);
		goto out;

#ifdef SH4
	case EXPEVT_FPU_DISABLE | EXP_USER: /* FALLTHROUGH */
	case EXPEVT_FPU_SLOT_DISABLE | EXP_USER:
		sv.sival_ptr = (void *)tf->tf_spc;
		trapsignal(p, SIGILL, expevt & ~EXP_USER, ILL_COPROC, sv);
		goto out;

	case EXPEVT_FPU | EXP_USER:
	    {
		int fpscr, sigi;

		/* XXX worth putting in the trapframe? */
		__asm__ volatile ("sts fpscr, %0" : "=r" (fpscr));
		fpscr = (fpscr & FPSCR_CAUSE_MASK) >> FPSCR_CAUSE_SHIFT;
		if (fpscr & FPEXC_E)
			sigi = FPE_FLTINV;	/* XXX any better value? */
		else if (fpscr & FPEXC_V)
			sigi = FPE_FLTINV;
		else if (fpscr & FPEXC_Z)
			sigi = FPE_FLTDIV;
		else if (fpscr & FPEXC_O)
			sigi = FPE_FLTOVF;
		else if (fpscr & FPEXC_U)
			sigi = FPE_FLTUND;
		else if (fpscr & FPEXC_I)
			sigi = FPE_FLTRES;
		else
			sigi = 0;	/* shouldn't happen */
		sv.sival_ptr = (void *)tf->tf_spc;
		trapsignal(p, SIGFPE, expevt & ~EXP_USER, sigi, sv);
	    }
		goto out;
#endif

	default:
		goto do_panic;
	}

	if (!usermode)
		return;
out:
	userret(p);
	return;

do_panic:
	if ((expevt >> 5) < exp_types && exp_type[expevt >> 5] != NULL)
		printf("fatal %s", exp_type[expevt >> 5]);
	else
		printf("EXPEVT 0x%03x", expevt);
	printf(" in %s mode\n", expevt & EXP_USER ? "user" : "kernel");
	printf("va 0x%x spc 0x%x ssr 0x%x pr 0x%x \n",
	    va, tf->tf_spc, tf->tf_ssr, tf->tf_pr);

	panic("general_exception");
	/* NOTREACHED */
}


/*
 * void tlb_exception(struct proc *p, struct trapframe *tf, uint32_t va):
 *	p  ... curproc when exception occurred.
 *	tf ... full user context.
 *	va ... fault address.
 */
void
tlb_exception(struct proc *p, struct trapframe *tf, uint32_t va)
{
	struct vm_map *map;
	pmap_t pmap;
	union sigval sv;
	int usermode;
	int err, track, access_type;
	const char *panic_msg;

#define TLB_ASSERT(assert, msg)				\
		do {					\
			if (!(assert)) {		\
				panic_msg =  msg;	\
				goto tlb_panic;		\
			}				\
		} while(/*CONSTCOND*/0)

	/*
	 * This function is entered at splhigh. Restore the interrupt
	 * level to what it was when the trap occurred.
	 */
	splx(tf->tf_ssr & PSL_IMASK);

	usermode = !KERNELMODE(tf->tf_ssr);
	if (usermode) {
		KDASSERT(p->p_md.md_regs == tf);
		refreshcreds(p);
	} else {
		KDASSERT(p == NULL ||		/* idle */
		    p == &proc0 ||		/* kthread */
		    p->p_md.md_regs != tf);	/* other */
	}

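	/*
	 * "track" holds the pmap attribute bits (referenced/modified)
	 * handed to __pmap_pte_load() when the PTE is preloaded below;
	 * zero skips the preload so that uvm_fault() runs first
	 * (needed for copy-on-write on protected stores).
	 */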
	switch (tf->tf_expevt) {
	case EXPEVT_TLB_MISS_LD:
		track = PG_PMAP_REF;
		access_type = PROT_READ;
		break;
	case EXPEVT_TLB_MISS_ST:
		track = PG_PMAP_REF;
		access_type = PROT_WRITE;
		break;
	case EXPEVT_TLB_MOD:
		track = PG_PMAP_REF | PG_PMAP_MOD;
		access_type = PROT_WRITE;
		break;
	case EXPEVT_TLB_PROT_LD:
		TLB_ASSERT((int)va > 0,
		    "kernel virtual protection fault (load)");
		if (usermode) {
			sv.sival_ptr = (void *)va;
			trapsignal(p, SIGSEGV, tf->tf_expevt, SEGV_ACCERR, sv);
			goto out;
		} else {
			TLB_ASSERT(p->p_md.md_pcb->pcb_onfault != NULL,
			    "no copyin/out fault handler (load protection)");
			tf->tf_spc = (int)p->p_md.md_pcb->pcb_onfault;
		}
		return;

	case EXPEVT_TLB_PROT_ST:
		track = 0;	/* call uvm_fault first. (COW) */
		access_type = PROT_WRITE;
		break;

	default:
		TLB_ASSERT(0, "impossible expevt");
	}

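	/*
	 * Negative addresses are kernel virtual addresses and are
	 * looked up in the kernel map.  A user address faulting in
	 * kernel mode can only come from copyin/copyout, so an
	 * onfault handler must be registered; a NULL pointer is
	 * bounced straight to that handler.
	 */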
	/* Select address space */
	if (usermode) {
		if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p),
		    "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n",
		    uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial))
			goto out;

		TLB_ASSERT(p != NULL, "no curproc");
		map = &p->p_vmspace->vm_map;
		pmap = map->pmap;
	} else {
		if ((int)va < 0) {
			map = kernel_map;
			pmap = pmap_kernel();
		} else {
			TLB_ASSERT(p != NULL &&
			    p->p_md.md_pcb->pcb_onfault != NULL,
			    "invalid user-space access from kernel mode");
			if (va == 0) {
				tf->tf_spc = (int)p->p_md.md_pcb->pcb_onfault;
				return;
			}
			map = &p->p_vmspace->vm_map;
			pmap = map->pmap;
		}
	}

	/* Lookup the page table; if an entry is found, load it. */
	if (track && __pmap_pte_load(pmap, va, track)) {
		if (usermode)
			userret(p);
		return;
	}

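	/*
	 * An instruction fetch from a page mapped without PROT_READ
	 * shows up as a read fault; if a user read fault is denied
	 * with EACCES, retry it as an execute access before giving up.
	 */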
	err = uvm_fault(map, va, 0, access_type);
	if (usermode && access_type == PROT_READ && err == EACCES) {
		access_type = PROT_EXEC;
		err = uvm_fault(map, va, 0, access_type);
	}

	/* User stack extension */
	if (err == 0 && map != kernel_map)
		uvm_grow(p, va);

	/* Page in; load the PTE into the TLB. */
	if (err == 0) {
		int loaded = __pmap_pte_load(pmap, va, track);
		TLB_ASSERT(loaded, "page table entry not found");
		if (usermode)
			userret(p);
		return;
	}

	/* Page not found. */
	if (usermode) {
		sv.sival_ptr = (void *)va;
		if (err == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			    p->p_p->ps_pid, p->p_p->ps_comm,
			    p->p_ucred ? (int)p->p_ucred->cr_uid : -1);
			trapsignal(p, SIGKILL, tf->tf_expevt, SEGV_MAPERR, sv);
		} else
			trapsignal(p, SIGSEGV, tf->tf_expevt, SEGV_MAPERR, sv);
		goto out;
	} else {
		TLB_ASSERT(p->p_md.md_pcb->pcb_onfault,
		    "no copyin/out fault handler (page not found)");
		tf->tf_spc = (int)p->p_md.md_pcb->pcb_onfault;
	}
	return;

out:
	userret(p);
	ast(p, tf);
	return;

tlb_panic:
	panic("tlb_exception: %s\n"
	      "expevt=%x va=%08x ssr=%08x spc=%08x proc=%p onfault=%p",
	      panic_msg, tf->tf_expevt, va, tf->tf_ssr, tf->tf_spc,
	      p, p ? p->p_md.md_pcb->pcb_onfault : NULL);
#undef	TLB_ASSERT
}


/*
 * void ast(struct proc *p, struct trapframe *tf):
 *	p  ... curproc when exception occurred.
 *	tf ... full user context.
 *	This is called upon exception return. If returning from kernel to
 *	user mode, handle asynchronous software traps and context switch
 *	if needed.
 */
void
ast(struct proc *p, struct trapframe *tf)
{
	if (KERNELMODE(tf->tf_ssr))
		return;
	KDASSERT(p != NULL);
	KDASSERT(p->p_md.md_regs == tf);

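	/*
	 * Servicing an AST may post another one (e.g. a new reschedule
	 * request), so loop until md_astpending stays clear.
	 */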
	while (p->p_md.md_astpending) {
		p->p_md.md_astpending = 0;
		refreshcreds(p);
		uvmexp.softs++;
		mi_ast(p, curcpu()->ci_want_resched);
		userret(p);
	}
}

void
cachectl(struct proc *p, struct trapframe *tf)
{
	vaddr_t va;
	vsize_t len;

	if (!SH_HAS_UNIFIED_CACHE) {
		va = (vaddr_t)tf->tf_r4;
		len = (vsize_t)tf->tf_r5;

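		/*
		 * Only sync ranges that lie entirely within the user
		 * address space; out-of-range or wrapping requests are
		 * silently ignored.
		 */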
		if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS ||
		    va + len <= va || va + len >= VM_MAXUSER_ADDRESS)
			len = 0;

		if (len != 0)
			sh_icache_sync_range_index(va, len);
	}

	userret(p);
}

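/*
 * void syscall(struct proc *p, struct trapframe *tf):
 *	System call handler, entered via TRAPA from user mode.  The
 *	syscall number is taken from r0 (or from r4 for SYS_syscall),
 *	the leading arguments from r4-r7 and the rest from the user
 *	stack (r15).  Results are returned in r0/r1, with the T bit
 *	set on success and cleared on error.
 */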
void
syscall(struct proc *p, struct trapframe *tf)
{
	caddr_t params;
	const struct sysent *callp;
	int error, opc, indirect = -1;
	int argoff, argsize;
	register_t code, args[8], rval[2];

	uvmexp.syscalls++;

	opc = tf->tf_spc;
	code = tf->tf_r0;

	params = (caddr_t)tf->tf_r15;

	switch (code) {
	case SYS_syscall:
		/*
		 * Code is first argument, followed by actual args.
		 */
		indirect = code;
		code = tf->tf_r4;
		argoff = 1;
		break;
	default:
		argoff = 0;
		break;
	}

	callp = sysent;
	if (code < 0 || code >= SYS_MAXSYSCALL)
		callp += SYS_syscall;
	else
		callp += code;
	argsize = callp->sy_argsize;
#ifdef DIAGNOSTIC
	if (argsize > sizeof args) {
		callp += SYS_syscall - code;
		argsize = callp->sy_argsize;
	}
#endif

	if (argsize) {
		register_t *ap;
		int off_t_arg;

		switch (code) {
		default:		off_t_arg = 0;	break;
		case SYS_lseek:
		case SYS_truncate:
		case SYS_ftruncate:	off_t_arg = 1;	break;
		case SYS_preadv:
		case SYS_pwritev:
		case SYS_pread:
		case SYS_pwrite:	off_t_arg = 3;	break;
		}

		ap = args;
		switch (argoff) {
		case 0:	*ap++ = tf->tf_r4; argsize -= sizeof(int);
		case 1:	*ap++ = tf->tf_r5; argsize -= sizeof(int);
		case 2: *ap++ = tf->tf_r6; argsize -= sizeof(int);
			/*
			 * off_t args aren't split between register
			 * and stack, but rather r7 is skipped and
			 * the entire off_t is on the stack.
			 */
			if (argoff + off_t_arg == 3)
				break;
			*ap++ = tf->tf_r7; argsize -= sizeof(int);
			break;
		}

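		/*
		 * Example: for pread(2), fd/buf/nbyte arrive in r4-r6;
		 * the 64-bit offset would be the fourth slot, so r7 is
		 * skipped above and the offset is fetched from the
		 * user stack by the copyin() below.
		 */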

		if (argsize > 0) {
			if ((error = copyin(params, ap, argsize)))
				goto bad;
		}
	}

	rval[0] = 0;
	rval[1] = tf->tf_r1;

	error = mi_syscall(p, code, indirect, callp, args, rval);

	switch (error) {
	case 0:
		tf->tf_r0 = rval[0];
		tf->tf_r1 = rval[1];
		tf->tf_ssr |= PSL_TBIT;	/* T bit */
		break;
	case ERESTART:
		/* 2 = TRAPA instruction size */
		tf->tf_spc = opc - 2;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
	bad:
		tf->tf_r0 = error;
		tf->tf_ssr &= ~PSL_TBIT;	/* T bit */
		break;
	}

	mi_syscall_return(p, code, error, rval);
}

/*
 * void child_return(void *arg):
 *
 *	uvm_fork() sets this routine as proc_trampoline's service function.
 *	On return from here, execution continues in userland.
 */
void
child_return(void *arg)
{
	struct proc *p = arg;
	struct trapframe *tf = p->p_md.md_regs;

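	/*
	 * The child returns 0 from fork with the T bit set, matching
	 * the convention for a successful syscall return in syscall().
	 */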
	tf->tf_r0 = 0;
	tf->tf_ssr |= PSL_TBIT; /* This indicates no error. */

	mi_child_return(p);
}