xref: /netbsd/sys/arch/sh3/sh3/sh3_machdep.c (revision c4a72b64)
1 /*	$NetBSD: sh3_machdep.c,v 1.45 2002/08/25 20:21:42 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996, 1997, 1998, 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*-
41  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
42  * All rights reserved.
43  *
44  * This code is derived from software contributed to Berkeley by
45  * William Jolitz.
46  *
47  * Redistribution and use in source and binary forms, with or without
48  * modification, are permitted provided that the following conditions
49  * are met:
50  * 1. Redistributions of source code must retain the above copyright
51  *    notice, this list of conditions and the following disclaimer.
52  * 2. Redistributions in binary form must reproduce the above copyright
53  *    notice, this list of conditions and the following disclaimer in the
54  *    documentation and/or other materials provided with the distribution.
55  * 3. All advertising materials mentioning features or use of this software
56  *    must display the following acknowledgement:
57  *	This product includes software developed by the University of
58  *	California, Berkeley and its contributors.
59  * 4. Neither the name of the University nor the names of its contributors
60  *    may be used to endorse or promote products derived from this software
61  *    without specific prior written permission.
62  *
63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73  * SUCH DAMAGE.
74  *
75  *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
76  */
77 
78 #include "opt_kgdb.h"
79 #include "opt_memsize.h"
80 #include "opt_compat_netbsd.h"
81 #include "opt_kstack_debug.h"
82 
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 
86 #include <sys/buf.h>
87 #include <sys/exec.h>
88 #include <sys/kernel.h>
89 #include <sys/malloc.h>
90 #include <sys/mount.h>
91 #include <sys/proc.h>
92 #include <sys/signalvar.h>
93 #include <sys/syscallargs.h>
94 #include <sys/user.h>
95 
96 #ifdef KGDB
97 #include <sys/kgdb.h>
98 #ifndef KGDB_DEVNAME
99 #define	KGDB_DEVNAME "nodev"
100 #endif
101 const char kgdb_devname[] = KGDB_DEVNAME;
102 #endif /* KGDB */
103 
104 #include <uvm/uvm_extern.h>
105 
106 #include <sh3/cache.h>
107 #include <sh3/clock.h>
108 #include <sh3/exception.h>
109 #include <sh3/locore.h>
110 #include <sh3/mmu.h>
111 #include <sh3/intr.h>
112 
/* Our exported CPU info; we can have only one. */
struct cpu_info cpu_info_store;
int cpu_arch;			/* CPU architecture, set by sh_cpu_init() */
int cpu_product;		/* CPU product id, set by sh_cpu_init() */
char cpu_model[120];		/* model string printed by sh_startup() */

struct vm_map *exec_map;	/* submap for exec arguments */
struct vm_map *mb_map;		/* NOTE(review): declared but not set up here */
struct vm_map *phys_map;	/* submap for physio */

int physmem;			/* total physical memory, in pages */
struct user *proc0paddr;	/* proc0 u-area; init_main.c uses this. */
struct pcb *curpcb;		/* pcb of the currently running process */
struct md_upte *curupte;	/* SH3 wired u-area hack */

/* IOM_RAM_BEGIN must be a physical address (option from opt_memsize.h). */
#if !defined(IOM_RAM_BEGIN)
#error "define IOM_RAM_BEGIN"
#elif (IOM_RAM_BEGIN & SH3_P1SEG_BASE) != 0
#error "IOM_RAM_BEGIN is physical address. not P1 address."
#endif

/* Exception vector base: start of RAM, viewed through cached P1 segment. */
#define	VBR	(u_int8_t *)SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN)
vaddr_t ram_start = SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN);
/* exception handler holder (sh3/sh3/exception_vector.S) */
extern char sh_vector_generic[], sh_vector_generic_end[];
extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
#ifdef SH3
extern char sh3_vector_tlbmiss[], sh3_vector_tlbmiss_end[];
#endif
#ifdef SH4
extern char sh4_vector_tlbmiss[], sh4_vector_tlbmiss_end[];
#endif
/*
 * These variables are needed by /sbin/savecore
 */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int dumpsize;			/* pages */
long dumplo;	 		/* blocks */
151 
/*
 * sh_cpu_init(arch, product):
 *	Common CPU bring-up.  Records the CPU type, initializes the
 *	cache/MMU/clock/interrupt-controller access ops, copies the
 *	exception handlers to their slots below VBR, points VBR at
 *	them, selects the cpu_switch resume routine, and sets the
 *	UVM page size.
 */
void
sh_cpu_init(int arch, int product)
{
	/* CPU type */
	cpu_arch = arch;
	cpu_product = product;

#if defined(SH3) && defined(SH4)
	/* Set register addresses */
	sh_devreg_init();
#endif
	/* Cache access ops. */
	sh_cache_init();

	/* MMU access ops. */
	sh_mmu_init();

	/* Hardclock, RTC initialize. */
	machine_clock_init();

	/* ICU initialize. */
	intc_init();

	/*
	 * Exception vector.  The hardware dispatches to fixed offsets
	 * from VBR: +0x100 general exception, +0x400 TLB miss,
	 * +0x600 interrupt.
	 */
	memcpy(VBR + 0x100, sh_vector_generic,
	    sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
	if (CPU_IS_SH3)
		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
	memcpy(VBR + 0x600, sh_vector_interrupt,
	    sh_vector_interrupt_end - sh_vector_interrupt);

	/* Make the freshly copied handlers visible to instruction fetch. */
	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_all();

	/* Point the vector base register at the handler table. */
	__asm__ __volatile__("ldc %0, vbr" :: "r"(VBR));

	/* kernel stack setup */
	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;

	/* Set page size (4KB) */
	uvm_setpagesize();
}
202 
/*
 * void sh_proc0_init(void):
 *	Setup proc0 u-area: steal boot-time physical memory for it,
 *	point the kernel-bank stack registers (r6_bank/r7_bank) into it,
 *	and place proc0's trapframe just below the frame top.
 */
void
sh_proc0_init()
{
	struct switchframe *sf;
	vaddr_t u;

	/* Steal process0 u-area */
	u = uvm_pageboot_alloc(USPACE);
	memset((void *)u, 0, USPACE);

	/* Setup proc0 */
	proc0paddr = (struct user *)u;
	proc0.p_addr = proc0paddr;
	/*
	 * u-area map:
	 * |user| .... | ............... |
	 * |      NBPG |  USPACE - NBPG  |
         *        frame top        stack top
	 * current frame ... r6_bank
	 * stack top     ... r7_bank
	 * current stack ... r15
	 */
	curpcb = proc0.p_md.md_pcb = &proc0.p_addr->u_pcb;
	curupte = proc0.p_md.md_upte;

	/* Load the layout above into the switchframe and live registers. */
	sf = &curpcb->pcb_sf;
	sf->sf_r6_bank = u + NBPG;
	sf->sf_r7_bank = sf->sf_r15	= u + USPACE;
	__asm__ __volatile__("ldc %0, r6_bank" :: "r"(sf->sf_r6_bank));
	__asm__ __volatile__("ldc %0, r7_bank" :: "r"(sf->sf_r7_bank));

	/* proc0's trapframe lives immediately below the frame top. */
	proc0.p_md.md_regs = (struct trapframe *)sf->sf_r6_bank - 1;
#ifdef KSTACK_DEBUG
	/* Paint unused u-area/stack with sentinels to catch overruns. */
	memset((char *)(u + sizeof(struct user)), 0x5a,
	    NBPG - sizeof(struct user));
	memset((char *)(u + NBPG), 0xa5, USPACE - NBPG);
#endif /* KSTACK_DEBUG */
}
245 
/*
 * sh_startup():
 *	Machine-dependent startup: print the banner and CPU model,
 *	allocate and map the buffer cache, create the exec-argument
 *	and physio submaps, report memory, and initialize buffers.
 */
void
sh_startup()
{
	u_int i, base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];

	printf(version);
	if (*cpu_model != '\0')
		printf("%s", cpu_model);
#ifdef DEBUG
	/* Report the sizes of the exception handlers copied below VBR. */
	printf("general exception handler:\t%d byte\n",
	    sh_vector_generic_end - sh_vector_generic);
	printf("TLB miss exception handler:\t%d byte\n",
#if defined(SH3) && defined(SH4)
	    CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#elif defined(SH3)
	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
#elif defined(SH4)
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#endif
	    );
	printf("interrupt exception handler:\t%d byte\n",
	    sh_vector_interrupt_end - sh_vector_interrupt);
#endif /* DEBUG */

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffers = 0;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
		UVM_ADV_NORMAL, 0)) != 0)
		panic("sh3_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	/* Spread bufpages across nbuf buffers; the first "residual"
	 * buffers get one extra page each. */
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("sh3_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	/* Flush the batched kenter mappings above. */
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
346 
/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 *
 * NOTE(review): intentionally a stub on this port — dumplo/dumpsize
 * are left at their defaults and crash dumps are not configured.
 */
void
cpu_dumpconf()
{
}
358 
/*
 * dumpsys():
 *	Dump kernel memory to the dump device on panic.
 *	NOTE(review): not implemented on this port — a stub so the
 *	MI panic path has something to call.
 */
void
dumpsys()
{
}
363 
/*
 * Send an interrupt to process.
 *
 * Builds a sigframe (saved register context + signal mask) on either
 * the alternate signal stack or the process stack, copies it out, and
 * rewrites the trapframe so the process resumes in the signal handler
 * with pr pointing at the signal trampoline (which calls sigreturn
 * below to restore the saved context).
 */
void
sendsig(int sig, sigset_t *mask, u_long code)
{
	struct proc *p = curproc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	int onstack;
	sig_t catcher = SIGACTION(p, sig).sa_handler;

	tf = p->p_md.md_regs;

	/*
	 * Do we need to jump onto the signal stack?
	 * Only if the handler asked for it (SA_ONSTACK) and we are not
	 * already on the alternate stack (and it isn't disabled).
	 */
	onstack =
	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sigframe *)((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
		    p->p_sigctx.ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->tf_r15;
	/* Reserve one sigframe below the chosen stack top. */
	fp--;

	/* Save register context. */
	frame.sf_sc.sc_ssr = tf->tf_ssr;
	frame.sf_sc.sc_spc = tf->tf_spc;
	frame.sf_sc.sc_pr = tf->tf_pr;
	frame.sf_sc.sc_r15 = tf->tf_r15;
	frame.sf_sc.sc_r14 = tf->tf_r14;
	frame.sf_sc.sc_r13 = tf->tf_r13;
	frame.sf_sc.sc_r12 = tf->tf_r12;
	frame.sf_sc.sc_r11 = tf->tf_r11;
	frame.sf_sc.sc_r10 = tf->tf_r10;
	frame.sf_sc.sc_r9 = tf->tf_r9;
	frame.sf_sc.sc_r8 = tf->tf_r8;
	frame.sf_sc.sc_r7 = tf->tf_r7;
	frame.sf_sc.sc_r6 = tf->tf_r6;
	frame.sf_sc.sc_r5 = tf->tf_r5;
	frame.sf_sc.sc_r4 = tf->tf_r4;
	frame.sf_sc.sc_r3 = tf->tf_r3;
	frame.sf_sc.sc_r2 = tf->tf_r2;
	frame.sf_sc.sc_r1 = tf->tf_r1;
	frame.sf_sc.sc_r0 = tf->tf_r0;
	frame.sf_sc.sc_expevt = tf->tf_expevt;

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.
	 */
	switch (ps->sa_sigdesc[sig].sd_vers) {
#if 1 /* COMPAT_16 */
	case 0:		/* legacy on-stack sigtramp */
		tf->tf_pr = (int)p->p_sigctx.ps_sigcode;
		break;
#endif /* COMPAT_16 */

	case 1:
		tf->tf_pr = (int)ps->sa_sigdesc[sig].sd_tramp;
		break;

	default:
		/* Don't know what trampoline version; kill it. */
		sigexit(p, SIGILL);
	}

	/* Handler arguments: r4 = sig, r5 = code, r6 = &sigcontext. */
	tf->tf_r4 = sig;
	tf->tf_r5 = code;
	tf->tf_r6 = (int)&fp->sf_sc;
	tf->tf_spc = (int)catcher;
	tf->tf_r15 = (int)fp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
}
466 
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sys___sigreturn14(struct proc *p, void *v, register_t *retval)
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, context;
	struct trapframe *tf;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	/* Copy the whole sigcontext in before trusting any field of it. */
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore signal context. */
	tf = p->p_md.md_regs;

	/*
	 * Check for security violations: the user may not change any
	 * of the privileged PSL_USERSTATIC bits of the status register.
	 */
	if (((context.sc_ssr ^ tf->tf_ssr) & PSL_USERSTATIC) != 0)
		return (EINVAL);

	tf->tf_ssr = context.sc_ssr;

	tf->tf_r0 = context.sc_r0;
	tf->tf_r1 = context.sc_r1;
	tf->tf_r2 = context.sc_r2;
	tf->tf_r3 = context.sc_r3;
	tf->tf_r4 = context.sc_r4;
	tf->tf_r5 = context.sc_r5;
	tf->tf_r6 = context.sc_r6;
	tf->tf_r7 = context.sc_r7;
	tf->tf_r8 = context.sc_r8;
	tf->tf_r9 = context.sc_r9;
	tf->tf_r10 = context.sc_r10;
	tf->tf_r11 = context.sc_r11;
	tf->tf_r12 = context.sc_r12;
	tf->tf_r13 = context.sc_r13;
	tf->tf_r14 = context.sc_r14;
	tf->tf_spc = context.sc_spc;
	tf->tf_r15 = context.sc_r15;
	tf->tf_pr = context.sc_pr;

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);

	/* EJUSTRETURN: trapframe already holds the return state. */
	return (EJUSTRETURN);
}
533 
534 /*
535  * Clear registers on exec
536  */
537 void
538 setregs(struct proc *p, struct exec_package *pack, u_long stack)
539 {
540 	struct trapframe *tf;
541 
542 	p->p_md.md_flags &= ~MDP_USEDFPU;
543 
544 	tf = p->p_md.md_regs;
545 
546 	tf->tf_r0 = 0;
547 	tf->tf_r1 = 0;
548 	tf->tf_r2 = 0;
549 	tf->tf_r3 = 0;
550 	tf->tf_r4 = fuword((caddr_t)stack);	/* argc */
551 	tf->tf_r5 = stack + 4;			/* argv */
552 	tf->tf_r6 = stack + 4 * tf->tf_r4 + 8;	/* envp */
553 	tf->tf_r7 = 0;
554 	tf->tf_r8 = 0;
555 	tf->tf_r9 = (int)p->p_psstr;
556 	tf->tf_r10 = 0;
557 	tf->tf_r11 = 0;
558 	tf->tf_r12 = 0;
559 	tf->tf_r13 = 0;
560 	tf->tf_r14 = 0;
561 	tf->tf_spc = pack->ep_entry;
562 	tf->tf_ssr = PSL_USERSET;
563 	tf->tf_r15 = stack;
564 }
565 
/*
 * Jump to reset vector.
 *	Quiesce exception handling, record a manual-reset event code in
 *	EXPEVT, then transfer control to physical address 0 via the
 *	uncached P2 segment (0xa0000000), i.e. the CPU reset vector.
 */
void
cpu_reset()
{

	_cpu_exception_suspend();
	_reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);

	/* GNU computed goto: jump to the reset vector through P2. */
	goto *(u_int32_t *)0xa0000000;
	/* NOTREACHED */
	/* Spin forever in case the jump somehow returns. */
	while (1)
		;
}
581