/*	$OpenBSD: sh_machdep.c,v 1.55 2022/10/30 17:43:40 guenther Exp $	*/
/*	$NetBSD: sh3_machdep.c,v 1.59 2006/03/04 01:13:36 uwe Exp $	*/

/*
 * Copyright (c) 2007 Miodrag Vallat.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice, this permission notice, and the disclaimer below
 * appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*-
 * Copyright (c) 1996, 1997, 1998, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/syscallargs.h>
#include <sys/user.h>
#include <sys/sched.h>
#include <sys/msg.h>
#include <sys/conf.h>
#include <sys/kcore.h>
#include <sys/reboot.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <sh/cache.h>
#include <sh/clock.h>
#include <sh/fpu.h>
#include <sh/locore.h>
#include <sh/mmu.h>
#include <sh/trap.h>
#include <sh/intr.h>
#include <sh/kcore.h>

/* Our exported CPU info; we can have only one. */
int cpu_arch;
int cpu_product;
char cpu_model[120];

struct vm_map *exec_map;
struct vm_map *phys_map;

int physmem;
struct user *proc0paddr;	/* init_main.c uses this. */
struct pcb *curpcb;
struct md_upte *curupte;	/* SH3 wired u-area hack */

#define	VBR	(u_int8_t *)SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN)
vaddr_t ram_start = SH3_PHYS_TO_P1SEG(IOM_RAM_BEGIN);
/* exception handler holder (sh/sh/vectors.S) */
extern char sh_vector_generic[], sh_vector_generic_end[];
extern char sh_vector_interrupt[], sh_vector_interrupt_end[];
#ifdef SH3
extern char sh3_vector_tlbmiss[], sh3_vector_tlbmiss_end[];
#endif
#ifdef SH4
extern char sh4_vector_tlbmiss[], sh4_vector_tlbmiss_end[];
#endif

/*
 * These variables are needed by /sbin/savecore.
 */
u_long dumpmag = 0x8fca0101;	/* magic number */
u_int dumpsize;			/* pages */
long dumplo;			/* blocks */
cpu_kcore_hdr_t cpu_kcore_hdr;

void
sh_cpu_init(int arch, int product)
{
	/* CPU type */
	cpu_arch = arch;
	cpu_product = product;

#if defined(SH3) && defined(SH4)
	/* Set register addresses */
	sh_devreg_init();
#endif
	/* Cache access ops. */
	sh_cache_init();

	/* MMU access ops. */
	sh_mmu_init();

	/* Hardclock, RTC initialize. */
	machine_clock_init();

	/* ICU initialize. */
	intc_init();

	/* Exception vector. */
	memcpy(VBR + 0x100, sh_vector_generic,
	    sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
	if (CPU_IS_SH3)
		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
	memcpy(VBR + 0x600, sh_vector_interrupt,
	    sh_vector_interrupt_end - sh_vector_interrupt);

	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_all();

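	/* Point VBR at the freshly copied exception handlers. */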
	__asm volatile("ldc %0, vbr" :: "r"(VBR));

	/* kernel stack setup */
	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;

	/* Set page size (4KB) */
	uvm_setpagesize();
}

/*
 * void sh_proc0_init(void):
 *	Setup proc0 u-area.
 */
void
sh_proc0_init(void)
{
	struct switchframe *sf;
	vaddr_t u;

	/* Steal process0 u-area */
	u = uvm_pageboot_alloc(USPACE);
	memset((void *)u, 0, USPACE);

	/* Setup proc0 */
	proc0paddr = (struct user *)u;
	proc0.p_addr = proc0paddr;
	/*
	 * u-area map:
	 * |user| .... | .................. |
	 * | PAGE_SIZE | USPACE - PAGE_SIZE |
	 *        frame top                  stack top
	 * current frame ... r6_bank
	 * stack top     ... r7_bank
	 * current stack ... r15
	 */
	curpcb = proc0.p_md.md_pcb = &proc0.p_addr->u_pcb;
	curupte = proc0.p_md.md_upte;

	sf = &curpcb->pcb_sf;
	sf->sf_r6_bank = u + PAGE_SIZE;
	sf->sf_r7_bank = sf->sf_r15 = u + USPACE;
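	/*
	 * Load the banked registers so that the exception handlers will
	 * find proc0's frame and stack.
	 */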
	__asm volatile("ldc %0, r6_bank" :: "r"(sf->sf_r6_bank));
	__asm volatile("ldc %0, r7_bank" :: "r"(sf->sf_r7_bank));

	proc0.p_md.md_regs = (struct trapframe *)sf->sf_r6_bank - 1;
#ifdef KSTACK_DEBUG
	memset((char *)(u + sizeof(struct user)), 0x5a,
	    PAGE_SIZE - sizeof(struct user));
	memset((char *)(u + PAGE_SIZE), 0xa5, USPACE - PAGE_SIZE);
#endif /* KSTACK_DEBUG */
}

void
sh_startup(void)
{
	vaddr_t minaddr, maxaddr;

	printf("%s", version);

#ifdef DEBUG
	printf("general exception handler:\t%d byte\n",
	    sh_vector_generic_end - sh_vector_generic);
	printf("TLB miss exception handler:\t%d byte\n",
#if defined(SH3) && defined(SH4)
	    CPU_IS_SH3 ? sh3_vector_tlbmiss_end - sh3_vector_tlbmiss :
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#elif defined(SH3)
	    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss
#elif defined(SH4)
	    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss
#endif
	    );
	printf("interrupt exception handler:\t%d byte\n",
	    sh_vector_interrupt_end - sh_vector_interrupt);
#endif /* DEBUG */

	printf("real mem = %lu (%luMB)\n", ptoa(physmem),
	    ptoa(physmem) / 1024 / 1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free) / 1024 / 1024);

	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}

void
dumpconf(void)
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	u_int dumpextra, totaldumpsize;		/* in disk blocks */
	u_int seg, nblks;

	if (dumpdev == NODEV ||
	    (nblks = (bdevsw[major(dumpdev)].d_psize)(dumpdev)) == 0)
		return;
	if (nblks <= ctod(1))
		return;

	dumpsize = 0;
	for (seg = 0; seg < h->kcore_nsegs; seg++)
		dumpsize += atop(h->kcore_segs[seg].size);
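	/* Room for the dump header written by cpu_dump(), in disk blocks. */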
	dumpextra = cpu_dumpsize();

	/* Always skip the first block, in case there is a label there. */
	if (dumplo < btodb(1))
		dumplo = btodb(1);

	/* Put dump at the end of the partition, and make it fit. */
	totaldumpsize = ctod(dumpsize) + dumpextra;
	if (totaldumpsize > nblks - dumplo) {
		totaldumpsize = dbtob(nblks - dumplo);
		dumpsize = dtoc(totaldumpsize - dumpextra);
	}
	if (dumplo < nblks - totaldumpsize)
		dumplo = nblks - totaldumpsize;
}

void
dumpsys(void)
{
	cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	u_int page = 0;
	paddr_t dumppa;
	u_int seg;
	int rc;
	extern int msgbufmapped;

	/* Don't record dump messages in msgbuf. */
	msgbufmapped = 0;

	/* Make sure dump settings are valid. */
	if (dumpdev == NODEV)
		return;
	if (dumpsize == 0) {
		dumpconf();
		if (dumpsize == 0)
			return;
	}
	if (dumplo <= 0) {
		printf("\ndump to dev 0x%x not possible, not enough space\n",
		    dumpdev);
		return;
	}

	dump = bdevsw[major(dumpdev)].d_dump;
	blkno = dumplo;

	printf("\ndumping to dev 0x%x offset %ld\n", dumpdev, dumplo);

	printf("dump ");

	/* Write dump header */
	rc = cpu_dump(dump, &blkno);
	if (rc != 0)
		goto bad;

	for (seg = 0; seg < h->kcore_nsegs; seg++) {
		u_int pagesleft;

		pagesleft = atop(h->kcore_segs[seg].size);
		dumppa = (paddr_t)h->kcore_segs[seg].start;

		while (pagesleft != 0) {
			u_int npages;

#define	NPGMB	atop(1024 * 1024)
			if (page != 0 && (page % NPGMB) == 0)
				printf("%u ", page / NPGMB);

			/* do not dump more than 1MB at once */
			npages = min(pagesleft, NPGMB);
#undef NPGMB
			npages = min(npages, dumpsize);

			rc = (*dump)(dumpdev, blkno,
			    (caddr_t)SH3_PHYS_TO_P2SEG(dumppa), ptoa(npages));
			if (rc != 0)
				goto bad;

			pagesleft -= npages;
			dumppa += ptoa(npages);
			page += npages;
			dumpsize -= npages;
			if (dumpsize == 0)
				goto bad;	/* if truncated dump */
			blkno += ctod(npages);
		}
	}
bad:
	switch (rc) {
	case 0:
		printf("succeeded\n");
		break;
	case ENXIO:
		printf("device bad\n");
		break;
	case EFAULT:
		printf("device not ready\n");
		break;
	case EINVAL:
		printf("area improper\n");
		break;
	case EIO:
		printf("I/O error\n");
		break;
	case EINTR:
		printf("aborted\n");
		break;
	default:
		printf("error %d\n", rc);
		break;
	}

	/* make sure console can output our last message */
	delay(1 * 1000 * 1000);
}

/*
 * Signal frame.
 */
struct sigframe {
#if 0 /* in registers on entry to signal trampoline */
	int	sf_signum;		/* r4 - "signum" argument for handler */
	siginfo_t *sf_sip;		/* r5 - "sip" argument for handler */
	struct sigcontext *sf_ucp;	/* r6 - "ucp" argument for handler */
#endif
	struct sigcontext sf_uc;	/* actual context */
	siginfo_t sf_si;
};

/*
 * Send an interrupt to process.
 */
int
sendsig(sig_t catcher, int sig, sigset_t mask, const siginfo_t *ksip,
    int info, int onstack)
{
	struct proc *p = curproc;
	struct sigframe *fp, frame;
	struct trapframe *tf = p->p_md.md_regs;
	siginfo_t *sip;

	if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 &&
	    !sigonstack(p->p_md.md_regs->tf_r15) &&
	    onstack)
		fp = (struct sigframe *)
		    trunc_page((vaddr_t)p->p_sigstk.ss_sp + p->p_sigstk.ss_size);
	else
		fp = (void *)p->p_md.md_regs->tf_r15;
	--fp;

	bzero(&frame, sizeof(frame));

	if (info) {
		frame.sf_si = *ksip;
		sip = &fp->sf_si;
	} else
		sip = NULL;

	/* Save register context. */
	memcpy(frame.sf_uc.sc_reg, &tf->tf_spc, sizeof(frame.sf_uc.sc_reg));
#ifdef SH4
	if (CPU_IS_SH4)
		fpu_save((struct fpreg *)&frame.sf_uc.sc_fpreg);
#endif

	frame.sf_uc.sc_expevt = tf->tf_expevt;
	/* frame.sf_uc.sc_err = 0; */
	frame.sf_uc.sc_mask = mask;

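	/*
	 * Bind the context to its user stack address with the per-process
	 * signal cookie, so sys_sigreturn() can detect a forged or moved
	 * sigcontext.
	 */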
	frame.sf_uc.sc_cookie = (long)&fp->sf_uc ^ p->p_p->ps_sigcookie;
	if (copyout(&frame, fp, sizeof(frame)) != 0)
		return 1;

	tf->tf_r4 = sig;		/* "signum" argument for handler */
	tf->tf_r5 = (int)sip;		/* "sip" argument for handler */
	tf->tf_r6 = (int)&fp->sf_uc;	/* "ucp" argument for handler */
	tf->tf_spc = (int)catcher;
	tf->tf_r15 = (int)fp;
	tf->tf_pr = (int)p->p_p->ps_sigcode;

	return 0;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sys_sigreturn(struct proc *p, void *v, register_t *retval)
{
	struct sys_sigreturn_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext ksc, *scp = SCARG(uap, sigcntxp);
	struct trapframe *tf;
	int error;

	if (PROC_PC(p) != p->p_p->ps_sigcoderet) {
		sigexit(p, SIGILL);
		return (EPERM);
	}

	if ((error = copyin(scp, &ksc, sizeof(*scp))) != 0)
		return (error);

	if (ksc.sc_cookie != ((long)scp ^ p->p_p->ps_sigcookie)) {
		sigexit(p, SIGILL);
		return (EFAULT);
	}

	/* Prevent reuse of the sigcontext cookie */
	ksc.sc_cookie = 0;
	(void)copyout(&ksc.sc_cookie, (caddr_t)scp +
	    offsetof(struct sigcontext, sc_cookie), sizeof(ksc.sc_cookie));

	/* Restore signal context. */
	tf = p->p_md.md_regs;

	/* Check for security violations. */
	if (((ksc.sc_reg[1] /* ssr */ ^ tf->tf_ssr) & PSL_USERSTATIC) != 0)
		return (EINVAL);

	memcpy(&tf->tf_spc, ksc.sc_reg, sizeof(ksc.sc_reg));

#ifdef SH4
	if (CPU_IS_SH4)
		fpu_restore((struct fpreg *)&ksc.sc_fpreg);
#endif

	/* Restore signal mask. */
	p->p_sigmask = ksc.sc_mask & ~sigcantmask;

	return (EJUSTRETURN);
}

/*
 * Clear registers on exec
 */
void
setregs(struct proc *p, struct exec_package *pack, u_long stack,
    struct ps_strings *arginfo)
{
	struct trapframe *tf;
	struct pcb *pcb = p->p_md.md_pcb;

	p->p_md.md_flags &= ~MDP_USEDFPU;

	tf = p->p_md.md_regs;

	tf->tf_gbr = 0;
	tf->tf_macl = 0;
	tf->tf_mach = 0;

	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	tf->tf_r2 = 0;
	tf->tf_r3 = 0;
	tf->tf_r4 = arginfo->ps_nargvstr;
	tf->tf_r5 = (register_t)arginfo->ps_argvstr;
	tf->tf_r6 = (register_t)arginfo->ps_envstr;
	tf->tf_r7 = 0;
	tf->tf_r8 = 0;
	tf->tf_r9 = (int)p->p_p->ps_strings;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_spc = pack->ep_entry;
	tf->tf_ssr = PSL_USERSET;
	tf->tf_r15 = stack;

#ifdef SH4
	if (CPU_IS_SH4) {
		/*
		 * Clear floating point registers.
		 */
		bzero(&pcb->pcb_fp, sizeof(pcb->pcb_fp));
		pcb->pcb_fp.fpr_fpscr = FPSCR_PR;
		fpu_restore(&pcb->pcb_fp);
	}
#endif
}

/*
 * Jump to reset vector.
 */
void
cpu_reset(void)
{
	_cpu_exception_suspend();
	_reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);

#ifndef __lint__
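	/* 0xa0000000 is the hardware reset vector, seen through P2 (uncached). */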
	goto *(void *)0xa0000000;
#endif
	/* NOTREACHED */
}