/*
 * Copyright (c) 1982,1987,1988 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)machdep.c	7.1 (Berkeley) 05/26/88
 */

#include "param.h"
#include "systm.h"
#include "dir.h"
#include "user.h"
#include "kernel.h"
#include "map.h"
#include "vm.h"
#include "proc.h"
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "inode.h"
#include "file.h"
#include "text.h"
#include "clist.h"
#include "callout.h"
#include "cmap.h"
#include "malloc.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "quota.h"

#include "cpu.h"
#include "reg.h"
#include "pte.h"
#include "psl.h"
#include "mem.h"
#include "mtpr.h"
#include "cp.h"

#include "../tahoevba/vbavar.h"

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif
#include "yc.h"
#if NCY > 0
#include "../tahoevba/cyreg.h"
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */

/*
 * Machine-dependent startup code
 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j;
	register caddr_t v;
	int maxbufs, base, residual;

	/*
	 * Initialize error message buffer (at end of core).
	 */
	maxmem -= btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		*(int *)pte++ = PG_V | PG_KW | (maxmem + i);
	mtpr(TBIA, 1);
	msgbufmapped = 1;
#ifdef KADB
	kdb_init();			/* startup kernel debugger */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem  = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(0xc0000000 | (firstaddr * NBPG));
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
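/*
 * For illustration: a use such as
 *
 *	valloc(buf, struct buf, nbuf);
 *
 * simply expands to
 *
 *	(buf) = (struct buf *)v; v = (caddr_t)((buf)+(nbuf));
 *
 * i.e. each valloc()/valloclim() carves the next array out of the kernel
 * virtual arena at "v" and advances "v" past it; valloclim() additionally
 * records the end of the array in "lim".
 */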
#if NCY > 0
	/*
	 * Allocate raw buffers for tapemaster controllers
	 * first, as they need buffers in the first megabyte.
	 */
	valloc(cybuf, char, NCY * CYMAXIO);
#endif
	valloclim(inode, struct inode, ninode, inodeNINODE);
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(namecache, struct namecache, nchsize);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef QUOTA
	valloclim(quota, struct quota, nquota, quotaNQUOTA);
	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * 1024))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * 1024 + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
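	/*
	 * Worked example (hypothetical figures, for illustration only;
	 * the real values depend on physmem, CLSIZE and the patchable
	 * variables above): if the formula above yielded bufpages = 200
	 * clusters, nbuf would default to 200 / 2 = 100 buffer headers
	 * and nswbuf to (100 / 2) &~ 1 = 50 swap buffer headers
	 * (forced even, capped at 256).
	 */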
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ 0xc0000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - 0xc0000000)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ 0xc0000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ 0xc0000000);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different from the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
	for (i = 0; i < residual; i++) {
		for (j = 0; j < (base + 1) * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}
	for (i = residual; i < nbuf; i++) {
		for (j = 0; j < base * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ 0xc0000000);
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 1);			/* After we just cleared it all! */

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
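	/*
	 * The loop above leaves callout[ncallout-1].c_next untouched;
	 * since the pages backing these structures were cleared earlier
	 * in this routine, that last pointer is zero and the free list
	 * ends up NULL-terminated, with callfree pointing at its head.
	 */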

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();		/* now safe to do malloc/free */
	intenable = 1;		/* Enable interrupts from now on */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();
}

#ifdef PGINPROF
/*
 * Return the difference (in microseconds)
 * between the current time and a previous
 * time as represented by the arguments.
 * If there is a pending clock interrupt
 * which has not been serviced due to high
 * ipl, return error code.
 */
/*ARGSUSED*/
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

	return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
}
#endif

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
sendsig(p, sig, mask)
	int (*p)(), sig, mask;
{
	register struct sigcontext *scp;
	register int *regs;
	register struct sigframe {
		int	sf_signum;
		int	sf_code;
		struct	sigcontext *sf_scp;
		int	(*sf_handler)();
		int	sf_regs[6];		/* r0-r5 */
		struct	sigcontext *sf_scpcopy;
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void) grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		u.u_signal[SIGILL] = SIG_DFL;
		sig = sigmask(SIGILL);
		u.u_procp->p_sigignore &= ~sig;
		u.u_procp->p_sigcatch &= ~sig;
		u.u_procp->p_sigmask &= ~sig;
		psignal(u.u_procp, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	if (sig == SIGILL || sig == SIGFPE) {
		fp->sf_code = u.u_code;
		u.u_code = 0;
	} else
		fp->sf_code = 0;
	fp->sf_scp = scp;
	fp->sf_handler = p;
	/*
	 * Build the callf argument frame to be used to call sigreturn.
	 */
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	regs[SP] = (int)fp;
	regs[PC] = (int)u.u_pcb.pcb_sigc;
}
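/*
 * Sketch of the user stack as sendsig() leaves it (growing toward lower
 * addresses; exact offsets depend on the sigcontext definition):
 *
 *	old SP ->	previous top of stack
 *	scp    ->	struct sigcontext: sc_onstack, sc_mask, sc_sp,
 *			sc_fp, sc_pc, sc_ps
 *	new SP ->	struct sigframe: signal number, code, scp, handler
 *			address, r0-r5 save area, and a second copy of scp
 *			for the callf to sigreturn
 *
 * The saved PC is redirected to u.u_pcb.pcb_sigc, the sigcode trampoline,
 * which calls the handler and then issues the sigreturn kcall described
 * in the comment above sendsig().
 */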

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
	struct a {
		struct sigcontext *sigcntxp;
	};
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = ((struct a *)(u.u_ap))->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0) {
		u.u_error = EINVAL;
		return;
	}
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD)) {
		u.u_error = EINVAL;
		return;
	}
	u.u_eosys = JUSTRETURN;
	u.u_onstack = scp->sc_onstack & 01;
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[FP] = scp->sc_fp;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
}

/* XXX - BEGIN 4.2 COMPATIBILITY */
/*
 * Compatibility with 4.2 kcall $139 used by longjmp()
 */
osigcleanup()
{
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = (struct sigcontext *)fuword((caddr_t)regs[SP]);
	if ((int)scp == -1)
		return;
	if (useracc((caddr_t)scp, 3 * sizeof (int), 0) == 0)
		return;
	u.u_onstack = scp->sc_onstack & 01;
	u.u_procp->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[SP] = scp->sc_sp;
}
/* XXX - END 4.2 COMPATIBILITY */

int	waittime = -1;

boot(arghowto)
	int arghowto;
{
	register long dummy;		/* r12 is reserved */
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	howto = arghowto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			xumount(NODEV);
		update();

		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		DELAY(10000);			/* wait for printf to finish */
	}
	mtpr(IPL, 0x1f);			/* extreme priority */
	devtype = major(rootdev);
	*(int *)CPBFLG = howto;
	if (howto&RB_HALT) {
		printf("halting (in tight loop); hit ~h\n\n");
		mtpr(IPL, 0x1f);
		for (;;)
			;
	} else {
		if (howto & RB_DUMP) {
			doadump();		/* CPBOOTs itself */
			/*NOTREACHED*/
		}
		tocons(CPBOOT);
	}
#ifdef lint
	dummy = 0; dummy = dummy;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
	for (;;)
		asm("halt");
	/*NOTREACHED*/
}

struct	cpdcb_o cpcontrol;

/*
 * Send the given command ('c') to the console processor.
 * Assumed to be one of the last things the OS does before
 * halting or rebooting.
 */
tocons(c)
{
	register timeout;

	cpcontrol.cp_hdr.cp_unit = CPUNIT;
	cpcontrol.cp_hdr.cp_comm = (char)c;
	if (c != CPBOOT)
		cpcontrol.cp_hdr.cp_count = 1;	/* Just for sanity */
	else {
		cpcontrol.cp_hdr.cp_count = 4;
		*(int *)cpcontrol.cp_buf = 0;	/* r11 value for reboot */
	}
	timeout = 100000;				/* Delay loop */
	while (timeout-- && (cnlast->cp_unit&CPDONE) == 0)
		uncache(&cnlast->cp_unit);
	/* give up, force it to listen */
	mtpr(CPMDCB, vtoph((struct proc *)0, (unsigned)&cpcontrol));
}

#if CLSIZE != 1
/*
 * Invalidate all pte's in a cluster, one page at a time.
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
		mtpr(TBIS, addr);
		addr += NBPG;
	}
}
#endif

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{

	if (dumpdev == NODEV)
		return;
#ifdef notdef
	if ((minor(dumpdev)&07) != 1)
		return;
#endif
	dumpsize = physmem;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	default:
		printf("succeeded\n");
		break;
	}
	printf("\n\n");
	DELAY(1000);
	tocons(CPBOOT);
}

/*
 * Bus error 'recovery' code.
 * Print out the bus frame and then give up.
 * (More information from special registers can be printed here.)
 */

/*
 * Frame for bus error
 */
struct buserframe {
	int	which_bus;		/* primary or secondary */
	int	memerreg;		/* memory error register */
	int	trp_pc;			/* trapped pc */
	int	trp_psl;		/* trapped psl */
};

char	*mem_errcd[8] = {
	"Unknown error code 0",
	"Address parity error",		/* APE */
	"Data parity error",		/* DPE */
	"Data check error",		/* DCE */
	"Versabus timeout",		/* VTO */
	"Versabus error",		/* VBE */
	"Non-existent memory",		/* NEM */
	"Unknown error code 7",
};

buserror(v)
	caddr_t v;
{
	register struct buserframe *busef = (struct buserframe *)v;
	register long reg;

	printf("bus error, address %x, psl %x\n",
	    busef->trp_pc, busef->trp_psl);
	reg = busef->memerreg;
	printf("mear %x %s\n",
	    ((reg&MEAR)>>16)&0xffff, mem_errcd[reg & ERRCD]);
	if (reg&AXE)
		printf("adapter external error\n");
	printf("error master: %s\n", reg&ERM ? "versabus" : "tahoe");
	if (reg&IVV)
		printf("illegal interrupt vector from ipl %d\n", (reg>>2)&7);
	reg = busef->which_bus;
	printf("mcbr %x versabus type %x\n",
	    ((reg&MCBR)>>16)&0xffff, reg & 0xffc3);
	if ((busef->memerreg&IVV) == 0)
		panic("buserror");
}

microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();

	*tvp = time;
	tvp->tv_usec += tick;
	while (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	splx(s);
}

initcpu()
{
	register struct proc *p;

	p = &proc[0];
#define	initkey(which, p, index) \
    which/**/_cache[index] = 1, which/**/_cnt[index] = 1; \
    p->p_/**/which = index;
	initkey(ckey, p, MAXCKEY);
	initkey(dkey, p, MAXDKEY);
}
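/*
 * Note: the initkey() macro above relies on the pre-ANSI preprocessor
 * trick of pasting tokens with an empty comment (the ancestor of the
 * ANSI ## operator), so that, for example, initkey(ckey, p, MAXCKEY)
 * expands to
 *
 *	ckey_cache[MAXCKEY] = 1, ckey_cnt[MAXCKEY] = 1; p->p_ckey = MAXCKEY;
 *
 * i.e. process 0 is assigned key MAXCKEY (and, for the data cache,
 * MAXDKEY) and that key is marked present and in use.
 */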

/*
 * Clear registers on exec
 */
setregs(entry)
	u_long entry;
{

#ifdef notdef
	/* should pass args to init on the stack */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	u.u_ar0[FP] = 0;	/* bottom of the fp chain */
	u.u_ar0[PC] = entry + 2;
}
678