xref: /original-bsd/sys/tahoe/tahoe/machdep.c (revision 33586e34)
/*
 * Copyright (c) 1982,1987,1988 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)machdep.c	7.6 (Berkeley) 09/03/89
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "map.h"
#include "vm.h"
#include "proc.h"
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "vnode.h"
#include "../ufs/inode.h"
#ifdef NFS
#include "mount.h"
#include "../nfs/nfsnode.h"
#endif /* NFS */
#include "file.h"
#include "text.h"
#include "clist.h"
#include "callout.h"
#include "cmap.h"
#include "malloc.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "../ufs/quota.h"

#include "cpu.h"
#include "reg.h"
#include "pte.h"
#include "psl.h"
#include "mem.h"
#include "mtpr.h"
#include "cp.h"

#include "../tahoevba/vbavar.h"

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif
#include "yc.h"
#if NCY > 0
#include "../tahoevba/cyreg.h"
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */

/*
 * Machine-dependent startup code
 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j;
	register caddr_t v;
	int maxbufs, base, residual;

	/*
	 * Initialize error message buffer (at end of core).
	 */
	maxmem = physmem - btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 1; i < btoc(sizeof (struct msgbuf)) + 1; i++)
		*(int *)pte++ = PG_V | PG_KW | (physmem - i);
	mtpr(TBIA, 1);
	msgbufmapped = 1;
#ifdef KADB
	kdb_init();			/* startup kernel debugger */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(0xc0000000 | (firstaddr * NBPG));
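/*
 * valloc() carves "num" objects of "type" out of kernel virtual
 * space at "v"; valloclim() additionally records the end of the
 * allocated array in "lim".
 */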
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
#if NCY > 0
	/*
	 * Allocate raw buffers for tapemaster controllers
	 * first, as they need buffers in the first megabyte.
	 */
	valloc(cybuf, char, NCY * CYMAXIO);
#endif
	valloclim(inode, struct inode, ninode, inodeNINODE);
#ifdef NFS
	valloclim(nfsnode, struct nfsnode, nnfsnode, nfsnodeNNFSNODE);
#endif /* NFS */
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(namecache, struct namecache, nchsize);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef QUOTA
	valloclim(quota, struct quota, nquota, quotaNQUOTA);
	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Ensure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < btoc(2 * 1024 * 1024))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((btoc(2 * 1024 * 1024) + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ 0xc0000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - 0xc0000000)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ 0xc0000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ 0xc0000000);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different from the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
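	/*
	 * The first "residual" buffers get "base"+1 clusters of physical
	 * memory apiece; the remaining buffers get "base" clusters each.
	 */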
	mapaddr = firstaddr;
	for (i = 0; i < residual; i++) {
		for (j = 0; j < (base + 1) * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}
	for (i = residual; i < nbuf; i++) {
		for (j = 0; j < base * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ 0xc0000000);
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 1);			/* After we just cleared it all! */

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();		/* now safe to do malloc/free */
	intenable = 1;		/* Enable interrupts from now on */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();
}

#ifdef PGINPROF
/*
 * Return the difference (in microseconds)
 * between the current time and a previous
 * time as represented by the arguments.
 * If there is a pending clock interrupt
 * which has not been serviced due to high
 * ipl, return error code.
 */
/*ARGSUSED*/
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

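	/*
	 * The clock ticks 60 times per second; 16667 is roughly the
	 * number of microseconds in one tick.
	 */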
	return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
}
#endif

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct sigcontext *scp;
	register struct proc *p = u.u_procp;
	register int *regs;
	register struct sigframe {
		int	sf_signum;
		int	sf_code;
		struct	sigcontext *sf_scp;
		sig_t	sf_handler;
		int	sf_regs[6];		/* r0-r5 */
		struct	sigcontext *sf_scpcopy;
	} *fp;
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void) grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = scp;
	fp->sf_handler = catcher;
	/*
	 * Build the callf argument frame to be used to call sigreturn.
	 */
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	regs[SP] = (int)fp;
	regs[PC] = (int)u.u_pcb.pcb_sigc;
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
	struct a {
		struct sigcontext *sigcntxp;
	};
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = ((struct a *)(u.u_ap))->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0) {
		u.u_error = EINVAL;
		return;
	}
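	/*
	 * The saved PSL must have its must-be-zero bits clear, IPL 0,
	 * not be on the interrupt stack, and have both current and
	 * previous mode set to user.
	 */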
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD)) {
		u.u_error = EINVAL;
		return;
	}
	u.u_eosys = JUSTRETURN;
	u.u_onstack = scp->sc_onstack & 01;
	u.u_procp->p_sigmask = scp->sc_mask &~ sigcantmask;
	regs[FP] = scp->sc_fp;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
}

int	waittime = -1;

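/*
 * Machine-dependent reboot/halt.  Sync the disks first when possible
 * (unless RB_NOSYNC is set or the buffer cache is not yet set up),
 * then pass the howto flags to the console processor and halt in a
 * tight loop, take a crash dump, or request a reboot as directed.
 */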
boot(arghowto)
	int arghowto;
{
	register long dummy;		/* r12 is reserved */
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	howto = arghowto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			xumount(NULL);
		sync();

		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		DELAY(10000);			/* wait for printf to finish */
	}
	mtpr(IPL, 0x1f);			/* extreme priority */
	devtype = major(rootdev);
	*(int *)CPBFLG = howto;
	if (howto&RB_HALT) {
		printf("halting (in tight loop); hit ~h\n\n");
		mtpr(IPL, 0x1f);
		for (;;)
			;
	} else {
		if (howto & RB_DUMP) {
			doadump();		/* CPBOOTs itself */
			/*NOTREACHED*/
		}
		tocons(CPBOOT);
	}
#ifdef lint
	dummy = 0; dummy = dummy;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
	for (;;)
		asm("halt");
	/*NOTREACHED*/
}

struct	cpdcb_o cpcontrol;

/*
 * Send the given command ('c') to the console processor.
 * Assumed to be one of the last things the OS does before
 * halting or rebooting.
 */
tocons(c)
{
	register timeout;

	cpcontrol.cp_hdr.cp_unit = CPUNIT;
	cpcontrol.cp_hdr.cp_comm = (char)c;
	if (c != CPBOOT)
		cpcontrol.cp_hdr.cp_count = 1;	/* Just for sanity */
	else {
		cpcontrol.cp_hdr.cp_count = 4;
		*(int *)cpcontrol.cp_buf = 0;	/* r11 value for reboot */
	}
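	/*
	 * Wait for any previously posted console command to complete,
	 * invalidating the cached copy of its status word each time
	 * around so the check sees the console processor's update.
	 */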
	timeout = 100000;				/* Delay loop */
	while (timeout-- && (cnlast->cp_unit&CPDONE) == 0)
		uncache(&cnlast->cp_unit);
	/* give up, force it to listen */
	mtpr(CPMDCB, vtoph((struct proc *)0, (unsigned)&cpcontrol));
}

#if CLSIZE != 1
/*
 * Invalidate all pte's in a single cluster
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
		mtpr(TBIS, addr);
		addr += NBPG;
	}
}
#endif

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */

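/*
 * Size the crash dump and position it on the dump device, placing it
 * at the end of the partition when possible and never on the first
 * CLSIZE pages, which may hold a disk label.
 */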
dumpconf()
{
	int nblks;

	dumpsize = physmem;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			dumplo = nblks - btodb(ctob(physmem));
	}
	/*
	 * Don't dump on the first CLSIZE pages,
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < CLSIZE)
		dumplo = CLSIZE;
}

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{

	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration, the dump device may be
	 * configured before dumpconf() has been called; if so, size
	 * and position the dump now.
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	default:
		printf("succeeded\n");
		break;
	}
	printf("\n\n");
	DELAY(1000);
	tocons(CPBOOT);
}

/*
 * Bus error 'recovery' code.
 * Print out the bus frame and then give up.
 * (More information from special registers can be printed here.)
 */

/*
 * Frame for bus error
 */
struct buserframe {
	int	which_bus;		/* primary or secondary */
	int	memerreg;		/* memory error register */
	int	trp_pc;			/* trapped pc */
	int	trp_psl;		/* trapped psl */
};

char	*mem_errcd[8] = {
	"Unknown error code 0",
	"Address parity error",		/* APE */
	"Data parity error",		/* DPE */
	"Data check error",		/* DCE */
	"Versabus timeout",		/* VTO */
	"Versabus error",		/* VBE */
	"Non-existent memory",		/* NEM */
	"Unknown error code 7",
};

buserror(v)
	caddr_t v;
{
	register struct buserframe *busef = (struct buserframe *)v;
	register long reg;

	printf("bus error, address %x, psl %x\n",
	    busef->trp_pc, busef->trp_psl);
	reg = busef->memerreg;
	printf("mear %x %s\n",
	    ((reg&MEAR)>>16)&0xffff, mem_errcd[reg & ERRCD]);
	if (reg&AXE)
		printf("adapter external error\n");
	printf("error master: %s\n", reg&ERM ? "versabus" : "tahoe");
	if (reg&IVV)
		printf("illegal interrupt vector from ipl %d\n", (reg>>2)&7);
	reg = busef->which_bus;
	printf("mcbr %x versabus type %x\n",
	    ((reg&MCBR)>>16)&0xffff, reg & 0xffc3);
	if ((busef->memerreg&IVV) == 0)
		panic("buserror");
}

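/*
 * Return the current time of day, crudely extrapolated as the time
 * recorded at the last clock tick plus one tick.
 */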
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();

	*tvp = time;
	tvp->tv_usec += tick;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	splx(s);
}

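/*
 * Machine-dependent CPU initialization: assign process 0 the highest
 * code and data cache keys and mark them in use.
 */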
initcpu()
{
	register struct proc *p;

	p = &proc[0];
#define	initkey(which, p, index) \
    which/**/_cache[index] = 1, which/**/_cnt[index] = 1; \
    p->p_/**/which = index;
	initkey(ckey, p, MAXCKEY);
	initkey(dkey, p, MAXDKEY);
}

/*
 * Clear registers on exec
 */
setregs(entry)
	u_long entry;
{

#ifdef notdef
	/* should pass args to init on the stack */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
	u.u_ar0[FP] = 0;	/* bottom of the fp chain */
	u.u_ar0[PC] = entry + 2;
}
687