xref: /original-bsd/sys/i386/i386/machdep.c (revision e59fb703)
1 /*-
2  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * William Jolitz.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
37  */
38 
39 
40 #include "param.h"
41 #include "systm.h"
42 #include "signalvar.h"
43 #include "kernel.h"
44 #include "map.h"
45 #include "proc.h"
46 #include "user.h"
47 #include "buf.h"
48 #include "reboot.h"
49 #include "conf.h"
50 #include "file.h"
51 #include "clist.h"
52 #include "callout.h"
53 #include "malloc.h"
54 #include "mbuf.h"
55 #include "msgbuf.h"
56 #include "net/netisr.h"
57 
58 #include "vm/vm.h"
59 #include "vm/vm_kern.h"
60 #include "vm/vm_page.h"
61 
62 vm_map_t buffer_map;
63 extern vm_offset_t avail_end;
64 
65 #include "machine/cpu.h"
66 #include "machine/reg.h"
67 #include "machine/psl.h"
68 #include "machine/specialreg.h"
69 #include "i386/isa/rtc.h"
70 
71 /*
72  * Declare these as initialized data so we can patch them.
73  */
74 int	nswbuf = 0;
75 #ifdef	NBUF
76 int	nbuf = NBUF;
77 #else
78 int	nbuf = 0;
79 #endif
80 #ifdef	BUFPAGES
81 int	bufpages = BUFPAGES;
82 #else
83 int	bufpages = 0;
84 #endif
85 int	msgbufmapped;		/* set when safe to use msgbuf */
86 
87 /*
88  * Machine-dependent startup code
89  */
90 int boothowto = 0, Maxmem = 0;
91 long dumplo;
92 int physmem, maxmem;
93 extern int bootdev;
94 #ifdef SMALL
95 extern int forcemaxmem;
96 #endif
97 int biosmem;
98 
99 extern cyloffset;
100 
101 cpu_startup(firstaddr)
102 	int firstaddr;
103 {
104 	register int unixsize;
105 	register unsigned i;
106 	register struct pte *pte;
107 	int mapaddr, j;
108 	register caddr_t v;
109 	int maxbufs, base, residual;
110 	extern long Usrptsize;
111 	vm_offset_t minaddr, maxaddr;
112 	vm_size_t size;
113 
114 	/*
115 	 * Initialize error message buffer (at end of core).
116 	 */
117 
118 	/* avail_end was pre-decremented in pmap_bootstrap to compensate */
119 	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
120 		pmap_enter(pmap_kernel(), msgbufp, avail_end + i * NBPG,
121 			   VM_PROT_ALL, TRUE);
122 	msgbufmapped = 1;
123 
124 #ifdef KDB
125 	kdb_init();			/* startup kernel debugger */
126 #endif
127 	/*
128 	 * Good {morning,afternoon,evening,night}.
129 	 */
130 	printf(version);
131 	printf("real mem  = %d\n", ctob(physmem));
132 
133 	/*
134 	 * Allocate space for system data structures.
135 	 * The first available real memory address is in "firstaddr".
136 	 * The first available kernel virtual address is in "v".
137 	 * As pages of kernel virtual memory are allocated, "v" is incremented.
138 	 * As pages of memory are allocated and cleared,
139 	 * "firstaddr" is incremented.
140 	 * An index into the kernel page table corresponding to the
141 	 * virtual memory address maintained in "v" is kept in "mapaddr".
142 	 */
143 
144 	/*
145 	 * Make two passes.  The first pass calculates how much memory is
146 	 * needed and allocates it.  The second pass assigns virtual
147 	 * addresses to the various data structures.
148 	 */
149 	firstaddr = 0;
150 again:
151 	v = (caddr_t)firstaddr;
152 
153 #define	valloc(name, type, num) \
154 	    (name) = (type *)v; v = (caddr_t)((name)+(num))
155 #define	valloclim(name, type, num, lim) \
156 	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
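	/*
	 * For example, valloc(buf, struct buf, nbuf) expands to:
	 *	buf = (struct buf *)v; v = (caddr_t)(buf + nbuf);
	 * i.e. each call hands out the current value of v and advances v
	 * past `num' objects of the given type; no memory is touched until
	 * the space is actually allocated after the first pass.
	 */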
157 	valloc(cfree, struct cblock, nclist);
158 	valloc(callout, struct callout, ncallout);
159 	valloc(swapmap, struct map, nswapmap = maxproc * 2);
160 #ifdef SYSVSHM
161 	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
162 #endif
163 	/*
164 	 * Determine how many buffers to allocate.
165 	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
166 	 * memory. Ensure a minimum of 16 buffers.
167 	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
168 	 */
169 	if (bufpages == 0)
170 		if (physmem < (2 * 1024 * 1024))
171 			bufpages = physmem / 10 / CLSIZE;
172 		else
173 			bufpages = ((2 * 1024 * 1024 + physmem) / 20) / CLSIZE;
174 	if (nbuf == 0) {
175 		nbuf = bufpages / 2;
176 		if (nbuf < 16)
177 			nbuf = 16;
178 	}
179 	if (nswbuf == 0) {
180 		nswbuf = (nbuf / 2) &~ 1;	/* force even */
181 		if (nswbuf > 256)
182 			nswbuf = 256;		/* sanity */
183 	}
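	/*
	 * Example (assuming CLSIZE == 1 and NBPG == 4096): on a 4 Mb
	 * machine physmem is 1024 pages, giving bufpages = 1024 / 10 = 102,
	 * nbuf = 51 and nswbuf = (51 / 2) &~ 1 = 24.
	 */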
184 	valloc(swbuf, struct buf, nswbuf);
185 	valloc(buf, struct buf, nbuf);
186 
187 	/*
188 	 * End of first pass, size has been calculated so allocate memory
189 	 */
190 	if (firstaddr == 0) {
191 		size = (vm_size_t)(v - firstaddr);
192 		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
193 		if (firstaddr == 0)
194 			panic("startup: no room for tables");
195 		goto again;
196 	}
197 	/*
198 	 * End of second pass, addresses have been assigned
199 	 */
200 	if ((vm_size_t)(v - firstaddr) != size)
201 		panic("startup: table size inconsistency");
202 	/*
203 	 * Now allocate buffers proper.  They are different than the above
204 	 * in that they usually occupy more virtual memory than physical.
205 	 */
206 	size = MAXBSIZE * nbuf;
207 	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t)&buffers,
208 				   &maxaddr, size, FALSE);
209 	minaddr = (vm_offset_t)buffers;
210 	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
211 			&minaddr, size, FALSE) != KERN_SUCCESS)
212 		panic("startup: cannot allocate buffers");
213 	base = bufpages / nbuf;
214 	residual = bufpages % nbuf;
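	/*
	 * e.g. with bufpages = 103 and nbuf = 51, base = 2 and residual = 1,
	 * so the first buffer gets 3 clusters of physical memory and the
	 * remaining 50 buffers get 2 clusters each (3 + 50 * 2 = 103).
	 */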
215 	for (i = 0; i < nbuf; i++) {
216 		vm_size_t curbufsize;
217 		vm_offset_t curbuf;
218 
219 		/*
220 		 * First <residual> buffers get (base+1) physical pages
221 		 * allocated for them.  The rest get (base) physical pages.
222 		 *
223 		 * The rest of each buffer occupies virtual space,
224 		 * but has no physical memory allocated for it.
225 		 */
226 		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
227 		curbufsize = CLBYTES * (i < residual ? base+1 : base);
228 		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
229 		vm_map_simplify(buffer_map, curbuf);
230 	}
231 	/*
232 	 * Allocate a submap for exec arguments.  This map effectively
233 	 * limits the number of processes exec'ing at any time.
234 	 */
235 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
236 				 16*NCARGS, TRUE);
237 	/*
238 	 * Allocate a submap for physio
239 	 */
240 	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
241 				 VM_PHYS_SIZE, TRUE);
242 
243 	/*
244 	 * Finally, allocate the mbuf pool.  Since mclrefcnt is an odd size,
245 	 * we use the more space-efficient malloc in place of kmem_alloc.
246 	 */
247 	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
248 				   M_MBUF, M_NOWAIT);
249 	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
250 	mb_map = kmem_suballoc(kernel_map, (vm_offset_t)&mbutl, &maxaddr,
251 			       VM_MBUF_SIZE, FALSE);
252 	/*
253 	 * Initialize callouts
254 	 */
255 	callfree = callout;
256 	for (i = 1; i < ncallout; i++)
257 		callout[i-1].c_next = &callout[i];
258 
259 	/*printf("avail mem = %d\n", ptoa(vm_page_free_count));*/
260 	printf("using %d buffers containing %d bytes of memory\n",
261 		nbuf, bufpages * CLBYTES);
262 
263 	/*
264 	 * Set up CPU-specific registers, cache, etc.
265 	 */
266 	initcpu();
267 
268 	/*
269 	 * Set up buffers, so they can be used to read disk labels.
270 	 */
271 	bufinit();
272 
273 	/*
274 	 * Configure the system.
275 	 */
276 	configure();
277 }
278 
279 #ifdef PGINPROF
280 /*
281  * Return the difference (in microseconds)
282  * between the current time and a previous
283  * time as represented by the arguments.
284  * If there is a pending clock interrupt
285  * which has not been serviced due to high
286  * ipl, return error code.
287  */
288 /*ARGSUSED*/
289 vmtime(otime, olbolt, oicr)
290 	register int otime, olbolt, oicr;
291 {
292 
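	/*
	 * lbolt counts clock ticks within the current second; assuming the
	 * traditional hz of 60, each tick is roughly 16667 (1000000/60)
	 * microseconds, hence the scale factors below.
	 */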
293 	return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
294 }
295 #endif
296 
297 struct sigframe {
298 	int	sf_signum;
299 	int	sf_code;
300 	struct	sigcontext *sf_scp;
301 	sig_t	sf_handler;
302 	int	sf_eax;
303 	int	sf_edx;
304 	int	sf_ecx;
305 	struct	sigcontext sf_sc;
306 } ;
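/*
 * Frame that sendsig() builds on the user stack: the user stack pointer
 * is left pointing at sf_signum, so the signal trampoline (pcb_sigc)
 * finds the handler arguments (signum, code, scp) and the handler
 * address on top of the stack; sigreturn() later restores the scratch
 * registers saved in sf_eax/sf_edx/sf_ecx.
 */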
307 
308 extern int kstack[];
309 
310 /*
311  * Send an interrupt to process.
312  *
313  * The stack is set up to allow the sigcode stored
314  * in the u. area to call the handler, followed by a kernel call
315  * to the sigreturn routine below.  After sigreturn
316  * resets the signal mask, the stack, and the
317  * frame pointer, it returns to the user
318  * specified pc, psl.
319  */
320 void
321 sendsig(catcher, sig, mask, code)
322 	sig_t catcher;
323 	int sig, mask;
324 	unsigned code;
325 {
326 	register struct proc *p = curproc;
327 	register int *regs;
328 	register struct sigframe *fp;
329 	struct sigacts *ps = p->p_sigacts;
330 	int oonstack, frmtrap;
331 
332 	regs = p->p_regs;
333         oonstack = ps->ps_onstack;
334 	frmtrap = curpcb->pcb_flags & FM_TRAP;
335 	/*
336 	 * Allocate and validate space for the signal handler
337 	 * context. Note that if the stack is in P0 space, the
338 	 * call to grow() is a nop, and the useracc() check
339 	 * will fail if the process has not already allocated
340 	 * the space with a `brk'.
341 	 */
342         if (!ps->ps_onstack && (ps->ps_sigonstack & sigmask(sig))) {
343 		fp = (struct sigframe *)(ps->ps_sigsp
344 				- sizeof(struct sigframe));
345                 ps->ps_onstack = 1;
346 	} else {
347 		if (frmtrap)
348 			fp = (struct sigframe *)(regs[tESP]
349 				- sizeof(struct sigframe));
350 		else
351 			fp = (struct sigframe *)(regs[sESP]
352 				- sizeof(struct sigframe));
353 	}
354 
355 	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
356 		(void)grow((unsigned)fp);
357 
358 	if (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
359 		/*
360 		 * Process has trashed its stack; give it an illegal
361 		 * instruction to halt it in its tracks.
362 		 */
363 		SIGACTION(p, SIGILL) = SIG_DFL;
364 		sig = sigmask(SIGILL);
365 		p->p_sigignore &= ~sig;
366 		p->p_sigcatch &= ~sig;
367 		p->p_sigmask &= ~sig;
368 		psignal(p, SIGILL);
369 		return;
370 	}
371 
372 	/*
373 	 * Build the argument list for the signal handler.
374 	 */
375 	fp->sf_signum = sig;
376 	fp->sf_code = code;
377 	fp->sf_scp = &fp->sf_sc;
378 	fp->sf_handler = catcher;
379 
380 	/* save scratch registers */
381 	if(frmtrap) {
382 		fp->sf_eax = regs[tEAX];
383 		fp->sf_edx = regs[tEDX];
384 		fp->sf_ecx = regs[tECX];
385 	} else {
386 		fp->sf_eax = regs[sEAX];
387 		fp->sf_edx = regs[sEDX];
388 		fp->sf_ecx = regs[sECX];
389 	}
390 	/*
391 	 * Build the signal context to be used by sigreturn.
392 	 */
393 	fp->sf_sc.sc_onstack = oonstack;
394 	fp->sf_sc.sc_mask = mask;
395 	if(frmtrap) {
396 		fp->sf_sc.sc_sp = regs[tESP];
397 		fp->sf_sc.sc_fp = regs[tEBP];
398 		fp->sf_sc.sc_pc = regs[tEIP];
399 		fp->sf_sc.sc_ps = regs[tEFLAGS];
400 		regs[tESP] = (int)fp;
401 		regs[tEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
402 	} else {
403 		fp->sf_sc.sc_sp = regs[sESP];
404 		fp->sf_sc.sc_fp = regs[sEBP];
405 		fp->sf_sc.sc_pc = regs[sEIP];
406 		fp->sf_sc.sc_ps = regs[sEFLAGS];
407 		regs[sESP] = (int)fp;
408 		regs[sEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
409 	}
410 }
411 
412 /*
413  * System call to clean up state after a signal
414  * has been taken.  Reset signal mask and
415  * stack state from context left by sendsig (above).
416  * Return to previous pc and psl as specified by
417  * context left by sendsig. Check carefully to
418  * make sure that the user has not modified the
419  * psl to gain improper privileges or to cause
420  * a machine fault.
421  */
422 sigreturn(p, uap, retval)
423 	struct proc *p;
424 	struct args {
425 		struct sigcontext *sigcntxp;
426 	} *uap;
427 	int *retval;
428 {
429 	register struct sigcontext *scp;
430 	register struct sigframe *fp;
431 	register int *regs = p->p_regs;
432 
433 
434 	fp = (struct sigframe *) regs[sESP] ;
435 
436 	if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0)
437 		return(EINVAL);
438 
439 	/* restore scratch registers */
440 	regs[sEAX] = fp->sf_eax ;
441 	regs[sEDX] = fp->sf_edx ;
442 	regs[sECX] = fp->sf_ecx ;
443 
444 	scp = fp->sf_scp;
445 	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
446 		return(EINVAL);
447 #ifdef notyet
448 	if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) {
449 		return(EINVAL);
450 	}
451 #endif
452         p->p_sigacts->ps_onstack = scp->sc_onstack & 01;
453 	p->p_sigmask = scp->sc_mask &~
454 	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
455 	regs[sEBP] = scp->sc_fp;
456 	regs[sESP] = scp->sc_sp;
457 	regs[sEIP] = scp->sc_pc;
458 	regs[sEFLAGS] = scp->sc_ps;
459 	return(EJUSTRETURN);
460 }
461 
462 int	waittime = -1;
463 
464 boot(arghowto)
465 	int arghowto;
466 {
467 	register long dummy;		/* unused; see lint hack below */
468 	register int howto;		/* how to boot */
469 	register int devtype;		/* major number of root dev */
470 	extern char *panicstr;
471 	extern int cold;
472 
473 	howto = arghowto;
474 	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
475 		register struct buf *bp;
476 		int iter, nbusy;
477 
478 		waittime = 0;
479 		(void) splnet();
480 		printf("syncing disks... ");
481 		/*
482 		 * Release inodes held by texts before update.
483 		 */
484 		if (panicstr == 0)
485 			vnode_pager_umount(NULL);
486 		sync((struct sigcontext *)0);
487 
488 		for (iter = 0; iter < 20; iter++) {
489 			nbusy = 0;
490 			for (bp = &buf[nbuf]; --bp >= buf; )
491 				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
492 					nbusy++;
493 			if (nbusy == 0)
494 				break;
495 			printf("%d ", nbusy);
496 			DELAY(40000 * iter);
497 		}
498 		if (nbusy)
499 			printf("giving up\n");
500 		else
501 			printf("done\n");
502 		DELAY(10000);			/* wait for printf to finish */
503 	}
504 	splhigh();
505 	devtype = major(rootdev);
506 	if (howto&RB_HALT) {
507 		printf("halting (in tight loop); hit reset\n\n");
508 		splx(0xfffd);	/* all but keyboard XXX */
509 		for (;;) ;
510 	} else {
511 		if (howto & RB_DUMP) {
512 			dumpsys();
513 			/*NOTREACHED*/
514 		}
515 	}
516 #ifdef lint
517 	dummy = 0; dummy = dummy;
518 	printf("howto %d, devtype %d\n", arghowto, devtype);
519 #endif
520 	pg("pausing (hit any key to reset)");
521 	reset_cpu();
522 	for(;;) ;
523 	/*NOTREACHED*/
524 }
525 
526 int	dumpmag = 0x8fca0101;	/* magic number for savecore */
527 int	dumpsize = 0;		/* also for savecore */
528 /*
529  * Doadump comes here after turning off memory management and
530  * getting on the dump stack, either when called above, or by
531  * the auto-restart code.
532  */
533 dumpsys()
534 {
535 
536 	if (dumpdev == NODEV)
537 		return;
538 	if ((minor(dumpdev)&07) != 1)
539 		return;
540 	dumpsize = physmem;
541 	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
542 	printf("dump ");
543 	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {
544 
545 	case ENXIO:
546 		printf("device bad\n");
547 		break;
548 
549 	case EFAULT:
550 		printf("device not ready\n");
551 		break;
552 
553 	case EINVAL:
554 		printf("area improper\n");
555 		break;
556 
557 	case EIO:
558 		printf("i/o error\n");
559 		break;
560 
561 	default:
562 		printf("succeeded\n");
563 		break;
564 	}
565 	printf("\n\n");
566 	DELAY(1000);
567 }
568 
569 microtime(tvp)
570 	register struct timeval *tvp;
571 {
572 	int s = splhigh();
573 
574 	*tvp = time;
575 	tvp->tv_usec += tick;
576 	while (tvp->tv_usec > 1000000) {
577 		tvp->tv_sec++;
578 		tvp->tv_usec -= 1000000;
579 	}
580 	splx(s);
581 }
582 
583 physstrat(bp, strat, prio)
584 	struct buf *bp;
585 	int (*strat)(), prio;
586 {
587 	register int s;
588 	caddr_t baddr;
589 
590 	/*
591 	 * vmapbuf clobbers b_addr so we must remember it so that it
592 	 * can be restored after vunmapbuf.  This is truly rude; we
593 	 * should really be storing this in a field in the buf struct
594 	 * but none are available and I didn't want to add one at
595 	 * this time.  Note that b_addr for dirty page pushes is
596 	 * restored in vunmapbuf. (ugh!)
597 	 */
598 	baddr = bp->b_un.b_addr;
599 	vmapbuf(bp);
600 	(*strat)(bp);
601 	/* pageout daemon doesn't wait for pushed pages */
602 	if (bp->b_flags & B_DIRTY)
603 		return;
604 	s = splbio();
605 	while ((bp->b_flags & B_DONE) == 0)
606 		sleep((caddr_t)bp, prio);
607 	splx(s);
608 	vunmapbuf(bp);
609 	bp->b_un.b_addr = baddr;
610 }
611 
612 initcpu()
613 {
614 }
615 
616 /*
617  * Clear registers on exec
618  */
619 setregs(p, entry, retval)
620 	register struct proc *p;
621 	u_long entry;
622 	int retval[2];
623 {
624 	p->p_regs[sEBP] = 0;	/* bottom of the fp chain */
625 	p->p_regs[sEIP] = entry;
626 
627 	p->p_addr->u_pcb.pcb_flags = 0;	/* no fp at all */
628 	load_cr0(rcr0() | CR0_EM);	/* start emulating */
629 #ifdef	NPX
630 	npxinit(0x262);
631 #endif
632 }
633 
634 /*
635  * Initialize 386 and configure to run kernel
636  */
637 
638 /*
639  * Initialize segments & interrupt table
640  */
641 
642 
643 #define	GNULL_SEL	0	/* Null Descriptor */
644 #define	GCODE_SEL	1	/* Kernel Code Descriptor */
645 #define	GDATA_SEL	2	/* Kernel Data Descriptor */
646 #define	GLDT_SEL	3	/* LDT - eventually one per process */
647 #define	GTGATE_SEL	4	/* Process task switch gate */
648 #define	GPANIC_SEL	5	/* Task state to consider panic from */
649 #define	GPROC0_SEL	6	/* Task state process slot zero and up */
650 #define NGDT 	GPROC0_SEL+1
651 
652 union descriptor gdt[GPROC0_SEL+1];
653 
654 /* interrupt descriptor table */
655 struct gate_descriptor idt[32+16];
656 
657 /* local descriptor table */
658 union descriptor ldt[5];
659 #define	LSYS5CALLS_SEL	0	/* forced by intel BCS */
660 #define	LSYS5SIGR_SEL	1
661 
662 #define	L43BSDCALLS_SEL	2	/* notyet */
663 #define	LUCODE_SEL	3
664 #define	LUDATA_SEL	4
665 /* separate stack, es, fs, gs sels ? */
666 /* #define	LPOSIXCALLS_SEL	5	/* notyet */
667 
668 struct	i386tss	tss, panic_tss;
669 
670 extern  struct user *proc0paddr;
671 
672 /* software prototypes -- in more palatable form */
673 struct soft_segment_descriptor gdt_segs[] = {
674 	/* Null Descriptor */
675 {	0x0,			/* segment base address  */
676 	0x0,			/* length - all address space */
677 	0,			/* segment type */
678 	0,			/* segment descriptor priority level */
679 	0,			/* segment descriptor present */
680 	0,0,
681 	0,			/* default 32 vs 16 bit size */
682 	0  			/* limit granularity (byte/page units)*/ },
683 	/* Code Descriptor for kernel */
684 {	0x0,			/* segment base address  */
685 	0xfffff,		/* length - all address space */
686 	SDT_MEMERA,		/* segment type */
687 	0,			/* segment descriptor priority level */
688 	1,			/* segment descriptor present */
689 	0,0,
690 	1,			/* default 32 vs 16 bit size */
691 	1  			/* limit granularity (byte/page units)*/ },
692 	/* Data Descriptor for kernel */
693 {	0x0,			/* segment base address  */
694 	0xfffff,		/* length - all address space */
695 	SDT_MEMRWA,		/* segment type */
696 	0,			/* segment descriptor priority level */
697 	1,			/* segment descriptor present */
698 	0,0,
699 	1,			/* default 32 vs 16 bit size */
700 	1  			/* limit granularity (byte/page units)*/ },
701 	/* LDT Descriptor */
702 {	(int) ldt,			/* segment base address  */
703 	sizeof(ldt)-1,		/* length - all address space */
704 	SDT_SYSLDT,		/* segment type */
705 	0,			/* segment descriptor priority level */
706 	1,			/* segment descriptor present */
707 	0,0,
708 	0,			/* unused - default 32 vs 16 bit size */
709 	0  			/* limit granularity (byte/page units)*/ },
710 	/* Null Descriptor - Placeholder */
711 {	0x0,			/* segment base address  */
712 	0x0,			/* length - all address space */
713 	0,			/* segment type */
714 	0,			/* segment descriptor priority level */
715 	0,			/* segment descriptor present */
716 	0,0,
717 	0,			/* default 32 vs 16 bit size */
718 	0  			/* limit granularity (byte/page units)*/ },
719 	/* Panic Tss Descriptor */
720 {	(int) &panic_tss,		/* segment base address  */
721 	sizeof(tss)-1,		/* length - all address space */
722 	SDT_SYS386TSS,		/* segment type */
723 	0,			/* segment descriptor priority level */
724 	1,			/* segment descriptor present */
725 	0,0,
726 	0,			/* unused - default 32 vs 16 bit size */
727 	0  			/* limit granularity (byte/page units)*/ },
728 	/* Proc 0 Tss Descriptor */
729 {	(int) kstack,			/* segment base address  */
730 	sizeof(tss)-1,		/* length - all address space */
731 	SDT_SYS386TSS,		/* segment type */
732 	0,			/* segment descriptor priority level */
733 	1,			/* segment descriptor present */
734 	0,0,
735 	0,			/* unused - default 32 vs 16 bit size */
736 	0  			/* limit granularity (byte/page units)*/ }};
737 
738 struct soft_segment_descriptor ldt_segs[] = {
739 	/* Null Descriptor - overwritten by call gate */
740 {	0x0,			/* segment base address  */
741 	0x0,			/* length - all address space */
742 	0,			/* segment type */
743 	0,			/* segment descriptor priority level */
744 	0,			/* segment descriptor present */
745 	0,0,
746 	0,			/* default 32 vs 16 bit size */
747 	0  			/* limit granularity (byte/page units)*/ },
748 	/* Null Descriptor - overwritten by call gate */
749 {	0x0,			/* segment base address  */
750 	0x0,			/* length - all address space */
751 	0,			/* segment type */
752 	0,			/* segment descriptor priority level */
753 	0,			/* segment descriptor present */
754 	0,0,
755 	0,			/* default 32 vs 16 bit size */
756 	0  			/* limit granularity (byte/page units)*/ },
757 	/* Null Descriptor - overwritten by call gate */
758 {	0x0,			/* segment base address  */
759 	0x0,			/* length - all address space */
760 	0,			/* segment type */
761 	0,			/* segment descriptor priority level */
762 	0,			/* segment descriptor present */
763 	0,0,
764 	0,			/* default 32 vs 16 bit size */
765 	0  			/* limit granularity (byte/page units)*/ },
766 	/* Code Descriptor for user */
767 {	0x0,			/* segment base address  */
768 	0xfffff,		/* length - all address space */
769 	SDT_MEMERA,		/* segment type */
770 	SEL_UPL,		/* segment descriptor priority level */
771 	1,			/* segment descriptor present */
772 	0,0,
773 	1,			/* default 32 vs 16 bit size */
774 	1  			/* limit granularity (byte/page units)*/ },
775 	/* Data Descriptor for user */
776 {	0x0,			/* segment base address  */
777 	0xfffff,		/* length - all address space */
778 	SDT_MEMRWA,		/* segment type */
779 	SEL_UPL,		/* segment descriptor priority level */
780 	1,			/* segment descriptor present */
781 	0,0,
782 	1,			/* default 32 vs 16 bit size */
783 	1  			/* limit granularity (byte/page units)*/ } };
784 
785 /* table descriptors - used to load the tables into the microprocessor */
786 struct region_descriptor r_gdt = {
787 	sizeof(gdt)-1,(char *)gdt
788 };
789 
790 struct region_descriptor r_idt = {
791 	sizeof(idt)-1,(char *)idt
792 };
793 
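/*
 * setidt() is used by init386() below to plant trap and system-call
 * gates in the IDT; selector 8 is the kernel code segment,
 * GSEL(GCODE_SEL, SEL_KPL).
 */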
794 setidt(idx, func, typ, dpl) char *func; {
795 	struct gate_descriptor *ip = idt + idx;
796 
797 	ip->gd_looffset = (int)func;
798 	ip->gd_selector = 8;
799 	ip->gd_stkcpy = 0;
800 	ip->gd_xx = 0;
801 	ip->gd_type = typ;
802 	ip->gd_dpl = dpl;
803 	ip->gd_p = 1;
804 	ip->gd_hioffset = ((int)func)>>16 ;
805 }
806 
807 #define	IDTVEC(name)	__CONCAT(X, name)
808 extern	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
809 	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
810 	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
811 	IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
812 	IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
813 	IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
814 	IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
815 	IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(syscall);
816 
817 int lcr0(), lcr3(), rcr0(), rcr2();
818 int _udatasel, _ucodesel, _gsel_tss;
819 
820 init386(first) { extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
821 	int x, *pi;
822 	unsigned biosbasemem, biosextmem;
823 	struct gate_descriptor *gdp;
824 	extern int sigcode,szsigcode;
825 
826 	proc0.p_addr = proc0paddr;
827 
828 	/*
829 	 * Initialize the console before we print anything out.
830 	 */
831 
832 	cninit (KERNBASE+0xa0000);
833 
834 	/* make gdt memory segments */
835 	gdt_segs[GCODE_SEL].ssd_limit = btoc((int) &etext + NBPG);
836 	for (x=0; x < NGDT; x++) ssdtosd(gdt_segs+x, gdt+x);
837 	/* make ldt memory segments */
838 	ldt_segs[LUCODE_SEL].ssd_limit = btoc(UPT_MIN_ADDRESS);
839 	ldt_segs[LUDATA_SEL].ssd_limit = btoc(UPT_MIN_ADDRESS);
840 	/* Note. eventually want private ldts per process */
841 	for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);
842 
843 	/* exceptions */
844 	setidt(0, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL);
845 	setidt(1, &IDTVEC(dbg),  SDT_SYS386TGT, SEL_KPL);
846 	setidt(2, &IDTVEC(nmi),  SDT_SYS386TGT, SEL_KPL);
847  	setidt(3, &IDTVEC(bpt),  SDT_SYS386TGT, SEL_UPL);
848 	setidt(4, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_KPL);
849 	setidt(5, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL);
850 	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL);
851 	setidt(7, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL);
852 	setidt(8, &IDTVEC(dble),  SDT_SYS386TGT, SEL_KPL);
853 	setidt(9, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL);
854 	setidt(10, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL);
855 	setidt(11, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL);
856 	setidt(12, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL);
857 	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL);
858 	setidt(14, &IDTVEC(page),  SDT_SYS386TGT, SEL_KPL);
859 	setidt(15, &IDTVEC(rsvd),  SDT_SYS386TGT, SEL_KPL);
860 	setidt(16, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL);
861 	setidt(17, &IDTVEC(rsvd0),  SDT_SYS386TGT, SEL_KPL);
862 	setidt(18, &IDTVEC(rsvd1),  SDT_SYS386TGT, SEL_KPL);
863 	setidt(19, &IDTVEC(rsvd2),  SDT_SYS386TGT, SEL_KPL);
864 	setidt(20, &IDTVEC(rsvd3),  SDT_SYS386TGT, SEL_KPL);
865 	setidt(21, &IDTVEC(rsvd4),  SDT_SYS386TGT, SEL_KPL);
866 	setidt(22, &IDTVEC(rsvd5),  SDT_SYS386TGT, SEL_KPL);
867 	setidt(23, &IDTVEC(rsvd6),  SDT_SYS386TGT, SEL_KPL);
868 	setidt(24, &IDTVEC(rsvd7),  SDT_SYS386TGT, SEL_KPL);
869 	setidt(25, &IDTVEC(rsvd8),  SDT_SYS386TGT, SEL_KPL);
870 	setidt(26, &IDTVEC(rsvd9),  SDT_SYS386TGT, SEL_KPL);
871 	setidt(27, &IDTVEC(rsvd10),  SDT_SYS386TGT, SEL_KPL);
872 	setidt(28, &IDTVEC(rsvd11),  SDT_SYS386TGT, SEL_KPL);
873 	setidt(29, &IDTVEC(rsvd12),  SDT_SYS386TGT, SEL_KPL);
874 	setidt(30, &IDTVEC(rsvd13),  SDT_SYS386TGT, SEL_KPL);
875 	setidt(31, &IDTVEC(rsvd14),  SDT_SYS386TGT, SEL_KPL);
876 
877 #include	"isa.h"
878 #if	NISA >0
879 	isa_defaultirq();
880 #endif
881 
882 	lgdt(gdt, sizeof(gdt)-1);
883 	lidt(idt, sizeof(idt)-1);
884 	lldt(GSEL(GLDT_SEL, SEL_KPL));
885 
886 	/*if (Maxmem > 6*1024/4)
887 		Maxmem = (1024+384) *1024 /NBPG;*/
888 	maxmem = Maxmem;
889 
890 	/* reconcile against BIOS's recorded values in RTC
891 	 * we trust neither of them, as both can lie!
892 	 */
893 	biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
894 	biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
895 	if (biosbasemem == 0xffff || biosextmem == 0xffff) {
896 		if (maxmem > 0xffc)
897 			maxmem = 640/4;
898 	} else if (biosextmem > 0 && biosbasemem == 640) {
899 		int totbios = (biosbasemem + 0x60000 + biosextmem)/4;
900 		if (totbios < maxmem) maxmem = totbios;
901 	} else	maxmem = 640/4;
902 	maxmem = (biosextmem+1024)/4;
903 	maxmem = maxmem-1;
904 	physmem = maxmem;
905 	if (maxmem > 1024/4)
906 		physmem -= (1024 - 640)/4;
907 printf("bios base %d ext %d maxmem %d physmem %d\n",
908 	biosbasemem, biosextmem, 4*maxmem, 4*physmem);
909 
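	/* XXX unconditional override: force maxmem to 8 Mb less two pages */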
910 maxmem=8192/4 -2;
911 	vm_set_page_size();
912 	/* call pmap initialization to make new kernel address space */
913 	pmap_bootstrap (first, 0);
914 	/* now running on new page tables, configured, and u/iom is accessible */
915 
916 	/* make an initial TSS so the microprocessor can get an interrupt stack on syscall! */
917 	proc0.p_addr->u_pcb.pcb_tss.tss_esp0 = (int) kstack + UPAGES*NBPG;
918 	proc0.p_addr->u_pcb.pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
919 	_gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
920 	ltr(_gsel_tss);
921 
922 	/* make a call gate to reenter kernel with */
923 	gdp = &ldt[LSYS5CALLS_SEL].gd;
924 
925 	x = (int) &IDTVEC(syscall);
926 	gdp->gd_looffset = x++;
927 	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
928 	gdp->gd_stkcpy = 0;
929 	gdp->gd_type = SDT_SYS386CGT;
930 	gdp->gd_dpl = SEL_UPL;
931 	gdp->gd_p = 1;
932 	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;
933 
934 	/* transfer to user mode */
935 
936 	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
937 	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);
938 
939 	/* setup proc 0's pcb */
940 	bcopy(&sigcode, proc0.p_addr->u_pcb.pcb_sigc, szsigcode);
941 	proc0.p_addr->u_pcb.pcb_flags = 0;
942 	proc0.p_addr->u_pcb.pcb_ptd = IdlePTD;
943 }
944 
945 extern struct pte	*CMAP1, *CMAP2;
946 extern caddr_t		CADDR1, CADDR2;
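/*
 * CMAP1/CMAP2 are PTEs reserved by the pmap module for temporarily
 * mapping arbitrary physical pages at the fixed kernel addresses
 * CADDR1/CADDR2.  The routines below point a CMAP entry at the target
 * physical page (PG_V | PG_KW plus the page's physical address), reload
 * %cr3 to flush the stale TLB entry, and then operate on the page
 * through its CADDR window.
 */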
947 /*
948  * zero out physical memory
949  * specified in relocation units (NBPG bytes)
950  */
951 clearseg(n) {
952 
953 	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
954 	load_cr3(rcr3());
955 	bzero(CADDR2,NBPG);
956 	*(int *) CADDR2 = 0;
957 }
958 
959 /*
960  * copy a page of physical memory
961  * specified in relocation units (NBPG bytes)
962  */
963 copyseg(frm, n) {
964 
965 	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
966 	load_cr3(rcr3());
967 	bcopy((void *)frm, (void *)CADDR2, NBPG);
968 }
969 
970 /*
971  * copy a page of physical memory
972  * specified in relocation units (NBPG bytes)
973  */
974 physcopyseg(frm, to) {
975 
976 	*(int *)CMAP1 = PG_V | PG_KW | ctob(frm);
977 	*(int *)CMAP2 = PG_V | PG_KW | ctob(to);
978 	load_cr3(rcr3());
979 	bcopy(CADDR1, CADDR2, NBPG);
980 }
981 
982 /*aston() {
983 	schednetisr(NETISR_AST);
984 }*/
985 
986 setsoftclock() {
987 	schednetisr(NETISR_SCLK);
988 }
989 
990 /*
991  * insert an element into a queue
992  */
993 #undef insque
994 _insque(element, head)
995 	register struct prochd *element, *head;
996 {
997 	element->ph_link = head->ph_link;
998 	head->ph_link = (struct proc *)element;
999 	element->ph_rlink = (struct proc *)head;
1000 	((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
1001 }
1002 
1003 /*
1004  * remove an element from a queue
1005  */
1006 #undef remque
1007 _remque(element)
1008 	register struct prochd *element;
1009 {
1010 	((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
1011 	((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
1012 	element->ph_rlink = (struct proc *)0;
1013 }
1014 
1015 vmunaccess() {}
1016 
1017 /*
1018  * Below written in C to allow access to debugging code
1019  */
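/*
 * Each of the copy*str routines below copies a NUL-terminated string of
 * at most `maxlength' bytes, returning 0 on success, ENAMETOOLONG if the
 * string did not fit, or EFAULT on a fault in user space (copyinstr and
 * copyoutstr only); on success the count stored through `lencopied' (if
 * non-NULL) includes the terminating NUL.
 */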
1020 copyinstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
1021 	void *toaddr, *fromaddr; {
1022 	u_int c,tally;
1023 
1024 	tally = 0;
1025 	while (maxlength--) {
1026 		c = fubyte(fromaddr++);
1027 		if (c == -1) {
1028 			if(lencopied) *lencopied = tally;
1029 			return(EFAULT);
1030 		}
1031 		tally++;
1032 		*(char *)toaddr++ = (char) c;
1033 		if (c == 0){
1034 			if(lencopied) *lencopied = tally;
1035 			return(0);
1036 		}
1037 	}
1038 	if(lencopied) *lencopied = tally;
1039 	return(ENAMETOOLONG);
1040 }
1041 
1042 copyoutstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
1043 	void *fromaddr, *toaddr; {
1044 	int c;
1045 	int tally;
1046 
1047 	tally = 0;
1048 	while (maxlength--) {
1049 		c = subyte(toaddr++, *(char *)fromaddr);
1050 		if (c == -1) return(EFAULT);
1051 		tally++;
1052 		if (*(char *)fromaddr++ == 0){
1053 			if(lencopied) *lencopied = tally;
1054 			return(0);
1055 		}
1056 	}
1057 	if(lencopied) *lencopied = tally;
1058 	return(ENAMETOOLONG);
1059 }
1060 
1061 copystr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
1062 	void *fromaddr, *toaddr; {
1063 	u_int tally;
1064 
1065 	tally = 0;
1066 	while (maxlength--) {
1067 		*(u_char *)toaddr = *(u_char *)fromaddr++;
1068 		tally++;
1069 		if (*(u_char *)toaddr++ == 0) {
1070 			if(lencopied) *lencopied = tally;
1071 			return(0);
1072 		}
1073 	}
1074 	if(lencopied) *lencopied = tally;
1075 	return(ENAMETOOLONG);
1076 }
1077