1 /*-
2 * Copyright (c) 1982, 1987, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * William Jolitz.
7 *
8 * %sccs.include.redist.c%
9 *
10 * @(#)machdep.c 8.3 (Berkeley) 05/09/95
11 */
12
13 #include <sys/param.h>
14 #include <sys/systm.h>
15 #include <sys/signalvar.h>
16 #include <sys/kernel.h>
17 #include <sys/map.h>
18 #include <sys/proc.h>
19 #include <sys/user.h>
20 #include <sys/buf.h>
21 #include <sys/reboot.h>
22 #include <sys/conf.h>
23 #include <sys/file.h>
24 #include <sys/clist.h>
25 #include <sys/callout.h>
26 #include <sys/malloc.h>
27 #include <sys/mbuf.h>
28 #include <sys/msgbuf.h>
29 #include <sys/ioctl.h>
30 #include <sys/tty.h>
31 #include <sys/sysctl.h>
32
33 #include <net/netisr.h>
34
35 #include <vm/vm.h>
36 #include <vm/vm_kern.h>
37 #include <vm/vm_page.h>
38
vm_map_t buffer_map;		/* submap of kernel_map for file I/O buffers; created in cpu_startup() */
extern vm_offset_t avail_end;	/* end of available physical memory (pre-decremented in pmap_bootstrap; see msgbuf setup) */
41
42 #include <machine/cpu.h>
43 #include <machine/reg.h>
44 #include <machine/psl.h>
45 #include <machine/specialreg.h>
46 #include <i386/isa/rtc.h>
47 #include <i386/i386/cons.h>
48
/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;		/* swap buffer headers; 0 means auto-size in cpu_startup() */
#ifdef	NBUF
int	nbuf = NBUF;		/* file I/O buffer headers (compile-time override) */
#else
int	nbuf = 0;		/* 0 means derive from bufpages in cpu_startup() */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;	/* pages of buffer-cache memory (compile-time override) */
#else
int	bufpages = 0;		/* 0 means derive from physmem in cpu_startup() */
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */

/*
 * Machine-dependent startup code
 */
int boothowto = 0, Maxmem = 0;	/* boot flags; top of physical memory in pages (set in init386) */
long dumplo;			/* offset of dump on dumpdev (see dumpsys) */
int physmem, maxmem;		/* physical memory size / highest usable page, in pages */
extern int bootdev;
#ifdef SMALL
extern int forcemaxmem;
#endif
int biosmem;

extern cyloffset;		/* implicit int (K&R); NOTE(review): set by boot code? confirm */
78
/*
 * Machine-dependent startup: map the kernel message buffer, size and
 * allocate the system data structures (clists, callouts, swap map,
 * buffer cache) in two passes, carve the kernel submaps (buffer_map,
 * exec_map, phys_map, mb_map), then call initcpu(), bufinit() and
 * configure().
 */
cpu_startup(firstaddr)
	int firstaddr;
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j;
	register caddr_t v;
	int maxbufs, base, residual;
	extern long Usrptsize;
	vm_offset_t minaddr, maxaddr;
	vm_size_t size;

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* avail_end was pre-decremented in pmap_bootstrap to compensate */
	for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
		pmap_enter(kernel_pmap, msgbufp, avail_end + i * NBPG,
		    VM_PROT_ALL, TRUE);
	msgbufmapped = 1;

#ifdef KDB
	kdb_init();			/* startup kernel debugger */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;		/* pass 1: just measure, don't assign */
again:
	v = (caddr_t)firstaddr;

/* carve space for `num' objects of `type' out of v, advancing v */
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = maxproc * 2);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Insure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * 1024))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * 1024 + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;		/* pass 2: assign real addresses */
	}
	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");
	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t)&buffers,
	    &maxaddr, size, TRUE);
	minaddr = (vm_offset_t)buffers;
	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
	    &minaddr, size, FALSE) != KERN_SUCCESS)
		panic("startup: cannot allocate buffers");
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vm_size_t curbufsize;
		vm_offset_t curbuf;

		/*
		 * First <residual> buffers get (base+1) physical pages
		 * allocated for them.  The rest get (base) physical pages.
		 *
		 * The rest of each buffer occupies virtual space,
		 * but has no physical memory allocated for it.
		 */
		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
		curbufsize = CLBYTES * (i < residual ? base+1 : base);
		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
		vm_map_simplify(buffer_map, curbuf);
	}
	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, TRUE);
	/*
	 * Allocate a submap for physio
	 */
	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, TRUE);

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
	    M_MBUF, M_NOWAIT);
	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
	mb_map = kmem_suballoc(kernel_map, (vm_offset_t)&mbutl, &maxaddr,
	    VM_MBUF_SIZE, FALSE);
	/*
	 * Initialize callouts (singly-linked free list)
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
	callout[i-1].c_next = NULL;

	/*printf("avail mem = %d\n", ptoa(vm_page_free_count));*/
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Configure the system.
	 */
	configure();
}
257
#ifdef PGINPROF
/*
 * Return the difference (in microseconds) between the current time
 * and a previous time as represented by the arguments.  If there is
 * a pending clock interrupt which has not been serviced due to high
 * ipl, return error code.
 */
/*ARGSUSED*/
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{
	register int ticks;

	/* elapsed ticks (60 per second), scaled by ~16667 us per tick */
	ticks = (time.tv_sec - otime) * 60 + (lbolt - olbolt);
	return (ticks * 16667);
}
#endif
275
/*
 * Signal handler stack frame, built on the user stack by sendsig()
 * and consumed by sigreturn().  The first four members form the
 * handler's argument list; sf_eax/sf_edx/sf_ecx preserve the scratch
 * registers; sf_sc is the machine context sigreturn() restores.
 */
struct sigframe {
	int	sf_signum;		/* signal number argument */
	int	sf_code;		/* code argument */
	struct	sigcontext *sf_scp;	/* points at sf_sc below */
	sig_t	sf_handler;		/* handler address */
	int	sf_eax;			/* saved scratch registers */
	int	sf_edx;
	int	sf_ecx;
	struct	sigcontext sf_sc;	/* saved context for sigreturn */
} ;

extern int kstack[];	/* kernel stack; a struct pcb lives at its base (see sendsig) */
288
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct proc *p = curproc;
	register int *regs;
	register struct sigframe *fp;
	struct sigacts *psp = p->p_sigacts;
	int oonstack, frmtrap;

	regs = p->p_md.md_regs;
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	/* trap frames and syscall frames index the registers differently */
	frmtrap = curpcb->pcb_flags & FM_TRAP;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) &&
	    (psp->ps_sigstk.ss_flags & SA_ONSTACK) == 0 &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		/* deliver on the alternate signal stack */
		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
		    psp->ps_sigstk.ss_size - sizeof(struct sigframe));
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		/* deliver just below the current user stack pointer */
		if (frmtrap)
			fp = (struct sigframe *)(regs[tESP]
				- sizeof(struct sigframe));
		else
			fp = (struct sigframe *)(regs[sESP]
				- sizeof(struct sigframe));
	}

	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
		(void)grow(p, (unsigned)fp);

	if (useracc((caddr_t)fp, sizeof (struct sigframe), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);	/* NB: `sig' reused as a mask below */
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}

	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = &fp->sf_sc;
	fp->sf_handler = catcher;

	/* save scratch registers */
	if(frmtrap) {
		fp->sf_eax = regs[tEAX];
		fp->sf_edx = regs[tEDX];
		fp->sf_ecx = regs[tECX];
	} else {
		fp->sf_eax = regs[sEAX];
		fp->sf_edx = regs[sEDX];
		fp->sf_ecx = regs[sECX];
	}
	/*
	 * Build the signal context to be used by sigreturn; then redirect
	 * the user stack and pc to run the signal trampoline (pcb_sigc).
	 */
	fp->sf_sc.sc_onstack = oonstack;
	fp->sf_sc.sc_mask = mask;
	if(frmtrap) {
		fp->sf_sc.sc_sp = regs[tESP];
		fp->sf_sc.sc_fp = regs[tEBP];
		fp->sf_sc.sc_pc = regs[tEIP];
		fp->sf_sc.sc_ps = regs[tEFLAGS];
		regs[tESP] = (int)fp;
		regs[tEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
	} else {
		fp->sf_sc.sc_sp = regs[sESP];
		fp->sf_sc.sc_fp = regs[sEBP];
		fp->sf_sc.sc_pc = regs[sEIP];
		fp->sf_sc.sc_ps = regs[sEFLAGS];
		regs[sESP] = (int)fp;
		regs[sEIP] = (int)((struct pcb *)kstack)->pcb_sigc;
	}
}
392
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper priviledges or to cause
 * a machine fault.
 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;	/* user address of saved context */
};
sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct sigframe *fp;
	register int *regs = p->p_md.md_regs;


	/* the frame sendsig() built sits at the user stack pointer */
	fp = (struct sigframe *) regs[sESP] ;

	if (useracc((caddr_t)fp, sizeof (*fp), 0) == 0)
		return(EINVAL);

	/* restore scratch registers */
	regs[sEAX] = fp->sf_eax ;
	regs[sEDX] = fp->sf_edx ;
	regs[sECX] = fp->sf_ecx ;

	scp = fp->sf_scp;
	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0)
		return(EINVAL);
#ifdef notyet
	/* validate the user-supplied processor status word */
	if ((scp->sc_ps & PSL_MBZ) != 0 || (scp->sc_ps & PSL_MBO) != PSL_MBO) {
		return(EINVAL);
	}
#endif
	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	/* SIGKILL/SIGCONT/SIGSTOP can never be blocked */
	p->p_sigmask = scp->sc_mask &~
	    (sigmask(SIGKILL)|sigmask(SIGCONT)|sigmask(SIGSTOP));
	regs[sEBP] = scp->sc_fp;
	regs[sESP] = scp->sc_sp;
	regs[sEIP] = scp->sc_pc;
	regs[sEFLAGS] = scp->sc_ps;
	return(EJUSTRETURN);
}
446
int	waittime = -1;		/* >= 0 once disk sync has started; prevents re-entry */

/*
 * Halt or reboot the machine.  Unless RB_NOSYNC, sync and unmount the
 * file systems and wait for buffer I/O to drain; optionally dump core
 * (RB_DUMP); then either spin for a manual reset (RB_HALT) or reset
 * the CPU.
 */
boot(arghowto)
	int arghowto;
{
	register long dummy;		/* r12 is reserved */
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;
	extern int cold;

	howto = arghowto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);
		sync((struct sigcontext *)0);
		/*
		 * Unmount filesystems
		 */
		if (panicstr == 0)
			vfs_unmountall();

		/* wait, with increasing backoff, for busy buffers to drain */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		DELAY(10000);			/* wait for printf to finish */
	}
	splhigh();
	devtype = major(rootdev);
	if (howto&RB_HALT) {
		printf("halting (in tight loop); hit reset\n\n");
		splx(0xfffd);		/* all but keyboard XXX */
		for (;;) ;
	} else {
		if (howto & RB_DUMP) {
			dumpsys();
			/*NOTREACHED*/
		}
	}
#ifdef lint
	dummy = 0; dummy = dummy;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
#ifdef notdef
	pg("pausing (hit any key to reset)");
#endif
	reset_cpu();
	for(;;) ;
	/*NOTREACHED*/
}
517
int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.  Writes physical memory to dumpdev via
 * the driver's d_dump entry and reports the result.
 */
dumpsys()
{

	if (dumpdev == NODEV)
		return;
	/* only dump to partition 1 (`b') of the dump device */
	if ((minor(dumpdev)&07) != 1)
		return;
	dumpsize = physmem;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	default:
		printf("succeeded\n");
		break;
	}
	printf("\n\n");
	DELAY(1000);
}
560
microtime(tvp)561 microtime(tvp)
562 register struct timeval *tvp;
563 {
564 int s = splhigh();
565
566 *tvp = time;
567 tvp->tv_usec += tick;
568 while (tvp->tv_usec > 1000000) {
569 tvp->tv_sec++;
570 tvp->tv_usec -= 1000000;
571 }
572 splx(s);
573 }
574
/*
 * Perform raw (physical) I/O on behalf of physio: map the user's
 * buffer into kernel space, invoke the device strategy routine, and
 * (except for pageout pushes) sleep until the transfer completes.
 */
physstrat(bp, strat, prio)
	struct buf *bp;
	int (*strat)(), prio;
{
	register int s;
	caddr_t baddr;

	/*
	 * vmapbuf clobbers b_addr so we must remember it so that it
	 * can be restored after vunmapbuf.  This is truely rude, we
	 * should really be storing this in a field in the buf struct
	 * but none are available and I didn't want to add one at
	 * this time.  Note that b_addr for dirty page pushes is
	 * restored in vunmapbuf. (ugh!)
	 */
	baddr = bp->b_un.b_addr;
	vmapbuf(bp);
	(*strat)(bp);
	/* pageout daemon doesn't wait for pushed pages */
	if (bp->b_flags & B_DIRTY)
		return;
	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, prio);
	splx(s);
	vunmapbuf(bp);
	bp->b_un.b_addr = baddr;
}
603
/*
 * Machine-dependent CPU initialization, called from cpu_startup()
 * before bufinit().  Nothing to do on this port.
 */
initcpu()
{
}
607
/*
 * Clear registers on exec: reset the frame pointer chain and set the
 * program counter to the new image's entry point, then disable the
 * FPU (trap-on-use via CR0_EM) so state is set up lazily.
 */
setregs(p, entry, retval)
	register struct proc *p;
	u_long entry;
	int retval[2];
{
	p->p_md.md_regs[sEBP] = 0;	/* bottom of the fp chain */
	p->p_md.md_regs[sEIP] = entry;

	p->p_addr->u_pcb.pcb_flags = 0;	/* no fp at all */
	load_cr0(rcr0() | CR0_EM);	/* start emulating */
#include "npx.h"
#if NNPX > 0
	/* NOTE(review): 0x262 is the npx control word used here -- confirm
	 * the intended precision/exception-mask bits against npx.c */
	npxinit(0x262);
#endif
}
626
627 /*
628 * machine dependent system variables.
629 */
cpu_sysctl(name,namelen,oldp,oldlenp,newp,newlen,p)630 cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
631 int *name;
632 u_int namelen;
633 void *oldp;
634 size_t *oldlenp;
635 void *newp;
636 size_t newlen;
637 struct proc *p;
638 {
639
640 /* all sysctl names at this level are terminal */
641 if (namelen != 1)
642 return (ENOTDIR); /* overloaded */
643
644 switch (name[0]) {
645 case CPU_CONSDEV:
646 return (sysctl_rdstruct(oldp, oldlenp, newp, &cn_tty->t_dev,
647 sizeof cn_tty->t_dev));
648 default:
649 return (EOPNOTSUPP);
650 }
651 /* NOTREACHED */
652 }
653
/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */


/* GDT slot assignments */
#define	GNULL_SEL	0	/* Null Descriptor */
#define	GCODE_SEL	1	/* Kernel Code Descriptor */
#define	GDATA_SEL	2	/* Kernel Data Descriptor */
#define	GLDT_SEL	3	/* LDT - eventually one per process */
#define	GTGATE_SEL	4	/* Process task switch gate */
#define	GPANIC_SEL	5	/* Task state to consider panic from */
#define	GPROC0_SEL	6	/* Task state process slot zero and up */
#define	NGDT		GPROC0_SEL+1

union descriptor gdt[GPROC0_SEL+1];

/* interrupt descriptor table (32 exception vectors + 16 more; see init386) */
struct gate_descriptor idt[32+16];

/* local descriptor table */
union descriptor ldt[5];
#define	LSYS5CALLS_SEL	0	/* forced by intel BCS */
#define	LSYS5SIGR_SEL	1

#define	L43BSDCALLS_SEL	2	/* notyet */
#define	LUCODE_SEL	3
#define	LUDATA_SEL	4
/* seperate stack, es,fs,gs sels ? */
/* #define	LPOSIXCALLS_SEL	5	/* notyet */

struct	i386tss	tss, panic_tss;	/* proc0/syscall TSS and panic-handling TSS */

extern	struct user *proc0paddr;
691
/* software prototypes -- in more palatable form; converted to hardware
 * descriptors by ssdtosd() in init386() */
struct soft_segment_descriptor gdt_segs[] = {
	/* Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* LDT Descriptor */
{	(int) ldt,		/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - Placeholder */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Panic Tss Descriptor */
{	(int) &panic_tss,	/* segment base address  */
	sizeof(tss)-1,		/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Proc 0 Tss Descriptor */
{	(int) kstack,		/* segment base address  */
	sizeof(tss)-1,		/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,0,
	0,			/* unused - default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ }};
757
/* software prototypes for the LDT; slots 0-2 are overwritten with call
 * gates (see the LSYS5CALLS_SEL setup in init386()) */
struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,0,
	0,			/* default 32 vs 16 bit size */
	0  			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,0,
	1,			/* default 32 vs 16 bit size */
	1  			/* limit granularity (byte/page units)*/ } };
804
/* table descriptors - used to load tables by microp;
 * first field is the limit (size - 1), second the linear base address */
struct region_descriptor r_gdt = {
	sizeof(gdt)-1,(char *)gdt
};

struct region_descriptor r_idt = {
	sizeof(idt)-1,(char *)idt
};
813
setidt(idx,func,typ,dpl)814 setidt(idx, func, typ, dpl) char *func; {
815 struct gate_descriptor *ip = idt + idx;
816
817 ip->gd_looffset = (int)func;
818 ip->gd_selector = 8;
819 ip->gd_stkcpy = 0;
820 ip->gd_xx = 0;
821 ip->gd_type = typ;
822 ip->gd_dpl = dpl;
823 ip->gd_p = 1;
824 ip->gd_hioffset = ((int)func)>>16 ;
825 }
826
#define	IDTVEC(name)	__CONCAT(X, name)
/*
 * Exception/interrupt entry points (X-prefixed symbols defined
 * outside this file).
 * NOTE(review): IDTVEC(rsvd14) appears twice and there is no rsvd15 --
 * apparent typo in the original list; harmless in a declaration.
 */
extern	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(dble), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(rsvd0),
	IDTVEC(rsvd1), IDTVEC(rsvd2), IDTVEC(rsvd3), IDTVEC(rsvd4),
	IDTVEC(rsvd5), IDTVEC(rsvd6), IDTVEC(rsvd7), IDTVEC(rsvd8),
	IDTVEC(rsvd9), IDTVEC(rsvd10), IDTVEC(rsvd11), IDTVEC(rsvd12),
	IDTVEC(rsvd13), IDTVEC(rsvd14), IDTVEC(rsvd14), IDTVEC(syscall);

int	lcr0(), lcr3(), rcr0(), rcr2();		/* control-register helpers */
int	_udatasel, _ucodesel, _gsel_tss;	/* user selectors; TSS selector */
839
/*
 * Bootstrap the 386: console, GDT/LDT/IDT setup, BIOS memory sizing,
 * pmap bootstrap, proc0 TSS, the system-call gate, and proc0's pcb.
 * `first' is the first free physical address, passed to pmap_bootstrap.
 */
init386(first) { extern ssdtosd(), lgdt(), lidt(), lldt(), etext;
	int x, *pi;
	unsigned biosbasemem, biosextmem;
	struct gate_descriptor *gdp;
	extern int sigcode,szsigcode;

	proc0.p_addr = proc0paddr;

	/*
	 * Initialize the console before we print anything out.
	 */

	cninit (KERNBASE+0xa0000);

	/* make gdt memory segments */
	gdt_segs[GCODE_SEL].ssd_limit = btoc((int) &etext + NBPG);
	for (x=0; x < NGDT; x++) ssdtosd(gdt_segs+x, gdt+x);
	/* make ldt memory segments */
	ldt_segs[LUCODE_SEL].ssd_limit = btoc(UPT_MIN_ADDRESS);
	ldt_segs[LUDATA_SEL].ssd_limit = btoc(UPT_MIN_ADDRESS);
	/* Note. eventually want private ldts per process */
	for (x=0; x < 5; x++) ssdtosd(ldt_segs+x, ldt+x);

	/* exceptions */
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL);
	setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL);
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL);
	setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL);	/* user breakpoints allowed */
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_KPL);
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL);
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL);
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL);
	setidt(8, &IDTVEC(dble), SDT_SYS386TGT, SEL_KPL);
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL);
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL);
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL);
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL);
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL);
	setidt(14, &IDTVEC(page), SDT_SYS386TGT, SEL_KPL);
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL);
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL);
	setidt(17, &IDTVEC(rsvd0), SDT_SYS386TGT, SEL_KPL);
	setidt(18, &IDTVEC(rsvd1), SDT_SYS386TGT, SEL_KPL);
	setidt(19, &IDTVEC(rsvd2), SDT_SYS386TGT, SEL_KPL);
	setidt(20, &IDTVEC(rsvd3), SDT_SYS386TGT, SEL_KPL);
	setidt(21, &IDTVEC(rsvd4), SDT_SYS386TGT, SEL_KPL);
	setidt(22, &IDTVEC(rsvd5), SDT_SYS386TGT, SEL_KPL);
	setidt(23, &IDTVEC(rsvd6), SDT_SYS386TGT, SEL_KPL);
	setidt(24, &IDTVEC(rsvd7), SDT_SYS386TGT, SEL_KPL);
	setidt(25, &IDTVEC(rsvd8), SDT_SYS386TGT, SEL_KPL);
	setidt(26, &IDTVEC(rsvd9), SDT_SYS386TGT, SEL_KPL);
	setidt(27, &IDTVEC(rsvd10), SDT_SYS386TGT, SEL_KPL);
	setidt(28, &IDTVEC(rsvd11), SDT_SYS386TGT, SEL_KPL);
	setidt(29, &IDTVEC(rsvd12), SDT_SYS386TGT, SEL_KPL);
	setidt(30, &IDTVEC(rsvd13), SDT_SYS386TGT, SEL_KPL);
	setidt(31, &IDTVEC(rsvd14), SDT_SYS386TGT, SEL_KPL);

#include	"isa.h"
#if	NISA >0
	isa_defaultirq();
#endif

	/* load the hardware descriptor-table registers */
	lgdt(gdt, sizeof(gdt)-1);
	lidt(idt, sizeof(idt)-1);
	lldt(GSEL(GLDT_SEL, SEL_KPL));

	/*
	 * This memory size stuff is a real mess.  Here is a simple
	 * setup that just believes the BIOS.  After the rest of
	 * the system is a little more stable, we'll come back to
	 * this and deal with issues if incorrect BIOS information,
	 * and when physical memory is > 16 megabytes.
	 */
	biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
	biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
	Maxmem = btoc ((biosextmem + 1024) * 1024);
	maxmem = Maxmem - 1;
	physmem = btoc (biosbasemem * 1024 + (biosextmem - 1) * 1024);
	printf ("bios %dK+%dK. maxmem %x, physmem %x\n",
		biosbasemem, biosextmem, ctob (maxmem), ctob (physmem));

	vm_set_page_size();
	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap (first, 0);
	/* now running on new page tables, configured,and u/iom is accessible */

	/* make a initial tss so microp can get interrupt stack on syscall! */
	proc0.p_addr->u_pcb.pcb_tss.tss_esp0 = (int) kstack + UPAGES*NBPG;
	proc0.p_addr->u_pcb.pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
	_gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(_gsel_tss);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(syscall);
	/* NOTE(review): the post-increment is dead -- the bitfield keeps the
	 * low bits and gd_hioffset is recomputed from the symbol below */
	gdp->gd_looffset = x++;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 0;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	bcopy(&sigcode, proc0.p_addr->u_pcb.pcb_sigc, szsigcode);
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_ptd = IdlePTD;
}
954
extern struct pte *CMAP1, *CMAP2;	/* scratch kernel ptes for temporary mappings */
extern caddr_t CADDR1, CADDR2;		/* virtual addresses mapped by CMAP1/CMAP2 */
/*
 * zero out physical memory
 * specified in relocation units (NBPG bytes)
 */
clearseg(n) {

	/* temporarily map physical page n at CADDR2 */
	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
	load_cr3(rcr3());		/* reload cr3 to flush stale TLB entries */
	bzero(CADDR2,NBPG);
	*(int *) CADDR2 = 0;
}
968
/*
 * copy a page of physical memory
 * specified in relocation units (NBPG bytes):
 * copies NBPG bytes from virtual address `frm' into physical page `n'.
 */
copyseg(frm, n) {

	/* temporarily map the destination physical page at CADDR2 */
	*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
	load_cr3(rcr3());		/* reload cr3 to flush stale TLB entries */
	bcopy((void *)frm, (void *)CADDR2, NBPG);
}
979
/*
 * copy a page of physical memory
 * specified in relocation units (NBPG bytes):
 * copies physical page `frm' onto physical page `to'.
 */
physcopyseg(frm, to) {

	/* map source at CADDR1 and destination at CADDR2 */
	*(int *)CMAP1 = PG_V | PG_KW | ctob(frm);
	*(int *)CMAP2 = PG_V | PG_KW | ctob(to);
	load_cr3(rcr3());		/* reload cr3 to flush stale TLB entries */
	bcopy(CADDR1, CADDR2, NBPG);
}
991
/*aston() {
	schednetisr(NETISR_AST);
}*/

/*
 * Request a software clock interrupt via the netisr mechanism.
 */
setsoftclock() {
	schednetisr(NETISR_SCLK);
}
999
1000 /*
1001 * insert an element into a queue
1002 */
1003 #undef insque
_insque(element,head)1004 _insque(element, head)
1005 register struct prochd *element, *head;
1006 {
1007 element->ph_link = head->ph_link;
1008 head->ph_link = (struct proc *)element;
1009 element->ph_rlink = (struct proc *)head;
1010 ((struct prochd *)(element->ph_link))->ph_rlink=(struct proc *)element;
1011 }
1012
1013 /*
1014 * remove an element from a queue
1015 */
1016 #undef remque
_remque(element)1017 _remque(element)
1018 register struct prochd *element;
1019 {
1020 ((struct prochd *)(element->ph_link))->ph_rlink = element->ph_rlink;
1021 ((struct prochd *)(element->ph_rlink))->ph_link = element->ph_link;
1022 element->ph_rlink = (struct proc *)0;
1023 }
1024
vmunaccess()1025 vmunaccess() {}
1026
/*
 * Below written in C to allow access to debugging code
 */

/*
 * Copy a NUL-terminated string from user space (fromaddr) to kernel
 * space (toaddr), at most maxlength bytes including the NUL.  If
 * lencopied is non-null it receives the number of bytes copied
 * (including the NUL) -- on EFAULT as well as on success/overflow.
 * Returns 0, EFAULT on a user-space fault, or ENAMETOOLONG if the
 * string did not fit.
 */
copyinstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
	void *toaddr, *fromaddr; {
	/*
	 * Use typed cursors: arithmetic on void * is not valid ISO C
	 * (the original relied on a compiler extension).
	 */
	register u_char *from = (u_char *)fromaddr;
	register char *to = (char *)toaddr;
	int c,tally;

	tally = 0;
	while (maxlength--) {
		c = fubyte(from++);		/* fetch one user byte */
		if (c == -1) {			/* user address faulted */
			if(lencopied) *lencopied = tally;
			return(EFAULT);
		}
		tally++;
		*to++ = (char) c;
		if (c == 0){			/* hit the terminating NUL */
			if(lencopied) *lencopied = tally;
			return(0);
		}
	}
	if(lencopied) *lencopied = tally;
	return(ENAMETOOLONG);
}
1051
/*
 * Copy a NUL-terminated string from kernel space (fromaddr) to user
 * space (toaddr), at most maxlength bytes including the NUL.  If
 * lencopied is non-null it receives the number of bytes copied
 * (including the NUL).  Returns 0, EFAULT on a user-space fault, or
 * ENAMETOOLONG if the string did not fit.
 */
copyoutstr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
	void *fromaddr, *toaddr; {
	/*
	 * Use typed cursors: arithmetic on void * is not valid ISO C
	 * (the original relied on a compiler extension).
	 */
	register char *from = (char *)fromaddr;
	register u_char *to = (u_char *)toaddr;
	int c;
	int tally;

	tally = 0;
	while (maxlength--) {
		c = subyte(to++, *from);	/* store one user byte */
		if (c == -1) {
			/*
			 * Report progress on a fault too, for consistency
			 * with copyinstr() (the original left *lencopied
			 * untouched on EFAULT).
			 */
			if(lencopied) *lencopied = tally;
			return(EFAULT);
		}
		tally++;
		if (*from++ == 0){		/* just copied the NUL */
			if(lencopied) *lencopied = tally;
			return(0);
		}
	}
	if(lencopied) *lencopied = tally;
	return(ENAMETOOLONG);
}
1070
/*
 * Copy a NUL-terminated string within kernel space, at most maxlength
 * bytes including the NUL.  If lencopied is non-null it receives the
 * number of bytes copied (including the NUL).  Returns 0 on success
 * or ENAMETOOLONG if the string did not fit.
 */
copystr(fromaddr, toaddr, maxlength, lencopied) u_int *lencopied, maxlength;
	void *fromaddr, *toaddr; {
	/*
	 * Use typed cursors: arithmetic on void * is not valid ISO C
	 * (the original relied on a compiler extension).
	 */
	register u_char *from = (u_char *)fromaddr;
	register u_char *to = (u_char *)toaddr;
	u_int tally;

	tally = 0;
	while (maxlength--) {
		*to = *from++;
		tally++;
		if (*to++ == 0) {		/* just copied the NUL */
			if(lencopied) *lencopied = tally;
			return(0);
		}
	}
	if(lencopied) *lencopied = tally;
	return(ENAMETOOLONG);
}
1087