/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * %sccs.include.386.c%
 *
 *	@(#)locore.s	5.2 (Berkeley) 06/23/90
 */

/*
 * locore.s: 4BSD machine support for the Intel 386
 *	Preliminary version
 *	Written by William F. Jolitz, 386BSD Project
 */

#include "psl.h"
#include "pte.h"

#include "errno.h"
#include "cmap.h"

#include "../i386/trap.h"

/*
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have more pleasant appearance.
 */

	.set	IDXSHIFT,10
	.set	SYSTEM,0xFE000000	# virtual address of system start
	/* note: gas copys sign bit (e.g. arithmetic >>), can't do SYSTEM>>22! */
	.set	SYSPDROFF,0x3F8		# Page dir offset of kernel (SYSTEM>>22)

	.set	IOPHYSmem,0xa0000	# physical base of ISA display/adapter hole

/* IBM "compatible" nop - sensitive macro on "fast" 386 machines */
#define NOP jmp 7f ; nop ; 7:

/*
 * User structure is UPAGES at top of user space.
 */
	.set	_u,0xFDFFE000		# fixed virtual address of the u. area
	.globl	_u
	.set	UPDROFF,0x3F7		# page-directory slot covering _u
	.set	UPTEOFF,0x3FE		# pte index of _u within its page table

#define ENTRY(name) \
	.globl _/**/name; _/**/name:
#define ALTENTRY(name) \
	.globl _/**/name; _/**/name:

/*
 * System page table
 * Mbmap and Usrptmap are enlarged by CLSIZE entries
 * as they are managed by resource maps starting with index 1 or CLSIZE.
 *
 * SYSMAP reserves npte ptes under label _mname and binds _vname to the
 * virtual address those ptes will map; the running total is kept in
 * the assembly-time symbol "ptes".
 */
#define SYSMAP(mname, vname, npte) \
_/**/mname:	.globl _/**/mname; \
	.space (npte)*4; \
	.set _/**/vname,ptes*4096+SYSTEM; \
	.globl _/**/vname; \
	.set ptes,ptes + npte
#define ZSYSMAP(mname, vname, npte) \
_/**/mname:	.globl _/**/mname; \
	.set _/**/vname,ptes*4096+SYSTEM; \
	.globl _/**/vname;

	.data
	# assumed to start at data mod 4096
	.set	ptes,0
	SYSMAP(Sysmap,Sysbase,SYSPTSIZE)
	SYSMAP(Forkmap,forkutl,UPAGES)
	SYSMAP(Xswapmap,xswaputl,UPAGES)
	SYSMAP(Xswap2map,xswap2utl,UPAGES)
	SYSMAP(Swapmap,swaputl,UPAGES)
	SYSMAP(Pushmap,pushutl,UPAGES)
	SYSMAP(Vfmap,vfutl,UPAGES)
	SYSMAP(CMAP1,CADDR1,1)
	SYSMAP(CMAP2,CADDR2,1)
	SYSMAP(mmap,vmmap,1)
	SYSMAP(alignmap,alignutl,1)	/* XXX */
	SYSMAP(msgbufmap,msgbuf,MSGBUFPTECNT)
	.set	mbxxx,(NMBCLUSTERS*MCLBYTES)
	.set	mbyyy,(mbxxx>>PGSHIFT)
	.set	mbpgs,(mbyyy+CLSIZE)
	SYSMAP(Mbmap,mbutl,mbpgs)
	/*
	 * XXX: NEED way to compute kmem size from maxusers,
	 * device complement
	 */
	SYSMAP(kmempt,kmembase,300*CLSIZE)
#ifdef GPROF
	SYSMAP(profmap,profbase,600*CLSIZE)
#endif
	.set	atmemsz,0x100000-0xa0000
	.set	atpgs,(atmemsz>>PGSHIFT)
	SYSMAP(ATDevmem,atdevbase,atpgs)
#define	USRIOSIZE	30
	SYSMAP(Usriomap,usrio,USRIOSIZE+CLSIZE)	/* for PHYSIO */
	ZSYSMAP(ekmempt,kmemlimit,0)
	SYSMAP(Usrptmap,usrpt,USRPTSIZE+CLSIZE)

eSysmap:
	# .set _Syssize,(eSysmap-_Sysmap)/4
	.set	_Syssize,ptes
	.globl	_Syssize

	/* align on next page boundary */
	# . = . + NBPG - 1 & -NBPG	/* align to page boundry-does not work*/
	# .space (PGSIZE - ((eSysmap-_Sysmap) % PGSIZE)) % PGSIZE
	.set	sz,(4*ptes)%NBPG
	# .set rptes,(ptes)%1024
	# .set rptes,1024-rptes
	# .set ptes,ptes+rptes
	.set	Npdes,5			# number of kernel pde's installed below
	.space	(NBPG - sz)

/*
 * Initialization
 */
	.data
	.globl	_cpu
_cpu:	.long	0	# are we 386, 386sx, or 486

	.text
	.globl	start
start:				# This is assumed to be location zero!
	movw	$0x1234,%ax
	movw	%ax,0x472	# warm boot flag for the BIOS
	jmp	1f
	.space	0x500		# skip over the warm-boot/BIOS data area
1:
#ifdef notyet
	# XXX pass parameters on stack
/* count up memory */
	xorl	%eax,%eax		# start with base memory at 0x0
	movl	$(0xA0000/NBPG),%ecx	# look every 4K up to 640K
1:	movl	0(%eax),%ebx		# save location to check
	movl	$0xa55a5aa5,0(%eax)	# write test pattern
	cmpl	$0xa55a5aa5,0(%eax)	# does not check yet for rollover
	jne	2f
	movl	%ebx,0(%eax)		# restore memory
	addl	$ NBPG,%eax
	loop	1b
2:	movl	%eax,_basemem-SYSTEM

	movl	$0x100000,%eax		# next, talley remaining memory
	movl	$((0xFA0000-0x100000)/NBPG),%ecx
1:	movl	0(%eax),%ebx		# save location to check
	movl	$0xa55a5aa5,0(%eax)	# write test pattern
	cmpl	$0xa55a5aa5,0(%eax)	# does not check yet for rollover
	jne	2f
	movl	%ebx,0(%eax)		# restore memory
	addl	$ NBPG,%eax
	loop	1b
2:	movl	%eax,_abovemem-SYSTEM
#endif notyet

/* clear memory: bss plus the pages carved out just past _end for
 * proc 0's page table, page directory, and u. area. */
	movl	$_edata-SYSTEM,%edi
	movl	$_end-SYSTEM,%ecx
	addl	$ NBPG-1,%ecx
	andl	$~(NBPG-1),%ecx
	movl	%ecx,%esi		# esi = first page-aligned phys addr past bss
	subl	%edi,%ecx
	addl	$(UPAGES*NBPG)+NBPG+NBPG+NBPG,%ecx
					# txt+data+proc zero pt+u.
					# any other junk?
	addl	$ NBPG-1,%ecx
	andl	$~(NBPG-1),%ecx
	# shrl	$2,%ecx			# convert to long word count
	xorl	%eax,%eax		# pattern
	cld
	rep
	stosb

/*
 * Map Kernel
 * N.B. don't bother with making kernel text RO, as 386
 * ignores R/W AND U/S bits on kernel access (only v works) !
 */
	movl	%esi,%ecx		# this much memory,
	shrl	$ PGSHIFT,%ecx		# for this many pte s
	movl	$ PG_V,%eax		# having these bits set,
	movl	$_Sysmap-SYSTEM,%ebx	# in the kernel page table,
					# fill in kernel page table.
1:	movl	%eax,0(%ebx)
	addl	$ NBPG,%eax		# increment physical address
	addl	$4,%ebx			# next pte
	loop	1b

/* temporary double map  virt == real  (needed until the jump to high
 * memory below; the low mapping is undone later by the kernel proper) */

	movl	$1024,%ecx		# for this many pte s,
	movl	$ PG_V,%eax		# having these bits set,
	movl	$_Forkmap-SYSTEM,%ebx	# in the temporary page table,
					# fill in kernel page table.
1:	movl	%eax,0(%ebx)
	addl	$ NBPG,%eax		# increment physical address
	addl	$4,%ebx			# next pte
	loop	1b

/* map I/O memory map (display adapters etc. at IOPHYSmem) */

	movl	$atpgs,%ecx		# for this many pte s,
	movl	$(IOPHYSmem|PG_V),%eax	# having these bits set, (perhaps URW?)
	movl	$_ATDevmem-SYSTEM,%ebx	# in the temporary page table,
					# fill in kernel page table.
1:	movl	%eax,0(%ebx)
	addl	$ NBPG,%eax		# increment physical address
	addl	$4,%ebx			# next pte
	loop	1b

/*# map proc 0's page table*/
	movl	$_Usrptmap-SYSTEM,%ebx	# get pt map address
	lea	(0*NBPG)(%esi),%eax	# physical address of pt in proc 0
	orl	$ PG_V,%eax		# having these bits set,
	movl	%eax,0(%ebx)

	/*# map proc 0's _u*/
	movl	$ UPAGES,%ecx		# for this many pte s,
	lea	(2*NBPG)(%esi),%eax	# physical address of _u in proc 0
	orl	$ PG_V|PG_URKW,%eax	# having these bits set,
	lea	(0*NBPG)(%esi),%ebx	# physical address of stack pt in proc 0
	addl	$(UPTEOFF*4),%ebx
					# fill in proc 0 stack page table.
1:	movl	%eax,0(%ebx)
	addl	$ NBPG,%eax		# increment physical address
	addl	$4,%ebx			# next pte
	loop	1b

	/*# map proc 0's page directory*/
	lea	(1*NBPG)(%esi),%eax	# physical address of ptd in proc 0
	movl	%eax,%edi		# remember ptd physical address
	orl	$ PG_V|PG_URKW,%eax	# having these bits set,
	lea	(0*NBPG)(%esi),%ebx	# physical address of stack pt in proc 0
	addl	$(UPTEOFF*4),%ebx
	addl	$(UPAGES*4),%ebx
	movl	%eax,0(%ebx)

/*
 * Construct a page table directory
 * (of page directory elements - pde's)
 */
	/* kernel pde's */
	movl	$_Sysmap-SYSTEM,%eax	# physical address of kernel page table
	orl	$ PG_V,%eax		# pde entry is valid
	movl	$ Npdes,%ecx		# for this many pde s,
	movl	%edi,%ebx		# phys address of ptd in proc 0
	addl	$(SYSPDROFF*4), %ebx	# offset of pde for kernel
1:	movl	%eax,0(%ebx)
	addl	$ NBPG,%eax		# increment physical address
	addl	$4,%ebx			# next pde
	loop	1b
					# install a pde for temporary double map
	movl	$_Forkmap-SYSTEM,%eax	# physical address of temp page table
	orl	$ PG_V,%eax		# pde entry is valid
	movl	%edi,%ebx		# phys address of ptd in proc 0
	movl	%eax,0(%ebx)		# which is where temp maps!
					# install a pde to map _u for proc 0
	lea	(0*NBPG)(%esi),%eax	# physical address of pt in proc 0
	orl	$ PG_V,%eax		# pde entry is valid
	movl	%edi,%ebx		# phys address of ptd in proc 0
	addl	$(UPDROFF*4), %ebx	# offset of pde for kernel
	movl	%eax,0(%ebx)		# which is where _u maps!

#ifdef bug
	movl	$_Sysmap-SYSTEM,%eax	# physical address of kernel page table
	movl	$0x21,%ebx
	shll	$2,%ebx
	addl	%ebx,%eax
	xorl	%ebx,%ebx
	movl	%ebx,0(%eax)		# un validate offending pte
#endif

	movl	%edi,%eax	# phys address of ptd in proc 0
	# orl	$0x80000000,%eax
	movl	%eax,%cr3	# load ptd addr into mmu
	movl	%cr0,%eax	# get control word
	orl	$0x80000001,%eax	# and let s page!
	movl	%eax,%cr0	# NOW!

	pushl	$begin		# jump to high mem!
	ret			# jmp $begin does not work

begin:
	movl	$_Sysbase,%eax	# kernel stack just below system
	movl	%eax,%esp
	xorl	%eax,%eax	# mark end of frames
	movl	%eax,%ebp

	movl	_Crtat,%eax	# initialize Crt video ram address
	subl	$ IOPHYSmem,%eax	# rebias from physical ...
	addl	$_atdevbase,%eax	# ... to the ATDevmem virtual window
	movl	%eax,_Crtat

	call	_init386	# wire 386 chip for unix operation

/* initialize (slightly) the pcb */
	movl	$_u,%eax	# proc0 u-area
	movl	$_usrpt,%ecx
	movl	%ecx,PCB_P0BR(%eax)	# p0br: SVA of text/data user PT
	xorl	%ecx,%ecx
	movl	%ecx,PCB_P0LR(%eax)	# p0lr: 0 (doesn t really exist)
	movl	$_usrpt+NBPG,%ecx	# addr of end of PT
	subl	$ P1PAGES*4,%ecx	# backwards size of P1 region
	movl	%ecx,PCB_P1BR(%eax)	# p1br: P1PAGES from end of PT
	movl	$ P1PAGES-UPAGES,PCB_P1LR(%eax)	# p1lr: vax style
	movl	$ CLSIZE,PCB_SZPT(%eax)	# page table size
	fninit
	pushl	$0x262		# fpu control word: round-nearest, 64-bit
	fldcw	0(%esp)		# precision, invalid/zero-div masked off
	popl	%eax		# -- TODO confirm intended exception mask
#ifdef FPUNOTYET
#endif
	pushl	%edi		# cr3
	movl	%esi,%eax
	addl	$(UPAGES*NBPG)+NBPG+NBPG+NBPG,%eax
	shrl	$ PGSHIFT,%eax
	pushl	%eax		# firstaddr

	pushl	$20		# install signal trampoline code
	pushl	$_u+PCB_SIGC
	pushl	$sigcode
	call	_bcopy
	addl	$12,%esp

	call	_main

	# main returned: drop into user mode as process 1 running icode.
	.globl	__ucodesel,__udatasel
	movzwl	__ucodesel,%eax
	movzwl	__udatasel,%ecx
	# build outer stack frame
	pushl	%ecx		# user ss
	pushl	$_u		# user esp
	pushl	%eax		# user cs
	pushl	$0		# user ip
	movw	%cx,%ds
	movw	%cx,%es
	movw	%ax,%fs		# double map cs to fs
	movw	%cx,%gs		# and ds to gs
	lret			# goto user!
/*
 * __exit: "exit" for the kernel -- wedge the processor.
 * With a null IDT loaded and the stack pointer cleared, the next
 * fault cannot be delivered, hardware-freezing the machine.
 */
	.globl	__exit
__exit:
	lidt	xaxa		# invalidate interrupt descriptor table
	movl	$0,%esp		# hardware "freeze" fault
	ret
xaxa:	.long	0,0		# null idt pseudo-descriptor (limit 0, base 0)

	.set	exec,11		# syscall number of execv
	.set	exit,1		# syscall number of exit
	.globl	_icode
	.globl	_initflags
	.globl	_szicode
/* hand-assemble the far call -- gas gets the offset wrong */
#define	LCALL(x,y)	.byte 0x9a ; .long y; .word x
/*
 * Icode is copied out to process 1 to exec /etc/init.
 * If the exec fails, process 1 exits.
 * All addresses are formed relative to _icode so the code is
 * position independent once copied into the user address space.
 */
_icode:
	# pushl $argv-_icode
	movl	$argv,%eax
	subl	$_icode,%eax
	pushl	%eax

	# pushl $init-_icode
	movl	$init,%eax
	subl	$_icode,%eax
	pushl	%eax
	pushl	%eax		# dummy out rta

	movl	%esp,%ebp
	movl	$exec,%eax
	LCALL(0x7,0x0)		# execv("/etc/init", argv) via syscall gate
	pushl	%eax		# exec failed: pass its error to exit
	movl	$exit,%eax
	pushl	%eax		# dummy out rta
	LCALL(0x7,0x0)

init:	.asciz	"/etc/init"
	.align	2
_initflags:
	.long	0
argv:	.long	init-_icode
	.long	_initflags-_icode
	.long	0
_szicode:
	.long	_szicode-_icode

/*
 * Signal trampoline, copied to the u. area at PCB_SIGC; signal
 * delivery returns to user mode here, which calls the handler and
 * then issues sigreturn() to restore the interrupted context.
 */
sigcode:
	movl	12(%esp),%eax	# handler address; unsure if call will dec stack 1st
	call	%eax		# indirect call (old-gas syntax, no '*')
	xorl	%eax,%eax	# smaller than movl $103,%eax
	movb	$103,%al	# sigreturn()
	LCALL(0x7,0)		# enter kernel with args on stack
	hlt			# never gets here

/*
 * ___udivsi3(dividend, divisor) -- unsigned divide helper for gcc.
 * Returns dividend / divisor in %eax; clobbers %edx.
 */
	.globl	___udivsi3
___udivsi3:
	movl	4(%esp),%eax
	xorl	%edx,%edx	# zero-extend dividend into %edx:%eax
	divl	8(%esp)
	ret

/*
 * ___divsi3(dividend, divisor) -- signed divide helper for gcc.
 * Returns dividend / divisor in %eax; clobbers %edx.
 * (A dead "xorl %edx,%edx" that preceded cltd was removed: cltd
 * unconditionally rewrites %edx with the sign extension of %eax,
 * so the clear had no effect.)
 */
	.globl	___divsi3
___divsi3:
	movl	4(%esp),%eax
	cltd			# sign-extend dividend into %edx:%eax
	idivl	8(%esp)
	ret

/*
 * inb(port) -- read one byte from an i/o port, zero-extended in %eax.
 */
	.globl	_inb
_inb:	movl	4(%esp),%edx
	subl	%eax,%eax	# clr eax
	NOP			# i/o recovery delay for slow adapters
	inb	%dx,%al
	NOP
	ret

/*
 * outb(port, byte) -- write one byte to an i/o port.
 */
	.globl	_outb
_outb:	movl	4(%esp),%edx
	movl	8(%esp),%eax
	NOP			# i/o recovery delay for slow adapters
	outb	%al,%dx
	NOP
	ret

	#
	# bzero (base,cnt) -- zero cnt bytes starting at base.
	# _blkclr is an alternate entry with identical semantics.
	#
	.globl	_bzero
	.globl	_blkclr
_bzero:
_blkclr:
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax
	shrl	$2,%ecx		# bulk of it as longword stores,
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx		# then the odd trailing bytes
	rep
	stosb
	popl	%edi
	ret

	#
	# bcopy
(src,dst,cnt) 458 # NOTE: does not (yet) handle overlapped copies 459 # 460 461 .globl _bcopy 462_bcopy: 463 pushl %esi 464 pushl %edi 465 movl 12(%esp),%esi 466 movl 16(%esp),%edi 467 movl 20(%esp),%ecx 468 shrl $2,%ecx 469 cld 470 rep 471 movsl 472 movl 20(%esp),%ecx 473 andl $3,%ecx 474 rep 475 movsb 476 popl %edi 477 popl %esi 478 xorl %eax,%eax 479 ret 480 481 .globl _copyout 482_copyout: 483 movl $cpyflt,_nofault # in case we page/protection violate 484 pushl %esi 485 pushl %edi 486 movl 12(%esp),%esi 487 movl 16(%esp),%edi 488 movl 20(%esp),%ecx 489 shrl $2,%ecx 490 cld 491 rep 492 movsl 493 movl 20(%esp),%ecx 494 andl $3,%ecx 495 rep 496 movsb 497 popl %edi 498 popl %esi 499 xorl %eax,%eax 500 movl %eax,_nofault 501 ret 502 503 .globl _copyin 504_copyin: 505 movl $cpyflt,_nofault # in case we page/protection violate 506 pushl %esi 507 pushl %edi 508 movl 12(%esp),%esi 509 movl 16(%esp),%edi 510 movl 20(%esp),%ecx 511 shrl $2,%ecx 512 cld 513 rep 514 movsl 515 movl 20(%esp),%ecx 516 andl $3,%ecx 517 rep 518 movsb 519 popl %edi 520 popl %esi 521 xorl %eax,%eax 522 movl %eax,_nofault 523 ret 524 525cpyflt: popl %edi 526 popl %esi 527 xorl %eax,%eax 528 movl %eax,_nofault 529 movl $ EFAULT,%eax 530 ret 531 532 533 # insw(port,addr,cnt) 534 .globl _insw 535_insw: 536 pushl %edi 537 movw 8(%esp),%dx 538 movl 12(%esp),%edi 539 movl 16(%esp),%ecx 540 cld 541 NOP 542 .byte 0x66,0xf2,0x6d # rep insw 543 NOP 544 movl %edi,%eax 545 popl %edi 546 ret 547 548 # outsw(port,addr,cnt) 549 .globl _outsw 550_outsw: 551 pushl %esi 552 movw 8(%esp),%dx 553 movl 12(%esp),%esi 554 movl 16(%esp),%ecx 555 cld 556 NOP 557 .byte 0x66,0xf2,0x6f # rep outsw 558 NOP 559 movl %esi,%eax 560 popl %esi 561 ret 562 563 # lgdt(*gdt, ngdt) 564 .globl _lgdt 565 # .globl _gdt 566xxx: .word 31 567 .long 0 568_lgdt: 569 movl 4(%esp),%eax 570 movl %eax,xxx+2 571 movl 8(%esp),%eax 572 movw %ax,xxx 573 lgdt xxx 574 jmp 1f 575 NOP 5761: movw $0x10,%ax 577 movw %ax,%ds 578 movw %ax,%es 579 movw %ax,%ss 
580 movl 0(%esp),%eax 581 pushl %eax 582 movl $8,4(%esp) 583 lret 584 585 # lidt(*idt, nidt) 586 .globl _lidt 587yyy: .word 255 588 .long 0 589_lidt: 590 movl 4(%esp),%eax 591 movl %eax,yyy+2 592 movl 8(%esp),%eax 593 movw %ax,yyy 594 lidt yyy 595 ret 596 597 # lldt(sel) 598 .globl _lldt 599_lldt: 600 movl 4(%esp),%eax 601 lldt %eax 602 ret 603 604 # ltr(sel) 605 .globl _ltr 606_ltr: 607 movl 4(%esp),%eax 608 ltr %eax 609 ret 610 611 # lcr3(cr3) 612 .globl _lcr3 613 .globl _load_cr3 614_load_cr3: 615_lcr3: 616 movl 4(%esp),%eax 617 # orl $0x80000000,%eax 618 movl %eax,%cr3 619 movl %cr3,%eax 620 ret 621 622 # lcr0(cr0) 623 .globl _lcr0 624_lcr0: 625 movl 4(%esp),%eax 626 movl %eax,%cr0 627 ret 628 629 # rcr0() 630 .globl _rcr0 631_rcr0: 632 movl %cr0,%eax 633 ret 634 635 # rcr2() 636 .globl _rcr2 637_rcr2: 638 movl %cr2,%eax 639 ret 640 641 # rcr3() 642 .globl _rcr3 643 .globl __cr3 644__cr3: 645_rcr3: 646 movl %cr3,%eax 647 ret 648 649 # ssdtosd(*ssdp,*sdp) 650 .globl _ssdtosd 651_ssdtosd: 652 pushl %ebx 653 movl 8(%esp),%ecx 654 movl 8(%ecx),%ebx 655 shll $16,%ebx 656 movl (%ecx),%edx 657 roll $16,%edx 658 movb %dh,%bl 659 movb %dl,%bh 660 rorl $8,%ebx 661 movl 4(%ecx),%eax 662 movw %ax,%dx 663 andl $0xf0000,%eax 664 orl %eax,%ebx 665 movl 12(%esp),%ecx 666 movl %edx,(%ecx) 667 movl %ebx,4(%ecx) 668 popl %ebx 669 ret 670 671/* 672 * {fu,su},{byte,word} 673 */ 674ALTENTRY(fuiword) 675ENTRY(fuword) 676 movl $fusufault,_nofault # in case we page/protection violate 677 movl 4(%esp),%edx 678 .byte 0x65 # use gs 679 movl 0(%edx),%eax 680 xorl %edx,%edx 681 movl %edx,_nofault 682 ret 683 684ENTRY(fusword) 685 movl $fusufault,_nofault # in case we page/protection violate 686 movl 4(%esp),%edx 687 .byte 0x65 # use gs 688 movzwl 0(%edx),%eax 689 xorl %edx,%edx 690 movl %edx,_nofault 691 ret 692 693ALTENTRY(fuibyte) 694ENTRY(fubyte) 695 movl $fusufault,_nofault # in case we page/protection violate 696 movl 4(%esp),%edx 697 .byte 0x65 # use gs 698 movzbl 0(%edx),%eax 699 
xorl %edx,%edx 700 movl %edx,_nofault 701 ret 702 703fusufault: 704 xorl %eax,%eax 705 movl %eax,_nofault 706 decl %eax 707 ret 708 709ALTENTRY(suiword) 710ENTRY(suword) 711 movl $fusufault,_nofault # in case we page/protection violate 712 movl 4(%esp),%edx 713 movl 8(%esp),%eax 714 .byte 0x65 # use gs 715 movl %eax,0(%edx) 716 xorl %eax,%eax 717 movl %eax,_nofault 718 ret 719 720ENTRY(susword) 721 movl $fusufault,_nofault # in case we page/protection violate 722 movl 4(%esp),%edx 723 movl 8(%esp),%eax 724 .byte 0x65 # use gs 725 movw %ax,0(%edx) 726 xorl %eax,%eax 727 movl %eax,_nofault 728 ret 729 730ALTENTRY(suibyte) 731ENTRY(subyte) 732 movl $fusufault,_nofault # in case we page/protection violate 733 movl 4(%esp),%edx 734 movl 8(%esp),%eax 735 .byte 0x65 # use gs 736 movb %eax,0(%edx) 737 xorl %eax,%eax 738 movl %eax,_nofault 739 ret 740 741 ALTENTRY(savectx) 742 ENTRY(setjmp) 743 movl 4(%esp),%eax 744 movl %ebx, 0(%eax) # save ebx 745 movl %esp, 4(%eax) # save esp 746 movl %ebp, 8(%eax) # save ebp 747 movl %esi,12(%eax) # save esi 748 movl %edi,16(%eax) # save edi 749 movl (%esp),%edx # get rta 750 movl %edx,20(%eax) # save eip 751 xorl %eax,%eax # return (0); 752 ret 753 754 ENTRY(longjmp) 755 movl 4(%esp),%eax 756 movl 0(%eax),%ebx # restore ebx 757 movl 4(%eax),%esp # restore esp 758 movl 8(%eax),%ebp # restore ebp 759 movl 12(%eax),%esi # restore esi 760 movl 16(%eax),%edi # restore edi 761 movl 20(%eax),%edx # get rta 762 movl %edx,(%esp) # put in return frame 763 xorl %eax,%eax # return (1); 764 incl %eax 765 ret 766/* 767 * The following primitives manipulate the run queues. 768 * _whichqs tells which of the 32 queues _qs 769 * have processes in them. Setrq puts processes into queues, Remrq 770 * removes them from queues. The running process is on no queue, 771 * other processes are on a queue related to p->p_pri, divided by 4 772 * actually to shrink the 0-127 range of priorities into the 32 available 773 * queues. 
 */

	.globl	_whichqs,_qs,_cnt,_panic
	.comm	_noproc,4		# set while no process is running
	.comm	_runrun,4		# set when a reschedule is wanted

/*
 * Setrq(p)
 *
 * Call should be made at spl6(), and p->p_stat should be SRUN
 * Links p onto the tail of the doubly-linked queue chosen by
 * p->p_pri/4 and marks that queue occupied in _whichqs.
 * Panics if p already appears to be on a queue (p_rlink != 0).
 */
ENTRY(setrq)
	movl	4(%esp),%eax
	cmpl	$0,P_RLINK(%eax)	# should not be on q already
	je	set1
	pushl	$set2
	call	_panic
set1:
	movzbl	P_PRI(%eax),%edx
	shrl	$2,%edx			# queue index = priority / 4
	btsl	%edx,_whichqs		# set q full bit
	shll	$3,%edx			# each q header is 8 bytes
	addl	$_qs,%edx		# locate q hdr
	movl	%edx,P_LINK(%eax)	# link process on tail of q
	movl	P_RLINK(%edx),%ecx
	movl	%ecx,P_RLINK(%eax)
	movl	%eax,P_RLINK(%edx)
	movl	%eax,P_LINK(%ecx)
	ret

set2:	.asciz	"setrq"

/*
 * Remrq(p)
 *
 * Call should be made at spl6().
 * Unlinks p from its queue, then re-sets the _whichqs bit if the
 * queue is still non-empty; panics if the bit was already clear.
 */
ENTRY(remrq)
	movl	4(%esp),%eax
	movzbl	P_PRI(%eax),%edx
	shrl	$2,%edx
	btrl	%edx,_whichqs		# clear full bit, panic if clear already
	jb	rem1
	pushl	$rem3
	call	_panic
rem1:
	pushl	%edx			# preserve queue index across unlink
	movl	P_LINK(%eax),%ecx	# unlink process
	movl	P_RLINK(%eax),%edx
	movl	%edx,P_RLINK(%ecx)
	movl	P_RLINK(%eax),%ecx
	movl	P_LINK(%eax),%edx
	movl	%edx,P_LINK(%ecx)
	popl	%edx
	movl	$_qs,%ecx
	shll	$3,%edx
	addl	%edx,%ecx
	cmpl	P_LINK(%ecx),%ecx	# q still has something?
	je	rem2
	shrl	$3,%edx			# yes, set bit as still full
	btsl	%edx,_whichqs
rem2:
	movl	$0,P_RLINK(%eax)	# zap reverse link to indicate off list
	ret

rem3:	.asciz	"remrq"
sw0:	.asciz	"swtch"
sw01:	.asciz	"swtch1"
sw02:	.asciz	"swtch2"

/*
 * When no processes are on the runq, Swtch branches to idle
 * to wait for something to come ready.
 */
	.globl	Idle
Idle:
idle:
	call	_spl0			# allow interrupts while idling
	cmpl	$0,_whichqs
	jne	sw1			# something became runnable
	hlt				# wait for interrupt
	jmp	idle

badsw:
	pushl	$sw0
	call	_panic
	/*NOTREACHED*/

/*
 * Swtch()
 * Pick the highest-priority occupied run queue, unlink the process
 * at its head, save the outgoing context in the (globally mapped)
 * u. pcb, switch address spaces via %cr3, and restore the incoming
 * process's context.  Falls into idle when no queue is occupied.
 */
ENTRY(swtch)
	movl	$1,%eax
	movl	%eax,_noproc
	incl	_cnt+V_SWTCH
sw1:
	bsfl	_whichqs,%eax		# find a full q
	jz	idle			# if none, idle
swfnd:
	# cli
	btrl	%eax,_whichqs		# clear q full status
	jnb	sw1			# if it was clear, look for another
	pushl	%eax			# save which one we are using
	shll	$3,%eax
	addl	$_qs,%eax		# select q
	pushl	%eax

	cmpl	P_LINK(%eax),%eax	# linked to self? (e.g. not on list)
	je	badsw			# not possible
	movl	P_LINK(%eax),%ecx	# unlink from front of process q
	movl	P_LINK(%ecx),%edx
	movl	%edx,P_LINK(%eax)
	movl	P_RLINK(%ecx),%eax
	movl	%eax,P_RLINK(%edx)

	popl	%eax
	popl	%edx
	cmpl	P_LINK(%ecx),%eax	# q empty
	je	sw2
	btsl	%edx,_whichqs		# nope, indicate full
sw2:
	movl	$0,%eax
	movl	%eax,_noproc
	movl	%eax,_runrun
	cmpl	$0,P_WCHAN(%ecx)	# sanity: a runnable process must
	jne	badsw			# not be asleep ...
	cmpb	$ SRUN,P_STAT(%ecx)	# ... and must be SRUN
	jne	badsw
	movl	%eax,P_RLINK(%ecx)
	# movl	P_ADDR(%ecx),%edx
	movl	P_CR3(%ecx),%edx	# new process's page directory

/* switch to new process. first, save context as needed */
	movl	$_u,%ecx

	movl	(%esp),%eax		# Hardware registers
	movl	%eax, PCB_EIP(%ecx)	# (our return address is the saved eip)
	movl	%ebx, PCB_EBX(%ecx)
	movl	%esp, PCB_ESP(%ecx)
	movl	%ebp, PCB_EBP(%ecx)
	movl	%esi, PCB_ESI(%ecx)
	movl	%edi, PCB_EDI(%ecx)

#ifdef FPUNOTYET
#endif

	movl	_CMAP2,%eax		# save temporary map PTE
	movl	%eax,PCB_CMAP2(%ecx)	# in our context


	# orl	$0x80000000,%edx
	movl	%edx,%cr3		# context switch -- after this the
					# new process's u. is mapped at _u

	movl	$_u,%ecx
	.globl	__gsel_tss
	movw	__gsel_tss,%ax
	# ltr	%ax

/* restore context */
	movl	PCB_EBX(%ecx), %ebx
	movl	PCB_ESP(%ecx), %esp
	movl	PCB_EBP(%ecx), %ebp
	movl	PCB_ESI(%ecx), %esi
	movl	PCB_EDI(%ecx), %edi
	movl	PCB_EIP(%ecx), %eax
	movl	%eax, (%esp)		# resume at the saved eip on ret

#ifdef FPUNOTYET
#endif

	movl	PCB_CMAP2(%ecx),%eax	# get temporary map
	movl	%eax,_CMAP2		# reload temporary map PTE
#ifdef FPUNOTYET
#endif
	cmpl	$0,PCB_SSWAP(%ecx)	# do an alternate return?
	jne	res3			# yes, go reload regs
	call	_spl0
	# sti
	ret
res3:
	xorl	%eax,%eax		# inline restore context
	xchgl	PCB_SSWAP(%ecx),%eax	# addr of saved context, clear it

	#pushal; pushl 20(%eax); pushl $l2; call _printf; popl %eax ; popl %eax; popal ; .data ; l2: .asciz "s %x\n" ; .text

	# inline longjmp through the saved context
	movl	0(%eax),%ebx		# restore ebx
	movl	4(%eax),%esp		# restore esp
	movl	8(%eax),%ebp		# restore ebp
	movl	12(%eax),%esi		# restore esi
	movl	16(%eax),%edi		# restore edi
	movl	20(%eax),%edx		# get rta
	movl	%edx,(%esp)		# put in return frame
	xorl	%eax,%eax		# return (1);
	incl	%eax
	sti
	ret

/*
 * Resume(p_addr)
 * current just used to fillout u. tss so fork can fake a return to swtch
 * [ all thats really needed is esp and eip ]
 */
ENTRY(resume)
	# movl	4(%esp),%ecx
	movl	$_u,%ecx
	movl	(%esp),%eax
	movl	%eax, PCB_EIP(%ecx)
	movl	%ebx, PCB_EBX(%ecx)
	movl	%esp, PCB_ESP(%ecx)
	movl	%ebp, PCB_EBP(%ecx)
	movl	%esi, PCB_ESI(%ecx)
	movl	%edi, PCB_EDI(%ecx)
#ifdef FPUNOTYET
#endif
	movl	$0,%eax
	ret

.data
	.globl	_cyloffset
_cyloffset:	.long	0
	.globl	_nofault		# fault-recovery hook for copy/fusu
_nofault:	.long	0
.text
	# To be done:
	.globl	_addupc
	.globl	_astoff
	.globl	_doadump
	.globl	_inittodr
	.globl	_physaddr
_addupc:
	.byte	0xcc			# int3 -- unimplemented
_astoff:
	ret
_doadump:
	.byte	0xcc			# int3 -- unimplemented
_physaddr:
	.byte	0xcc			# int3 -- unimplemented

/* Paranoiaa - never happens, right? */
	.globl	_svfpsp,_rsfpsp
_svfpsp:
	popl	%eax			# save esp/ebp around a suspect call
	movl	%esp,svesp
	movl	%ebp,svebp
	pushl	%eax
	ret

_rsfpsp:
	popl	%eax			# restore esp/ebp saved by _svfpsp
	movl	svesp,%esp
	movl	svebp,%ebp
	pushl	%eax
	ret

svesp:	.long	0
svebp:	.long	0