/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * %sccs.include.redist.c%
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	from NetBSD: Id: locore.s,v 1.12 1993/05/27 16:44:13 cgd Exp
 *
 *	@(#)locore.s	8.1 (Berkeley) 06/11/93
 */

/*
 * locore.s:	4BSD machine support for the Intel 386
 *		Preliminary version
 *		Written by William F. Jolitz, 386BSD Project
 *
 * AT&T/GAS syntax, i386 (32-bit).  This file runs first at its physical
 * load address (hence the "-SYSTEM" relocations below) and then, once
 * paging is enabled, at the linked virtual address SYSTEM.
 */

#include "assym.s"
#include "machine/psl.h"
#include "machine/pte.h"

#include "errno.h"

#include "machine/trap.h"

#include "machine/specialreg.h"

#ifdef cgd_notdef
#include "machine/cputypes.h"
#endif

#define	KDSEL		0x10		/* kernel data segment selector */

/*
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas. Perhaps newer versions
 * will have more pleasant appearance.
 */

	.set	IDXSHIFT,10
	.set	SYSTEM,0xFE000000	# virtual address of system start
	/*note: gas copys sign bit (e.g. arithmetic >>), can't do SYSTEM>>22! */
	.set	SYSPDROFF,0x3F8		# Page dir index of System Base

/* NOP: two reads of port 0x84 — a classic ISA-bus delay idiom */
#define	NOP	inb $0x84, %al ; inb $0x84, %al
#define	ALIGN32	.align 2	/* 2^2  = 4 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.set	PDRPDROFF,0x3F7		# Page dir index of Page dir
	.globl	_PTmap, _PTD, _PTDpde, _Sysmap
	.set	_PTmap,0xFDC00000
	.set	_PTD,0xFDFF7000
	.set	_Sysmap,0xFDFF8000
	.set	_PTDpde,0xFDFF7000+4*PDRPDROFF

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.set	APDRPDROFF,0x3FE	# Page dir index of Page dir
	.globl	_APTmap, _APTD, _APTDpde
	.set	_APTmap,0xFF800000
	.set	_APTD,0xFFBFE000
	.set	_APTDpde,0xFDFF7000+4*APDRPDROFF

/*
 * Access to each processes kernel stack is via a region of
 * per-process address space (at the beginning), immediatly above
 * the user process stack.
 */
	.set	_kstack, USRSTACK
	.globl	_kstack
	.set	PPDROFF,0x3F6
	.set	PPTEOFF,0x400-UPAGES	# 0x3FE

/* C-callable entry points get a leading underscore (a.out symbol style) */
#define	ENTRY(name) \
	.globl _/**/name; _/**/name:
#define	ALTENTRY(name) \
	.globl _/**/name; _/**/name:

/*
 * Initialization
 */
	.data
	.globl	_cpu,_cold,_boothowto,_bootdev,_cyloffset,_atdevbase,_atdevphys
_cpu:	.long	0	# are we 386, 386sx, or 486
_cold:	.long	1	# cold till we are not
_atdevbase:	.long	0	# location of start of iomem in virtual
_atdevphys:	.long	0	# location of device mapping ptes (phys)

	.globl	_IdlePTD, _KPTphys
_IdlePTD:	.long	0	# physical address of proc 0 page directory
_KPTphys:	.long	0	# physical address of kernel page tables

	.space 512		# bootstrap stack grows down into this space
tmpstk:
	.text
	.globl	start
start:	movw	$0x1234,%ax
	movw	%ax,0x472	# warm boot
	jmp	1f
	.space	0x500	# skip over warm boot shit

	/*
	 * pass parameters on stack (howto, bootdev, unit, cyloffset)
	 * note: 0(%esp) is return address of boot
	 * ( if we want to hold onto /boot, it's physical %esp up to _end)
	 */

 1:	movl	4(%esp),%eax
	movl	%eax,_boothowto-SYSTEM	# running physical: relocate by hand
	movl	8(%esp),%eax
	movl	%eax,_bootdev-SYSTEM
	movl	12(%esp),%eax
	movl	%eax, _cyloffset-SYSTEM

#ifdef cgd_notdef
	/* find out our CPU type by toggling the AC bit (bit 18) in EFLAGS:
	 * a 386 cannot set it, a 486 can. */
	pushfl
	popl	%eax
	movl	%eax, %ecx
	xorl	$0x40000, %eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx, %eax
	shrl	$18, %eax
	andl	$1, %eax
	push	%ecx
	popfl

	cmpl	$0, %eax
	jne	1f
	movl	$CPU_386, _cpu-SYSTEM
	jmp	2f
1:	movl	$CPU_486, _cpu-SYSTEM
2:
#endif

#ifdef garbage
	/* count up memory by writing/reading back a test pattern per page */

	xorl	%eax,%eax	# start with base memory at 0x0
	#movl	$ 0xA0000/NBPG,%ecx	# look every 4K up to 640K
	movl	$ 0xA0,%ecx	# look every 4K up to 640K
1:	movl	0(%eax),%ebx	# save location to check
	movl	$0xa55a5aa5,0(%eax)	# write test pattern
	/* flush stupid cache here! (with bcopy (0,0,512*1024) ) */
	cmpl	$0xa55a5aa5,0(%eax)	# does not check yet for rollover
	jne	2f
	movl	%ebx,0(%eax)	# restore memory
	addl	$ NBPG,%eax
	loop	1b
2:	shrl	$12,%eax
	movl	%eax,_Maxmem-SYSTEM

	movl	$0x100000,%eax	# next, talley remaining memory
	#movl	$((0xFFF000-0x100000)/NBPG),%ecx
	movl	$(0xFFF-0x100),%ecx
1:	movl	0(%eax),%ebx	# save location to check
	movl	$0xa55a5aa5,0(%eax)	# write test pattern
	cmpl	$0xa55a5aa5,0(%eax)	# does not check yet for rollover
	jne	2f
	movl	%ebx,0(%eax)	# restore memory
	addl	$ NBPG,%eax
	loop	1b
2:	shrl	$12,%eax
	movl	%eax,_Maxmem-SYSTEM
#endif

/* find end of kernel image, rounded up to a page boundary */
	movl	$_end-SYSTEM,%ecx
	addl	$ NBPG-1,%ecx
	andl	$~(NBPG-1),%ecx
	movl	%ecx,%esi	# %esi = phys addr of bootstrap area, kept below

/* clear bss and memory for bootstrap pagetables. */
	movl	$_edata-SYSTEM,%edi
	subl	%edi,%ecx
	addl	$(UPAGES+5)*NBPG,%ecx
/*
 * Virtual address space of kernel:
 *
 *	text | data | bss | page dir | proc0 kernel stack | usr stk map | Sysmap
 *			     0		     1	     2	       3	     4
 */
	xorl	%eax,%eax	# pattern
	cld
	rep
	stosb

	movl	%esi,_IdlePTD-SYSTEM /*physical address of Idle Address space */
	movl	$ tmpstk-SYSTEM,%esp	# bootstrap stack end location

/* fillkpt: write %ecx ptes starting at pte address %ebx, first frame %eax */
#define	fillkpt \
1:	movl	%eax,0(%ebx)	; \
	addl	$ NBPG,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b		;

/*
 * Map Kernel
 * N.B. don't bother with making kernel text RO, as 386
 * ignores R/W AND U/S bits on kernel access (only v works) !
 *
 * First step - build page tables
 */
	movl	%esi,%ecx	# this much memory,
	shrl	$ PGSHIFT,%ecx	# for this many pte s
	addl	$ UPAGES+4,%ecx	# including our early context
	movl	$ PG_V,%eax	# having these bits set,
	lea	(4*NBPG)(%esi),%ebx	# physical address of KPT in proc 0,
	movl	%ebx,_KPTphys-SYSTEM	# in the kernel page table,
	fillkpt

/* map I/O memory map (VGA/adapter space 0xA0000-0xFFFFF) */

	movl	$0x100-0xa0,%ecx	# for this many pte s,
	movl	$(0xa0000|PG_V|PG_UW),%eax	# having these bits set,(perhaps URW?) XXX 06 Aug 92
	movl	%ebx,_atdevphys-SYSTEM	# remember phys addr of ptes
	fillkpt

 /* map proc 0's kernel stack into user page table page */

	movl	$ UPAGES,%ecx	# for this many pte s,
	lea	(1*NBPG)(%esi),%eax	# physical address in proc 0
	lea	(SYSTEM)(%eax),%edx
	movl	%edx,_proc0paddr-SYSTEM	# remember VA for 0th process init
	orl	$ PG_V|PG_URKW,%eax	# having these bits set,
	lea	(3*NBPG)(%esi),%ebx	# physical address of stack pt in proc 0
	addl	$(PPTEOFF*4),%ebx
	fillkpt

/*
 * Construct a page table directory
 * (of page directory elements - pde's)
 */
	/* install a pde for temporary double map of bottom of VA,
	 * so execution survives the instant paging is turned on */
	lea	(4*NBPG)(%esi),%eax	# physical address of kernel page table
	orl	$ PG_V|PG_UW,%eax	# pde entry is valid XXX 06 Aug 92
	movl	%eax,(%esi)	# which is where temp maps!

	/* kernel pde's */
	movl	$ 3,%ecx	# for this many pde s,
	lea	(SYSPDROFF*4)(%esi), %ebx	# offset of pde for kernel
	fillkpt

	/* install a pde recursively mapping page directory as a page table! */
	movl	%esi,%eax	# phys address of ptd in proc 0
	orl	$ PG_V|PG_UW,%eax	# pde entry is valid XXX 06 Aug 92
	movl	%eax, PDRPDROFF*4(%esi)	# which is where PTmap maps!

	/* install a pde to map kernel stack for proc 0 */
	lea	(3*NBPG)(%esi),%eax	# physical address of pt in proc 0
	orl	$ PG_V,%eax	# pde entry is valid
	movl	%eax,PPDROFF*4(%esi)	# which is where kernel stack maps!

	/* load base of page directory, and enable mapping */
	movl	%esi,%eax	# phys address of ptd in proc 0
	orl	$ I386_CR3PAT,%eax
	movl	%eax,%cr3	# load ptd addr into mmu
	movl	%cr0,%eax	# get control word
	orl	$0x80000001,%eax	# and let s page! (PG | PE)
	movl	%eax,%cr0	# NOW!

	pushl	$begin	# jump to high mem!
	ret			# "return" to the pushed high address

begin: /* now running relocated at SYSTEM where the system is linked to run */

	/* relocate the console-memory pointer _Crtat into the device map */
	.globl _Crtat
	movl	_Crtat,%eax
	subl	$0xfe0a0000,%eax
	movl	_atdevphys,%edx	# get pte PA
	subl	_KPTphys,%edx	# remove base of ptes, now have phys offset
	shll	$ PGSHIFT-2,%edx	# corresponding to virt offset
	addl	$ SYSTEM,%edx	# add virtual base
	movl	%edx, _atdevbase
	addl	%eax,%edx
	movl	%edx,_Crtat

	/* set up bootstrap stack */
	movl	$ _kstack+UPAGES*NBPG-4*12,%esp	# bootstrap stack end location
	xorl	%eax,%eax	# mark end of frames
	movl	%eax,%ebp
	movl	_proc0paddr, %eax
	movl	%esi, PCB_CR3(%eax)

	lea	7*NBPG(%esi),%esi	# skip past stack.
	pushl	%esi

	call	_init386	# wire 386 chip for unix operation

	movl	$0,_PTD		# remove temporary double map of low VA
	call	_main
	popl	%esi

	/* build an iret-style frame and drop into user mode as process 1 */
	.globl	__ucodesel,__udatasel
	movzwl	__ucodesel,%eax
	movzwl	__udatasel,%ecx
	# build outer stack frame
	pushl	%ecx	# user ss
	pushl	$ USRSTACK	# user esp
	pushl	%eax	# user cs
	pushl	$0	# user ip
	movw	%cx,%ds
	movw	%cx,%es
	movw	%ax,%fs	# double map cs to fs
	movw	%cx,%gs	#   and ds to gs
	lret	# goto user!

	pushl	$lretmsg1	/* "should never get here!" */
	call	_panic
lretmsg1:
	.asciz	"lret: toinit\n"


	.set	exec,59		# syscall numbers used by icode
	.set	exit,1
	.globl	_icode
	.globl	_szicode

/* hand-assembled far call through the syscall gate (selector x, offset y) */
#define	LCALL(x,y)	.byte 0x9a ; .long y; .word x
/*
 * Icode is copied out to process 1 to exec /etc/init.
 * If the exec fails, process 1 exits.
 * All addresses are computed position-independently (value - _icode)
 * because the code runs at a different address after copyout.
 */
_icode:
	# pushl	$argv-_icode	# gas fucks up again
	movl	$argv,%eax
	subl	$_icode,%eax
	pushl	%eax

	# pushl	$init-_icode
	movl	$init,%eax
	subl	$_icode,%eax
	pushl	%eax
	pushl	%eax	# dummy out rta

	movl	%esp,%ebp
	movl	$exec,%eax
	LCALL(0x7,0x0)	# exec("/sbin/init", argv)
	pushl	%eax
	movl	$exit,%eax
	pushl	%eax	# dummy out rta
	LCALL(0x7,0x0)	# exec failed: exit(status)

init:
	.asciz	"/sbin/init"
	.align	2
argv:
	.long	init+6-_icode	# argv[0] = "init" ("/sbin/init" + 6)
	.long	eicode-_icode	# argv[1] follows icode after copyout
	.long	0
eicode:

_szicode:
	.long	_szicode-_icode

/*
 * Signal trampoline, copied to user stack: call the handler whose
 * address is at 12(%esp), then issue sigreturn() through the gate.
 */
	.globl	_sigcode,_szsigcode
_sigcode:
	movl	12(%esp),%eax	# unsure if call will dec stack 1st
	call	%eax		# NOTE(review): modern gas wants "call *%eax";
				# this is the historical indirect-call spelling
	xorl	%eax,%eax	# smaller movl $103,%eax
	movb	$103,%al	# sigreturn()
	LCALL(0x7,0)	# enter kernel with args on stack
	hlt	# never gets here

_szsigcode:
	.long	_szsigcode-_sigcode

	/*
	 * Support routines for GCC
	 */

	/* unsigned 32-bit divide: 4(%esp) / 8(%esp) -> %eax */
	.globl ___udivsi3
	ALIGN32
___udivsi3:
	movl 4(%esp),%eax
	xorl %edx,%edx		# zero-extend dividend into %edx:%eax
	divl 8(%esp)
	ret

	/* signed 32-bit divide: 4(%esp) / 8(%esp) -> %eax */
	.globl ___divsi3
	ALIGN32
___divsi3:
	movl 4(%esp),%eax
	#xorl %edx,%edx	/* not needed - cltd sign extends into %edx */
	cltd
	idivl 8(%esp)
	ret

	/*
	 * I/O bus instructions via C
	 */

	/* u_char inb(u_short port) */
	.globl	_inb
	ALIGN32
_inb:	movl	4(%esp),%edx
	subl	%eax,%eax	# clr eax
	NOP			# bus settle delay
	inb	%dx,%al
	ret


	/* u_short inw(u_short port) */
	.globl	_inw
	ALIGN32
_inw:	movl	4(%esp),%edx
	subl	%eax,%eax	# clr eax
	NOP
	inw	%dx,%ax
	ret


	/* rtcin(reg): read RTC/CMOS register via index port 0x70 */
	.globl	_rtcin
	ALIGN32
_rtcin:	movl	4(%esp),%eax
	outb	%al,$0x70
	subl	%eax,%eax	# clr eax
	inb	$0x71,%al	# Compaq SystemPro
	ret

	/* void outb(u_short port, u_char data) */
	.globl	_outb
	ALIGN32
_outb:	movl	4(%esp),%edx
	NOP
	movl	8(%esp),%eax
	outb	%al,%dx
	NOP
	ret

	/* void outw(u_short port, u_short data) */
	.globl	_outw
	ALIGN32
_outw:	movl	4(%esp),%edx
	NOP
	movl	8(%esp),%eax
	outw	%ax,%dx
	NOP
	ret
448 /* 449 * void bzero(void *base, u_int cnt) 450 */ 451 452 .globl _bzero 453 ALIGN32 454_bzero: 455 pushl %edi 456 movl 8(%esp),%edi 457 movl 12(%esp),%ecx 458 xorl %eax,%eax 459 shrl $2,%ecx 460 cld 461 rep 462 stosl 463 movl 12(%esp),%ecx 464 andl $3,%ecx 465 rep 466 stosb 467 popl %edi 468 ret 469 470 /* 471 * fillw (pat,base,cnt) 472 */ 473 474 .globl _fillw 475 ALIGN32 476_fillw: 477 pushl %edi 478 movl 8(%esp),%eax 479 movl 12(%esp),%edi 480 movw %ax, %cx 481 rorl $16, %eax 482 movw %cx, %ax 483 cld 484 movl 16(%esp),%ecx 485 shrl %ecx 486 rep 487 stosl 488 movl 16(%esp),%ecx 489 andl $1, %ecx 490 rep 491 stosw 492 popl %edi 493 ret 494 495 .globl _bcopyb 496 ALIGN32 497_bcopyb: 498 pushl %esi 499 pushl %edi 500 movl 12(%esp),%esi 501 movl 16(%esp),%edi 502 movl 20(%esp),%ecx 503 cld 504 rep 505 movsb 506 popl %edi 507 popl %esi 508 xorl %eax,%eax 509 ret 510 511 /* 512 * (ov)bcopy (src,dst,cnt) 513 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800 514 */ 515 516 .globl _bcopy,_ovbcopy 517 ALIGN32 518_ovbcopy: 519_bcopy: 520 pushl %esi 521 pushl %edi 522 movl 12(%esp),%esi 523 movl 16(%esp),%edi 524 movl 20(%esp),%ecx 525 cmpl %esi,%edi /* potentially overlapping? */ 526 jnb 1f 527 cld /* nope, copy forwards. */ 528 shrl $2,%ecx /* copy by words */ 529 rep 530 movsl 531 movl 20(%esp),%ecx 532 andl $3,%ecx /* any bytes left? */ 533 rep 534 movsb 535 popl %edi 536 popl %esi 537 xorl %eax,%eax 538 ret 539 ALIGN32 5401: 541 addl %ecx,%edi /* copy backwards. */ 542 addl %ecx,%esi 543 std 544 andl $3,%ecx /* any fractional bytes? 
*/ 545 decl %edi 546 decl %esi 547 rep 548 movsb 549 movl 20(%esp),%ecx /* copy remainder by words */ 550 shrl $2,%ecx 551 subl $3,%esi 552 subl $3,%edi 553 rep 554 movsl 555 popl %edi 556 popl %esi 557 xorl %eax,%eax 558 cld 559 ret 560 561#ifdef notdef 562 .globl _copyout 563 ALIGN32 564_copyout: 565 movl _curpcb, %eax 566 movl $cpyflt, PCB_ONFAULT(%eax) # in case we page/protection violate 567 pushl %esi 568 pushl %edi 569 pushl %ebx 570 movl 16(%esp), %esi 571 movl 20(%esp), %edi 572 movl 24(%esp), %ebx 573 574 /* first, check to see if "write fault" */ 5751: movl %edi, %eax 576#ifdef notyet 577 shrl $IDXSHIFT, %eax /* fetch pte associated with address */ 578 andb $0xfc, %al 579 movl _PTmap(%eax), %eax 580 581 andb $7, %al /* if we are the one case that won't trap... */ 582 cmpb $5, %al 583 jne 2f 584 /* ... then simulate the trap! */ 585 pushl %edi 586 call _trapwrite /* trapwrite(addr) */ 587 popl %edx 588 589 cmpl $0, %eax /* if not ok, return */ 590 jne cpyflt 591 /* otherwise, continue with reference */ 5922: 593 movl %edi, %eax /* calculate remainder this pass */ 594 andl $0xfffff000, %eax 595 movl $NBPG, %ecx 596 subl %eax, %ecx 597 cmpl %ecx, %ebx 598 jle 3f 599 movl %ebx, %ecx 6003: subl %ecx, %ebx 601 movl %ecx, %edx 602#else 603 movl %ebx, %ecx 604 movl %ebx, %edx 605#endif 606 607 shrl $2,%ecx /* movem */ 608 cld 609 rep 610 movsl 611 movl %edx, %ecx /* don't depend on ecx here! 
*/ 612 andl $3, %ecx 613 rep 614 movsb 615 616#ifdef notyet 617 cmpl $0, %ebx 618 jl 1b 619#endif 620 621 popl %ebx 622 popl %edi 623 popl %esi 624 xorl %eax,%eax 625 movl _curpcb,%edx 626 movl %eax,PCB_ONFAULT(%edx) 627 ret 628 629 .globl _copyin 630 ALIGN32 631_copyin: 632 movl _curpcb,%eax 633 movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate 634 pushl %esi 635 pushl %edi 636 pushl %ebx 637 movl 12(%esp),%esi 638 movl 16(%esp),%edi 639 movl 20(%esp),%ecx 640 shrl $2,%ecx 641 cld 642 rep 643 movsl 644 movl 20(%esp),%ecx 645 andl $3,%ecx 646 rep 647 movsb 648 popl %ebx 649 popl %edi 650 popl %esi 651 xorl %eax,%eax 652 movl _curpcb,%edx 653 movl %eax,PCB_ONFAULT(%edx) 654 ret 655 656 ALIGN32 657cpyflt: 658 popl %ebx 659 popl %edi 660 popl %esi 661 movl _curpcb,%edx 662 movl $0,PCB_ONFAULT(%edx) 663 movl $ EFAULT,%eax 664 ret 665#else 666 .globl _copyout 667 ALIGN32 668_copyout: 669 movl _curpcb,%eax 670 movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate 671 pushl %esi 672 pushl %edi 673 movl 12(%esp),%esi 674 movl 16(%esp),%edi 675 movl 20(%esp),%ecx 676 shrl $2,%ecx 677 cld 678 rep 679 movsl 680 movl 20(%esp),%ecx 681 andl $3,%ecx 682 rep 683 movsb 684 popl %edi 685 popl %esi 686 xorl %eax,%eax 687 movl _curpcb,%edx 688 movl %eax,PCB_ONFAULT(%edx) 689 ret 690 691 .globl _copyin 692 ALIGN32 693_copyin: 694 movl _curpcb,%eax 695 movl $cpyflt,PCB_ONFAULT(%eax) # in case we page/protection violate 696 pushl %esi 697 pushl %edi 698 movl 12(%esp),%esi 699 movl 16(%esp),%edi 700 movl 20(%esp),%ecx 701 shrl $2,%ecx 702 cld 703 rep 704 movsl 705 movl 20(%esp),%ecx 706 andl $3,%ecx 707 rep 708 movsb 709 popl %edi 710 popl %esi 711 xorl %eax,%eax 712 movl _curpcb,%edx 713 movl %eax,PCB_ONFAULT(%edx) 714 ret 715 716 ALIGN32 717cpyflt: popl %edi 718 popl %esi 719 movl _curpcb,%edx 720 movl $0,PCB_ONFAULT(%edx) 721 movl $ EFAULT,%eax 722 ret 723 724#endif 725 726 # insb(port,addr,cnt) 727 .globl _insb 728 ALIGN32 729_insb: 730 pushl %edi 731 
movw 8(%esp),%dx 732 movl 12(%esp),%edi 733 movl 16(%esp),%ecx 734 cld 735 NOP 736 rep 737 insb 738 NOP 739 movl %edi,%eax 740 popl %edi 741 ret 742 743 # insw(port,addr,cnt) 744 .globl _insw 745 ALIGN32 746_insw: 747 pushl %edi 748 movw 8(%esp),%dx 749 movl 12(%esp),%edi 750 movl 16(%esp),%ecx 751 cld 752 NOP 753 .byte 0x66,0xf2,0x6d # rep insw 754 NOP 755 movl %edi,%eax 756 popl %edi 757 ret 758 759 # outsw(port,addr,cnt) 760 .globl _outsw 761 ALIGN32 762_outsw: 763 pushl %esi 764 movw 8(%esp),%dx 765 movl 12(%esp),%esi 766 movl 16(%esp),%ecx 767 cld 768 NOP 769 .byte 0x66,0xf2,0x6f # rep outsw 770 NOP 771 movl %esi,%eax 772 popl %esi 773 ret 774 775 # outsb(port,addr,cnt) 776 .globl _outsb 777 ALIGN32 778_outsb: 779 pushl %esi 780 movw 8(%esp),%dx 781 movl 12(%esp),%esi 782 movl 16(%esp),%ecx 783 cld 784 NOP 785 rep 786 outsb 787 NOP 788 movl %esi,%eax 789 popl %esi 790 ret 791 792 /* 793 * void lgdt(struct region_descriptor *rdp); 794 */ 795 .globl _lgdt 796 ALIGN32 797_lgdt: 798 /* reload the descriptor table */ 799 movl 4(%esp),%eax 800 lgdt (%eax) 801 /* flush the prefetch q */ 802 jmp 1f 803 nop 8041: 805 /* reload "stale" selectors */ 806 # movw $KDSEL,%ax 807 movw $0x10,%ax 808 movw %ax,%ds 809 movw %ax,%es 810 movw %ax,%ss 811 812 /* reload code selector by turning return into intersegmental return */ 813 movl 0(%esp),%eax 814 pushl %eax 815 # movl $KCSEL,4(%esp) 816 movl $8,4(%esp) 817 lret 818 819 /* 820 * void lidt(struct region_descriptor *rdp); 821 */ 822 .globl _lidt 823 ALIGN32 824_lidt: 825 movl 4(%esp),%eax 826 lidt (%eax) 827 ret 828 829 /* 830 * void lldt(u_short sel) 831 */ 832 .globl _lldt 833 ALIGN32 834_lldt: 835 lldt 4(%esp) 836 ret 837 838 /* 839 * void ltr(u_short sel) 840 */ 841 .globl _ltr 842 ALIGN32 843_ltr: 844 ltr 4(%esp) 845 ret 846 847 /* 848 * void lcr3(caddr_t cr3) 849 */ 850 .globl _lcr3 851 .globl _load_cr3 852 ALIGN32 853_load_cr3: 854_lcr3: 855 inb $0x84,%al # check wristwatch 856 movl 4(%esp),%eax 857 orl $ 
I386_CR3PAT,%eax 858 movl %eax,%cr3 859 inb $0x84,%al # check wristwatch 860 ret 861 862 # tlbflush() 863 .globl _tlbflush 864 ALIGN32 865_tlbflush: 866 inb $0x84,%al # check wristwatch 867 movl %cr3,%eax 868 orl $ I386_CR3PAT,%eax 869 movl %eax,%cr3 870 inb $0x84,%al # check wristwatch 871 ret 872 873 # lcr0(cr0) 874 .globl _lcr0,_load_cr0 875 ALIGN32 876_lcr0: 877_load_cr0: 878 movl 4(%esp),%eax 879 movl %eax,%cr0 880 ret 881 882 # rcr0() 883 .globl _rcr0 884 ALIGN32 885_rcr0: 886 movl %cr0,%eax 887 ret 888 889 # rcr2() 890 .globl _rcr2 891 ALIGN32 892_rcr2: 893 movl %cr2,%eax 894 ret 895 896 # rcr3() 897 .globl _rcr3 898 .globl __cr3 899 ALIGN32 900__cr3: 901_rcr3: 902 movl %cr3,%eax 903 ret 904 905 # ssdtosd(*ssdp,*sdp) 906 .globl _ssdtosd 907 ALIGN32 908_ssdtosd: 909 pushl %ebx 910 movl 8(%esp),%ecx 911 movl 8(%ecx),%ebx 912 shll $16,%ebx 913 movl (%ecx),%edx 914 roll $16,%edx 915 movb %dh,%bl 916 movb %dl,%bh 917 rorl $8,%ebx 918 movl 4(%ecx),%eax 919 movw %ax,%dx 920 andl $0xf0000,%eax 921 orl %eax,%ebx 922 movl 12(%esp),%ecx 923 movl %edx,(%ecx) 924 movl %ebx,4(%ecx) 925 popl %ebx 926 ret 927 928/* 929 * {fu,su},{byte,word} 930 */ 931 ALIGN32 932ALTENTRY(fuiword) 933ENTRY(fuword) 934 movl _curpcb,%ecx 935 movl $fusufault,PCB_ONFAULT(%ecx) 936 movl 4(%esp),%edx 937 .byte 0x65 # use gs 938 movl 0(%edx),%eax 939 movl $0,PCB_ONFAULT(%ecx) 940 ret 941 942 ALIGN32 943ENTRY(fusword) 944 movl _curpcb,%ecx 945 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate 946 movl 4(%esp),%edx 947 .byte 0x65 # use gs 948 movzwl 0(%edx),%eax 949 movl $0,PCB_ONFAULT(%ecx) 950 ret 951 952 ALIGN32 953ALTENTRY(fuibyte) 954ENTRY(fubyte) 955 movl _curpcb,%ecx 956 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate 957 movl 4(%esp),%edx 958 .byte 0x65 # use gs 959 movzbl 0(%edx),%eax 960 movl $0,PCB_ONFAULT(%ecx) 961 ret 962 963 ALIGN32 964fusufault: 965 movl _curpcb,%ecx 966 xorl %eax,%eax 967 movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection 
violate 968 decl %eax 969 ret 970 971 ALIGN32 972ALTENTRY(suiword) 973ENTRY(suword) 974 movl _curpcb,%ecx 975 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate 976 movl 4(%esp),%edx 977 movl 8(%esp),%eax 978 979#ifdef notdef 980 shrl $IDXSHIFT, %edx /* fetch pte associated with address */ 981 andb $0xfc, %dl 982 movl _PTmap(%edx), %edx 983 984 andb $7, %dl /* if we are the one case that won't trap... */ 985 cmpb $5 , %edx 986 jne 1f 987 /* ... then simulate the trap! */ 988 pushl %edi 989 call _trapwrite /* trapwrite(addr) */ 990 popl %edx 991 cmpl $0, %eax /* if not ok, return */ 992 jne fusufault 993 movl 8(%esp),%eax /* otherwise, continue with reference */ 9941: 995 movl 4(%esp),%edx 996#endif 997 .byte 0x65 # use gs 998 movl %eax,0(%edx) 999 xorl %eax,%eax 1000 movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate 1001 ret 1002 1003 ALIGN32 1004ENTRY(susword) 1005 movl _curpcb,%ecx 1006 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate 1007 movl 4(%esp),%edx 1008 movl 8(%esp),%eax 1009#ifdef notdef 1010shrl $IDXSHIFT, %edx /* calculate pte address */ 1011andb $0xfc, %dl 1012movl _PTmap(%edx), %edx 1013andb $7, %edx /* if we are the one case that won't trap... */ 1014cmpb $5 , %edx 1015jne 1f 1016/* ..., then simulate the trap! 
*/ 1017 pushl %edi 1018 call _trapwrite /* trapwrite(addr) */ 1019 popl %edx 1020movl _curpcb, %ecx # restore trashed registers 1021cmpl $0, %eax /* if not ok, return */ 1022jne fusufault 1023movl 8(%esp),%eax 10241: movl 4(%esp),%edx 1025#endif 1026 .byte 0x65 # use gs 1027 movw %ax,0(%edx) 1028 xorl %eax,%eax 1029 movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate 1030 ret 1031 1032 ALIGN32 1033ALTENTRY(suibyte) 1034ENTRY(subyte) 1035 movl _curpcb,%ecx 1036 movl $fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate 1037 movl 4(%esp),%edx 1038 movl 8(%esp),%eax 1039#ifdef notdef 1040shrl $IDXSHIFT, %edx /* calculate pte address */ 1041andb $0xfc, %dl 1042movl _PTmap(%edx), %edx 1043andb $7, %edx /* if we are the one case that won't trap... */ 1044cmpb $5 , %edx 1045jne 1f 1046/* ..., then simulate the trap! */ 1047 pushl %edi 1048 call _trapwrite /* trapwrite(addr) */ 1049 popl %edx 1050movl _curpcb, %ecx # restore trashed registers 1051cmpl $0, %eax /* if not ok, return */ 1052jne fusufault 1053movl 8(%esp),%eax 10541: movl 4(%esp),%edx 1055#endif 1056 .byte 0x65 # use gs 1057 movb %eax,0(%edx) 1058 xorl %eax,%eax 1059 movl %eax,PCB_ONFAULT(%ecx) #in case we page/protection violate 1060 ret 1061 1062 ALIGN32 1063 ENTRY(setjmp) 1064 movl 4(%esp),%eax 1065 movl %ebx, 0(%eax) # save ebx 1066 movl %esp, 4(%eax) # save esp 1067 movl %ebp, 8(%eax) # save ebp 1068 movl %esi,12(%eax) # save esi 1069 movl %edi,16(%eax) # save edi 1070 movl (%esp),%edx # get rta 1071 movl %edx,20(%eax) # save eip 1072 xorl %eax,%eax # return (0); 1073 ret 1074 1075 ALIGN32 1076 ENTRY(longjmp) 1077 movl 4(%esp),%eax 1078 movl 0(%eax),%ebx # restore ebx 1079 movl 4(%eax),%esp # restore esp 1080 movl 8(%eax),%ebp # restore ebp 1081 movl 12(%eax),%esi # restore esi 1082 movl 16(%eax),%edi # restore edi 1083 movl 20(%eax),%edx # get rta 1084 movl %edx,(%esp) # put in return frame 1085 xorl %eax,%eax # return (1); 1086 incl %eax 1087 ret 1088/* 1089 * The following 
primitives manipulate the run queues. 1090 * _whichqs tells which of the 32 queues _qs 1091 * have processes in them. Setrq puts processes into queues, Remrq 1092 * removes them from queues. The running process is on no queue, 1093 * other processes are on a queue related to p->p_pri, divided by 4 1094 * actually to shrink the 0-127 range of priorities into the 32 available 1095 * queues. 1096 */ 1097 1098 .globl _whichqs,_qs,_cnt,_panic 1099 .comm _noproc,4 1100 .comm _runrun,4 1101 1102/* 1103 * Setrq(p) 1104 * 1105 * Call should be made at spl6(), and p->p_stat should be SRUN 1106 */ 1107 ALIGN32 1108ENTRY(setrq) 1109 movl 4(%esp),%eax 1110 cmpl $0,P_RLINK(%eax) # should not be on q already 1111 je set1 1112 pushl $set2 1113 call _panic 1114set1: 1115 movzbl P_PRI(%eax),%edx 1116 shrl $2,%edx 1117 btsl %edx,_whichqs # set q full bit 1118 shll $3,%edx 1119 addl $_qs,%edx # locate q hdr 1120 movl %edx,P_LINK(%eax) # link process on tail of q 1121 movl P_RLINK(%edx),%ecx 1122 movl %ecx,P_RLINK(%eax) 1123 movl %eax,P_RLINK(%edx) 1124 movl %eax,P_LINK(%ecx) 1125 ret 1126 1127set2: .asciz "setrq" 1128 1129/* 1130 * Remrq(p) 1131 * 1132 * Call should be made at spl6(). 1133 */ 1134 ALIGN32 1135ENTRY(remrq) 1136 movl 4(%esp),%eax 1137 movzbl P_PRI(%eax),%edx 1138 shrl $2,%edx 1139 btrl %edx,_whichqs # clear full bit, panic if clear already 1140 jb rem1 1141 pushl $rem3 1142 call _panic 1143rem1: 1144 pushl %edx 1145 movl P_LINK(%eax),%ecx # unlink process 1146 movl P_RLINK(%eax),%edx 1147 movl %edx,P_RLINK(%ecx) 1148 movl P_RLINK(%eax),%ecx 1149 movl P_LINK(%eax),%edx 1150 movl %edx,P_LINK(%ecx) 1151 popl %edx 1152 movl $_qs,%ecx 1153 shll $3,%edx 1154 addl %edx,%ecx 1155 cmpl P_LINK(%ecx),%ecx # q still has something? 
1156 je rem2 1157 shrl $3,%edx # yes, set bit as still full 1158 btsl %edx,_whichqs 1159rem2: 1160 movl $0,P_RLINK(%eax) # zap reverse link to indicate off list 1161 ret 1162 1163rem3: .asciz "remrq" 1164sw0: .asciz "swtch" 1165 1166/* 1167 * When no processes are on the runq, Swtch branches to idle 1168 * to wait for something to come ready. 1169 */ 1170 .globl Idle 1171 ALIGN32 1172Idle: 1173idle: 1174 call _spl0 1175 cmpl $0,_whichqs 1176 jne sw1 1177 hlt # wait for interrupt 1178 jmp idle 1179 1180 .align 4 /* ..so that profiling doesn't lump Idle with swtch().. */ 1181badsw: 1182 pushl $sw0 1183 call _panic 1184 /*NOTREACHED*/ 1185 1186/* 1187 * Swtch() 1188 */ 1189 ALIGN32 1190ENTRY(swtch) 1191 1192 incl _cnt+V_SWTCH 1193 1194 /* switch to new process. first, save context as needed */ 1195 1196 movl _curproc,%ecx 1197 1198 /* if no process to save, don't bother */ 1199 cmpl $0,%ecx 1200 je sw1 1201 1202 movl P_ADDR(%ecx),%ecx 1203 1204 1205 movl (%esp),%eax # Hardware registers 1206 movl %eax, PCB_EIP(%ecx) 1207 movl %ebx, PCB_EBX(%ecx) 1208 movl %esp, PCB_ESP(%ecx) 1209 movl %ebp, PCB_EBP(%ecx) 1210 movl %esi, PCB_ESI(%ecx) 1211 movl %edi, PCB_EDI(%ecx) 1212 1213#ifdef NPX 1214 /* have we used fp, and need a save? */ 1215 mov _curproc,%eax 1216 cmp %eax,_npxproc 1217 jne 1f 1218 pushl %ecx /* h/w bugs make saving complicated */ 1219 leal PCB_SAVEFPU(%ecx),%eax 1220 pushl %eax 1221 call _npxsave /* do it in a big C function */ 1222 popl %eax 1223 popl %ecx 12241: 1225#endif 1226 1227 movl _CMAP2,%eax # save temporary map PTE 1228 movl %eax,PCB_CMAP2(%ecx) # in our context 1229 movl $0,_curproc # out of process 1230 1231 # movw _cpl, %ax 1232 # movw %ax, PCB_IML(%ecx) # save ipl 1233 1234 /* save is done, now choose a new process or idle */ 1235sw1: 1236 movl _whichqs,%edi 12372: 1238 cli 1239 bsfl %edi,%eax # find a full q 1240 jz idle # if none, idle 1241 # XX update whichqs? 
1242swfnd: 1243 btrl %eax,%edi # clear q full status 1244 jnb 2b # if it was clear, look for another 1245 movl %eax,%ebx # save which one we are using 1246 1247 shll $3,%eax 1248 addl $_qs,%eax # select q 1249 movl %eax,%esi 1250 1251#ifdef DIAGNOSTIC 1252 cmpl P_LINK(%eax),%eax # linked to self? (e.g. not on list) 1253 je badsw # not possible 1254#endif 1255 1256 movl P_LINK(%eax),%ecx # unlink from front of process q 1257 movl P_LINK(%ecx),%edx 1258 movl %edx,P_LINK(%eax) 1259 movl P_RLINK(%ecx),%eax 1260 movl %eax,P_RLINK(%edx) 1261 1262 cmpl P_LINK(%ecx),%esi # q empty 1263 je 3f 1264 btsl %ebx,%edi # nope, set to indicate full 12653: 1266 movl %edi,_whichqs # update q status 1267 1268 movl $0,%eax 1269 movl %eax,_want_resched 1270 1271#ifdef DIAGNOSTIC 1272 cmpl %eax,P_WCHAN(%ecx) 1273 jne badsw 1274 cmpb $ SRUN,P_STAT(%ecx) 1275 jne badsw 1276#endif 1277 1278 movl %eax,P_RLINK(%ecx) /* isolate process to run */ 1279 movl P_ADDR(%ecx),%edx 1280 movl PCB_CR3(%edx),%ebx 1281 1282 /* switch address space */ 1283 movl %ebx,%cr3 1284 1285 /* restore context */ 1286 movl PCB_EBX(%edx), %ebx 1287 movl PCB_ESP(%edx), %esp 1288 movl PCB_EBP(%edx), %ebp 1289 movl PCB_ESI(%edx), %esi 1290 movl PCB_EDI(%edx), %edi 1291 movl PCB_EIP(%edx), %eax 1292 movl %eax, (%esp) 1293 1294 movl PCB_CMAP2(%edx),%eax # get temporary map 1295 movl %eax,_CMAP2 # reload temporary map PTE 1296 1297 movl %ecx,_curproc # into next process 1298 movl %edx,_curpcb 1299 1300 /* pushl PCB_IML(%edx) 1301 call _splx 1302 popl %eax*/ 1303 1304 movl %edx,%eax # return (1); 1305 ret 1306 1307 .globl _mvesp 1308 ALIGN32 1309_mvesp: movl %esp,%eax 1310 ret 1311/* 1312 * struct proc *swtch_to_inactive(p) ; struct proc *p; 1313 * 1314 * At exit of a process, move off the address space of the 1315 * process and onto a "safe" one. Then, on a temporary stack 1316 * return and run code that disposes of the old state. 
1317 * Since this code requires a parameter from the "old" stack, 1318 * pass it back as a return value. 1319 */ 1320 ALIGN32 1321ENTRY(swtch_to_inactive) 1322 popl %edx # old pc 1323 popl %eax # arg, our return value 1324 movl _IdlePTD,%ecx 1325 movl %ecx,%cr3 # good bye address space 1326 #write buffer? 1327 movl $tmpstk-4,%esp # temporary stack, compensated for call 1328 jmp %edx # return, execute remainder of cleanup 1329 1330/* 1331 * savectx(pcb, altreturn) 1332 * Update pcb, saving current processor state and arranging 1333 * for alternate return ala longjmp in swtch if altreturn is true. 1334 */ 1335 ALIGN32 1336ENTRY(savectx) 1337 movl 4(%esp), %ecx 1338 movw _cpl, %ax 1339 movw %ax, PCB_IML(%ecx) 1340 movl (%esp), %eax 1341 movl %eax, PCB_EIP(%ecx) 1342 movl %ebx, PCB_EBX(%ecx) 1343 movl %esp, PCB_ESP(%ecx) 1344 movl %ebp, PCB_EBP(%ecx) 1345 movl %esi, PCB_ESI(%ecx) 1346 movl %edi, PCB_EDI(%ecx) 1347 1348#ifdef NPX 1349 /* 1350 * If npxproc == NULL, then the npx h/w state is irrelevant and the 1351 * state had better already be in the pcb. This is true for forks 1352 * but not for dumps (the old book-keeping with FP flags in the pcb 1353 * always lost for dumps because the dump pcb has 0 flags). 1354 * 1355 * If npxproc != NULL, then we have to save the npx h/w state to 1356 * npxproc's pcb and copy it to the requested pcb, or save to the 1357 * requested pcb and reload. Copying is easier because we would 1358 * have to handle h/w bugs for reloading. We used to lose the 1359 * parent's npx state for forks by forgetting to reload. 
1360 */ 1361 mov _npxproc,%eax 1362 testl %eax,%eax 1363 je 1f 1364 1365 pushl %ecx 1366 movl P_ADDR(%eax),%eax 1367 leal PCB_SAVEFPU(%eax),%eax 1368 pushl %eax 1369 pushl %eax 1370 call _npxsave 1371 popl %eax 1372 popl %eax 1373 popl %ecx 1374 1375 pushl %ecx 1376 pushl $108+8*2 /* XXX h/w state size + padding */ 1377 leal PCB_SAVEFPU(%ecx),%ecx 1378 pushl %ecx 1379 pushl %eax 1380 call _bcopy 1381 addl $12,%esp 1382 popl %ecx 13831: 1384#endif 1385 1386 movl _CMAP2, %edx # save temporary map PTE 1387 movl %edx, PCB_CMAP2(%ecx) # in our context 1388 1389 cmpl $0, 8(%esp) 1390 je 1f 1391 movl %esp, %edx # relocate current sp relative to pcb 1392 subl $_kstack, %edx # (sp is relative to kstack): 1393 addl %edx, %ecx # pcb += sp - kstack; 1394 movl %eax, (%ecx) # write return pc at (relocated) sp@ 1395 # this mess deals with replicating register state gcc hides 1396 movl 12(%esp),%eax 1397 movl %eax,12(%ecx) 1398 movl 16(%esp),%eax 1399 movl %eax,16(%ecx) 1400 movl 20(%esp),%eax 1401 movl %eax,20(%ecx) 1402 movl 24(%esp),%eax 1403 movl %eax,24(%ecx) 14041: 1405 xorl %eax, %eax # return 0 1406 ret 1407 1408/* 1409 * addupc(int pc, struct uprof *up, int ticks): 1410 * update profiling information for the user process. 
 */

	ALIGN32
ENTRY(addupc)
	pushl %ebp
	movl %esp,%ebp
	movl 12(%ebp),%edx		/* %edx = up */
	movl 8(%ebp),%eax		/* %eax = pc */

	subl PR_OFF(%edx),%eax		/* pc -= up->pr_off */
	jl L1				/* if (pc < 0) return */

	shrl $1,%eax			/* praddr = pc >> 1 */
	imull PR_SCALE(%edx),%eax	/* praddr *= up->pr_scale */
	shrl $15,%eax			/* praddr >>= 15 (drop fixed-point
					 * fraction of the scale product) */
	andl $-2,%eax			/* praddr &= ~1 (halfword align) */

	cmpl PR_SIZE(%edx),%eax		/* if (praddr > up->pr_size) return */
	ja L1

/*	addl %eax,%eax -- praddr -> word offset (disabled) */
	addl PR_BASE(%edx),%eax		/* praddr += up->pr_base */
	movl 16(%ebp),%ecx		/* %ecx = ticks */

	/* arm the pcb onfault handler so a bad buffer faults safely */
	movl _curpcb,%edx
	movl $proffault,PCB_ONFAULT(%edx)
	addl %ecx,(%eax)		/* storage location += ticks */
	movl $0,PCB_ONFAULT(%edx)
L1:
	leave
	ret

	ALIGN32
proffault:
	/* if we get a fault, then kill profiling all together */
	movl $0,PCB_ONFAULT(%edx)	/* squish the fault handler */
	movl 12(%ebp),%ecx
	movl $0,PR_SCALE(%ecx)		/* up->pr_scale = 0 stops profiling */
	leave
	ret

.data
	ALIGN32
	.globl	_cyloffset, _curpcb
_cyloffset:	.long	0
	.globl	_proc0paddr
_proc0paddr:	.long	0
LF:	.asciz "swtch %x"		# debug printf format string

.text
 # To be done:
	.globl _astoff
_astoff:
	ret

/*
 * IDTVEC(name): emit an aligned, exported interrupt-vector entry _Xname.
 */
#define	IDTVEC(name)	.align 4; .globl _X/**/name; _X/**/name:
#define	PANIC(msg)	xorl %eax,%eax; movl %eax,_waittime; pushl 1f; \
			call _panic; 1: .asciz msg
#define	PRINTF(n,msg)	pushal ; nop ; pushl 1f; call _printf; MSG(msg) ; \
			popl %eax ; popal
#define	MSG(msg)	.data; 1: .asciz msg; .text

	.text

/*
 * Trap and fault vector routines
 */
/* TRAP(a): push the trap type code and join the common trap path. */
#define	TRAP(a)		pushl $(a) ; jmp alltraps
#ifdef KGDB
#define	BPTTRAP(a)	pushl $(a) ; jmp bpttraps
#else
#define	BPTTRAP(a)	TRAP(a)
#endif

/*
 * Vectors whose exception pushes no hardware error code push a
 * dummy $0 so all trap frames have the same shape.
 */
IDTVEC(div)
	pushl $0; TRAP(T_DIVIDE)
IDTVEC(dbg)
	pushl $0; BPTTRAP(T_TRCTRAP)
IDTVEC(nmi)
	pushl $0; TRAP(T_NMI)
IDTVEC(bpt)
	pushl $0; BPTTRAP(T_BPTFLT)
IDTVEC(ofl)
	pushl $0; TRAP(T_OFLOW)
IDTVEC(bnd)
	pushl $0; TRAP(T_BOUND)
IDTVEC(ill)
	pushl $0; TRAP(T_PRIVINFLT)
IDTVEC(dna)
	pushl $0; TRAP(T_DNA)
/* the CPU pushes its own error code for the following faults */
IDTVEC(dble)
	TRAP(T_DOUBLEFLT)
	/*PANIC("Double Fault");*/
IDTVEC(fpusegm)
	pushl $0; TRAP(T_FPOPFLT)
IDTVEC(tss)
	TRAP(T_TSSFLT)
	/*PANIC("TSS not valid");*/
IDTVEC(missing)
	TRAP(T_SEGNPFLT)
IDTVEC(stk)
	TRAP(T_STKFLT)
IDTVEC(prot)
	TRAP(T_PROTFLT)
IDTVEC(page)
	TRAP(T_PAGEFLT)
IDTVEC(rsvd)
	pushl $0; TRAP(T_RESERVED)
IDTVEC(fpu)
#ifdef NPX
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl $0		/* dummy error code */
	pushl $T_ASTFLT		/* trap type doreti uses for AST handling */
	pushal
	nop			/* silly, the bug is for popal and it only
				 * bites when the next instruction has a
				 * complicated address mode */
	pushl %ds
	pushl %es		/* now the stack frame is a trap frame */
	movl $KDSEL,%eax	/* switch to kernel data segments */
	movl %ax,%ds
	movl %ax,%es
	pushl _cpl		/* finish building an interrupt frame */
	pushl $0		/* dummy unit to finish building intr frame */
	incl _cnt+V_TRAP	/* account this as a trap */
	call _npxintr
	jmp doreti
#else
	pushl $0; TRAP(T_ARITHTRAP)
#endif
	/* 17 - 31 reserved for future exp */
IDTVEC(rsvd0)
	pushl $0; TRAP(17)
IDTVEC(rsvd1)
	pushl $0; TRAP(18)
IDTVEC(rsvd2)
	pushl $0; TRAP(19)
IDTVEC(rsvd3)
	pushl $0; TRAP(20)
IDTVEC(rsvd4)
	pushl $0; TRAP(21)
IDTVEC(rsvd5)
	pushl $0; TRAP(22)
IDTVEC(rsvd6)
	pushl $0; TRAP(23)
IDTVEC(rsvd7)
	pushl $0; TRAP(24)
IDTVEC(rsvd8)
	pushl $0; TRAP(25)
IDTVEC(rsvd9)
	pushl $0; TRAP(26)
IDTVEC(rsvd10)
	pushl $0; TRAP(27)
IDTVEC(rsvd11)
	pushl $0; TRAP(28)
IDTVEC(rsvd12)
	pushl $0; TRAP(29)
IDTVEC(rsvd13)
	pushl $0; TRAP(30)
IDTVEC(rsvd14)
	pushl $0; TRAP(31)

/*
 * Common trap entry: complete the trap frame (general registers plus
 * %ds/%es), switch to kernel data segments, and call the C handler.
 */
	ALIGN32
alltraps:
	pushal
	nop
	push %ds
	push %es
	# movw $KDSEL,%ax
	movw $0x10,%ax		# KDSEL, the kernel data selector
	movw %ax,%ds
	movw %ax,%es
calltrap:
	incl _cnt+V_TRAP
	call _trap
	/*
	 * Return through doreti to handle ASTs.  Have to change trap frame
	 * to interrupt frame.
	 */
	movl $T_ASTFLT,4+4+32(%esp)	/* new trap type (err code not used) */
	pushl _cpl
	pushl $0			/* dummy unit */
	jmp doreti

#ifdef KGDB
/*
 * This code checks for a kgdb trap, then falls through
 * to the regular trap code.
 * NOTE(review): %es/%ds are pushed here in the opposite order from
 * alltraps above -- verify against the frame layout doreti expects.
 */
	ALIGN32
bpttraps:
	pushal
	nop
	push %es
	push %ds
	# movw $KDSEL,%ax
	movw $0x10,%ax		# KDSEL, the kernel data selector
	movw %ax,%ds
	movw %ax,%es
	movzwl 52(%esp),%eax	# saved %cs of the trapped context
	test $3,%eax		# RPL != 0 => trap came from user mode:
	jne calltrap		# hand it to the normal trap path
	call _kgdb_trap_glue	# kernel-mode breakpoint: give kgdb a look
	jmp calltrap
#endif

/*
 * Call gate entry for syscall
 */
	ALIGN32
IDTVEC(syscall)
	pushfl	# only for stupid carry bit and more stupid wait3 cc kludge
	pushal	# only need eax,ecx,edx - trap resaves others
	nop
	movl $KDSEL,%eax	# switch to kernel segments
	movl %ax,%ds
	movl %ax,%es
	incl _cnt+V_SYSCALL	# kml 3/25/93
	call _syscall
	/*
	 * Return through doreti to handle ASTs.  Have to change syscall frame
	 * to interrupt frame.
	 *
	 * XXX - we should have set up the frame earlier to avoid the
	 * following popal/pushal (not much can be done to avoid shuffling
	 * the flags).  Consistent frames would simplify things all over.
	 */
	movl 32+0(%esp),%eax	/* old flags, shuffle to above cs:eip */
	movl 32+4(%esp),%ebx	/* `int' frame should have been ef, eip, cs */
	movl 32+8(%esp),%ecx
	movl %ebx,32+0(%esp)
	movl %ecx,32+4(%esp)
	movl %eax,32+8(%esp)
	popal			/* restore registers _syscall may have set */
	nop
	pushl $0		/* dummy error code */
	pushl $T_ASTFLT
	pushal
	nop
	movl __udatasel,%eax	/* switch back to user segments */
	push %eax		/* XXX - better to preserve originals? */
	push %eax
	pushl _cpl
	pushl $0
	jmp doreti

/*
 * htonl(x) / ntohl(x): 32-bit byte-order swap.
 * Argument at 4(%esp); byte-reversed result in %eax.
 */
	ALIGN32
ENTRY(htonl)
ENTRY(ntohl)
	movl 4(%esp),%eax
	xchgb %al,%ah		# swap the low two bytes
	roll $16,%eax		# exchange the 16-bit halves
	xchgb %al,%ah		# swap the (new) low two bytes
	ret

/*
 * htons(x) / ntohs(x): 16-bit byte-order swap.
 * Argument at 4(%esp); result zero-extended into %eax.
 */
	ALIGN32
ENTRY(htons)
ENTRY(ntohs)
	movzwl 4(%esp),%eax
	xchgb %al,%ah
	ret

#include "vector.s"
#include "i386/isa/icu.s"