/*-
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * %sccs.include.redist.c%
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *	from NetBSD: Id: locore.s,v 1.12 1993/05/27 16:44:13 cgd Exp
 *
 *	@(#)locore.s	8.3 (Berkeley) 09/23/93
 */


/*
 * locore.s:	4BSD machine support for the Intel 386
 *		Preliminary version
 *		Written by William F. Jolitz, 386BSD Project
 *
 * AT&T/gas syntax, run through cpp.  Contains the kernel entry point,
 * bootstrap page-table construction, low-level support routines, and
 * the trap/interrupt vector stubs.
 */

#include "assym.s"
#include "machine/psl.h"
#include "machine/pte.h"

#include "errno.h"

#include "machine/trap.h"

#include "machine/specialreg.h"

#ifdef cgd_notdef
#include "machine/cputypes.h"
#endif

#define	KDSEL		0x10	/* kernel data segment selector */

/*
 * Note: This version greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas. Perhaps newer versions
 * will have more pleasant appearance.
 */

	.set	IDXSHIFT,10
	.set	SYSTEM,0xFE000000	# virtual address of system start
	/*note: gas copys sign bit (e.g. arithmetic >>), can't do SYSTEM>>22! */
	.set	SYSPDROFF,0x3F8		# Page dir index of System Base

/* NOP: two reads of port 0x84 act as a short I/O delay on ISA machines */
#define	NOP	inb $0x84, %al ; inb $0x84, %al
#define	ALIGN32	.align 2	/* 2^2 = 4 */

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.set	PDRPDROFF,0x3F7		# Page dir index of Page dir
	.globl	_PTmap, _PTD, _PTDpde, _Sysmap
	.set	_PTmap,0xFDC00000
	.set	_PTD,0xFDFF7000
	.set	_Sysmap,0xFDFF8000
	.set	_PTDpde,0xFDFF7000+4*PDRPDROFF

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.set	APDRPDROFF,0x3FE	# Page dir index of Page dir
	.globl	_APTmap, _APTD, _APTDpde
	.set	_APTmap,0xFF800000
	.set	_APTD,0xFFBFE000
	.set	_APTDpde,0xFDFF7000+4*APDRPDROFF

/*
 * Access to each processes kernel stack is via a region of
 * per-process address space (at the beginning), immediately above
 * the user process stack.
 */
	.set	_kstack, USRSTACK
	.globl	_kstack
	.set	PPDROFF,0x3F6
	.set	PPTEOFF,0x400-UPAGES	# 0x3FE

#define	ENTRY(name) \
	.globl _/**/name; _/**/name:
#define	ALTENTRY(name) \
	.globl _/**/name; _/**/name:

/*
 * Initialization
 */
	.data
	.globl	_cpu,_cold,_boothowto,_bootdev,_cyloffset,_atdevbase,_atdevphys
_cpu:	.long	0	# are we 386, 386sx, or 486
_cold:	.long	1	# cold till we are not
_atdevbase:	.long	0	# location of start of iomem in virtual
_atdevphys:	.long	0	# location of device mapping ptes (phys)

	.globl	_IdlePTD, _KPTphys
_IdlePTD:	.long	0
_KPTphys:	.long	0

	.space 512		# early boot stack grows down from tmpstk
tmpstk:
	.text
	.globl	start
start:	movw	$0x1234,%ax
	movw	%ax,0x472	# warm boot
	jmp	1f
	.space	0x500		# skip over warm boot shit

	/*
	 * pass parameters on stack (howto, bootdev, unit, cyloffset)
	 * note: 0(%esp) is return address of boot
	 * ( if we want to hold onto /boot, it's physical %esp up to _end)
	 *
	 * Paging is not yet enabled, so all kernel symbols must be
	 * referenced with their physical address: symbol-SYSTEM.
	 */

1:	movl	4(%esp),%eax
	movl	%eax,_boothowto-SYSTEM
	movl	8(%esp),%eax
	movl	%eax,_bootdev-SYSTEM
	movl	12(%esp),%eax
	movl	%eax, _cyloffset-SYSTEM

#ifdef cgd_notdef
	/* find out our CPU type (386 vs 486 via the EFLAGS AC bit). */
	pushfl
	popl	%eax
	movl	%eax, %ecx
	xorl	$0x40000, %eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx, %eax
	shrl	$18, %eax
	andl	$1, %eax
	push	%ecx
	popfl

	cmpl	$0, %eax
	jne	1f
	movl	$CPU_386, _cpu-SYSTEM
	jmp	2f
1:	movl	$CPU_486, _cpu-SYSTEM
2:
#endif

#ifdef garbage
	/* count up memory */

	xorl	%eax,%eax		# start with base memory at 0x0
	#movl	$ 0xA0000/NBPG,%ecx	# look every 4K up to 640K
	movl	$ 0xA0,%ecx		# look every 4K up to 640K
1:	movl	0(%eax),%ebx		# save location to check
	movl	$0xa55a5aa5,0(%eax)	# write test pattern
	/* flush stupid cache here! (with bcopy (0,0,512*1024) ) */
	cmpl	$0xa55a5aa5,0(%eax)	# does not check yet for rollover
	jne	2f
	movl	%ebx,0(%eax)		# restore memory
	addl	$ NBPG,%eax
	loop	1b
2:	shrl	$12,%eax
	movl	%eax,_Maxmem-SYSTEM

	movl	$0x100000,%eax		# next, tally remaining memory
	#movl	$((0xFFF000-0x100000)/NBPG),%ecx
	movl	$(0xFFF-0x100),%ecx
1:	movl	0(%eax),%ebx		# save location to check
	movl	$0xa55a5aa5,0(%eax)	# write test pattern
	cmpl	$0xa55a5aa5,0(%eax)	# does not check yet for rollover
	jne	2f
	movl	%ebx,0(%eax)		# restore memory
	addl	$ NBPG,%eax
	loop	1b
2:	shrl	$12,%eax
	movl	%eax,_Maxmem-SYSTEM
#endif

/* find end of kernel image */
	movl	$_end-SYSTEM,%ecx
	addl	$ NBPG-1,%ecx		# page align up
	andl	$~(NBPG-1),%ecx
	movl	%ecx,%esi		# esi = physical start of free memory

/* clear bss and memory for bootstrap pagetables. */
	movl	$_edata-SYSTEM,%edi
	subl	%edi,%ecx
	addl	$(UPAGES+5)*NBPG,%ecx
/*
 * Virtual address space of kernel:
 *
 *	text | data | bss | page dir | proc0 kernel stack | usr stk map | Sysmap
 *			     0               1       2       3             4
 */
	xorl	%eax,%eax	# pattern
	cld
	rep
	stosb

	movl	%esi,_IdlePTD-SYSTEM /*physical address of Idle Address space */
	movl	$ tmpstk-SYSTEM,%esp	# bootstrap stack end location

/* fillkpt: write %ecx ptes starting at (%ebx), mapping from phys %eax */
#define	fillkpt		\
1:	movl	%eax,0(%ebx)	; \
	addl	$ NBPG,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b		;

/*
 * Map Kernel
 * N.B. don't bother with making kernel text RO, as 386
 * ignores R/W AND U/S bits on kernel access (only v works) !
 *
 * First step - build page tables
 */
	movl	%esi,%ecx		# this much memory,
	shrl	$ PGSHIFT,%ecx		# for this many pte s
	addl	$ UPAGES+4,%ecx		# including our early context
	movl	$ PG_V,%eax		#  having these bits set,
	lea	(4*NBPG)(%esi),%ebx	#   physical address of KPT in proc 0,
	movl	%ebx,_KPTphys-SYSTEM	#    in the kernel page table,
	fillkpt

/* map I/O memory map */

	movl	$0x100-0xa0,%ecx	# for this many pte s,
	movl	$(0xa0000|PG_V|PG_UW),%eax # having these bits set,(perhaps URW?) XXX 06 Aug 92
	movl	%ebx,_atdevphys-SYSTEM	#   remember phys addr of ptes
	fillkpt

 /* map proc 0's kernel stack into user page table page */

	movl	$ UPAGES,%ecx		# for this many pte s,
	lea	(1*NBPG)(%esi),%eax	# physical address in proc 0
	lea	(SYSTEM)(%eax),%edx
	movl	%edx,_proc0paddr-SYSTEM	# remember VA for 0th process init
	orl	$ PG_V|PG_URKW,%eax	#  having these bits set,
	lea	(3*NBPG)(%esi),%ebx	# physical address of stack pt in proc 0
	addl	$(PPTEOFF*4),%ebx
	fillkpt

/*
 * Construct a page table directory
 * (of page directory elements - pde's)
 */
	/* install a pde for temporary double map of bottom of VA */
	lea	(4*NBPG)(%esi),%eax	# physical address of kernel page table
	orl	$ PG_V|PG_UW,%eax	# pde entry is valid XXX 06 Aug 92
	movl	%eax,(%esi)		# which is where temp maps!

	/* kernel pde's */
	movl	$ 3,%ecx		# for this many pde s,
	lea	(SYSPDROFF*4)(%esi), %ebx	# offset of pde for kernel
	fillkpt

	/* install a pde recursively mapping page directory as a page table! */
	movl	%esi,%eax		# phys address of ptd in proc 0
	orl	$ PG_V|PG_UW,%eax	# pde entry is valid XXX 06 Aug 92
	movl	%eax, PDRPDROFF*4(%esi)	# which is where PTmap maps!

	/* install a pde to map kernel stack for proc 0 */
	lea	(3*NBPG)(%esi),%eax	# physical address of pt in proc 0
	orl	$ PG_V,%eax		# pde entry is valid
	movl	%eax,PPDROFF*4(%esi)	# which is where kernel stack maps!

	/* load base of page directory, and enable mapping */
	movl	%esi,%eax		# phys address of ptd in proc 0
	orl	$ I386_CR3PAT,%eax
	movl	%eax,%cr3		# load ptd addr into mmu
	movl	%cr0,%eax		# get control word
	orl	$0x80000001,%eax	# and let s page! (PG|PE)
	movl	%eax,%cr0		# NOW!

	pushl	$begin			# jump to high mem via the temp
	ret				#  low double-map, then the ret

begin: /* now running relocated at SYSTEM where the system is linked to run */

	.globl _Crtat
	movl	_Crtat,%eax
	subl	$0xfe0a0000,%eax
	movl	_atdevphys,%edx	# get pte PA
	subl	_KPTphys,%edx	# remove base of ptes, now have phys offset
	shll	$ PGSHIFT-2,%edx  # corresponding to virt offset
	addl	$ SYSTEM,%edx	# add virtual base
	movl	%edx, _atdevbase
	addl	%eax,%edx
	movl	%edx,_Crtat

	/* set up bootstrap stack */
	movl	$ _kstack+UPAGES*NBPG-4*12,%esp	# bootstrap stack end location
	xorl	%eax,%eax		# mark end of frames
	movl	%eax,%ebp
	movl	_proc0paddr, %eax
	movl	%esi, PCB_CR3(%eax)

	lea	7*NBPG(%esi),%esi	# skip past stack.
	pushl	%esi

	call	_init386		# wire 386 chip for unix operation

	movl	$0,_PTD			# tear down temp low double-map
	call	_main
	popl	%esi

	/* drop into user mode for process 1 via an inter-level lret */
	.globl	__ucodesel,__udatasel
	movzwl	__ucodesel,%eax
	movzwl	__udatasel,%ecx
	# build outer stack frame
	pushl	%ecx		# user ss
	pushl	$ USRSTACK	# user esp
	pushl	%eax		# user cs
	pushl	$0		# user ip
	movw	%cx,%ds
	movw	%cx,%es
	movw	%ax,%fs		# double map cs to fs
	movw	%cx,%gs		#   and ds to gs
	lret	# goto user!

	pushl	$lretmsg1	/* "should never get here!" */
	call	_panic
lretmsg1:
	.asciz	"lret: toinit\n"


	.set	exec,59
	.set	exit,1
	.globl	_icode
	.globl	_szicode

/* LCALL(x,y): hand-assembled far call, seg x offset y (syscall gate) */
#define	LCALL(x,y)	.byte 0x9a ; .long y; .word x
/*
 * Icode is copied out to process 1 to exec /etc/init.
 * If the exec fails, process 1 exits.
 */
_icode:
	# pushl	$argv-_icode	# gas fucks up again
	movl	$argv,%eax	# position-independent: compute argv by hand
	subl	$_icode,%eax
	pushl	%eax

	# pushl	$init-_icode
	movl	$init,%eax
	subl	$_icode,%eax
	pushl	%eax
	pushl	%eax		# dummy out rta

	movl	%esp,%ebp
	movl	$exec,%eax
	LCALL(0x7,0x0)		# exec("/sbin/init", argv)
	pushl	%eax		# exec failed: exit(errno)
	movl	$exit,%eax
	pushl	%eax		# dummy out rta
	LCALL(0x7,0x0)

init:
	.asciz	"/sbin/init"
	.align	2
argv:
	.long	init+6-_icode	# argv[0] = "init" ("/sbin/init" + 6)
	.long	eicode-_icode	# argv[1] follows icode after copyout
	.long	0
eicode:

_szicode:
	.long	_szicode-_icode

/*
 * Signal trampoline, copied to top of user stack: call the handler,
 * then sigreturn() back into the kernel.
 */
	.globl	_sigcode,_szsigcode
_sigcode:
	movl	12(%esp),%eax	# unsure if call will dec stack 1st
	call	%eax		# old-gas indirect call (modern: call *%eax)
	xorl	%eax,%eax	# smaller movl $103,%eax
	movb	$103,%al	# sigreturn()
	LCALL(0x7,0)		# enter kernel with args on stack
	hlt			# never gets here

_szsigcode:
	.long	_szsigcode-_sigcode

	/*
	 * Support routines for GCC
	 * ___udivsi3(a, b): unsigned 32-bit a / b
	 */
	.globl	___udivsi3
	ALIGN32
___udivsi3:
	movl	4(%esp),%eax
	xorl	%edx,%edx	# divl uses edx:eax; clear high half
	divl	8(%esp)
	ret

	/* ___divsi3(a, b): signed 32-bit a / b */
	.globl	___divsi3
	ALIGN32
___divsi3:
	movl	4(%esp),%eax
	#xorl	%edx,%edx	/* not needed - cltd sign extends into %edx */
	cltd
	idivl	8(%esp)
	ret

	/*
	 * I/O bus instructions via C
	 */
	.globl	_inb
	ALIGN32
_inb:	movl	4(%esp),%edx	# port
	subl	%eax,%eax	# clr eax
	NOP			# I/O recovery delay
	inb	%dx,%al
	ret


	.globl	_inw
	ALIGN32
_inw:	movl	4(%esp),%edx	# port
	subl	%eax,%eax	# clr eax
	NOP
	inw	%dx,%ax
	ret


	.globl	_rtcin
	ALIGN32
_rtcin:	movl	4(%esp),%eax	# RTC register index
	outb	%al,$0x70
	subl	%eax,%eax	# clr eax
	inb	$0x71,%al	# Compaq SystemPro
	ret

	.globl	_outb
	ALIGN32
_outb:	movl	4(%esp),%edx	# port
	NOP
	movl	8(%esp),%eax	# value
	outb	%al,%dx
	NOP
	ret

	.globl	_outw
	ALIGN32
_outw:	movl	4(%esp),%edx	# port
	NOP
	movl	8(%esp),%eax	# value
	outw	%ax,%dx
	NOP
	ret
	/*
	 * void bzero(void *base, u_int cnt)
	 * Zero cnt bytes at base: longword stores, then leftover bytes.
	 */

	.globl	_bzero
	ALIGN32
_bzero:
	pushl	%edi
	movl	8(%esp),%edi
	movl	12(%esp),%ecx
	xorl	%eax,%eax
	shrl	$2,%ecx
	cld
	rep
	stosl
	movl	12(%esp),%ecx
	andl	$3,%ecx		# remaining 0-3 bytes
	rep
	stosb
	popl	%edi
	ret

	/*
	 * fillw (pat,base,cnt)
	 * Store cnt 16-bit copies of pat at base (e.g. vga text memory).
	 */

	.globl	_fillw
	ALIGN32
_fillw:
	pushl	%edi
	movl	8(%esp),%eax
	movl	12(%esp),%edi
	movw	%ax, %cx	# replicate pattern into both halves of eax
	rorl	$16, %eax
	movw	%cx, %ax
	cld
	movl	16(%esp),%ecx
	shrl	%ecx		# word count / 2 -> longword count
	rep
	stosl
	movl	16(%esp),%ecx
	andl	$1, %ecx	# odd trailing word
	rep
	stosw
	popl	%edi
	ret

	/* bcopyb(src,dst,cnt): forward byte-at-a-time copy (i/o space) */
	.globl	_bcopyb
	ALIGN32
_bcopyb:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cld
	rep
	movsb
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret

	/*
	 * (ov)bcopy (src,dst,cnt)
	 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
	 * Overlap-safe: copies backwards when dst >= src.
	 */

	.globl	_bcopy,_ovbcopy
	ALIGN32
_ovbcopy:
_bcopy:
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	cmpl	%esi,%edi	/* potentially overlapping? */
	jnb	1f
	cld			/* nope, copy forwards. */
	shrl	$2,%ecx		/* copy by words */
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx		/* any bytes left? */
	rep
	movsb
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret
	ALIGN32
1:
	addl	%ecx,%edi	/* copy backwards. */
	addl	%ecx,%esi
	std
	andl	$3,%ecx		/* any fractional bytes? */
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	20(%esp),%ecx	/* copy remainder by words */
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	cld			/* restore expected direction flag */
	ret

#ifdef notdef
	/*
	 * copyout(from, to, len): kernel -> user copy.
	 * Faults are caught via PCB_ONFAULT -> cpyflt (returns EFAULT).
	 * This variant (disabled) pre-simulates write faults per page.
	 */
	.globl	_copyout
	ALIGN32
_copyout:
	movl	_curpcb, %eax
	movl	$cpyflt, PCB_ONFAULT(%eax)	# in case we page/protection violate
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	movl	16(%esp), %esi
	movl	20(%esp), %edi
	movl	24(%esp), %ebx

	/* first, check to see if "write fault" */
1:	movl	%edi, %eax
#ifdef notyet
	shrl	$IDXSHIFT, %eax	/* fetch pte associated with address */
	andb	$0xfc, %al
	movl	_PTmap(%eax), %eax

	andb	$7, %al		/* if we are the one case that won't trap... */
	cmpb	$5, %al
	jne	2f
	/* ... then simulate the trap! */
	pushl	%edi
	call	_trapwrite	/* trapwrite(addr) */
	popl	%edx

	cmpl	$0, %eax	/* if not ok, return */
	jne	cpyflt
	/* otherwise, continue with reference */
2:
	movl	%edi, %eax	/* calculate remainder this pass */
	andl	$0xfffff000, %eax
	movl	$NBPG, %ecx
	subl	%eax, %ecx
	cmpl	%ecx, %ebx
	jle	3f
	movl	%ebx, %ecx
3:	subl	%ecx, %ebx
	movl	%ecx, %edx
#else
	movl	%ebx, %ecx
	movl	%ebx, %edx
#endif

	shrl	$2,%ecx		/* movem */
	cld
	rep
	movsl
	movl	%edx, %ecx	/* don't depend on ecx here! */
	andl	$3, %ecx
	rep
	movsb

#ifdef notyet
	cmpl	$0, %ebx
	jl	1b
#endif

	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	/* copyin(from, to, len): user -> kernel copy (disabled variant) */
	.globl	_copyin
	ALIGN32
_copyin:
	movl	_curpcb,%eax
	movl	$cpyflt,PCB_ONFAULT(%eax)	# in case we page/protection violate
	pushl	%esi
	pushl	%edi
	pushl	%ebx
	# NOTE(review): with 3 regs pushed, args are at 16/20/24(%esp);
	# the 12/16/20 offsets below look stale (dead code) -- verify.
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	shrl	$2,%ecx
	cld
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx
	rep
	movsb
	popl	%ebx
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	ALIGN32
cpyflt:
	popl	%ebx
	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$ EFAULT,%eax
	ret
#else
	/*
	 * copyout(from, to, len): kernel -> user copy (active variant).
	 * Faults are caught via PCB_ONFAULT -> cpyflt (returns EFAULT).
	 */
	.globl	_copyout
	ALIGN32
_copyout:
	movl	_curpcb,%eax
	movl	$cpyflt,PCB_ONFAULT(%eax)	# in case we page/protection violate
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	shrl	$2,%ecx
	cld
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx
	rep
	movsb
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	/* copyin(from, to, len): user -> kernel copy */
	.globl	_copyin
	ALIGN32
_copyin:
	movl	_curpcb,%eax
	movl	$cpyflt,PCB_ONFAULT(%eax)	# in case we page/protection violate
	pushl	%esi
	pushl	%edi
	movl	12(%esp),%esi
	movl	16(%esp),%edi
	movl	20(%esp),%ecx
	shrl	$2,%ecx
	cld
	rep
	movsl
	movl	20(%esp),%ecx
	andl	$3,%ecx
	rep
	movsb
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	movl	_curpcb,%edx
	movl	%eax,PCB_ONFAULT(%edx)
	ret

	/* fault recovery for copyin/copyout: unwind and return EFAULT */
	ALIGN32
cpyflt:	popl	%edi
	popl	%esi
	movl	_curpcb,%edx
	movl	$0,PCB_ONFAULT(%edx)
	movl	$ EFAULT,%eax
	ret

#endif

	# insb(port,addr,cnt)
	.globl	_insb
	ALIGN32
_insb:
	pushl	%edi
movw	8(%esp),%dx
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	cld
	NOP
	rep
	insb
	NOP
	movl	%edi,%eax	# return pointer past last byte read
	popl	%edi
	ret

	# insw(port,addr,cnt)
	.globl	_insw
	ALIGN32
_insw:
	pushl	%edi
	movw	8(%esp),%dx
	movl	12(%esp),%edi
	movl	16(%esp),%ecx
	cld
	NOP
	.byte 0x66,0xf2,0x6d	# rep insw (hand-assembled for old gas)
	NOP
	movl	%edi,%eax	# return pointer past last word read
	popl	%edi
	ret

	# outsw(port,addr,cnt)
	.globl	_outsw
	ALIGN32
_outsw:
	pushl	%esi
	movw	8(%esp),%dx
	movl	12(%esp),%esi
	movl	16(%esp),%ecx
	cld
	NOP
	.byte 0x66,0xf2,0x6f	# rep outsw (hand-assembled for old gas)
	NOP
	movl	%esi,%eax	# return pointer past last word written
	popl	%esi
	ret

	# outsb(port,addr,cnt)
	.globl	_outsb
	ALIGN32
_outsb:
	pushl	%esi
	movw	8(%esp),%dx
	movl	12(%esp),%esi
	movl	16(%esp),%ecx
	cld
	NOP
	rep
	outsb
	NOP
	movl	%esi,%eax	# return pointer past last byte written
	popl	%esi
	ret

	/*
	 * void lgdt(struct region_descriptor *rdp);
	 */
	.globl	_lgdt
	ALIGN32
_lgdt:
	/* reload the descriptor table */
	movl	4(%esp),%eax
	lgdt	(%eax)
	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload "stale" selectors */
	# movw	$KDSEL,%ax
	movw	$0x10,%ax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%ss

	/* reload code selector by turning return into intersegmental return */
	movl	0(%esp),%eax
	pushl	%eax
	# movl	$KCSEL,4(%esp)
	movl	$8,4(%esp)
	lret

	/*
	 * void lidt(struct region_descriptor *rdp);
	 */
	.globl	_lidt
	ALIGN32
_lidt:
	movl	4(%esp),%eax
	lidt	(%eax)
	ret

	/*
	 * void lldt(u_short sel)
	 */
	.globl	_lldt
	ALIGN32
_lldt:
	lldt	4(%esp)
	ret

	/*
	 * void ltr(u_short sel)
	 */
	.globl	_ltr
	ALIGN32
_ltr:
	ltr	4(%esp)
	ret

	/*
	 * void lcr3(caddr_t cr3)
	 * Load page directory base; implicitly flushes the TLB.
	 */
	.globl	_lcr3
	.globl	_load_cr3
	ALIGN32
_load_cr3:
_lcr3:
	inb	$0x84,%al	# check wristwatch (i/o delay)
	movl	4(%esp),%eax
	orl	$ I386_CR3PAT,%eax
	movl	%eax,%cr3
	inb	$0x84,%al	# check wristwatch
	ret

	# tlbflush() - reload cr3 with itself to flush the TLB
	.globl	_tlbflush
	ALIGN32
_tlbflush:
	inb	$0x84,%al	# check wristwatch
	movl	%cr3,%eax
	orl	$ I386_CR3PAT,%eax
	movl	%eax,%cr3
	inb	$0x84,%al	# check wristwatch
	ret

	# lcr0(cr0)
	.globl	_lcr0,_load_cr0
	ALIGN32
_lcr0:
_load_cr0:
	movl	4(%esp),%eax
	movl	%eax,%cr0
	ret

	# rcr0()
	.globl	_rcr0
	ALIGN32
_rcr0:
	movl	%cr0,%eax
	ret

	# rcr2() - page fault linear address
	.globl	_rcr2
	ALIGN32
_rcr2:
	movl	%cr2,%eax
	ret

	# rcr3()
	.globl	_rcr3
	.globl	__cr3
	ALIGN32
__cr3:
_rcr3:
	movl	%cr3,%eax
	ret

	# ssdtosd(*ssdp,*sdp) - software segment descriptor -> h/w format
	.globl	_ssdtosd
	ALIGN32
_ssdtosd:
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	8(%ecx),%ebx
	shll	$16,%ebx
	movl	(%ecx),%edx
	roll	$16,%edx
	movb	%dh,%bl
	movb	%dl,%bh
	rorl	$8,%ebx
	movl	4(%ecx),%eax
	movw	%ax,%dx
	andl	$0xf0000,%eax
	orl	%eax,%ebx
	movl	12(%esp),%ecx
	movl	%edx,(%ecx)
	movl	%ebx,4(%ecx)
	popl	%ebx
	ret

/*
 * {fu,su},{byte,word}
 * Fetch/store user-space data through the gs override; faults vector
 * to fusufault via PCB_ONFAULT and return -1.
 */
	ALIGN32
ALTENTRY(fuiword)
ENTRY(fuword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx)
	movl	4(%esp),%edx
	.byte	0x65		# use gs
	movl	0(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN32
ENTRY(fusword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
	movl	4(%esp),%edx
	.byte	0x65		# use gs
	movzwl	0(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN32
ALTENTRY(fuibyte)
ENTRY(fubyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
	movl	4(%esp),%edx
	.byte	0x65		# use gs
	movzbl	0(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

	ALIGN32
fusufault:
	movl	_curpcb,%ecx
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
	decl	%eax		# return (-1)
	ret

	ALIGN32
ALTENTRY(suiword)
ENTRY(suword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
	movl	4(%esp),%edx
	movl	8(%esp),%eax

#ifdef notdef
	shrl	$IDXSHIFT, %edx	/* fetch pte associated with address */
	andb	$0xfc, %dl
	movl	_PTmap(%edx), %edx

	andb	$7, %dl		/* if we are the one case that won't trap... */
	cmpb	$5, %edx	# NOTE(review): byte compare on %edx -- %dl intended? (dead code)
	jne	1f
	/* ... then simulate the trap! */
	pushl	%edi
	call	_trapwrite	/* trapwrite(addr) */
	popl	%edx

	cmpl	$0, %eax	/* if not ok, return */
	jne	fusufault
	movl	8(%esp),%eax	/* otherwise, continue with reference */
1:
	movl	4(%esp),%edx
#endif
	.byte	0x65		# use gs
	movl	%eax,0(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
	ret

	ALIGN32
ENTRY(susword)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
	movl	4(%esp),%edx
	movl	8(%esp),%eax
#ifdef notdef
	shrl	$IDXSHIFT, %edx	/* calculate pte address */
	andb	$0xfc, %dl
	movl	_PTmap(%edx), %edx
	andb	$7, %edx	/* if we are the one case that won't trap... */
	cmpb	$5 , %edx
	jne	1f
	/* ..., then simulate the trap! */
	pushl	%edi
	call	_trapwrite	/* trapwrite(addr) */
	popl	%edx
	movl	_curpcb, %ecx	# restore trashed registers
	cmpl	$0, %eax	/* if not ok, return */
	jne	fusufault
	movl	8(%esp),%eax
1:	movl	4(%esp),%edx
#endif
	.byte	0x65		# use gs
	movw	%ax,0(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
	ret

	ALIGN32
ALTENTRY(suibyte)
ENTRY(subyte)
	movl	_curpcb,%ecx
	movl	$fusufault,PCB_ONFAULT(%ecx) #in case we page/protection violate
	movl	4(%esp),%edx
	movl	8(%esp),%eax
#ifdef notdef
	shrl	$IDXSHIFT, %edx	/* calculate pte address */
	andb	$0xfc, %dl
	movl	_PTmap(%edx), %edx
	andb	$7, %edx	/* if we are the one case that won't trap... */
	cmpb	$5 , %edx
	jne	1f
	/* ..., then simulate the trap! */
	pushl	%edi
	call	_trapwrite	/* trapwrite(addr) */
	popl	%edx
	movl	_curpcb, %ecx	# restore trashed registers
	cmpl	$0, %eax	/* if not ok, return */
	jne	fusufault
	movl	8(%esp),%eax
1:	movl	4(%esp),%edx
#endif
	.byte	0x65		# use gs
	movb	%eax,0(%edx)	# NOTE(review): byte op on %eax -- %al intended? verify
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx) #in case we page/protection violate
	ret

	/* setjmp(buf): save callee-saved regs + sp + return pc; returns 0 */
	ALIGN32
	ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx, 0(%eax)	# save ebx
	movl	%esp, 4(%eax)	# save esp
	movl	%ebp, 8(%eax)	# save ebp
	movl	%esi,12(%eax)	# save esi
	movl	%edi,16(%eax)	# save edi
	movl	(%esp),%edx	# get rta
	movl	%edx,20(%eax)	# save eip
	xorl	%eax,%eax	# return (0);
	ret

	/* longjmp(buf): restore state saved by setjmp; returns 1 */
	ALIGN32
	ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	0(%eax),%ebx	# restore ebx
	movl	4(%eax),%esp	# restore esp
	movl	8(%eax),%ebp	# restore ebp
	movl	12(%eax),%esi	# restore esi
	movl	16(%eax),%edi	# restore edi
	movl	20(%eax),%edx	# get rta
	movl	%edx,(%esp)	# put in return frame
	xorl	%eax,%eax	# return (1);
	incl	%eax
	ret

/*
 * The following
primitives manipulate the run queues.  _whichqs tells which
 * of the 32 queues _qs have processes in them.  Setrunqueue puts processes
 * into queues, Remrq removes them from queues.  The running process is on
 * no queue, other processes are on a queue related to p->p_priority, divided
 * by 4 actually to shrink the 0-127 range of priorities into the 32 available
 * queues.
 */
	.globl	_whichqs,_qs,_cnt,_panic
	.comm	_noproc,4
	.comm	_runrun,4

/*
 * Setrq(p)
 *
 * Call should be made at spl6(), and p->p_stat should be SRUN
 */
	ALIGN32
ENTRY(setrunqueue)
	movl	4(%esp),%eax
	cmpl	$0,P_BACK(%eax)	# should not be on q already
	je	set1
	pushl	$set2
	call	_panic
set1:
	movzbl	P_PRIORITY(%eax),%edx
	shrl	$2,%edx		# priority/4 -> queue index
	btsl	%edx,_whichqs	# set q full bit
	shll	$3,%edx		# 8 bytes per q header
	addl	$_qs,%edx	# locate q hdr
	movl	%edx,P_FORW(%eax)	# link process on tail of q
	movl	P_BACK(%edx),%ecx
	movl	%ecx,P_BACK(%eax)
	movl	%eax,P_BACK(%edx)
	movl	%eax,P_FORW(%ecx)
	ret

set2:	.asciz	"setrunqueue"

/*
 * Remrq(p)
 *
 * Call should be made at spl6().
 */
	ALIGN32
ENTRY(remrq)
	movl	4(%esp),%eax
	movzbl	P_PRIORITY(%eax),%edx
	shrl	$2,%edx
	btrl	%edx,_whichqs	# clear full bit, panic if clear already
	jb	rem1
	pushl	$rem3
	call	_panic
rem1:
	pushl	%edx
	movl	P_FORW(%eax),%ecx	# unlink process
	movl	P_BACK(%eax),%edx
	movl	%edx,P_BACK(%ecx)
	movl	P_BACK(%eax),%ecx
	movl	P_FORW(%eax),%edx
	movl	%edx,P_FORW(%ecx)
	popl	%edx
	movl	$_qs,%ecx
	shll	$3,%edx
	addl	%edx,%ecx
	cmpl	P_FORW(%ecx),%ecx	# q still has something?
	je	rem2
	shrl	$3,%edx		# yes, set bit as still full
	btsl	%edx,_whichqs
rem2:
	movl	$0,P_BACK(%eax)	# zap reverse link to indicate off list
	ret

rem3:	.asciz	"remrq"
sw0:	.asciz	"Xswitch"

/*
 * When no processes are on the runq, Swtch branches to idle
 * to wait for something to come ready.
 */
	.globl	Idle
	ALIGN32
Idle:
idle:
	call	_spl0
	cmpl	$0,_whichqs
	jne	sw1
	hlt			# wait for interrupt
	jmp	idle

	.align 4 /* ..so that profiling doesn't lump Idle with Xswitch().. */
badsw:
	pushl	$sw0
	call	_panic
	/*NOTREACHED*/

/*
 * Swtch()
 * Context switch: save the outgoing process's pcb, pick the highest
 * priority non-empty run queue, and resume its first process.
 */
	ALIGN32
ENTRY(Xswitch)

	incl	_cnt+V_SWTCH

	/* switch to new process. first, save context as needed */

	movl	_curproc,%ecx

	/* if no process to save, don't bother */
	cmpl	$0,%ecx
	je	sw1

	movl	P_ADDR(%ecx),%ecx	# ecx = outgoing pcb


	movl	(%esp),%eax	# Hardware registers
	movl	%eax, PCB_EIP(%ecx)
	movl	%ebx, PCB_EBX(%ecx)
	movl	%esp, PCB_ESP(%ecx)
	movl	%ebp, PCB_EBP(%ecx)
	movl	%esi, PCB_ESI(%ecx)
	movl	%edi, PCB_EDI(%ecx)

#ifdef NPX
	/* have we used fp, and need a save? */
	mov	_curproc,%eax
	cmp	%eax,_npxproc
	jne	1f
	pushl	%ecx		/* h/w bugs make saving complicated */
	leal	PCB_SAVEFPU(%ecx),%eax
	pushl	%eax
	call	_npxsave	/* do it in a big C function */
	popl	%eax
	popl	%ecx
1:
#endif

	movl	_CMAP2,%eax	# save temporary map PTE
	movl	%eax,PCB_CMAP2(%ecx)	# in our context
	movl	$0,_curproc	#  out of process

	# movw	_cpl, %ax
	# movw	%ax, PCB_IML(%ecx)	# save ipl

	/* save is done, now choose a new process or idle */
sw1:
	movl	_whichqs,%edi
2:
	cli
	bsfl	%edi,%eax	# find a full q
	jz	idle		# if none, idle
	# XX update whichqs?
swfnd:
	btrl	%eax,%edi	# clear q full status
	jnb	2b		# if it was clear, look for another
	movl	%eax,%ebx	# save which one we are using

	shll	$3,%eax
	addl	$_qs,%eax	# select q
	movl	%eax,%esi

#ifdef	DIAGNOSTIC
	cmpl	P_FORW(%eax),%eax	# linked to self? (e.g. not on list)
	je	badsw		# not possible
#endif

	movl	P_FORW(%eax),%ecx	# unlink from front of process q
	movl	P_FORW(%ecx),%edx
	movl	%edx,P_FORW(%eax)
	movl	P_BACK(%ecx),%eax
	movl	%eax,P_BACK(%edx)

	cmpl	P_FORW(%ecx),%esi	# q empty
	je	3f
	btsl	%ebx,%edi	# nope, set to indicate full
3:
	movl	%edi,_whichqs	# update q status

	movl	$0,%eax
	movl	%eax,_want_resched

#ifdef	DIAGNOSTIC
	cmpl	%eax,P_WCHAN(%ecx)
	jne	badsw
	cmpb	$ SRUN,P_STAT(%ecx)
	jne	badsw
#endif

	movl	%eax,P_BACK(%ecx)	/* isolate process to run */
	movl	P_ADDR(%ecx),%edx
	movl	PCB_CR3(%edx),%ebx

	/* switch address space */
	movl	%ebx,%cr3

	/* restore context */
	movl	PCB_EBX(%edx), %ebx
	movl	PCB_ESP(%edx), %esp
	movl	PCB_EBP(%edx), %ebp
	movl	PCB_ESI(%edx), %esi
	movl	PCB_EDI(%edx), %edi
	movl	PCB_EIP(%edx), %eax
	movl	%eax, (%esp)

	movl	PCB_CMAP2(%edx),%eax	# get temporary map
	movl	%eax,_CMAP2	# reload temporary map PTE

	movl	%ecx,_curproc	# into next process
	movl	%edx,_curpcb

	/* pushl	PCB_IML(%edx)
	call	_splx
	popl	%eax*/

	movl	%edx,%eax	# return (1);
	ret

	# mvesp(): return the caller's stack pointer
	.globl	_mvesp
	ALIGN32
_mvesp:	movl	%esp,%eax
	ret
/*
 * struct proc *switch_to_inactive(p) ; struct proc *p;
 *
 * At exit of a process, move off the address space of the
 * process and onto a "safe" one. Then, on a temporary stack
 * return and run code that disposes of the old state.
 * Since this code requires a parameter from the "old" stack,
 * pass it back as a return value.
 */
	ALIGN32
ENTRY(switch_to_inactive)
	popl	%edx		# old pc
	popl	%eax		# arg, our return value
	movl	_IdlePTD,%ecx
	movl	%ecx,%cr3	# good bye address space
 #write buffer?
	movl	$tmpstk-4,%esp	# temporary stack, compensated for call
	jmp	%edx		# return, execute remainder of cleanup

/*
 * savectx(pcb, altreturn)
 * Update pcb, saving current processor state and arranging
 * for alternate return ala longjmp in Xswitch if altreturn is true.
 */
	ALIGN32
ENTRY(savectx)
	movl	4(%esp), %ecx
	movw	_cpl, %ax
	movw	%ax,  PCB_IML(%ecx)
	movl	(%esp), %eax	# caller's return pc
	movl	%eax, PCB_EIP(%ecx)
	movl	%ebx, PCB_EBX(%ecx)
	movl	%esp, PCB_ESP(%ecx)
	movl	%ebp, PCB_EBP(%ecx)
	movl	%esi, PCB_ESI(%ecx)
	movl	%edi, PCB_EDI(%ecx)

#ifdef NPX
	/*
	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxproc != NULL, then we have to save the npx h/w state to
	 * npxproc's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	mov	_npxproc,%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx
	movl	P_ADDR(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax
	pushl	%eax
	call	_npxsave
	popl	%eax
	popl	%eax
	popl	%ecx

	pushl	%ecx
	pushl	$108+8*2	/* XXX h/w state size + padding */
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	_bcopy
	addl	$12,%esp
	popl	%ecx
1:
#endif

	movl	_CMAP2, %edx	# save temporary map PTE
	movl	%edx, PCB_CMAP2(%ecx)	# in our context

	cmpl	$0, 8(%esp)	# altreturn requested?
	je	1f
	movl	%esp, %edx	# relocate current sp relative to pcb
	subl	$_kstack, %edx	#   (sp is relative to kstack):
	addl	%edx, %ecx	#   pcb += sp - kstack;
	movl	%eax, (%ecx)	# write return pc at (relocated) sp@
	# this mess deals with replicating register state gcc hides
	movl	12(%esp),%eax
	movl	%eax,12(%ecx)
	movl	16(%esp),%eax
	movl	%eax,16(%ecx)
	movl	20(%esp),%eax
	movl	%eax,20(%ecx)
	movl	24(%esp),%eax
	movl	%eax,24(%ecx)
1:
	xorl	%eax, %eax	# return 0
	ret

/*
 * addupc(int pc, struct uprof *up, int ticks):
 * update profiling information for the user process.
1410 */ 1411 1412 ALIGN32 1413ENTRY(addupc) 1414 pushl %ebp 1415 movl %esp,%ebp 1416 movl 12(%ebp),%edx /* up */ 1417 movl 8(%ebp),%eax /* pc */ 1418 1419 subl PR_OFF(%edx),%eax /* pc -= up->pr_off */ 1420 jl L1 /* if (pc < 0) return */ 1421 1422 shrl $1,%eax /* praddr = pc >> 1 */ 1423 imull PR_SCALE(%edx),%eax /* praddr *= up->pr_scale */ 1424 shrl $15,%eax /* praddr = praddr << 15 */ 1425 andl $-2,%eax /* praddr &= ~1 */ 1426 1427 cmpl PR_SIZE(%edx),%eax /* if (praddr > up->pr_size) return */ 1428 ja L1 1429 1430/* addl %eax,%eax /* praddr -> word offset */ 1431 addl PR_BASE(%edx),%eax /* praddr += up-> pr_base */ 1432 movl 16(%ebp),%ecx /* ticks */ 1433 1434 movl _curpcb,%edx 1435 movl $proffault,PCB_ONFAULT(%edx) 1436 addl %ecx,(%eax) /* storage location += ticks */ 1437 movl $0,PCB_ONFAULT(%edx) 1438L1: 1439 leave 1440 ret 1441 1442 ALIGN32 1443proffault: 1444 /* if we get a fault, then kill profiling all together */ 1445 movl $0,PCB_ONFAULT(%edx) /* squish the fault handler */ 1446 movl 12(%ebp),%ecx 1447 movl $0,PR_SCALE(%ecx) /* up->pr_scale = 0 */ 1448 leave 1449 ret 1450 1451.data 1452 ALIGN32 1453 .globl _cyloffset, _curpcb 1454_cyloffset: .long 0 1455 .globl _proc0paddr 1456_proc0paddr: .long 0 1457LF: .asciz "Xswitch %x" 1458 1459.text 1460 # To be done: 1461 .globl _astoff 1462_astoff: 1463 ret 1464 1465#define IDTVEC(name) .align 4; .globl _X/**/name; _X/**/name: 1466#define PANIC(msg) xorl %eax,%eax; movl %eax,_waittime; pushl 1f; \ 1467 call _panic; 1: .asciz msg 1468#define PRINTF(n,msg) pushal ; nop ; pushl 1f; call _printf; MSG(msg) ; \ 1469 popl %eax ; popal 1470#define MSG(msg) .data; 1: .asciz msg; .text 1471 1472 .text 1473 1474/* 1475 * Trap and fault vector routines 1476 */ 1477#define TRAP(a) pushl $(a) ; jmp alltraps 1478#ifdef KGDB 1479#define BPTTRAP(a) pushl $(a) ; jmp bpttraps 1480#else 1481#define BPTTRAP(a) TRAP(a) 1482#endif 1483 1484IDTVEC(div) 1485 pushl $0; TRAP(T_DIVIDE) 1486IDTVEC(dbg) 1487 pushl $0; BPTTRAP(T_TRCTRAP) 
1488IDTVEC(nmi) 1489 pushl $0; TRAP(T_NMI) 1490IDTVEC(bpt) 1491 pushl $0; BPTTRAP(T_BPTFLT) 1492IDTVEC(ofl) 1493 pushl $0; TRAP(T_OFLOW) 1494IDTVEC(bnd) 1495 pushl $0; TRAP(T_BOUND) 1496IDTVEC(ill) 1497 pushl $0; TRAP(T_PRIVINFLT) 1498IDTVEC(dna) 1499 pushl $0; TRAP(T_DNA) 1500IDTVEC(dble) 1501 TRAP(T_DOUBLEFLT) 1502 /*PANIC("Double Fault");*/ 1503IDTVEC(fpusegm) 1504 pushl $0; TRAP(T_FPOPFLT) 1505IDTVEC(tss) 1506 TRAP(T_TSSFLT) 1507 /*PANIC("TSS not valid");*/ 1508IDTVEC(missing) 1509 TRAP(T_SEGNPFLT) 1510IDTVEC(stk) 1511 TRAP(T_STKFLT) 1512IDTVEC(prot) 1513 TRAP(T_PROTFLT) 1514IDTVEC(page) 1515 TRAP(T_PAGEFLT) 1516IDTVEC(rsvd) 1517 pushl $0; TRAP(T_RESERVED) 1518IDTVEC(fpu) 1519#ifdef NPX 1520 /* 1521 * Handle like an interrupt so that we can call npxintr to clear the 1522 * error. It would be better to handle npx interrupts as traps but 1523 * this is difficult for nested interrupts. 1524 */ 1525 pushl $0 /* dummy error code */ 1526 pushl $T_ASTFLT 1527 pushal 1528 nop /* silly, the bug is for popal and it only 1529 * bites when the next instruction has a 1530 * complicated address mode */ 1531 pushl %ds 1532 pushl %es /* now the stack frame is a trap frame */ 1533 movl $KDSEL,%eax 1534 movl %ax,%ds 1535 movl %ax,%es 1536 pushl _cpl 1537 pushl $0 /* dummy unit to finish building intr frame */ 1538 incl _cnt+V_TRAP 1539 call _npxintr 1540 jmp doreti 1541#else 1542 pushl $0; TRAP(T_ARITHTRAP) 1543#endif 1544 /* 17 - 31 reserved for future exp */ 1545IDTVEC(rsvd0) 1546 pushl $0; TRAP(17) 1547IDTVEC(rsvd1) 1548 pushl $0; TRAP(18) 1549IDTVEC(rsvd2) 1550 pushl $0; TRAP(19) 1551IDTVEC(rsvd3) 1552 pushl $0; TRAP(20) 1553IDTVEC(rsvd4) 1554 pushl $0; TRAP(21) 1555IDTVEC(rsvd5) 1556 pushl $0; TRAP(22) 1557IDTVEC(rsvd6) 1558 pushl $0; TRAP(23) 1559IDTVEC(rsvd7) 1560 pushl $0; TRAP(24) 1561IDTVEC(rsvd8) 1562 pushl $0; TRAP(25) 1563IDTVEC(rsvd9) 1564 pushl $0; TRAP(26) 1565IDTVEC(rsvd10) 1566 pushl $0; TRAP(27) 1567IDTVEC(rsvd11) 1568 pushl $0; TRAP(28) 1569IDTVEC(rsvd12) 
	pushl $0; TRAP(29)
IDTVEC(rsvd13)
	pushl $0; TRAP(30)
IDTVEC(rsvd14)
	pushl $0; TRAP(31)

/*
 * Common trap entry: finish building the trap frame (general registers
 * plus %ds/%es on top of the trapno/err/eip/cs/eflags already pushed),
 * switch to kernel data segments, and call C trap().
 */
	ALIGN32
alltraps:
	pushal
	nop			# popal-bug workaround (see IDTVEC(fpu))
	push %ds
	push %es		# frame is now a complete trap frame
	# movw	$KDSEL,%ax
	movw $0x10,%ax		# kernel data selector (== KDSEL)
	movw %ax,%ds
	movw %ax,%es
calltrap:
	incl _cnt+V_TRAP
	call _trap
	/*
	 * Return through doreti to handle ASTs.  Have to change trap frame
	 * to interrupt frame.
	 */
	movl $T_ASTFLT,4+4+32(%esp)	/* new trap type (err code not used) */
	pushl _cpl
	pushl $0			/* dummy unit */
	jmp doreti

#ifdef KGDB
/*
 * This code checks for a kgdb trap, then falls through
 * to the regular trap code.
 */
	ALIGN32
bpttraps:
	pushal
	nop
	push %es		# NOTE(review): pushed in the opposite order
	push %ds		#  from alltraps; benign only if user
	# movw	$KDSEL,%ax	#  %ds == %es -- verify against trap frame
	movw $0x10,%ax		# kernel data selector (== KDSEL)
	movw %ax,%ds
	movw %ax,%es
	movzwl 52(%esp),%eax	# saved %cs: 8 (segs) + 32 (pushal) +
	test $3,%eax		#  4 (trapno) + 4 (err) + 4 (eip) = 52;
	jne calltrap		#  RPL != 0 => trap came from user mode
	call _kgdb_trap_glue	# kernel-mode trap: offer it to kgdb first
	jmp calltrap
#endif

/*
 * Call gate entry for syscall
 * (Call gates, unlike int, do not save eflags; hence the pushfl and
 * the frame shuffle after _syscall returns.)
 */

	ALIGN32
IDTVEC(syscall)
	pushfl	# only for stupid carry bit and more stupid wait3 cc kludge
	pushal	# only need eax,ecx,edx - trap resaves others
	nop
	movl $KDSEL,%eax	# switch to kernel segments
	movl %ax,%ds
	movl %ax,%es
	incl _cnt+V_SYSCALL	# kml 3/25/93
	call _syscall
	/*
	 * Return through doreti to handle ASTs.  Have to change syscall frame
	 * to interrupt frame.
	 *
	 * XXX - we should have set up the frame earlier to avoid the
	 * following popal/pushal (not much can be done to avoid shuffling
	 * the flags).  Consistent frames would simplify things all over.
	 */
	movl 32+0(%esp),%eax	/* old flags, shuffle to above cs:eip */
	movl 32+4(%esp),%ebx	/* `int' frame should have been ef, eip, cs */
	movl 32+8(%esp),%ecx
	movl %ebx,32+0(%esp)	/* rotate (ef,eip,cs) -> (eip,cs,ef) */
	movl %ecx,32+4(%esp)
	movl %eax,32+8(%esp)
	popal
	nop
	pushl $0		/* dummy error code */
	pushl $T_ASTFLT
	pushal
	nop
	movl __udatasel,%eax	/* switch back to user segments */
	push %eax		/* XXX - better to preserve originals? */
	push %eax
	pushl _cpl
	pushl $0
	jmp doreti

/*
 * htonl/ntohl: 32-bit byte swap via xchgb/roll (works on a 386;
 * bswap requires a 486).  Host and network order are inverses, so
 * one routine serves both names.
 */
	ALIGN32
ENTRY(htonl)
ENTRY(ntohl)
	movl 4(%esp),%eax
	xchgb %al,%ah		# swap low two bytes
	roll $16,%eax		# exchange halves
	xchgb %al,%ah		# swap (new) low two bytes
	ret

/*
 * htons/ntohs: 16-bit byte swap; result zero-extended to 32 bits.
 */
	ALIGN32
ENTRY(htons)
ENTRY(ntohs)
	movzwl 4(%esp),%eax
	xchgb %al,%ah
	ret

#include "vector.s"
#include "i386/isa/icu.s"