/*	$OpenBSD: locore.s,v 1.205 2024/06/06 00:36:46 bluhm Exp $	*/
/*	$NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $	*/

/*-
 * Copyright (c) 1993, 1994, 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)locore.s	7.3 (Berkeley) 5/13/91
 */

#include "npx.h"
#include "assym.h"
#include "apm.h"
#include "lapic.h"
#include "ksyms.h"

#include <sys/errno.h>
#include <sys/syscall.h>

#include <machine/codepatch.h>
#include <machine/cputypes.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/trap.h>

#include <dev/isa/isareg.h>

#if NLAPIC > 0
#include <machine/i82489reg.h>
#endif

/*
 * As stac/clac SMAP instructions are 3 bytes, we want the fastest
 * 3 byte nop sequence possible here.  This will be replaced by
 * stac/clac instructions if SMAP is detected after booting.
 *
 * Intel documents multi-byte NOP sequences as being available
 * on all family 0x6 and 0xf processors (i.e. 686+).
 * So use 3 of the single byte nops for compatibility.
 */
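/*
 * Illustrative sketch only (not part of the build): the boot-time
 * codepatch pass rewrites each tagged SMAP_NOP site in place, so the
 * three 0x90 bytes become the 3-byte stac/clac encodings:
 *
 *	before:	90 90 90	nop; nop; nop
 *	after:	0f 01 cb	stac	(or 0f 01 ca for clac)
 */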
#define SMAP_NOP	.byte 0x90, 0x90, 0x90
#define SMAP_STAC	CODEPATCH_START			;\
			SMAP_NOP			;\
			CODEPATCH_END(CPTAG_STAC)
#define SMAP_CLAC	CODEPATCH_START			;\
			SMAP_NOP			;\
			CODEPATCH_END(CPTAG_CLAC)

/*
 * override user-land alignment before including asm.h
 */

#define	ALIGN_DATA	.align	4,0xcc
#define	ALIGN_TEXT	.align	4,0x90	/* 4-byte boundaries, NOP-filled */
#define _ALIGN_TEXT	ALIGN_TEXT
#include <machine/asm.h>

#define	CPL	lapic_tpr

#define	GET_CURPCB(reg)					\
	movl	CPUVAR(CURPCB), reg

#define	CHECK_ASTPENDING(treg)				\
	movl	CPUVAR(CURPROC),treg		;	\
	cmpl	$0, treg			;	\
	je	1f				;	\
	cmpl	$0,P_MD_ASTPENDING(treg)	;	\
	1:

#define	CLEAR_ASTPENDING(cpreg)				\
	movl	$0,P_MD_ASTPENDING(cpreg)

/*
 * These are used on interrupt or trap entry or exit.
 */
#define	INTR_COPY_FROM_TRAMP_STACK		\
	movl	TRF_SS(%ebp),%eax	;	\
	movl	%eax,IRF_SS(%esp)	;	\
	movl	TRF_ESP(%ebp),%eax	;	\
	movl	%eax,IRF_ESP(%esp)	;	\
	movl	TRF_EFLAGS(%ebp),%eax	;	\
	movl	%eax,IRF_EFLAGS(%esp)	;	\
	movl	TRF_CS(%ebp),%eax	;	\
	movl	%eax,IRF_CS(%esp)	;	\
	movl	TRF_EIP(%ebp),%eax	;	\
	movl	%eax,IRF_EIP(%esp)	;	\
	movl	TRF_ERR(%ebp),%eax	;	\
	movl	%eax,IRF_ERR(%esp)	;	\
	movl	TRF_TRAPNO(%ebp),%eax	;	\
	movl	%eax,IRF_TRAPNO(%esp)

#define	INTR_ENABLE_U_PLUS_K				\
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax	;	\
	movw	%ax,%fs				;	\
	movl	CPUVAR(KERN_CR3),%eax		;	\
	testl	%eax,%eax			;	\
	jz	100f				;	\
	movl	%eax,%cr3			;	\
	100:

#define	INTRENTRY_LABEL(label)	X##label##_untramp
#define	INTRENTRY(label) \
	/* we have an iretframe */			;	\
	testb	$SEL_RPL,IRF_CS(%esp)			;	\
	/* from kernel, stay on kernel stack, use iretframe */	;	\
	je	INTRENTRY_LABEL(label)			;	\
	/* entering from user space, map kernel */	;	\
	pushl	%ebp					;	\
	pushl	%eax					;	\
	pushl	%fs					;	\
	INTR_ENABLE_U_PLUS_K				;	\
	jmp	99f					;	\
	.text						;	\
	.global	INTRENTRY_LABEL(label)			;	\
INTRENTRY_LABEL(label):	/* from kernel */		;	\
	jmp	98f					;	\
	/* from user space, build trampframe */		;	\
99:	movl	CPUVAR(KERN_ESP),%eax			;	\
	pushl	%eax					;	\
	pushl	$0xdeadbeef				;	\
	movl	%esp,%ebp				;	\
	movl	%eax,%esp				;	\
	subl	$SIZEOF_IRETFRAME,%esp			;	\
	/* we have a trampframe, copy to iretframe on kernel stack */	;	\
	INTR_COPY_FROM_TRAMP_STACK			;	\
	movl	TRF_FS(%ebp),%eax			;	\
	movw	%ax,%fs					;	\
	movl	TRF_EAX(%ebp),%eax			;	\
	movl	TRF_EBP(%ebp),%ebp			;	\
98:	INTR_SAVE_ALL

#define	INTR_SAVE_ALL \
	cld						;	\
	SMAP_CLAC					;	\
	/* we have an iretframe, build trapframe */	;	\
	subl	$44,%esp				;	\
	movl	%eax,TF_EAX(%esp)			;	\
	/* the hardware puts err next to %eip, we move it elsewhere and */ ;	\
	/* later put %ebp in this slot to make it look like a call frame */ ;	\
	movl	(TF_EIP - 4)(%esp),%eax			;	\
	movl	%eax,TF_ERR(%esp)			;	\
	movl	%ecx,TF_ECX(%esp)			;	\
	movl	%edx,TF_EDX(%esp)			;	\
	movl	%ebx,TF_EBX(%esp)			;	\
	movl	%ebp,TF_EBP(%esp)			;	\
	leal	TF_EBP(%esp),%ebp			;	\
	movl	%esi,TF_ESI(%esp)			;	\
	movl	%edi,TF_EDI(%esp)			;	\
	movw	%ds,TF_DS(%esp)				;	\
	movw	%es,TF_ES(%esp)				;	\
	movw	%gs,TF_GS(%esp)				;	\
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax		;	\
	movw	%ax,%ds					;	\
	movw	%ax,%es					;	\
	xorl	%eax,%eax	; /* $GSEL(GNULL_SEL, SEL_KPL) == 0 */	\
	movw	%ax,%gs					;	\
	movw	%fs,TF_FS(%esp)				;	\
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax		;	\
	movw	%ax,%fs
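/*
 * Rough picture of a user->kernel entry with the Meltdown mitigation
 * active (illustrative only): the CPU delivers the trap on the per-CPU
 * trampoline stack with just an iretframe; INTRENTRY switches %cr3 to
 * the full kernel page tables, builds a trampframe there, copies the
 * state to the real kernel stack, and INTR_SAVE_ALL then grows it into
 * a complete trapframe:
 *
 *	trampoline stack		kernel stack
 *	iretframe -> trampframe   ==>	iretframe -> trapframe
 */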

#define	INTR_RESTORE_ALL \
	popl	%fs			;	\
	popl	%gs			;	\
	popl	%es			;	\
	popl	%ds			;	\
	popl	%edi			;	\
	popl	%esi			;	\
	addl	$4,%esp	/* err */	;	\
	popl	%ebx			;	\
	popl	%edx			;	\
	popl	%ecx			;	\
	popl	%eax			;	\
	movl	4(%esp),%ebp

#define	INTRFASTEXIT \
	jmp	intr_fast_exit

#define	INTR_FAKE_TRAP_PUSH_RBP	0xbadabada
#define	INTR_FAKE_TRAP_POP_RBP	0xbcbcbcbc

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap, PTD
	.set	PTmap, (PDSLOT_PTE << PDSHIFT)
	.set	PTD, (PTmap + PDSLOT_PTE * NBPG)

/*
 * Initialization
 */
	.data

	.globl	cpu_id, cpu_vendor
	.globl	cpu_brandstr
	.globl	cpuid_level
	.globl	cpu_miscinfo
	.globl	cpu_feature, cpu_ecxfeature
	.globl	ecpu_feature, ecpu_eaxfeature
	.globl	ecpu_ecxfeature
	.globl	cpu_cache_eax, cpu_cache_ebx
	.globl	cpu_cache_ecx, cpu_cache_edx
	.globl	cpu_perf_eax
	.globl	cpu_perf_ebx
	.globl	cpu_perf_edx
	.globl	cpu_apmi_edx
	.globl	cold, cnvmem, extmem
	.globl	cpu_pae
	.globl	esym
	.globl	ssym
	.globl	nkptp_max
	.globl	boothowto, bootdev, atdevbase
	.globl	proc0paddr, PTDpaddr, PTDsize
	.globl	gdt
	.globl	bootapiver, bootargc, bootargv
	.globl	lapic_tpr
	.globl	pg_g_kern
	.globl	cpu_meltdown

#if NLAPIC > 0
	.align	NBPG
	.globl	local_apic, lapic_id
local_apic:
	.space	LAPIC_ID
lapic_id:
	.long	0x00000000
	.space	LAPIC_TPRI-(LAPIC_ID+4)
lapic_tpr:
	.space	LAPIC_PPRI-LAPIC_TPRI
lapic_ppr:
	.space	LAPIC_ISR-LAPIC_PPRI
lapic_isr:
	.space	NBPG-LAPIC_ISR
#else
lapic_tpr:
	.long	0
#endif
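/*
 * Illustrative note (the real mapping is done by the lapic attach
 * code, not here): the page reserved above is expected to end up
 * backed by the local APIC's register page, so these symbols alias
 * hardware registers at fixed offsets, roughly:
 *
 *	volatile uint32_t *tpr = (uint32_t *)(local_apic + LAPIC_TPRI);
 */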
cpu_id:		.long	0	# saved from 'cpuid' instruction
cpu_pae:	.long	0	# are we using PAE paging mode?
cpu_miscinfo:	.long	0	# misc info (apic/brand id) from 'cpuid'
cpu_feature:	.long	0	# feature flags from 'cpuid' instruction
ecpu_feature:	.long	0	# extended feature flags from 'cpuid'
cpu_ecxfeature:	.long	0	# ecx feature flags from 'cpuid'
ecpu_eaxfeature: .long	0	# extended eax feature flags
ecpu_ecxfeature: .long	0	# extended ecx feature flags
cpuid_level:	.long	-1	# max. lvl accepted by 'cpuid' insn
cpu_cache_eax:	.long	0
cpu_cache_ebx:	.long	0
cpu_cache_ecx:	.long	0
cpu_cache_edx:	.long	0
cpu_perf_eax:	.long	0	# arch. perf. mon. flags from 'cpuid'
cpu_perf_ebx:	.long	0	# arch. perf. mon. flags from 'cpuid'
cpu_perf_edx:	.long	0	# arch. perf. mon. flags from 'cpuid'
cpu_apmi_edx:	.long	0	# adv. power management info. 'cpuid'
cpu_vendor:	.space	16	# vendor string returned by 'cpuid' instruction
cpu_brandstr:	.space	48	# brand string returned by 'cpuid'
cold:		.long	1	# cold till we are not
ssym:		.long	0	# ptr to start of syms
esym:		.long	0	# ptr to end of syms
cnvmem:		.long	0	# conventional memory size
extmem:		.long	0	# extended memory size
atdevbase:	.long	0	# location of start of iomem in virtual
bootapiver:	.long	0	# /boot API version
bootargc:	.long	0	# /boot argc
bootargv:	.long	0	# /boot argv
bootdev:	.long	0	# device we booted from
proc0paddr:	.long	0
PTDpaddr:	.long	0	# paddr of PTD, for libkvm
PTDsize:	.long	NBPG	# size of PTD, for libkvm
pg_g_kern:	.long	0	# 0x100 if global pages should be used
				# in kernel mappings, 0 otherwise (for
				# insecure CPUs)
cpu_meltdown:	.long	0	# 1 if this CPU has Meltdown

	.text

NENTRY(proc_trampoline)
	call	proc_trampoline_mi
	pushl	%ebx
	call	*%esi
	addl	$4,%esp
#ifdef DIAGNOSTIC
	movl	$0xfe,%esi
#endif
	jmp	.Lsyscall_check_asts

	/* This must come before any use of the CODEPATCH macros */
	.section .codepatch,"a"
	.align	8
	.globl	codepatch_begin
codepatch_begin:
	.previous

	.section .codepatchend,"a"
	.globl	codepatch_end
codepatch_end:
	.previous

/*****************************************************************************/

/*
 * Signal trampoline; copied to top of user stack.
 */
	.section .rodata
	.globl	sigcode
sigcode:
	call	*SIGF_HANDLER(%esp)
	leal	SIGF_SC(%esp),%eax	# scp (the call may have clobbered the
					# copy at SIGF_SCP(%esp))
	pushl	%eax
	pushl	%eax			# junk to fake return address
	movl	$SYS_sigreturn,%eax
	.globl	sigcodecall
sigcodecall:
	int	$0x80			# enter kernel with args on stack
	.globl	sigcoderet
sigcoderet:
	.globl	esigcode
esigcode:
	/* FALLTHROUGH */
	.globl	sigfill
sigfill:
	int3
esigfill:

	.data
	.globl	sigfillsiz
sigfillsiz:
	.long	esigfill - sigfill

	.text

/*****************************************************************************/

/*
 * The following primitives are used to fill and copy regions of memory.
 */

/* Frame pointer reserve on stack. */
#ifdef DDB
#define FPADD 4
#else
#define FPADD 0
#endif

/*
 * kcopy(caddr_t from, caddr_t to, size_t len);
 * Copy len bytes, abort on fault.
 */
ENTRY(kcopy)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%eax)	# load curpcb into eax and set on-fault
	pushl	PCB_ONFAULT(%eax)
	movl	$copy_fault, PCB_ONFAULT(%eax)

	movl	16+FPADD(%esp),%esi
	movl	20+FPADD(%esp),%edi
	movl	24+FPADD(%esp),%ecx
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax		# overlapping?
	jb	1f
	shrl	$2,%ecx			# nope, copy forward by 32-bit words
	rep
	movsl
	movl	24+FPADD(%esp),%ecx
	andl	$3,%ecx			# any bytes left?
	rep
	movsb

	GET_CURPCB(%edx)		# XXX save curpcb?
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
#ifdef DDB
	leave
#endif
	ret

	.align	4,0xcc
1:	addl	%ecx,%edi		# copy backward
	addl	%ecx,%esi
	std
	andl	$3,%ecx			# any fractional bytes?
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	24+FPADD(%esp),%ecx	# copy remainder by 32-bit words
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	cld

	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
#ifdef DDB
	leave
#endif
	ret
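/*
 * Illustrative only: the overlap test in kcopy above is the classic
 * memmove direction check.  In rough C, relying on the same unsigned
 * wraparound trick to test "to lies within [from, from + len)":
 *
 *	if ((uintptr_t)to - (uintptr_t)from < len)
 *		copy backward, starting from the last byte;
 *	else
 *		copy forward;
 */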
/*****************************************************************************/

/*
 * The following primitives are used to copy data in and out of the user's
 * address space.
 */

/*
 * copyout(caddr_t from, caddr_t to, size_t len);
 * Copy len bytes into the user's address space.
 */
ENTRY(copyout)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	pushl	$0

	movl	16+FPADD(%esp),%esi
	movl	20+FPADD(%esp),%edi
	movl	24+FPADD(%esp),%eax

	/*
	 * We check that the end of the destination buffer is not past the end
	 * of the user's address space.  If it's not, then we only need to
	 * check that each page is writable.  The 486 will do this for us; the
	 * 386 will not.  (We assume that pages in user space that are not
	 * writable by the user are not writable by the kernel either.)
	 */
	movl	%edi,%edx
	addl	%eax,%edx
	jc	copy_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copy_fault

	GET_CURPCB(%edx)
	movl	$copy_fault,PCB_ONFAULT(%edx)
	SMAP_STAC

	/* bcopy(%esi, %edi, %eax); */
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	movl	%eax,%ecx
	andl	$3,%ecx
	rep
	movsb

	SMAP_CLAC
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
#ifdef DDB
	leave
#endif
	ret

/*
 * _copyin(caddr_t from, caddr_t to, size_t len);
 * Copy len bytes from the user's address space.
 */
ENTRY(_copyin)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%eax)
	pushl	$0
	movl	$copy_fault,PCB_ONFAULT(%eax)
	SMAP_STAC

	movl	16+FPADD(%esp),%esi
	movl	20+FPADD(%esp),%edi
	movl	24+FPADD(%esp),%eax

	/*
	 * We check that the end of the destination buffer is not past the end
	 * of the user's address space.  If it's not, then we only need to
	 * check that each page is readable, and the CPU will do that for us.
	 */
	movl	%esi,%edx
	addl	%eax,%edx
	jc	copy_fault
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	copy_fault

	/* bcopy(%esi, %edi, %eax); */
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	movb	%al,%cl
	andb	$3,%cl
	rep
	movsb

	SMAP_CLAC
	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
#ifdef DDB
	leave
#endif
	ret

ENTRY(copy_fault)
	cld
	SMAP_CLAC
	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	movl	$EFAULT,%eax
#ifdef DDB
	leave
#endif
	ret
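/*
 * Illustrative only: the range check performed by copyout/_copyin
 * above is, in rough C (the carry check catches address wraparound):
 *
 *	if (uaddr + len < uaddr ||		/+ carry: wrapped +/
 *	    uaddr + len > VM_MAXUSER_ADDRESS)
 *		return EFAULT;
 *
 * Per-page access rights are then enforced by the MMU, with
 * pcb_onfault turning any fault into an EFAULT return.
 */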
/*
 * copyoutstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, into the
 * user's address space.  Return the number of characters copied (including
 * the NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG;
 * else return 0 or EFAULT.
 */
ENTRY(copyoutstr)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi

	movl	12+FPADD(%esp),%esi	# esi = from
	movl	16+FPADD(%esp),%edi	# edi = to
	movl	20+FPADD(%esp),%edx	# edx = maxlen

5:	GET_CURPCB(%eax)
	movl	$copystr_fault,PCB_ONFAULT(%eax)
	SMAP_STAC
	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%edi,%eax
	jbe	copystr_fault		# die if CF == 1 || ZF == 1
					# i.e. make sure that %edi
					# is below VM_MAXUSER_ADDRESS

	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20+FPADD(%esp)

1:	incl	%edx

1:	decl	%edx
	jz	2f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%edi
	jae	copystr_fault
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return

/*
 * _copyinstr(caddr_t from, caddr_t to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, from the
 * user's address space.  Return the number of characters copied (including
 * the NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG;
 * else return 0 or EFAULT.
 */
ENTRY(_copyinstr)
#ifdef DDB
	pushl	%ebp
	movl	%esp,%ebp
#endif
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%ecx)
	movl	$copystr_fault,PCB_ONFAULT(%ecx)
	SMAP_STAC

	movl	12+FPADD(%esp),%esi	# %esi = from
	movl	16+FPADD(%esp),%edi	# %edi = to
	movl	20+FPADD(%esp),%edx	# %edx = maxlen

	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%esi,%eax
	jbe	copystr_fault		# Error if CF == 1 || ZF == 1
					# i.e. make sure that %esi
					# is below VM_MAXUSER_ADDRESS
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20+FPADD(%esp)

1:	incl	%edx

1:	decl	%edx
	jz	2f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	copystr_fault
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return

ENTRY(copystr_fault)
	movl	$EFAULT,%eax

copystr_return:
	SMAP_CLAC
	/* Set *lencopied and return %eax. */
	GET_CURPCB(%ecx)
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20+FPADD(%esp),%ecx
	subl	%edx,%ecx
	movl	24+FPADD(%esp),%edx
	testl	%edx,%edx
	jz	8f
	movl	%ecx,(%edx)

8:	popl	%edi
	popl	%esi
#ifdef DDB
	leave
#endif
	ret
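/*
 * Illustrative only: the clamping done by copyoutstr/_copyinstr above
 * is, in rough C:
 *
 *	space = VM_MAXUSER_ADDRESS - uaddr;	/+ fault if uaddr past end +/
 *	n = min(maxlen, space);
 *	copy up to n bytes, stopping after the NUL;
 *	if n ran out before the NUL:
 *		EFAULT if we hit VM_MAXUSER_ADDRESS, else ENAMETOOLONG;
 */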
/*****************************************************************************/

/*
 * The following is i386-specific nonsense.
 */

/*
 * void lgdt(struct region_descriptor *rdp);
 * Change the global descriptor table.
 */
NENTRY(lgdt)
	/* Reload the descriptor table. */
	movl	4(%esp),%eax
	lgdt	(%eax)
	/* Flush the prefetch queue. */
	jmp	1f
	nop
1:	/* Reload "stale" selectors. */
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%ss
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs
	/* Reload code selector by doing intersegment return. */
	popl	%eax
	pushl	$GSEL(GCODE_SEL, SEL_KPL)
	pushl	%eax
	lret

#ifdef DDB
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)		# save ebx
	movl	%esp,4(%eax)		# save esp
	movl	%ebp,8(%eax)		# save ebp
	movl	%esi,12(%eax)		# save esi
	movl	%edi,16(%eax)		# save edi
	movl	(%esp),%edx		# get rta
	movl	%edx,20(%eax)		# save eip
	xorl	%eax,%eax		# return (0);
	ret

ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx		# restore ebx
	movl	4(%eax),%esp		# restore esp
	movl	8(%eax),%ebp		# restore ebp
	movl	12(%eax),%esi		# restore esi
	movl	16(%eax),%edi		# restore edi
	movl	20(%eax),%edx		# get rta
	movl	%edx,(%esp)		# put in return frame
	xorl	%eax,%eax		# return (1);
	incl	%eax
	ret
#endif /* DDB */

/*****************************************************************************/

/*
 * cpu_switchto(struct proc *old, struct proc *new)
 * Switch from the "old" proc to the "new" proc.  If "old" is NULL, we
 * don't need to bother saving old context.
 */
ENTRY(cpu_switchto)
	pushl	%ebx
	pushl	%esi
	pushl	%edi

	movl	16(%esp), %esi
	movl	20(%esp), %edi

	/* If old process exited, don't bother. */
	testl	%esi,%esi
	jz	switch_exited

	/* Save old stack pointers. */
	movl	P_ADDR(%esi),%ebx
	movl	%esp,PCB_ESP(%ebx)
	movl	%ebp,PCB_EBP(%ebx)

switch_exited:
	/* Restore saved context. */

	/* No interrupts while loading new state. */
	cli

	/* Record new process. */
	movl	%edi, CPUVAR(CURPROC)
	movb	$SONPROC, P_STAT(%edi)

	/* Restore stack pointers. */
	movl	P_ADDR(%edi),%ebx
	movl	PCB_ESP(%ebx),%esp
	movl	PCB_EBP(%ebx),%ebp

	/* Record new pcb. */
	movl	%ebx, CPUVAR(CURPCB)

	/* record the bits needed for future U-->K transition */
	movl	PCB_KSTACK(%ebx),%eax
	subl	$FRAMESIZE,%eax
	movl	%eax,CPUVAR(KERN_ESP)

	/*
	 * Activate the address space.  The pcb copy of %cr3 will
	 * be refreshed from the pmap, and because we're
	 * curproc they'll both be reloaded into the CPU.
	 */
	pushl	%edi
	pushl	%esi
	call	pmap_switch
	addl	$8,%esp

	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%ebx),%ecx
#ifdef MULTIPROCESSOR
	/*
	 * If our floating point registers are on a different CPU,
	 * clear CR0_TS so we'll trap rather than reuse bogus state.
	 */
	movl	CPUVAR(SELF), %esi
	cmpl	PCB_FPCPU(%ebx), %esi
	jz	1f
	orl	$CR0_TS,%ecx
1:
#endif
	movl	%ecx,%cr0

	/* Interrupts are okay again. */
	sti

	popl	%edi
	popl	%esi
	popl	%ebx
	ret
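/*
 * Illustrative only: cpu_switchto above, restated as rough C under the
 * same assumptions the assembly makes (curproc/curpcb/kern_esp are
 * per-CPU fields):
 *
 *	if (old != NULL)
 *		save %esp/%ebp into old's pcb;
 *	intr_disable();
 *	curproc = new; new->p_stat = SONPROC;
 *	load %esp/%ebp from new's pcb; curpcb = new's pcb;
 *	kern_esp = pcb_kstack - FRAMESIZE;	/+ for future U->K entry +/
 *	pmap_switch(old, new);			/+ reloads %cr3 +/
 *	restore %cr0, forcing CR0_TS if the FPU state lives on
 *	    another CPU so first FPU use traps;
 *	intr_enable();
 */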
ENTRY(cpu_idle_enter)
	movl	cpu_idle_enter_fcn,%eax
	cmpl	$0,%eax
	je	1f
	jmpl	*%eax
1:
	ret

ENTRY(cpu_idle_cycle)
	movl	cpu_idle_cycle_fcn,%eax
	cmpl	$0,%eax
	je	1f
	call	*%eax
	ret
1:
	sti
	hlt
	ret

ENTRY(cpu_idle_leave)
	movl	cpu_idle_leave_fcn,%eax
	cmpl	$0,%eax
	je	1f
	jmpl	*%eax
1:
	ret

/*
 * savectx(struct pcb *pcb);
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	4(%esp),%edx		# edx = p->p_addr

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)

	movl	PCB_FLAGS(%edx),%ecx
	orl	$PCB_SAVECTX,%ecx
	movl	%ecx,PCB_FLAGS(%edx)

	ret

/*****************************************************************************/

/*
 * Trap and fault vector routines
 *
 * On exit from the kernel to user mode, we always need to check for ASTs.
 * In addition, we need to do this atomically; otherwise an interrupt may
 * occur which causes an AST, but it won't get processed until the next
 * kernel entry (possibly the next clock tick).  Thus, we disable interrupts
 * before checking, and only enable them again on the final `iret' or before
 * calling the AST handler.
 */

#define TRAP(a)		pushl $(a) ; jmp alltraps
#define ZTRAP(a)	pushl $0 ; TRAP(a)

IDTVEC(div)
	ZTRAP(T_DIVIDE)
IDTVEC(dbg)
	subl	$4,%esp
	pushl	%eax
	movl	%dr6,%eax
	movl	%eax,4(%esp)
	andb	$~0xf,%al
	movl	%eax,%dr6
	popl	%eax
	TRAP(T_TRCTRAP)
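/*
 * Background note (illustrative; the gate setup lives in the MD init
 * code, not here): the NMI handler below is entered through a task
 * gate so that, even if the NMI interrupts user mode while the kernel
 * is unmapped (Meltdown mitigation), the hardware task switch loads a
 * known-good %cr3 and stack.  The interrupted register state is left
 * in the common TSS, from which the handler reconstructs a normal
 * trapframe before joining alltraps.
 */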
IDTVEC(nmi)
	/*
	 * we came through a task gate; now U+K of the idle thread is
	 * enabled; NMIs are blocked until next iret; IRQs are disabled;
	 * all segment descriptors are usable
	 *
	 * first of all, switch back to the U+K we were actually running
	 * on before
	 */
	movl	CPUVAR(CURPMAP),%eax
	movl	PM_PDIRPA(%eax),%eax
	movl	%eax,%cr3

	/*
	 * when we came from within the kernel, iret will not
	 * switch back to the stack we came from but will keep
	 * running on the NMI stack.  in that case we switch
	 * manually back to the stack we were running on and
	 * build the iretframe there.
	 */

	/* was there a ring transition? */
	movl	CPUVAR(TSS),%eax
	testb	$SEL_RPL,TSS_CS(%eax)
	jne	1f

	/*
	 * no ring transition, switch back to original stack, build
	 * frame from state saved in TSS.
	 */
	movl	TSS_ESP(%eax),%esp
	subl	$12,%esp
	movl	TSS_EFLAGS(%eax),%ebx
	movl	%ebx,8(%esp)
	movl	TSS_CS(%eax),%ebx
	movl	%ebx,4(%esp)
	movl	TSS_EIP(%eax),%ebx
	movl	%ebx,0(%esp)
	pushl	$0
	pushl	$T_NMI
	jmp	2f

	/*
	 * ring transition, stay on stack, build frame from state
	 * saved in TSS.
	 */
1:	subl	$20,%esp
	pushl	$0
	pushl	$T_NMI
	movl	TSS_SS(%eax),%ebx
	movl	%ebx,IRF_SS(%esp)
	movl	TSS_ESP(%eax),%ebx
	movl	%ebx,IRF_ESP(%esp)
	movl	TSS_EFLAGS(%eax),%ebx
	movl	%ebx,IRF_EFLAGS(%esp)
	movl	TSS_CS(%eax),%ebx
	movl	%ebx,IRF_CS(%esp)
	movl	TSS_EIP(%eax),%ebx
	movl	%ebx,IRF_EIP(%esp)

	/* clear PSL_NT */
2:	pushfl
	popl	%eax
	andl	$~PSL_NT,%eax
	pushl	%eax
	popfl

	/* clear CR0_TS XXX hshoexer: needed? */
	movl	%cr0,%eax
	andl	$~CR0_TS,%eax
	movl	%eax,%cr0

	/* unbusy descriptors and reload common TSS */
	movl	CPUVAR(GDT),%eax
	movl	$GSEL(GNMITSS_SEL, SEL_KPL),%ebx
	andl	$~0x200,4-SEL_KPL(%eax,%ebx,1)
	movl	$GSEL(GTSS_SEL, SEL_KPL),%ebx
	andl	$~0x200,4-SEL_KPL(%eax,%ebx,1)
	ltr	%bx

	/* load GPRs and segment registers with saved values from common TSS */
	movl	CPUVAR(TSS),%eax
	movl	TSS_ECX(%eax),%ecx
	movl	TSS_EDX(%eax),%edx
	movl	TSS_ESI(%eax),%esi
	movl	TSS_EDI(%eax),%edi
	movl	TSS_EBP(%eax),%ebp
	movw	TSS_FS(%eax),%fs
	movw	TSS_GS(%eax),%gs
	movw	TSS_ES(%eax),%es
	/* saved %ds might be invalid, thus push now and pop later */
	movl	TSS_DS(%eax),%ebx
	pushl	%ebx
	movl	TSS_EBX(%eax),%ebx
	movl	TSS_EAX(%eax),%eax
	popl	%ds

	/*
	 * we can now proceed and save everything on the stack as
	 * if no task switch had happened.
	 */
	jmp	alltraps
IDTVEC(bpt)
	ZTRAP(T_BPTFLT)
IDTVEC(ofl)
	ZTRAP(T_OFLOW)
IDTVEC(bnd)
	ZTRAP(T_BOUND)
IDTVEC(ill)
	ZTRAP(T_PRIVINFLT)
IDTVEC(dna)
#if NNPX > 0
	pushl	$0			# dummy error code
	pushl	$T_DNA
	INTRENTRY(dna)
	sti
	pushl	CPUVAR(SELF)
	call	*npxdna_func
	addl	$4,%esp
	testl	%eax,%eax
	jz	calltrap
#ifdef DIAGNOSTIC
	movl	$0xfd,%esi
#endif
	cli
	INTRFASTEXIT
#else
	ZTRAP(T_DNA)
#endif
IDTVEC(dble)
	TRAP(T_DOUBLEFLT)
IDTVEC(fpusegm)
	ZTRAP(T_FPOPFLT)
IDTVEC(tss)
	TRAP(T_TSSFLT)
IDTVEC(missing)
	TRAP(T_SEGNPFLT)
IDTVEC(stk)
	TRAP(T_STKFLT)

IDTVEC(prot)
	pushl	$T_PROTFLT
	/* If iret faults, we'll get a trap at doreti_iret+3 with CPL == 0. */
	pushl	%eax
	leal	doreti_iret+3,%eax
	cmpl	%eax,12(%esp)	/* over %eax, trapno and err to %eip */
	popl	%eax
	jne	97f
	pushl	%ebp
	pushl	%eax
	pushl	%fs
	INTR_ENABLE_U_PLUS_K
	/*
	 * we have an iretframe on trampoline stack, above it the
	 * remainder of the original iretframe iret faulted on.
	 */
	movl	CPUVAR(KERN_ESP),%eax
	pushl	%eax
	pushl	$0xdeadbeef
	/*
	 * now we have a trampframe on trampoline stack, above it the
	 * remainder of the original iretframe iret faulted on.
	 */
	movl	%esp,%ebp
	movl	%eax,%esp
	subl	$SIZEOF_IRETFRAME+(5*4),%esp
	/* copy to iretframe on kernel stack */
	movl	TRF_EFLAGS(%ebp),%eax
	movl	%eax,IRF_EFLAGS(%esp)
	movl	TRF_CS(%ebp),%eax
	movl	%eax,IRF_CS(%esp)
	movl	TRF_EIP(%ebp),%eax
	movl	%eax,IRF_EIP(%esp)
	movl	TRF_ERR(%ebp),%eax
	movl	%eax,IRF_ERR(%esp)
	movl	TRF_TRAPNO(%ebp),%eax
	movl	%eax,IRF_TRAPNO(%esp)
	/* copy remainder of faulted iretframe */
	movl	40(%ebp),%eax		/* eip */
	movl	%eax,20(%esp)
	movl	44(%ebp),%eax		/* cs */
	movl	%eax,24(%esp)
	movl	48(%ebp),%eax		/* eflags */
	movl	%eax,28(%esp)
	movl	52(%ebp),%eax		/* esp */
	movl	%eax,32(%esp)
	movl	56(%ebp),%eax		/* ss */
	movl	%eax,36(%esp)
	movl	TRF_FS(%ebp),%eax
	movw	%ax,%fs
	movl	TRF_EAX(%ebp),%eax
	movl	TRF_EBP(%ebp),%ebp
	/*
	 * we have an iretframe on kernel stack, above it the
	 * remainder of the original iretframe iret faulted on.
	 * for INTRENTRY(prot) it looks like the fault happened
	 * on the kernel stack
	 */
97:	INTRENTRY(prot)
	sti
	jmp	calltrap
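/*
 * Background note (illustrative): the handler below is part of the
 * Pentium F00F bug workaround.  The IDT is relocated so that the
 * locked vector read performed by the bugged "lock cmpxchg8b" encoding
 * raises a page fault instead of wedging the bus; if the faulting
 * address lands on IDT entry 6 (#UD), the page fault is rewritten into
 * the privileged-instruction trap the instruction should have raised.
 */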
IDTVEC(f00f_redirect)
	pushl	$T_PAGEFLT
	INTRENTRY(f00f_redirect)
	sti
	testb	$PGEX_U,TF_ERR(%esp)
	jnz	calltrap
	movl	%cr2,%eax
	subl	idt,%eax
	cmpl	$(6*8),%eax
	jne	calltrap
	movb	$T_PRIVINFLT,TF_TRAPNO(%esp)
	jmp	calltrap
IDTVEC(page)
	TRAP(T_PAGEFLT)
IDTVEC(rsvd)
	ZTRAP(T_RESERVED)
IDTVEC(mchk)
	ZTRAP(T_MACHK)
IDTVEC(simd)
	ZTRAP(T_XFTRAP)
IDTVEC(intrspurious)
	/*
	 * The Pentium Pro local APIC may erroneously call this vector for a
	 * default IR7.  Just ignore it.
	 *
	 * (The local APIC does this when CPL is raised while it's on the
	 * way to delivering an interrupt.. presumably enough has been set
	 * up that it's inconvenient to abort delivery completely..)
	 */
	iret
IDTVEC(fpu)
#if NNPX > 0
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	subl	$8,%esp			/* space for tf_{err,trapno} */
	INTRENTRY(fpu)
	sti
	pushl	CPL			# if_ppl in intrframe
	pushl	%esp			# push address of intrframe
	incl	uvmexp+V_TRAP
	call	npxintr
	addl	$8,%esp			# pop address and if_ppl
#ifdef DIAGNOSTIC
	movl	$0xfc,%esi
#endif
	cli
	INTRFASTEXIT
#else
	ZTRAP(T_ARITHTRAP)
#endif
IDTVEC(align)
	TRAP(T_ALIGNFLT)
	/* 18 - 31 reserved for future expansion */

/*
 * If an error is detected during trap, syscall, or interrupt exit, trap()
 * will change %eip to point to one of these labels.  We clean up the stack,
 * if necessary, and resume as if we were handling a general protection
 * fault.  This will cause the process to get a SIGBUS.
 */
KUENTRY(resume_iret)
	ZTRAP(T_PROTFLT)
NENTRY(resume_pop_ds)
	pushl	%es
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%es
NENTRY(resume_pop_es)
	pushl	%gs
	xorl	%eax,%eax	/* $GSEL(GNULL_SEL, SEL_KPL) == 0 */
	movw	%ax,%gs
NENTRY(resume_pop_gs)
	pushl	%fs
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs
NENTRY(resume_pop_fs)
	movl	$T_PROTFLT,TF_TRAPNO(%esp)
	sti
	jmp	calltrap
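/*
 * Illustrative note on the resume_pop_* labels above: INTR_RESTORE_ALL
 * pops %fs, %gs, %es, %ds in that order, and a bogus user selector can
 * make one of those pops fault.  Each label re-pushes the selectors
 * already consumed (rebuilding the trapframe from tf_fs up) while
 * loading known-good kernel values into the live registers, then falls
 * through until the whole thing is reported as T_PROTFLT.
 */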
/*
 * All traps go through here.  Call the generic trap handler, and
 * check for ASTs afterwards.
 */
KUENTRY(alltraps)
	INTRENTRY(alltraps)
	sti
calltrap:
#ifdef DIAGNOSTIC
	movl	CPL,%ebx
#endif /* DIAGNOSTIC */
#if !defined(GPROF) && defined(DDBPROF)
	cmpl	$T_BPTFLT,TF_TRAPNO(%esp)
	jne	.Lreal_trap

	pushl	%esp
	subl	$4, %esp
	pushl	%eax
	leal	dt_prov_kprobe, %eax
	movl	%eax, 4(%esp)
	popl	%eax
	call	dt_prov_kprobe_hook
	addl	$8, %esp
	cmpl	$0, %eax
	je	.Lreal_trap

	/*
	 * Abuse the error field to indicate that INTRFASTEXIT needs
	 * to emulate the patched instruction.
	 */
	cmpl	$1, %eax
	je	.Lset_emulate_push_rbp

	cmpl	$2, %eax
	je	.Lset_emulate_ret

.Lset_emulate_push_rbp:
	movl	$INTR_FAKE_TRAP_PUSH_RBP, TF_ERR(%esp)
	jmp	.Lalltraps_check_asts
.Lset_emulate_ret:
	movl	$INTR_FAKE_TRAP_POP_RBP, TF_ERR(%esp)
	jmp	.Lalltraps_check_asts
.Lreal_trap:
#endif /* !defined(GPROF) && defined(DDBPROF) */
	pushl	%esp
	call	trap
	addl	$4,%esp

.Lalltraps_check_asts:
	/* Check for ASTs on exit to user mode. */
	cli
	CHECK_ASTPENDING(%ecx)
	je	1f
	testb	$SEL_RPL,TF_CS(%esp)
	jz	1f
5:	CLEAR_ASTPENDING(%ecx)
	sti
	pushl	%esp
	call	ast
	addl	$4,%esp
	jmp	.Lalltraps_check_asts
1:
#if !defined(GPROF) && defined(DDBPROF)
	/*
	 * If we are returning from a probe trap we need to fix the
	 * stack layout and emulate the patched instruction.
	 *
	 * The code below does that by trashing %eax, so it MUST be
	 * restored afterward.
	 */
	cmpl	$INTR_FAKE_TRAP_PUSH_RBP, TF_ERR(%esp)
	je	.Lprobe_fixup_push_rbp
	cmpl	$INTR_FAKE_TRAP_POP_RBP, TF_ERR(%esp)
	je	.Lprobe_fixup_pop_rbp
#endif /* !defined(GPROF) && defined(DDBPROF) */
#ifndef DIAGNOSTIC
	INTRFASTEXIT
#else
	cmpl	CPL,%ebx
	jne	3f
#ifdef DIAGNOSTIC
	movl	$0xfb,%esi
#endif
	INTRFASTEXIT
3:	sti
	pushl	$spl_lowered
	call	printf
	addl	$4,%esp
#if defined(DDB) && 0
	int	$3
#endif /* DDB */
	movl	%ebx,CPL
	jmp	.Lalltraps_check_asts

	.section .rodata
spl_lowered:
	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT\n"
#endif /* DIAGNOSTIC */
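/*
 * Illustrative sketch of the "pushl %ebp" emulation performed below:
 * after INTR_RESTORE_ALL the frame (from %esp upward) is
 * [err][trapno][eip][cs][eflags].  The handler parks %eax in the err
 * slot, shifts eip/cs/eflags down one slot, stores %ebp in the freed
 * top slot, and pops %eax back, leaving [eip][cs][eflags][ebp].  The
 * iret then resumes with %esp pointing at the stored %ebp, exactly as
 * if the patched "pushl %ebp" had executed.
 */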
	.text
#if !defined(GPROF) && defined(DDBPROF)
.Lprobe_fixup_push_rbp:
	/* Restore all registers, unwinding the stack. */
	INTR_RESTORE_ALL

	/*
	 * Use the space left by ``err'' and ``trapno'' to emulate
	 * "pushl %ebp".
	 *
	 * Temporarily save %eax.
	 */
	movl	%eax,0(%esp)

	/* Shift hardware-saved registers: eip, cs, eflags */
	movl	8(%esp),%eax
	movl	%eax,4(%esp)
	movl	12(%esp),%eax
	movl	%eax,8(%esp)
	movl	16(%esp),%eax
	movl	%eax,12(%esp)

	/* Store %ebp in the expected location to finish the emulation. */
	movl	%ebp,16(%esp)

	popl	%eax
	iret
.Lprobe_fixup_pop_rbp:
	/* Restore all registers, unwinding the stack. */
	INTR_RESTORE_ALL

	movl	%eax, 0(%esp)

	/* pop %ebp */
	movl	20(%esp), %ebp
	/* Shift hardware-saved registers: eflags, cs, eip */
	movl	16(%esp), %eax
	movl	%eax, 20(%esp)
	movl	12(%esp), %eax
	movl	%eax, 16(%esp)
	movl	8(%esp), %eax
	movl	%eax, 12(%esp)

	/* Pop eax and restore the stack pointer */
	popl	%eax
	addl	$8, %esp
	iret
#endif /* !defined(GPROF) && defined(DDBPROF) */

	.text
#ifdef DIAGNOSTIC
.Lintr_exit_not_blocked:
	movl	warn_once,%eax
	testl	%eax,%eax
	jnz	1f
	incl	%eax
	movl	%eax,warn_once
	pushl	%esi		/* marker indicating where we came from */
	pushl	%edx		/* EFLAGS are in %edx */
	pushl	$.Lnot_blocked
	call	printf
	addl	$12,%esp
#ifdef DDB
	int	$3
#endif /* DDB */
1:	cli
	jmp	intr_fast_exit

	.data
	.global	warn_once
warn_once:
	.long	0
	.section .rodata
.Lnot_blocked:
	.asciz	"WARNING: INTERRUPTS NOT BLOCKED ON INTERRUPT RETURN 0x%x 0x%x\n"
	.text
#endif

/*
 * Trap gate entry for syscall
 */
IDTVEC(syscall)
	subl	$8,%esp			/* space for tf_{err,trapno} */
	INTRENTRY(syscall)
	sti
	pushl	%esp
	call	syscall
	addl	$4,%esp

.Lsyscall_check_asts:
	/* Check for ASTs on exit to user mode. */
	cli
	CHECK_ASTPENDING(%ecx)
	je	1f
	/* Always returning to user mode here. */
	CLEAR_ASTPENDING(%ecx)
	sti
	pushl	%esp
	call	ast
	addl	$4,%esp
	jmp	.Lsyscall_check_asts
1:
#ifdef DIAGNOSTIC
	movl	$0xff,%esi
#endif
	jmp	intr_fast_exit

NENTRY(intr_fast_exit)
#ifdef DIAGNOSTIC
	pushfl
	popl	%edx
	testl	$PSL_I,%edx
	jnz	.Lintr_exit_not_blocked
#endif
	/* we have a full trapframe */
	INTR_RESTORE_ALL
	/* now we have an iretframe */
	testb	$SEL_RPL,IRF_CS(%esp)
	/* recursing into kernel: stay on kernel stack using iretframe */
	je	doreti_iret

	/* leaving kernel: build trampframe on cpu stack */
	pushl	%ebp
	pushl	%eax
	pushl	%fs
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs
	movl	CPUVAR(INTR_ESP),%eax
	pushl	%eax
	pushl	$0xcafecafe
	/* now we have a trampframe, copy frame to cpu stack */
	movl	%eax,%ebp
	movl	TRF_EIP(%esp),%eax
	movl	%eax,TRF_EIP(%ebp)
	movl	TRF_CS(%esp),%eax
	movl	%eax,TRF_CS(%ebp)
	movl	TRF_EFLAGS(%esp),%eax
	movl	%eax,TRF_EFLAGS(%ebp)
	movl	TRF_ESP(%esp),%eax
	movl	%eax,TRF_ESP(%ebp)
	movl	TRF_SS(%esp),%eax
	movl	%eax,TRF_SS(%ebp)
	movl	TRF__DEADBEEF(%esp),%eax
	movl	%eax,TRF__DEADBEEF(%ebp)
	movl	TRF__KERN_ESP(%esp),%eax
	movl	%eax,TRF__KERN_ESP(%ebp)
	movl	TRF_FS(%esp),%eax
	movl	%eax,TRF_FS(%ebp)
	movl	TRF_EAX(%esp),%eax
	movl	%eax,TRF_EAX(%ebp)
	movl	TRF_EBP(%esp),%eax
	movl	%eax,TRF_EBP(%ebp)
	/* switch to cpu stack, where we copied the trampframe */
	movl	%ebp,%esp
	movl	CPUVAR(USER_CR3),%eax
	testl	%eax,%eax
	jz	1f
	jmp	iret_tramp

KUENTRY(iret_tramp)
	movl	%eax,%cr3
	/* we have a trampframe; restore registers and adjust to iretframe */
1:	popl	%eax
	popl	%eax
	popl	%fs
	popl	%eax
	popl	%ebp
	.globl	doreti_iret
doreti_iret:
	/* we have an iretframe */
	addl	$IRF_EIP,%esp
	iret

#include <i386/i386/vector.s>
#include <i386/isa/icu.s>
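/*
 * Illustrative note for sse2_pagezero below: movnti is a non-temporal
 * (write-combining) store, so zeroing a page does not evict a page's
 * worth of useful cache lines; the trailing sfence orders the weakly
 * ordered stores before the zeroed page is handed out.  Rough C shape
 * of the loop:
 *
 *	for (p = page; p < page + 4096; p += 4)
 *		nontemporal_store32(p, 0);	/+ movnti +/
 */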
#if !defined(SMALL_KERNEL)
ENTRY(sse2_pagezero)
	pushl	%ebx
	movl	8(%esp),%ecx
	movl	%ecx,%eax
	addl	$4096,%eax
	xor	%ebx,%ebx
1:
	movnti	%ebx,(%ecx)
	addl	$4,%ecx
	cmpl	%ecx,%eax
	jne	1b
	sfence
	popl	%ebx
	ret

ENTRY(i686_pagezero)
	pushl	%edi
	pushl	%ebx

	movl	12(%esp), %edi
	movl	$1024, %ecx

	.align	4,0x90
1:
	xorl	%eax, %eax
	repe
	scasl
	jnz	2f

	popl	%ebx
	popl	%edi
	ret

	.align	4,0x90
2:
	incl	%ecx
	subl	$4, %edi

	movl	%ecx, %edx
	cmpl	$16, %ecx

	jge	3f

	movl	%edi, %ebx
	andl	$0x3f, %ebx
	shrl	%ebx
	shrl	%ebx
	movl	$16, %ecx
	subl	%ebx, %ecx

3:
	subl	%ecx, %edx
	rep
	stosl

	movl	%edx, %ecx
	testl	%edx, %edx
	jnz	1b

	popl	%ebx
	popl	%edi
	ret
#endif

/*
 * int cpu_paenable(void *);
 */
ENTRY(cpu_paenable)
	movl	$-1, %eax
	testl	$CPUID_PAE, cpu_feature
	jz	1f

	pushl	%esi
	pushl	%edi
	movl	12(%esp), %esi
	movl	%cr3, %edi
	orl	$0xfe0, %edi		/* PDPT will be in the last four slots! */
	movl	%edi, %cr3
	addl	$KERNBASE, %edi		/* and make it back virtual again */
	movl	$8, %ecx
	rep
	movsl

	movl	$MSR_EFER, %ecx
	rdmsr
	orl	$EFER_NXE, %eax
	wrmsr

	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4		/* BANG!!! */

	movl	12(%esp), %eax
	subl	$KERNBASE, %eax
	movl	%eax, %cr3		/* reload real PDPT */
	movl	$4*NBPG, %eax
	movl	%eax, PTDsize

	xorl	%eax, %eax
	popl	%edi
	popl	%esi
1:
	ret

#if NLAPIC > 0
#include <i386/i386/apicvec.s>
#endif

	.section .rodata
	.globl	_stac
_stac:
	stac

	.globl	_clac
_clac:
	clac
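/*
 * Illustrative note: _stac and _clac above are the template bytes the
 * boot-time codepatch pass copies over each SMAP_NOP site tagged
 * CPTAG_STAC/CPTAG_CLAC once SMAP support has been detected; the
 * patching entry points themselves live in the codepatch machinery
 * (see machine/codepatch.h), not in this file.
 */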