/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
 */

#include <machine/asmacros.h>
#include <machine/pmap.h>

#include "assym.s"

	ALIGN_DATA

	.text

/*
 * bzero(ptr:%rdi, bytes:%rsi)
 *
 * Using rep stosq is 70% faster than a %rax loop and almost as fast as
 * a %xmm0 loop on a modern intel cpu.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
ENTRY(bzero)
	movq	%rsi,%rcx		/* byte count */
	xorl	%eax,%eax		/* zero fill value */
	shrq	$3,%rcx			/* store 8-byte words first */
	cld
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx			/* leftover bytes (0-7)? */
	jnz	1f
	ret
1:	rep
	stosb
	ret
END(bzero)

/*
 * pagezero(ptr:%rdi)
 *
 * Using rep stosq is nearly as fast as using %xmm0 on a modern intel cpu,
 * and about 70% faster than a %rax loop.
 *
 * Do not use non-temporal instructions here as we do not know the caller's
 * intent.
 */
#if 0

ENTRY(pagezero)
	movq	$PAGE_SIZE>>3,%rcx
	xorl	%eax,%eax
	cld
	rep
	stosq
	ret
END(pagezero)

#endif

ENTRY(pagezero)
	addq	$4096,%rdi		/* point past the page ... */
	movq	$-4096,%rax		/* ... and index back from the end */
	ALIGN_TEXT
1:
	movq	$0,(%rdi,%rax,1)
	addq	$8,%rax			/* 8 bytes per store, stop at 0 */
	jne	1b
	ret
END(pagezero)

/*
 * bcmp(ptr:%rdi, ptr:%rsi, bytes:%rdx)
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* compare 8-byte words first */
	cld				/* compare forwards */
	repe
	cmpsq
	jne	1f

	movq	%rdx,%rcx
	andq	$7,%rcx			/* leftover bytes (0-7)? */
	je	1f
	repe
	cmpsb
1:
	setne	%al			/* 0 if equal, 1 if different */
	movsbl	%al,%eax
	ret
END(bcmp)

/*
 * bcopy(src:%rdi, dst:%rsi, cnt:%rdx)
 *
 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi		/* movs wants %rsi=src, %rdi=dst */
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax		/* overlapping && src < dst? */
	jb	2f

	cld				/* nope, copy forwards */
	shrq	$3,%rcx			/* copy by 64-bit words */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	jnz	1f
	ret
1:	rep
	movsb
	ret

	ALIGN_TEXT
2:
	addq	%rcx,%rdi		/* copy backwards */
	addq	%rcx,%rsi
	std
	decq	%rdi
	decq	%rsi
	andq	$7,%rcx			/* any fractional bytes? */
	jz	3f
	rep
	movsb
3:	movq	%rdx,%rcx		/* copy by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi
	subq	$7,%rdi
	rep
	movsq
	cld				/* restore default direction flag */
	ret
END(bcopy)

/*
 * reset_dbregs(void)
 *
 * Disable all hardware breakpoints by clearing the x86 debug registers.
 */
ENTRY(reset_dbregs)
	movq	$0x200,%rax		/* the manual says that bit 10 must be set to 1 */
	movq	%rax,%dr7		/* disable all breakpoints first */
	movq	$0,%rax
	movq	%rax,%dr0
	movq	%rax,%dr1
	movq	%rax,%dr2
	movq	%rax,%dr3
	movq	%rax,%dr6
	ret
END(reset_dbregs)

/*
 * memcpy(dst:%rdi, src:%rsi, bytes:%rdx)
 *
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	movq	%rdi,%r8		/* save dst; memcpy returns it */
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* copy by 64-bit words */
	cld				/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	jnz	1f
	movq	%r8,%rax
	ret
1:	rep
	movsb
	movq	%r8,%rax
	ret
END(memcpy)

/* fillw(pat, base, cnt) */
/*       %rdi,%rsi, %rdx */
ENTRY(fillw)
	movq	%rdi,%rax		/* 16-bit fill pattern (low word of %rax) */
	movq	%rsi,%rdi		/* destination */
	movq	%rdx,%rcx		/* word count */
	cld
	rep
	stosw
	ret
END(fillw)

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel. These routines should be
 * the only places that do this.
 *
 * These routines set curpcb->onfault for the time they execute. When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
 */

/*
 * uint64_t:%rax kreadmem64(addr:%rdi)
 *
 * Read kernel or user memory with fault protection.
 * Returns (uint64_t)-1 if the access faults.
 */
ENTRY(kreadmem64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$kreadmem64fault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	(%rdi),%rax		/* the protected access */
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	ret

kreadmem64fault:
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm the fault handler */
	decq	%rax			/* return (uint64_t)-1 */
	ret
END(kreadmem64)

/*
 * std_copyout(from_kernel, to_user, len)  - MP SAFE
 *         %rdi,        %rsi,    %rdx
 */
ENTRY(std_copyout)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx		/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault
/*
 * XXX STOP USING VM_MAX_USER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault

	xchgq	%rdi,%rsi
	cld
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx			/* copy by 64-bit words */
	jz	1f
	rep
	movsq
1:	movq	%rdx,%rcx
	andq	$7,%rcx			/* leftover bytes */
	jz	done_copyout
	rep
	movsb

done_copyout:
	xorl	%eax,%eax		/* return 0 (success) */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret
END(std_copyout)

/*
 * std_copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 */
ENTRY(std_copyin)
	movq	PCPU(curthread),%rax
	movq	TD_PCB(%rax), %rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)
	movq	%rsp,PCB_ONFAULT_SP(%rax)
	testq	%rdx,%rdx		/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault		/* address wrapped */
	movq	$VM_MAX_USER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault

	xchgq	%rdi,%rsi
	cld
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* copy longword-wise */
	jz	1f
	rep
	movsq
1:	movq	%rdx,%rcx
	andq	$7,%rcx			/* copy remaining bytes */
	jz	done_copyin
	rep
	movsb

done_copyin:
	xorl	%eax,%eax		/* return 0 (success) */
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	%rax,PCB_ONFAULT(%rdx)
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(curthread),%rdx
	movq	TD_PCB(%rdx), %rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	ret
END(std_copyin)

/*
 * casu32 - Compare and set user integer.  Returns -1 or the current value.
 *          dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax		/* old */
	lock
	cmpxchgl %edx,(%rdi)		/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(casu32)

/*
 * swapu32 - Swap int in user space.  ptr = %rdi, val = %rsi
 */
ENTRY(std_swapu32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax		/* old */
	xchgl	%eax,(%rdi)

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(std_swapu32)

/*
 * casu64 - Compare and set user word.  Returns -1 or the current value.
 *          dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax		/* old */
	lock
	cmpxchgq %rdx,(%rdi)		/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(casu64)

/*
 * swapu64 - Swap long in user space.  ptr = %rdi, val = %rsi
 */
ENTRY(std_swapu64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax		/* old */
	xchgq	%rax,(%rdi)

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(std_swapu64)

/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.  All these functions are MPSAFE.
 * addr = %rdi
 */

ENTRY(std_fuword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(std_fuword64)

ENTRY(std_fuword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi		/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret
END(std_fuword32)

ENTRY(std_fubyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

	ALIGN_TEXT
fusufault:
	/* Shared fault handler for the fusu family: return -1 */
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	decq	%rax
	ret
END(std_fubyte)

/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.  All these functions are MPSAFE.
 *
 * addr = %rdi, value = %rsi
 *
 * Write a long
 */
ENTRY(std_suword64)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi		/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax		/* return 0 (success) */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret
END(std_suword64)

/*
 * Write an int
 */
ENTRY(std_suword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi		/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax		/* return 0 (success) */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret
END(std_suword32)

ENTRY(std_subyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi		/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax		/* return 0 (success) */
	movq	PCPU(curthread),%rcx	/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret
END(std_subyte)

/*
 * std_copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from from to to, stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
ENTRY(std_copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rdi = from, %rsi = to */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)
	movq	%rsp,PCB_ONFAULT_SP(%rcx)

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx
	cld

2:
	decq	%rdx
	jz	3f

	lodsb
	stosb
	orb	%al,%al			/* stop after copying the NUL */
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8		/* bytes actually copied */
	movq	%r8,(%r9)
1:
	ret
END(std_copyinstr)

/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi			/* %rsi = from, %rdi = to */
	incq	%rdx
	cld
1:
	decq	%rdx
	jz	4f
	lodsb
	stosb
	orb	%al,%al			/* stop after copying the NUL */
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8		/* bytes actually copied */
	movq	%r8,(%rcx)
7:
	ret
END(copystr)

/*
 * Handling of special x86_64 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq
END(lgdt)

/*****************************************************************************/
/* setjmp, longjmp                                                           */
/*****************************************************************************/

/*
 * setjmp(buf:%rdi) - save callee-saved registers and the return address,
 * returning 0.  A later longjmp() on the same buffer returns 1 here.
 */
ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)

/*
 * longjmp(buf:%rdi) - restore the context saved by setjmp() and resume
 * there, making that setjmp() return 1.
 */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)

/*
 * Support for reading MSRs in the safe manner.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed by %ecx.  Returns
				   hi 32 bits in %edx, lo in %eax */
	salq	$32,%rdx	/* shift %rdx left 32 bits */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax
	movq	%rax,(%rsi)
	xorq	%rax,%rax	/* return 0 (success) */
	movq	%rax,PCB_ONFAULT(%r8)
	ret
END(rdmsr_safe)

/*
 * Support for writing MSRs in the safe manner.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	movq	%rsp,PCB_ONFAULT_SP(%r8)
	movl	%edi,%ecx
	movl	%esi,%eax
	sarq	$32,%rsi
	movl	%esi,%edx
	wrmsr			/* Write MSR pointed by %ecx.  Accepts
				   hi 32 bits in %edx, lo in %eax. */
	xorq	%rax,%rax	/* return 0 (success) */
	movq	%rax,PCB_ONFAULT(%r8)
	ret
END(wrmsr_safe)

/*
 * MSR operations fault handler
 */
	ALIGN_TEXT
msr_onfault:
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)
	movl	$EFAULT,%eax
	ret