/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special x86_64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _CPU_CPUFUNC_H_
#define _CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <machine/psl.h>

struct thread;
struct region_descriptor;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

#ifdef __GNUC__

#ifdef SMP
#include <machine/lock.h>		/* XXX */
#endif

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsflong(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{
	__asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
#ifdef SMP
	__asm __volatile("mfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
#ifdef SMP
	__asm __volatile("lfence" : : : "memory");
#else
	__asm __volatile("" : : : "memory");
#endif
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
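/*
 * Illustrative sketch (not part of this header's API, guarded out): the
 * usual producer/consumer pairing of the fences above.  The producer
 * orders its payload write before the flag write; the consumer orders
 * the flag read before the payload read.  'example_data' and
 * 'example_flag' are hypothetical names invented for this sketch.
 */
#if 0
static volatile int example_data;
static volatile int example_flag;

static __inline void
example_produce(int v)
{
	example_data = v;
	cpu_sfence();		/* data write must be visible before flag */
	example_flag = 1;
}

static __inline int
example_consume(void)
{
	while (example_flag == 0)
		cpu_pause();
	cpu_lfence();		/* flag read must complete before data read */
	return (example_data);
}
#endif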
/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before it
 * returns to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core system built around an Opteron 6168 (Id = 0x100f91
 * Stepping = 1).  The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the amd cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
	__asm __volatile("nop" : : : "memory");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#endif /* _KERNEL */
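/*
 * Illustrative sketch (not part of this header's API, guarded out): the
 * _KERNEL ffs()/fls() inlines above are 1-based and return 0 for a zero
 * mask, so ffs(0x18) == 4 and fls(0x18) == 5.  'example_highbit_mask'
 * is a hypothetical helper invented for this sketch.
 */
#if 0
static __inline int
example_highbit_mask(int mask)
{
	/* isolate the highest set bit; yields 0 for an empty mask */
	return (mask == 0 ? 0 : 1 << (fls(mask) - 1));
}
#endif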
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({					\
	u_char	_data;							\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	    && (port) < 0x10000)					\
		_data = inbc(port);					\
	else								\
		_data = inbv(port);					\
	_data; })

#define	outb(port, data) (						\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100	\
	&& (port) < 0x10000						\
	? outbc(port, data) : outbv(port, data))

static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}
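/*
 * Illustrative sketch (not part of this header's API, guarded out): with
 * a constant port below 0x100 the inb()/outb() macros above select the
 * inbc()/outbc() variants, encoding the port as an immediate; a variable
 * port falls back to inbv()/outbv() through %dx.  Port 0x64 (the
 * standard PC keyboard controller status port) is used purely as an
 * example of a constant port.
 */
#if 0
static __inline u_char
example_kbc_status(void)
{
	return (inb(0x64));	/* constant port: assembles to inb $0x64,%al */
}

static __inline u_char
example_read_port(u_int port)
{
	return (inb(port));	/* variable port: assembles to inb %dx,%al */
}
#endif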
#if defined(_KERNEL)

/*
 * If we are not a true-SMP box then smp_invltlb() is a NOP.  Note that this
 * will cause the invl*() functions to be equivalent to the cpu_invl*()
 * functions.
 */
#ifdef SMP
void smp_invltlb(void);
void smp_invltlb_intr(void);
#else
#define smp_invltlb()
#endif

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int	result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			 : "=&r" (result) : "m" (*addr));
	return (result);
}

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define _RDTSC_SUPPORTED_

static __inline u_int64_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((u_int64_t)high << 32));
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
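/*
 * Illustrative sketch (not part of this header's API, guarded out): a
 * read-modify-write of an MSR built from the rdmsr()/wrmsr() inlines
 * above.  rdmsr() reassembles the %edx:%eax halves into a 64-bit value
 * and wrmsr() splits it back apart.  'example_msr_set_bits' is a
 * hypothetical helper invented for this sketch.
 */
#if 0
static __inline void
example_msr_set_bits(u_int msr, u_int64_t bits)
{
	/* set 'bits' in 'msr' while preserving all other bits */
	wrmsr(msr, rdmsr(msr) | bits);
}
#endif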
static __inline void
load_cr0(u_long data)
{
	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}
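/*
 * Illustrative sketch (not part of this header's API, guarded out):
 * flushing stale translations after a page table update.  A single page
 * is cheapest with invlpg(), which works even on PG_G entries; a %cr3
 * reload via cpu_invltlb() flushes everything except global entries.
 * Both act on this cpu only; on SMP, other cpus still need
 * smp_invltlb().  'va' is a hypothetical kernel virtual address.
 */
#if 0
static __inline void
example_pte_updated(u_long va)
{
	invlpg(va);		/* flush only the translation for 'va' */
}

static __inline void
example_many_ptes_updated(void)
{
	cpu_invltlb();		/* reload %cr3: flush all non-global entries */
}
#endif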
#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
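/*
 * Illustrative sketch (not part of this header's API, guarded out): the
 * standard save/disable/restore pattern using intr_disable() and
 * intr_restore() above.  Because intr_restore() puts back the saved
 * RFLAGS (including the prior IF state), the pattern nests safely.
 * 'example_critical' is a hypothetical function invented for this sketch.
 */
#if 0
static __inline void
example_critical(void)
{
	register_t rflags;

	rflags = intr_disable();	/* save RFLAGS, then cli */
	/* ... touch interrupt-sensitive state here ... */
	intr_restore(rflags);		/* restore the prior IF state */
}
#endif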
#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg(u_int addr);
void	invlpg_range(u_int start, u_int end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_int cr0);
void	load_cr3(u_int cr3);
void	load_cr4(u_int cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int	rcr0(void);
u_int	rcr2(void);
u_int	rcr3(void);
u_int	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
u_int64_t rdtsc(void);
u_int	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
void	reset_dbregs(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */