1 /*- 2 * Copyright (c) 2003 Peter Wemm. 3 * Copyright (c) 1993 The Regents of the University of California. 4 * Copyright (c) 2008 The DragonFly Project. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $ 32 */ 33 34 /* 35 * Functions to provide access to special i386 instructions. 36 * This in included in sys/systm.h, and that file should be 37 * used in preference to this. 38 */ 39 40 #ifndef _CPU_CPUFUNC_H_ 41 #define _CPU_CPUFUNC_H_ 42 43 #include <sys/cdefs.h> 44 #include <sys/thread.h> 45 #include <machine/clock.h> 46 #include <machine/psl.h> 47 #include <machine/smp.h> 48 49 struct thread; 50 struct region_descriptor; 51 struct pmap; 52 53 __BEGIN_DECLS 54 #define readb(va) (*(volatile u_int8_t *) (va)) 55 #define readw(va) (*(volatile u_int16_t *) (va)) 56 #define readl(va) (*(volatile u_int32_t *) (va)) 57 #define readq(va) (*(volatile u_int64_t *) (va)) 58 59 #define writeb(va, d) (*(volatile u_int8_t *) (va) = (d)) 60 #define writew(va, d) (*(volatile u_int16_t *) (va) = (d)) 61 #define writel(va, d) (*(volatile u_int32_t *) (va) = (d)) 62 #define writeq(va, d) (*(volatile u_int64_t *) (va) = (d)) 63 64 #ifdef __GNUC__ 65 66 #include <machine/lock.h> /* XXX */ 67 68 struct trapframe; 69 70 static __inline void 71 breakpoint(void) 72 { 73 __asm __volatile("int $3"); 74 } 75 76 static __inline void 77 cpu_pause(void) 78 { 79 __asm __volatile("pause":::"memory"); 80 } 81 82 static __inline u_int 83 bsfl(u_int mask) 84 { 85 u_int result; 86 87 __asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask)); 88 return (result); 89 } 90 91 static __inline u_long 92 bsfq(u_long mask) 93 { 94 u_long result; 95 96 __asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask)); 97 return (result); 98 } 99 100 static __inline u_long 101 bsflong(u_long mask) 102 { 103 u_long result; 104 105 __asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask)); 106 return (result); 107 } 108 109 static __inline u_int 110 bsrl(u_int mask) 111 { 112 u_int result; 113 114 __asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask)); 115 return (result); 116 } 117 118 static __inline u_long 119 bsrq(u_long mask) 120 { 121 u_long result; 122 123 __asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask)); 124 return (result); 125 } 126 127 static __inline void 128 clflush(u_long addr) 129 { 130 __asm __volatile("clflush %0" : : "m" (*(char *) addr)); 131 } 132 133 static __inline void 134 do_cpuid(u_int ax, u_int *p) 135 { 136 __asm __volatile("cpuid" 137 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3]) 138 : "0" (ax)); 139 } 140 141 static __inline void 142 cpuid_count(u_int ax, u_int cx, u_int *p) 143 { 144 __asm __volatile("cpuid" 145 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3]) 146 : "0" (ax), "c" (cx)); 147 } 148 149 #ifndef _CPU_DISABLE_INTR_DEFINED 150 151 static __inline void 152 cpu_disable_intr(void) 153 { 154 __asm __volatile("cli" : : : "memory"); 155 } 156 157 #endif 158 159 #ifndef _CPU_ENABLE_INTR_DEFINED 160 161 static __inline void 162 cpu_enable_intr(void) 163 { 164 __asm __volatile("sti"); 165 } 166 167 #endif 168 169 /* 170 * Cpu and compiler memory ordering fence. mfence ensures strong read and 171 * write ordering. 172 * 173 * A serializing or fence instruction is required here. A locked bus 174 * cycle on data for which we already own cache mastership is the most 175 * portable. 176 */ 177 static __inline void 178 cpu_mfence(void) 179 { 180 __asm __volatile("mfence" : : : "memory"); 181 } 182 183 /* 184 * cpu_lfence() ensures strong read ordering for reads issued prior 185 * to the instruction verses reads issued afterwords. 186 * 187 * A serializing or fence instruction is required here. A locked bus 188 * cycle on data for which we already own cache mastership is the most 189 * portable. 190 */ 191 static __inline void 192 cpu_lfence(void) 193 { 194 __asm __volatile("lfence" : : : "memory"); 195 } 196 197 /* 198 * cpu_sfence() ensures strong write ordering for writes issued prior 199 * to the instruction verses writes issued afterwords. Writes are 200 * ordered on intel cpus so we do not actually have to do anything. 201 */ 202 static __inline void 203 cpu_sfence(void) 204 { 205 /* 206 * NOTE: 207 * Don't use 'sfence' here, as it will create a lot of 208 * unnecessary stalls. 209 */ 210 __asm __volatile("" : : : "memory"); 211 } 212 213 /* 214 * cpu_ccfence() prevents the compiler from reordering instructions, in 215 * particular stores, relative to the current cpu. Use cpu_sfence() if 216 * you need to guarentee ordering by both the compiler and by the cpu. 217 * 218 * This also prevents the compiler from caching memory loads into local 219 * variables across the routine. 220 */ 221 static __inline void 222 cpu_ccfence(void) 223 { 224 __asm __volatile("" : : : "memory"); 225 } 226 227 /* 228 * This is a horrible, horrible hack that might have to be put at the 229 * end of certain procedures (on a case by case basis), just before it 230 * returns to avoid what we believe to be an unreported AMD cpu bug. 231 * Found to occur on both a Phenom II X4 820 (two of them), as well 232 * as a 48-core built around an Opteron 6168 (Id = 0x100f91 Stepping = 1). 233 * The problem does not appear to occur w/Intel cpus. 234 * 235 * The bug is likely related to either a write combining issue or the 236 * Return Address Stack (RAS) hardware cache. 237 * 238 * In particular, we had to do this for GCC's fill_sons_in_loop() routine 239 * which due to its deep recursion and stack flow appears to be able to 240 * tickle the amd cpu bug (w/ gcc-4.4.7). Adding a single 'nop' to the 241 * end of the routine just before it returns works around the bug. 242 * 243 * The bug appears to be extremely sensitive to %rip and %rsp values, to 244 * the point where even just inserting an instruction in an unrelated 245 * procedure (shifting the entire code base being run) effects the outcome. 246 * DragonFly is probably able to more readily reproduce the bug due to 247 * the stackgap randomization code. We would expect OpenBSD (where we got 248 * the stackgap randomization code from) to also be able to reproduce the 249 * issue. To date we have only reproduced the issue in DragonFly. 250 */ 251 #define __AMDCPUBUG_DFLY01_AVAILABLE__ 252 253 static __inline void 254 cpu_amdcpubug_dfly01(void) 255 { 256 __asm __volatile("nop" : : : "memory"); 257 } 258 259 #ifdef _KERNEL 260 261 #define HAVE_INLINE_FFS 262 263 static __inline int 264 ffs(int mask) 265 { 266 return (__builtin_ffs(mask)); 267 } 268 269 #define HAVE_INLINE_FFSL 270 271 static __inline int 272 ffsl(long mask) 273 { 274 return (__builtin_ffsl(mask)); 275 } 276 277 #define HAVE_INLINE_FLS 278 279 static __inline int 280 fls(int mask) 281 { 282 return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1); 283 } 284 285 #define HAVE_INLINE_FLSL 286 287 static __inline int 288 flsl(long mask) 289 { 290 return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1); 291 } 292 293 #define HAVE_INLINE_FLSLL 294 295 static __inline int 296 flsll(long long mask) 297 { 298 return (flsl((long)mask)); 299 } 300 301 #endif /* _KERNEL */ 302 303 static __inline void 304 halt(void) 305 { 306 __asm __volatile("hlt"); 307 } 308 309 /* 310 * The following complications are to get around gcc not having a 311 * constraint letter for the range 0..255. We still put "d" in the 312 * constraint because "i" isn't a valid constraint when the port 313 * isn't constant. This only matters for -O0 because otherwise 314 * the non-working version gets optimized away. 315 * 316 * Use an expression-statement instead of a conditional expression 317 * because gcc-2.6.0 would promote the operands of the conditional 318 * and produce poor code for "if ((inb(var) & const1) == const2)". 319 * 320 * The unnecessary test `(port) < 0x10000' is to generate a warning if 321 * the `port' has type u_short or smaller. Such types are pessimal. 322 * This actually only works for signed types. The range check is 323 * careful to avoid generating warnings. 324 */ 325 #define inb(port) __extension__ ({ \ 326 u_char _data; \ 327 if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100 \ 328 && (port) < 0x10000) \ 329 _data = inbc(port); \ 330 else \ 331 _data = inbv(port); \ 332 _data; }) 333 334 #define outb(port, data) ( \ 335 __builtin_constant_p(port) && ((port) & 0xffff) < 0x100 \ 336 && (port) < 0x10000 \ 337 ? outbc(port, data) : outbv(port, data)) 338 339 static __inline u_char 340 inbc(u_int port) 341 { 342 u_char data; 343 344 __asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port))); 345 return (data); 346 } 347 348 static __inline void 349 outbc(u_int port, u_char data) 350 { 351 __asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port))); 352 } 353 354 static __inline u_char 355 inbv(u_int port) 356 { 357 u_char data; 358 /* 359 * We use %%dx and not %1 here because i/o is done at %dx and not at 360 * %edx, while gcc generates inferior code (movw instead of movl) 361 * if we tell it to load (u_short) port. 362 */ 363 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 364 return (data); 365 } 366 367 static __inline u_int 368 inl(u_int port) 369 { 370 u_int data; 371 372 __asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port)); 373 return (data); 374 } 375 376 static __inline void 377 insb(u_int port, void *addr, size_t cnt) 378 { 379 __asm __volatile("cld; rep; insb" 380 : "+D" (addr), "+c" (cnt) 381 : "d" (port) 382 : "memory"); 383 } 384 385 static __inline void 386 insw(u_int port, void *addr, size_t cnt) 387 { 388 __asm __volatile("cld; rep; insw" 389 : "+D" (addr), "+c" (cnt) 390 : "d" (port) 391 : "memory"); 392 } 393 394 static __inline void 395 insl(u_int port, void *addr, size_t cnt) 396 { 397 __asm __volatile("cld; rep; insl" 398 : "+D" (addr), "+c" (cnt) 399 : "d" (port) 400 : "memory"); 401 } 402 403 static __inline void 404 invd(void) 405 { 406 __asm __volatile("invd"); 407 } 408 409 #if defined(_KERNEL) 410 411 #ifndef _CPU_INVLPG_DEFINED 412 413 /* 414 * Invalidate a particular VA on this cpu only 415 * 416 * TLB flush for an individual page (even if it has PG_G). 417 * Only works on 486+ CPUs (i386 does not have PG_G). 418 */ 419 static __inline void 420 cpu_invlpg(void *addr) 421 { 422 __asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory"); 423 } 424 425 #endif 426 427 static __inline void 428 cpu_nop(void) 429 { 430 __asm __volatile("rep; nop"); 431 } 432 433 #endif /* _KERNEL */ 434 435 static __inline u_short 436 inw(u_int port) 437 { 438 u_short data; 439 440 __asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port)); 441 return (data); 442 } 443 444 static __inline u_int 445 loadandclear(volatile u_int *addr) 446 { 447 u_int result; 448 449 __asm __volatile("xorl %0,%0; xchgl %1,%0" 450 : "=&r" (result) : "m" (*addr)); 451 return (result); 452 } 453 454 static __inline void 455 outbv(u_int port, u_char data) 456 { 457 u_char al; 458 /* 459 * Use an unnecessary assignment to help gcc's register allocator. 460 * This make a large difference for gcc-1.40 and a tiny difference 461 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 462 * best results. gcc-2.6.0 can't handle this. 463 */ 464 al = data; 465 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 466 } 467 468 static __inline void 469 outl(u_int port, u_int data) 470 { 471 /* 472 * outl() and outw() aren't used much so we haven't looked at 473 * possible micro-optimizations such as the unnecessary 474 * assignment for them. 475 */ 476 __asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port)); 477 } 478 479 static __inline void 480 outsb(u_int port, const void *addr, size_t cnt) 481 { 482 __asm __volatile("cld; rep; outsb" 483 : "+S" (addr), "+c" (cnt) 484 : "d" (port)); 485 } 486 487 static __inline void 488 outsw(u_int port, const void *addr, size_t cnt) 489 { 490 __asm __volatile("cld; rep; outsw" 491 : "+S" (addr), "+c" (cnt) 492 : "d" (port)); 493 } 494 495 static __inline void 496 outsl(u_int port, const void *addr, size_t cnt) 497 { 498 __asm __volatile("cld; rep; outsl" 499 : "+S" (addr), "+c" (cnt) 500 : "d" (port)); 501 } 502 503 static __inline void 504 outw(u_int port, u_short data) 505 { 506 __asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port)); 507 } 508 509 static __inline void 510 ia32_pause(void) 511 { 512 __asm __volatile("pause"); 513 } 514 515 static __inline u_long 516 read_rflags(void) 517 { 518 u_long rf; 519 520 __asm __volatile("pushfq; popq %0" : "=r" (rf)); 521 return (rf); 522 } 523 524 static __inline u_int64_t 525 rdmsr(u_int msr) 526 { 527 u_int32_t low, high; 528 529 __asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr)); 530 return (low | ((u_int64_t)high << 32)); 531 } 532 533 static __inline u_int64_t 534 rdpmc(u_int pmc) 535 { 536 u_int32_t low, high; 537 538 __asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc)); 539 return (low | ((u_int64_t)high << 32)); 540 } 541 542 #define _RDTSC_SUPPORTED_ 543 544 static __inline tsc_uclock_t 545 rdtsc(void) 546 { 547 u_int32_t low, high; 548 549 __asm __volatile("rdtsc" : "=a" (low), "=d" (high)); 550 return (low | ((tsc_uclock_t)high << 32)); 551 } 552 553 #ifdef _KERNEL 554 #include <machine/cputypes.h> 555 #include <machine/md_var.h> 556 557 static __inline tsc_uclock_t 558 rdtsc_ordered(void) 559 { 560 if (cpu_vendor_id == CPU_VENDOR_INTEL) 561 cpu_lfence(); 562 else 563 cpu_mfence(); 564 return rdtsc(); 565 } 566 #endif 567 568 static __inline void 569 wbinvd(void) 570 { 571 __asm __volatile("wbinvd"); 572 } 573 574 #if defined(_KERNEL) 575 void cpu_wbinvd_on_all_cpus_callback(void *arg); 576 577 static __inline void 578 cpu_wbinvd_on_all_cpus(void) 579 { 580 lwkt_cpusync_simple(smp_active_mask, cpu_wbinvd_on_all_cpus_callback, NULL); 581 } 582 #endif 583 584 static __inline void 585 write_rflags(u_long rf) 586 { 587 __asm __volatile("pushq %0; popfq" : : "r" (rf)); 588 } 589 590 static __inline void 591 wrmsr(u_int msr, u_int64_t newval) 592 { 593 u_int32_t low, high; 594 595 low = newval; 596 high = newval >> 32; 597 __asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr)); 598 } 599 600 static __inline void 601 xsetbv(u_int ecx, u_int eax, u_int edx) 602 { 603 __asm __volatile(".byte 0x0f,0x01,0xd1" 604 : 605 : "a" (eax), "c" (ecx), "d" (edx)); 606 } 607 608 static __inline void 609 load_cr0(u_long data) 610 { 611 612 __asm __volatile("movq %0,%%cr0" : : "r" (data)); 613 } 614 615 static __inline u_long 616 rcr0(void) 617 { 618 u_long data; 619 620 __asm __volatile("movq %%cr0,%0" : "=r" (data)); 621 return (data); 622 } 623 624 static __inline u_long 625 rcr2(void) 626 { 627 u_long data; 628 629 __asm __volatile("movq %%cr2,%0" : "=r" (data)); 630 return (data); 631 } 632 633 static __inline void 634 load_cr3(u_long data) 635 { 636 637 __asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory"); 638 } 639 640 static __inline u_long 641 rcr3(void) 642 { 643 u_long data; 644 645 __asm __volatile("movq %%cr3,%0" : "=r" (data)); 646 return (data); 647 } 648 649 static __inline void 650 load_cr4(u_long data) 651 { 652 __asm __volatile("movq %0,%%cr4" : : "r" (data)); 653 } 654 655 static __inline u_long 656 rcr4(void) 657 { 658 u_long data; 659 660 __asm __volatile("movq %%cr4,%0" : "=r" (data)); 661 return (data); 662 } 663 664 #ifndef _CPU_INVLTLB_DEFINED 665 666 /* 667 * Invalidate the TLB on this cpu only 668 */ 669 static __inline void 670 cpu_invltlb(void) 671 { 672 load_cr3(rcr3()); 673 #if defined(SWTCH_OPTIM_STATS) 674 ++tlb_flush_count; 675 #endif 676 } 677 678 #endif 679 680 void smp_invltlb(void); 681 void smp_sniff(void); 682 void cpu_sniff(int); 683 void hard_sniff(struct trapframe *); 684 685 static __inline u_short 686 rfs(void) 687 { 688 u_short sel; 689 __asm __volatile("movw %%fs,%0" : "=rm" (sel)); 690 return (sel); 691 } 692 693 static __inline u_short 694 rgs(void) 695 { 696 u_short sel; 697 __asm __volatile("movw %%gs,%0" : "=rm" (sel)); 698 return (sel); 699 } 700 701 static __inline void 702 load_ds(u_short sel) 703 { 704 __asm __volatile("movw %0,%%ds" : : "rm" (sel)); 705 } 706 707 static __inline void 708 load_es(u_short sel) 709 { 710 __asm __volatile("movw %0,%%es" : : "rm" (sel)); 711 } 712 713 #ifdef _KERNEL 714 /* This is defined in <machine/specialreg.h> but is too painful to get to */ 715 #ifndef MSR_FSBASE 716 #define MSR_FSBASE 0xc0000100 717 #endif 718 static __inline void 719 load_fs(u_short sel) 720 { 721 /* Preserve the fsbase value across the selector load */ 722 __asm __volatile("rdmsr; movw %0,%%fs; wrmsr" 723 : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx"); 724 } 725 726 #ifndef MSR_GSBASE 727 #define MSR_GSBASE 0xc0000101 728 #endif 729 static __inline void 730 load_gs(u_short sel) 731 { 732 /* 733 * Preserve the gsbase value across the selector load. 734 * Note that we have to disable interrupts because the gsbase 735 * being trashed happens to be the kernel gsbase at the time. 736 */ 737 __asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq" 738 : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx"); 739 } 740 #else 741 /* Usable by userland */ 742 static __inline void 743 load_fs(u_short sel) 744 { 745 __asm __volatile("movw %0,%%fs" : : "rm" (sel)); 746 } 747 748 static __inline void 749 load_gs(u_short sel) 750 { 751 __asm __volatile("movw %0,%%gs" : : "rm" (sel)); 752 } 753 #endif 754 755 /* void lidt(struct region_descriptor *addr); */ 756 static __inline void 757 lidt(struct region_descriptor *addr) 758 { 759 __asm __volatile("lidt (%0)" : : "r" (addr)); 760 } 761 762 /* void lldt(u_short sel); */ 763 static __inline void 764 lldt(u_short sel) 765 { 766 __asm __volatile("lldt %0" : : "r" (sel)); 767 } 768 769 /* void ltr(u_short sel); */ 770 static __inline void 771 ltr(u_short sel) 772 { 773 __asm __volatile("ltr %0" : : "r" (sel)); 774 } 775 776 static __inline u_int64_t 777 rdr0(void) 778 { 779 u_int64_t data; 780 __asm __volatile("movq %%dr0,%0" : "=r" (data)); 781 return (data); 782 } 783 784 static __inline void 785 load_dr0(u_int64_t dr0) 786 { 787 __asm __volatile("movq %0,%%dr0" : : "r" (dr0)); 788 } 789 790 static __inline u_int64_t 791 rdr1(void) 792 { 793 u_int64_t data; 794 __asm __volatile("movq %%dr1,%0" : "=r" (data)); 795 return (data); 796 } 797 798 static __inline void 799 load_dr1(u_int64_t dr1) 800 { 801 __asm __volatile("movq %0,%%dr1" : : "r" (dr1)); 802 } 803 804 static __inline u_int64_t 805 rdr2(void) 806 { 807 u_int64_t data; 808 __asm __volatile("movq %%dr2,%0" : "=r" (data)); 809 return (data); 810 } 811 812 static __inline void 813 load_dr2(u_int64_t dr2) 814 { 815 __asm __volatile("movq %0,%%dr2" : : "r" (dr2)); 816 } 817 818 static __inline u_int64_t 819 rdr3(void) 820 { 821 u_int64_t data; 822 __asm __volatile("movq %%dr3,%0" : "=r" (data)); 823 return (data); 824 } 825 826 static __inline void 827 load_dr3(u_int64_t dr3) 828 { 829 __asm __volatile("movq %0,%%dr3" : : "r" (dr3)); 830 } 831 832 static __inline u_int64_t 833 rdr4(void) 834 { 835 u_int64_t data; 836 __asm __volatile("movq %%dr4,%0" : "=r" (data)); 837 return (data); 838 } 839 840 static __inline void 841 load_dr4(u_int64_t dr4) 842 { 843 __asm __volatile("movq %0,%%dr4" : : "r" (dr4)); 844 } 845 846 static __inline u_int64_t 847 rdr5(void) 848 { 849 u_int64_t data; 850 __asm __volatile("movq %%dr5,%0" : "=r" (data)); 851 return (data); 852 } 853 854 static __inline void 855 load_dr5(u_int64_t dr5) 856 { 857 __asm __volatile("movq %0,%%dr5" : : "r" (dr5)); 858 } 859 860 static __inline u_int64_t 861 rdr6(void) 862 { 863 u_int64_t data; 864 __asm __volatile("movq %%dr6,%0" : "=r" (data)); 865 return (data); 866 } 867 868 static __inline void 869 load_dr6(u_int64_t dr6) 870 { 871 __asm __volatile("movq %0,%%dr6" : : "r" (dr6)); 872 } 873 874 static __inline u_int64_t 875 rdr7(void) 876 { 877 u_int64_t data; 878 __asm __volatile("movq %%dr7,%0" : "=r" (data)); 879 return (data); 880 } 881 882 static __inline void 883 load_dr7(u_int64_t dr7) 884 { 885 __asm __volatile("movq %0,%%dr7" : : "r" (dr7)); 886 } 887 888 static __inline register_t 889 intr_disable(void) 890 { 891 register_t rflags; 892 893 rflags = read_rflags(); 894 cpu_disable_intr(); 895 return (rflags); 896 } 897 898 static __inline void 899 intr_restore(register_t rflags) 900 { 901 write_rflags(rflags); 902 } 903 904 #else /* !__GNUC__ */ 905 906 int breakpoint(void); 907 void cpu_pause(void); 908 u_int bsfl(u_int mask); 909 u_int bsrl(u_int mask); 910 void cpu_disable_intr(void); 911 void cpu_enable_intr(void); 912 void cpu_invlpg(u_long addr); 913 void cpu_invlpg_range(u_long start, u_long end); 914 void do_cpuid(u_int ax, u_int *p); 915 void halt(void); 916 u_char inb(u_int port); 917 u_int inl(u_int port); 918 void insb(u_int port, void *addr, size_t cnt); 919 void insl(u_int port, void *addr, size_t cnt); 920 void insw(u_int port, void *addr, size_t cnt); 921 void invd(void); 922 void invlpg_range(u_int start, u_int end); 923 void cpu_invltlb(void); 924 u_short inw(u_int port); 925 void load_cr0(u_int cr0); 926 void load_cr3(u_int cr3); 927 void load_cr4(u_int cr4); 928 void load_fs(u_int sel); 929 void load_gs(u_int sel); 930 struct region_descriptor; 931 void lidt(struct region_descriptor *addr); 932 void lldt(u_short sel); 933 void ltr(u_short sel); 934 void outb(u_int port, u_char data); 935 void outl(u_int port, u_int data); 936 void outsb(u_int port, void *addr, size_t cnt); 937 void outsl(u_int port, void *addr, size_t cnt); 938 void outsw(u_int port, void *addr, size_t cnt); 939 void outw(u_int port, u_short data); 940 void ia32_pause(void); 941 u_int rcr0(void); 942 u_int rcr2(void); 943 u_int rcr3(void); 944 u_int rcr4(void); 945 u_short rfs(void); 946 u_short rgs(void); 947 u_int64_t rdmsr(u_int msr); 948 u_int64_t rdpmc(u_int pmc); 949 tsc_uclock_t rdtsc(void); 950 u_int read_rflags(void); 951 void wbinvd(void); 952 void write_rflags(u_int rf); 953 void wrmsr(u_int msr, u_int64_t newval); 954 u_int64_t rdr0(void); 955 void load_dr0(u_int64_t dr0); 956 u_int64_t rdr1(void); 957 void load_dr1(u_int64_t dr1); 958 u_int64_t rdr2(void); 959 void load_dr2(u_int64_t dr2); 960 u_int64_t rdr3(void); 961 void load_dr3(u_int64_t dr3); 962 u_int64_t rdr4(void); 963 void load_dr4(u_int64_t dr4); 964 u_int64_t rdr5(void); 965 void load_dr5(u_int64_t dr5); 966 u_int64_t rdr6(void); 967 void load_dr6(u_int64_t dr6); 968 u_int64_t rdr7(void); 969 void load_dr7(u_int64_t dr7); 970 register_t intr_disable(void); 971 void intr_restore(register_t rf); 972 973 #endif /* __GNUC__ */ 974 975 int rdmsr_safe(u_int msr, uint64_t *val); 976 int wrmsr_safe(u_int msr, uint64_t newval); 977 void reset_dbregs(void); 978 void smap_open(void); 979 void smap_close(void); 980 981 __END_DECLS 982 983 #endif /* !_CPU_CPUFUNC_H_ */ 984