/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/include/cpufunc.h,v 1.139 2004/01/28 23:53:04 peter Exp $
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _CPU_CPUFUNC_H_
#define _CPU_CPUFUNC_H_

#include <sys/cdefs.h>
#include <sys/thread.h>
#include <machine/clock.h>
#include <machine/psl.h>
#include <machine/smp.h>

struct thread;
struct region_descriptor;
struct pmap;

__BEGIN_DECLS
#define readb(va)	(*(volatile u_int8_t *) (va))
#define readw(va)	(*(volatile u_int16_t *) (va))
#define readl(va)	(*(volatile u_int32_t *) (va))
#define readq(va)	(*(volatile u_int64_t *) (va))

#define writeb(va, d)	(*(volatile u_int8_t *) (va) = (d))
#define writew(va, d)	(*(volatile u_int16_t *) (va) = (d))
#define writel(va, d)	(*(volatile u_int32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile u_int64_t *) (va) = (d))

#ifdef	__GNUC__

#include <machine/lock.h>		/* XXX */

struct trapframe;

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline void
cpu_pause(void)
{
	__asm __volatile("pause":::"memory");
}

static __inline u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsflong(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{
	__asm __volatile("clflush %0" : : "m" (*(char *) addr));
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax), "c" (cx));
}
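
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * fetching the 12-byte vendor string via cpuid leaf 0 with do_cpuid().
 * The vendor bytes come back in %ebx, %edx, %ecx order, i.e. p[1],
 * p[3], p[2] with this interface.
 */
#if 0
static __inline void
example_cpu_vendor(char buf[13])
{
	u_int regs[4];
	u_int vend[3];

	do_cpuid(0, regs);
	vend[0] = regs[1];		/* %ebx: e.g. "Genu" */
	vend[1] = regs[3];		/* %edx: e.g. "ineI" */
	vend[2] = regs[2];		/* %ecx: e.g. "ntel" */
	__builtin_memcpy(buf, vend, 12);
	buf[12] = '\0';
}
#endif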

#ifndef _CPU_DISABLE_INTR_DEFINED

static __inline void
cpu_disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#endif

#ifndef _CPU_ENABLE_INTR_DEFINED

static __inline void
cpu_enable_intr(void)
{
	__asm __volatile("sti");
}

#endif

/*
 * Cpu and compiler memory ordering fence.  mfence ensures strong read and
 * write ordering.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

/*
 * cpu_lfence() ensures strong read ordering for reads issued prior
 * to the instruction versus reads issued afterwards.
 *
 * A serializing or fence instruction is required here.  A locked bus
 * cycle on data for which we already own cache mastership is the most
 * portable.
 */
static __inline void
cpu_lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

/*
 * cpu_sfence() ensures strong write ordering for writes issued prior
 * to the instruction versus writes issued afterwards.  Writes are
 * ordered on Intel cpus so we do not actually have to do anything.
 */
static __inline void
cpu_sfence(void)
{
	/*
	 * NOTE:
	 * Don't use 'sfence' here, as it will create a lot of
	 * unnecessary stalls.
	 */
	__asm __volatile("" : : : "memory");
}

/*
 * cpu_ccfence() prevents the compiler from reordering instructions, in
 * particular stores, relative to the current cpu.  Use cpu_sfence() if
 * you need to guarantee ordering by both the compiler and by the cpu.
 *
 * This also prevents the compiler from caching memory loads into local
 * variables across the routine.
 */
static __inline void
cpu_ccfence(void)
{
	__asm __volatile("" : : : "memory");
}
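
/*
 * Illustrative sketch (hypothetical producer/consumer, not part of this
 * header) showing how the fences above pair up: cpu_sfence() publishes
 * the data before the flag, cpu_lfence() keeps the consumer's data read
 * from passing the flag read, and cpu_ccfence() forces the compiler to
 * re-load the flag on every spin iteration.
 */
#if 0
static int example_data;
static int example_flag;

static __inline void
example_publish(int v)
{
	example_data = v;
	cpu_sfence();		/* order the data store before the flag store */
	example_flag = 1;
}

static __inline int
example_wait(void)
{
	while (example_flag == 0)
		cpu_ccfence();	/* compiler barrier: re-read the flag */
	cpu_lfence();		/* order the flag load before the data load */
	return (example_data);
}
#endif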

/*
 * This is a horrible, horrible hack that might have to be put at the
 * end of certain procedures (on a case by case basis), just before it
 * returns to avoid what we believe to be an unreported AMD cpu bug.
 * Found to occur on both a Phenom II X4 820 (two of them), as well
 * as a 48-core built around an Opteron 6168 (Id = 0x100f91 Stepping = 1).
 * The problem does not appear to occur w/Intel cpus.
 *
 * The bug is likely related to either a write combining issue or the
 * Return Address Stack (RAS) hardware cache.
 *
 * In particular, we had to do this for GCC's fill_sons_in_loop() routine
 * which due to its deep recursion and stack flow appears to be able to
 * tickle the amd cpu bug (w/ gcc-4.4.7).  Adding a single 'nop' to the
 * end of the routine just before it returns works around the bug.
 *
 * The bug appears to be extremely sensitive to %rip and %rsp values, to
 * the point where even just inserting an instruction in an unrelated
 * procedure (shifting the entire code base being run) affects the outcome.
 * DragonFly is probably able to more readily reproduce the bug due to
 * the stackgap randomization code.  We would expect OpenBSD (where we got
 * the stackgap randomization code from) to also be able to reproduce the
 * issue.  To date we have only reproduced the issue in DragonFly.
 */
#define __AMDCPUBUG_DFLY01_AVAILABLE__

static __inline void
cpu_amdcpubug_dfly01(void)
{
	__asm __volatile("nop" : : : "memory");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline int
ffs(int mask)
{
#if 0
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
#else
	/* Actually, the above is way out of date.  The builtins use cmov etc */
	return (__builtin_ffs(mask));
#endif
}

#define	HAVE_INLINE_FFSL

static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLS

static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline int
flsll(long long mask)
{
	return (flsl((long)mask));
}
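
/*
 * Illustrative expected values for the bit-scan wrappers above.  Bit
 * indices are 1-based; a zero argument yields 0 (no bit set):
 *
 *	ffs(0x10) == 5			fls(0x10) == 5
 *	ffs(0) == 0			fls(0) == 0
 *	ffsl(0x8000000000UL) == 40	flsl(0x8000000000UL) == 40
 */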

#endif	/* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/*
 * The following complications are to get around gcc not having a
 * constraint letter for the range 0..255.  We still put "d" in the
 * constraint because "i" isn't a valid constraint when the port
 * isn't constant.  This only matters for -O0 because otherwise
 * the non-working version gets optimized away.
 *
 * Use an expression-statement instead of a conditional expression
 * because gcc-2.6.0 would promote the operands of the conditional
 * and produce poor code for "if ((inb(var) & const1) == const2)".
 *
 * The unnecessary test `(port) < 0x10000' is to generate a warning if
 * the `port' has type u_short or smaller.  Such types are pessimal.
 * This actually only works for signed types.  The range check is
 * careful to avoid generating warnings.
 */
#define	inb(port) __extension__ ({				\
	u_char	_data;						\
	if (__builtin_constant_p(port) && ((port) & 0xffff) < 0x100 \
	    && (port) < 0x10000)				\
		_data = inbc(port);				\
	else							\
		_data = inbv(port);				\
	_data; })

#define	outb(port, data) (					\
	__builtin_constant_p(port) && ((port) & 0xffff) < 0x100 \
	&& (port) < 0x10000					\
	? outbc(port, data) : outbv(port, data))

static __inline u_char
inbc(u_int port)
{
	u_char	data;

	__asm __volatile("inb %1,%0" : "=a" (data) : "id" ((u_short)(port)));
	return (data);
}

static __inline void
outbc(u_int port, u_char data)
{
	__asm __volatile("outb %0,%1" : : "a" (data), "id" ((u_short)(port)));
}

static __inline u_char
inbv(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (cnt)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

#if defined(_KERNEL)

#ifndef _CPU_INVLPG_DEFINED

/*
 * Invalidate a particular VA on this cpu only
 *
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
cpu_invlpg(void *addr)
{
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#endif

static __inline void
cpu_nop(void)
{
	__asm __volatile("rep; nop");
}

#endif	/* _KERNEL */

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

static __inline u_int
loadandclear(volatile u_int *addr)
{
	u_int	result;

	__asm __volatile("xorl %0,%0; xchgl %1,%0"
			 : "=&r" (result) : "m" (*addr));
	return (result);
}

static __inline void
outbv(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	/*
	 * outl() and outw() aren't used much so we haven't looked at
	 * possible micro-optimizations such as the unnecessary
	 * assignment for them.
	 */
	__asm __volatile("outl %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t cnt)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (cnt)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline u_int64_t
rdmsr(u_int msr)
{
	u_int32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((u_int64_t)high << 32));
}

static __inline u_int64_t
rdpmc(u_int pmc)
{
	u_int32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((u_int64_t)high << 32));
}

#define	_RDTSC_SUPPORTED_

static __inline tsc_uclock_t
rdtsc(void)
{
	u_int32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((tsc_uclock_t)high << 32));
}

#ifdef _KERNEL
#include <machine/cputypes.h>
#include <machine/md_var.h>

static __inline tsc_uclock_t
rdtsc_ordered(void)
{
	if (cpu_vendor_id == CPU_VENDOR_INTEL)
		cpu_lfence();
	else
		cpu_mfence();
	return rdtsc();
}
#endif
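
/*
 * Illustrative sketch (hypothetical, kernel-only helper, not part of
 * this header): bracketing a code section with rdtsc_ordered() to count
 * elapsed TSC ticks.  The fence issued by rdtsc_ordered() keeps each
 * timestamp from being reordered around the measured section.
 */
#if 0
static __inline tsc_uclock_t
example_measure_ticks(void (*fn)(void))
{
	tsc_uclock_t t0, t1;

	t0 = rdtsc_ordered();
	fn();			/* the work being measured */
	t1 = rdtsc_ordered();
	return (t1 - t0);
}
#endif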

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

#if defined(_KERNEL)
void cpu_wbinvd_on_all_cpus_callback(void *arg);

static __inline void
cpu_wbinvd_on_all_cpus(void)
{
	lwkt_cpusync_simple(smp_active_mask,
			    cpu_wbinvd_on_all_cpus_callback, NULL);
}
#endif

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, u_int64_t newval)
{
	u_int32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
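
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a read-modify-write of an MSR built from rdmsr()/wrmsr() above.  The
 * msr/bit arguments are placeholders, not a real MSR assignment.
 */
#if 0
static __inline void
example_msr_set_bit(u_int msr, int bit)
{
	u_int64_t val;

	val = rdmsr(msr);		/* read the current 64-bit value */
	val |= (u_int64_t)1 << bit;	/* set the requested bit */
	wrmsr(msr, val);		/* write it back */
}
#endif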

static __inline void
xsetbv(u_int ecx, u_int eax, u_int edx)
{
	__asm __volatile(".byte 0x0f,0x01,0xd1"
	    :
	    : "a" (eax), "c" (ecx), "d" (edx));
}

static __inline void
load_cr0(u_long data)
{
	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{
	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

#ifndef _CPU_INVLTLB_DEFINED

/*
 * Invalidate the TLB on this cpu only
 */
static __inline void
cpu_invltlb(void)
{
	load_cr3(rcr3());
#if defined(SWTCH_OPTIM_STATS)
	++tlb_flush_count;
#endif
}

#endif

void	smp_invltlb(void);
void	smp_sniff(void);
void	cpu_sniff(int);
void	hard_sniff(struct trapframe *);

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* void lidt(struct region_descriptor *addr); */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* void lldt(u_short sel); */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* void ltr(u_short sel); */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int64_t
rdr0(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline u_int64_t
rdr1(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline u_int64_t
rdr2(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline u_int64_t
rdr3(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline u_int64_t
rdr4(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

static __inline u_int64_t
rdr5(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

static __inline u_int64_t
rdr6(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline u_int64_t
rdr7(void)
{
	u_int64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	cpu_disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
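
/*
 * Illustrative sketch (hypothetical, not part of this header): the
 * canonical pairing of intr_disable() and intr_restore() around a short
 * critical section.  Restoring the saved rflags preserves the caller's
 * original interrupt state instead of unconditionally re-enabling.
 */
#if 0
static __inline void
example_critical_section(void)
{
	register_t rflags;

	rflags = intr_disable();
	/* ... code that must run with interrupts masked ... */
	intr_restore(rflags);
}
#endif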

#else /* !__GNUC__ */

int	breakpoint(void);
void	cpu_pause(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	cpu_disable_intr(void);
void	cpu_enable_intr(void);
void	cpu_invlpg(u_long addr);
void	cpu_invlpg_range(u_long start, u_long end);
void	do_cpuid(u_int ax, u_int *p);
void	halt(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t cnt);
void	insl(u_int port, void *addr, size_t cnt);
void	insw(u_int port, void *addr, size_t cnt);
void	invd(void);
void	invlpg_range(u_int start, u_int end);
void	cpu_invltlb(void);
u_short	inw(u_int port);
void	load_cr0(u_int cr0);
void	load_cr3(u_int cr3);
void	load_cr4(u_int cr4);
void	load_fs(u_int sel);
void	load_gs(u_int sel);
struct region_descriptor;
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, void *addr, size_t cnt);
void	outsl(u_int port, void *addr, size_t cnt);
void	outsw(u_int port, void *addr, size_t cnt);
void	outw(u_int port, u_short data);
void	ia32_pause(void);
u_int	rcr0(void);
u_int	rcr2(void);
u_int	rcr3(void);
u_int	rcr4(void);
u_short	rfs(void);
u_short	rgs(void);
u_int64_t rdmsr(u_int msr);
u_int64_t rdpmc(u_int pmc);
tsc_uclock_t rdtsc(void);
u_int	read_rflags(void);
void	wbinvd(void);
void	write_rflags(u_int rf);
void	wrmsr(u_int msr, u_int64_t newval);
u_int64_t rdr0(void);
void	load_dr0(u_int64_t dr0);
u_int64_t rdr1(void);
void	load_dr1(u_int64_t dr1);
u_int64_t rdr2(void);
void	load_dr2(u_int64_t dr2);
u_int64_t rdr3(void);
void	load_dr3(u_int64_t dr3);
u_int64_t rdr4(void);
void	load_dr4(u_int64_t dr4);
u_int64_t rdr5(void);
void	load_dr5(u_int64_t dr5);
u_int64_t rdr6(void);
void	load_dr6(u_int64_t dr6);
u_int64_t rdr7(void);
void	load_dr7(u_int64_t dr7);
register_t intr_disable(void);
void	intr_restore(register_t rf);

#endif	/* __GNUC__ */

int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
void	reset_dbregs(void);
void	smap_open(void);
void	smap_close(void);

__END_DECLS

#endif /* !_CPU_CPUFUNC_H_ */