1/*- 2 * Copyright (c) 1993 The Regents of the University of California. 3 * Copyright (c) 2003 Peter Wemm. 4 * Copyright (c) 2008 The DragonFly Project. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 4. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 *
 * $FreeBSD: src/sys/amd64/amd64/support.S,v 1.127 2007/05/23 08:33:04 kib Exp $
 */

#include "opt_ddb.h"

#include <machine/asmacros.h>
#include <machine/pmap.h>

#include "assym.s"

	ALIGN_DATA

	.text

/*
 * bcopy family
 * void bzero(void *buf, size_t len)
 *
 * Zero 'len' bytes at 'buf'.  The bulk is cleared eight bytes at a time
 * with "rep stosq"; the final len%8 bytes are cleared with "rep stosb".
 */
ENTRY(bzero)
	movq	%rsi,%rcx		/* %rcx = len / 8 (qword count) */
	xorl	%eax,%eax		/* fill pattern = 0 */
	shrq	$3,%rcx
	cld				/* direction flag clear: copy forwards */
	rep
	stosq
	movq	%rsi,%rcx		/* %rcx = len % 8 (residual bytes) */
	andq	$7,%rcx
	rep
	stosb
	ret

/*
 * pagezero(void *page)
 * Address: %rdi
 *
 * Zero one page, 32 bytes per iteration.  %rdx counts from -PAGE_SIZE
 * up to zero while %rdi is biased one page past the start, so
 * (%rdi,%rdx) walks the page front to back.
 */
ENTRY(pagezero)
	movq	$-PAGE_SIZE,%rdx
	subq	%rdx,%rdi		/* %rdi = page + PAGE_SIZE */
	xorl	%eax,%eax
1:
	movq	%rax,(%rdi,%rdx)	/* movnti */
	movq	%rax,8(%rdi,%rdx)	/* movnti */
	movq	%rax,16(%rdi,%rdx)	/* movnti */
	movq	%rax,24(%rdi,%rdx)	/* movnti */
	addq	$32,%rdx
	jne	1b
	/*sfence*/
	ret

/*
 * bcmp(b1, b2, len)
 *      %rdi, %rsi, %rdx
 *
 * Compare two byte strings; returns 0 if identical, non-zero otherwise.
 * Bulk compare by qwords, then the len%8 residue by bytes.
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx
	cld				/* compare forwards */
	repe
	cmpsq
	jne	1f

	movq	%rdx,%rcx
	andq	$7,%rcx
	repe
	cmpsb
1:
	setne	%al			/* ZF clear -> strings differ */
	movsbl	%al,%eax
	ret

/*
 * bcopy(src, dst, cnt)
 *       rdi,  rsi,  rdx
 * ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 *
 * Overlap-safe copy: if the regions overlap with src < dst the copy is
 * done backwards (bytes first, then qwords), otherwise forwards.
 */
ENTRY(generic_bcopy)	/* generic_bcopy is bcopy without FPU */
ENTRY(ovbcopy)		/* our bcopy doesn't use the FPU, so ovbcopy is the same */
ENTRY(bcopy)
	xchgq	%rsi,%rdi		/* string insns want %rsi=src, %rdi=dst */
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax		/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx			/* copy by 64-bit words */
	cld				/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	rep
	movsb
	ret

	/* ALIGN_TEXT */
1:
	addq	%rcx,%rdi		/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi
	decq	%rsi
	andq	$7,%rcx			/* any fractional bytes? */
	std				/* direction flag set: copy backwards */
	rep
	movsb
	movq	%rdx,%rcx		/* copy remainder by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi			/* step back to last full qword */
	subq	$7,%rdi
	rep
	movsq
	cld				/* restore forward direction for callers */
	ret

/*
 * reset_dbregs(void)
 *
 * Disable all hardware breakpoints and clear the debug registers.
 */
ENTRY(reset_dbregs)
	movq	$0x200,%rax	/* the manual says that bit 10 must be set to 1
				   (NOTE(review): $0x200 is bit 9 -- verify
				   against the SDM DR7 description) */
	movq	%rax,%dr7	/* disable all breakpoints first */
	movq	$0,%rax
	movq	%rax,%dr0
	movq	%rax,%dr1
	movq	%rax,%dr2
	movq	%rax,%dr3
	movq	%rax,%dr6
	ret

/*
 * memcpy(dst, src, cnt)
 *        rdi, rsi, rdx
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* copy by 64-bit words */
	cld				/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	rep
	movsb
	ret

/*
 * pagecopy(%rdi=from, %rsi=to)
 *
 * Copy one page, 32 bytes per iteration; %rdx runs from -PAGE_SIZE to 0
 * while both pointers are biased one page forward.
 */
ENTRY(pagecopy)
	movq	$-PAGE_SIZE,%rax
	movq	%rax,%rdx
	subq	%rax,%rdi		/* %rdi = from + PAGE_SIZE */
	subq	%rax,%rsi		/* %rsi = to + PAGE_SIZE */
1:
	/*prefetchnta (%rdi,%rax)*/
	/*addq $64,%rax*/
	/*jne 1b*/
2:
	movq	(%rdi,%rdx),%rax
	movq	%rax,(%rsi,%rdx)	/* movnti */
	movq	8(%rdi,%rdx),%rax
	movq	%rax,8(%rsi,%rdx)	/* movnti */
	movq	16(%rdi,%rdx),%rax
	movq	%rax,16(%rsi,%rdx)	/* movnti */
	movq	24(%rdi,%rdx),%rax
	movq	%rax,24(%rsi,%rdx)	/* movnti */
	addq	$32,%rdx
	jne	2b
	/*sfence*/
	ret

/*
 * fillw(pat, base, cnt)
 *       %rdi, %rsi, %rdx
 *
 * Store 'cnt' copies of the 16-bit pattern 'pat' starting at 'base'.
 */
ENTRY(fillw)
	movq	%rdi,%rax		/* %ax = fill pattern */
	movq	%rsi,%rdi		/* %rdi = destination */
	movq	%rdx,%rcx		/* %rcx = word count */
	cld
	rep
	stosw
	ret

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel.  These routines should be
 * the only places that do this.
 *
 * These routines set curpcb->onfault for the time they execute.  When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->onfault instead of the function.
213 */ 214 215/* 216 * copyout(from_kernel, to_user, len) - MP SAFE 217 * %rdi, %rsi, %rdx 218 */ 219ENTRY(copyout) 220 movq PCPU(curthread),%rax 221 movq TD_PCB(%rax), %rax 222 movq $copyout_fault,PCB_ONFAULT(%rax) 223 testq %rdx,%rdx /* anything to do? */ 224 jz done_copyout 225 226 /* 227 * Check explicitly for non-user addresses. If 486 write protection 228 * is being used, this check is essential because we are in kernel 229 * mode so the h/w does not provide any protection against writing 230 * kernel addresses. 231 */ 232 233 /* 234 * First, prevent address wrapping. 235 */ 236 movq %rsi,%rax 237 addq %rdx,%rax 238 jc copyout_fault 239/* 240 * XXX STOP USING VM_MAX_USER_ADDRESS. 241 * It is an end address, not a max, so every time it is used correctly it 242 * looks like there is an off by one error, and of course it caused an off 243 * by one error in several places. 244 */ 245 movq $VM_MAX_USER_ADDRESS,%rcx 246 cmpq %rcx,%rax 247 ja copyout_fault 248 249 xchgq %rdi,%rsi 250 /* bcopy(%rsi, %rdi, %rdx) */ 251 movq %rdx,%rcx 252 253 shrq $3,%rcx 254 cld 255 rep 256 movsq 257 movb %dl,%cl 258 andb $7,%cl 259 rep 260 movsb 261 262done_copyout: 263 xorl %eax,%eax 264 movq PCPU(curthread),%rdx 265 movq TD_PCB(%rdx), %rdx 266 movq %rax,PCB_ONFAULT(%rdx) 267 ret 268 269 ALIGN_TEXT 270copyout_fault: 271 movq PCPU(curthread),%rdx 272 movq TD_PCB(%rdx), %rdx 273 movq $0,PCB_ONFAULT(%rdx) 274 movq $EFAULT,%rax 275 ret 276 277/* 278 * copyin(from_user, to_kernel, len) - MP SAFE 279 * %rdi, %rsi, %rdx 280 */ 281ENTRY(copyin) 282 movq PCPU(curthread),%rax 283 movq TD_PCB(%rax), %rax 284 movq $copyin_fault,PCB_ONFAULT(%rax) 285 testq %rdx,%rdx /* anything to do? 
*/ 286 jz done_copyin 287 288 /* 289 * make sure address is valid 290 */ 291 movq %rdi,%rax 292 addq %rdx,%rax 293 jc copyin_fault 294 movq $VM_MAX_USER_ADDRESS,%rcx 295 cmpq %rcx,%rax 296 ja copyin_fault 297 298 xchgq %rdi,%rsi 299 movq %rdx,%rcx 300 movb %cl,%al 301 shrq $3,%rcx /* copy longword-wise */ 302 cld 303 rep 304 movsq 305 movb %al,%cl 306 andb $7,%cl /* copy remaining bytes */ 307 rep 308 movsb 309 310done_copyin: 311 xorl %eax,%eax 312 movq PCPU(curthread),%rdx 313 movq TD_PCB(%rdx), %rdx 314 movq %rax,PCB_ONFAULT(%rdx) 315 ret 316 317 ALIGN_TEXT 318copyin_fault: 319 movq PCPU(curthread),%rdx 320 movq TD_PCB(%rdx), %rdx 321 movq $0,PCB_ONFAULT(%rdx) 322 movq $EFAULT,%rax 323 ret 324 325/* 326 * casuword32. Compare and set user integer. Returns -1 or the current value. 327 * dst = %rdi, old = %rsi, new = %rdx 328 */ 329ENTRY(casuword32) 330 movq PCPU(curthread),%rcx 331 movq TD_PCB(%rcx), %rcx 332 movq $fusufault,PCB_ONFAULT(%rcx) 333 334 movq $VM_MAX_USER_ADDRESS-4,%rax 335 cmpq %rax,%rdi /* verify address is valid */ 336 ja fusufault 337 338 movl %esi,%eax /* old */ 339#ifdef SMP 340 lock 341#endif 342 cmpxchgl %edx,(%rdi) /* new = %edx */ 343 344 /* 345 * The old value is in %eax. If the store succeeded it will be the 346 * value we expected (old) from before the store, otherwise it will 347 * be the current value. 348 */ 349 350 movq PCPU(curthread),%rcx 351 movq TD_PCB(%rcx), %rcx 352 movq $0,PCB_ONFAULT(%rcx) 353 ret 354 355/* 356 * casuword. Compare and set user word. Returns -1 or the current value. 357 * dst = %rdi, old = %rsi, new = %rdx 358 */ 359ENTRY(casuword) 360 movq PCPU(curthread),%rcx 361 movq TD_PCB(%rcx), %rcx 362 movq $fusufault,PCB_ONFAULT(%rcx) 363 364 movq $VM_MAX_USER_ADDRESS-4,%rax 365 cmpq %rax,%rdi /* verify address is valid */ 366 ja fusufault 367 368 movq %rsi,%rax /* old */ 369#ifdef SMP 370 lock 371#endif 372 cmpxchgq %rdx,(%rdi) /* new = %rdx */ 373 374 /* 375 * The old value is in %eax. 
If the store succeeded it will be the 376 * value we expected (old) from before the store, otherwise it will 377 * be the current value. 378 */ 379 380 movq PCPU(curthread),%rcx 381 movq TD_PCB(%rcx), %rcx 382 movq $fusufault,PCB_ONFAULT(%rcx) 383 movq $0,PCB_ONFAULT(%rcx) 384 ret 385 386/* 387 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit 388 * byte from user memory. All these functions are MPSAFE. 389 * addr = %rdi 390 */ 391 392ALTENTRY(fuword64) 393ENTRY(fuword) 394 movq PCPU(curthread),%rcx 395 movq TD_PCB(%rcx), %rcx 396 movq $fusufault,PCB_ONFAULT(%rcx) 397 398 movq $VM_MAX_USER_ADDRESS-8,%rax 399 cmpq %rax,%rdi /* verify address is valid */ 400 ja fusufault 401 402 movq (%rdi),%rax 403 movq $0,PCB_ONFAULT(%rcx) 404 ret 405 406ENTRY(fuword32) 407 movq PCPU(curthread),%rcx 408 movq TD_PCB(%rcx), %rcx 409 movq $fusufault,PCB_ONFAULT(%rcx) 410 411 movq $VM_MAX_USER_ADDRESS-4,%rax 412 cmpq %rax,%rdi /* verify address is valid */ 413 ja fusufault 414 415 movl (%rdi),%eax 416 movq $0,PCB_ONFAULT(%rcx) 417 ret 418 419/* 420 * fuswintr() and suswintr() are specialized variants of fuword16() and 421 * suword16(), respectively. They are called from the profiling code, 422 * potentially at interrupt time. If they fail, that's okay; good things 423 * will happen later. They always fail for now, until the trap code is 424 * able to deal with this. 
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movq	$-1,%rax		/* unconditional failure for now */
	ret

/*
 * fuword16(addr) - fetch a 16-bit word from user space.
 * Returns the zero-extended value, or -1 on fault.
 */
ENTRY(fuword16)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-2,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzwl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

/*
 * fubyte(addr) - fetch a byte from user space.
 * Returns the zero-extended value, or -1 on fault.
 */
ENTRY(fubyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax
	movq	$0,PCB_ONFAULT(%rcx)
	ret

	ALIGN_TEXT
/*
 * Common fault target for the fetch/store user routines: disarm
 * pcb_onfault and return -1.
 */
fusufault:
	movq	PCPU(curthread),%rcx
	xorl	%eax,%eax
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	decq	%rax			/* return -1 */
	ret

/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.  All these functions are MPSAFE.
 * addr = %rdi, value = %rsi
 * Returns 0 on success, -1 on fault (via fusufault).
 */
ALTENTRY(suword64)
ENTRY(suword)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

ENTRY(suword32)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

ENTRY(suword16)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

ENTRY(subyte)
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAX_USER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax
	movq	PCPU(curthread),%rcx		/* restore trashed register */
	movq	TD_PCB(%rcx), %rcx
	movq	%rax,PCB_ONFAULT(%rcx)
	ret

/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from from to to, stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations.  If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
ENTRY(copyinstr)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rdi = from, %rsi = to */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAX_USER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAX_USER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx			/* pre-bias count for the decq loop */
	cld

2:
	decq	%rdx
	jz	3f

	lodsb				/* copy one byte... */
	stosb
	orb	%al,%al			/* ...until the NUL terminator */
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax		/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAX_USER_ADDRESS,%rax
	cmpq	%rax,%rsi		/* ran off the end of user space? */
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(curthread),%rcx
	movq	TD_PCB(%rcx), %rcx
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm fault handler */

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8		/* bytes copied = maxlen - remaining */
	movq	%r8,(%r9)
1:
	ret


/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 *
 * Kernel-to-kernel variant of copyinstr: same NUL-terminated copy and
 * ENAMETOOLONG handling, but no user-address checks and no onfault
 * handling.
 */
ENTRY(copystr)
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi
	incq	%rdx			/* pre-bias count for the decq loop */
	cld
1:
	decq	%rdx
	jz	4f
	lodsb
	stosb
	orb	%al,%al			/* stop at the NUL terminator */
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx
	xorl	%eax,%eax		/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8
	movq	%r8,(%rcx)
7:
	ret

/*
 * Handling of special x86_64 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	movl	$KDSEL,%eax		/* reload data segment selectors */
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq

/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

/*
 * setjmp(jmp_buf) - save the callee-saved registers, stack pointer and
 * return address into the 8-qword buffer at %rdi.  Returns 0 on the
 * direct call; the matching longjmp call site sees a return of 1.
 */
ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret

/*
 * longjmp(jmp_buf) - restore the state saved by setjmp at %rdi and
 * return 1 from the original setjmp call site.
 */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret

/*
 * Support for reading MSRs in the safe manner.
 * Returns 0 with *data filled in, or EFAULT (via msr_onfault) if the
 * rdmsr itself traps (e.g. non-existent MSR).
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* arm fault handler */
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed by %ecx.  Returns
				   hi word in edx, lo in %eax */
	salq	$32,%rdx	/* shift high half into place */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax	/* %rax = edx:eax combined */
	movq	%rax,(%rsi)	/* *data = value */
	xorq	%rax,%rax	/* return 0 */
	movq	%rax,PCB_ONFAULT(%r8)	/* disarm fault handler */
	ret

/*
 * MSR operations fault handler: disarm pcb_onfault and return EFAULT.
 */
	ALIGN_TEXT
msr_onfault:
	movq	PCPU(curthread),%r8
	movq	TD_PCB(%r8), %r8
	movq	$0,PCB_ONFAULT(%r8)
	movl	$EFAULT,%eax
	ret

/*
 * Support for BB-profiling (gcc -a).  The kernbb program will extract
 * the data from the kernel.
 */

	.data
	ALIGN_DATA
	.globl	bbhead
bbhead:
	.quad	0

	.text
/*
 * __bb_init_func(bb) - link a basic-block profiling record onto the
 * bbhead list and mark it initialized.
 */
NON_GPROF_ENTRY(__bb_init_func)
	movq	$1,(%rdi)		/* mark record as initialized */
	movq	bbhead,%rax
	movq	%rax,32(%rdi)		/* record->next = bbhead */
	movq	%rdi,bbhead		/* bbhead = record */
	NON_GPROF_RET