# x86/x86_64 support for -fsplit-stack.
# Copyright (C) 2009-2014 Free Software Foundation, Inc.
# Contributed by Ian Lance Taylor <iant@google.com>.

# This file is part of GCC.

# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.

# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.

# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.

# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.


# Support for allocating more stack space when using -fsplit-stack.
# When a function discovers that it needs more stack space, it will
# call __morestack with the size of the stack frame and the size of
# the parameters to copy from the old stack frame to the new one.
# The __morestack function preserves the parameter registers and
# calls __generic_morestack to actually allocate the stack space.

# When this is called, stack space is very low, but we ensure that
# there is enough space to push the parameter registers and to call
# __generic_morestack.

# When calling __generic_morestack, FRAME_SIZE points to the size of
# the desired frame when the function is called, and the function
# sets it to the size of the allocated stack.  OLD_STACK points to
# the parameters on the old stack and PARAM_SIZE is the number of
# bytes of parameters to copy to the new stack.  These are the
# parameters of the function that called __morestack.  The
# __generic_morestack function returns the new stack pointer,
# pointing to the address of the first copied parameter.  The return
# value minus the returned *FRAME_SIZE will be the first address on
# the stack which we should not use.

# void *__generic_morestack (size_t *frame_size, void *old_stack,
#			     size_t param_size);

# The __morestack routine has to arrange for the caller to return to a
# stub on the new stack.  The stub is responsible for restoring the
# old stack pointer and returning to the caller's caller.  This calls
# __generic_releasestack to retrieve the old stack pointer and release
# the newly allocated stack.

# void *__generic_releasestack (size_t *available);

# We do a little dance so that the processor's call/return return
# address prediction works out.  The compiler arranges for the caller
# to look like this:
#   call __morestack
#   ret
#  L:
#   // carry on with function
# After we allocate more stack, we call L, which is in our caller.
# When that returns (to the predicted instruction), we release the
# stack segment and reset the stack pointer.  We then return to the
# predicted instruction, namely the ret instruction immediately after
# the call to __morestack.  That then returns to the caller of the
# original caller.
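
# For orientation, the caller-side check that decides whether to call
# __morestack looks roughly like the sketch below.  This is only an
# illustration, not the exact code GCC emits: the label names and the
# immediates ARG_SIZE and FRAME_SIZE are placeholders, the comparison
# shown ignores the frame size (which the BACKOFF slack only covers
# for small frames), and the TCB offset is the 32-bit one used
# elsewhere in this file.
#	cmpl	%gs:0x30,%esp		# Compare stack pointer to guard.
#	jae	1f			# Enough stack; skip the call.
#	pushl	$ARG_SIZE		# Size of arguments to copy.
#	pushl	$FRAME_SIZE		# Size of stack frame needed.
#	call	__morestack
#	ret				# The single ret that __morestack
#					# skips over; returns to our
#					# caller's caller.
# 1:
#	# carry on with the function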

# The amount of extra space we ask for.  In general this has to be
# enough for the dynamic loader to find a symbol and for a signal
# handler to run.

#ifndef __x86_64__
#define BACKOFF (1024)
#else
#define BACKOFF (1536)
#endif


# The amount of space we ask for when calling non-split-stack code.
#define NON_SPLIT_STACK 0x100000

# This entry point is for split-stack code which calls non-split-stack
# code.  When the linker sees this case, it converts the call to
# __morestack to call __morestack_non_split instead.  We just bump the
# requested stack space by NON_SPLIT_STACK bytes (plus a page and
# BACKOFF).

	.global	__morestack_non_split
	.hidden	__morestack_non_split

#ifdef __ELF__
	.type	__morestack_non_split,@function
#endif

__morestack_non_split:
	.cfi_startproc

#ifndef __x86_64__

	# See below for an extended explanation of this.
	.cfi_def_cfa %esp,16

	pushl	%eax			# Save %eax in case it is a parameter.

	.cfi_adjust_cfa_offset 4	# Account for pushed register.

	movl	%esp,%eax		# Current stack,
	subl	8(%esp),%eax		# less required stack frame size,
	subl	$NON_SPLIT_STACK,%eax	# less space for non-split code.
	cmpl	%gs:0x30,%eax		# See if we have enough space.
	jb	2f			# Get more space if we need it.

	# Here the stack is
	#	%esp + 20:	stack pointer after two returns
	#	%esp + 16:	return address of morestack caller's caller
	#	%esp + 12:	size of parameters
	#	%esp + 8:	new stack frame size
	#	%esp + 4:	return address of this function
	#	%esp:		saved %eax
	#
	# Since we aren't doing a full split stack, we don't need to
	# do anything when our caller returns.  So we return to our
	# caller rather than calling it, and let it return as usual.
	# To make that work we adjust the return address.

	# This breaks call/return address prediction for the call to
	# this function.  I can't figure out a way to make it work
	# short of copying the parameters down the stack, which will
	# probably take more clock cycles than we will lose breaking
	# call/return address prediction.  We will only break
	# prediction for this call, not for our caller.

	movl	4(%esp),%eax		# Increment the return address
	cmpb	$0xc3,(%eax)		# to skip the ret instruction;
	je	1f			# see above.
	addl	$2,%eax
1:	inc	%eax

	# If the instruction that we return to is
	#   leal  20(%ebp),{%eax,%ecx,%edx}
	# then we have been called by a varargs function that expects
	# %ebp to hold a real value.  That can only work if we do the
	# full stack split routine.  FIXME: This is fragile.
	cmpb	$0x8d,(%eax)		# 0x8d: leal opcode.
	jne	3f
	cmpb	$0x14,2(%eax)		# 0x14: displacement of 20.
	jne	3f
	cmpb	$0x45,1(%eax)		# modrm for 20(%ebp),%eax.
	je	2f
	cmpb	$0x4d,1(%eax)		# modrm for 20(%ebp),%ecx.
	je	2f
	cmpb	$0x55,1(%eax)		# modrm for 20(%ebp),%edx.
	je	2f

3:
	movl	%eax,4(%esp)		# Update return address.

	popl	%eax			# Restore %eax and stack.

	.cfi_adjust_cfa_offset -4	# Account for popped register.

	ret	$8			# Return to caller, popping args.

2:
	.cfi_adjust_cfa_offset 4	# Back to where we were.

	popl	%eax			# Restore %eax and stack.

	.cfi_adjust_cfa_offset -4	# Account for popped register.

	# Increment space we request.
	addl	$NON_SPLIT_STACK+0x1000+BACKOFF,4(%esp)

	# Fall through into morestack.

#else

	# See below for an extended explanation of this.
	.cfi_def_cfa %rsp,16

	pushq	%rax			# Save %rax in case caller is using
					# it to preserve original %r10.
	.cfi_adjust_cfa_offset 8	# Adjust for pushed register.
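
	# In C-like pseudocode, the test below is roughly (a sketch for
	# clarity, not generated code; "guard" is the per-thread value
	# kept at %fs:0x70 / %fs:0x40, see the FIXME about
	# TARGET_THREAD_SPLIT_STACK_OFFSET further down):
	#   if ((char *) sp - frame_size - NON_SPLIT_STACK < guard)
	#     goto 2;		/* get more space */
	#   /* else adjust the return address and return */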

	movq	%rsp,%rax		# Current stack,
	subq	%r10,%rax		# less required stack frame size,
	subq	$NON_SPLIT_STACK,%rax	# less space for non-split code.

#ifdef __LP64__
	cmpq	%fs:0x70,%rax		# See if we have enough space.
#else
	cmpl	%fs:0x40,%eax
#endif

	jb	2f			# Get more space if we need it.

	# If the instruction that we return to is
	#   leaq  24(%rbp), %r11
	# then we have been called by a varargs function that expects
	# %rbp to hold a real value.  That can only work if we do the
	# full stack split routine.  FIXME: This is fragile.
	movq	8(%rsp),%rax
	incq	%rax			# Skip ret instruction in caller.
	cmpl	$0x185d8d4c,(%rax)	# Encoding of leaq 24(%rbp),%r11.
	je	2f

	# This breaks call/return prediction, as described above.
	incq	8(%rsp)			# Increment the return address.

	popq	%rax			# Restore register.

	.cfi_adjust_cfa_offset -8	# Adjust for popped register.

	ret				# Return to caller.

2:
	popq	%rax			# Restore register.

	.cfi_adjust_cfa_offset -8	# Adjust for popped register.

	# Increment space we request.
	addq	$NON_SPLIT_STACK+0x1000+BACKOFF,%r10

	# Fall through into morestack.

#endif

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack_non_split, . - __morestack_non_split
#endif

# __morestack_non_split falls through into __morestack.


# The __morestack function.

	.global	__morestack
	.hidden	__morestack

#ifdef __ELF__
	.type	__morestack,@function
#endif

__morestack:
.LFB1:
	.cfi_startproc


#ifndef __x86_64__


# The 32-bit __morestack function.

	# We use a cleanup to restore the stack guard if an exception
	# is thrown through this code.
#ifndef __PIC__
	.cfi_personality 0,__gcc_personality_v0
	.cfi_lsda 0,.LLSDA1
#else
	.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
	.cfi_lsda 0x1b,.LLSDA1
#endif

	# We return below with a ret $8.  We will return to a single
	# return instruction, which will return to the caller of our
	# caller.  We let the unwinder skip that single return
	# instruction, and just return to the real caller.

	# Here CFA points just past the return address on the stack,
	# e.g., on function entry it is %esp + 4.  The stack looks
	# like this:
	#	CFA + 12:	stack pointer after two returns
	#	CFA + 8:	return address of morestack caller's caller
	#	CFA + 4:	size of parameters
	#	CFA:		new stack frame size
	#	CFA - 4:	return address of this function
	#	CFA - 8:	previous value of %ebp; %ebp points here
	# Setting the new CFA to be the current CFA + 12 (i.e., %esp +
	# 16) will make the unwinder pick up the right return address.

	.cfi_def_cfa %esp,16

	pushl	%ebp
	.cfi_adjust_cfa_offset 4
	.cfi_offset %ebp, -20
	movl	%esp,%ebp
	.cfi_def_cfa_register %ebp

	# In 32-bit mode the parameters are pushed on the stack.  The
	# argument size is pushed, then the new stack frame size is
	# pushed.

	# In the body of a non-leaf function, the stack pointer will
	# be aligned to a 16-byte boundary.  That is CFA + 12 in the
	# stack picture above: (CFA + 12) % 16 == 0.  At this point we
	# have %esp == CFA - 8, so %esp % 16 == 12.  We need some
	# space for saving registers and passing parameters, and we
	# need to wind up with %esp % 16 == 0.
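	# Concretely: %esp % 16 == 12 here, and (12 - 44) % 16 == 0,
	# so the subtraction below restores 16-byte alignment while
	# leaving room for the four register saves at -4(%ebp) through
	# -16(%ebp) and the three outgoing argument words at 0(%esp)
	# through 8(%esp).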
	subl	$44,%esp

	# Because our cleanup code may need to clobber %ebx, we need
	# to save it here so the unwinder can restore the value used
	# by the caller.  Note that we don't have to restore the
	# register, since we don't change it, we just have to save it
	# for the unwinder.
	movl	%ebx,-4(%ebp)
	.cfi_offset %ebx, -24

	# In 32-bit mode the registers %eax, %edx, and %ecx may be
	# used for parameters, depending on the regparm and fastcall
	# attributes.

	movl	%eax,-8(%ebp)
	movl	%edx,-12(%ebp)
	movl	%ecx,-16(%ebp)

	call	__morestack_block_signals

	movl	12(%ebp),%eax		# The size of the parameters.
	movl	%eax,8(%esp)
	leal	20(%ebp),%eax		# Address of caller's parameters.
	movl	%eax,4(%esp)
	addl	$BACKOFF,8(%ebp)	# Ask for backoff bytes.
	leal	8(%ebp),%eax		# The address of the new frame size.
	movl	%eax,(%esp)

	call	__generic_morestack

	movl	%eax,%esp		# Switch to the new stack.
	subl	8(%ebp),%eax		# The end of the stack space.
	addl	$BACKOFF,%eax		# Back off BACKOFF bytes.

.LEHB0:
	# FIXME: The offset must match
	# TARGET_THREAD_SPLIT_STACK_OFFSET in
	# gcc/config/i386/linux.h.
	movl	%eax,%gs:0x30		# Save the new stack boundary.

	call	__morestack_unblock_signals

	movl	-12(%ebp),%edx		# Restore registers.
	movl	-16(%ebp),%ecx

	movl	4(%ebp),%eax		# Increment the return address
	cmpb	$0xc3,(%eax)		# to skip the ret instruction;
	je	1f			# see above.
	addl	$2,%eax
1:	inc	%eax

	movl	%eax,-12(%ebp)		# Store return address in an
					# unused slot.

	movl	-8(%ebp),%eax		# Restore the last register.

	call	*-12(%ebp)		# Call our caller!

	# The caller will return here, as predicted.

	# Save the registers which may hold a return value.  We
	# assume that __generic_releasestack does not touch any
	# floating point or vector registers.
	pushl	%eax
	pushl	%edx

	# Push the arguments to __generic_releasestack now so that the
	# stack is at a 16-byte boundary for
	# __morestack_block_signals.
	pushl	$0			# Where the available space is returned.
	leal	0(%esp),%eax		# Push its address.
	push	%eax

	call	__morestack_block_signals

	call	__generic_releasestack

	subl	4(%esp),%eax		# Subtract available space.
	addl	$BACKOFF,%eax		# Back off BACKOFF bytes.
.LEHE0:
	movl	%eax,%gs:0x30		# Save the new stack boundary.

	addl	$8,%esp			# Remove values from stack.

	# We need to restore the old stack pointer, which is in %ebp,
	# before we unblock signals.  We also need to restore %eax and
	# %edx after we unblock signals but before we return.  Do this
	# by moving %eax and %edx from the current stack to the old
	# stack.

	popl	%edx			# Pop return value from current stack.
	popl	%eax

	movl	%ebp,%esp		# Restore stack pointer.

	# As before, we now have %esp % 16 == 12.

	pushl	%eax			# Push return value on old stack.
	pushl	%edx
	subl	$4,%esp			# Align stack to 16-byte boundary.

	call	__morestack_unblock_signals

	addl	$4,%esp
	popl	%edx			# Restore return value.
	popl	%eax

	.cfi_remember_state

	# We never changed %ebx, so we don't have to actually restore it.
	.cfi_restore %ebx

	popl	%ebp
	.cfi_restore %ebp
	.cfi_def_cfa %esp, 16
	ret	$8			# Return to caller, which will
					# immediately return.  Pop
					# arguments as we go.
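
# In C-like pseudocode, the release path above is roughly (a sketch
# for clarity, not generated code; "guard" stands for the per-thread
# value at %gs:0x30):
#   /* save the return-value registers */
#   __morestack_block_signals ();
#   size_t available = 0;
#   void *old = __generic_releasestack (&available);
#   guard = (char *) old - available + BACKOFF;
#   /* switch %esp back to the old stack (saved in %ebp) */
#   __morestack_unblock_signals ();
#   /* restore the return-value registers and return through the
#      single ret in our caller to our caller's caller */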

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.

.L1:
	.cfi_restore_state
	subl	$16,%esp		# Maintain 16 byte alignment.
	movl	%eax,4(%esp)		# Save exception header.
	movl	%ebp,(%esp)		# Stack pointer after resume.
	call	__generic_findstack
	movl	%ebp,%ecx		# Get the stack pointer.
	subl	%eax,%ecx		# Subtract available space.
	addl	$BACKOFF,%ecx		# Back off BACKOFF bytes.
	movl	%ecx,%gs:0x30		# Save new stack boundary.
	movl	4(%esp),%eax		# Function argument.
	movl	%eax,(%esp)
#ifdef __PIC__
	call	__x86.get_pc_thunk.bx	# %ebx may not be set up for us.
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	call	_Unwind_Resume@PLT	# Resume unwinding.
#else
	call	_Unwind_Resume
#endif

#else /* defined(__x86_64__) */


# The 64-bit __morestack function.

	# We use a cleanup to restore the stack guard if an exception
	# is thrown through this code.
#ifndef __PIC__
	.cfi_personality 0x3,__gcc_personality_v0
	.cfi_lsda 0x3,.LLSDA1
#else
	.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
	.cfi_lsda 0x1b,.LLSDA1
#endif

	# We will return to a single return instruction, which will
	# return to the caller of our caller.  Let the unwinder skip
	# that single return instruction, and just return to the real
	# caller.
	.cfi_def_cfa %rsp,16

	# Set up a normal backtrace.
	pushq	%rbp
	.cfi_adjust_cfa_offset 8
	.cfi_offset %rbp, -24
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp

	# In 64-bit mode the new stack frame size is passed in %r10
	# and the argument size is passed in %r11.

	addq	$BACKOFF,%r10		# Ask for backoff bytes.
	pushq	%r10			# Save new frame size.

	# In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8,
	# and %r9 may be used for parameters.  We also preserve %rax
	# which the caller may use to hold %r10.

	pushq	%rax
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9

	pushq	%r11

	# We entered morestack with the stack pointer aligned to a
	# 16-byte boundary (the call to morestack's caller used 8
	# bytes, and the call to morestack used 8 bytes).  We have now
	# pushed 10 registers, so we are still aligned to a 16-byte
	# boundary.

	call	__morestack_block_signals

	leaq	-8(%rbp),%rdi		# Address of new frame size.
	leaq	24(%rbp),%rsi		# The caller's parameters.
	popq	%rdx			# The size of the parameters.

	subq	$8,%rsp			# Align stack.

	call	__generic_morestack

	movq	-8(%rbp),%r10		# Reload modified frame size.
	movq	%rax,%rsp		# Switch to the new stack.
	subq	%r10,%rax		# The end of the stack space.
	addq	$BACKOFF,%rax		# Back off BACKOFF bytes.

.LEHB0:
	# FIXME: The offset must match
	# TARGET_THREAD_SPLIT_STACK_OFFSET in
	# gcc/config/i386/linux64.h.
	# Macro to save the new stack boundary.
#ifdef __LP64__
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg)	movq	%r##reg,%fs:0x70
#else
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg)	movl	%e##reg,%fs:0x40
#endif
	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

	call	__morestack_unblock_signals

	movq	-24(%rbp),%rdi		# Restore registers.
	movq	-32(%rbp),%rsi
	movq	-40(%rbp),%rdx
	movq	-48(%rbp),%rcx
	movq	-56(%rbp),%r8
	movq	-64(%rbp),%r9

	movq	8(%rbp),%r10		# Increment the return address
	incq	%r10			# to skip the ret instruction;
					# see above.
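
	# (A single increment is enough here: in 64-bit mode the
	# arguments travel in registers and the ABI never uses
	# callee-popped stack arguments, so the stub after the call is
	# always a one-byte ret, 0xc3.  The 32-bit path must also
	# allow for the three-byte ret $N, 0xc2 imm16.)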

	movq	-16(%rbp),%rax		# Restore caller's %rax.

	call	*%r10			# Call our caller!

	# The caller will return here, as predicted.

	# Save the registers which may hold a return value.  We
	# assume that __generic_releasestack does not touch any
	# floating point or vector registers.
	pushq	%rax
	pushq	%rdx

	call	__morestack_block_signals

	pushq	$0			# For alignment.
	pushq	$0			# Where the available space is returned.
	leaq	0(%rsp),%rdi		# Pass its address.

	call	__generic_releasestack

	subq	0(%rsp),%rax		# Subtract available space.
	addq	$BACKOFF,%rax		# Back off BACKOFF bytes.
.LEHE0:
	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

	addq	$16,%rsp		# Remove values from stack.

	# We need to restore the old stack pointer, which is in %rbp,
	# before we unblock signals.  We also need to restore %rax and
	# %rdx after we unblock signals but before we return.  Do this
	# by moving %rax and %rdx from the current stack to the old
	# stack.

	popq	%rdx			# Pop return value from current stack.
	popq	%rax

	movq	%rbp,%rsp		# Restore stack pointer.

	# Now %rsp % 16 == 8.

	subq	$8,%rsp			# For alignment.
	pushq	%rax			# Push return value on old stack.
	pushq	%rdx

	call	__morestack_unblock_signals

	popq	%rdx			# Restore return value.
	popq	%rax
	addq	$8,%rsp

	.cfi_remember_state
	popq	%rbp
	.cfi_restore %rbp
	.cfi_def_cfa %rsp, 16
	ret				# Return to caller, which will
					# immediately return.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.

.L1:
	.cfi_restore_state
	subq	$16,%rsp		# Maintain 16 byte alignment.
	movq	%rax,(%rsp)		# Save exception header.
	movq	%rbp,%rdi		# Stack pointer after resume.
	call	__generic_findstack
	movq	%rbp,%rcx		# Get the stack pointer.
	subq	%rax,%rcx		# Subtract available space.
	addq	$BACKOFF,%rcx		# Back off BACKOFF bytes.
	X86_64_SAVE_NEW_STACK_BOUNDARY (cx)
	movq	(%rsp),%rdi		# Restore exception data for call.
#ifdef __PIC__
	call	_Unwind_Resume@PLT	# Resume unwinding.
#else
	call	_Unwind_Resume		# Resume unwinding.
#endif

#endif /* defined(__x86_64__) */

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack, . - __morestack
#endif

#if !defined(__x86_64__) && defined(__PIC__)
# Output the thunk to get PC into bx, since we use it above.
	.section	.text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
	.globl	__x86.get_pc_thunk.bx
	.hidden	__x86.get_pc_thunk.bx
#ifdef __ELF__
	.type	__x86.get_pc_thunk.bx, @function
#endif
__x86.get_pc_thunk.bx:
	.cfi_startproc
	movl	(%esp), %ebx
	ret
	.cfi_endproc
#ifdef __ELF__
	.size	__x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
#endif
#endif

# The exception table.  This tells the personality routine to execute
# the exception handler.
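# In other words, the single call-site entry below tells the unwinder
# that an exception raised between .LEHB0 and .LEHE0 (in whichever of
# the 32-bit or 64-bit bodies was assembled) lands at .L1, which
# resets the stack guard before resuming unwinding.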

	.section	.gcc_except_table,"a",@progbits
	.align	4
.LLSDA1:
	.byte	0xff	# @LPStart format (omit)
	.byte	0xff	# @TType format (omit)
	.byte	0x1	# call-site format (uleb128)
	.uleb128 .LLSDACSE1-.LLSDACSB1	# Call-site table length
.LLSDACSB1:
	.uleb128 .LEHB0-.LFB1	# region 0 start
	.uleb128 .LEHE0-.LEHB0	# length
	.uleb128 .L1-.LFB1	# landing pad
	.uleb128 0		# action
.LLSDACSE1:


	.global	__gcc_personality_v0
#ifdef __PIC__
	# Build a position independent reference to the basic
	# personality function.
	.hidden DW.ref.__gcc_personality_v0
	.weak	DW.ref.__gcc_personality_v0
	.section	.data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
	.type	DW.ref.__gcc_personality_v0, @object
DW.ref.__gcc_personality_v0:
#ifndef __LP64__
	.align	4
	.size	DW.ref.__gcc_personality_v0, 4
	.long	__gcc_personality_v0
#else
	.align	8
	.size	DW.ref.__gcc_personality_v0, 8
	.quad	__gcc_personality_v0
#endif
#endif

#if defined __x86_64__ && defined __LP64__

# This entry point is used for the large model.  With this entry point
# the upper 32 bits of %r10 hold the argument size and the lower 32
# bits hold the new stack frame size.  There doesn't seem to be a way
# to know in the assembler code that we are assembling for the large
# model, and there doesn't seem to be a large model multilib anyhow.
# If one is developed, then the non-PIC code is probably OK since we
# will probably be close to the morestack code, but the PIC code
# almost certainly needs to be changed.  FIXME.

	.text
	.global	__morestack_large_model
	.hidden	__morestack_large_model

#ifdef __ELF__
	.type	__morestack_large_model,@function
#endif

__morestack_large_model:

	.cfi_startproc

	movq	%r10, %r11
	andl	$0xffffffff, %r10d
	sarq	$32, %r11
	jmp	__morestack

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack_large_model, . - __morestack_large_model
#endif

#endif /* __x86_64__ && __LP64__ */

# Initialize the stack test value when the program starts or when a
# new thread starts.  We don't know how large the main stack is, so we
# guess conservatively.  We might be able to use getrlimit here.

	.text
	.global	__stack_split_initialize
	.hidden	__stack_split_initialize

#ifdef __ELF__
	.type	__stack_split_initialize, @function
#endif

__stack_split_initialize:

#ifndef __x86_64__

	leal	-16000(%esp),%eax	# We should have at least 16K.
	movl	%eax,%gs:0x30
	pushl	$16000
	pushl	%esp
#ifdef __PIC__
	call	__generic_morestack_set_initial_sp@PLT
#else
	call	__generic_morestack_set_initial_sp
#endif
	addl	$8,%esp
	ret

#else /* defined(__x86_64__) */

	leaq	-16000(%rsp),%rax	# We should have at least 16K.
	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
	movq	%rsp,%rdi
	movq	$16000,%rsi
#ifdef __PIC__
	call	__generic_morestack_set_initial_sp@PLT
#else
	call	__generic_morestack_set_initial_sp
#endif
	ret

#endif /* defined(__x86_64__) */

#ifdef __ELF__
	.size	__stack_split_initialize, . - __stack_split_initialize
#endif

# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.

# void *__morestack_get_guard (void) returns the current stack guard.
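
# As an illustration only (the real callers are the __splitstack_*
# context routines mentioned above, not code in this file), the three
# routines below combine roughly like this, where "stack" is the top
# (highest address) of a stack and "size" is its length in bytes:
#   void *old = __morestack_get_guard ();
#   __morestack_set_guard (__morestack_make_guard (stack, size));
#   /* ... run on the other stack ... */
#   __morestack_set_guard (old);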
	.text
	.global	__morestack_get_guard
	.hidden	__morestack_get_guard

#ifdef __ELF__
	.type	__morestack_get_guard,@function
#endif

__morestack_get_guard:

#ifndef __x86_64__
	movl	%gs:0x30,%eax
#else
#ifdef __LP64__
	movq	%fs:0x70,%rax
#else
	movl	%fs:0x40,%eax
#endif
#endif
	ret

#ifdef __ELF__
	.size	__morestack_get_guard, . - __morestack_get_guard
#endif

# void __morestack_set_guard (void *) sets the stack guard.
	.global	__morestack_set_guard
	.hidden	__morestack_set_guard

#ifdef __ELF__
	.type	__morestack_set_guard,@function
#endif

__morestack_set_guard:

#ifndef __x86_64__
	movl	4(%esp),%eax
	movl	%eax,%gs:0x30
#else
	X86_64_SAVE_NEW_STACK_BOUNDARY (di)
#endif
	ret

#ifdef __ELF__
	.size	__morestack_set_guard, . - __morestack_set_guard
#endif

# void *__morestack_make_guard (void *, size_t) returns the stack
# guard value for a stack.
	.global	__morestack_make_guard
	.hidden	__morestack_make_guard

#ifdef __ELF__
	.type	__morestack_make_guard,@function
#endif

__morestack_make_guard:

#ifndef __x86_64__
	movl	4(%esp),%eax
	subl	8(%esp),%eax
	addl	$BACKOFF,%eax
#else
	subq	%rsi,%rdi
	addq	$BACKOFF,%rdi
	movq	%rdi,%rax
#endif
	ret

#ifdef __ELF__
	.size	__morestack_make_guard, . - __morestack_make_guard
#endif

# Make __stack_split_initialize a high priority constructor.  FIXME:
# This is ELF specific.

	.section	.ctors.65535,"aw",@progbits

#ifndef __LP64__
	.align	4
	.long	__stack_split_initialize
	.long	__morestack_load_mmap
#else
	.align	8
	.quad	__stack_split_initialize
	.quad	__morestack_load_mmap
#endif

#ifdef __ELF__
	.section	.note.GNU-stack,"",@progbits
	.section	.note.GNU-split-stack,"",@progbits
	.section	.note.GNU-no-split-stack,"",@progbits
#endif