110d565efSmrg /* Library support for -fsplit-stack.  */
2*ec02198aSmrg /* Copyright (C) 2009-2020 Free Software Foundation, Inc.
310d565efSmrg    Contributed by Ian Lance Taylor <iant@google.com>.
410d565efSmrg 
510d565efSmrg This file is part of GCC.
610d565efSmrg 
710d565efSmrg GCC is free software; you can redistribute it and/or modify it under
810d565efSmrg the terms of the GNU General Public License as published by the Free
910d565efSmrg Software Foundation; either version 3, or (at your option) any later
1010d565efSmrg version.
1110d565efSmrg 
1210d565efSmrg GCC is distributed in the hope that it will be useful, but WITHOUT ANY
1310d565efSmrg WARRANTY; without even the implied warranty of MERCHANTABILITY or
1410d565efSmrg FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
1510d565efSmrg for more details.
1610d565efSmrg 
1710d565efSmrg Under Section 7 of GPL version 3, you are granted additional
1810d565efSmrg permissions described in the GCC Runtime Library Exception, version
1910d565efSmrg 3.1, as published by the Free Software Foundation.
2010d565efSmrg 
2110d565efSmrg You should have received a copy of the GNU General Public License and
2210d565efSmrg a copy of the GCC Runtime Library Exception along with this program;
2310d565efSmrg see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
2410d565efSmrg <http://www.gnu.org/licenses/>.  */
2510d565efSmrg 
26*ec02198aSmrg #pragma GCC optimize ("no-isolate-erroneous-paths-dereference")
27*ec02198aSmrg 
2810d565efSmrg /* powerpc 32-bit not supported.  */
2910d565efSmrg #if !defined __powerpc__ || defined __powerpc64__
3010d565efSmrg 
3110d565efSmrg #include "tconfig.h"
3210d565efSmrg #include "tsystem.h"
3310d565efSmrg #include "coretypes.h"
3410d565efSmrg #include "tm.h"
3510d565efSmrg #include "libgcc_tm.h"
3610d565efSmrg 
3710d565efSmrg /* If inhibit_libc is defined, we cannot compile this file.  The
3810d565efSmrg    effect is that people will not be able to use -fsplit-stack.  That
3910d565efSmrg    is much better than failing the build particularly since people
4010d565efSmrg    will want to define inhibit_libc while building a compiler which
4110d565efSmrg    can build glibc.  */
4210d565efSmrg 
4310d565efSmrg #ifndef inhibit_libc
4410d565efSmrg 
4510d565efSmrg #include <assert.h>
4610d565efSmrg #include <errno.h>
4710d565efSmrg #include <signal.h>
4810d565efSmrg #include <stdlib.h>
4910d565efSmrg #include <string.h>
5010d565efSmrg #include <unistd.h>
5110d565efSmrg #include <sys/mman.h>
5210d565efSmrg #include <sys/uio.h>
5310d565efSmrg 
5410d565efSmrg #include "generic-morestack.h"
5510d565efSmrg 
56*ec02198aSmrg /* Some systems use LD_PRELOAD or similar tricks to add hooks to
57*ec02198aSmrg    mmap/munmap.  That breaks this code, because when we call mmap
58*ec02198aSmrg    there is enough stack space for the system call but there is not,
59*ec02198aSmrg    in general, enough stack space to run a hook.  Try to avoid the
60*ec02198aSmrg    problem by calling syscall directly.  We only do this on GNU/Linux
61*ec02198aSmrg    for now, but it should be easy to add support for more systems with
62*ec02198aSmrg    testing.  */
63*ec02198aSmrg 
64*ec02198aSmrg #if defined(__gnu_linux__)
65*ec02198aSmrg 
66*ec02198aSmrg #include <sys/syscall.h>
67*ec02198aSmrg 
68*ec02198aSmrg #if defined(SYS_mmap) || defined(SYS_mmap2)
69*ec02198aSmrg 
70*ec02198aSmrg #ifdef SYS_mmap2
71*ec02198aSmrg #define MORESTACK_MMAP SYS_mmap2
72*ec02198aSmrg #define MORESTACK_ADJUST_OFFSET(x) ((x) / 4096ULL)
73*ec02198aSmrg #else
74*ec02198aSmrg #define MORESTACK_MMAP SYS_mmap
75*ec02198aSmrg #define MORESTACK_ADJUST_OFFSET(x) (x)
76*ec02198aSmrg #endif
77*ec02198aSmrg 
78*ec02198aSmrg static void *
morestack_mmap(void * addr,size_t length,int prot,int flags,int fd,off_t offset)79*ec02198aSmrg morestack_mmap (void *addr, size_t length, int prot, int flags, int fd,
80*ec02198aSmrg 		off_t offset)
81*ec02198aSmrg {
82*ec02198aSmrg   offset = MORESTACK_ADJUST_OFFSET (offset);
83*ec02198aSmrg 
84*ec02198aSmrg #ifdef __s390__
85*ec02198aSmrg   long args[6] = { (long) addr, (long) length, (long) prot, (long) flags,
86*ec02198aSmrg 		   (long) fd, (long) offset };
87*ec02198aSmrg   return (void *) syscall (MORESTACK_MMAP, args);
88*ec02198aSmrg #else
89*ec02198aSmrg   return (void *) syscall (MORESTACK_MMAP, addr, length, prot, flags, fd,
90*ec02198aSmrg 			   offset);
91*ec02198aSmrg #endif
92*ec02198aSmrg }
93*ec02198aSmrg 
94*ec02198aSmrg #define mmap morestack_mmap
95*ec02198aSmrg 
#endif /* defined(SYS_mmap) || defined(SYS_mmap2) */
97*ec02198aSmrg 
98*ec02198aSmrg #if defined(SYS_munmap)
99*ec02198aSmrg 
/* Wrapper that performs munmap via a raw system call, for the same
   reason as morestack_mmap: avoid hooked implementations that might
   use too much stack.  */

static int
morestack_munmap (void *addr, size_t length)
{
  long result = syscall (SYS_munmap, addr, length);
  return (int) result;
}
105*ec02198aSmrg 
106*ec02198aSmrg #define munmap morestack_munmap
107*ec02198aSmrg 
108*ec02198aSmrg #endif /* defined(SYS_munmap) */
109*ec02198aSmrg 
110*ec02198aSmrg #endif /* defined(__gnu_linux__) */
111*ec02198aSmrg 
11210d565efSmrg typedef unsigned uintptr_type __attribute__ ((mode (pointer)));
11310d565efSmrg 
11410d565efSmrg /* This file contains subroutines that are used by code compiled with
11510d565efSmrg    -fsplit-stack.  */
11610d565efSmrg 
11710d565efSmrg /* Declare functions to avoid warnings--there is no header file for
11810d565efSmrg    these internal functions.  We give most of these functions the
11910d565efSmrg    flatten attribute in order to minimize their stack usage--here we
12010d565efSmrg    must minimize stack usage even at the cost of code size, and in
12110d565efSmrg    general inlining everything will do that.  */
12210d565efSmrg 
12310d565efSmrg extern void
12410d565efSmrg __generic_morestack_set_initial_sp (void *sp, size_t len)
12510d565efSmrg   __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
12610d565efSmrg 
12710d565efSmrg extern void *
12810d565efSmrg __generic_morestack (size_t *frame_size, void *old_stack, size_t param_size)
12910d565efSmrg   __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
13010d565efSmrg 
13110d565efSmrg extern void *
13210d565efSmrg __generic_releasestack (size_t *pavailable)
13310d565efSmrg   __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
13410d565efSmrg 
13510d565efSmrg extern void
13610d565efSmrg __morestack_block_signals (void)
13710d565efSmrg   __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
13810d565efSmrg 
13910d565efSmrg extern void
14010d565efSmrg __morestack_unblock_signals (void)
14110d565efSmrg   __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
14210d565efSmrg 
14310d565efSmrg extern size_t
14410d565efSmrg __generic_findstack (void *stack)
14510d565efSmrg   __attribute__ ((no_split_stack, flatten, visibility ("hidden")));
14610d565efSmrg 
14710d565efSmrg extern void
14810d565efSmrg __morestack_load_mmap (void)
14910d565efSmrg   __attribute__ ((no_split_stack, visibility ("hidden")));
15010d565efSmrg 
15110d565efSmrg extern void *
15210d565efSmrg __morestack_allocate_stack_space (size_t size)
15310d565efSmrg   __attribute__ ((visibility ("hidden")));
15410d565efSmrg 
15510d565efSmrg /* These are functions which -fsplit-stack code can call.  These are
15610d565efSmrg    not called by the compiler, and are not hidden.  FIXME: These
15710d565efSmrg    should be in some header file somewhere, somehow.  */
15810d565efSmrg 
15910d565efSmrg extern void *
16010d565efSmrg __splitstack_find (void *, void *, size_t *, void **, void **, void **)
16110d565efSmrg   __attribute__ ((visibility ("default")));
16210d565efSmrg 
16310d565efSmrg extern void
16410d565efSmrg __splitstack_block_signals (int *, int *)
16510d565efSmrg   __attribute__ ((visibility ("default")));
16610d565efSmrg 
16710d565efSmrg extern void
16810d565efSmrg __splitstack_getcontext (void *context[10])
16910d565efSmrg   __attribute__ ((no_split_stack, visibility ("default")));
17010d565efSmrg 
17110d565efSmrg extern void
17210d565efSmrg __splitstack_setcontext (void *context[10])
17310d565efSmrg   __attribute__ ((no_split_stack, visibility ("default")));
17410d565efSmrg 
17510d565efSmrg extern void *
17610d565efSmrg __splitstack_makecontext (size_t, void *context[10], size_t *)
17710d565efSmrg   __attribute__ ((visibility ("default")));
17810d565efSmrg 
17910d565efSmrg extern void *
18010d565efSmrg __splitstack_resetcontext (void *context[10], size_t *)
18110d565efSmrg   __attribute__ ((visibility ("default")));
18210d565efSmrg 
18310d565efSmrg extern void
18410d565efSmrg __splitstack_releasecontext (void *context[10])
18510d565efSmrg   __attribute__ ((visibility ("default")));
18610d565efSmrg 
18710d565efSmrg extern void
18810d565efSmrg __splitstack_block_signals_context (void *context[10], int *, int *)
18910d565efSmrg   __attribute__ ((visibility ("default")));
19010d565efSmrg 
19110d565efSmrg extern void *
19210d565efSmrg __splitstack_find_context (void *context[10], size_t *, void **, void **,
19310d565efSmrg 			   void **)
19410d565efSmrg   __attribute__ ((visibility ("default")));
19510d565efSmrg 
19610d565efSmrg /* These functions must be defined by the processor specific code.  */
19710d565efSmrg 
19810d565efSmrg extern void *__morestack_get_guard (void)
19910d565efSmrg   __attribute__ ((no_split_stack, visibility ("hidden")));
20010d565efSmrg 
20110d565efSmrg extern void __morestack_set_guard (void *)
20210d565efSmrg   __attribute__ ((no_split_stack, visibility ("hidden")));
20310d565efSmrg 
20410d565efSmrg extern void *__morestack_make_guard (void *, size_t)
20510d565efSmrg   __attribute__ ((no_split_stack, visibility ("hidden")));
20610d565efSmrg 
20710d565efSmrg /* When we allocate a stack segment we put this header at the
20810d565efSmrg    start.  */
20910d565efSmrg 
struct stack_segment
{
  /* The previous stack segment--when a function running on this stack
     segment returns, it will run on the previous one.  */
  struct stack_segment *prev;
  /* The next stack segment, if it has been allocated--when a function
     is running on this stack segment, the next one is not being
     used.  */
  struct stack_segment *next;
  /* The total size of this stack segment (the usable stack space;
     it does not include this header, which sits at the start of the
     mapped region).  */
  size_t size;
  /* The stack address when this stack was created.  This is used when
     popping the stack.  */
  void *old_stack;
  /* A list of memory blocks allocated by dynamic stack
     allocation.  */
  struct dynamic_allocation_blocks *dynamic_allocation;
  /* A list of dynamic memory blocks no longer needed.  */
  struct dynamic_allocation_blocks *free_dynamic_allocation;
  /* An extra pointer in case we need some more information some
     day.  */
  void *extra;
};
23310d565efSmrg 
23410d565efSmrg /* This structure holds the (approximate) initial stack pointer and
23510d565efSmrg    size for the system supplied stack for a thread.  This is set when
23610d565efSmrg    the thread is created.  We also store a sigset_t here to hold the
23710d565efSmrg    signal mask while splitting the stack, since we don't want to store
23810d565efSmrg    that on the stack.  */
23910d565efSmrg 
struct initial_sp
{
  /* The initial stack pointer.  */
  void *sp;
  /* The stack length.  */
  size_t len;
  /* A signal mask, put here so that the thread can use it without
     needing stack space.  */
  sigset_t mask;
  /* Non-zero if we should not block signals.  This is a reversed flag
     so that the default zero value is the safe value.  The type is
     uintptr_type because it replaced one of the void * pointers in
     extra.  */
  uintptr_type dont_block_signals;
  /* Some extra space for later extensibility.  */
  void *extra[4];
};
25710d565efSmrg 
25810d565efSmrg /* A list of memory blocks allocated by dynamic stack allocation.
25910d565efSmrg    This is used for code that calls alloca or uses variably sized
26010d565efSmrg    arrays.  */
26110d565efSmrg 
struct dynamic_allocation_blocks
{
  /* The next block in the list.  */
  struct dynamic_allocation_blocks *next;
  /* The size of the allocated memory.  */
  size_t size;
  /* The allocated memory (heap-allocated; freed by
     free_dynamic_blocks).  */
  void *block;
};
27110d565efSmrg 
27210d565efSmrg /* These thread local global variables must be shared by all split
27310d565efSmrg    stack code across shared library boundaries.  Therefore, they have
27410d565efSmrg    default visibility.  They have extensibility fields if needed for
27510d565efSmrg    new versions.  If more radical changes are needed, new code can be
27610d565efSmrg    written using new variable names, while still using the existing
27710d565efSmrg    variables in a backward compatible manner.  Symbol versioning is
27810d565efSmrg    also used, although, since these variables are only referenced by
27910d565efSmrg    code in this file and generic-morestack-thread.c, it is likely that
28010d565efSmrg    simply using new names will suffice.  */
28110d565efSmrg 
28210d565efSmrg /* The first stack segment allocated for this thread.  */
28310d565efSmrg 
__thread struct stack_segment *__morestack_segments
  __attribute__ ((visibility ("default")));

/* The stack segment that we think we are currently using.  This will
   be correct in normal usage, but will be incorrect if an exception
   unwinds into a different stack segment or if longjmp jumps to a
   different stack segment.  */

__thread struct stack_segment *__morestack_current_segment
  __attribute__ ((visibility ("default")));

/* The initial stack pointer and size for this thread.  */

__thread struct initial_sp __morestack_initial_sp
  __attribute__ ((visibility ("default")));

/* A static signal mask, to avoid taking up stack space.  It is
   initialized in __generic_morestack_set_initial_sp.  */

static sigset_t __morestack_fullmask;

/* Page size, as returned from getpagesize(). Set on startup. */
static unsigned int static_pagesize;

/* Set on startup to non-zero value if SPLIT_STACK_GUARD env var is set. */
static int use_guard_page;
309c7a68eb7Smrg 
31010d565efSmrg /* Convert an integer to a decimal string without using much stack
   space.  Return a pointer to the part of the buffer to use.  We use this
31210d565efSmrg    instead of sprintf because sprintf will require too much stack
31310d565efSmrg    space.  */
31410d565efSmrg 
/* Convert VAL to decimal text in BUF (capacity BUFLEN), writing the
   digits backwards from the end of the buffer.  Returns a pointer to
   the first character used and stores the character count in
   *PRINT_LEN.  The result is not NUL terminated.  Used instead of
   sprintf to keep stack usage minimal.  */

static char *
print_int (int val, char *buf, int buflen, size_t *print_len)
{
  unsigned int remaining;
  int negative;
  int pos;

  /* Work with the magnitude as unsigned so that INT_MIN negates
     without overflow.  */
  negative = val < 0;
  remaining = (unsigned int) val;
  if (negative)
    remaining = - remaining;

  pos = buflen;
  do
    {
      buf[--pos] = '0' + (remaining % 10);
      remaining /= 10;
    }
  while (remaining != 0 && pos > 0);

  if (negative)
    {
      if (pos > 0)
	--pos;
      buf[pos] = '-';
    }

  *print_len = (size_t) (buflen - pos);
  return buf + pos;
}
35010d565efSmrg 
35110d565efSmrg /* Print the string MSG/LEN, the errno number ERR, and a newline on
35210d565efSmrg    stderr.  Then crash.  */
35310d565efSmrg 
35410d565efSmrg void
35510d565efSmrg __morestack_fail (const char *, size_t, int) __attribute__ ((noreturn));
35610d565efSmrg 
void
__morestack_fail (const char *msg, size_t len, int err)
{
  static const char newline[] = "\n";
  char numbuf[24];
  struct iovec vec[3];
  union { char *p; const char *cp; } deconst;

  /* writev wants a non-const iov_base; launder away the const
     qualifier through a union rather than a cast.  */
  deconst.cp = msg;
  vec[0].iov_base = deconst.p;
  vec[0].iov_len = len;

  /* We can't call strerror, because it may try to translate the error
     message, and that would use too much stack space.  */
  vec[1].iov_base = print_int (err, numbuf, sizeof numbuf, &vec[1].iov_len);

  deconst.cp = &newline[0];
  vec[2].iov_base = deconst.p;
  vec[2].iov_len = sizeof newline - 1;

  /* FIXME: On systems without writev we need to issue three write
     calls, or punt on printing errno.  For now this is irrelevant
     since stack splitting only works on GNU/Linux anyhow.  */
  writev (2, vec, 3);
  abort ();
}
38010d565efSmrg 
38110d565efSmrg /* Allocate a new stack segment.  FRAME_SIZE is the required frame
38210d565efSmrg    size.  */
38310d565efSmrg 
38410d565efSmrg static struct stack_segment *
allocate_segment(size_t frame_size)38510d565efSmrg allocate_segment (size_t frame_size)
38610d565efSmrg {
38710d565efSmrg   unsigned int pagesize;
38810d565efSmrg   unsigned int overhead;
38910d565efSmrg   unsigned int allocate;
39010d565efSmrg   void *space;
39110d565efSmrg   struct stack_segment *pss;
39210d565efSmrg 
39310d565efSmrg   pagesize = static_pagesize;
39410d565efSmrg   overhead = sizeof (struct stack_segment);
39510d565efSmrg 
39610d565efSmrg   allocate = pagesize;
39710d565efSmrg   if (allocate < MINSIGSTKSZ)
39810d565efSmrg     allocate = ((MINSIGSTKSZ + overhead + pagesize - 1)
39910d565efSmrg 		& ~ (pagesize - 1));
40010d565efSmrg   if (allocate < frame_size)
40110d565efSmrg     allocate = ((frame_size + overhead + pagesize - 1)
40210d565efSmrg 		& ~ (pagesize - 1));
40310d565efSmrg 
40410d565efSmrg   if (use_guard_page)
40510d565efSmrg     allocate += pagesize;
40610d565efSmrg 
40710d565efSmrg   /* FIXME: If this binary requires an executable stack, then we need
40810d565efSmrg      to set PROT_EXEC.  Unfortunately figuring that out is complicated
40910d565efSmrg      and target dependent.  We would need to use dl_iterate_phdr to
41010d565efSmrg      see if there is any object which does not have a PT_GNU_STACK
41110d565efSmrg      phdr, though only for architectures which use that mechanism.  */
41210d565efSmrg   space = mmap (NULL, allocate, PROT_READ | PROT_WRITE,
41310d565efSmrg 		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
41410d565efSmrg   if (space == MAP_FAILED)
41510d565efSmrg     {
41610d565efSmrg       static const char msg[] =
41710d565efSmrg 	"unable to allocate additional stack space: errno ";
41810d565efSmrg       __morestack_fail (msg, sizeof msg - 1, errno);
41910d565efSmrg     }
42010d565efSmrg 
42110d565efSmrg   if (use_guard_page)
42210d565efSmrg     {
42310d565efSmrg       void *guard;
42410d565efSmrg 
42510d565efSmrg #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
42610d565efSmrg       guard = space;
42710d565efSmrg       space = (char *) space + pagesize;
42810d565efSmrg #else
42910d565efSmrg       guard = space + allocate - pagesize;
43010d565efSmrg #endif
43110d565efSmrg 
43210d565efSmrg       mprotect (guard, pagesize, PROT_NONE);
43310d565efSmrg       allocate -= pagesize;
43410d565efSmrg     }
43510d565efSmrg 
43610d565efSmrg   pss = (struct stack_segment *) space;
43710d565efSmrg 
43810d565efSmrg   pss->prev = NULL;
43910d565efSmrg   pss->next = NULL;
44010d565efSmrg   pss->size = allocate - overhead;
44110d565efSmrg   pss->dynamic_allocation = NULL;
44210d565efSmrg   pss->free_dynamic_allocation = NULL;
44310d565efSmrg   pss->extra = NULL;
44410d565efSmrg 
44510d565efSmrg   return pss;
44610d565efSmrg }
44710d565efSmrg 
44810d565efSmrg /* Free a list of dynamic blocks.  */
44910d565efSmrg 
45010d565efSmrg static void
free_dynamic_blocks(struct dynamic_allocation_blocks * p)45110d565efSmrg free_dynamic_blocks (struct dynamic_allocation_blocks *p)
45210d565efSmrg {
45310d565efSmrg   while (p != NULL)
45410d565efSmrg     {
45510d565efSmrg       struct dynamic_allocation_blocks *next;
45610d565efSmrg 
45710d565efSmrg       next = p->next;
45810d565efSmrg       free (p->block);
45910d565efSmrg       free (p);
46010d565efSmrg       p = next;
46110d565efSmrg     }
46210d565efSmrg }
46310d565efSmrg 
46410d565efSmrg /* Merge two lists of dynamic blocks.  */
46510d565efSmrg 
46610d565efSmrg static struct dynamic_allocation_blocks *
merge_dynamic_blocks(struct dynamic_allocation_blocks * a,struct dynamic_allocation_blocks * b)46710d565efSmrg merge_dynamic_blocks (struct dynamic_allocation_blocks *a,
46810d565efSmrg 		      struct dynamic_allocation_blocks *b)
46910d565efSmrg {
47010d565efSmrg   struct dynamic_allocation_blocks **pp;
47110d565efSmrg 
47210d565efSmrg   if (a == NULL)
47310d565efSmrg     return b;
47410d565efSmrg   if (b == NULL)
47510d565efSmrg     return a;
47610d565efSmrg   for (pp = &a->next; *pp != NULL; pp = &(*pp)->next)
47710d565efSmrg     ;
47810d565efSmrg   *pp = b;
47910d565efSmrg   return a;
48010d565efSmrg }
48110d565efSmrg 
48210d565efSmrg /* Release stack segments.  If FREE_DYNAMIC is non-zero, we also free
48310d565efSmrg    any dynamic blocks.  Otherwise we return them.  */
48410d565efSmrg 
struct dynamic_allocation_blocks *
__morestack_release_segments (struct stack_segment **pp, int free_dynamic)
{
  struct dynamic_allocation_blocks *ret;
  struct stack_segment *pss;

  ret = NULL;
  pss = *pp;
  while (pss != NULL)
    {
      struct stack_segment *next;
      unsigned int allocate;

      /* Save the link before the segment is unmapped below.  */
      next = pss->next;

      /* Dispose of any dynamic allocation lists attached to this
	 segment: free them outright if FREE_DYNAMIC, otherwise
	 collect them on RET for the caller.  */
      if (pss->dynamic_allocation != NULL
	  || pss->free_dynamic_allocation != NULL)
	{
	  if (free_dynamic)
	    {
	      free_dynamic_blocks (pss->dynamic_allocation);
	      free_dynamic_blocks (pss->free_dynamic_allocation);
	    }
	  else
	    {
	      ret = merge_dynamic_blocks (pss->dynamic_allocation, ret);
	      ret = merge_dynamic_blocks (pss->free_dynamic_allocation, ret);
	    }
	}

      /* The header and the stack space were mapped as one region in
	 allocate_segment; unmap the whole thing.  */
      allocate = pss->size + sizeof (struct stack_segment);
      if (munmap (pss, allocate) < 0)
	{
	  static const char msg[] = "munmap of stack space failed: errno ";
	  __morestack_fail (msg, sizeof msg - 1, errno);
	}

      pss = next;
    }
  *pp = NULL;

  return ret;
}
52810d565efSmrg 
52910d565efSmrg /* This function is called by a processor specific function to set the
53010d565efSmrg    initial stack pointer for a thread.  The operating system will
53110d565efSmrg    always create a stack for a thread.  Here we record a stack pointer
53210d565efSmrg    near the base of that stack.  The size argument lets the processor
53310d565efSmrg    specific code estimate how much stack space is available on this
53410d565efSmrg    initial stack.  */
53510d565efSmrg 
void
__generic_morestack_set_initial_sp (void *sp, size_t len)
{
  /* The stack pointer most likely starts on a page boundary.  Adjust
     to the nearest 512 byte boundary.  It's not essential that we be
     precise here; getting it wrong will just leave some stack space
     unused.  Round toward the base of the stack: up when the stack
     grows downward, down when it grows upward.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  sp = (void *) ((((__UINTPTR_TYPE__) sp + 511U) / 512U) * 512U);
#else
  sp = (void *) ((((__UINTPTR_TYPE__) sp - 511U) / 512U) * 512U);
#endif

  __morestack_initial_sp.sp = sp;
  __morestack_initial_sp.len = len;
  sigemptyset (&__morestack_initial_sp.mask);

  /* Precompute the all-signals mask used while splitting the stack,
     so that no stack space is needed to build it later.  */
  sigfillset (&__morestack_fullmask);
#if defined(__GLIBC__) && defined(__linux__)
  /* In glibc, the first two real time signals are used by the NPTL
     threading library.  By taking them out of the set of signals, we
     avoid copying the signal mask in pthread_sigmask.  More
     importantly, pthread_sigmask uses less stack space on x86_64.  */
  sigdelset (&__morestack_fullmask, __SIGRTMIN);
  sigdelset (&__morestack_fullmask, __SIGRTMIN + 1);
#endif
}
56310d565efSmrg 
56410d565efSmrg /* This function is called by a processor specific function which is
56510d565efSmrg    run in the prologue when more stack is needed.  The processor
56610d565efSmrg    specific function handles the details of saving registers and
56710d565efSmrg    frobbing the actual stack pointer.  This function is responsible
56810d565efSmrg    for allocating a new stack segment and for copying a parameter
56910d565efSmrg    block from the old stack to the new one.  On function entry
57010d565efSmrg    *PFRAME_SIZE is the size of the required stack frame--the returned
57110d565efSmrg    stack must be at least this large.  On function exit *PFRAME_SIZE
57210d565efSmrg    is the amount of space remaining on the allocated stack.  OLD_STACK
57310d565efSmrg    points at the parameters the old stack (really the current one
57410d565efSmrg    while this function is running).  OLD_STACK is saved so that it can
57510d565efSmrg    be returned by a later call to __generic_releasestack.  PARAM_SIZE
57610d565efSmrg    is the size in bytes of parameters to copy to the new stack.  This
57710d565efSmrg    function returns a pointer to the new stack segment, pointing to
57810d565efSmrg    the memory after the parameters have been copied.  The returned
57910d565efSmrg    value minus the returned *PFRAME_SIZE (or plus if the stack grows
58010d565efSmrg    upward) is the first address on the stack which should not be used.
58110d565efSmrg 
58210d565efSmrg    This function is running on the old stack and has only a limited
58310d565efSmrg    amount of stack space available.  */
58410d565efSmrg 
58510d565efSmrg void *
__generic_morestack(size_t * pframe_size,void * old_stack,size_t param_size)58610d565efSmrg __generic_morestack (size_t *pframe_size, void *old_stack, size_t param_size)
58710d565efSmrg {
58810d565efSmrg   size_t frame_size = *pframe_size;
58910d565efSmrg   struct stack_segment *current;
59010d565efSmrg   struct stack_segment **pp;
59110d565efSmrg   struct dynamic_allocation_blocks *dynamic;
59210d565efSmrg   char *from;
59310d565efSmrg   char *to;
59410d565efSmrg   void *ret;
59510d565efSmrg   size_t i;
59610d565efSmrg   size_t aligned;
59710d565efSmrg 
59810d565efSmrg   current = __morestack_current_segment;
59910d565efSmrg 
60010d565efSmrg   pp = current != NULL ? &current->next : &__morestack_segments;
60110d565efSmrg   if (*pp != NULL && (*pp)->size < frame_size)
60210d565efSmrg     dynamic = __morestack_release_segments (pp, 0);
60310d565efSmrg   else
60410d565efSmrg     dynamic = NULL;
60510d565efSmrg   current = *pp;
60610d565efSmrg 
60710d565efSmrg   if (current == NULL)
60810d565efSmrg     {
60910d565efSmrg       current = allocate_segment (frame_size + param_size);
61010d565efSmrg       current->prev = __morestack_current_segment;
61110d565efSmrg       *pp = current;
61210d565efSmrg     }
61310d565efSmrg 
61410d565efSmrg   current->old_stack = old_stack;
61510d565efSmrg 
61610d565efSmrg   __morestack_current_segment = current;
61710d565efSmrg 
61810d565efSmrg   if (dynamic != NULL)
61910d565efSmrg     {
62010d565efSmrg       /* Move the free blocks onto our list.  We don't want to call
62110d565efSmrg 	 free here, as we are short on stack space.  */
62210d565efSmrg       current->free_dynamic_allocation =
62310d565efSmrg 	merge_dynamic_blocks (dynamic, current->free_dynamic_allocation);
62410d565efSmrg     }
62510d565efSmrg 
62610d565efSmrg   *pframe_size = current->size - param_size;
62710d565efSmrg 
62810d565efSmrg   /* Align the returned stack to a 32-byte boundary.  */
62910d565efSmrg   aligned = (param_size + 31) & ~ (size_t) 31;
63010d565efSmrg 
63110d565efSmrg #ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
63210d565efSmrg   {
63310d565efSmrg     char *bottom = (char *) (current + 1) + current->size;
63410d565efSmrg     to = bottom - aligned;
63510d565efSmrg     ret = bottom - aligned;
63610d565efSmrg   }
63710d565efSmrg #else
63810d565efSmrg   to = current + 1;
63910d565efSmrg   to += aligned - param_size;
64010d565efSmrg   ret = (char *) (current + 1) + aligned;
64110d565efSmrg #endif
64210d565efSmrg 
64310d565efSmrg   /* We don't call memcpy to avoid worrying about the dynamic linker
64410d565efSmrg      trying to resolve it.  */
64510d565efSmrg   from = (char *) old_stack;
64610d565efSmrg   for (i = 0; i < param_size; i++)
64710d565efSmrg     *to++ = *from++;
64810d565efSmrg 
64910d565efSmrg   return ret;
65010d565efSmrg }
65110d565efSmrg 
65210d565efSmrg /* This function is called by a processor specific function when it is
65310d565efSmrg    ready to release a stack segment.  We don't actually release the
65410d565efSmrg    stack segment, we just move back to the previous one.  The current
65510d565efSmrg    stack segment will still be available if we need it in
65610d565efSmrg    __generic_morestack.  This returns a pointer to the new stack
65710d565efSmrg    segment to use, which is the one saved by a previous call to
65810d565efSmrg    __generic_morestack.  The processor specific function is then
65910d565efSmrg    responsible for actually updating the stack pointer.  This sets
66010d565efSmrg    *PAVAILABLE to the amount of stack space now available.  */
66110d565efSmrg 
void *
__generic_releasestack (size_t *pavailable)
{
  struct stack_segment *current;
  void *old_stack;

  /* Pop back to the previous segment; the popped segment stays
     allocated for reuse by __generic_morestack.  */
  current = __morestack_current_segment;
  old_stack = current->old_stack;
  current = current->prev;
  __morestack_current_segment = current;

  if (current != NULL)
    {
      /* Space available on the previous segment: from the end of its
	 header (or from OLD_STACK) to the saved stack pointer.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      *pavailable = (char *) old_stack - (char *) (current + 1);
#else
      *pavailable = (char *) (current + 1) + current->size - (char *) old_stack;
#endif
    }
  else
    {
      size_t used;

      /* We have popped back to the original stack.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) old_stack >= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) __morestack_initial_sp.sp - (char *) old_stack;
#else
      if ((char *) old_stack <= (char *) __morestack_initial_sp.sp)
	used = 0;
      else
	used = (char *) old_stack - (char *) __morestack_initial_sp.sp;
#endif

      /* LEN is only an estimate of the original stack's size, so
	 clamp at zero rather than underflowing.  */
      if (used > __morestack_initial_sp.len)
	*pavailable = 0;
      else
	*pavailable = __morestack_initial_sp.len - used;
    }

  return old_stack;
}
70610d565efSmrg 
70710d565efSmrg /* Block signals while splitting the stack.  This avoids trouble if we
70810d565efSmrg    try to invoke a signal handler which itself wants to split the
70910d565efSmrg    stack.  */
71010d565efSmrg 
71110d565efSmrg extern int pthread_sigmask (int, const sigset_t *, sigset_t *)
71210d565efSmrg   __attribute__ ((weak));
71310d565efSmrg 
71410d565efSmrg void
__morestack_block_signals(void)71510d565efSmrg __morestack_block_signals (void)
71610d565efSmrg {
71710d565efSmrg   if (__morestack_initial_sp.dont_block_signals)
71810d565efSmrg     ;
71910d565efSmrg   else if (pthread_sigmask)
72010d565efSmrg     pthread_sigmask (SIG_BLOCK, &__morestack_fullmask,
72110d565efSmrg 		     &__morestack_initial_sp.mask);
72210d565efSmrg   else
72310d565efSmrg     sigprocmask (SIG_BLOCK, &__morestack_fullmask,
72410d565efSmrg 		 &__morestack_initial_sp.mask);
72510d565efSmrg }
72610d565efSmrg 
72710d565efSmrg /* Unblock signals while splitting the stack.  */
72810d565efSmrg 
72910d565efSmrg void
__morestack_unblock_signals(void)73010d565efSmrg __morestack_unblock_signals (void)
73110d565efSmrg {
73210d565efSmrg   if (__morestack_initial_sp.dont_block_signals)
73310d565efSmrg     ;
73410d565efSmrg   else if (pthread_sigmask)
73510d565efSmrg     pthread_sigmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
73610d565efSmrg   else
73710d565efSmrg     sigprocmask (SIG_SETMASK, &__morestack_initial_sp.mask, NULL);
73810d565efSmrg }
73910d565efSmrg 
74010d565efSmrg /* This function is called to allocate dynamic stack space, for alloca
74110d565efSmrg    or a variably sized array.  This is a regular function with
74210d565efSmrg    sufficient stack space, so we just use malloc to allocate the
74310d565efSmrg    space.  We attach the allocated blocks to the current stack
74410d565efSmrg    segment, so that they will eventually be reused or freed.  */
74510d565efSmrg 
void *
__morestack_allocate_stack_space (size_t size)
{
  struct stack_segment *seg, *current;
  struct dynamic_allocation_blocks *p;

  /* We have to block signals to avoid getting confused if we get
     interrupted by a signal whose handler itself uses alloca or a
     variably sized array.  */
  __morestack_block_signals ();

  /* Since we don't want to call free while we are low on stack space,
     we may have a list of already allocated blocks waiting to be
     freed.  Release them all, unless we find one that is large
     enough.  We don't look at every block to see if one is large
     enough, just the first one, because we aren't trying to build a
     memory allocator here, we're just trying to speed up common
     cases.  */

  current = __morestack_current_segment;
  p = NULL;
  for (seg = __morestack_segments; seg != NULL; seg = seg->next)
    {
      p = seg->free_dynamic_allocation;
      if (p != NULL)
	{
	  if (p->size >= size)
	    {
	      /* Reuse this block: unlink it from the segment's free
		 list; it is handed back to the caller below.  */
	      seg->free_dynamic_allocation = p->next;
	      break;
	    }

	  /* The head block is too small; release this segment's whole
	     pending list rather than searching further into it.  */
	  free_dynamic_blocks (p);
	  seg->free_dynamic_allocation = NULL;
	  p = NULL;
	}
    }

  if (p == NULL)
    {
      /* We need to allocate additional memory.  No fallback on OOM:
	 the split-stack runtime cannot recover, so abort.  */
      p = malloc (sizeof (*p));
      if (p == NULL)
	abort ();
      p->size = size;
      p->block = malloc (size);
      if (p->block == NULL)
	abort ();
    }

  /* If we are still on the initial stack, then we have a space leak.
     FIXME.  */
  if (current != NULL)
    {
      /* Attach the block to the current segment so it is reclaimed
	 when the segment is eventually reused or freed.  */
      p->next = current->dynamic_allocation;
      current->dynamic_allocation = p;
    }

  __morestack_unblock_signals ();

  return p->block;
}
80810d565efSmrg 
80910d565efSmrg /* Find the stack segment for STACK and return the amount of space
81010d565efSmrg    available.  This is used when unwinding the stack because of an
81110d565efSmrg    exception, in order to reset the stack guard correctly.  */
81210d565efSmrg 
size_t
__generic_findstack (void *stack)
{
  struct stack_segment *pss;
  size_t used;

  /* Walk backwards through the segment list looking for the segment
     that contains STACK.  Note the containment test measures from the
     segment header itself, matching the original code.  */
  for (pss = __morestack_current_segment; pss != NULL; pss = pss->prev)
    {
      if ((char *) pss < (char *) stack
	  && (char *) pss + pss->size > (char *) stack)
	{
	  /* Resynchronize the current segment, since unwinding may
	     have jumped across several segments at once.  */
	  __morestack_current_segment = pss;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
	  return (char *) stack - (char *) (pss + 1);
#else
	  return (char *) (pss + 1) + pss->size - (char *) stack;
#endif
	}
    }

  /* We have popped back to the original stack.  */

  if (__morestack_initial_sp.sp == NULL)
    return 0;

  /* Compute how much of the initial stack is in use at STACK.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  if ((char *) stack >= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) __morestack_initial_sp.sp - (char *) stack;
#else
  if ((char *) stack <= (char *) __morestack_initial_sp.sp)
    used = 0;
  else
    used = (char *) stack - (char *) __morestack_initial_sp.sp;
#endif

  /* Clamp to zero if more than the recorded length was consumed.  */
  if (used > __morestack_initial_sp.len)
    return 0;
  else
    return __morestack_initial_sp.len - used;
}
85510d565efSmrg 
85610d565efSmrg /* This function is called at program startup time to make sure that
85710d565efSmrg    mmap, munmap, and getpagesize are resolved if linking dynamically.
85810d565efSmrg    We want to resolve them while we have enough stack for them, rather
859c7a68eb7Smrg    than calling into the dynamic linker while low on stack space.
860c7a68eb7Smrg    Similarly, invoke getenv here to check for split-stack related control
861c7a68eb7Smrg    variables, since doing so as part of the __morestack path can result
862c7a68eb7Smrg    in unwanted use of SSE/AVX registers (see GCC PR 86213). */
86310d565efSmrg 
void
__morestack_load_mmap (void)
{
  /* Call with bogus values to run faster.  We don't care if the call
     fails.  Pass __MORESTACK_CURRENT_SEGMENT to make sure that any
     TLS accessor function is resolved.  */
  mmap (__morestack_current_segment, 0, PROT_READ, MAP_ANONYMOUS, -1, 0);
  mprotect (NULL, 0, 0);
  /* static_pagesize has not been set yet on the first call; this is
     fine, since the call exists only to resolve the munmap symbol.  */
  munmap (0, static_pagesize);

  /* Initialize these values here, so as to avoid dynamic linker
     activity as part of a __morestack call. */
  static_pagesize = getpagesize();
  use_guard_page = getenv ("SPLIT_STACK_GUARD") != 0;
}
87910d565efSmrg 
88010d565efSmrg /* This function may be used to iterate over the stack segments.
88110d565efSmrg    This can be called like this.
88210d565efSmrg      void *next_segment = NULL;
88310d565efSmrg      void *next_sp = NULL;
88410d565efSmrg      void *initial_sp = NULL;
88510d565efSmrg      void *stack;
88610d565efSmrg      size_t stack_size;
88710d565efSmrg      while ((stack = __splitstack_find (next_segment, next_sp, &stack_size,
88810d565efSmrg                                         &next_segment, &next_sp,
88910d565efSmrg 					&initial_sp)) != NULL)
89010d565efSmrg        {
89110d565efSmrg          // Stack segment starts at stack and is stack_size bytes long.
89210d565efSmrg        }
89310d565efSmrg 
89410d565efSmrg    There is no way to iterate over the stack segments of a different
89510d565efSmrg    thread.  However, what is permitted is for one thread to call this
89610d565efSmrg    with the first two values NULL, to pass next_segment, next_sp, and
89710d565efSmrg    initial_sp to a different thread, and then to suspend one way or
89810d565efSmrg    another.  A different thread may run the subsequent
89910d565efSmrg    __splitstack_find iterations.  Of course, this will only work if the
90010d565efSmrg    first thread is suspended during the __splitstack_find iterations.
90110d565efSmrg    If not, the second thread will be looking at the stack while it is
90210d565efSmrg    changing, and anything could happen.
90310d565efSmrg 
90410d565efSmrg    FIXME: This should be declared in some header file, but where?  */
90510d565efSmrg 
void *
__splitstack_find (void *segment_arg, void *sp, size_t *len,
		   void **next_segment, void **next_sp,
		   void **initial_sp)
{
  struct stack_segment *segment;
  void *ret;
  char *nsp;

  /* SEGMENT_ARG of 1 is the code meaning "report the initial
     (non-split) stack", bounded by *INITIAL_SP and SP.  */
  if (segment_arg == (void *) (uintptr_type) 1)
    {
      char *isp = (char *) *initial_sp;

      if (isp == NULL)
	return NULL;

      /* After the initial stack there is nothing further; 2 is the
	 code meaning "iteration complete".  */
      *next_segment = (void *) (uintptr_type) 2;
      *next_sp = NULL;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      if ((char *) sp >= isp)
	return NULL;
      *len = (char *) isp - (char *) sp;
      return sp;
#else
      if ((char *) sp <= (char *) isp)
	return NULL;
      *len = (char *) sp - (char *) isp;
      return (void *) isp;
#endif
    }
  else if (segment_arg == (void *) (uintptr_type) 2)
    /* 2: the previous call reported the last stack.  */
    return NULL;
  else if (segment_arg != NULL)
    segment = (struct stack_segment *) segment_arg;
  else
    {
      /* First call (both SEGMENT_ARG and SP NULL): start from this
	 thread's current state and locate the segment containing the
	 present stack pointer, approximated by a local's address.  */
      *initial_sp = __morestack_initial_sp.sp;
      segment = __morestack_current_segment;
      sp = (void *) &segment;
      while (1)
	{
	  if (segment == NULL)
	    /* Not in any segment: we are on the initial stack.  */
	    return __splitstack_find ((void *) (uintptr_type) 1, sp, len,
				      next_segment, next_sp, initial_sp);
	  if ((char *) sp >= (char *) (segment + 1)
	      && (char *) sp <= (char *) (segment + 1) + segment->size)
	    break;
	  segment = segment->prev;
	}
    }

  /* Arrange for the next iteration: either the previous segment, or
     the code 1 requesting the initial stack.  */
  if (segment->prev == NULL)
    *next_segment = (void *) (uintptr_type) 1;
  else
    *next_segment = segment->prev;

  /* The old_stack value is the address of the function parameters of
     the function which called __morestack.  So if f1 called f2 which
     called __morestack, the stack looks like this:

         parameters       <- old_stack
         return in f1
	 return in f2
	 registers pushed by __morestack

     The registers pushed by __morestack may not be visible on any
     other stack, if we are being called by a signal handler
     immediately after the call to __morestack_unblock_signals.  We
     want to adjust our return value to include those registers.  This
     is target dependent.  */

  nsp = (char *) segment->old_stack;

  if (nsp == NULL)
    {
      /* We've reached the top of the stack.  */
      *next_segment = (void *) (uintptr_type) 2;
    }
  else
    {
      /* Back up over the target-specific register save area pushed
	 by __morestack.  */
#if defined (__x86_64__)
      nsp -= 12 * sizeof (void *);
#elif defined (__i386__)
      nsp -= 6 * sizeof (void *);
#elif defined __powerpc64__
#elif defined __s390x__
      nsp -= 2 * 160;
#elif defined __s390__
      nsp -= 2 * 96;
#else
#error "unrecognized target"
#endif

      *next_sp = (void *) nsp;
    }

#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  *len = (char *) (segment + 1) + segment->size - (char *) sp;
  ret = (void *) sp;
#else
  *len = (char *) sp - (char *) (segment + 1);
  ret = (void *) (segment + 1);
#endif

  return ret;
}
101210d565efSmrg 
101310d565efSmrg /* Tell the split stack code whether it has to block signals while
101410d565efSmrg    manipulating the stack.  This is for programs in which some threads
101510d565efSmrg    block all signals.  If a thread already blocks signals, there is no
101610d565efSmrg    need for the split stack code to block them as well.  If NEW is not
101710d565efSmrg    NULL, then if *NEW is non-zero signals will be blocked while
101810d565efSmrg    splitting the stack, otherwise they will not.  If OLD is not NULL,
101910d565efSmrg    *OLD will be set to the old value.  */
102010d565efSmrg 
102110d565efSmrg void
__splitstack_block_signals(int * new,int * old)102210d565efSmrg __splitstack_block_signals (int *new, int *old)
102310d565efSmrg {
102410d565efSmrg   if (old != NULL)
102510d565efSmrg     *old = __morestack_initial_sp.dont_block_signals ? 0 : 1;
102610d565efSmrg   if (new != NULL)
102710d565efSmrg     __morestack_initial_sp.dont_block_signals = *new ? 0 : 1;
102810d565efSmrg }
102910d565efSmrg 
103010d565efSmrg /* The offsets into the arrays used by __splitstack_getcontext and
103110d565efSmrg    __splitstack_setcontext.  */
103210d565efSmrg 
enum __splitstack_context_offsets
{
  MORESTACK_SEGMENTS = 0,	/* Head of the stack segment list.  */
  CURRENT_SEGMENT = 1,		/* Segment in use when the context was saved.  */
  CURRENT_STACK = 2,		/* Address of a local at save time, locating
				   the stack then in use.  */
  STACK_GUARD = 3,		/* Value from __morestack_get_guard.  */
  INITIAL_SP = 4,		/* __morestack_initial_sp.sp at save time.  */
  INITIAL_SP_LEN = 5,		/* __morestack_initial_sp.len at save time.  */
  BLOCK_SIGNALS = 6,		/* Saved dont_block_signals flag.  */

  /* Slots 7-9 are reserved for future extension.  */
  NUMBER_OFFSETS = 10
};
104510d565efSmrg 
104610d565efSmrg /* Get the current split stack context.  This may be used for
104710d565efSmrg    coroutine switching, similar to getcontext.  The argument should
104810d565efSmrg    have at least 10 void *pointers for extensibility, although we
104910d565efSmrg    don't currently use all of them.  This would normally be called
105010d565efSmrg    immediately before a call to getcontext or swapcontext or
105110d565efSmrg    setjmp.  */
105210d565efSmrg 
105310d565efSmrg void
__splitstack_getcontext(void * context[NUMBER_OFFSETS])105410d565efSmrg __splitstack_getcontext (void *context[NUMBER_OFFSETS])
105510d565efSmrg {
105610d565efSmrg   memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
105710d565efSmrg   context[MORESTACK_SEGMENTS] = (void *) __morestack_segments;
105810d565efSmrg   context[CURRENT_SEGMENT] = (void *) __morestack_current_segment;
105910d565efSmrg   context[CURRENT_STACK] = (void *) &context;
106010d565efSmrg   context[STACK_GUARD] = __morestack_get_guard ();
106110d565efSmrg   context[INITIAL_SP] = (void *) __morestack_initial_sp.sp;
106210d565efSmrg   context[INITIAL_SP_LEN] = (void *) (uintptr_type) __morestack_initial_sp.len;
106310d565efSmrg   context[BLOCK_SIGNALS] = (void *) __morestack_initial_sp.dont_block_signals;
106410d565efSmrg }
106510d565efSmrg 
106610d565efSmrg /* Set the current split stack context.  The argument should be a
106710d565efSmrg    context previously passed to __splitstack_getcontext.  This would
106810d565efSmrg    normally be called immediately after a call to getcontext or
106910d565efSmrg    swapcontext or setjmp if something jumped to it.  */
107010d565efSmrg 
107110d565efSmrg void
__splitstack_setcontext(void * context[NUMBER_OFFSETS])107210d565efSmrg __splitstack_setcontext (void *context[NUMBER_OFFSETS])
107310d565efSmrg {
107410d565efSmrg   __morestack_segments = (struct stack_segment *) context[MORESTACK_SEGMENTS];
107510d565efSmrg   __morestack_current_segment =
107610d565efSmrg     (struct stack_segment *) context[CURRENT_SEGMENT];
107710d565efSmrg   __morestack_set_guard (context[STACK_GUARD]);
107810d565efSmrg   __morestack_initial_sp.sp = context[INITIAL_SP];
107910d565efSmrg   __morestack_initial_sp.len = (size_t) context[INITIAL_SP_LEN];
108010d565efSmrg   __morestack_initial_sp.dont_block_signals =
108110d565efSmrg     (uintptr_type) context[BLOCK_SIGNALS];
108210d565efSmrg }
108310d565efSmrg 
108410d565efSmrg /* Create a new split stack context.  This will allocate a new stack
108510d565efSmrg    segment which may be used by a coroutine.  STACK_SIZE is the
108610d565efSmrg    minimum size of the new stack.  The caller is responsible for
108710d565efSmrg    actually setting the stack pointer.  This would normally be called
108810d565efSmrg    before a call to makecontext, and the returned stack pointer and
108910d565efSmrg    size would be used to set the uc_stack field.  A function called
109010d565efSmrg    via makecontext on a stack created by __splitstack_makecontext may
109110d565efSmrg    not return.  Note that the returned pointer points to the lowest
109210d565efSmrg    address in the stack space, and thus may not be the value to which
109310d565efSmrg    to set the stack pointer.  */
109410d565efSmrg 
void *
__splitstack_makecontext (size_t stack_size, void *context[NUMBER_OFFSETS],
			  size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;

  memset (context, 0, NUMBER_OFFSETS * sizeof (void *));
  segment = allocate_segment (stack_size);
  context[MORESTACK_SEGMENTS] = segment;
  context[CURRENT_SEGMENT] = segment;
  /* Compute the logical top of the new segment's usable space, from
     which the stack guard is derived.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
  initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
  initial_sp = (void *) (segment + 1);
#endif
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, segment->size);
  /* A made context has no associated initial (non-split) stack.  */
  context[INITIAL_SP] = NULL;
  context[INITIAL_SP_LEN] = 0;
  *size = segment->size;
  /* Return the lowest address of the stack space; the caller decides
     how to derive the actual stack pointer from it.  */
  return (void *) (segment + 1);
}
111710d565efSmrg 
111810d565efSmrg /* Given an existing split stack context, reset it back to the start
111910d565efSmrg    of the stack.  Return the stack pointer and size, appropriate for
112010d565efSmrg    use with makecontext.  This may be used if a coroutine exits, in
112110d565efSmrg    order to reuse the stack segments for a new coroutine.  */
112210d565efSmrg 
void *
__splitstack_resetcontext (void *context[10], size_t *size)
{
  struct stack_segment *segment;
  void *initial_sp;
  size_t initial_size;
  void *ret;

  /* Reset the context assuming that MORESTACK_SEGMENTS, INITIAL_SP
     and INITIAL_SP_LEN are correct.  */

  segment = context[MORESTACK_SEGMENTS];
  context[CURRENT_SEGMENT] = segment;
  context[CURRENT_STACK] = NULL;
  if (segment == NULL)
    {
      /* No segments: reuse the context's initial (non-split) stack.  */
      initial_sp = context[INITIAL_SP];
      initial_size = (uintptr_type) context[INITIAL_SP_LEN];
      ret = initial_sp;
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      /* Return the lowest address of the stack space, as documented.  */
      ret = (void *) ((char *) ret - initial_size);
#endif
    }
  else
    {
      /* Reuse the first allocated segment from its very start.  */
#ifdef __LIBGCC_STACK_GROWS_DOWNWARD__
      initial_sp = (void *) ((char *) (segment + 1) + segment->size);
#else
      initial_sp = (void *) (segment + 1);
#endif
      initial_size = segment->size;
      ret = (void *) (segment + 1);
    }
  /* Recompute the guard for the reset stack and clear the saved
     signal-blocking preference.  */
  context[STACK_GUARD] = __morestack_make_guard (initial_sp, initial_size);
  context[BLOCK_SIGNALS] = NULL;
  *size = initial_size;
  return ret;
}
116110d565efSmrg 
116210d565efSmrg /* Release all the memory associated with a splitstack context.  This
116310d565efSmrg    may be used if a coroutine exits and the associated stack should be
116410d565efSmrg    freed.  */
116510d565efSmrg 
116610d565efSmrg void
__splitstack_releasecontext(void * context[10])116710d565efSmrg __splitstack_releasecontext (void *context[10])
116810d565efSmrg {
116910d565efSmrg   __morestack_release_segments (((struct stack_segment **)
117010d565efSmrg 				 &context[MORESTACK_SEGMENTS]),
117110d565efSmrg 				1);
117210d565efSmrg }
117310d565efSmrg 
117410d565efSmrg /* Like __splitstack_block_signals, but operating on CONTEXT, rather
117510d565efSmrg    than on the current state.  */
117610d565efSmrg 
117710d565efSmrg void
__splitstack_block_signals_context(void * context[NUMBER_OFFSETS],int * new,int * old)117810d565efSmrg __splitstack_block_signals_context (void *context[NUMBER_OFFSETS], int *new,
117910d565efSmrg 				    int *old)
118010d565efSmrg {
118110d565efSmrg   if (old != NULL)
118210d565efSmrg     *old = ((uintptr_type) context[BLOCK_SIGNALS]) != 0 ? 0 : 1;
118310d565efSmrg   if (new != NULL)
118410d565efSmrg     context[BLOCK_SIGNALS] = (void *) (uintptr_type) (*new ? 0 : 1);
118510d565efSmrg }
118610d565efSmrg 
118710d565efSmrg /* Find the stack segments associated with a split stack context.
118810d565efSmrg    This will return the address of the first stack segment and set
118910d565efSmrg    *STACK_SIZE to its size.  It will set next_segment, next_sp, and
119010d565efSmrg    initial_sp which may be passed to __splitstack_find to find the
119110d565efSmrg    remaining segments.  */
119210d565efSmrg 
119310d565efSmrg void *
__splitstack_find_context(void * context[NUMBER_OFFSETS],size_t * stack_size,void ** next_segment,void ** next_sp,void ** initial_sp)119410d565efSmrg __splitstack_find_context (void *context[NUMBER_OFFSETS], size_t *stack_size,
119510d565efSmrg 			   void **next_segment, void **next_sp,
119610d565efSmrg 			   void **initial_sp)
119710d565efSmrg {
119810d565efSmrg   void *sp;
119910d565efSmrg   struct stack_segment *segment;
120010d565efSmrg 
120110d565efSmrg   *initial_sp = context[INITIAL_SP];
120210d565efSmrg 
120310d565efSmrg   sp = context[CURRENT_STACK];
120410d565efSmrg   if (sp == NULL)
120510d565efSmrg     {
120610d565efSmrg       /* Most likely this context was created but was never used.  The
120710d565efSmrg 	 value 2 is a code used by __splitstack_find to mean that we
120810d565efSmrg 	 have reached the end of the list of stacks.  */
120910d565efSmrg       *next_segment = (void *) (uintptr_type) 2;
121010d565efSmrg       *next_sp = NULL;
121110d565efSmrg       *initial_sp = NULL;
121210d565efSmrg       return NULL;
121310d565efSmrg     }
121410d565efSmrg 
121510d565efSmrg   segment = context[CURRENT_SEGMENT];
121610d565efSmrg   if (segment == NULL)
121710d565efSmrg     {
121810d565efSmrg       /* Most likely this context was saved by a thread which was not
121910d565efSmrg 	 created using __splistack_makecontext and which has never
122010d565efSmrg 	 split the stack.  The value 1 is a code used by
122110d565efSmrg 	 __splitstack_find to look at the initial stack.  */
122210d565efSmrg       segment = (struct stack_segment *) (uintptr_type) 1;
122310d565efSmrg     }
122410d565efSmrg 
122510d565efSmrg   return __splitstack_find (segment, sp, stack_size, next_segment, next_sp,
122610d565efSmrg 			    initial_sp);
122710d565efSmrg }
122810d565efSmrg 
122910d565efSmrg #endif /* !defined (inhibit_libc) */
123010d565efSmrg #endif /* not powerpc 32-bit */
1231