/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <stdlib.h>
#include <pthread.h>
#include <link.h>

#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
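
/*
 * When a stack is cached, this header is written into the high end of
 * the stack's own (unused) memory (see _thr_stack_free()), so the
 * caches need no separate bookkeeping allocations.
 */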

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);
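
/*
 * Both caches are filled by _thr_stack_free() and drained by
 * _thr_stack_alloc(), always under the thread list lock.
 */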

/**
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   |   (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - _thr_stack_initial     | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
static char *last_stack = NULL;
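
/*
 * last_stack is protected by the same lock as the spare-stack queues;
 * see the synchronization comment in _thr_stack_alloc() below.
 */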

/*
 * Round size up to the nearest multiple of _thr_page_size.
 */
static inline size_t
round_up(size_t size)
{
	if (size % _thr_page_size != 0)
		size = ((size / _thr_page_size) + 1) * _thr_page_size;
	return (size);
}
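
/*
 * Example for round_up() (assuming a _thr_page_size of 4096):
 * round_up(1) and round_up(4096) both yield 4096, while
 * round_up(4097) yields 8192.
 */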

void
_thr_stack_fix_protection(struct pthread *thrd)
{

	mprotect((char *)thrd->attr.stackaddr_attr +
	    round_up(thrd->attr.guardsize_attr),
	    round_up(thrd->attr.stacksize_attr),
	    _rtld_get_stack_prot());
}
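
/*
 * Re-apply rtld's current stack protection to the main thread's stack
 * while the process is still single-threaded; the stack bounds are
 * recovered from the kern.usrstack sysctl and the RLIMIT_STACK soft
 * limit.
 */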
static void
singlethread_map_stacks_exec(void)
{
	int mib[2];
	struct rlimit rlim;
	u_long usrstack;
	size_t len;

	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;
	len = sizeof(usrstack);
	if (sysctl(mib, sizeof(mib) / sizeof(mib[0]), &usrstack, &len, NULL, 0)
	    == -1)
		return;
	if (getrlimit(RLIMIT_STACK, &rlim) == -1)
		return;
	mprotect((void *)(uintptr_t)(usrstack - rlim.rlim_cur),
	    rlim.rlim_cur, _rtld_get_stack_prot());
}

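/*
 * Re-protect every known stack, cached or live, with rtld's current
 * stack protection; rtld invokes this entry point when a newly loaded
 * object requires more permissive (e.g. executable) stacks.
 */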
void __pthread_map_stacks_exec(void);
void
__pthread_map_stacks_exec(void)
{
	struct pthread *curthread, *thrd;
	struct stack *st;

	if (!_thr_is_inited()) {
		singlethread_map_stacks_exec();
		return;
	}
	curthread = _get_curthread();
	THREAD_LIST_RDLOCK(curthread);
	LIST_FOREACH(st, &mstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	LIST_FOREACH(st, &dstackq, qe)
		mprotect((char *)st->stackaddr + st->guardsize, st->stacksize,
		    _rtld_get_stack_prot());
	TAILQ_FOREACH(thrd, &_thread_gc_list, gcle)
		_thr_stack_fix_protection(thrd);
	TAILQ_FOREACH(thrd, &_thread_list, tle)
		_thr_stack_fix_protection(thrd);
	THREAD_LIST_UNLOCK(curthread);
}

int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct pthread *curthread = _get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_WRLOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	} else {
		/*
		 * The user specified a non-default stack and/or guard size,
		 * so try to allocate a stack from the non-default size stack
		 * cache, using the rounded up stack size (stacksize) in the
		 * search:
		 */
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	} else {
		/*
		 * Allocate a stack from or below usrstack, depending
		 * on the LIBPTHREAD_BIGSTACK_MAIN env variable.
		 */
		if (last_stack == NULL)
			last_stack = _usrstack - _thr_stack_initial -
			    _thr_guard_default;

		/* Allocate a new stack. */
		stackaddr = last_stack - stacksize - guardsize;

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack -= (stacksize + guardsize);

		/* Release the lock before mmap'ing it. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together, and split the
		 * guard page from the allocated space:
		 */
		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
		     _rtld_get_stack_prot(), MAP_STACK,
		     -1, 0)) != MAP_FAILED &&
		    (guardsize == 0 ||
		     mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
			stackaddr += guardsize;
		} else {
			if (stackaddr != MAP_FAILED)
				munmap(stackaddr, stacksize + guardsize);
			stackaddr = NULL;
		}
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
		spare_stack = (struct stack *)
		    ((char *)attr->stackaddr_attr +
		    attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}
319