/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_stack.c,v 1.9 2004/10/06 08:11:07 davidxu Exp $
 * $DragonFly: src/lib/libthread_xu/thread/thr_stack.c,v 1.4 2006/04/06 13:03:09 davidxu Exp $
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include <machine/tls.h>

#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);
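/*
 * Illustrative sketch (guarded out, not part of the library): how the
 * LIFO reuse described above falls out of the <sys/queue.h> macros.
 * _thr_stack_free() pushes a cached stack on the front of the queue with
 * LIST_INSERT_HEAD() and _thr_stack_alloc() pops the front with
 * LIST_FIRST()/LIST_REMOVE(), so the most recently freed stack -- whose
 * pages are most likely still warm in cache and TLB -- is reused first.
 * The function name below is hypothetical.
 */
#if 0
static void *
example_lifo_reuse(struct stack *s)
{
	struct stack *hit;

	LIST_INSERT_HEAD(&dstackq, s, qe);	/* free: push on the front */
	hit = LIST_FIRST(&dstackq);		/* alloc: take the front */
	LIST_REMOVE(hit, qe);			/* hit == s: LIFO reuse */
	return (hit->stackaddr);
}
#endif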
/**
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - _thr_stack_default     | top of 2nd thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - _thr_stack_default     | top of 1st thread stack
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   | (initial value of last_stack)
 *    |       Red Zone                    |
 *    |                                   | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - _thr_stack_initial     | top of main thread stack
 *    |                                   | ^
 *    |                                   | |
 *    |                                   | |
 *    |                                   | | stack growth
 *    |                                   |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 *
 */
static char *last_stack = NULL;

/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
 */
static inline size_t
round_up(size_t size)
{
	if (size % _thr_page_size != 0)
		size = ((size / _thr_page_size) + 1) *
		    _thr_page_size;
	return size;
}
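/*
 * Worked example, assuming _thr_page_size == 4096:
 *
 *	round_up(1)    == 4096
 *	round_up(4096) == 4096	(already page aligned, unchanged)
 *	round_up(4097) == 8192
 *
 * For power-of-two page sizes an equivalent branch-free form would be
 * (size + _thr_page_size - 1) & ~(_thr_page_size - 1); the division-based
 * form above also works for page sizes that are not powers of two.
 */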
int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct pthread *curthread = tls_get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_LOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stacksize) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	}
	else {
		/* Allocate a stack from usrstack. */
		if (last_stack == NULL) {
			last_stack = _usrstack - _thr_stack_initial -
			    _thr_guard_default;
		}

		/* Allocate a new stack. */
		stackaddr = last_stack - stacksize - guardsize;

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack -= (stacksize + guardsize);

		/* Release the lock before mmap'ing it. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together then split the
		 * guard page from allocated space.
		 *
		 * NOTE: MAP_STACK mappings are grow-down and the
		 * initial mapping does not actually extend to the guard
		 * area, so creating the guard requires doing a fixed
		 * anonymous mmap of the guard area.
		 */
		stackaddr = mmap(stackaddr, stacksize + guardsize,
		    PROT_READ | PROT_WRITE,
		    MAP_STACK | MAP_TRYFIXED, -1, 0);
		if (stackaddr != MAP_FAILED && guardsize) {
			if (mmap(stackaddr, guardsize, PROT_NONE,
			    MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
				munmap(stackaddr, stacksize + guardsize);
				stackaddr = MAP_FAILED;
			} else {
				stackaddr += guardsize;
			}
		}
		if (stackaddr == MAP_FAILED)
			stackaddr = NULL;
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
		/*
		 * The cache entry is carved out of the top of the stack
		 * being freed, so no separate allocation is needed.
		 */
		spare_stack = (struct stack *)((char *)attr->stackaddr_attr +
		    attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}

void
_thr_stack_cleanup(void)
{
	struct stack *spare;

	while ((spare = LIST_FIRST(&dstackq)) != NULL) {
		LIST_REMOVE(spare, qe);
		munmap(spare->stackaddr,
		    spare->stacksize + spare->guardsize);
	}
}
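/*
 * Usage sketch (guarded out; a hypothetical caller, not part of this
 * file): roughly how the pthread_create()/thread-exit paths drive the
 * entry points above.  Note the asymmetric locking contract:
 * _thr_stack_alloc() takes and releases the thread list lock itself,
 * while _thr_stack_free() expects the caller to already hold it.
 */
#if 0
static void
example_stack_lifecycle(struct pthread_attr *attr)
{
	struct pthread *curthread = tls_get_curthread();

	if (_thr_stack_alloc(attr) != 0)
		return;			/* no address space for a stack */

	/* ... run a thread on attr->stackaddr_attr ... */

	THREAD_LIST_LOCK(curthread);
	_thr_stack_free(attr);		/* caches the stack for reuse */
	THREAD_LIST_UNLOCK(curthread);
}
#endif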