/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_stack.c,v 1.9 2004/10/06 08:11:07 davidxu Exp $
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <machine/tls.h>
#include <machine/vmparam.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
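
/*
 * Note: a spare stack's bookkeeping record is not allocated separately;
 * _thr_stack_free() stores this struct in the top sizeof(struct stack)
 * bytes of the stack memory being cached, so an idle cached stack
 * carries its own metadata.
 */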

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);

/*
 * Thread stack base used as the mmap() hint; starts at
 * _usrstack - kern.maxssiz - kern.maxthrssiz.
 */
static char *base_stack = NULL;

/*
 * Round size up to the nearest multiple of
 * _thr_page_size.
 */
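/*
 * E.g., with _thr_page_size == 4096, round_up(4096) stays 4096 and
 * round_up(4097) becomes 8192.
 */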
static inline size_t
round_up(size_t size)
{
	if (size % _thr_page_size != 0)
		size = ((size / _thr_page_size) + 1) *
		    _thr_page_size;
	return (size);
}

int
_thr_stack_alloc(pthread_attr_t attr)
{
	pthread_t curthread = tls_get_curthread();
	struct stack *spare_stack;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up the stack size to the nearest multiple of _thr_page_size
	 * so that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	THREAD_LIST_LOCK(curthread);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == THR_STACK_DEFAULT) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded-up stack size (stacksize) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		THREAD_LIST_UNLOCK(curthread);
	} else {
		/*
		 * Calculate base_stack on first use (race ok).
		 */
		if (base_stack == NULL) {
			int64_t maxssiz;
			int64_t maxthrssiz;
			struct rlimit rl;
			size_t len;

			if (getrlimit(RLIMIT_STACK, &rl) == 0)
				maxssiz = rl.rlim_max;
			else
				maxssiz = MAXSSIZ;
			len = sizeof(maxssiz);
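			/*
			 * Prefer the kern.maxssiz sysctl when present; if
			 * the lookup fails, the getrlimit()/MAXSSIZ
			 * fallback chosen above is kept.
			 */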
			sysctlbyname("kern.maxssiz", &maxssiz, &len, NULL, 0);
			len = sizeof(maxthrssiz);
			if (sysctlbyname("kern.maxthrssiz",
					 &maxthrssiz, &len, NULL, 0) < 0) {
				maxthrssiz = MAXTHRSSIZ;
			}
			base_stack = _usrstack - maxssiz - maxthrssiz;
		}
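		/*
		 * The hint leaves maxssiz of headroom below _usrstack for
		 * the main process stack and places thread stacks within
		 * the maxthrssiz-sized region beneath that.
		 */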

		/* Release the lock before mmap()ing the stack. */
		THREAD_LIST_UNLOCK(curthread);

		/*
		 * Map the stack and guard page together, then split the
		 * guard page from the allocated space.
		 *
		 * We no longer use MAP_STACK and we define an area far
		 * away from the default user stack (even though this will
		 * cost us another few 4K page-table pages).  DFly no longer
		 * allows new MAP_STACK mappings to be made inside ungrown
		 * portions of existing mappings.
		 */
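		/*
		 * Sketch of the intended layout (low to high addresses):
		 *
		 *	[ guard (PROT_NONE) ][ usable stack (rw) ]
		 *	^ mapping base       ^ stackaddr returned to caller
		 *
		 * A downward overrun past the bottom of the stack hits the
		 * guard and faults instead of silently corrupting adjacent
		 * memory.
		 */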
		stackaddr = mmap(base_stack, stacksize + guardsize,
				 PROT_READ | PROT_WRITE,
				 MAP_ANON | MAP_PRIVATE, -1, 0);
		if (stackaddr != MAP_FAILED && guardsize) {
			if (mmap(stackaddr, guardsize, PROT_NONE,
				 MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
				munmap(stackaddr, stacksize + guardsize);
				stackaddr = MAP_FAILED;
			} else {
				stackaddr += guardsize;
			}
		}
		if (stackaddr == MAP_FAILED)
			stackaddr = NULL;
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}
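
/*
 * Illustrative (hypothetical) caller sketch, not part of this file; the
 * real consumers live in the thread-creation path.  A caller fills in
 * the attribute sizes and then requests backing memory:
 *
 *	attr->stacksize_attr = THR_STACK_DEFAULT;
 *	attr->guardsize_attr = _thr_guard_default;
 *	if (_thr_stack_alloc(attr) != 0)
 *		return (EAGAIN);
 *
 * On success, attr->stackaddr_attr points at the usable stack base,
 * just above the guard.
 */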

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(pthread_attr_t attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
		spare_stack = (struct stack *)((char *)attr->stackaddr_attr +
			attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == THR_STACK_DEFAULT &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		} else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}

void
_thr_stack_cleanup(void)
{
	struct stack *spare;

	while ((spare = LIST_FIRST(&dstackq)) != NULL) {
		LIST_REMOVE(spare, qe);
		/*
		 * The cached stackaddr points just above the guard, so
		 * back up by guardsize to unmap the entire original
		 * mapping rather than running past its top.
		 */
		munmap((char *)spare->stackaddr - spare->guardsize,
		       spare->stacksize + spare->guardsize);
	}
}
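
/*
 * Note: _thr_stack_cleanup() drains only the default-size cache
 * (dstackq); spare stacks cached on mstackq remain mapped.
 */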