/* $OpenBSD: rthread_stack.c,v 1.9 2013/03/21 21:59:31 deraadt Exp $ */
/* $snafu: rthread_stack.c,v 1.12 2005/01/11 02:45:28 marc Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#include <sys/param.h>
#include <sys/mman.h>

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include "rthread.h"

/*
 * Follow uthread's example and keep around stacks that have default
 * attributes for possible reuse.
 */
static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
static _spinlock_lock_t def_stacks_lock = _SPINLOCK_UNLOCKED;
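/*
 * def_stacks_lock guards the list above.  Only stacks with the default
 * geometry are cached: any such stack can satisfy any other
 * default-attribute request interchangeably.
 */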

struct stack *
_rthread_alloc_stack(pthread_t thread)
{
	struct stack *stack;
	caddr_t base;
	caddr_t guard;
	size_t size;
	size_t guardsize;

	/* if the request uses the defaults, try to reuse a cached stack */
	if (thread->attr.stack_addr == NULL &&
	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
	    thread->attr.guard_size == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		stack = SLIST_FIRST(&def_stacks);
		if (stack != NULL) {
			SLIST_REMOVE_HEAD(&def_stacks, link);
			_spinunlock(&def_stacks_lock);
			return (stack);
		}
		_spinunlock(&def_stacks_lock);
	}

	/* allocate the stack struct that we'll return */
	stack = malloc(sizeof(*stack));
	if (stack == NULL)
		return (NULL);

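	/*
	 * An application-supplied stack typically arrives here via
	 * pthread_attr_setstack(3) before pthread_create(3), e.g.:
	 *
	 *	pthread_attr_setstack(&attr, buf, len);
	 *	pthread_create(&tid, &attr, start_routine, arg);
	 */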
	/* If a stack address was provided, just fill in the details */
	if (thread->attr.stack_addr != NULL) {
		stack->base = base = thread->attr.stack_addr;
		stack->len  = thread->attr.stack_size;
#ifdef MACHINE_STACK_GROWS_UP
		stack->sp = base;
#else
		stack->sp = base + thread->attr.stack_size;
#endif
		/*
		 * This impossible guardsize marks this stack as
		 * application allocated so it won't be freed or
		 * cached by _rthread_free_stack().  Real guard sizes
		 * are rounded to whole pages and so can never be 1.
		 */
		stack->guardsize = 1;
		return (stack);
	}

	/* round the requested sizes up to full pages */
	size = ROUND_TO_PAGE(thread->attr.stack_size);
	guardsize = ROUND_TO_PAGE(thread->attr.guard_size);

	/*
	 * check for overflow: ROUND_TO_PAGE() wraps past zero for a
	 * request within a page of SIZE_MAX, yielding a result smaller
	 * than the request, and size + guardsize can wrap the same
	 * way, hence the SIZE_MAX - size test
	 */
	if (size < thread->attr.stack_size ||
	    guardsize < thread->attr.guard_size ||
	    SIZE_MAX - size < guardsize) {
		free(stack);
		errno = EINVAL;
		return (NULL);
	}
	size += guardsize;

	/* actually allocate the real stack */
	base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
	if (base == MAP_FAILED) {
		free(stack);
		return (NULL);
	}

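	/*
	 * Place the guard at the end the stack grows toward: sp starts
	 * at the opposite end, so an overflowing thread runs into the
	 * guard rather than into unrelated memory.
	 */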
#ifdef MACHINE_STACK_GROWS_UP
	guard = base + size - guardsize;
	stack->sp = base;
#else
	guard = base;
	stack->sp = base + size;
#endif

	/* memory protect the guard region so a stack overflow faults */
	if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
		munmap(base, size);
		free(stack);
		return (NULL);
	}

	stack->base = base;
	stack->guardsize = guardsize;
	stack->len = size;
	return (stack);
}
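
/*
 * Sketch of the expected caller (an assumption from this API, not a
 * quote of pthread_create.c): the returned struct carries the initial
 * stack pointer for the new thread.
 *
 *	thread->stack = _rthread_alloc_stack(thread);
 *	if (thread->stack == NULL)
 *		return (errno);
 *	...start the machine-dependent thread at thread->stack->sp...
 */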

void
_rthread_free_stack(struct stack *stack)
{
	/* stacks with the default geometry go back on the free list */
	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
	    stack->guardsize == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		SLIST_INSERT_HEAD(&def_stacks, stack, link);
		_spinunlock(&def_stacks_lock);
	} else {
		/* unmap the storage unless it was application allocated */
		if (stack->guardsize != 1)
			munmap(stack->base, stack->len);
		free(stack);
	}
}
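
/*
 * Illustration of the cache round trip (hypothetical caller, not part
 * of librthread): for a thread t with default attributes, the struct
 * freed above is handed back verbatim by the next allocation.
 *
 *	struct stack *a = _rthread_alloc_stack(t);	first call: fresh mmap
 *	_rthread_free_stack(a);				cached, not unmapped
 *	struct stack *b = _rthread_alloc_stack(t);	returns the cached stack
 */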