/*	$OpenBSD: rthread_stack.c,v 1.20 2021/09/17 15:20:21 deraadt Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#include <sys/types.h>
#include <sys/mman.h>

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include "rthread.h"

/*
 * Follow uthread's example and keep around stacks that have default
 * attributes for possible reuse.
 */
static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
static _atomic_lock_t def_stacks_lock = _SPINLOCK_UNLOCKED;
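
/*
 * A cached entry is a complete struct stack (see rthread.h): the
 * mapped region (base, len), the initial stack pointer (sp), the
 * guard size, and the SLIST link that puts it on the list above.
 */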

struct stack *
_rthread_alloc_stack(pthread_t thread)
{
	struct stack *stack;
	u_int32_t rnd;
	caddr_t base;
	caddr_t guard;
	size_t size;
	size_t guardsize;

	/* if the request uses the defaults, try to reuse one */
	if (thread->attr.stack_addr == NULL &&
	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
	    thread->attr.guard_size == _thread_pagesize) {
		_spinlock(&def_stacks_lock);
		stack = SLIST_FIRST(&def_stacks);
		if (stack != NULL) {
			SLIST_REMOVE_HEAD(&def_stacks, link);
			_spinunlock(&def_stacks_lock);
			return (stack);
		}
		_spinunlock(&def_stacks_lock);
	}

	/* allocate the stack struct that we'll return */
	stack = malloc(sizeof(*stack));
	if (stack == NULL)
		return (NULL);

	/* The smaller the stack, the smaller the random bias */
	if (thread->attr.stack_size > _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize - 1);
	else if (thread->attr.stack_size == _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize / 16 - 1);
	else
		rnd = 0;
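	/*
	 * _STACKALIGNBYTES is an alignment mask; clearing those bits
	 * keeps the random offset a multiple of the required stack
	 * alignment.
	 */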
	rnd &= ~_STACKALIGNBYTES;

	/* If a stack address was provided, just fill in the details */
	if (thread->attr.stack_addr != NULL) {
		stack->base = base = thread->attr.stack_addr;
		stack->len  = thread->attr.stack_size;
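		/*
		 * sp starts where the stack grows from: just above base
		 * on grows-up machines, otherwise just under the top of
		 * the region (leaving alignment room), shifted by the
		 * random bias either way.
		 */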
#ifdef MACHINE_STACK_GROWS_UP
		stack->sp = base + rnd;
#else
		stack->sp = base + thread->attr.stack_size - (_STACKALIGNBYTES+1) - rnd;
#endif
		/*
		 * This impossible guardsize marks this stack as
		 * application allocated so it won't be freed or
		 * cached by _rthread_free_stack()
		 */
		stack->guardsize = 1;
		return (stack);
	}

	/* round the requested sizes up to full pages */
	size = ROUND_TO_PAGE(thread->attr.stack_size);
	guardsize = ROUND_TO_PAGE(thread->attr.guard_size);

	/* check for overflow */
	if (size < thread->attr.stack_size ||
	    guardsize < thread->attr.guard_size ||
	    SIZE_MAX - size < guardsize) {
		free(stack);
		errno = EINVAL;
		return (NULL);
	}
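
	/* the guard area is carved out of the same mapping as the stack */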
	size += guardsize;

	/* allocate the real stack: MAP_STACK marks it as stack memory */
	base = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON | MAP_STACK, -1, 0);
	if (base == MAP_FAILED) {
		free(stack);
		return (NULL);
	}

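	/*
	 * The guard sits at the far end of the growth direction:
	 * the top of the mapping where stacks grow up, the bottom
	 * (base) where they grow down.
	 */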
#ifdef MACHINE_STACK_GROWS_UP
	guard = base + size - guardsize;
	stack->sp = base + rnd;
#else
	guard = base;
	stack->sp = base + size - (_STACKALIGNBYTES+1) - rnd;
#endif

	/* memory protect the guard region */
	if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
		munmap(base, size);
		free(stack);
		return (NULL);
	}

	stack->base = base;
	stack->guardsize = guardsize;
	stack->len = size;
	return (stack);
}

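/*
 * Usage sketch (illustrative only, not part of the original file):
 * a caller with a prepared pthread_t would pair the entry points as
 *
 *	struct stack *stack = _rthread_alloc_stack(thread);
 *	if (stack == NULL)
 *		return (errno);
 *	...start the thread running on stack->sp...
 *	_rthread_free_stack(stack);
 */
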
void
_rthread_free_stack(struct stack *stack)
{
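	/*
	 * Stacks with the default size and guard go back on the reuse
	 * list; everything else is torn down.  Application-provided
	 * stacks (guardsize == 1) are freed but never unmapped.
	 */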
	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
	    stack->guardsize == _thread_pagesize) {
		_spinlock(&def_stacks_lock);
		SLIST_INSERT_HEAD(&def_stacks, stack, link);
		_spinunlock(&def_stacks_lock);
	} else {
		/* unmap the storage unless it was application allocated */
		if (stack->guardsize != 1)
			munmap(stack->base, stack->len);
		free(stack);
	}
}