xref: /openbsd/lib/libc/include/thread_private.h (revision 076a4f8b)
1 /* $OpenBSD: thread_private.h,v 1.37 2024/08/18 02:25:51 guenther Exp $ */
2 
3 /* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
4 
#ifndef _THREAD_PRIVATE_H_
#define _THREAD_PRIVATE_H_

/*
 * Non-zero once the process is actually multi-threaded; the locking
 * macros below only invoke the callbacks when this is set.
 */
extern int __isthreaded;

/* Number of malloc locks; the index passed to tc_malloc_lock/unlock. */
#define _MALLOC_MUTEXES 32
/* NOTE(review): the int arg presumably selects/limits the mutex count -- confirm in malloc.c */
void _malloc_init(int);
#ifdef __LIBC__
PROTO_NORMAL(_malloc_init);
#endif /* __LIBC__ */
15 
16 /*
17  * The callbacks needed by libc to handle the threaded case.
18  * NOTE: Bump the version when you change the struct contents!
19  *
20  * tc_canceled:
21  *	If not NULL, what to do when canceled (otherwise _exit(0))
22  *
23  * tc_flockfile, tc_ftrylockfile, and tc_funlockfile:
24  *	If not NULL, these implement the flockfile() family.
25  *	XXX In theory, you should be able to lock a FILE before
26  *	XXX loading libpthread and have that be a real lock on it,
27  *	XXX but that doesn't work without the libc base version
28  *	XXX tracking the recursion count.
29  *
30  * tc_malloc_lock and tc_malloc_unlock:
31  * tc_atexit_lock and tc_atexit_unlock:
32  * tc_atfork_lock and tc_atfork_unlock:
33  * tc_arc4_lock and tc_arc4_unlock:
34  *	The locks used by the malloc, atexit, atfork, and arc4 subsystems.
35  *	These have to be ordered specially in the fork/vfork wrappers
36  *	and may be implemented differently than the general mutexes
37  *	in the callbacks below.
38  *
39  * tc_mutex_lock and tc_mutex_unlock:
40  *	Lock and unlock the given mutex. If the given mutex is NULL
41  *	a mutex is allocated and initialized automatically.
42  *
43  * tc_mutex_destroy:
44  *	Destroy/deallocate the given mutex.
45  *
46  * tc_tag_lock and tc_tag_unlock:
47  *	Lock and unlock the mutex associated with the given tag.
48  *	If the given tag is NULL a tag is allocated and initialized
49  *	automatically.
50  *
51  * tc_tag_storage:
52  *	Returns a pointer to per-thread instance of data associated
53  *	with the given tag.  If the given tag is NULL a tag is
54  *	allocated and initialized automatically.
55  *
56  * tc_fork, tc_vfork:
57  *	If not NULL, they are called instead of the syscall stub, so that
58  *	the thread library can do necessary locking and reinitialization.
59  *
60  * tc_thread_release:
61  *	Handles the release of a thread's TIB and struct pthread and the
62  *	notification of other threads...when there are other threads.
63  *
64  * tc_thread_key_zero:
65  *	For each thread, zero out the key data associated with the given key.
 *
67  * If <machine/tcb.h> doesn't define TCB_GET(), then locating the TCB in a
68  * threaded process requires a syscall (__get_tcb(2)) which is too much
69  * overhead for single-threaded processes.  For those archs, there are two
70  * additional callbacks, though they are placed first in the struct for
71  * convenience in ASM:
72  *
73  * tc_errnoptr:
74  *	Returns the address of the thread's errno.
75  *
76  * tc_tcb:
77  *	Returns the address of the thread's TCB.
78  */
79 
struct __sFILE;
struct pthread;

/*
 * Callback table installed by the thread library; the semantics of each
 * member are described in the block comment above.  Per the NOTE above,
 * bump the version when the contents change, and keep tc_errnoptr and
 * tc_tcb first: ASM on archs without TCB_GET() relies on their position.
 */
struct thread_callbacks {
	int	*(*tc_errnoptr)(void);		/* MUST BE FIRST */
	void	*(*tc_tcb)(void);
	__dead void	(*tc_canceled)(void);
	void	(*tc_flockfile)(struct __sFILE *);
	int	(*tc_ftrylockfile)(struct __sFILE *);
	void	(*tc_funlockfile)(struct __sFILE *);
	void	(*tc_malloc_lock)(int);		/* int is a lock index, cf. _MALLOC_MUTEXES */
	void	(*tc_malloc_unlock)(int);
	void	(*tc_atexit_lock)(void);
	void	(*tc_atexit_unlock)(void);
	void	(*tc_atfork_lock)(void);
	void	(*tc_atfork_unlock)(void);
	void	(*tc_arc4_lock)(void);
	void	(*tc_arc4_unlock)(void);
	void	(*tc_mutex_lock)(void **);	/* *mutexp allocated on demand when NULL */
	void	(*tc_mutex_unlock)(void **);
	void	(*tc_mutex_destroy)(void **);
	void	(*tc_tag_lock)(void **);	/* *tagp allocated on demand when NULL */
	void	(*tc_tag_unlock)(void **);
	/* args: tag, default storage, its size, destructor, error fallback */
	void	*(*tc_tag_storage)(void **, void *, size_t, void (*)(void *),
	   void *);
	__pid_t	(*tc_fork)(void);
	__pid_t	(*tc_vfork)(void);
	void	(*tc_thread_release)(struct pthread *);
	void	(*tc_thread_key_zero)(int);
};
109 
__BEGIN_PUBLIC_DECLS
/*
 *  Set the callbacks used by libc
 *  _len is the caller's sizeof(struct thread_callbacks) -- presumably so
 *  a version/size mismatch with the thread library can be detected (see
 *  the version NOTE above).
 */
void	_thread_set_callbacks(const struct thread_callbacks *_cb, size_t _len);
__END_PUBLIC_DECLS

#ifdef __LIBC__
__BEGIN_HIDDEN_DECLS
/* the current set */
extern struct thread_callbacks _thread_cb;
__END_HIDDEN_DECLS
#endif /* __LIBC__ */
123 
/*
 * helper macro to make unique names in the thread namespace
 */
#define __THREAD_NAME(name)	__CONCAT(_thread_tagname_,name)

/*
 * Macros used in libc to access thread mutex, keys, and per thread storage.
 * _THREAD_PRIVATE_KEY and _THREAD_PRIVATE_MUTEX are different macros for
 * historical reasons.   They do the same thing, define a static variable
 * keyed by 'name' that identifies a mutex and a key to identify per thread
 * data.
 *
 * Both expand to a static tag pointer that starts out NULL; the tc_tag_*
 * callbacks allocate and initialize the tag on first use (see above).
 */
#define _THREAD_PRIVATE_KEY(name)					\
	static void *__THREAD_NAME(name)
#define _THREAD_PRIVATE_MUTEX(name)					\
	static void *__THREAD_NAME(name)
140 
141 
#ifndef __LIBC__	/* building some sort of reach around */

/*
 * Outside libc proper all locking collapses to no-ops, and per-thread
 * storage degrades to the caller's single static 'storage' buffer.
 */
#define _THREAD_PRIVATE_MUTEX_LOCK(name)		do {} while (0)
#define _THREAD_PRIVATE_MUTEX_UNLOCK(name)		do {} while (0)
#define _THREAD_PRIVATE(keyname, storage, error)	&(storage)
#define _THREAD_PRIVATE_DT(keyname, storage, dt, error)	&(storage)
#define _MUTEX_LOCK(mutex)				do {} while (0)
#define _MUTEX_UNLOCK(mutex)				do {} while (0)
#define _MUTEX_DESTROY(mutex)				do {} while (0)
#define _MALLOC_LOCK(n)					do {} while (0)
#define _MALLOC_UNLOCK(n)				do {} while (0)
#define _ATEXIT_LOCK()					do {} while (0)
#define _ATEXIT_UNLOCK()				do {} while (0)
#define _ATFORK_LOCK()					do {} while (0)
#define _ATFORK_UNLOCK()				do {} while (0)
#define _ARC4_LOCK()					do {} while (0)
#define _ARC4_UNLOCK()					do {} while (0)
159 
160 #else		/* building libc */
/*
 * Tag-based macros gate on the callback pointer being installed rather
 * than on __isthreaded (contrast the __isthreaded-gated macros below);
 * with no callbacks the lock is a no-op and storage access falls back to
 * the static 'storage' buffer.
 */
#define _THREAD_PRIVATE_MUTEX_LOCK(name)				\
	do {								\
		if (_thread_cb.tc_tag_lock != NULL)			\
			_thread_cb.tc_tag_lock(&(__THREAD_NAME(name)));	\
	} while (0)
#define _THREAD_PRIVATE_MUTEX_UNLOCK(name)				\
	do {								\
		if (_thread_cb.tc_tag_unlock != NULL)			\
			_thread_cb.tc_tag_unlock(&(__THREAD_NAME(name))); \
	} while (0)
/* per-thread copy of 'storage' (no destructor); 'error' is the fallback arg */
#define _THREAD_PRIVATE(keyname, storage, error)			\
	(_thread_cb.tc_tag_storage == NULL ? &(storage) :		\
	    _thread_cb.tc_tag_storage(&(__THREAD_NAME(keyname)),	\
		&(storage), sizeof(storage), NULL, (error)))

/* same as _THREAD_PRIVATE but with a destructor 'dt' for the storage */
#define _THREAD_PRIVATE_DT(keyname, storage, dt, error)			\
	(_thread_cb.tc_tag_storage == NULL ? &(storage) :		\
	    _thread_cb.tc_tag_storage(&(__THREAD_NAME(keyname)),	\
		&(storage), sizeof(storage), (dt), (error)))
180 
/*
 * Macros used in libc to access mutexes.
 * A NULL *mutex is allocated and initialized automatically by the
 * callback (see tc_mutex_lock above); all are no-ops until the process
 * goes threaded.
 */
#define _MUTEX_LOCK(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_lock(mutex);		\
	} while (0)
#define _MUTEX_UNLOCK(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_unlock(mutex);		\
	} while (0)
#define _MUTEX_DESTROY(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_destroy(mutex);		\
	} while (0)
199 
/*
 * malloc lock/unlock prototypes and definitions.
 * These subsystem locks are only taken once threaded; see the comment on
 * tc_malloc_lock et al. above for their special ordering in fork/vfork.
 */
#define _MALLOC_LOCK(n)							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_malloc_lock(n);			\
	} while (0)
#define _MALLOC_UNLOCK(n)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_malloc_unlock(n);			\
	} while (0)

#define _ATEXIT_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atexit_lock();			\
	} while (0)
#define _ATEXIT_UNLOCK()						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atexit_unlock();			\
	} while (0)

#define _ATFORK_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atfork_lock();			\
	} while (0)
#define _ATFORK_UNLOCK()						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atfork_unlock();			\
	} while (0)

#define _ARC4_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_arc4_lock();			\
	} while (0)
#define _ARC4_UNLOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_arc4_unlock();			\
	} while (0)
246 #endif /* __LIBC__ */
247 
248 
249 /*
250  * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
251  * All Rights Reserved.
252  *
253  * Permission to use, copy, modify, and distribute this software for any
254  * purpose with or without fee is hereby granted, provided that the above
255  * copyright notice and this permission notice appear in all copies.
256  *
257  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
258  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
259  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
260  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
261  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
262  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
263  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
264  */
265 /*
266  * Private data structures that back up the typedefs in pthread.h.
267  * Since only the thread library cares about their size or arrangement,
268  * it should be possible to switch libraries without relinking.
269  *
270  * Do not reorder _atomic_lock_t and sem_t variables in the structs.
271  * This is due to alignment requirements of certain arches like hppa.
272  * The current requirement is 16 bytes.
273  *
274  * THE MACHINE DEPENDENT CERROR CODE HAS HARD CODED OFFSETS INTO PTHREAD_T!
275  */
276 
277 #include <sys/queue.h>
278 #include <pthread.h>
279 #include <semaphore.h>
280 #include <machine/spinlock.h>
281 
#define	_SPINLOCK_UNLOCKED _ATOMIC_LOCK_UNLOCKED

/* Backing object for sem_t (<semaphore.h> is included above). */
struct __sem {
	_atomic_lock_t lock;	/* spinlock; keep first, see alignment note above */
	volatile int waitcount;	/* number of blocked waiters */
	volatile int value;	/* current semaphore count */
	int shared;		/* NOTE(review): presumably pshared flag -- confirm in sem_init */
};
290 
TAILQ_HEAD(pthread_queue, pthread);

#ifdef FUTEX

/*
 * Futex-flavored synchronization objects: a single waited-on word, with
 * no userland waiter queues (contrast the spinlock variants below).
 * Remember the layout warnings in the comment above.
 */
struct pthread_mutex {
	volatile unsigned int lock;
	int type;
	pthread_t owner;
	int count;		/* recursion count */
	int prioceiling;
};

struct pthread_cond {
	volatile unsigned int seq;
	clockid_t clock;	/* clock used for timed waits */
	struct pthread_mutex *mutex;
};

struct pthread_rwlock {
	volatile unsigned int value;
};

#else

/*
 * Fallback flavor: _atomic_lock_t spinlock protecting explicit queues of
 * waiting threads.  _atomic_lock_t members stay first (alignment, above).
 */
struct pthread_mutex {
	_atomic_lock_t lock;
	struct pthread_queue lockers;	/* threads blocked on this mutex */
	int type;
	pthread_t owner;
	int count;		/* recursion count */
	int prioceiling;
};

struct pthread_cond {
	_atomic_lock_t lock;
	struct pthread_queue waiters;	/* threads blocked on this cond */
	struct pthread_mutex *mutex;
	clockid_t clock;	/* clock used for timed waits */
};

struct pthread_rwlock {
	_atomic_lock_t lock;
	pthread_t owner;
	struct pthread_queue writers;	/* writers waiting for readers to drain */
	int readers;
};
#endif /* FUTEX */
338 
/* Attribute objects backing the pthread_*attr_t typedefs in pthread.h. */
struct pthread_mutex_attr {
	int ma_type;		/* PTHREAD_MUTEX_* type */
	int ma_protocol;
	int ma_prioceiling;
};

struct pthread_cond_attr {
	clockid_t ca_clock;	/* clock for timed condvar waits */
};

struct pthread_attr {
	void *stack_addr;
	size_t stack_size;
	size_t guard_size;
	int detach_state;
	int contention_scope;
	int sched_policy;
	struct sched_param sched_param;
	int sched_inherit;
};
359 
/* One per-thread key/value node in a thread's singly linked storage list. */
struct rthread_storage {
	int keyid;			/* key this value belongs to */
	struct rthread_storage *next;
	void *data;			/* the thread-specific value */
};

/* NOTE(review): presumably backs pthread_cleanup_push/pop (LIFO list) -- confirm */
struct rthread_cleanup_fn {
	void (*fn)(void *);
	void *arg;
	struct rthread_cleanup_fn *next;
};
371 
struct tib;
struct stack;
/*
 * Per-thread descriptor.  The machine-dependent cerror code hard-codes
 * offsets into this struct (see the warning above) -- do not reorder or
 * insert fields casually; donesem's placement also matters (alignment
 * note above).
 */
struct pthread {
	struct __sem donesem;		/* NOTE(review): likely posted at thread exit for joiners -- confirm */
	unsigned int flags;		/* THREAD_* flags, see defines below */
	_atomic_lock_t flags_lock;
	struct tib *tib;		/* this thread's TIB */
	void *retval;
	void *(*fn)(void *);		/* start routine */
	void *arg;			/* argument passed to fn */
	char name[32];
	struct stack *stack;
	LIST_ENTRY(pthread) threads;
	TAILQ_ENTRY(pthread) waiting;
	pthread_cond_t blocking_cond;
	struct pthread_attr attr;
	struct rthread_storage *local_storage;
	struct rthread_cleanup_fn *cleanup_fns;

	/* cancel received in a delayed cancel block? */
	int delayed_cancel;
};
/* flags in pthread->flags */
#define	THREAD_DONE		0x001
#define	THREAD_DETACHED		0x002

/* flags in tib->tib_thread_flags */
#define	TIB_THREAD_ASYNC_CANCEL		0x001
#define	TIB_THREAD_INITIAL_STACK	0x002	/* has stack from exec */

/*
 * Clears the delayed-cancel flag, then enters a cancel point.
 * NOTE(review): deliberately NOT wrapped in do { } while (0):
 * ENTER_CANCEL_POINT_INNER (defined elsewhere) appears to open a scope
 * closed by a matching LEAVE macro -- confirm before "fixing" this.
 */
#define ENTER_DELAYED_CANCEL_POINT(tib, self)				\
	(self)->delayed_cancel = 0;					\
	ENTER_CANCEL_POINT_INNER(tib, 1, 1)
405 
/*
 * Internal functions exported from libc's thread bits for use by libpthread
 */
void	_spinlock(volatile _atomic_lock_t *);
/* NOTE(review): presumably returns non-zero when the lock was acquired -- confirm */
int	_spinlocktry(volatile _atomic_lock_t *);
void	_spinunlock(volatile _atomic_lock_t *);

/* printf-style debug output; first arg looks like a verbosity level */
void	_rthread_debug(int, const char *, ...)
		__attribute__((__format__ (printf, 2, 3)));
/* fork with thread-library locking; _sys_fork is the raw syscall to use */
pid_t	_thread_dofork(pid_t (*_sys_fork)(void));
void	_thread_finalize(void);

/*
 * Threading syscalls not declared in system headers
 */
__dead void	__threxit(pid_t *);
int		__thrsleep(const volatile void *, clockid_t,
		    const struct timespec *, volatile void *, const int *);
int		__thrwakeup(const volatile void *, int n);
int		__thrsigdivert(sigset_t, siginfo_t *, const struct timespec *);
426 
427 #endif /* _THREAD_PRIVATE_H_ */
428