xref: /netbsd/lib/libpthread/pthread_tsd.c (revision 7f19f937)
/*	$NetBSD: pthread_tsd.c,v 1.25 2022/04/10 10:38:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2007, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Andrew Doran, and by Christos Zoulas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_tsd.c,v 1.25 2022/04/10 10:38:33 riastradh Exp $");

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

/* Functions and structures dealing with thread-specific data */
#include <errno.h>
#include <sys/mman.h>

#include "pthread.h"
#include "pthread_int.h"
#include "reentrant.h"
#include "tsd.h"

int pthread_keys_max;
static pthread_mutex_t tsd_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nextkey;

PTQ_HEAD(pthread__tsd_list, pt_specific) *pthread__tsd_list = NULL;
void (**pthread__tsd_destructors)(void *) = NULL;

__strong_alias(__libc_thr_keycreate,pthread_key_create)
__strong_alias(__libc_thr_keydelete,pthread_key_delete)

static void
/*ARGSUSED*/
null_destructor(void *p)
{
}

#include <err.h>
#include <stdlib.h>
#include <stdio.h>

static void
pthread_tsd_prefork(void)
{
	pthread_mutex_lock(&tsd_mutex);
}

static void
pthread_tsd_postfork(void)
{
	pthread_mutex_unlock(&tsd_mutex);
}

static void
pthread_tsd_postfork_child(void)
{
	pthread_mutex_init(&tsd_mutex, NULL);
}

void *
pthread_tsd_init(size_t *tlen)
{
	char *pkm;
	size_t alen;
	char *arena;

	pthread_atfork(pthread_tsd_prefork, pthread_tsd_postfork, pthread_tsd_postfork_child);

	if ((pkm = pthread__getenv("PTHREAD_KEYS_MAX")) != NULL) {
		pthread_keys_max = (int)strtol(pkm, NULL, 0);
		if (pthread_keys_max < _POSIX_THREAD_KEYS_MAX)
			pthread_keys_max = _POSIX_THREAD_KEYS_MAX;
	} else {
		pthread_keys_max = PTHREAD_KEYS_MAX;
	}
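
	/*
	 * Example (illustrative): running a program with
	 * "PTHREAD_KEYS_MAX=1024" in its environment raises the limit
	 * to 1024; values below _POSIX_THREAD_KEYS_MAX are clamped up
	 * to that minimum.
	 */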

	/*
	 * Can't use malloc here yet, because malloc will use the fake
	 * libc thread functions to initialize itself, so mmap the space.
	 */
	*tlen = sizeof(struct __pthread_st)
	    + pthread_keys_max * sizeof(struct pt_specific);
	alen = *tlen
	    + sizeof(*pthread__tsd_list) * pthread_keys_max
	    + sizeof(*pthread__tsd_destructors) * pthread_keys_max;
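
	/*
	 * Layout of the arena mapped below (illustrative):
	 *
	 *	[ tsd_list[pthread_keys_max] ][ destructors[pthread_keys_max] ][ *tlen bytes ]
	 *
	 * The two leading arrays back pthread__tsd_list and
	 * pthread__tsd_destructors; the remaining *tlen bytes are
	 * returned to the caller, sized for one __pthread_st plus its
	 * pt_specific array (presumably the initial thread).
	 */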

	arena = mmap(NULL, alen, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
	if (arena == MAP_FAILED) {
		pthread_keys_max = 0;
		return NULL;
	}

	pthread__tsd_list = (void *)arena;
	arena += sizeof(*pthread__tsd_list) * pthread_keys_max;
	pthread__tsd_destructors = (void *)arena;
	arena += sizeof(*pthread__tsd_destructors) * pthread_keys_max;
	return arena;
}

int
pthread_key_create(pthread_key_t *key, void (*destructor)(void *))
{
	int i;

	if (__predict_false(__uselibcstub))
		return __libc_thr_keycreate_stub(key, destructor);

	/* Get a lock on the allocation list */
	pthread_mutex_lock(&tsd_mutex);

	/* Find an available slot:
	 * A slot is free while its destructor pointer is NULL and in
	 * use once it is non-NULL. If the caller passes a NULL
	 * destructor we install our own no-op destructor instead, so
	 * that an in-use slot always satisfies the non-NULL condition.
	 */
	/* 1. Search from "nextkey" to the end of the list. */
	for (i = nextkey; i < pthread_keys_max; i++)
		if (pthread__tsd_destructors[i] == NULL)
			break;

	if (i == pthread_keys_max) {
		/* 2. If that didn't work, search from the start
		 *    of the list back to "nextkey".
		 */
		for (i = 0; i < nextkey; i++)
			if (pthread__tsd_destructors[i] == NULL)
				break;

		if (i == nextkey) {
			/* If we didn't find one here, there isn't one
			 * to be found.
			 */
			pthread_mutex_unlock(&tsd_mutex);
			return EAGAIN;
		}
	}

	/* Got one. */
	pthread__assert(PTQ_EMPTY(&pthread__tsd_list[i]));
	pthread__tsd_destructors[i] = destructor ? destructor : null_destructor;

	nextkey = (i + 1) % pthread_keys_max;
	pthread_mutex_unlock(&tsd_mutex);
	*key = i;

	return 0;
}

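/*
 * Illustrative application-level use of the key interface implemented
 * here (a sketch, not part of this file; the names "logkey",
 * "log_destructor" and "log_buffer" are hypothetical):
 *
 *	#include <pthread.h>
 *	#include <stdlib.h>
 *
 *	static pthread_key_t logkey;
 *
 *	static void
 *	log_destructor(void *buf)	// runs at thread exit
 *	{
 *		free(buf);
 *	}
 *
 *	static int
 *	log_init(void)			// once, e.g. via pthread_once()
 *	{
 *		return pthread_key_create(&logkey, log_destructor);
 *	}
 *
 *	static void *
 *	log_buffer(void)		// per calling thread
 *	{
 *		void *buf = pthread_getspecific(logkey);
 *		if (buf == NULL) {
 *			buf = malloc(1024);
 *			(void)pthread_setspecific(logkey, buf);
 *		}
 *		return buf;
 *	}
 *
 * Each thread observes only its own value; log_destructor is called at
 * thread exit for every thread still holding a non-NULL value.
 */
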
/*
 * Each thread holds an array of pthread_keys_max pt_specific list
 * elements. When an element is used it is inserted into the appropriate
 * key bucket of pthread__tsd_list. Thus ptqe_prev == NULL means that
 * the element is not threaded onto any list, and ptqe_prev != NULL
 * means it is already part of one. When a key is set to a non-NULL
 * value for the first time, the element is added to the list.
 *
 * For each key we keep this global list of the threads that have
 * called pthread_setspecific() with a non-NULL value, so that
 * pthread_key_delete() does not have to scan every thread for
 * non-NULL values.
 *
 * The assumption here is that a concurrent pthread_key_delete() is
 * already undefined behavior. The mutex is taken only once per
 * thread/key combination.
 *
 * We could keep a count of the in-use entries per thread, so that we
 * could clear pt_havespecific when the last one is deleted, but we
 * don't bother for now.
 */
int
pthread__add_specific(pthread_t self, pthread_key_t key, const void *value)
{
	struct pt_specific *pt;

	pthread__assert(key >= 0 && key < pthread_keys_max);

	pthread__assert(pthread__tsd_destructors[key] != NULL);
	pt = &self->pt_specific[key];
	self->pt_havespecific = 1;
	if (value && !pt->pts_next.ptqe_prev) {
		pthread_mutex_lock(&tsd_mutex);
		PTQ_INSERT_HEAD(&pthread__tsd_list[key], pt, pts_next);
		pthread_mutex_unlock(&tsd_mutex);
	}
	pt->pts_value = __UNCONST(value);

	return 0;
}
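
/*
 * Illustrative picture of the structures involved (two threads, both
 * having stored a non-NULL value for "key"; a sketch, not code):
 *
 *	pthread__tsd_list[key]
 *	    -> threadA->pt_specific[key]   (pts_value = a)
 *	    -> threadB->pt_specific[key]   (pts_value = b)
 *
 * The list entries live inside each thread's own pt_specific array;
 * the per-key list only links together the threads whose slot for that
 * key has ever held a non-NULL value, so pthread_key_delete() can
 * visit just those threads.
 */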

int
pthread_key_delete(pthread_key_t key)
{
	/*
	 * This is tricky.  The standard says of pthread_key_create()
	 * that new keys have the value NULL associated with them in
	 * all threads.  According to people who were present at the
	 * standardization meeting, that requirement was written
	 * before pthread_key_delete() was introduced, and not
	 * reconsidered when it was.
	 *
	 * See David Butenhof's article in comp.programming.threads:
	 * Subject: Re: TSD key reusing issue
	 * Message-ID: <u97d8.29$fL6.200@news.cpqcorp.net>
	 * Date: Thu, 21 Feb 2002 09:06:17 -0500
	 *	 http://groups.google.com/groups?\
	 *	 hl=en&selm=u97d8.29%24fL6.200%40news.cpqcorp.net
	 *
	 * Given:
	 *
	 * 1: Applications are not required to clear keys in all
	 *    threads before calling pthread_key_delete().
	 * 2: Clearing pointers without running destructors is a
	 *    memory leak.
	 * 3: The pthread_key_delete() function is expressly forbidden
	 *    to run any destructors.
	 *
	 * Option 1: Make this function effectively a no-op and
	 * prohibit key reuse. This is a possible resource-exhaustion
	 * problem given that we have a static storage area for keys,
	 * but having a non-static storage area would make
	 * pthread_setspecific() expensive (might need to realloc the
	 * TSD array).
	 *
	 * Option 2: Ignore the specified behavior of
	 * pthread_key_create() and leave the old values. If an
	 * application deletes a key that still has non-NULL values in
	 * some threads... it's probably a memory leak and hence
	 * incorrect anyway, and we're within our rights to let the
	 * application lose. However, it's possible (if unlikely) that
	 * the application is storing pointers to non-heap data, or
	 * non-pointers that have been wedged into a void pointer, so
	 * we can't entirely write off such applications as incorrect.
	 * This could also lead to running (new) destructors on old
	 * data that was never supposed to be associated with that
	 * destructor.
	 *
	 * Option 3: Follow the specified behavior of
	 * pthread_key_create().  Either pthread_key_create() or
	 * pthread_key_delete() would then have to clear the values in
	 * every thread's slot for that key. In order to guarantee the
	 * visibility of the NULL value in other threads, there would
	 * have to be synchronization operations in both the clearer
	 * and pthread_getspecific().  Putting synchronization in
	 * pthread_getspecific() is a big performance lose.  But in
	 * reality, only (buggy) reuse of an old key would require
	 * this synchronization; for a new key, there has to be a
	 * memory-visibility propagating event between the call to
	 * pthread_key_create() and pthread_getspecific() with that
	 * key, so setting the entries to NULL without synchronization
	 * will work, subject to problem (2) above. However, it's kind
	 * of slow.
	 *
	 * Note that the argument in option 3 only applies because we
	 * keep TSD in ordinary memory which follows the pthreads
	 * visibility rules. The visibility rules are not required by
	 * the standard to apply to TSD, so the argument doesn't
	 * apply in general, just to this implementation.
	 */

	/*
	 * We do option 3: we walk the list of all pt_specific structures
	 * threaded on the key being deleted, unthread each one, and set
	 * its value pointer to NULL. Finally we clear the destructor
	 * slot, freeing the key for further use.
	 *
	 * We don't call the destructors here; it is the responsibility
	 * of the application to clean up the storage:
	 * 	http://pubs.opengroup.org/onlinepubs/9699919799/functions/\
	 *	pthread_key_delete.html
	 */
	struct pt_specific *pt;

	if (__predict_false(__uselibcstub))
		return __libc_thr_keydelete_stub(key);

	pthread__assert(key >= 0 && key < pthread_keys_max);

	pthread_mutex_lock(&tsd_mutex);

	pthread__assert(pthread__tsd_destructors[key] != NULL);

	while ((pt = PTQ_FIRST(&pthread__tsd_list[key])) != NULL) {
		PTQ_REMOVE(&pthread__tsd_list[key], pt, pts_next);
		pt->pts_value = NULL;
		pt->pts_next.ptqe_prev = NULL;
	}

	pthread__tsd_destructors[key] = NULL;
	pthread_mutex_unlock(&tsd_mutex);

	return 0;
}

/* Perform thread-exit-time destruction of thread-specific data. */
void
pthread__destroy_tsd(pthread_t self)
{
	int i, done, iterations;
	void *val;
	void (*destructor)(void *);

	if (!self->pt_havespecific)
		return;

	/* Butenhof, section 5.4.2 (page 167):
	 *
	 * ``Also, Pthreads sets the thread-specific data value for a
	 * key to NULL before calling that key's destructor (passing
	 * the previous value of the key) when a thread terminates [*].
	 * ...
	 * [*] That is, unfortunately, not what the standard
	 * says. This is one of the problems with formal standards -
	 * they say what they say, not what they were intended to
	 * say. Somehow, an error crept in, and the sentence
	 * specifying that "the implementation clears the
	 * thread-specific data value before calling the destructor"
	 * was deleted. Nobody noticed, and the standard was approved
	 * with the error. So the standard says (by omission) that if
	 * you want to write a portable application using
	 * thread-specific data, that will not hang on thread
	 * termination, you must call pthread_setspecific within your
	 * destructor function to change the value to NULL. This would
	 * be silly, and any serious implementation of Pthreads will
	 * violate the standard in this respect. Of course, the
	 * standard will be fixed, probably by the 1003.1n amendment
	 * (assorted corrections to 1003.1c-1995), but that will take
	 * a while.''
	 */

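	/*
	 * Illustrative note (hypothetical destructor, not part of this
	 * file): a destructor that stores a fresh value for some key,
	 * e.g.
	 *
	 *	static void
	 *	dtor(void *p)
	 *	{
	 *		free(p);
	 *		(void)pthread_setspecific(logkey, make_exit_record());
	 *	}
	 *
	 * re-links this thread's entry for that key, so the new value
	 * is picked up on a later pass of the do/while loop below.
	 * After PTHREAD_DESTRUCTOR_ITERATIONS passes we stop, as POSIX
	 * permits.
	 */
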
	/* We're not required to try very hard */
	iterations = PTHREAD_DESTRUCTOR_ITERATIONS;
	do {
		done = 1;
		for (i = 0; i < pthread_keys_max; i++) {
			struct pt_specific *pt = &self->pt_specific[i];
			if (pt->pts_next.ptqe_prev == NULL)
				continue;
			pthread_mutex_lock(&tsd_mutex);

			if (pt->pts_next.ptqe_prev != NULL) {
				PTQ_REMOVE(&pthread__tsd_list[i], pt, pts_next);
				val = pt->pts_value;
				pt->pts_value = NULL;
				pt->pts_next.ptqe_prev = NULL;
				destructor = pthread__tsd_destructors[i];
			} else
				destructor = NULL;

			pthread_mutex_unlock(&tsd_mutex);
			if (destructor != NULL && val != NULL) {
				done = 0;
				(*destructor)(val);
			}
		}
	} while (!done && --iterations);

	self->pt_havespecific = 0;
}

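/*
 * Carry thread-specific data that was established through the libc
 * stub interface (__libc_tsd) over into the pthread TSD tables above:
 * each in-use stub slot contributes its destructor (or the internal
 * no-op one) and its value for this thread. Presumably run once when
 * libpthread takes over from the libc stubs.
 */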
void
pthread__copy_tsd(pthread_t self)
{
	for (size_t key = 0; key < TSD_KEYS_MAX; key++) {

		if (__libc_tsd[key].tsd_inuse == 0)
			continue;

		pthread__assert(pthread__tsd_destructors[key] == NULL);
		pthread__tsd_destructors[key] = __libc_tsd[key].tsd_dtor ?
		    __libc_tsd[key].tsd_dtor : null_destructor;
		nextkey = (key + 1) % pthread_keys_max;

		self->pt_havespecific = 1;
		struct pt_specific *pt = &self->pt_specific[key];
		pt->pts_value = __libc_tsd[key].tsd_val;
		__libc_tsd[key].tsd_inuse = 0;
	}
}