/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libthread_xu/thread/thr_list.c,v 1.7 2006/04/06 13:03:09 davidxu Exp $
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include "thr_private.h"
#include "libc_private.h"

/* Uncomment to trace thread list operations to stdout. */
/* #define DEBUG_THREAD_LIST */
#ifdef DEBUG_THREAD_LIST
#define DBG_MSG		stdout_debug
#else
/* Debug tracing compiled out: DBG_MSG() expands to nothing. */
#define DBG_MSG(x...)
#endif

/* List of all threads */
struct thread_head	_thread_list = TAILQ_HEAD_INITIALIZER(_thread_list);

/* List of threads needing GC */
struct thread_head	_thread_gc_list = TAILQ_HEAD_INITIALIZER(_thread_gc_list);

/* Number of active threads */
int	_thread_active_threads = 1;

/* Garbage thread count. */
int	_thr_gc_count;

/* Lock guarding _thread_list and _thread_gc_list (taken via THREAD_LIST_LOCK). */
umtx_t	_thr_list_lock;

/*
 * Define a high water mark for the maximum number of threads that
 * will be cached.  Once this level is reached, any extra threads
 * will be free()'d.
 */
#define	MAX_CACHED_THREADS	100

/*
 * We've got to keep track of everything that is allocated, not only
 * to have a speedy free list, but also so they can be deallocated
 * after a fork().
 */
static TAILQ_HEAD(, pthread)	free_threadq;	/* cache of freed thread structs */
static umtx_t			free_thread_lock;	/* guards free_threadq/free_thread_count */
static umtx_t			tcb_lock;	/* serializes _tcb_ctor/_tcb_dtor */
static int			free_thread_count = 0;
static int			inited = 0;
static u_int64_t		next_uniqueid = 1;	/* id handed out by _thr_link() (used by GDB) */

/* Open-chained hash of all known threads, used by _thr_hash_find(). */
LIST_HEAD(thread_hash_head, pthread);
#define	HASH_QUEUES	128
static struct thread_hash_head	thr_hashtable[HASH_QUEUES];
/* Hash on the struct address; >> 12 discards low bits shared by allocations. */
#define	THREAD_HASH(thrd)	(((unsigned long)thrd >> 12) % HASH_QUEUES)

static void thr_destroy(struct pthread *curthread, struct pthread *thread);

/*
 * Initialize all thread-list state.  Also called again in the child
 * after fork() (hence the reinit of the locks and lists); on the very
 * first call the static hash table is already zeroed, so the bucket
 * reinit loop is only needed when reinitializing.
 */
void
_thr_list_init(void)
{
	int i;

	_thr_gc_count = 0;
	_thr_umtx_init(&_thr_list_lock);
	TAILQ_INIT(&_thread_list);
	TAILQ_INIT(&free_threadq);
	_thr_umtx_init(&free_thread_lock);
	_thr_umtx_init(&tcb_lock);
	if (inited) {
		for (i = 0; i < HASH_QUEUES; ++i)
			LIST_INIT(&thr_hashtable[i]);
	}
	inited = 1;
}

/*
 * Garbage-collect terminated threads.  Under the thread-list lock,
 * move fully-dead (terminated, detached, unreferenced) threads from
 * the GC list onto a private worklist; then free them after dropping
 * the lock, so _thr_free() never runs with _thr_list_lock held.
 */
void
_thr_gc(struct pthread *curthread)
{
	struct pthread *td, *td_next;
	TAILQ_HEAD(, pthread) worklist;

	TAILQ_INIT(&worklist);
	THREAD_LIST_LOCK(curthread);

	/* Check the threads waiting for GC. */
	for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
		td_next = TAILQ_NEXT(td, gcle);
		if (td->terminated == 0) {
			/* make sure we are not still in userland */
			continue;
		}
		/* Thread has exited the kernel; its stack can be reclaimed. */
		_thr_stack_free(&td->attr);
		if (((td->tlflags & TLFLAGS_DETACHED) != 0) &&
		    (td->refcount == 0)) {
			THR_GCLIST_REMOVE(td);
			/*
			 * The thread has detached and is no longer
			 * referenced.  It is safe to remove all
			 * remnants of the thread.
			 */
			THR_LIST_REMOVE(td);
			/* Reuse the gcle linkage for the private worklist. */
			TAILQ_INSERT_HEAD(&worklist, td, gcle);
		}
	}
	THREAD_LIST_UNLOCK(curthread);

	/* Free the collected threads with the list lock dropped. */
	while ((td = TAILQ_FIRST(&worklist)) != NULL) {
		TAILQ_REMOVE(&worklist, td, gcle);
		/*
		 * XXX we don't free initial thread, because there might
		 * have some code referencing initial thread.
		 */
		if (td == _thr_initial) {
			DBG_MSG("Initial thread won't be freed\n");
			continue;
		}

		_thr_free(curthread, td);
	}
}

/*
 * Allocate a thread structure and its TLS TCB.  Reuses a cached struct
 * from free_threadq when possible; falls back to malloc().  A NULL
 * curthread means we are bootstrapping the initial thread, so no locks
 * are taken and the TCB is constructed as the initial TLS block.
 * Returns NULL if either the struct or the TCB cannot be allocated.
 */
struct pthread *
_thr_alloc(struct pthread *curthread)
{
	struct pthread	*thread = NULL;
	struct tls_tcb	*tcb;

	if (curthread != NULL) {
		/* Opportunistically reclaim dead threads first. */
		if (GC_NEEDED())
			_thr_gc(curthread);
		if (free_thread_count > 0) {
			THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
			if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
				TAILQ_REMOVE(&free_threadq, thread, tle);
				free_thread_count--;
			}
			THR_LOCK_RELEASE(curthread, &free_thread_lock);
		}
	}
	if (thread == NULL) {
		thread = malloc(sizeof(struct pthread));
		if (thread == NULL)
			return (NULL);
	}
	if (curthread != NULL) {
		THR_LOCK_ACQUIRE(curthread, &tcb_lock);
		tcb = _tcb_ctor(thread, 0 /* not initial tls */);
		THR_LOCK_RELEASE(curthread, &tcb_lock);
	} else {
		tcb = _tcb_ctor(thread, 1 /* initial tls */);
	}
	if (tcb != NULL) {
		/* Zero the struct only after a successful TCB ctor. */
		memset(thread, 0, sizeof(*thread));
		thread->tcb = tcb;
	} else {
		thr_destroy(curthread, thread);
		thread = NULL;
	}
	return (thread);
}

/*
 * Release a thread structure: free its name and TCB, then either cache
 * the struct on free_threadq or destroy it outright.  The struct is
 * never cached when curthread is NULL or the cache is already at
 * MAX_CACHED_THREADS.
 */
void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
	DBG_MSG("Freeing thread %p\n", thread);
	if (thread->name) {
		free(thread->name);
		thread->name = NULL;
	}
	/*
	 * Always free tcb, as we only know it is part of RTLD TLS
	 * block, but don't know its detail and can not assume how
	 * it works, so better to avoid caching it here.
	 */
	if (curthread != NULL) {
		THR_LOCK_ACQUIRE(curthread, &tcb_lock);
		_tcb_dtor(thread->tcb);
		THR_LOCK_RELEASE(curthread, &tcb_lock);
	} else {
		_tcb_dtor(thread->tcb);
	}
	thread->tcb = NULL;
	if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
		thr_destroy(curthread, thread);
	} else {
		/*
		 * Add the thread to the free thread list, this also avoids
		 * pthread id is reused too quickly, may help some buggy apps.
		 */
		THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
		TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
		free_thread_count++;
		THR_LOCK_RELEASE(curthread, &free_thread_lock);
	}
}

/* Final disposal of a thread structure (its TCB must already be gone). */
static void
thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
{
	free(thread);
}

/*
 * Add an active thread:
 *
 *   o Assign the thread a unique id (which GDB uses to track
 *     threads.
 *   o Add the thread to the list of all threads and increment
 *     number of active threads.
 */
void
_thr_link(struct pthread *curthread, struct pthread *thread)
{
	THREAD_LIST_LOCK(curthread);
	/*
	 * Initialize the unique id (which GDB uses to track
	 * threads), add the thread to the list of all threads,
	 * and
	 */
	thread->uniqueid = next_uniqueid++;
	THR_LIST_ADD(thread);
	_thread_active_threads++;
	THREAD_LIST_UNLOCK(curthread);
}

/*
 * Remove an active thread.
258 */ 259 void 260 _thr_unlink(struct pthread *curthread, struct pthread *thread) 261 { 262 THREAD_LIST_LOCK(curthread); 263 THR_LIST_REMOVE(thread); 264 _thread_active_threads--; 265 THREAD_LIST_UNLOCK(curthread); 266 } 267 268 void 269 _thr_hash_add(struct pthread *thread) 270 { 271 struct thread_hash_head *head; 272 273 head = &thr_hashtable[THREAD_HASH(thread)]; 274 LIST_INSERT_HEAD(head, thread, hle); 275 } 276 277 void 278 _thr_hash_remove(struct pthread *thread) 279 { 280 LIST_REMOVE(thread, hle); 281 } 282 283 struct pthread * 284 _thr_hash_find(struct pthread *thread) 285 { 286 struct pthread *td; 287 struct thread_hash_head *head; 288 289 head = &thr_hashtable[THREAD_HASH(thread)]; 290 LIST_FOREACH(td, head, hle) { 291 if (td == thread) 292 return (thread); 293 } 294 return (NULL); 295 } 296 297 /* 298 * Find a thread in the linked list of active threads and add a reference 299 * to it. Threads with positive reference counts will not be deallocated 300 * until all references are released. 
301 */ 302 int 303 _thr_ref_add(struct pthread *curthread, struct pthread *thread, 304 int include_dead) 305 { 306 int ret; 307 308 if (thread == NULL) 309 /* Invalid thread: */ 310 return (EINVAL); 311 312 THREAD_LIST_LOCK(curthread); 313 if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) { 314 thread->refcount++; 315 } 316 THREAD_LIST_UNLOCK(curthread); 317 318 /* Return zero if the thread exists: */ 319 return (ret); 320 } 321 322 void 323 _thr_ref_delete(struct pthread *curthread, struct pthread *thread) 324 { 325 THREAD_LIST_LOCK(curthread); 326 _thr_ref_delete_unlocked(curthread, thread); 327 THREAD_LIST_UNLOCK(curthread); 328 } 329 330 void 331 _thr_ref_delete_unlocked(struct pthread *curthread __unused, 332 struct pthread *thread) 333 { 334 if (thread != NULL) { 335 thread->refcount--; 336 if ((thread->refcount == 0) && thread->state == PS_DEAD && 337 (thread->tlflags & TLFLAGS_DETACHED) != 0) 338 THR_GCLIST_ADD(thread); 339 } 340 } 341 342 int 343 _thr_find_thread(struct pthread *curthread __unused, struct pthread *thread, 344 int include_dead) 345 { 346 struct pthread *pthread; 347 348 if (thread == NULL) 349 /* Invalid thread: */ 350 return (EINVAL); 351 352 pthread = _thr_hash_find(thread); 353 if (pthread) { 354 if (include_dead == 0 && pthread->state == PS_DEAD) { 355 pthread = NULL; 356 } 357 } 358 359 /* Return zero if the thread exists: */ 360 return ((pthread != NULL) ? 0 : ESRCH); 361 } 362