/*
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/thread_local_alloc.h"
                /* To determine type of tsd impl.       */
                /* Includes private/specific.h          */
                /* if needed.                           */

#if defined(USE_CUSTOM_SPECIFIC)

static const tse invalid_tse = {INVALID_QTID, 0, 0, INVALID_THREADID};
            /* A thread-specific data entry which will never    */
            /* appear valid to a reader.  Used to fill in empty */
            /* cache entries to avoid a check for 0.            */

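/*
 * Since every cache slot initially points at invalid_tse rather than
 * at NULL, a reader may dereference a slot unconditionally: its qtid
 * field (INVALID_QTID) can never match the qtid of a real lookup (see
 * the assertion in GC_slow_getspecific), and its thread field
 * (INVALID_THREADID) can never match a live thread, so an empty slot
 * simply behaves as a cache miss.
 */
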
GC_INNER int GC_key_create_inner(tsd ** key_ptr)
{
    int i;
    int ret;
    tsd * result;

    GC_ASSERT(I_HOLD_LOCK());
    /* A quick alignment check, since we need atomic stores */
    GC_ASSERT((word)(&invalid_tse.next) % sizeof(tse *) == 0);
    result = (tsd *)MALLOC_CLEAR(sizeof(tsd));
    if (NULL == result) return ENOMEM;
    ret = pthread_mutex_init(&result->lock, NULL);
    if (ret != 0) return ret;
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
      result -> cache[i] = (/* no const */ tse *)&invalid_tse;
    }
#   ifdef GC_ASSERTIONS
      for (i = 0; i < TS_HASH_SIZE; ++i) {
        GC_ASSERT(result -> hash[i].p == 0);
      }
#   endif
    *key_ptr = result;
    return 0;
}

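/*
 * Illustrative call site (a sketch; the identifier below is
 * hypothetical, not defined in this file): the caller creates the key
 * once at start-up, with the allocation lock held, and keeps it in a
 * GC-visible static so the table itself stays reachable:
 *
 *   static tsd *GC_thread_key;                       // hypothetical
 *   ...
 *   if (GC_key_create_inner(&GC_thread_key) != 0)
 *     ABORT("Failed to create thread-specific-data key");
 */
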
GC_INNER int GC_setspecific(tsd * key, void * value)
{
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    volatile tse * entry;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(self != INVALID_THREADID);
    GC_dont_gc++; /* disable GC */
    entry = (volatile tse *)MALLOC_CLEAR(sizeof(tse));
    GC_dont_gc--;
    if (0 == entry) return ENOMEM;

    pthread_mutex_lock(&(key -> lock));
    /* Could easily check for an existing entry here.   */
    entry -> next = key->hash[hash_val].p;
    entry -> thread = self;
    entry -> value = TS_HIDE_VALUE(value);
    GC_ASSERT(entry -> qtid == INVALID_QTID);
    /* There can only be one writer at a time, but this needs to be     */
    /* atomic with respect to concurrent readers.                       */
    AO_store_release(&key->hash[hash_val].ao, (AO_t)entry);
    GC_dirty((/* no volatile */ void *)entry);
    GC_dirty(key->hash + hash_val);
    if (pthread_mutex_unlock(&key->lock) != 0)
      ABORT("pthread_mutex_unlock failed (setspecific)");
    return 0;
}

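/*
 * The store sequence above follows the usual lock-free publication
 * pattern (a general sketch, not additional code from this file): the
 * entry is fully initialized with plain stores while it is still
 * private, and only then made visible with a single release store:
 *
 *   entry -> next = ...;                     // entry not yet visible
 *   entry -> thread = ...;
 *   entry -> value = ...;
 *   AO_store_release(&bucket.ao, (AO_t)entry);       // publish
 *
 * A lock-free reader that loads the bucket head therefore sees either
 * the old list or the fully initialized new entry; the plain dependent
 * loads in GC_slow_getspecific rely on this ordering.
 */
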
/* Remove thread-specific data for a given thread.  This function is    */
/* called at fork, from the child process, for all threads except the   */
/* surviving one.  GC_remove_specific() should be called on thread exit.*/
GC_INNER void GC_remove_specific_after_fork(tsd * key, pthread_t t)
{
    unsigned hash_val = HASH(t);
    tse *entry;
    tse *prev = NULL;

#   ifdef CAN_HANDLE_FORK
      /* Both GC_setspecific and GC_remove_specific should be called    */
      /* with the allocation lock held to ensure the consistency of     */
      /* the hash table in the forked child.                            */
      GC_ASSERT(I_HOLD_LOCK());
#   endif
    pthread_mutex_lock(&(key -> lock));
    entry = key->hash[hash_val].p;
    while (entry != NULL && !THREAD_EQUAL(entry->thread, t)) {
      prev = entry;
      entry = entry->next;
    }
    /* Invalidate qtid field, since qtids may be reused, and a later    */
    /* cache lookup could otherwise find this entry.                    */
    if (entry != NULL) {
      entry -> qtid = INVALID_QTID;
      if (NULL == prev) {
        key->hash[hash_val].p = entry->next;
        GC_dirty(key->hash + hash_val);
      } else {
        prev->next = entry->next;
        GC_dirty(prev);
      }
      /* Atomic! concurrent accesses still work.        */
      /* They must, since readers don't lock.           */
      /* We shouldn't need a volatile access here,      */
      /* since both this and the preceding write        */
      /* should become visible no later than            */
      /* the pthread_mutex_unlock() call.               */
    }
    /* If we wanted to deallocate the entry, we'd first have to clear   */
    /* any cache entries pointing to it.  That probably requires        */
    /* additional synchronization, since we can't prevent a concurrent  */
    /* cache lookup, which might still be examining the deallocated     */
    /* memory.  This can only happen if the concurrent access is from   */
    /* another thread, and hence has missed the cache, but still...     */
#   ifdef LINT2
      GC_noop1((word)entry);
#   endif

    /* With GC, we're done, since the pointers from the cache will      */
    /* be overwritten, all local pointers to the entries will be        */
    /* dropped, and the entry will then be reclaimed.                   */
    if (pthread_mutex_unlock(&key->lock) != 0)
      ABORT("pthread_mutex_unlock failed (remove_specific after fork)");
}

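/*
 * Hypothetical call site (a sketch; the thread-table names here are
 * assumptions, not defined in this file): a child-side fork handler
 * might walk its thread records and drop the entries of every thread
 * that did not survive the fork:
 *
 *   // in the child, with the allocation lock held:
 *   for (i = 0; i < THREAD_TABLE_SZ; ++i)
 *     for (p = thread_table[i]; p != NULL; p = p -> next)
 *       if (!THREAD_EQUAL(p -> id, pthread_self()))
 *         GC_remove_specific_after_fork(GC_thread_key, p -> id);
 */
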
/* Note that even the slow path doesn't lock.   */
GC_INNER void * GC_slow_getspecific(tsd * key, word qtid,
                                    tse * volatile * cache_ptr)
{
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry = key->hash[hash_val].p;

    GC_ASSERT(qtid != INVALID_QTID);
    while (entry != NULL && !THREAD_EQUAL(entry->thread, self)) {
      entry = entry -> next;
    }
    if (entry == NULL) return NULL;
    /* Set cache_entry. */
    entry -> qtid = (AO_t)qtid;
        /* It's safe to do this asynchronously.  Either value   */
        /* is safe, though may produce spurious misses.         */
        /* We're replacing one qtid with another one for the    */
        /* same thread.                                         */
    *cache_ptr = entry;
        /* Again this is safe since pointer assignments are     */
        /* presumed atomic, and either pointer is valid.        */
    return TS_REVEAL_PTR(entry -> value);
}

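/*
 * For context, the lock-free fast path that falls back to the slow
 * path above lives in the private header; a minimal sketch (the names
 * quick_thread_id and CACHE_HASH are assumptions here):
 *
 *   word qtid = quick_thread_id();
 *   tse * volatile * entry_ptr = &key -> cache[CACHE_HASH(qtid)];
 *   tse * entry = *entry_ptr;               // read the slot only once
 *   if (EXPECT(entry -> qtid == qtid, TRUE))
 *     return TS_REVEAL_PTR(entry -> value); // cache hit
 *   return GC_slow_getspecific(key, qtid, entry_ptr);  // miss
 *
 * This is why GC_slow_getspecific takes cache_ptr: on a miss it
 * installs the found entry back into the slot that missed, so the
 * next lookup with the same quick thread id hits the cache.
 */
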
#ifdef GC_ASSERTIONS
  /* Check that all elements of the data structure associated with  */
  /* key are marked.                                                */
  void GC_check_tsd_marks(tsd *key)
  {
    int i;
    tse *p;

    if (!GC_is_marked(GC_base(key))) {
      ABORT("Unmarked thread-specific-data table");
    }
    for (i = 0; i < TS_HASH_SIZE; ++i) {
      for (p = key->hash[i].p; p != 0; p = p -> next) {
        if (!GC_is_marked(GC_base(p))) {
          ABORT_ARG1("Unmarked thread-specific-data entry",
                     " at %p", (void *)p);
        }
      }
    }
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
      p = key -> cache[i];
      if (p != &invalid_tse && !GC_is_marked(GC_base(p))) {
        ABORT_ARG1("Unmarked cached thread-specific-data entry",
                   " at %p", (void *)p);
      }
    }
  }
#endif /* GC_ASSERTIONS */

#endif /* USE_CUSTOM_SPECIFIC */