1 /*
2  * %CopyrightBegin%
3  *
4  * Copyright Ericsson AB 2008-2018. All Rights Reserved.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * %CopyrightEnd%
19  */
20 
21 /*
** General thread-safe hash table. Similar interface to hash.h
23 **
24 ** Author: Sverker Eriksson
25 */
#ifdef HAVE_CONFIG_H
#  include "config.h"
#endif

#include <string.h>

#include "safe_hash.h"
31 
32 /* Currently only used by erl_check_io on Windows */
33 #ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
34 
35 
set_size(SafeHash * h,int size)36 static ERTS_INLINE void set_size(SafeHash* h, int size)
37 {
38     ASSERT(size % SAFE_HASH_LOCK_CNT == 0);
39     /* This important property allows us to lock the right mutex
40     ** without reading the table size (that can change without the lock) */
41 
42     h->size_mask = size - 1;
43     ASSERT((size & h->size_mask) == 0);
44     /* An even power of 2 is just for fast bit masking */
45 
46     h->grow_limit = size; /* grow table at 100% load */
47 }
48 
/*
** Round 'val' up to the nearest power of two.
** A value that is already a power of two is returned unchanged;
** 0 yields 1.
*/
static ERTS_INLINE int align_up_pow2(int val)
{
    int rest = val & (val - 1);   /* clears lowest set bit */

    if (rest == 0) {
        /* at most one bit set: already a power of two (or zero) */
        return val == 0 ? 1 : val;
    }
    /* strip low bits until only the highest remains, then double it */
    while ((rest & (rest - 1)) != 0) {
        rest &= rest - 1;
    }
    return rest << 1;
}
59 
60 /*
61 ** Rehash all objects
62 */
/*
** Rehash all objects
**
** Doubles the table size. Called without any bucket lock held;
** 'grow_limit' is the limit value the caller observed, and is used to
** detect whether another thread has already performed this grow.
*/
static void rehash(SafeHash* h, int grow_limit)
{
    /* Claim the rehash token with an atomic swap; a concurrent caller
    ** simply backs out while a rehash is in progress. */
    if (erts_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) {
	return; /* already in progress */
    }
    /* If grow_limit changed since the caller sampled it, some other
    ** thread already grew the table past the caller's snapshot. */
    if (h->grow_limit == grow_limit) {
	int i, size, bytes;
	SafeHashBucket** new_tab;
	SafeHashBucket** old_tab = h->tab;
	int old_size = h->size_mask + 1;

	size = old_size * 2; /* double table size */
	bytes = size * sizeof(SafeHashBucket*);
	new_tab = (SafeHashBucket **) erts_alloc(h->type, bytes);
	sys_memzero(new_tab, bytes);

	for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */
	    erts_mtx_lock(&h->lock_vec[i].mtx);
	}

	h->tab = new_tab;
	set_size(h, size);

	/* Relink every object into the new, larger table */
	for (i = 0; i < old_size; i++) {
	    SafeHashBucket* b = old_tab[i];
	    while (b != NULL) {
		SafeHashBucket* b_next = b->next;
		int ix = b->hvalue & h->size_mask;
		b->next = new_tab[ix];
		new_tab[ix] = b;
		b = b_next;
	    }
	}

	for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
	    erts_mtx_unlock(&h->lock_vec[i].mtx);
	}
	erts_free(h->type, (void *) old_tab);
    }
    /*else already done */
    erts_atomic_set_relb(&h->is_rehashing, 0);
}
105 
106 
107 /*
108 ** Get info about hash
109 */
/*
** Get info about hash
**
** Fills in table size, total object count and the deepest bucket chain.
** Each lock is taken in turn, so the numbers are only a consistent
** snapshot if the table is not mutated concurrently.
*/
void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h)
{
    int objects = 0;
    int max_depth = 0;
    int size = 0;
    int lock_ix;

    for (lock_ix = 0; lock_ix < SAFE_HASH_LOCK_CNT; lock_ix++) {
	int ix;
	erts_mtx_lock(&h->lock_vec[lock_ix].mtx);
	size = h->size_mask + 1;
	/* This lock guards every SAFE_HASH_LOCK_CNT:th bucket */
	for (ix = lock_ix; ix < size; ix += SAFE_HASH_LOCK_CNT) {
	    SafeHashBucket* b;
	    int depth = 0;
	    for (b = h->tab[ix]; b != NULL; b = b->next) {
		objects++;
		depth++;
	    }
	    if (max_depth < depth)
		max_depth = depth;
	}
	erts_mtx_unlock(&h->lock_vec[lock_ix].mtx);
    }

    hi->name  = h->name;
    hi->size  = size;
    hi->objs  = objects;
    hi->depth = max_depth;
}
139 
140 /*
141 ** Returns size of table in bytes. Stored objects not included.
142 **/
/*
** Returns size of table in bytes. Stored objects not included.
** Counts the SafeHash struct itself, the bucket pointer array and the
** name string.
*/
int safe_hash_table_sz(SafeHash *h)
{
  int size;
  /* use strlen instead of a hand-rolled scan; +1 for terminating NUL */
  int name_len = (int) strlen(h->name) + 1;

  erts_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */
  size = h->size_mask + 1;
  erts_mtx_unlock(&h->lock_vec[0].mtx);
  return sizeof(SafeHash) + size*sizeof(SafeHashBucket*) + name_len;
}
153 
154 /*
155 ** Init a pre allocated or static hash structure
156 ** and allocate buckets. NOT SAFE
157 */
/*
** Init a pre allocated or static hash structure
** and allocate buckets. NOT SAFE
**
** Rounds 'size' up to a power of two, zeroes the bucket array and
** initializes all lock-striping mutexes. Returns 'h'.
*/
SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_flags_t flags,
    int size, SafeHashFunctions fun)
{
    int bytes, ix;

    size  = align_up_pow2(size);
    bytes = size * sizeof(SafeHashBucket*);

    h->type = type;
    h->name = name;
    h->fun  = fun;
    h->tab  = (SafeHashBucket**) erts_alloc(h->type, bytes);
    sys_memzero(h->tab, bytes);
    set_size(h, size);
    erts_atomic_init_nob(&h->is_rehashing, 0);
    erts_atomic_init_nob(&h->nitems, 0);
    for (ix = 0; ix < SAFE_HASH_LOCK_CNT; ix++) {
        erts_mtx_init(&h->lock_vec[ix].mtx, "safe_hash", NIL, flags);
    }
    return h;
}
179 
180 
181 /*
182 ** Find an object in the hash table
183 */
/*
** Find an object in the hash table
**
** Returns the matching object, or NULL if no object compares equal
** to 'tmpl'.
*/
void* safe_hash_get(SafeHash* h, void* tmpl)
{
    SafeHashValue hval = h->fun.hash(tmpl);
    erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
    SafeHashBucket* b;

    erts_mtx_lock(lock);
    for (b = h->tab[hval & h->size_mask]; b != NULL; b = b->next) {
	/* compare hash value first to avoid needless cmp calls */
	if (b->hvalue == hval && h->fun.cmp(tmpl, (void*)b) == 0)
	    break;
    }
    erts_mtx_unlock(lock);
    return (void*) b;
}
200 
201 /*
202 ** Find or insert an object in the hash table
203 */
/*
** Find or insert an object in the hash table
**
** Returns the existing object if one matching 'tmpl' is found,
** otherwise inserts a new object created by fun.alloc(tmpl) and
** returns it. May trigger a table-doubling rehash when the item
** count exceeds the grow limit.
*/
void* safe_hash_put(SafeHash* h, void* tmpl)
{
    int grow_limit;
    SafeHashValue hval = h->fun.hash(tmpl);
    SafeHashBucket* b;
    SafeHashBucket** head;
    erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
    erts_mtx_lock(lock);
    head = &h->tab[hval & h->size_mask];
    b = *head;
    while(b != NULL) {
	if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
	    erts_mtx_unlock(lock);
	    return b;
	}
	b = b->next;
    }

    /* Not found; insert the new object first in the bucket chain */
    b = (SafeHashBucket*) h->fun.alloc(tmpl);
    b->hvalue = hval;
    b->next = *head;
    *head = b;
    /* Snapshot grow_limit while the lock is held; rehash() compares it
    ** to detect whether another thread already grew the table. */
    grow_limit = h->grow_limit;
    erts_mtx_unlock(lock);
    /* nitems is incremented outside the bucket lock (it is atomic) */
    if (erts_atomic_inc_read_nob(&h->nitems) > grow_limit) {
	rehash(h, grow_limit);
    }
    return (void*) b;
}
233 
234 /*
235 ** Erase hash entry return template if erased
236 ** return 0 if not erased
237 */
/*
** Erase hash entry return template if erased
** return 0 if not erased
*/
void* safe_hash_erase(SafeHash* h, void* tmpl)
{
    SafeHashValue hval = h->fun.hash(tmpl);
    SafeHashBucket* b;
    SafeHashBucket** prevp;
    erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
    erts_mtx_lock(lock);
    prevp = &h->tab[hval & h->size_mask];
    b = *prevp;
    while(b != NULL) {
	if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
	    /* Unlink under the lock; the decrement and the free run
	    ** after the lock is released. */
	    *prevp = b->next;
	    erts_mtx_unlock(lock);
	    erts_atomic_dec_nob(&h->nitems);
	    h->fun.free((void*)b);
	    return tmpl;
	}
	prevp = &b->next;
	b = b->next;
    }
    erts_mtx_unlock(lock);
    return NULL;
}
261 
262 /*
263 ** Call 'func(obj,func_arg2,func_arg3)' for all objects in table. NOT SAFE!!!
264 */
/*
** Call 'func(obj,func_arg2,func_arg3)' for all objects in table. NOT SAFE!!!
** No locks are taken; the caller must guarantee exclusive access.
*/
void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *, void *),
                        void *func_arg2, void *func_arg3)
{
    int ix;

    for (ix = 0; ix <= h->size_mask; ix++) {
	SafeHashBucket* b;
	for (b = h->tab[ix]; b != NULL; b = b->next) {
	    (*func)((void *) b, func_arg2, func_arg3);
	}
    }
}
278 
279 #ifdef ERTS_ENABLE_LOCK_COUNT
/*
** Enable or disable lock counting on all bucket mutexes of 'h'.
*/
void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int enable) {
    int ix;

    for (ix = 0; ix < SAFE_HASH_LOCK_CNT; ix++) {
        erts_mtx_t *mtx = &h->lock_vec[ix].mtx;

        if (!enable) {
            erts_lcnt_uninstall(&mtx->lcnt);
        } else {
            erts_lcnt_install_new_lock_info(&mtx->lcnt, "safe_hash", NIL,
                ERTS_LOCK_TYPE_MUTEX | flags);
        }
    }
}
294 #endif /* ERTS_ENABLE_LOCK_COUNT */
295 
296 #endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */
297 
298