/*
 * Various storage structures (pool allocation, vector, hash table)
 *
 * Copyright (C) 1993, Eric Youngdale.
 *               2004, Eric Pouech
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <assert.h>
#include <stdlib.h>
#ifndef DBGHELP_STATIC_LIB
#include "wine/debug.h"
#endif

#include "dbghelp_private.h"
#ifdef USE_STATS
#include <math.h>
#endif

WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);

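/* Arena-based pool allocator: pool_alloc carves memory out of arenas of at
 * least arena_size bytes by advancing a 'current' pointer.  Arenas with
 * fewer than 16 bytes left migrate to the arena_full list so they are no
 * longer scanned on allocation.  There is no per-allocation free here;
 * pool_destroy releases every arena at once.
 */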
struct pool_arena
{
    struct list entry;
    char       *current;
    char       *end;
};

void pool_init(struct pool* a, size_t arena_size)
{
    list_init( &a->arena_list );
    list_init( &a->arena_full );
    a->arena_size = arena_size;
}

void pool_destroy(struct pool* pool)
{
    struct pool_arena*  arena;
    struct pool_arena*  next;

#ifdef USE_STATS
    size_t alloc, used, num;

    alloc = used = num = 0;
    LIST_FOR_EACH_ENTRY( arena, &pool->arena_list, struct pool_arena, entry )
    {
        alloc += arena->end - (char*)arena;
        used += arena->current - (char*)arena;
        num++;
    }
    LIST_FOR_EACH_ENTRY( arena, &pool->arena_full, struct pool_arena, entry )
    {
        alloc += arena->end - (char*)arena;
        used += arena->current - (char*)arena;
        num++;
    }
    if (alloc == 0) alloc = 1;      /* avoid division by zero */
    FIXME("STATS: pool %p has allocated %u kbytes, used %u kbytes in %u arenas, non-allocation ratio: %.2f%%\n",
          pool, (unsigned)(alloc >> 10), (unsigned)(used >> 10), (unsigned)num,
          100.0 - (float)used / (float)alloc * 100.0);
#endif

    LIST_FOR_EACH_ENTRY_SAFE( arena, next, &pool->arena_list, struct pool_arena, entry )
    {
        list_remove( &arena->entry );
        HeapFree(GetProcessHeap(), 0, arena);
    }
    LIST_FOR_EACH_ENTRY_SAFE( arena, next, &pool->arena_full, struct pool_arena, entry )
    {
        list_remove( &arena->entry );
        HeapFree(GetProcessHeap(), 0, arena);
    }
}

void* pool_alloc(struct pool* pool, size_t len)
{
    struct pool_arena*  arena;
    void*               ret;
    size_t size;

    len = (len + 3) & ~3; /* round up size to a DWORD boundary */

    LIST_FOR_EACH_ENTRY( arena, &pool->arena_list, struct pool_arena, entry )
    {
        /* cast is safe: current never exceeds end, and it avoids a
         * signed/unsigned comparison with len */
        if ((size_t)(arena->end - arena->current) >= len)
        {
            ret = arena->current;
            arena->current += len;
            if (arena->current + 16 >= arena->end)
            {
                list_remove( &arena->entry );
                list_add_tail( &pool->arena_full, &arena->entry );
            }
            return ret;
        }
    }

    size = max( pool->arena_size, len );
    arena = HeapAlloc(GetProcessHeap(), 0, size + sizeof(struct pool_arena));
    if (!arena) return NULL;

    ret = arena + 1;
    arena->current = (char*)ret + len;
    arena->end = (char*)ret + size;
    if (arena->current + 16 >= arena->end)
        list_add_tail( &pool->arena_full, &arena->entry );
    else
        list_add_head( &pool->arena_list, &arena->entry );
    return ret;
}
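
/* Usage sketch (hypothetical driver code, not part of this file):
 *
 *   struct pool p;
 *   pool_init(&p, 65536);
 *   char* s = pool_strdup(&p, "some symbol name");
 *   ...
 *   pool_destroy(&p);    releases every arena, and s with them
 */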

char* pool_strdup(struct pool* pool, const char* str)
{
    char* ret;
    if ((ret = pool_alloc(pool, strlen(str) + 1))) strcpy(ret, str);
    return ret;
}

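/* Growable vector: elements are stored in fixed-size buckets of
 * (1 << shift) slots each, addressed through a table of bucket pointers.
 * Buckets are never reallocated or moved once created, so pointers
 * returned by vector_add/vector_at stay valid; only the bucket table
 * itself is reallocated (doubled) when it fills up.
 */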
void vector_init(struct vector* v, unsigned esz, unsigned bucket_sz)
{
    v->buckets = NULL;
    /* align element size on a DWORD boundary */
    v->elt_size = (esz + 3) & ~3;
    switch (bucket_sz)
    {
    case    2: v->shift =  1; break;
    case    4: v->shift =  2; break;
    case    8: v->shift =  3; break;
    case   16: v->shift =  4; break;
    case   32: v->shift =  5; break;
    case   64: v->shift =  6; break;
    case  128: v->shift =  7; break;
    case  256: v->shift =  8; break;
    case  512: v->shift =  9; break;
    case 1024: v->shift = 10; break;
    default: assert(0);
    }
    v->num_buckets = 0;
    v->buckets_allocated = 0;
    v->num_elts = 0;
}

unsigned vector_length(const struct vector* v)
{
    return v->num_elts;
}

void* vector_at(const struct vector* v, unsigned pos)
{
    unsigned o;

    if (pos >= v->num_elts) return NULL;
    o = pos & ((1 << v->shift) - 1);
    return (char*)v->buckets[pos >> v->shift] + o * v->elt_size;
}

void* vector_add(struct vector* v, struct pool* pool)
{
    unsigned    ncurr = v->num_elts++;

    /* check that we don't wrap around */
    assert(v->num_elts > ncurr);
    if (ncurr == (v->num_buckets << v->shift))
    {
        if (v->num_buckets == v->buckets_allocated)
        {
            /* double the bucket table, so it scales well with big vectors */
            unsigned    new_reserved;
            void*       new;

            new_reserved = 2 * v->buckets_allocated;
            if (new_reserved == 0) new_reserved = 1;

            /* don't even try to resize in place: the pool allocator has no
             * realloc, so allocate a new table and copy the old one over */
            new = pool_alloc(pool, new_reserved * sizeof(void*));
            if (!new) return NULL; /* don't memcpy into a failed allocation */
            memcpy(new, v->buckets, v->buckets_allocated * sizeof(void*));
            v->buckets = new;
            v->buckets_allocated = new_reserved;
        }
        v->buckets[v->num_buckets] = pool_alloc(pool, v->elt_size << v->shift);
        if (!v->buckets[v->num_buckets]) return NULL;
        return v->buckets[v->num_buckets++];
    }
    return vector_at(v, ncurr);
}
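
/* Usage sketch (hypothetical, assuming a pool 'p' initialized elsewhere):
 *
 *   struct vector v;
 *   vector_init(&v, sizeof(int), 16);       buckets of 16 ints
 *   int* elt = vector_add(&v, &p);          append one element
 *   if (elt) *elt = 42;
 *   assert(vector_length(&v) == 1);
 *   assert(*(int*)vector_at(&v, 0) == 42);
 */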

/* We construct the sparse array as two vectors of equal size.
 * The first vector (key2index) is the lookup table between a key and
 * an index in the second vector (elements).
 * When inserting an element, it is always appended to the second vector
 * (and never moved in memory afterwards); only the first vector is
 * reordered.
 */
struct key2index
{
    ULONG_PTR           key;
    unsigned            index;
};

void sparse_array_init(struct sparse_array* sa, unsigned elt_sz, unsigned bucket_sz)
{
    vector_init(&sa->key2index, sizeof(struct key2index), bucket_sz);
    vector_init(&sa->elements, elt_sz, bucket_sz);
}

/******************************************************************
 *		sparse_array_lookup
 *
 * Returns the first entry whose key is >= the passed key
 */
static struct key2index* sparse_array_lookup(const struct sparse_array* sa,
                                             ULONG_PTR key, unsigned* idx)
{
    struct key2index*   pk2i;
    unsigned            low, high;

    if (!sa->elements.num_elts)
    {
        *idx = 0;
        return NULL;
    }
    high = sa->elements.num_elts;
    pk2i = vector_at(&sa->key2index, high - 1);
    if (pk2i->key < key)
    {
        *idx = high;
        return NULL;
    }
    if (pk2i->key == key)
    {
        *idx = high - 1;
        return pk2i;
    }
    low = 0;
    pk2i = vector_at(&sa->key2index, low);
    if (pk2i->key >= key)
    {
        *idx = 0;
        return pk2i;
    }
    /* now we have: sa(lowest key) < key < sa(highest key) */
    while (low < high)
    {
        *idx = (low + high) / 2;
        pk2i = vector_at(&sa->key2index, *idx);
        if (pk2i->key > key)            high = *idx;
        else if (pk2i->key < key)       low = *idx + 1;
        else                            return pk2i;
    }
    /* the binary search may end just below the key without an exact match;
     * if so, step up to the first entry whose key is greater
     */
    if (pk2i->key < key)
        pk2i = vector_at(&sa->key2index, ++(*idx));
    return pk2i;
}

void*   sparse_array_find(const struct sparse_array* sa, ULONG_PTR key)
{
    unsigned            idx;
    struct key2index*   pk2i;

    if ((pk2i = sparse_array_lookup(sa, key, &idx)) && pk2i->key == key)
        return vector_at(&sa->elements, pk2i->index);
    return NULL;
}

void*   sparse_array_add(struct sparse_array* sa, ULONG_PTR key,
                         struct pool* pool)
{
    unsigned            idx, i;
    struct key2index*   pk2i;
    struct key2index*   to;

    pk2i = sparse_array_lookup(sa, key, &idx);
    if (pk2i && pk2i->key == key)
    {
        FIXME("re-adding an existing key\n");
        return NULL;
    }
    to = vector_add(&sa->key2index, pool);
    if (!to) return NULL; /* don't shift into a failed allocation */
    if (pk2i)
    {
        /* we need to shift the vector's content... */
        /* let's do it brute force... (FIXME) */
        assert(sa->key2index.num_elts >= 2);
        for (i = sa->key2index.num_elts - 1; i > idx; i--)
        {
            pk2i = vector_at(&sa->key2index, i - 1);
            *to = *pk2i;
            to = pk2i;
        }
    }

    to->key = key;
    to->index = sa->elements.num_elts;

    return vector_add(&sa->elements, pool);
}
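
/* Usage sketch (hypothetical, assuming a pool 'p' initialized elsewhere):
 *
 *   struct sparse_array sa;
 *   sparse_array_init(&sa, sizeof(DWORD), 16);
 *   DWORD* v = sparse_array_add(&sa, 0x1000, &p);   keyed insertion
 *   if (v) *v = 7;
 *   assert(sparse_array_find(&sa, 0x1000) == v);
 */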

unsigned sparse_array_length(const struct sparse_array* sa)
{
    return sa->elements.num_elts;
}

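/* This is the classic "one-at-a-time" hash (Bob Jenkins), with the result
 * reduced modulo the number of buckets.
 */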
static unsigned hash_table_hash(const char* name, unsigned num_buckets)
{
    unsigned    hash = 0;
    while (*name)
    {
        hash += *name++;
        hash += (hash << 10);
        hash ^= (hash >> 6);
    }
    hash += (hash << 3);
    hash ^= (hash >> 11);
    hash += (hash << 15);
    return hash % num_buckets;
}

void hash_table_init(struct pool* pool, struct hash_table* ht, unsigned num_buckets)
{
    ht->num_elts = 0;
    ht->num_buckets = num_buckets;
    ht->pool = pool;
    ht->buckets = NULL;
}

void hash_table_destroy(struct hash_table* ht)
{
#if defined(USE_STATS)
    unsigned                    i;
    unsigned                    len;
    unsigned                    min = 0xffffffff, max = 0, sq = 0;
    struct hash_table_elt*      elt;
    double                      mean, variance;

    if (!ht->buckets) return; /* nothing was ever added */
    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i].first; elt; elt = elt->next) len++;
        if (len < min) min = len;
        if (len > max) max = len;
        sq += len * len;
    }
    mean = (double)ht->num_elts / ht->num_buckets;
    variance = (double)sq / ht->num_buckets - mean * mean;
    FIXME("STATS: elts[num:%-4u size:%u mean:%f] buckets[min:%-4u variance:%+f max:%-4u]\n",
          ht->num_elts, ht->num_buckets, mean, min, variance, max);

    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i].first; elt; elt = elt->next) len++;
        if (len == max)
        {
            FIXME("Longest bucket:\n");
            for (elt = ht->buckets[i].first; elt; elt = elt->next)
                FIXME("\t%s\n", elt->name);
            break;
        }
    }
#endif
}

void hash_table_add(struct hash_table* ht, struct hash_table_elt* elt)
{
    unsigned                    hash = hash_table_hash(elt->name, ht->num_buckets);

    if (!ht->buckets)
    {
        ht->buckets = pool_alloc(ht->pool, ht->num_buckets * sizeof(struct hash_table_bucket));
        assert(ht->buckets);
        memset(ht->buckets, 0, ht->num_buckets * sizeof(struct hash_table_bucket));
    }

    /* in some cases, we need to get back symbols of the same name in the
     * order in which they were inserted, so append new elements at the end
     * of the list
     */
    if (!ht->buckets[hash].first)
    {
        ht->buckets[hash].first = elt;
    }
    else
    {
        ht->buckets[hash].last->next = elt;
    }
    ht->buckets[hash].last = elt;
    elt->next = NULL;
    ht->num_elts++;
}

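/* Iteration over a hash table: when 'name' is given, only the bucket that
 * name hashes to is walked (the usual lookup path); when 'name' is NULL,
 * every bucket is walked in turn.  Call hash_table_iter_up until it
 * returns NULL.
 */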
void hash_table_iter_init(const struct hash_table* ht,
                          struct hash_table_iter* hti, const char* name)
{
    hti->ht = ht;
    if (name)
    {
        hti->last = hash_table_hash(name, ht->num_buckets);
        hti->index = hti->last - 1;
    }
    else
    {
        hti->last = ht->num_buckets - 1;
        hti->index = -1;
    }
    hti->element = NULL;
}

void* hash_table_iter_up(struct hash_table_iter* hti)
{
    if (!hti->ht->buckets) return NULL;

    if (hti->element) hti->element = hti->element->next;
    while (!hti->element && hti->index < hti->last)
        hti->element = hti->ht->buckets[++hti->index].first;
    return hti->element;
}
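
/* Usage sketch (hypothetical, assuming elements were added with
 * hash_table_add and 'do_something' stands in for caller code; note that a
 * bucket can hold several names, so the name must still be compared):
 *
 *   struct hash_table_iter      hti;
 *   struct hash_table_elt*      elt;
 *
 *   hash_table_iter_init(&ht, &hti, "foo");   or NULL to walk all buckets
 *   while ((elt = hash_table_iter_up(&hti)))
 *       if (!strcmp(elt->name, "foo")) do_something(elt);
 */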
431