xref: /reactos/dll/win32/dbghelp/storage.c (revision bae2bac6)
1 /*
2  * Various storage structures (pool allocation, vector, hash table)
3  *
4  * Copyright (C) 1993, Eric Youngdale.
5  *               2004, Eric Pouech
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
20  */
21 
22 
23 #include "config.h"
24 #include <assert.h>
25 #include <stdlib.h>
26 #ifndef DBGHELP_STATIC_LIB
27 #include "wine/debug.h"
28 #endif
29 
30 #include "dbghelp_private.h"
31 #ifdef USE_STATS
32 #include <math.h>
33 #endif
34 
35 WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);
36 
/* A single allocation arena: this header is immediately followed in memory
 * by the storage it hands out (see pool_alloc: ret = arena + 1).
 * [current, end) is the still-unused tail of the arena. */
struct pool_arena
{
    struct list entry;      /* link in pool->arena_list or pool->arena_full */
    char       *current;    /* next free byte inside the arena */
    char       *end;        /* one past the last usable byte */
};
43 
44 void pool_init(struct pool* a, size_t arena_size)
45 {
46     list_init( &a->arena_list );
47     list_init( &a->arena_full );
48     a->arena_size = arena_size;
49 }
50 
51 void pool_destroy(struct pool* pool)
52 {
53     struct pool_arena*  arena;
54     struct pool_arena*  next;
55 
56 #ifdef USE_STATS
57     size_t alloc, used, num;
58 
59     alloc = used = num = 0;
60     LIST_FOR_EACH_ENTRY( arena, &pool->arena_list, struct pool_arena, entry )
61     {
62         alloc += arena->end - (char *)arena;
63         used += arena->current - (char*)arena;
64         num++;
65     }
66     LIST_FOR_EACH_ENTRY( arena, &pool->arena_full, struct pool_arena, entry )
67     {
68         alloc += arena->end - (char *)arena;
69         used += arena->current - (char*)arena;
70         num++;
71     }
72     if (alloc == 0) alloc = 1;      /* avoid division by zero */
73     FIXME("STATS: pool %p has allocated %u kbytes, used %u kbytes in %u arenas, non-allocation ratio: %.2f%%\n",
74           pool, (unsigned)(alloc >> 10), (unsigned)(used >> 10), (unsigned)num,
75           100.0 - (float)used / (float)alloc * 100.0);
76 #endif
77 
78     LIST_FOR_EACH_ENTRY_SAFE( arena, next, &pool->arena_list, struct pool_arena, entry )
79     {
80         list_remove( &arena->entry );
81         HeapFree(GetProcessHeap(), 0, arena);
82     }
83     LIST_FOR_EACH_ENTRY_SAFE( arena, next, &pool->arena_full, struct pool_arena, entry )
84     {
85         list_remove( &arena->entry );
86         HeapFree(GetProcessHeap(), 0, arena);
87     }
88 }
89 
90 void* pool_alloc(struct pool* pool, size_t len)
91 {
92     struct pool_arena*  arena;
93     void*               ret;
94     size_t size;
95 
96     len = (len + 3) & ~3; /* round up size on DWORD boundary */
97 
98     LIST_FOR_EACH_ENTRY( arena, &pool->arena_list, struct pool_arena, entry )
99     {
100         if (arena->end - arena->current >= len)
101         {
102             ret = arena->current;
103             arena->current += len;
104             if (arena->current + 16 >= arena->end)
105             {
106                 list_remove( &arena->entry );
107                 list_add_tail( &pool->arena_full, &arena->entry );
108             }
109             return ret;
110         }
111     }
112 
113     size = max( pool->arena_size, len );
114     arena = HeapAlloc(GetProcessHeap(), 0, size + sizeof(struct pool_arena));
115     if (!arena) return NULL;
116 
117     ret = arena + 1;
118     arena->current = (char*)ret + len;
119     arena->end = (char*)ret + size;
120     if (arena->current + 16 >= arena->end)
121         list_add_tail( &pool->arena_full, &arena->entry );
122     else
123         list_add_head( &pool->arena_list, &arena->entry );
124     return ret;
125 }
126 
/* Duplicates NUL-terminated string 'str' into pool storage.
 * Returns NULL when the pool allocation fails. */
char* pool_strdup(struct pool* pool, const char* str)
{
    size_t  size = strlen(str) + 1;
    char*   copy = pool_alloc(pool, size);

    if (copy) memcpy(copy, str, size);
    return copy;
}
133 
134 void vector_init(struct vector* v, unsigned esz, unsigned bucket_sz)
135 {
136     v->buckets = NULL;
137     /* align size on DWORD boundaries */
138     v->elt_size = (esz + 3) & ~3;
139     switch (bucket_sz)
140     {
141     case    2: v->shift =  1; break;
142     case    4: v->shift =  2; break;
143     case    8: v->shift =  3; break;
144     case   16: v->shift =  4; break;
145     case   32: v->shift =  5; break;
146     case   64: v->shift =  6; break;
147     case  128: v->shift =  7; break;
148     case  256: v->shift =  8; break;
149     case  512: v->shift =  9; break;
150     case 1024: v->shift = 10; break;
151     default: assert(0);
152     }
153     v->num_buckets = 0;
154     v->buckets_allocated = 0;
155     v->num_elts = 0;
156 }
157 
/* Returns the number of elements currently stored in the vector. */
unsigned vector_length(const struct vector* v)
{
    return v->num_elts;
}
162 
163 void* vector_at(const struct vector* v, unsigned pos)
164 {
165     unsigned o;
166 
167     if (pos >= v->num_elts) return NULL;
168     o = pos & ((1 << v->shift) - 1);
169     return (char*)v->buckets[pos >> v->shift] + o * v->elt_size;
170 }
171 
172 void* vector_add(struct vector* v, struct pool* pool)
173 {
174     unsigned    ncurr = v->num_elts++;
175 
176     /* check that we don't wrap around */
177     assert(v->num_elts > ncurr);
178     if (ncurr == (v->num_buckets << v->shift))
179     {
180         if(v->num_buckets == v->buckets_allocated)
181         {
182             /* Double the bucket cache, so it scales well with big vectors.*/
183             unsigned    new_reserved;
184             void*       new;
185 
186             new_reserved = 2*v->buckets_allocated;
187             if(new_reserved == 0) new_reserved = 1;
188 
189             /* Don't even try to resize memory.
190                Pool datastructure is very inefficient with reallocs. */
191             new = pool_alloc(pool, new_reserved * sizeof(void*));
192             memcpy(new, v->buckets, v->buckets_allocated * sizeof(void*));
193             v->buckets = new;
194             v->buckets_allocated = new_reserved;
195         }
196         v->buckets[v->num_buckets] = pool_alloc(pool, v->elt_size << v->shift);
197         return v->buckets[v->num_buckets++];
198     }
199     return vector_at(v, ncurr);
200 }
201 
/* We construct the sparse array as two vectors (of equal size).
 * The first vector (key2index) is the lookup table between a key and
 * an index in the second vector (elements).
 * When inserting an element, it is always appended to the second vector
 * (and never moved in memory afterwards); only the first vector is
 * reordered.
 */
struct key2index
{
    unsigned long       key;    /* lookup key; entries are kept sorted by key */
    unsigned            index;  /* position of the value in the elements vector */
};
213 
214 void sparse_array_init(struct sparse_array* sa, unsigned elt_sz, unsigned bucket_sz)
215 {
216     vector_init(&sa->key2index, sizeof(struct key2index), bucket_sz);
217     vector_init(&sa->elements, elt_sz, bucket_sz);
218 }
219 
/******************************************************************
 *		sparse_array_lookup
 *
 * Binary search over the sorted key2index vector.
 * Returns the entry with the smallest key that is >= the passed key
 * (storing its position in *idx), or NULL when every stored key is
 * smaller than the passed key (then *idx is the element count, i.e.
 * the insertion point).
 * NOTE(review): uses sa->elements.num_elts as the entry count; this
 * relies on key2index and elements always having the same length,
 * which sparse_array_add maintains by appending to both.
 */
static struct key2index* sparse_array_lookup(const struct sparse_array* sa,
                                             unsigned long key, unsigned* idx)
{
    struct key2index*   pk2i;
    unsigned            low, high;

    /* empty array: insertion point is 0, nothing to return */
    if (!sa->elements.num_elts)
    {
        *idx = 0;
        return NULL;
    }
    high = sa->elements.num_elts;
    /* fast path: compare against the highest key first */
    pk2i = vector_at(&sa->key2index, high - 1);
    if (pk2i->key < key)
    {
        *idx = high;
        return NULL;
    }
    if (pk2i->key == key)
    {
        *idx = high - 1;
        return pk2i;
    }
    /* fast path: compare against the lowest key */
    low = 0;
    pk2i = vector_at(&sa->key2index, low);
    if (pk2i->key >= key)
    {
        *idx = 0;
        return pk2i;
    }
    /* now we have: sa(lowest key) < key < sa(highest key) */
    while (low < high)
    {
        *idx = (low + high) / 2;
        pk2i = vector_at(&sa->key2index, *idx);
        if (pk2i->key > key)            high = *idx;
        else if (pk2i->key < key)       low = *idx + 1;
        else                            return pk2i;
    }
    /* binary search could return exact item, we search for highest one
     * below the key
     */
    if (pk2i->key < key)
        pk2i = vector_at(&sa->key2index, ++(*idx));
    return pk2i;
}
271 
272 void*   sparse_array_find(const struct sparse_array* sa, unsigned long key)
273 {
274     unsigned            idx;
275     struct key2index*   pk2i;
276 
277     if ((pk2i = sparse_array_lookup(sa, key, &idx)) && pk2i->key == key)
278         return vector_at(&sa->elements, pk2i->index);
279     return NULL;
280 }
281 
282 void*   sparse_array_add(struct sparse_array* sa, unsigned long key,
283                          struct pool* pool)
284 {
285     unsigned            idx, i;
286     struct key2index*   pk2i;
287     struct key2index*   to;
288 
289     pk2i = sparse_array_lookup(sa, key, &idx);
290     if (pk2i && pk2i->key == key)
291     {
292         FIXME("re-adding an existing key\n");
293         return NULL;
294     }
295     to = vector_add(&sa->key2index, pool);
296     if (pk2i)
297     {
298         /* we need to shift vector's content... */
299         /* let's do it brute force... (FIXME) */
300         assert(sa->key2index.num_elts >= 2);
301         for (i = sa->key2index.num_elts - 1; i > idx; i--)
302         {
303             pk2i = vector_at(&sa->key2index, i - 1);
304             *to = *pk2i;
305             to = pk2i;
306         }
307     }
308 
309     to->key = key;
310     to->index = sa->elements.num_elts;
311 
312     return vector_add(&sa->elements, pool);
313 }
314 
/* Returns the number of elements stored in the sparse array. */
unsigned sparse_array_length(const struct sparse_array* sa)
{
    return sa->elements.num_elts;
}
319 
/* Hashes NUL-terminated 'name' into a bucket index in [0, num_buckets).
 * This is Bob Jenkins' one-at-a-time hash, reduced modulo the bucket
 * count.  Bytes are read as unsigned char so the result no longer
 * depends on the platform's char signedness (plain char sign-extended
 * bytes > 127 before).  num_buckets must be non-zero. */
static unsigned hash_table_hash(const char* name, unsigned num_buckets)
{
    const unsigned char* p = (const unsigned char*)name;
    unsigned    hash = 0;

    while (*p)
    {
        hash += *p++;
        hash += (hash << 10);
        hash ^= (hash >> 6);
    }
    /* final avalanche */
    hash += (hash << 3);
    hash ^= (hash >> 11);
    hash += (hash << 15);
    return hash % num_buckets;
}
334 
335 void hash_table_init(struct pool* pool, struct hash_table* ht, unsigned num_buckets)
336 {
337     ht->num_elts = 0;
338     ht->num_buckets = num_buckets;
339     ht->pool = pool;
340     ht->buckets = NULL;
341 }
342 
/* Tears down a hash table.  Nothing is freed here: the bucket array and
 * all elements live in the owning pool and are released by pool_destroy().
 * With USE_STATS defined, this dumps bucket-length distribution stats.
 * NOTE(review): the stats code iterates ht->buckets[i] as if it were an
 * element pointer, while hash_table_add stores chains via
 * ht->buckets[hash].first/.last — this looks bitrotted and may not compile
 * with USE_STATS; it would also fault when buckets was never allocated.
 * Verify before enabling USE_STATS. */
void hash_table_destroy(struct hash_table* ht)
{
#if defined(USE_STATS)
    int                         i;
    unsigned                    len;
    unsigned                    min = 0xffffffff, max = 0, sq = 0;
    struct hash_table_elt*      elt;
    double                      mean, variance;

    /* first pass: min / max / sum-of-squares of the chain lengths */
    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i]; elt; elt = elt->next) len++;
        if (len < min) min = len;
        if (len > max) max = len;
        sq += len * len;
    }
    mean = (double)ht->num_elts / ht->num_buckets;
    variance = (double)sq / ht->num_buckets - mean * mean;
    FIXME("STATS: elts[num:%-4u size:%u mean:%f] buckets[min:%-4u variance:%+f max:%-4u]\n",
          ht->num_elts, ht->num_buckets, mean, min, variance, max);

    /* second pass: print the names stored in (one of) the longest chains */
    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i]; elt; elt = elt->next) len++;
        if (len == max)
        {
            FIXME("Longest bucket:\n");
            for (elt = ht->buckets[i]; elt; elt = elt->next)
                FIXME("\t%s\n", elt->name);
            break;
        }

    }
#endif
}
378 
379 void hash_table_add(struct hash_table* ht, struct hash_table_elt* elt)
380 {
381     unsigned                    hash = hash_table_hash(elt->name, ht->num_buckets);
382 
383     if (!ht->buckets)
384     {
385         ht->buckets = pool_alloc(ht->pool, ht->num_buckets * sizeof(struct hash_table_bucket));
386         assert(ht->buckets);
387         memset(ht->buckets, 0, ht->num_buckets * sizeof(struct hash_table_bucket));
388     }
389 
390     /* in some cases, we need to get back the symbols of same name in the order
391      * in which they've been inserted. So insert new elements at the end of the list.
392      */
393     if (!ht->buckets[hash].first)
394     {
395         ht->buckets[hash].first = elt;
396     }
397     else
398     {
399         ht->buckets[hash].last->next = elt;
400     }
401     ht->buckets[hash].last = elt;
402     elt->next = NULL;
403     ht->num_elts++;
404 }
405 
406 void hash_table_iter_init(const struct hash_table* ht,
407                           struct hash_table_iter* hti, const char* name)
408 {
409     hti->ht = ht;
410     if (name)
411     {
412         hti->last = hash_table_hash(name, ht->num_buckets);
413         hti->index = hti->last - 1;
414     }
415     else
416     {
417         hti->last = ht->num_buckets - 1;
418         hti->index = -1;
419     }
420     hti->element = NULL;
421 }
422 
/* Returns the next element of the iteration, or NULL when exhausted.
 * Advances along the current chain first, then moves on through the
 * buckets up to (and including) hti->last. */
void* hash_table_iter_up(struct hash_table_iter* hti)
{
    /* the bucket array is created lazily; an empty table has none */
    if (!hti->ht->buckets) return NULL;

    if (hti->element) hti->element = hti->element->next;
    while (!hti->element && hti->index < hti->last)
        hti->element = hti->ht->buckets[++hti->index].first;
    return hti->element;
}
432