1 /* This file is included by symbol.c */
2
3 #include "id_table.h"
4
5 #ifndef ID_TABLE_DEBUG
6 #define ID_TABLE_DEBUG 0
7 #endif
8
9 #if ID_TABLE_DEBUG == 0
10 #define NDEBUG
11 #endif
12 #include "ruby_assert.h"
13
14 typedef rb_id_serial_t id_key_t;
15
16 static inline ID
key2id(id_key_t key)17 key2id(id_key_t key)
18 {
19 return rb_id_serial_to_id(key);
20 }
21
22 static inline id_key_t
id2key(ID id)23 id2key(ID id)
24 {
25 return rb_id_to_serial(id);
26 }
27
28 /* simple open addressing with quadratic probing.
29 uses mark-bit on collisions - need extra 1 bit,
30 ID is strictly 3 bits larger than rb_id_serial_t */
31
/* One hash bucket.  `key` is the symbol's serial number; a key of zero
 * marks an empty slot.  On 64-bit-VALUE builds the collision mark gets
 * its own field; on 32-bit builds it is packed into bit 0 of `key`
 * (see the ITEM_* accessors below). */
typedef struct rb_id_item {
    id_key_t key;
#if SIZEOF_VALUE == 8
    int collision;  /* 1 if an insert ever probed past this slot */
#endif
    VALUE val;
} item_t;
39
struct rb_id_table {
    int capa;      /* allocated bucket count (a power of two, or 0) */
    int num;       /* live entries */
    int used;      /* occupied non-collided slots; drives the resize
                    * heuristic in hash_table_extend() */
    item_t *items; /* bucket array of length capa (NULL when capa==0) */
};
46
#if SIZEOF_VALUE == 8
/* 64-bit VALUE: the key is stored as-is and a separate int field
 * carries the collision mark. */
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].collision)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].collision = 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    tbl->items[i].key = key;
}
#else
/* 32-bit VALUE: no room for a separate flag, so the key is stored
 * shifted left by one and bit 0 holds the collision mark. */
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key >> 1)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key > 1)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].key & 1)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].key |= 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    /* replace the key while preserving the slot's collision bit */
    tbl->items[i].key = (key << 1) | ITEM_COLLIDED(tbl, i);
}
#endif
68
/* Round a requested capacity up to the table's bucket granularity:
 * the result is always a power of two and never below 4.  Implemented
 * by smearing the high bit of (capa / 4) rightward, then adding one
 * and scaling back up. */
static inline int
round_capa(int capa)
{
    int c = capa >> 2; /* minsize is 4 */
    int shift;

    for (shift = 1; shift <= 16; shift <<= 1) {
        c |= c >> shift;
    }
    return (c + 1) << 2;
}
81
82 static struct rb_id_table *
rb_id_table_init(struct rb_id_table * tbl,int capa)83 rb_id_table_init(struct rb_id_table *tbl, int capa)
84 {
85 MEMZERO(tbl, struct rb_id_table, 1);
86 if (capa > 0) {
87 capa = round_capa(capa);
88 tbl->capa = (int)capa;
89 tbl->items = ZALLOC_N(item_t, capa);
90 }
91 return tbl;
92 }
93
94 struct rb_id_table *
rb_id_table_create(size_t capa)95 rb_id_table_create(size_t capa)
96 {
97 struct rb_id_table *tbl = ALLOC(struct rb_id_table);
98 return rb_id_table_init(tbl, (int)capa);
99 }
100
/* Release the bucket array and the table header itself.  The pointer
 * must not be used afterwards. */
void
rb_id_table_free(struct rb_id_table *tbl)
{
    xfree(tbl->items);
    xfree(tbl);
}
107
/* Drop every entry but keep the bucket array at its current capacity. */
void
rb_id_table_clear(struct rb_id_table *tbl)
{
    tbl->num = 0;
    tbl->used = 0;
    MEMZERO(tbl->items, item_t, tbl->capa);
}
115
/* Number of live entries currently stored in the table. */
size_t
rb_id_table_size(const struct rb_id_table *tbl)
{
    return (size_t)tbl->num;
}
121
/* Total heap footprint: the header struct plus the bucket array. */
size_t
rb_id_table_memsize(const struct rb_id_table *tbl)
{
    return sizeof(item_t) * tbl->capa + sizeof(struct rb_id_table);
}
127
128 static int
hash_table_index(struct rb_id_table * tbl,id_key_t key)129 hash_table_index(struct rb_id_table* tbl, id_key_t key)
130 {
131 if (tbl->capa > 0) {
132 int mask = tbl->capa - 1;
133 int ix = key & mask;
134 int d = 1;
135 while (key != ITEM_GET_KEY(tbl, ix)) {
136 if (!ITEM_COLLIDED(tbl, ix))
137 return -1;
138 ix = (ix + d) & mask;
139 d++;
140 }
141 return ix;
142 }
143 return -1;
144 }
145
146 static void
hash_table_raw_insert(struct rb_id_table * tbl,id_key_t key,VALUE val)147 hash_table_raw_insert(struct rb_id_table *tbl, id_key_t key, VALUE val)
148 {
149 int mask = tbl->capa - 1;
150 int ix = key & mask;
151 int d = 1;
152 assert(key != 0);
153 while (ITEM_KEY_ISSET(tbl, ix)) {
154 ITEM_SET_COLLIDED(tbl, ix);
155 ix = (ix + d) & mask;
156 d++;
157 }
158 tbl->num++;
159 if (!ITEM_COLLIDED(tbl, ix)) {
160 tbl->used++;
161 }
162 ITEM_SET_KEY(tbl, ix, key);
163 tbl->items[ix].val = val;
164 }
165
166 static int
hash_delete_index(struct rb_id_table * tbl,int ix)167 hash_delete_index(struct rb_id_table *tbl, int ix)
168 {
169 if (ix >= 0) {
170 if (!ITEM_COLLIDED(tbl, ix)) {
171 tbl->used--;
172 }
173 tbl->num--;
174 ITEM_SET_KEY(tbl, ix, 0);
175 tbl->items[ix].val = 0;
176 return TRUE;
177 }
178 else {
179 return FALSE;
180 }
181 }
182
183 static void
hash_table_extend(struct rb_id_table * tbl)184 hash_table_extend(struct rb_id_table* tbl)
185 {
186 if (tbl->used + (tbl->used >> 1) >= tbl->capa) {
187 int new_cap = round_capa(tbl->num + (tbl->num >> 1));
188 int i;
189 item_t* old;
190 struct rb_id_table tmp_tbl = {0, 0, 0};
191 if (new_cap < tbl->capa) {
192 new_cap = round_capa(tbl->used + (tbl->used >> 1));
193 }
194 tmp_tbl.capa = new_cap;
195 tmp_tbl.items = ZALLOC_N(item_t, new_cap);
196 for (i = 0; i < tbl->capa; i++) {
197 id_key_t key = ITEM_GET_KEY(tbl, i);
198 if (key != 0) {
199 hash_table_raw_insert(&tmp_tbl, key, tbl->items[i].val);
200 }
201 }
202 old = tbl->items;
203 *tbl = tmp_tbl;
204 xfree(old);
205 }
206 }
207
#if ID_TABLE_DEBUG && 0
/* Debug dump of every occupied bucket to stderr.  Compiled out by the
 * `&& 0` guard above.
 *
 * Fixed here: the old body read `tbl->keys`, a member that does not
 * exist (the struct stores `items`), so the function could not compile
 * if the guard was ever enabled; keys are now read through the
 * ITEM_GET_KEY accessor.  `tbl` is also cast to void* as %p requires. */
static void
hash_table_show(struct rb_id_table *tbl)
{
    const int capa = tbl->capa;
    int i;

    fprintf(stderr, "tbl: %p (capa: %d, num: %d, used: %d)\n",
            (void *)tbl, tbl->capa, tbl->num, tbl->used);
    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            const id_key_t key = ITEM_GET_KEY(tbl, i);
            fprintf(stderr, " -> [%d] %s %d\n", i, rb_id2name(key2id(key)), (int)key);
        }
    }
}
#endif
224
225 int
rb_id_table_lookup(struct rb_id_table * tbl,ID id,VALUE * valp)226 rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp)
227 {
228 id_key_t key = id2key(id);
229 int index = hash_table_index(tbl, key);
230
231 if (index >= 0) {
232 *valp = tbl->items[index].val;
233 return TRUE;
234 }
235 else {
236 return FALSE;
237 }
238 }
239
240 static int
rb_id_table_insert_key(struct rb_id_table * tbl,const id_key_t key,const VALUE val)241 rb_id_table_insert_key(struct rb_id_table *tbl, const id_key_t key, const VALUE val)
242 {
243 const int index = hash_table_index(tbl, key);
244
245 if (index >= 0) {
246 tbl->items[index].val = val;
247 }
248 else {
249 hash_table_extend(tbl);
250 hash_table_raw_insert(tbl, key, val);
251 }
252 return TRUE;
253 }
254
/* Insert or overwrite the value stored under `id`.  Always returns TRUE. */
int
rb_id_table_insert(struct rb_id_table *tbl, ID id, VALUE val)
{
    return rb_id_table_insert_key(tbl, id2key(id), val);
}
260
261 int
rb_id_table_delete(struct rb_id_table * tbl,ID id)262 rb_id_table_delete(struct rb_id_table *tbl, ID id)
263 {
264 const id_key_t key = id2key(id);
265 int index = hash_table_index(tbl, key);
266 return hash_delete_index(tbl, index);
267 }
268
269 void
rb_id_table_foreach(struct rb_id_table * tbl,rb_id_table_foreach_func_t * func,void * data)270 rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
271 {
272 int i, capa = tbl->capa;
273
274 for (i=0; i<capa; i++) {
275 if (ITEM_KEY_ISSET(tbl, i)) {
276 const id_key_t key = ITEM_GET_KEY(tbl, i);
277 enum rb_id_table_iterator_result ret = (*func)(key2id(key), tbl->items[i].val, data);
278 assert(key != 0);
279
280 if (ret == ID_TABLE_DELETE)
281 hash_delete_index(tbl, i);
282 else if (ret == ID_TABLE_STOP)
283 return;
284 }
285 }
286 }
287
288 void
rb_id_table_foreach_values(struct rb_id_table * tbl,rb_id_table_foreach_values_func_t * func,void * data)289 rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
290 {
291 int i, capa = tbl->capa;
292
293 for (i=0; i<capa; i++) {
294 if (ITEM_KEY_ISSET(tbl, i)) {
295 enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);
296
297 if (ret == ID_TABLE_DELETE)
298 hash_delete_index(tbl, i);
299 else if (ret == ID_TABLE_STOP)
300 return;
301 }
302 }
303 }
304