
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) NGINX, Inc.
 */

#include <nxt_main.h>


/*
 * The level hash consists of hierarchical levels of arrays of pointers.
 * The pointers may point to another level, a bucket, or NULL.
 * The levels and buckets must be allocated in a manner similar to
 * posix_memalign(), so that additional information can be bookkept
 * in the low bits of the pointers.
 *
 * A level is an array of pointers.  Its size is a power of 2.  Levels
 * may have different sizes, but on the same level the sizes are the same.
 * Level sizes are specified by the number of bits per level in the
 * lvlhsh->shift array.  A hash may have up to 7 levels.  There are two
 * predefined shift arrays given by the first two shift array values:
 *
 * 1) [0, 0]:  [4, 4, 4, 4, 4, 4, 4] on a 64-bit platform or
 *             [5, 5, 5, 5, 5, 5, 0] on a 32-bit platform,
 *    so the default level size is 128 bytes.
 *
 * 2) [0, 10]: [10, 4, 4, 4, 4, 4, 0] on a 64-bit platform or
 *             [10, 5, 5, 5, 5, 0, 0] on a 32-bit platform,
 *    so the default level size is 128 bytes on all levels except
 *    the first level.  The first level is 8K or 4K on 64-bit or 32-bit
 *    platforms respectively.
 *
 * All buckets in a hash are of the same size, which is a power of 2.
 * A bucket contains several entries stored and tested sequentially.
 * The bucket size should be one or two CPU cache lines; the minimum
 * allowed size is 32 bytes.  A default 128-byte bucket contains 10 64-bit
 * entries or 15 32-bit entries.  Each entry consists of a pointer to value
 * data and a 32-bit key.  If an entry value pointer is NULL, the entry is
 * free.  On a 64-bit platform entry value pointers are not aligned,
 * therefore they are accessed as two 32-bit integers.  The remaining
 * trailing space in a bucket is used as a pointer to the next bucket, and
 * this pointer is always aligned.  Although the level hash allows storing
 * a lot of values in a bucket chain, this is not optimal: a large data set
 * should be stored using several levels.
 */
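

/*
 * Illustrative sketch (kept under #if 0, not compiled): how a 32-bit key
 * hash is consumed level by level, mirroring the shift/mask arithmetic of
 * nxt_lvlhsh_level_find() and nxt_lvlhsh_level_insert() below.  The function
 * name and the "index" variable are made up for this example and are not
 * part of the lvlhsh API; the loop assumes the shift array is terminated by
 * a zero value, as the proto->shift[nlvl] checks below imply.
 */

#if 0

static void
nxt_lvlhsh_key_walk_sketch(const nxt_lvlhsh_proto_t *proto, uint32_t key_hash)
{
    uint32_t    key;
    uintptr_t   mask, index;
    nxt_uint_t  nlvl, shift;

    key = key_hash;

    for (nlvl = 0; proto->shift[nlvl] != 0; nlvl++) {
        shift = proto->shift[nlvl];
        mask = ((uintptr_t) 1 << shift) - 1;

        /* The low bits of the key select a slot in the current level. */
        index = key & mask;
        (void) index;

        /* The remaining bits are passed down to the next level. */
        key >>= shift;
    }
}

#endif
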

#define \
nxt_lvlhsh_is_bucket(p) \
    ((uintptr_t) (p) & 1)


#define \
nxt_lvlhsh_count_inc(n) \
    n = (void *) ((uintptr_t) (n) + 2)


#define \
nxt_lvlhsh_count_dec(n) \
    n = (void *) ((uintptr_t) (n) - 2)


#define \
nxt_lvlhsh_level_size(proto, nlvl) \
    ((uintptr_t) 1 << proto->shift[nlvl])


#define \
nxt_lvlhsh_level(lvl, mask) \
    (void **) ((uintptr_t) lvl & (~mask << 2))


#define \
nxt_lvlhsh_level_entries(lvl, mask) \
    ((uintptr_t) lvl & (mask << 1))


#define \
nxt_lvlhsh_store_bucket(slot, bkt) \
    slot = (void **) ((uintptr_t) bkt | 2 | 1)


#define \
nxt_lvlhsh_bucket_size(proto) \
    proto->bucket_size


#define \
nxt_lvlhsh_bucket(proto, bkt) \
    (uint32_t *) ((uintptr_t) bkt & ~(uintptr_t) proto->bucket_mask)


#define \
nxt_lvlhsh_bucket_entries(proto, bkt) \
    (((uintptr_t) bkt & (uintptr_t) proto->bucket_mask) >> 1)


#define \
nxt_lvlhsh_bucket_end(proto, bkt) \
    &bkt[proto->bucket_end]


#define \
nxt_lvlhsh_free_entry(e) \
    (!(nxt_lvlhsh_valid_entry(e)))


#define \
nxt_lvlhsh_next_bucket(proto, bkt) \
    ((void **) &bkt[proto->bucket_end])

#if (NXT_64BIT)

#define \
nxt_lvlhsh_valid_entry(e) \
    (((e)[0] | (e)[1]) != 0)


#define \
nxt_lvlhsh_entry_value(e) \
    (void *) (((uintptr_t) (e)[1] << 32) + (e)[0])


#define \
nxt_lvlhsh_set_entry_value(e, n) \
    (e)[0] = (uint32_t) (uintptr_t) n; \
    (e)[1] = (uint32_t) ((uintptr_t) n >> 32)


#define \
nxt_lvlhsh_entry_key(e) \
    (e)[2]


#define \
nxt_lvlhsh_set_entry_key(e, n) \
    (e)[2] = n

#else

#define \
nxt_lvlhsh_valid_entry(e) \
    ((e)[0] != 0)


#define \
nxt_lvlhsh_entry_value(e) \
    (void *) (e)[0]


#define \
nxt_lvlhsh_set_entry_value(e, n) \
    (e)[0] = (uint32_t) n


#define \
nxt_lvlhsh_entry_key(e) \
    (e)[1]


#define \
nxt_lvlhsh_set_entry_key(e, n) \
    (e)[1] = n

#endif
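

/*
 * Illustrative sketch (kept under #if 0, not compiled): the entry layout
 * behind the macros above.  An entry is NXT_LVLHSH_ENTRY_SIZE 32-bit words:
 * on a 64-bit platform the unaligned value pointer is split into two 32-bit
 * halves followed by the 32-bit key.  The function name and the local
 * "entry" array are hypothetical and exist only for this example.
 */

#if 0

static void
nxt_lvlhsh_entry_sketch(void *value, uint32_t key_hash)
{
    uint32_t  entry[NXT_LVLHSH_ENTRY_SIZE];

    nxt_lvlhsh_set_entry_value(entry, value);
    nxt_lvlhsh_set_entry_key(entry, key_hash);

    /* The pointer and the key are recovered from the same words. */
    if (nxt_lvlhsh_entry_value(entry) == value
        && nxt_lvlhsh_entry_key(entry) == key_hash)
    {
        /* A NULL value pointer would instead mark the entry as free. */
    }
}

#endif
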


#define NXT_LVLHSH_BUCKET_DONE  ((void *) -1)


typedef struct {
    const nxt_lvlhsh_proto_t  *proto;
    void                      *pool;
    uint32_t                  retrieve;   /* 1 bit */
} nxt_lvlhsh_peek_t;


static nxt_int_t nxt_lvlhsh_level_find(nxt_lvlhsh_query_t *lhq, void **lvl,
    uint32_t key, nxt_uint_t nlvl);
static nxt_int_t nxt_lvlhsh_bucket_find(nxt_lvlhsh_query_t *lhq, void **bkt);
static nxt_int_t nxt_lvlhsh_new_bucket(nxt_lvlhsh_query_t *lhq, void **slot);
static nxt_int_t nxt_lvlhsh_level_insert(nxt_lvlhsh_query_t *lhq,
    void **slot, uint32_t key, nxt_uint_t nlvl);
static nxt_int_t nxt_lvlhsh_bucket_insert(nxt_lvlhsh_query_t *lhq,
    void **slot, uint32_t key, nxt_int_t nlvl);
static nxt_int_t nxt_lvlhsh_convert_bucket_to_level(nxt_lvlhsh_query_t *lhq,
    void **slot, nxt_uint_t nlvl, uint32_t *bucket);
static nxt_int_t nxt_lvlhsh_level_convertion_insert(nxt_lvlhsh_query_t *lhq,
    void **parent, uint32_t key, nxt_uint_t nlvl);
static nxt_int_t nxt_lvlhsh_bucket_convertion_insert(nxt_lvlhsh_query_t *lhq,
    void **slot, uint32_t key, nxt_int_t nlvl);
static nxt_int_t nxt_lvlhsh_free_level(nxt_lvlhsh_query_t *lhq, void **level,
    nxt_uint_t size);
static nxt_int_t nxt_lvlhsh_level_delete(nxt_lvlhsh_query_t *lhq, void **slot,
    uint32_t key, nxt_uint_t nlvl);
static nxt_int_t nxt_lvlhsh_bucket_delete(nxt_lvlhsh_query_t *lhq, void **bkt);
static void *nxt_lvlhsh_level_each(nxt_lvlhsh_each_t *lhe, void **level,
    nxt_uint_t nlvl, nxt_uint_t shift);
static void *nxt_lvlhsh_bucket_each(nxt_lvlhsh_each_t *lhe);
static void *nxt_lvlhsh_level_peek(nxt_lvlhsh_peek_t *peek, void **level,
    nxt_uint_t nlvl);
static void *nxt_lvlhsh_bucket_peek(nxt_lvlhsh_peek_t *peek, void **bkt);


nxt_int_t
nxt_lvlhsh_find(nxt_lvlhsh_t *lh, nxt_lvlhsh_query_t *lhq)
{
    void  *slot;

    slot = lh->slot;

    if (nxt_fast_path(slot != NULL)) {

        if (nxt_lvlhsh_is_bucket(slot)) {
            return nxt_lvlhsh_bucket_find(lhq, slot);
        }

        return nxt_lvlhsh_level_find(lhq, slot, lhq->key_hash, 0);
    }

    return NXT_DECLINED;
}
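

/*
 * Usage sketch (kept under #if 0, not compiled): a lookup wrapper.  The
 * wrapper name is hypothetical; the proto and the way key_hash is computed
 * are application-specific.  No pool is needed since a lookup allocates
 * nothing.  Note that a real proto->test() callback normally also needs the
 * full key stored in the query to resolve 32-bit hash collisions; that
 * field is omitted from this sketch.
 */

#if 0

static void *
nxt_lvlhsh_find_sketch(nxt_lvlhsh_t *lh, const nxt_lvlhsh_proto_t *proto,
    uint32_t key_hash)
{
    nxt_lvlhsh_query_t  lhq;

    lhq.key_hash = key_hash;
    lhq.proto = proto;

    if (nxt_lvlhsh_find(lh, &lhq) == NXT_OK) {
        /* lhq.value points to the stored value data. */
        return lhq.value;
    }

    /* NXT_DECLINED: no matching entry. */
    return NULL;
}

#endif
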


static nxt_int_t
nxt_lvlhsh_level_find(nxt_lvlhsh_query_t *lhq, void **lvl, uint32_t key,
    nxt_uint_t nlvl)
{
    void        **slot;
    uintptr_t   mask;
    nxt_uint_t  shift;

    shift = lhq->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    lvl = nxt_lvlhsh_level(lvl, mask);
    slot = lvl[key & mask];

    if (slot != NULL) {

        if (nxt_lvlhsh_is_bucket(slot)) {
            return nxt_lvlhsh_bucket_find(lhq, slot);
        }

        return nxt_lvlhsh_level_find(lhq, slot, key >> shift, nlvl + 1);
    }

    return NXT_DECLINED;
}


static nxt_int_t
nxt_lvlhsh_bucket_find(nxt_lvlhsh_query_t *lhq, void **bkt)
{
    void        *value;
    uint32_t    *bucket, *e;
    nxt_uint_t  n;

    do {
        bucket = nxt_lvlhsh_bucket(lhq->proto, bkt);
        n = nxt_lvlhsh_bucket_entries(lhq->proto, bkt);
        e = bucket;

        do {
            if (nxt_lvlhsh_valid_entry(e)) {
                n--;

                if (nxt_lvlhsh_entry_key(e) == lhq->key_hash) {

                    value = nxt_lvlhsh_entry_value(e);

                    if (lhq->proto->test(lhq, value) == NXT_OK) {
                        lhq->value = value;

                        return NXT_OK;
                    }
                }
            }

            e += NXT_LVLHSH_ENTRY_SIZE;

        } while (n != 0);

        bkt = *nxt_lvlhsh_next_bucket(lhq->proto, bucket);

    } while (bkt != NULL);

    return NXT_DECLINED;
}


nxt_int_t
nxt_lvlhsh_insert(nxt_lvlhsh_t *lh, nxt_lvlhsh_query_t *lhq)
{
    uint32_t  key;

    if (nxt_fast_path(lh->slot != NULL)) {

        key = lhq->key_hash;

        if (nxt_lvlhsh_is_bucket(lh->slot)) {
            return nxt_lvlhsh_bucket_insert(lhq, &lh->slot, key, -1);
        }

        return nxt_lvlhsh_level_insert(lhq, &lh->slot, key, 0);
    }

    return nxt_lvlhsh_new_bucket(lhq, &lh->slot);
}
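

/*
 * Usage sketch (kept under #if 0, not compiled): inserting a value.  The
 * wrapper name is hypothetical; proto and pool are whatever the application
 * passes to the proto->alloc()/free() callbacks.  The query fields set here
 * (key_hash, proto, pool, value, replace) are the ones this file relies on;
 * a real proto->test() callback usually also needs the full key in the
 * query, which is omitted from this sketch.
 */

#if 0

static nxt_int_t
nxt_lvlhsh_insert_sketch(nxt_lvlhsh_t *lh, const nxt_lvlhsh_proto_t *proto,
    void *pool, uint32_t key_hash, void *value)
{
    nxt_int_t           ret;
    nxt_lvlhsh_query_t  lhq;

    lhq.key_hash = key_hash;
    lhq.proto = proto;
    lhq.pool = pool;
    lhq.value = value;
    lhq.replace = 0;        /* Keep an existing entry with the same key. */

    ret = nxt_lvlhsh_insert(lh, &lhq);

    if (ret == NXT_DECLINED) {
        /*
         * A duplicate was found and replacement was not requested;
         * lhq.value now holds the previously stored value.
         */
    }

    return ret;
}

#endif
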


static nxt_int_t
nxt_lvlhsh_new_bucket(nxt_lvlhsh_query_t *lhq, void **slot)
{
    uint32_t  *bucket;

    bucket = lhq->proto->alloc(lhq->pool, nxt_lvlhsh_bucket_size(lhq->proto));

    if (nxt_fast_path(bucket != NULL)) {

        nxt_lvlhsh_set_entry_value(bucket, lhq->value);
        nxt_lvlhsh_set_entry_key(bucket, lhq->key_hash);

        *nxt_lvlhsh_next_bucket(lhq->proto, bucket) = NULL;

        nxt_lvlhsh_store_bucket(*slot, bucket);

        return NXT_OK;
    }

    return NXT_ERROR;
}


static nxt_int_t
nxt_lvlhsh_level_insert(nxt_lvlhsh_query_t *lhq, void **parent, uint32_t key,
    nxt_uint_t nlvl)
{
    void        **slot, **lvl;
    nxt_int_t   ret;
    uintptr_t   mask;
    nxt_uint_t  shift;

    shift = lhq->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    lvl = nxt_lvlhsh_level(*parent, mask);
    slot = &lvl[key & mask];

    if (*slot != NULL) {
        key >>= shift;

        if (nxt_lvlhsh_is_bucket(*slot)) {
            return nxt_lvlhsh_bucket_insert(lhq, slot, key, nlvl);
        }

        return nxt_lvlhsh_level_insert(lhq, slot, key, nlvl + 1);
    }

    ret = nxt_lvlhsh_new_bucket(lhq, slot);

    if (nxt_fast_path(ret == NXT_OK)) {
        nxt_lvlhsh_count_inc(*parent);
    }

    return ret;
}


static nxt_int_t
nxt_lvlhsh_bucket_insert(nxt_lvlhsh_query_t *lhq, void **slot, uint32_t key,
    nxt_int_t nlvl)
{
    void                      **bkt, **vacant_bucket, *value;
    uint32_t                  *bucket, *e, *vacant_entry;
    nxt_int_t                 ret;
    uintptr_t                 n;
    const void                *new_value;
    const nxt_lvlhsh_proto_t  *proto;

    bkt = slot;
    vacant_entry = NULL;
    vacant_bucket = NULL;
    proto = lhq->proto;

    /* Search for a duplicate entry in the bucket chain. */

    do {
        bucket = nxt_lvlhsh_bucket(proto, *bkt);
        n = nxt_lvlhsh_bucket_entries(proto, *bkt);
        e = bucket;

        do {
            if (nxt_lvlhsh_valid_entry(e)) {

                if (nxt_lvlhsh_entry_key(e) == lhq->key_hash) {

                    value = nxt_lvlhsh_entry_value(e);

                    if (proto->test(lhq, value) == NXT_OK) {

                        new_value = lhq->value;
                        lhq->value = value;

                        if (lhq->replace) {
                            nxt_lvlhsh_set_entry_value(e, new_value);

                            return NXT_OK;
                        }

                        return NXT_DECLINED;
                    }
                }

                n--;

            } else {
                /*
                 * Save the vacant position of the hole in the bucket
                 * and continue to search for a duplicate entry.
                 */
                if (vacant_entry == NULL) {
                    vacant_entry = e;
                    vacant_bucket = bkt;
                }
            }

            e += NXT_LVLHSH_ENTRY_SIZE;

        } while (n != 0);

        if (e < nxt_lvlhsh_bucket_end(proto, bucket)) {
            /*
             * Save the vacant position at the incomplete bucket's end
             * and continue to search for a duplicate entry.
             */
            if (vacant_entry == NULL) {
                vacant_entry = e;
                vacant_bucket = bkt;
            }
        }

        bkt = nxt_lvlhsh_next_bucket(proto, bucket);

    } while (*bkt != NULL);

    if (vacant_entry != NULL) {
        nxt_lvlhsh_set_entry_value(vacant_entry, lhq->value);
        nxt_lvlhsh_set_entry_key(vacant_entry, lhq->key_hash);
        nxt_lvlhsh_count_inc(*vacant_bucket);

        return NXT_OK;
    }

    /* All buckets are full. */

    nlvl++;

    if (nxt_fast_path(proto->shift[nlvl] != 0)) {

        ret = nxt_lvlhsh_convert_bucket_to_level(lhq, slot, nlvl, bucket);

        if (nxt_fast_path(ret == NXT_OK)) {
            return nxt_lvlhsh_level_insert(lhq, slot, key, nlvl);
        }

        return ret;
    }

    /* The last allowed level: only buckets may be allocated here. */

    return nxt_lvlhsh_new_bucket(lhq, bkt);
}


static nxt_int_t
nxt_lvlhsh_convert_bucket_to_level(nxt_lvlhsh_query_t *lhq, void **slot,
    nxt_uint_t nlvl, uint32_t *bucket)
{
    void                      *lvl, **level;
    uint32_t                  *e, *end, key;
    nxt_int_t                 ret;
    nxt_uint_t                i, shift, size;
    nxt_lvlhsh_query_t        q;
    const nxt_lvlhsh_proto_t  *proto;

    proto = lhq->proto;
    size = nxt_lvlhsh_level_size(proto, nlvl);

    lvl = proto->alloc(lhq->pool, size * (sizeof(void *)));

    if (nxt_slow_path(lvl == NULL)) {
        return NXT_ERROR;
    }

    nxt_memzero(lvl, size * (sizeof(void *)));

    level = lvl;
    shift = 0;

    for (i = 0; i < nlvl; i++) {
        /*
         * Using SIMD operations in this trivial loop with a maximum of
         * 8 iterations may increase code size by 170 bytes.
         */
        nxt_pragma_loop_disable_vectorization;

        shift += proto->shift[i];
    }

    end = nxt_lvlhsh_bucket_end(proto, bucket);

    for (e = bucket; e < end; e += NXT_LVLHSH_ENTRY_SIZE) {

        q.proto = proto;
        q.pool = lhq->pool;
        q.value = nxt_lvlhsh_entry_value(e);
        key = nxt_lvlhsh_entry_key(e);
        q.key_hash = key;

        ret = nxt_lvlhsh_level_convertion_insert(&q, &lvl, key >> shift, nlvl);

        if (nxt_slow_path(ret != NXT_OK)) {
            return nxt_lvlhsh_free_level(lhq, level, size);
        }
    }

    *slot = lvl;

    proto->free(lhq->pool, bucket);

    return NXT_OK;
}


static nxt_int_t
nxt_lvlhsh_level_convertion_insert(nxt_lvlhsh_query_t *lhq, void **parent,
    uint32_t key, nxt_uint_t nlvl)
{
    void        **slot, **lvl;
    nxt_int_t   ret;
    uintptr_t   mask;
    nxt_uint_t  shift;

    shift = lhq->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    lvl = nxt_lvlhsh_level(*parent, mask);
    slot = &lvl[key & mask];

    if (*slot == NULL) {
        ret = nxt_lvlhsh_new_bucket(lhq, slot);

        if (nxt_fast_path(ret == NXT_OK)) {
            nxt_lvlhsh_count_inc(*parent);
        }

        return ret;
    }

    /* Only buckets can be here. */

    return nxt_lvlhsh_bucket_convertion_insert(lhq, slot, key >> shift, nlvl);
}


/*
 * The special bucket insertion procedure is required because during
 * conversion lhq->key contains garbage values and the test function
 * cannot be called.  Besides, the procedure can be simpler because
 * a new entry is inserted just after the occupied entries.
 */

static nxt_int_t
nxt_lvlhsh_bucket_convertion_insert(nxt_lvlhsh_query_t *lhq, void **slot,
    uint32_t key, nxt_int_t nlvl)
{
    void                      **bkt;
    uint32_t                  *bucket, *e;
    nxt_int_t                 ret;
    uintptr_t                 n;
    const nxt_lvlhsh_proto_t  *proto;

    bkt = slot;
    proto = lhq->proto;

    do {
        bucket = nxt_lvlhsh_bucket(proto, *bkt);
        n = nxt_lvlhsh_bucket_entries(proto, *bkt);
        e = bucket + n * NXT_LVLHSH_ENTRY_SIZE;

        if (nxt_fast_path(e < nxt_lvlhsh_bucket_end(proto, bucket))) {

            nxt_lvlhsh_set_entry_value(e, lhq->value);
            nxt_lvlhsh_set_entry_key(e, lhq->key_hash);
            nxt_lvlhsh_count_inc(*bkt);

            return NXT_OK;
        }

        bkt = nxt_lvlhsh_next_bucket(proto, bucket);

    } while (*bkt != NULL);

    /* All buckets are full. */

    nlvl++;

    if (nxt_fast_path(proto->shift[nlvl] != 0)) {

        ret = nxt_lvlhsh_convert_bucket_to_level(lhq, slot, nlvl, bucket);

        if (nxt_fast_path(ret == NXT_OK)) {
            return nxt_lvlhsh_level_insert(lhq, slot, key, nlvl);
        }

        return ret;
    }

    /* The last allowed level: only buckets may be allocated here. */

    return nxt_lvlhsh_new_bucket(lhq, bkt);
}


static nxt_int_t
nxt_lvlhsh_free_level(nxt_lvlhsh_query_t *lhq, void **level, nxt_uint_t size)
{
    nxt_uint_t                i;
    const nxt_lvlhsh_proto_t  *proto;

    proto = lhq->proto;

    for (i = 0; i < size; i++) {

        if (level[i] != NULL) {
            /*
             * Chained buckets are not possible here, since even
             * in the worst case one bucket cannot be converted
             * into two chained buckets but remains a single bucket.
             */
            proto->free(lhq->pool, nxt_lvlhsh_bucket(proto, level[i]));
        }
    }

    proto->free(lhq->pool, level);

    return NXT_ERROR;
}


nxt_int_t
nxt_lvlhsh_delete(nxt_lvlhsh_t *lh, nxt_lvlhsh_query_t *lhq)
{
    if (nxt_fast_path(lh->slot != NULL)) {

        if (nxt_lvlhsh_is_bucket(lh->slot)) {
            return nxt_lvlhsh_bucket_delete(lhq, &lh->slot);
        }

        return nxt_lvlhsh_level_delete(lhq, &lh->slot, lhq->key_hash, 0);
    }

    return NXT_DECLINED;
}


static nxt_int_t
nxt_lvlhsh_level_delete(nxt_lvlhsh_query_t *lhq, void **parent, uint32_t key,
    nxt_uint_t nlvl)
{
    void        **slot, **lvl;
    uintptr_t   mask;
    nxt_int_t   ret;
    nxt_uint_t  shift;

    shift = lhq->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    lvl = nxt_lvlhsh_level(*parent, mask);
    slot = &lvl[key & mask];

    if (*slot != NULL) {

        if (nxt_lvlhsh_is_bucket(*slot)) {
            ret = nxt_lvlhsh_bucket_delete(lhq, slot);

        } else {
            key >>= shift;
            ret = nxt_lvlhsh_level_delete(lhq, slot, key, nlvl + 1);
        }

        if (*slot == NULL) {
            nxt_lvlhsh_count_dec(*parent);

            if (nxt_lvlhsh_level_entries(*parent, mask) == 0) {
                *parent = NULL;
                lhq->proto->free(lhq->pool, lvl);
            }
        }

        return ret;
    }

    return NXT_DECLINED;
}


static nxt_int_t
nxt_lvlhsh_bucket_delete(nxt_lvlhsh_query_t *lhq, void **bkt)
{
    void                      *value;
    uint32_t                  *bucket, *e;
    uintptr_t                 n;
    const nxt_lvlhsh_proto_t  *proto;

    proto = lhq->proto;

    do {
        bucket = nxt_lvlhsh_bucket(proto, *bkt);
        n = nxt_lvlhsh_bucket_entries(proto, *bkt);
        e = bucket;

        do {
            if (nxt_lvlhsh_valid_entry(e)) {

                if (nxt_lvlhsh_entry_key(e) == lhq->key_hash) {

                    value = nxt_lvlhsh_entry_value(e);

                    if (proto->test(lhq, value) == NXT_OK) {

                        if (nxt_lvlhsh_bucket_entries(proto, *bkt) == 1) {
                            *bkt = *nxt_lvlhsh_next_bucket(proto, bucket);
                            proto->free(lhq->pool, bucket);

                        } else {
                            nxt_lvlhsh_count_dec(*bkt);
                            nxt_lvlhsh_set_entry_value(e, NULL);
                        }

                        lhq->value = value;

                        return NXT_OK;
                    }
                }

                n--;
            }

            e += NXT_LVLHSH_ENTRY_SIZE;

        } while (n != 0);

        bkt = nxt_lvlhsh_next_bucket(proto, bucket);

    } while (*bkt != NULL);

    return NXT_DECLINED;
}


void *
nxt_lvlhsh_each(nxt_lvlhsh_t *lh, nxt_lvlhsh_each_t *lhe)
{
    void  **slot;

    if (lhe->bucket == NXT_LVLHSH_BUCKET_DONE) {
        slot = lh->slot;

        if (nxt_lvlhsh_is_bucket(slot)) {
            return NULL;
        }

    } else {
        if (nxt_slow_path(lhe->bucket == NULL)) {

            /* The first iteration only. */

            slot = lh->slot;

            if (slot == NULL) {
                return NULL;
            }

            if (!nxt_lvlhsh_is_bucket(slot)) {
                lhe->current = 0;
                goto level;
            }

            lhe->bucket = nxt_lvlhsh_bucket(lhe->proto, slot);
            lhe->entries = nxt_lvlhsh_bucket_entries(lhe->proto, slot);
            lhe->entry = 0;
        }

        return nxt_lvlhsh_bucket_each(lhe);
    }

level:

    return nxt_lvlhsh_level_each(lhe, slot, 0, 0);
}
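

/*
 * Usage sketch (kept under #if 0, not compiled): iterating over all values.
 * The wrapper name is hypothetical.  A zeroed nxt_lvlhsh_each_t with only
 * the proto field set is assumed to be a valid initial iterator state, as
 * the lhe->bucket == NULL "first iteration" check above implies.
 */

#if 0

static void
nxt_lvlhsh_each_sketch(nxt_lvlhsh_t *lh, const nxt_lvlhsh_proto_t *proto)
{
    void               *value;
    nxt_lvlhsh_each_t  lhe;

    nxt_memzero(&lhe, sizeof(nxt_lvlhsh_each_t));
    lhe.proto = proto;

    for ( ;; ) {
        value = nxt_lvlhsh_each(lh, &lhe);

        if (value == NULL) {
            break;
        }

        /* Process value here. */
    }
}

#endif
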


static void *
nxt_lvlhsh_level_each(nxt_lvlhsh_each_t *lhe, void **level, nxt_uint_t nlvl,
    nxt_uint_t shift)
{
    void        **slot, *value;
    uintptr_t   mask;
    nxt_uint_t  n, level_shift;

    level_shift = lhe->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << level_shift) - 1;

    level = nxt_lvlhsh_level(level, mask);

    do {
        n = (lhe->current >> shift) & mask;
        slot = level[n];

        if (slot != NULL) {
            if (nxt_lvlhsh_is_bucket(slot)) {

                if (lhe->bucket != NXT_LVLHSH_BUCKET_DONE) {

                    lhe->bucket = nxt_lvlhsh_bucket(lhe->proto, slot);
                    lhe->entries = nxt_lvlhsh_bucket_entries(lhe->proto, slot);
                    lhe->entry = 0;

                    return nxt_lvlhsh_bucket_each(lhe);
                }

                lhe->bucket = NULL;

            } else {
                value = nxt_lvlhsh_level_each(lhe, slot, nlvl + 1,
                                              shift + level_shift);
                if (value != NULL) {
                    return value;
                }
            }
        }

        lhe->current &= ~(mask << shift);
        n = ((n + 1) & mask) << shift;
        lhe->current |= n;

    } while (n != 0);

    return NULL;
}


static nxt_noinline void *
nxt_lvlhsh_bucket_each(nxt_lvlhsh_each_t *lhe)
{
    void      *value, **next;
    uint32_t  *bucket;

    /* At least one valid entry must be present here. */
    do {
        bucket = &lhe->bucket[lhe->entry];
        lhe->entry += NXT_LVLHSH_ENTRY_SIZE;

    } while (nxt_lvlhsh_free_entry(bucket));

    value = nxt_lvlhsh_entry_value(bucket);

    lhe->entries--;

    if (lhe->entries == 0) {
        next = *nxt_lvlhsh_next_bucket(lhe->proto, lhe->bucket);

        lhe->bucket = (next == NULL) ? NXT_LVLHSH_BUCKET_DONE
                                     : nxt_lvlhsh_bucket(lhe->proto, next);

        lhe->entries = nxt_lvlhsh_bucket_entries(lhe->proto, next);
        lhe->entry = 0;
    }

    return value;
}


void *
nxt_lvlhsh_peek(nxt_lvlhsh_t *lh, const nxt_lvlhsh_proto_t *proto)
{
    void               **slot;
    nxt_lvlhsh_peek_t  peek;

    slot = lh->slot;

    if (slot != NULL) {

        peek.proto = proto;
        peek.retrieve = 0;

        if (nxt_lvlhsh_is_bucket(slot)) {
            return nxt_lvlhsh_bucket_peek(&peek, &lh->slot);
        }

        return nxt_lvlhsh_level_peek(&peek, &lh->slot, 0);
    }

    return NULL;
}


static void *
nxt_lvlhsh_level_peek(nxt_lvlhsh_peek_t *peek, void **parent, nxt_uint_t nlvl)
{
    void        **slot, **level, *value;
    uintptr_t   mask;
    nxt_uint_t  n, shift;

    shift = peek->proto->shift[nlvl];
    mask = ((uintptr_t) 1 << shift) - 1;

    level = nxt_lvlhsh_level(*parent, mask);

    n = 0;

    /* At least one valid level slot must be present here. */

    for ( ;; ) {
        slot = &level[n];

        if (*slot != NULL) {

            if (nxt_lvlhsh_is_bucket(*slot)) {
                value = nxt_lvlhsh_bucket_peek(peek, slot);

            } else {
                value = nxt_lvlhsh_level_peek(peek, slot, nlvl + 1);
            }

            /*
             * Checking peek->retrieve is not required here because
             * there cannot be empty slots during peeking.
             */
            if (*slot == NULL) {
                nxt_lvlhsh_count_dec(*parent);

                if (nxt_lvlhsh_level_entries(*parent, mask) == 0) {
                    *parent = NULL;
                    peek->proto->free(peek->pool, level);
                }
            }

            return value;
        }

        n++;
    }
}


static nxt_noinline void *
nxt_lvlhsh_bucket_peek(nxt_lvlhsh_peek_t *peek, void **bkt)
{
    void                      *value;
    uint32_t                  *bucket, *entry;
    const nxt_lvlhsh_proto_t  *proto;

    bucket = nxt_lvlhsh_bucket(peek->proto, *bkt);

    /* At least one valid entry must be present here. */

    for (entry = bucket;
         nxt_lvlhsh_free_entry(entry);
         entry += NXT_LVLHSH_ENTRY_SIZE)
    {
        /* void */
    }

    value = nxt_lvlhsh_entry_value(entry);

    if (peek->retrieve) {
        proto = peek->proto;

        if (nxt_lvlhsh_bucket_entries(proto, *bkt) == 1) {
            *bkt = *nxt_lvlhsh_next_bucket(proto, bucket);
            proto->free(peek->pool, bucket);

        } else {
            nxt_lvlhsh_count_dec(*bkt);
            nxt_lvlhsh_set_entry_value(entry, NULL);
        }
    }

    return value;
}


void *
nxt_lvlhsh_retrieve(nxt_lvlhsh_t *lh, const nxt_lvlhsh_proto_t *proto,
    void *pool)
{
    void               **slot;
    nxt_lvlhsh_peek_t  peek;

    slot = lh->slot;

    if (slot != NULL) {

        peek.proto = proto;
        peek.pool = pool;
        peek.retrieve = 1;

        if (nxt_lvlhsh_is_bucket(slot)) {
            return nxt_lvlhsh_bucket_peek(&peek, &lh->slot);
        }

        return nxt_lvlhsh_level_peek(&peek, &lh->slot, 0);
    }

    return NULL;
}
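

/*
 * Usage sketch (kept under #if 0, not compiled): draining a hash.  The
 * wrapper name is hypothetical.  nxt_lvlhsh_retrieve() removes the value it
 * returns and releases emptied buckets and levels through proto->free(), so
 * calling it until it returns NULL leaves the hash empty and freed.
 */

#if 0

static void
nxt_lvlhsh_drain_sketch(nxt_lvlhsh_t *lh, const nxt_lvlhsh_proto_t *proto,
    void *pool)
{
    void  *value;

    for ( ;; ) {
        value = nxt_lvlhsh_retrieve(lh, proto, pool);

        if (value == NULL) {
            break;
        }

        /* Dispose of the retrieved value here. */
    }
}

#endif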