/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value, but not the hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized wrt writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
40  * chained to it; these two fields are unused in non-head buckets.
41  *
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
52  * acquiring their bucket lock. If they don't match, a resize has occured
53  * while the bucket spinlock was being acquired.
54  *
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
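
/*
 * Usage sketch (illustrative only): entries are opaque pointers, keyed by a
 * caller-provided 32-bit hash. struct obj, its @key field and hash_func()
 * below are hypothetical stand-ins, not part of this file:
 *
 *     static bool obj_cmp(const void *a, const void *b)
 *     {
 *         const struct obj *p = a;
 *         const struct obj *q = b;
 *
 *         return p->key == q->key;
 *     }
 *
 *     struct qht ht;
 *     struct obj *obj = ...;
 *     uint32_t hash = hash_func(obj->key);
 *
 *     qht_init(&ht, obj_cmp, 1024, QHT_MODE_AUTO_RESIZE);
 *     qht_insert(&ht, obj, hash, NULL);   -- false if an equal entry exists
 *     obj = qht_lookup(&ht, obj, hash);   -- NULL if not found
 *     qht_remove(&ht, obj, hash);         -- false if obj was not present
 *     qht_destroy(&ht);
 */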
#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

//#define QHT_DEBUG

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64 bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with atomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in atomic_read, since otherwise the compiler
 * might refetch the pointer.
 * The atomic_read()s are of course not necessary when the bucket lock is
 * held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
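
/*
 * Example: with a divisor of 8, a map with 1024 head buckets triggers an
 * upward resize once more than 1024 / 8 = 128 non-head buckets have been
 * chained to it.
 */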

static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
                                bool reset);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
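
/*
 * Worked example: on a 64-bit host (QHT_BUCKET_ENTRIES == 4), asking for
 * n_elems == 1000 yields pow2ceil(1000 / 4) == pow2ceil(250) == 256 head
 * buckets, i.e. room for 1024 entries before any chaining is needed.
 */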

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}

/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    qht_map_lock_buckets(map);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(struct qht_map *map)
{
    return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    g_assert(cmp);
    ht->cmp = cmp;
    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    atomic_rcu_set(&ht->map, map);
}

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            atomic_set(&b->hashes[i], 0);
            atomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, false);
}

static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, true);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }
    qht_do_resize_and_reset(ht, new);
    qemu_mutex_unlock(&ht->lock);

    return !!new;
}

static inline
void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (atomic_read(&b->hashes[i]) == hash) {
                /* The pointer is dereferenced before seqlock_read_retry,
                 * so (unlike qht_insert__locked) we need to use
                 * atomic_rcu_read here.
                 */
                void *p = atomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = atomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}

void *qht_lookup_custom(struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    struct qht_bucket *b;
    struct qht_map *map;
    unsigned int version;
    void *ret;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}

void *qht_lookup(struct qht *ht, const void *userp, uint32_t hash)
{
    return qht_lookup_custom(ht, userp, hash, ht->cmp);
}
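
/*
 * Example of a lookup with a custom predicate (illustrative; struct obj and
 * its fields are hypothetical). qht_lookup_custom() compares entries against
 * an arbitrary key, so callers need not build a full object just to search:
 *
 *     static bool obj_lookup_by_key(const void *p, const void *userp)
 *     {
 *         const struct obj *obj = p;
 *         const int *key = userp;
 *
 *         return obj->key == *key;
 *     }
 *
 *     int key = 42;
 *     struct obj *obj = qht_lookup_custom(&ht, &key, hash_func(key),
 *                                         obj_lookup_by_key);
 */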

/* call with head->lock held */
static void *qht_insert__locked(struct qht *ht, struct qht_map *map,
                                struct qht_bucket *head, void *p, uint32_t hash,
                                bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    atomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty slot: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        atomic_rcu_set(&prev->next, b);
    }
    /* smp_wmb() implicit in seqlock_write_begin.  */
    atomic_set(&b->hashes[i], hash);
    atomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return NULL;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qemu_mutex_trylock(&ht->lock)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(ht, new);
    }
    qemu_mutex_unlock(&ht->lock);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    void *prev;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    if (likely(prev == NULL)) {
        return true;
    }
    if (existing) {
        *existing = prev;
    }
    return false;
}

static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    atomic_set(&to->hashes[i], from->hashes[j]);
    atomic_set(&to->pointers[i], from->pointers[j]);

    atomic_set(&from->hashes[j], 0);
    atomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @head, and swap it with @orig[pos], which has
 * just been invalidated.
 */
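/*
 * For instance (hypothetical chain, QHT_BUCKET_ENTRIES == 4):
 *
 *     head: [ A B C D ] -> [ E F - - ]
 *
 * After B is removed, the last valid entry (F) is moved into the hole:
 *
 *     head: [ A F C D ] -> [ E - - - ]
 *
 * which keeps each chain densely packed, so lookups can stop at the first
 * NULL pointer.
 */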
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        atomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head,
                        const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(map, b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}

static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b,
                                   qht_iter_func_t func, void *userp)
{
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            func(ht, b->pointers[i], b->hashes[i], userp);
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map,
                                            qht_iter_func_t func, void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(ht, &map->buckets[i], func, userp);
    }
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    /* Note: @ht is only passed through to @func; ht->map is not read again */
    qht_map_iter__all_locked(ht, map, func, userp);
    qht_map_unlock_buckets(map);
}
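
/*
 * Example iterator (illustrative): count all entries in the table. The
 * callback matches qht_iter_func_t; @count is a hypothetical accumulator:
 *
 *     static void count_entry(struct qht *ht, void *p, uint32_t hash,
 *                             void *userp)
 *     {
 *         size_t *count = userp;
 *
 *         (*count)++;
 *     }
 *
 *     size_t count = 0;
 *     qht_iter(&ht, count_entry, &count);
 */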

static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    struct qht_map *new = userp;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Atomically perform a resize and/or reset.
 * Call with ht->lock held.
 */
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
{
    struct qht_map *old;

    old = ht->map;
    qht_map_lock_buckets(old);

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        qht_map_unlock_buckets(old);
        return;
    }

    g_assert(new->n_buckets != old->n_buckets);
    qht_map_iter__all_locked(ht, old, qht_map_copy, new);
    qht_map_debug__all_locked(new);

    atomic_rcu_set(&ht->map, new);
    qht_map_unlock_buckets(old);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qemu_mutex_lock(&ht->lock);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;

        new = qht_map_create(n_buckets);
        qht_do_resize(ht, new);
        ret = true;
    }
    qemu_mutex_unlock(&ht->lock);

    return ret;
}

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
{
    struct qht_map *map;
    size_t i;

    map = atomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);
    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *head = &map->buckets[i];
        struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (atomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = atomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}
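
/*
 * Example (illustrative; qdist_avg() comes from "qemu/qdist.h"):
 *
 *     struct qht_stats stats;
 *
 *     qht_statistics_init(&ht, &stats);
 *     printf("entries: %zu, avg buckets per chain: %.2f\n",
 *            stats.entries, qdist_avg(&stats.chain));
 *     qht_statistics_destroy(&stats);
 */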

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}
865