/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that share the same pointer value but have different hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized wrt writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket apply to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock. If they don't match, a resize has occurred
 * while the bucket spinlock was being acquired.
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
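
/*
 * Example usage (an illustrative sketch, not part of this file): the
 * my_entry struct and the my_cmp()/my_hash() helpers are hypothetical;
 * only the qht_* calls and the RCU read-side requirement come from the
 * API described above.
 *
 *    struct my_entry {
 *        uint64_t key;
 *    };
 *
 *    static bool my_cmp(const void *a, const void *b)
 *    {
 *        const struct my_entry *ea = a;
 *        const struct my_entry *eb = b;
 *
 *        return ea->key == eb->key;
 *    }
 *
 *    struct qht ht;
 *    struct my_entry *e, needle, *found;
 *    uint32_t hash;
 *
 *    qht_init(&ht, my_cmp, 1 << 16, QHT_MODE_AUTO_RESIZE);
 *
 *    e = g_malloc(sizeof(*e));
 *    e->key = 0xcafe;
 *    hash = my_hash(e->key);             // hash must depend only on the key
 *    if (!qht_insert(&ht, e, hash, NULL)) {
 *        // an entry equal to *e (as per my_cmp) was already present
 *    }
 *
 *    needle.key = 0xcafe;
 *    rcu_read_lock();                    // lookups run under RCU
 *    found = qht_lookup(&ht, &needle, my_hash(needle.key));
 *    rcu_read_unlock();
 *
 *    qht_remove(&ht, e, hash);           // free *e only after a grace
 *                                        // period, e.g. via call_rcu
 */
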
#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

//#define QHT_DEBUG

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64 bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif

enum qht_iter_type {
    QHT_ITER_VOID,    /* do nothing; use retvoid */
    QHT_ITER_RM,      /* remove element if retbool returns true */
};

struct qht_iter {
    union {
        qht_iter_func_t retvoid;
        qht_iter_bool_func_t retbool;
    } f;
    enum qht_iter_type type;
};

/*
 * Do _not_ use qemu_mutex_[try]lock directly! Use these helpers, otherwise
 * the profiler (QSP) will deadlock.
 */
static inline void qht_lock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        qemu_mutex_lock__raw(&ht->lock);
    } else {
        qemu_mutex_lock(&ht->lock);
    }
}

static inline int qht_trylock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        return qemu_mutex_trylock__raw(&ht->lock);
    }
    return qemu_mutex_trylock(&ht->lock);
}

/* this inline is not really necessary, but it helps keep code consistent */
static inline void qht_unlock(struct qht *ht)
{
    qemu_mutex_unlock(&ht->lock);
}

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with atomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in atomic_read, since otherwise the compiler
 * might refetch the pointer.
 * atomic_read's are of course not necessary when the bucket lock is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
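
/*
 * A worked size check (assuming 4-byte QemuSpin and QemuSeqLock, which
 * holds on current hosts; hence the BUILD_BUG_ON above rather than a
 * strict equality):
 *   64-bit: 4 (lock) + 4 (sequence) + 4 * 4 (hashes) + 4 * 8 (pointers)
 *           + 8 (next) = 64 bytes
 *   32-bit: 4 (lock) + 4 (sequence) + 6 * 4 (hashes) + 6 * 4 (pointers)
 *           + 4 (next) = 60 bytes
 * which is why QHT_BUCKET_ENTRIES is 4 on 64-bit hosts and 6 on 32-bit
 * ones: either value fills the cache line without exceeding it.
 */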

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8

static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
                                bool reset);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
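
/*
 * Worked example: n_elems = 100000 with QHT_BUCKET_ENTRIES = 4 gives
 * 25000, which pow2ceil rounds up to 32768 head buckets. The resulting
 * power-of-2 count is what allows qht_map_to_bucket below to index with
 * a simple mask.
 */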

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}
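
/*
 * Since n_buckets is a power of 2, the mask above selects the hash's
 * low-order bits; e.g. with 1024 head buckets, hash 0x12345678 maps to
 * bucket 0x12345678 & 0x3ff = 0x278.
 */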

/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(const struct qht *ht,
                                            const struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    qht_map_lock_buckets(map);
    qht_unlock(ht);
    *pmap = map;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qht_unlock(ht);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(const struct qht_map *map)
{
    return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(const struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    qemu_spin_destroy(&head->lock);
    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    g_assert(cmp);
    ht->cmp = cmp;
    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    atomic_rcu_set(&ht->map, map);
}

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            atomic_set(&b->hashes[i], 0);
            atomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, false);
}

static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, true);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    qht_lock(ht);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }
    qht_do_resize_and_reset(ht, new);
    qht_unlock(ht);

    return !!new;
}

static inline
void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    const struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (atomic_read(&b->hashes[i]) == hash) {
                /* The pointer is dereferenced before seqlock_read_retry,
                 * so (unlike qht_insert__locked) we need to use
                 * atomic_rcu_read here.
                 */
                void *p = atomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = atomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}

void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    const struct qht_bucket *b;
    const struct qht_map *map;
    unsigned int version;
    void *ret;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}

void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
{
    return qht_lookup_custom(ht, userp, hash, ht->cmp);
}
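
/*
 * Example for the custom variant (an illustrative sketch; my_entry,
 * my_key_eq and my_hash are hypothetical): qht_lookup_custom() lets
 * @userp be a bare key rather than a fully-built entry, avoiding a
 * dummy object on the caller's stack:
 *
 *    static bool my_key_eq(const void *obj, const void *userp)
 *    {
 *        const struct my_entry *e = obj;
 *        const uint64_t *key = userp;
 *
 *        return e->key == *key;
 *    }
 *
 *    rcu_read_lock();
 *    e = qht_lookup_custom(&ht, &key, my_hash(key), my_key_eq);
 *    rcu_read_unlock();
 */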

/*
 * call with head->lock held
 * @ht is const since it is only used for ht->cmp()
 */
static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
                                struct qht_bucket *head, void *p, uint32_t hash,
                                bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    atomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty slot: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        atomic_rcu_set(&prev->next, b);
    }
    /* smp_wmb() implicit in seqlock_write_begin.  */
    atomic_set(&b->hashes[i], hash);
    atomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return NULL;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qht_trylock(ht)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(ht, new);
    }
    qht_unlock(ht);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    void *prev;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    if (likely(prev == NULL)) {
        return true;
    }
    if (existing) {
        *existing = prev;
    }
    return false;
}

static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    atomic_set(&to->hashes[i], from->hashes[j]);
    atomic_set(&to->pointers[i], from->pointers[j]);

    atomic_set(&from->hashes[j], 0);
    atomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @orig, and move it into @orig[pos], which has
 * just been invalidated.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        atomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so move the last one into it */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}

static inline void qht_bucket_iter(struct qht_bucket *head,
                                   const struct qht_iter *iter, void *userp)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    /* replace i with the last valid element in the bucket */
                    seqlock_write_begin(&head->sequence);
                    qht_bucket_remove_entry(b, i);
                    seqlock_write_end(&head->sequence);
                    qht_bucket_debug__locked(b);
                    /* reevaluate i, since it just got replaced */
                    i--;
                    continue;
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht_map *map,
                                            const struct qht_iter *iter,
                                            void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(&map->buckets[i], iter, userp);
    }
}

static inline void
do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    qht_map_iter__all_locked(map, iter, userp);
    qht_map_unlock_buckets(map);
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retvoid = func,
        .type = QHT_ITER_VOID,
    };

    do_qht_iter(ht, &iter, userp);
}

void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retbool = func,
        .type = QHT_ITER_RM,
    };

    do_qht_iter(ht, &iter, userp);
}
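
/*
 * Example callbacks for the two iterators above (an illustrative sketch;
 * the my_* names are hypothetical). Both run with all bucket locks held,
 * so they must not call back into the same hash table:
 *
 *    static void my_count(void *p, uint32_t hash, void *userp)
 *    {
 *        size_t *count = userp;
 *
 *        (*count)++;
 *    }
 *
 *    static bool my_is_stale(void *p, uint32_t hash, void *userp)
 *    {
 *        const uint64_t *gen = userp;
 *
 *        return ((struct my_entry *)p)->generation < *gen;  // true: remove
 *    }
 *
 *    size_t count = 0;
 *
 *    qht_iter(&ht, my_count, &count);
 *    qht_iter_remove(&ht, my_is_stale, &current_generation);
 *
 * As with qht_remove(), entries dropped by qht_iter_remove() may only be
 * freed after an RCU grace period.
 */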

struct qht_map_copy_data {
    struct qht *ht;
    struct qht_map *new;
};

static void qht_map_copy(void *p, uint32_t hash, void *userp)
{
    struct qht_map_copy_data *data = userp;
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Atomically perform a resize and/or reset.
 * Call with ht->lock held.
 */
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
{
    struct qht_map *old;
    const struct qht_iter iter = {
        .f.retvoid = qht_map_copy,
        .type = QHT_ITER_VOID,
    };
    struct qht_map_copy_data data;

    old = ht->map;
    qht_map_lock_buckets(old);

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        qht_map_unlock_buckets(old);
        return;
    }

    g_assert(new->n_buckets != old->n_buckets);
    data.ht = ht;
    data.new = new;
    qht_map_iter__all_locked(old, &iter, &data);
    qht_map_debug__all_locked(new);

    atomic_rcu_set(&ht->map, new);
    qht_map_unlock_buckets(old);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qht_lock(ht);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;

        new = qht_map_create(n_buckets);
        qht_do_resize(ht, new);
        ret = true;
    }
    qht_unlock(ht);

    return ret;
}

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
{
    const struct qht_map *map;
    int i;

    map = atomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);
    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        const struct qht_bucket *head = &map->buckets[i];
        const struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (atomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = atomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}
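
/*
 * Example (an illustrative sketch, assuming qdist_avg() from
 * "qemu/qdist.h"):
 *
 *    struct qht_stats stats;
 *
 *    qht_statistics_init(&ht, &stats);
 *    printf("entries: %zu, avg chain length: %f\n",
 *           stats.entries, qdist_avg(&stats.chain));
 *    qht_statistics_destroy(&stats);
 */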
963